author     Naveen Kumar Arepalli <naveenk@nvidia.com>   2014-05-30 12:17:39 +0530
committer  Mandar Padmawar <mpadmawar@nvidia.com>       2014-06-11 06:52:11 -0700
commit     63ecf68a3714926fe79fa60c1adc3f7e61c324f2 (patch)
tree       8b238ca7753e5cb50d9b32678c61b24fd78b0c06 /drivers/mmc
parent     006fffc2f4680b46b3ba78ecafab4a44e8ca8ed1 (diff)
mmc: sdhci: Use pre-allocated DMA buffers
Use pre-allocated DMA buffers for the ADMA descriptor table and the bounce
buffer instead of dynamic DMA mapping. This improves SDHCI driver performance
by reducing dynamic DMA mapping overhead.

Bug 1486735

Change-Id: Ic9c646437be047d33304339eccc48a825f0a8bcc
Reviewed-on: http://git-master/r/380885
Cherry-picked from commit 7ffcc4cf1a1cec42610c1b55c30b3ec28547a11e
Signed-off-by: Jinyoung Park <jinyoungp@nvidia.com>
Signed-off-by: Naveen Kumar Arepalli <naveenk@nvidia.com>
Change-Id: If850a534ba9fbfd169b4fbefd35ca5922b1d1254
Reviewed-on: http://git-master/r/416955
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Venu Byravarasu <vbyravarasu@nvidia.com>
Reviewed-by: R Raj Kumar <rrajk@nvidia.com>
Reviewed-by: Laxman Dewangan <ldewangan@nvidia.com>
(cherry picked from commit 9e58888afe4e66e83eece0a8332c8e7440bd1bcf)
Reviewed-on: http://git-master/r/419444
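The idea, as a minimal sketch of the pattern this patch introduces (the struct
demo_host, demo_alloc_bufs() and demo_issue_request() names below are
illustrative only, not part of the sdhci driver): allocate the descriptor and
bounce buffers once with dma_alloc_coherent() when the host is registered, so
the request path no longer has to dma_map_single()/dma_unmap_single() them on
every transfer; the request path is then gated on a use_dma_alloc flag, as the
hunks below show.

/*
 * Minimal sketch of "pre-allocate DMA buffers at setup time" versus
 * "map them per request".  Hypothetical names, not the sdhci code.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define DEMO_DESC_SZ	((128 * 2 + 1) * 8)	/* descriptor table size */
#define DEMO_ALIGN_SZ	(128 * 8)		/* bounce buffer size */

struct demo_host {
	struct device *dev;
	void *adma_desc;
	void *align_buffer;
	dma_addr_t adma_addr;
	dma_addr_t align_addr;
	bool use_dma_alloc;
};

/* Setup time: grab coherent buffers once; fall back to kmalloc() plus
 * per-request mapping when the device has no DMA masks configured.
 */
static int demo_alloc_bufs(struct demo_host *host)
{
	if (host->dev->dma_mask && host->dev->coherent_dma_mask) {
		host->adma_desc = dma_alloc_coherent(host->dev, DEMO_DESC_SZ,
						     &host->adma_addr,
						     GFP_KERNEL);
		if (!host->adma_desc)
			goto fallback;

		host->align_buffer = dma_alloc_coherent(host->dev,
							DEMO_ALIGN_SZ,
							&host->align_addr,
							GFP_KERNEL);
		if (!host->align_buffer) {
			dma_free_coherent(host->dev, DEMO_DESC_SZ,
					  host->adma_desc, host->adma_addr);
			host->adma_desc = NULL;
			goto fallback;
		}

		host->use_dma_alloc = true;
		return 0;
	}

fallback:
	host->adma_desc = kmalloc(DEMO_DESC_SZ, GFP_KERNEL);
	host->align_buffer = kmalloc(DEMO_ALIGN_SZ, GFP_KERNEL);
	if (!host->adma_desc || !host->align_buffer)
		return -ENOMEM;
	return 0;
}

/* Request time: with pre-allocated coherent buffers the descriptor table
 * already has a bus address, so no mapping is needed here; the fallback
 * path still maps it for each request (and must unmap it on completion).
 */
static int demo_issue_request(struct demo_host *host)
{
	if (!host->use_dma_alloc) {
		host->adma_addr = dma_map_single(host->dev, host->adma_desc,
						 DEMO_DESC_SZ, DMA_TO_DEVICE);
		if (dma_mapping_error(host->dev, host->adma_addr))
			return -EINVAL;
	}
	/* ... program host->adma_addr into the controller ... */
	return 0;
}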
Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/host/sdhci.c  78
1 file changed, 60 insertions(+), 18 deletions(-)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9168f676ec2c..8950f641deab 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -556,11 +556,13 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 	 * need to fill it with data first.
 	 */
 
-	host->align_addr = dma_map_single(mmc_dev(host->mmc),
-		host->align_buffer, 128 * 8, direction);
-	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
-		goto fail;
-	BUG_ON(host->align_addr & 0x3);
+	if (!host->use_dma_alloc) {
+		host->align_addr = dma_map_single(mmc_dev(host->mmc),
+			host->align_buffer, 128 * 8, direction);
+		if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
+			goto fail;
+		BUG_ON(host->align_addr & 0x3);
+	}
 
 	host->sg_count = dma_map_sg(mmc_dev(host->mmc),
 		data->sg, data->sg_len, direction);
@@ -656,11 +658,13 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 			host->align_addr, 128 * 8, direction);
 	}
 
-	host->adma_addr = dma_map_single(mmc_dev(host->mmc),
-		host->adma_desc, (128 * 2 + 1) * 8, DMA_TO_DEVICE);
-	if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
-		goto unmap_entries;
-	BUG_ON(host->adma_addr & 0x3);
+	if (!host->use_dma_alloc) {
+		host->adma_addr = dma_map_single(mmc_dev(host->mmc),
+			host->adma_desc, (128 * 2 + 1) * 8, DMA_TO_DEVICE);
+		if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
+			goto unmap_entries;
+		BUG_ON(host->adma_addr & 0x3);
+	}
 
 	return 0;
 
@@ -668,8 +672,9 @@ unmap_entries:
 	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
 		data->sg_len, direction);
 unmap_align:
-	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
-		128 * 8, direction);
+	if (!host->use_dma_alloc)
+		dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+			128 * 8, direction);
 fail:
 	return -EINVAL;
 }
@@ -690,11 +695,13 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
 	else
 		direction = DMA_TO_DEVICE;
 
-	dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
-		(128 * 2 + 1) * 8, DMA_TO_DEVICE);
+	if (!host->use_dma_alloc) {
+		dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
+			(128 * 2 + 1) * 8, DMA_TO_DEVICE);
 
-	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
-		128 * 8, direction);
+		dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+			128 * 8, direction);
+	}
 
 	if (data->flags & MMC_DATA_READ) {
 		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
@@ -3535,6 +3542,33 @@ int sdhci_add_host(struct sdhci_host *host)
 		 * each of those entries. Simply allocating 128 bits
 		 * for each entry
 		 */
+		if (mmc_dev(host->mmc)->dma_mask &&
+			mmc_dev(host->mmc)->coherent_dma_mask) {
+			host->adma_desc = dma_alloc_coherent(
+				mmc_dev(host->mmc), (128 * 2 + 1) * 8,
+				&host->adma_addr, GFP_KERNEL);
+			if (!host->adma_desc)
+				goto err_dma_alloc;
+
+			host->align_buffer = dma_alloc_coherent(
+				mmc_dev(host->mmc), 128 * 8,
+				&host->align_addr, GFP_KERNEL);
+			if (!host->align_buffer) {
+				dma_free_coherent(mmc_dev(host->mmc),
+					(128 * 2 + 1) * 8,
+					host->adma_desc,
+					host->adma_addr);
+				host->adma_desc = NULL;
+				goto err_dma_alloc;
+			}
+
+			host->use_dma_alloc = true;
+
+			BUG_ON(host->adma_addr & 0x3);
+			BUG_ON(host->align_addr & 0x3);
+			goto out_dma_alloc;
+		}
+err_dma_alloc:
 		host->adma_desc = kmalloc((128 * 2 + 1) * 8, GFP_KERNEL);
 		host->align_buffer = kmalloc(128 * 8, GFP_KERNEL);
 		if (!host->adma_desc || !host->align_buffer) {
@@ -3546,6 +3580,7 @@ int sdhci_add_host(struct sdhci_host *host)
 			host->flags &= ~SDHCI_USE_ADMA;
 		}
 	}
+out_dma_alloc:
 
 	/*
 	 * If we use DMA, then it's up to the caller to set the DMA
@@ -4031,8 +4066,15 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
 		regulator_put(host->vqmmc);
 	}
 
-	kfree(host->adma_desc);
-	kfree(host->align_buffer);
+	if (host->use_dma_alloc) {
+		dma_free_coherent(mmc_dev(host->mmc), (128 * 2 + 1) * 8,
+			host->adma_desc, host->adma_addr);
+		dma_free_coherent(mmc_dev(host->mmc), 128 * 8,
+			host->align_buffer, host->align_addr);
+	} else {
+		kfree(host->adma_desc);
+		kfree(host->align_buffer);
+	}
 
 	host->adma_desc = NULL;
 	host->align_buffer = NULL;