about summary refs log tree commit diff stats
path: root/drivers/mmc
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@arm.linux.org.uk>2014-04-25 07:58:34 -0400
committerChris Ball <chris@printf.net>2014-05-22 07:26:29 -0400
commitd1e49f77d7c7b75fdc022e1d46c1549bbc91c5b7 (patch)
tree186204fee5c6e5407cac6a370bb1f1b1d08a2e55 /drivers/mmc
parentde0b65a786ae83c8f6dfb712f65b9a36af70a981 (diff)
mmc: sdhci: convert ADMA descriptors to a coherent allocation
Rather than using the streaming API, use the coherent allocator to provide this memory, thereby eliminating cache flushing of it each time we map and unmap it. This results in a 7.5% increase in transfer speed with a UHS-1 card operating in 3.3v mode at a clock of 49.5MHz. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk> Tested-by: Markus Pargmann <mpa@pengutronix.de> Tested-by: Stephen Warren <swarren@nvidia.com> Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org> Signed-off-by: Chris Ball <chris@printf.net>
Diffstat (limited to 'drivers/mmc')
-rw-r--r-- drivers/mmc/host/sdhci.c | 43
1 file changed, 22 insertions, 21 deletions
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 4f878bcfaa2d..fd20e892439a 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -44,6 +44,8 @@
44 44
45#define MAX_TUNING_LOOP 40 45#define MAX_TUNING_LOOP 40
46 46
47#define ADMA_SIZE ((128 * 2 + 1) * 4)
48
47static unsigned int debug_quirks = 0; 49static unsigned int debug_quirks = 0;
48static unsigned int debug_quirks2; 50static unsigned int debug_quirks2;
49 51
@@ -481,11 +483,6 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
481 else 483 else
482 direction = DMA_TO_DEVICE; 484 direction = DMA_TO_DEVICE;
483 485
484 /*
485 * The ADMA descriptor table is mapped further down as we
486 * need to fill it with data first.
487 */
488
489 host->align_addr = dma_map_single(mmc_dev(host->mmc), 486 host->align_addr = dma_map_single(mmc_dev(host->mmc),
490 host->align_buffer, 128 * 4, direction); 487 host->align_buffer, 128 * 4, direction);
491 if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) 488 if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
@@ -546,7 +543,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
546 * If this triggers then we have a calculation bug 543 * If this triggers then we have a calculation bug
547 * somewhere. :/ 544 * somewhere. :/
548 */ 545 */
549 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); 546 WARN_ON((desc - host->adma_desc) > ADMA_SIZE);
550 } 547 }
551 548
552 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { 549 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
@@ -574,17 +571,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
574 host->align_addr, 128 * 4, direction); 571 host->align_addr, 128 * 4, direction);
575 } 572 }
576 573
577 host->adma_addr = dma_map_single(mmc_dev(host->mmc),
578 host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
579 if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
580 goto unmap_entries;
581 BUG_ON(host->adma_addr & 0x3);
582
583 return 0; 574 return 0;
584 575
585unmap_entries:
586 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
587 data->sg_len, direction);
588unmap_align: 576unmap_align:
589 dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 577 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
590 128 * 4, direction); 578 128 * 4, direction);
@@ -609,9 +597,6 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
609 else 597 else
610 direction = DMA_TO_DEVICE; 598 direction = DMA_TO_DEVICE;
611 599
612 dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
613 (128 * 2 + 1) * 4, DMA_TO_DEVICE);
614
615 dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 600 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
616 128 * 4, direction); 601 128 * 4, direction);
617 602
@@ -2856,15 +2841,29 @@ int sdhci_add_host(struct sdhci_host *host)
2856 * (128) and potentially one alignment transfer for 2841 * (128) and potentially one alignment transfer for
2857 * each of those entries. 2842 * each of those entries.
2858 */ 2843 */
2859 host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL); 2844 host->adma_desc = dma_alloc_coherent(mmc_dev(host->mmc),
2845 ADMA_SIZE, &host->adma_addr,
2846 GFP_KERNEL);
2860 host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); 2847 host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
2861 if (!host->adma_desc || !host->align_buffer) { 2848 if (!host->adma_desc || !host->align_buffer) {
2862 kfree(host->adma_desc); 2849 dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
2850 host->adma_desc, host->adma_addr);
2863 kfree(host->align_buffer); 2851 kfree(host->align_buffer);
2864 pr_warning("%s: Unable to allocate ADMA " 2852 pr_warning("%s: Unable to allocate ADMA "
2865 "buffers. Falling back to standard DMA.\n", 2853 "buffers. Falling back to standard DMA.\n",
2866 mmc_hostname(mmc)); 2854 mmc_hostname(mmc));
2867 host->flags &= ~SDHCI_USE_ADMA; 2855 host->flags &= ~SDHCI_USE_ADMA;
2856 host->adma_desc = NULL;
2857 host->align_buffer = NULL;
2858 } else if (host->adma_addr & 3) {
2859 pr_warning("%s: unable to allocate aligned ADMA descriptor\n",
2860 mmc_hostname(mmc));
2861 host->flags &= ~SDHCI_USE_ADMA;
2862 dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
2863 host->adma_desc, host->adma_addr);
2864 kfree(host->align_buffer);
2865 host->adma_desc = NULL;
2866 host->align_buffer = NULL;
2868 } 2867 }
2869 } 2868 }
2870 2869
@@ -3342,7 +3341,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
3342 regulator_put(host->vqmmc); 3341 regulator_put(host->vqmmc);
3343 } 3342 }
3344 3343
3345 kfree(host->adma_desc); 3344 if (host->adma_desc)
3345 dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
3346 host->adma_desc, host->adma_addr);
3346 kfree(host->align_buffer); 3347 kfree(host->align_buffer);
3347 3348
3348 host->adma_desc = NULL; 3349 host->adma_desc = NULL;