author    Sudhakar Rajashekhara <sudhakar.raj@ti.com>    2010-05-26 17:41:49 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2010-05-27 12:12:39 -0400
commit    ca2afb6dbea74ee762ae5856af7045a57a65e9c8
tree      69bda961b3624d9a783f4ee1b52e973d0b9df507 /drivers/mmc/host
parent    31f46717997a83bdf6db0dd04810c0a329eb3148
davinci: mmc: pass number of SG segments as platform data
On some platforms, such as DM355, only a few EDMA parameter slots are available for EDMA_SLOT_ANY usage. In such cases, if MMC/SD uses 16 slots for each instance of the MMC controller, very few slots are left for other modules. By passing the number of EDMA slots the MMC driver should use through platform data, the EDMA slots available for other purposes can be controlled.

Most platforms will not use this platform data variable. But on DM355, where the available EDMA resources are limited, the number of scatter-gather segments used inside the MMC driver can be 8 (passed as platform data) instead of 16. On DM355, when the number of scatter-gather segments was reduced to 8, I saw a write performance difference of about 0.25-0.4 Mbytes/sec; read performance variations were negligible.

Signed-off-by: Sudhakar Rajashekhara <sudhakar.raj@ti.com>
Acked-by: Kevin Hilman <khilman@deeprootsystems.com>
Cc: <linux-mmc@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
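For reference, the driver-side cap stays at MAX_NR_SG (16); a board that is short on EDMA PaRAM slots opts into a smaller value through its MMC platform data. Below is a minimal board-file sketch of how that could look, assuming the davinci_mmc_config platform data structure (with the new nr_sg field) and the davinci_setup_mmc() registration helper; the concrete field values here are illustrative, not taken from this patch, which only touches the driver side:

/* Board file sketch (illustrative): cap the davinci_mmc driver at 8
 * scatter-gather segments so it allocates fewer EDMA_SLOT_ANY PaRAM slots.
 */
#include <linux/init.h>
#include <mach/mmc.h>	/* struct davinci_mmc_config, davinci_setup_mmc() (assumed header) */

static struct davinci_mmc_config dm355_mmc_config = {
	.wires		= 4,		/* 4-bit data bus (illustrative) */
	.max_freq	= 50000000,	/* 50 MHz (illustrative) */
	.nr_sg		= 8,		/* new field: limit SG segments, hence EDMA slots */
};

static void __init dm355_mmc_init(void)
{
	/* Attach the config as platform data and register the MMC device. */
	davinci_setup_mmc(0, &dm355_mmc_config);
}

With nr_sg left at 0, the driver falls back to the full MAX_NR_SG, so boards that do not set the field are unaffected.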
Diffstat (limited to 'drivers/mmc/host')
-rw-r--r--  drivers/mmc/host/davinci_mmc.c  24
1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 3bd0ba294e9d..547d29c31b40 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -137,15 +137,15 @@
 
 /*
  * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
- * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
+ * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
  * for drivers with max_hw_segs == 1, making the segments bigger (64KB)
- * than the page or two that's otherwise typical. NR_SG == 16 gives at
- * least the same throughput boost, using EDMA transfer linkage instead
- * of spending CPU time copying pages.
+ * than the page or two that's otherwise typical. nr_sg (passed from
+ * platform data) == 16 gives at least the same throughput boost, using
+ * EDMA transfer linkage instead of spending CPU time copying pages.
  */
 #define MAX_CCNT	((1 << 16) - 1)
 
-#define NR_SG		16
+#define MAX_NR_SG	16
 
 static unsigned rw_threshold = 32;
 module_param(rw_threshold, uint, S_IRUGO);
@@ -192,7 +192,7 @@ struct mmc_davinci_host {
 	struct edmacc_param	tx_template;
 	struct edmacc_param	rx_template;
 	unsigned		n_link;
-	u32			links[NR_SG - 1];
+	u32			links[MAX_NR_SG - 1];
 
 	/* For PIO we walk scatterlists one segment at a time. */
 	unsigned int		sg_len;
@@ -202,6 +202,8 @@ struct mmc_davinci_host {
 	u8 version;
 	/* for ns in one cycle calculation */
 	unsigned ns_in_one_cycle;
+	/* Number of sg segments */
+	u8 nr_sg;
 #ifdef CONFIG_CPU_FREQ
 	struct notifier_block	freq_transition;
 #endif
@@ -568,6 +570,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
 
 static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
 {
+	u32 link_size;
 	int r, i;
 
 	/* Acquire master DMA write channel */
@@ -593,7 +596,8 @@ static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
 	/* Allocate parameter RAM slots, which will later be bound to a
 	 * channel as needed to handle a scatterlist.
 	 */
-	for (i = 0; i < ARRAY_SIZE(host->links); i++) {
+	link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links));
+	for (i = 0; i < link_size; i++) {
 		r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
 		if (r < 0) {
 			dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
@@ -1202,6 +1206,12 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
 
 	init_mmcsd_host(host);
 
+	if (pdata->nr_sg)
+		host->nr_sg = pdata->nr_sg - 1;
+
+	if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
+		host->nr_sg = MAX_NR_SG;
+
 	host->use_dma = use_dma;
 	host->irq = irq;
 