Diffstat (limited to 'drivers/mmc/host/davinci_mmc.c')
-rw-r--r--  drivers/mmc/host/davinci_mmc.c | 111
1 file changed, 75 insertions(+), 36 deletions(-)
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 3bd0ba294e9d..33d9f1b00862 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -137,15 +137,15 @@
 
 /*
  * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
- * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
+ * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
  * for drivers with max_hw_segs == 1, making the segments bigger (64KB)
- * than the page or two that's otherwise typical. NR_SG == 16 gives at
- * least the same throughput boost, using EDMA transfer linkage instead
- * of spending CPU time copying pages.
+ * than the page or two that's otherwise typical. nr_sg (passed from
+ * platform data) == 16 gives at least the same throughput boost, using
+ * EDMA transfer linkage instead of spending CPU time copying pages.
  */
 #define MAX_CCNT	((1 << 16) - 1)
 
-#define NR_SG		16
+#define MAX_NR_SG	16
 
 static unsigned rw_threshold = 32;
 module_param(rw_threshold, uint, S_IRUGO);
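
For a rough sense of scale (not part of the patch itself): with the default rw_threshold of 32 bytes, one EDMA segment of MAX_CCNT units covers just under 2 MiB, and MAX_NR_SG linked segments scale that by 16. A minimal user-space sketch of the arithmetic, using only the constants quoted in the hunk above:

	#include <stdio.h>

	#define MAX_CCNT	((1 << 16) - 1)	/* same constant as in the driver */

	int main(void)
	{
		unsigned rw_threshold = 32;	/* driver default, module parameter */
		unsigned max_nr_sg = 16;	/* MAX_NR_SG */

		/* one segment = at most MAX_CCNT units of rw_threshold bytes */
		unsigned long per_seg = (unsigned long)MAX_CCNT * rw_threshold;

		printf("per-segment DMA limit: %lu bytes (~%lu KiB)\n",
		       per_seg, per_seg / 1024);
		printf("%u linked segments:    ~%lu KiB per request\n",
		       max_nr_sg, per_seg * max_nr_sg / 1024);
		return 0;
	}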
@@ -171,6 +171,7 @@ struct mmc_davinci_host {
 #define DAVINCI_MMC_DATADIR_READ	1
 #define DAVINCI_MMC_DATADIR_WRITE	2
 	unsigned char data_dir;
+	unsigned char suspended;
 
 	/* buffer is used during PIO of one scatterlist segment, and
 	 * is updated along with buffer_bytes_left. bytes_left applies
@@ -192,7 +193,7 @@ struct mmc_davinci_host {
 	struct edmacc_param	tx_template;
 	struct edmacc_param	rx_template;
 	unsigned		n_link;
-	u32			links[NR_SG - 1];
+	u32			links[MAX_NR_SG - 1];
 
 	/* For PIO we walk scatterlists one segment at a time. */
 	unsigned int		sg_len;
@@ -202,6 +203,8 @@ struct mmc_davinci_host {
 	u8 version;
 	/* for ns in one cycle calculation */
 	unsigned ns_in_one_cycle;
+	/* Number of sg segments */
+	u8 nr_sg;
 #ifdef CONFIG_CPU_FREQ
 	struct notifier_block	freq_transition;
 #endif
@@ -568,6 +571,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
 
 static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
 {
+	u32 link_size;
 	int r, i;
 
 	/* Acquire master DMA write channel */
@@ -593,7 +597,8 @@ static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
 	/* Allocate parameter RAM slots, which will later be bound to a
 	 * channel as needed to handle a scatterlist.
 	 */
-	for (i = 0; i < ARRAY_SIZE(host->links); i++) {
+	link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links));
+	for (i = 0; i < link_size; i++) {
 		r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
 		if (r < 0) {
 			dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
@@ -905,19 +910,26 @@ static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
 	}
 }
 
-static void
-davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
+static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
+					  int val)
 {
 	u32 temp;
 
-	/* reset command and data state machines */
 	temp = readl(host->base + DAVINCI_MMCCTL);
-	writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST,
-		host->base + DAVINCI_MMCCTL);
+	if (val)	/* reset */
+		temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
+	else		/* enable */
+		temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
 
-	temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
-	udelay(10);
 	writel(temp, host->base + DAVINCI_MMCCTL);
+	udelay(10);
+}
+
+static void
+davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
+{
+	mmc_davinci_reset_ctrl(host, 1);
+	mmc_davinci_reset_ctrl(host, 0);
 }
 
 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
@@ -1121,15 +1133,8 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
 #endif
 static void __init init_mmcsd_host(struct mmc_davinci_host *host)
 {
-	/* DAT line portion is diabled and in reset state */
-	writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST,
-		host->base + DAVINCI_MMCCTL);
-
-	/* CMD line portion is diabled and in reset state */
-	writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST,
-		host->base + DAVINCI_MMCCTL);
 
-	udelay(10);
+	mmc_davinci_reset_ctrl(host, 1);
 
 	writel(0, host->base + DAVINCI_MMCCLK);
 	writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
@@ -1137,12 +1142,7 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host)
 	writel(0x1FFF, host->base + DAVINCI_MMCTOR);
 	writel(0xFFFF, host->base + DAVINCI_MMCTOD);
 
-	writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST,
-		host->base + DAVINCI_MMCCTL);
-	writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST,
-		host->base + DAVINCI_MMCCTL);
-
-	udelay(10);
+	mmc_davinci_reset_ctrl(host, 0);
 }
 
 static int __init davinci_mmcsd_probe(struct platform_device *pdev)
@@ -1202,6 +1202,12 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
 
 	init_mmcsd_host(host);
 
+	if (pdata->nr_sg)
+		host->nr_sg = pdata->nr_sg - 1;
+
+	if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
+		host->nr_sg = MAX_NR_SG;
+
 	host->use_dma = use_dma;
 	host->irq = irq;
 
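
The clamp above consumes a scatter-gather depth handed in through platform data. A hedged board-file sketch, assuming struct davinci_mmc_config carries a matching nr_sg field (the header path and field names follow later mainline trees and may differ here; dm365evm_mmc_config is a made-up name):

	#include <linux/mmc/host.h>
	#include <linux/platform_data/mmc-davinci.h>	/* <mach/mmc.h> on older trees */

	/* Hypothetical board-support fragment: bound the driver's DMA
	 * scatter-gather depth via platform data; probe() above caps the
	 * resulting count at MAX_NR_SG, so an oversized value is harmless. */
	static struct davinci_mmc_config dm365evm_mmc_config = {
		.wires		= 4,
		.max_freq	= 50000000,
		.caps_version	= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
		.nr_sg		= 4,
	};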
@@ -1327,32 +1333,65 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM
-static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg)
+static int davinci_mmcsd_suspend(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+	int ret;
 
-	return mmc_suspend_host(host->mmc, msg);
+	mmc_host_enable(host->mmc);
+	ret = mmc_suspend_host(host->mmc);
+	if (!ret) {
+		writel(0, host->base + DAVINCI_MMCIM);
+		mmc_davinci_reset_ctrl(host, 1);
+		mmc_host_disable(host->mmc);
+		clk_disable(host->clk);
+		host->suspended = 1;
+	} else {
+		host->suspended = 0;
+		mmc_host_disable(host->mmc);
+	}
+
+	return ret;
 }
 
-static int davinci_mmcsd_resume(struct platform_device *pdev)
+static int davinci_mmcsd_resume(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+	int ret;
+
+	if (!host->suspended)
+		return 0;
 
-	return mmc_resume_host(host->mmc);
+	clk_enable(host->clk);
+	mmc_host_enable(host->mmc);
+
+	mmc_davinci_reset_ctrl(host, 0);
+	ret = mmc_resume_host(host->mmc);
+	if (!ret)
+		host->suspended = 0;
+
+	return ret;
 }
+
+static const struct dev_pm_ops davinci_mmcsd_pm = {
+	.suspend	= davinci_mmcsd_suspend,
+	.resume		= davinci_mmcsd_resume,
+};
+
+#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
 #else
-#define davinci_mmcsd_suspend	NULL
-#define davinci_mmcsd_resume	NULL
+#define davinci_mmcsd_pm_ops NULL
 #endif
 
 static struct platform_driver davinci_mmcsd_driver = {
 	.driver		= {
 		.name	= "davinci_mmc",
 		.owner	= THIS_MODULE,
+		.pm	= davinci_mmcsd_pm_ops,
 	},
 	.remove		= __exit_p(davinci_mmcsd_remove),
-	.suspend	= davinci_mmcsd_suspend,
-	.resume		= davinci_mmcsd_resume,
 };
 
 static int __init davinci_mmcsd_init(void)
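
Side note on the dev_pm_ops conversion: trees that already provide SIMPLE_DEV_PM_OPS could express the same suspend/resume pair more compactly, at the cost of also reusing it for the hibernation callbacks and of keeping both functions visible even when CONFIG_PM is off. A sketch only, not what the patch does:

	/* Sketch: SIMPLE_DEV_PM_OPS fills suspend/resume (and the
	 * freeze/thaw/poweroff/restore slots) with the same pair. */
	static SIMPLE_DEV_PM_OPS(davinci_mmcsd_pm, davinci_mmcsd_suspend,
				 davinci_mmcsd_resume);

	static struct platform_driver davinci_mmcsd_driver = {
		.driver		= {
			.name	= "davinci_mmc",
			.owner	= THIS_MODULE,
			.pm	= &davinci_mmcsd_pm,
		},
		.remove		= __exit_p(davinci_mmcsd_remove),
	};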