Diffstat (limited to 'drivers/mmc/host/mmci.c')
-rw-r--r--  drivers/mmc/host/mmci.c | 348
1 file changed, 303 insertions(+), 45 deletions(-)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 2d6de3e03e2d..5bbb87d10251 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2,7 +2,7 @@
  * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
  *
  * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- * Copyright (C) 2010 ST-Ericsson AB.
+ * Copyright (C) 2010 ST-Ericsson SA
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -25,8 +25,10 @@
 #include <linux/clk.h>
 #include <linux/scatterlist.h>
 #include <linux/gpio.h>
-#include <linux/amba/mmci.h>
 #include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/amba/mmci.h>
 
 #include <asm/div64.h>
 #include <asm/io.h>
@@ -142,9 +144,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
 	host->mrq = NULL;
 	host->cmd = NULL;
 
-	if (mrq->data)
-		mrq->data->bytes_xfered = host->data_xfered;
-
 	/*
 	 * Need to drop the host lock here; mmc_request_done may call
 	 * back into the driver...
@@ -189,6 +188,248 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
 	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
 }
 
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface,
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+static void __devinit mmci_dma_setup(struct mmci_host *host)
+{
+	struct mmci_platform_data *plat = host->plat;
+	const char *rxname, *txname;
+	dma_cap_mask_t mask;
+
+	if (!plat || !plat->dma_filter) {
+		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
+		return;
+	}
+
+	/* Try to acquire a generic DMA engine slave channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/*
+	 * If only an RX channel is specified, the driver will
+	 * attempt to use it bidirectionally, however if it is
+	 * specified but cannot be located, DMA will be disabled.
+	 */
+	if (plat->dma_rx_param) {
+		host->dma_rx_channel = dma_request_channel(mask,
+							   plat->dma_filter,
+							   plat->dma_rx_param);
+		/* E.g. if no DMA hardware is present */
+		if (!host->dma_rx_channel)
+			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
+	}
+
+	if (plat->dma_tx_param) {
+		host->dma_tx_channel = dma_request_channel(mask,
+							   plat->dma_filter,
+							   plat->dma_tx_param);
+		if (!host->dma_tx_channel)
+			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
+	} else {
+		host->dma_tx_channel = host->dma_rx_channel;
+	}
+
+	if (host->dma_rx_channel)
+		rxname = dma_chan_name(host->dma_rx_channel);
+	else
+		rxname = "none";
+
+	if (host->dma_tx_channel)
+		txname = dma_chan_name(host->dma_tx_channel);
+	else
+		txname = "none";
+
+	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
+		 rxname, txname);
+
+	/*
+	 * Limit the maximum segment size in any SG entry according to
+	 * the parameters of the DMA engine device.
+	 */
+	if (host->dma_tx_channel) {
+		struct device *dev = host->dma_tx_channel->device->dev;
+		unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+		if (max_seg_size < host->mmc->max_seg_size)
+			host->mmc->max_seg_size = max_seg_size;
+	}
+	if (host->dma_rx_channel) {
+		struct device *dev = host->dma_rx_channel->device->dev;
+		unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+		if (max_seg_size < host->mmc->max_seg_size)
+			host->mmc->max_seg_size = max_seg_size;
+	}
+}
+
+/*
+ * This is used in __devinit or __devexit so inline it
+ * so it can be discarded.
+ */
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+	struct mmci_platform_data *plat = host->plat;
+
+	if (host->dma_rx_channel)
+		dma_release_channel(host->dma_rx_channel);
+	if (host->dma_tx_channel && plat->dma_tx_param)
+		dma_release_channel(host->dma_tx_channel);
+	host->dma_rx_channel = host->dma_tx_channel = NULL;
+}
+
+static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+	struct dma_chan *chan = host->dma_current;
+	enum dma_data_direction dir;
+	u32 status;
+	int i;
+
+	/* Wait up to 1ms for the DMA to complete */
+	for (i = 0; ; i++) {
+		status = readl(host->base + MMCISTATUS);
+		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
+			break;
+		udelay(10);
+	}
+
+	/*
+	 * Check to see whether we still have some data left in the FIFO -
+	 * this catches DMA controllers which are unable to monitor the
+	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
+	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
+	 */
+	if (status & MCI_RXDATAAVLBLMASK) {
+		dmaengine_terminate_all(chan);
+		if (!data->error)
+			data->error = -EIO;
+	}
+
+	if (data->flags & MMC_DATA_WRITE) {
+		dir = DMA_TO_DEVICE;
+	} else {
+		dir = DMA_FROM_DEVICE;
+	}
+
+	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+
+	/*
+	 * Use of DMA with scatter-gather is impossible.
+	 * Give up with DMA and switch back to PIO mode.
+	 */
+	if (status & MCI_RXDATAAVLBLMASK) {
+		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
+		mmci_dma_release(host);
+	}
+}
+
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+	dmaengine_terminate_all(host->dma_current);
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+	struct variant_data *variant = host->variant;
+	struct dma_slave_config conf = {
+		.src_addr = host->phybase + MMCIFIFO,
+		.dst_addr = host->phybase + MMCIFIFO,
+		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
+		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+	};
+	struct mmc_data *data = host->data;
+	struct dma_chan *chan;
+	struct dma_device *device;
+	struct dma_async_tx_descriptor *desc;
+	int nr_sg;
+
+	host->dma_current = NULL;
+
+	if (data->flags & MMC_DATA_READ) {
+		conf.direction = DMA_FROM_DEVICE;
+		chan = host->dma_rx_channel;
+	} else {
+		conf.direction = DMA_TO_DEVICE;
+		chan = host->dma_tx_channel;
+	}
+
+	/* If there's no DMA channel, fall back to PIO */
+	if (!chan)
+		return -EINVAL;
+
+	/* If less than or equal to the fifo size, don't bother with DMA */
+	if (host->size <= variant->fifosize)
+		return -EINVAL;
+
+	device = chan->device;
+	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	if (nr_sg == 0)
+		return -EINVAL;
+
+	dmaengine_slave_config(chan, &conf);
+	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
+					    conf.direction, DMA_CTRL_ACK);
+	if (!desc)
+		goto unmap_exit;
+
+	/* Okay, go for it. */
+	host->dma_current = chan;
+
+	dev_vdbg(mmc_dev(host->mmc),
+		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+		 data->sg_len, data->blksz, data->blocks, data->flags);
+	dmaengine_submit(desc);
+	dma_async_issue_pending(chan);
+
+	datactrl |= MCI_DPSM_DMAENABLE;
+
+	/* Trigger the DMA transfer */
+	writel(datactrl, host->base + MMCIDATACTRL);
+
+	/*
+	 * Let the MMCI say when the data is ended and it's time
+	 * to fire next DMA request. When that happens, MMCI will
+	 * call mmci_data_end()
+	 */
+	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+	       host->base + MMCIMASK0);
+	return 0;
+
+unmap_exit:
+	dmaengine_terminate_all(chan);
+	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	return -ENOMEM;
+}
+#else
+/* Blank functions if the DMA engine is not available */
+static inline void mmci_dma_setup(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+}
+
+static inline void mmci_dma_data_error(struct mmci_host *host)
+{
+}
+
+static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+	return -ENOSYS;
+}
+#endif
+
 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 {
 	struct variant_data *variant = host->variant;
@@ -202,9 +443,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 
 	host->data = data;
 	host->size = data->blksz * data->blocks;
-	host->data_xfered = 0;
-
-	mmci_init_sg(host, data);
+	data->bytes_xfered = 0;
 
 	clks = (unsigned long long)data->timeout_ns * host->cclk;
 	do_div(clks, 1000000000UL);
@@ -219,15 +458,29 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 	BUG_ON(1 << blksz_bits != data->blksz);
 
 	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
-	if (data->flags & MMC_DATA_READ) {
+
+	if (data->flags & MMC_DATA_READ)
 		datactrl |= MCI_DPSM_DIRECTION;
+
+	/*
+	 * Attempt to use DMA operation mode, if this
+	 * should fail, fall back to PIO mode
+	 */
+	if (!mmci_dma_start_data(host, datactrl))
+		return;
+
+	/* IRQ mode, map the SG list for CPU reading/writing */
+	mmci_init_sg(host, data);
+
+	if (data->flags & MMC_DATA_READ) {
 		irqmask = MCI_RXFIFOHALFFULLMASK;
 
 		/*
-		 * If we have less than a FIFOSIZE of bytes to transfer,
-		 * trigger a PIO interrupt as soon as any data is available.
+		 * If we have less than the fifo 'half-full' threshold to
+		 * transfer, trigger a PIO interrupt as soon as any data
+		 * is available.
 		 */
-		if (host->size < variant->fifosize)
+		if (host->size < variant->fifohalfsize)
 			irqmask |= MCI_RXDATAAVLBLMASK;
 	} else {
 		/*
@@ -283,49 +536,51 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
 		u32 remain, success;
 
-		/* Calculate how far we are into the transfer */
+		/* Terminate the DMA transfer */
+		if (dma_inprogress(host))
+			mmci_dma_data_error(host);
+
+		/*
+		 * Calculate how far we are into the transfer. Note that
+		 * the data counter gives the number of bytes transferred
+		 * on the MMC bus, not on the host side. On reads, this
+		 * can be as much as a FIFO-worth of data ahead. This
+		 * matters for FIFO overruns only.
+		 */
 		remain = readl(host->base + MMCIDATACNT);
 		success = data->blksz * data->blocks - remain;
 
-		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
+		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
+			status, success);
 		if (status & MCI_DATACRCFAIL) {
 			/* Last block was not successful */
-			host->data_xfered = round_down(success - 1, data->blksz);
+			success -= 1;
 			data->error = -EILSEQ;
 		} else if (status & MCI_DATATIMEOUT) {
-			host->data_xfered = round_down(success, data->blksz);
 			data->error = -ETIMEDOUT;
-		} else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
-			host->data_xfered = round_down(success, data->blksz);
+		} else if (status & MCI_TXUNDERRUN) {
+			data->error = -EIO;
+		} else if (status & MCI_RXOVERRUN) {
+			if (success > host->variant->fifosize)
+				success -= host->variant->fifosize;
+			else
+				success = 0;
 			data->error = -EIO;
 		}
-
-		/*
-		 * We hit an error condition. Ensure that any data
-		 * partially written to a page is properly coherent.
-		 */
-		if (data->flags & MMC_DATA_READ) {
-			struct sg_mapping_iter *sg_miter = &host->sg_miter;
-			unsigned long flags;
-
-			local_irq_save(flags);
-			if (sg_miter_next(sg_miter)) {
-				flush_dcache_page(sg_miter->page);
-				sg_miter_stop(sg_miter);
-			}
-			local_irq_restore(flags);
-		}
+		data->bytes_xfered = round_down(success, data->blksz);
 	}
 
 	if (status & MCI_DATABLOCKEND)
 		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
 
 	if (status & MCI_DATAEND || data->error) {
+		if (dma_inprogress(host))
+			mmci_dma_unmap(host, data);
 		mmci_stop_data(host);
 
 		if (!data->error)
 			/* The error clause is handled above, success! */
-			host->data_xfered += data->blksz * data->blocks;
+			data->bytes_xfered = data->blksz * data->blocks;
 
 		if (!data->stop) {
 			mmci_request_end(host, data->mrq);
@@ -498,9 +753,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 		if (remain)
 			break;
 
-		if (status & MCI_RXACTIVE)
-			flush_dcache_page(sg_miter->page);
-
 		status = readl(base + MMCISTATUS);
 	} while (1);
 
@@ -509,10 +761,10 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
 	local_irq_restore(flags);
 
 	/*
-	 * If we're nearing the end of the read, switch to
-	 * "any data available" mode.
+	 * If we have less than the fifo 'half-full' threshold to transfer,
+	 * trigger a PIO interrupt as soon as any data is available.
 	 */
-	if (status & MCI_RXACTIVE && host->size < variant->fifosize)
+	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
 		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
 
 	/*
@@ -713,7 +965,8 @@ static const struct mmc_host_ops mmci_ops = {
 	.get_cd = mmci_get_cd,
 };
 
-static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
+static int __devinit mmci_probe(struct amba_device *dev,
+	const struct amba_id *id)
 {
 	struct mmci_platform_data *plat = dev->dev.platform_data;
 	struct variant_data *variant = id->data;
@@ -776,6 +1029,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
 			host->mclk);
 	}
+	host->phybase = dev->res.start;
 	host->base = ioremap(dev->res.start, resource_size(&dev->res));
 	if (!host->base) {
 		ret = -ENOMEM;
@@ -903,9 +1157,12 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
 
 	amba_set_drvdata(dev, mmc);
 
-	dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n",
-		mmc_hostname(mmc), amba_part(dev), amba_rev(dev),
-		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
+	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
+		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
+		 amba_rev(dev), (unsigned long long)dev->res.start,
+		 dev->irq[0], dev->irq[1]);
+
+	mmci_dma_setup(host);
 
 	mmc_add_host(mmc);
 
@@ -952,6 +1209,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
 		writel(0, host->base + MMCICOMMAND);
 		writel(0, host->base + MMCIDATACTRL);
 
+		mmci_dma_release(host);
 		free_irq(dev->irq[0], host);
 		if (!host->singleirq)
 			free_irq(dev->irq[1], host);
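
For context on how the new hooks get used: mmci_dma_setup() above only engages when the board's struct mmci_platform_data supplies a dma_filter callback plus channel request cookies; otherwise the driver stays in PIO mode. A minimal board-file sketch follows; it is not part of this patch, and the filter function, request cookies and OCR mask are placeholder assumptions for illustration only.

#include <linux/amba/mmci.h>
#include <linux/dmaengine.h>
#include <linux/mmc/host.h>

/* Hypothetical request cookies the platform's dmaengine driver would interpret */
static unsigned int board_mmci_rx_req = 1;
static unsigned int board_mmci_tx_req = 2;

/* Hypothetical filter: accept the channel whose private data matches the cookie */
static bool board_mmci_dma_filter(struct dma_chan *chan, void *param)
{
	return chan->private == param;
}

static struct mmci_platform_data board_mmci_plat_data = {
	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,	/* example supply range */
	.dma_filter	= board_mmci_dma_filter,
	.dma_rx_param	= &board_mmci_rx_req,
	/* no dma_tx_param: mmci_dma_setup() then reuses the RX channel for TX */
};

With only dma_rx_param set, the setup code requests a single slave channel and uses it for both directions, as the comment in mmci_dma_setup() describes.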