author		Haavard Skinnemoen <haavard.skinnemoen@atmel.com>	2008-07-30 14:29:03 -0400
committer	Haavard Skinnemoen <haavard.skinnemoen@atmel.com>	2008-10-05 14:39:21 -0400
commit		65e8b083fc8ec303499baa1924ae032d46d29990
tree		4c3e1b4cb6c18e2abe55e590b75e97edf4243cc7 /drivers/mmc/host/atmel-mci.c
parent		965ebf33ea5afb6386f5b57cc71e6572253746b3
atmel-mci: Add experimental DMA support
This adds support for DMA transfers through the generic DMA engine
framework with the DMA slave extensions.

The driver has been tested using mmc-block and ext3fs on several SD,
SDHC and MMC+ cards. Reads and writes work fine, with read transfer
rates up to 7.5 MiB/s on fast cards with debugging disabled.

Unfortunately, the driver has been known to lock up from time to time
with DMA enabled, so DMA support is currently optional and marked
EXPERIMENTAL. However, I didn't see any problems while testing 13
different cards (MMC, SD and SDHC of different brands and sizes), so I
suspect the "Initialize BLKR before sending data transfer command" fix
that was posted earlier fixed this as well.

Signed-off-by: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Diffstat (limited to 'drivers/mmc/host/atmel-mci.c')
-rw-r--r--	drivers/mmc/host/atmel-mci.c	274
1 files changed, 258 insertions(+), 16 deletions(-)
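Note: the DMA path added below is only used when the platform supplies DMA slave data through pdata->dma_slave; otherwise atmci_probe() prints "DMA not available, using PIO" and the driver keeps using the existing PIO code. As a rough, hypothetical sketch of the board-side hookup this expects (variable names, header locations and all fields other than the dma_slave pointer are assumptions, not part of this patch):

#include <linux/atmel-mci.h>	/* struct mci_platform_data (header location assumed) */
#include <linux/dmaengine.h>	/* struct dma_slave */

/*
 * Hypothetical board-file sketch, not part of this patch: hand a
 * struct dma_slave to the driver through the MCI platform data so
 * that atmci_probe() registers a DMA client for it. The driver
 * fills in tx_reg/rx_reg itself; binding the slave to a specific
 * DMA controller (extra dma_slave fields or a controller-specific
 * wrapper struct) is platform dependent and omitted here.
 */
static struct dma_slave atmci_dma_slave;

static struct mci_platform_data mci0_data = {
	.dma_slave	= &atmci_dma_slave,
	/* per-slot configuration (detect/wp pins etc.) omitted */
};

If dma_slave is left NULL, the probe path below simply falls back to the PIO transfer code that was already in the driver.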
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index d8ab35175a53..d45dfa259386 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -11,6 +11,8 @@
 #include <linux/clk.h>
 #include <linux/debugfs.h>
 #include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/gpio.h>
 #include <linux/init.h>
@@ -33,6 +35,7 @@
 #include "atmel-mci-regs.h"
 
 #define ATMCI_DATA_ERROR_FLAGS	(MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE)
+#define ATMCI_DMA_THRESHOLD	16
 
 enum {
 	EVENT_CMD_COMPLETE = 0,
@@ -50,6 +53,14 @@ enum atmel_mci_state {
 	STATE_DATA_ERROR,
 };
 
+struct atmel_mci_dma {
+#ifdef CONFIG_MMC_ATMELMCI_DMA
+	struct dma_client		client;
+	struct dma_chan			*chan;
+	struct dma_async_tx_descriptor	*data_desc;
+#endif
+};
+
 /**
  * struct atmel_mci - MMC controller state shared between all slots
  * @lock: Spinlock protecting the queue and associated data.
@@ -62,6 +73,8 @@ enum atmel_mci_state {
  * @cmd: The command currently being sent to the card, or NULL.
  * @data: The data currently being transferred, or NULL if no data
  *	transfer is in progress.
+ * @dma: DMA client state.
+ * @data_chan: DMA channel being used for the current data transfer.
  * @cmd_status: Snapshot of SR taken upon completion of the current
  *	command. Only valid when EVENT_CMD_COMPLETE is pending.
  * @data_status: Snapshot of SR taken upon completion of the current
@@ -126,6 +139,9 @@ struct atmel_mci {
 	struct mmc_command	*cmd;
 	struct mmc_data		*data;
 
+	struct atmel_mci_dma	dma;
+	struct dma_chan		*data_chan;
+
 	u32			cmd_status;
 	u32			data_status;
 	u32			stop_cmdr;
@@ -485,6 +501,144 @@ static void send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
 	mci_writel(host, IER, MCI_CMDRDY);
 }
 
+#ifdef CONFIG_MMC_ATMELMCI_DMA
+static void atmci_dma_cleanup(struct atmel_mci *host)
+{
+	struct mmc_data		*data = host->data;
+
+	dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
+		     ((data->flags & MMC_DATA_WRITE)
+		      ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+}
+
+static void atmci_stop_dma(struct atmel_mci *host)
+{
+	struct dma_chan *chan = host->data_chan;
+
+	if (chan) {
+		chan->device->device_terminate_all(chan);
+		atmci_dma_cleanup(host);
+	} else {
+		/* Data transfer was stopped by the interrupt handler */
+		atmci_set_pending(host, EVENT_XFER_COMPLETE);
+		mci_writel(host, IER, MCI_NOTBUSY);
+	}
+}
+
+/* This function is called by the DMA driver from tasklet context. */
+static void atmci_dma_complete(void *arg)
+{
+	struct atmel_mci	*host = arg;
+	struct mmc_data		*data = host->data;
+
+	dev_vdbg(&host->pdev->dev, "DMA complete\n");
+
+	atmci_dma_cleanup(host);
+
+	/*
+	 * If the card was removed, data will be NULL. No point trying
+	 * to send the stop command or waiting for NBUSY in this case.
+	 */
+	if (data) {
+		atmci_set_pending(host, EVENT_XFER_COMPLETE);
+		tasklet_schedule(&host->tasklet);
+
+		/*
+		 * Regardless of what the documentation says, we have
+		 * to wait for NOTBUSY even after block read
+		 * operations.
+		 *
+		 * When the DMA transfer is complete, the controller
+		 * may still be reading the CRC from the card, i.e.
+		 * the data transfer is still in progress and we
+		 * haven't seen all the potential error bits yet.
+		 *
+		 * The interrupt handler will schedule a different
+		 * tasklet to finish things up when the data transfer
+		 * is completely done.
+		 *
+		 * We may not complete the mmc request here anyway
+		 * because the mmc layer may call back and cause us to
+		 * violate the "don't submit new operations from the
+		 * completion callback" rule of the dma engine
+		 * framework.
+		 */
+		mci_writel(host, IER, MCI_NOTBUSY);
+	}
+}
+
+static int
+atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
+{
+	struct dma_chan			*chan;
+	struct dma_async_tx_descriptor	*desc;
+	struct scatterlist		*sg;
+	unsigned int			i;
+	enum dma_data_direction		direction;
+
+	/*
+	 * We don't do DMA on "complex" transfers, i.e. with
+	 * non-word-aligned buffers or lengths. Also, we don't bother
+	 * with all the DMA setup overhead for short transfers.
+	 */
+	if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
+		return -EINVAL;
+	if (data->blksz & 3)
+		return -EINVAL;
+
+	for_each_sg(data->sg, sg, data->sg_len, i) {
+		if (sg->offset & 3 || sg->length & 3)
+			return -EINVAL;
+	}
+
+	/* If we don't have a channel, we can't do DMA */
+	chan = host->dma.chan;
+	if (chan) {
+		dma_chan_get(chan);
+		host->data_chan = chan;
+	}
+
+	if (!chan)
+		return -ENODEV;
+
+	if (data->flags & MMC_DATA_READ)
+		direction = DMA_FROM_DEVICE;
+	else
+		direction = DMA_TO_DEVICE;
+
+	desc = chan->device->device_prep_slave_sg(chan,
+			data->sg, data->sg_len, direction,
+			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc)
+		return -ENOMEM;
+
+	host->dma.data_desc = desc;
+	desc->callback = atmci_dma_complete;
+	desc->callback_param = host;
+	desc->tx_submit(desc);
+
+	/* Go! */
+	chan->device->device_issue_pending(chan);
+
+	return 0;
+}
+
+#else /* CONFIG_MMC_ATMELMCI_DMA */
+
+static int atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
+{
+	return -ENOSYS;
+}
+
+static void atmci_stop_dma(struct atmel_mci *host)
+{
+	/* Data transfer was stopped by the interrupt handler */
+	atmci_set_pending(host, EVENT_XFER_COMPLETE);
+	mci_writel(host, IER, MCI_NOTBUSY);
+}
+
+#endif /* CONFIG_MMC_ATMELMCI_DMA */
+
 /*
  * Returns a mask of interrupt flags to be enabled after the whole
  * request has been prepared.
@@ -500,24 +654,27 @@ static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
 	host->data = data;
 
 	iflags = ATMCI_DATA_ERROR_FLAGS;
+	if (atmci_submit_data_dma(host, data)) {
+		host->data_chan = NULL;
 
-	/*
-	 * Errata: MMC data write operation with less than 12
-	 * bytes is impossible.
-	 *
-	 * Errata: MCI Transmit Data Register (TDR) FIFO
-	 * corruption when length is not multiple of 4.
-	 */
-	if (data->blocks * data->blksz < 12
-			|| (data->blocks * data->blksz) & 3)
-		host->need_reset = true;
-
-	host->sg = data->sg;
-	host->pio_offset = 0;
-	if (data->flags & MMC_DATA_READ)
-		iflags |= MCI_RXRDY;
-	else
-		iflags |= MCI_TXRDY;
+		/*
+		 * Errata: MMC data write operation with less than 12
+		 * bytes is impossible.
+		 *
+		 * Errata: MCI Transmit Data Register (TDR) FIFO
+		 * corruption when length is not multiple of 4.
+		 */
+		if (data->blocks * data->blksz < 12
+				|| (data->blocks * data->blksz) & 3)
+			host->need_reset = true;
+
+		host->sg = data->sg;
+		host->pio_offset = 0;
+		if (data->flags & MMC_DATA_READ)
+			iflags |= MCI_RXRDY;
+		else
+			iflags |= MCI_TXRDY;
+	}
 
 	return iflags;
 }
@@ -848,6 +1005,7 @@ static void atmci_command_complete(struct atmel_mci *host,
 
 	if (cmd->data) {
 		host->data = NULL;
+		atmci_stop_dma(host);
 		mci_writel(host, IDR, MCI_NOTBUSY
 				| MCI_TXRDY | MCI_RXRDY
 				| ATMCI_DATA_ERROR_FLAGS);
@@ -917,6 +1075,7 @@ static void atmci_detect_change(unsigned long data)
 				/* fall through */
 			case STATE_SENDING_DATA:
 				mrq->data->error = -ENOMEDIUM;
+				atmci_stop_dma(host);
 				break;
 			case STATE_DATA_BUSY:
 			case STATE_DATA_ERROR:
@@ -995,6 +1154,7 @@ static void atmci_tasklet_func(unsigned long priv)
 		case STATE_SENDING_DATA:
 			if (atmci_test_and_clear_pending(host,
 						EVENT_DATA_ERROR)) {
+				atmci_stop_dma(host);
 				if (data->stop)
 					send_stop_cmd(host, data);
 				state = STATE_DATA_ERROR;
@@ -1280,6 +1440,60 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_MMC_ATMELMCI_DMA
+
+static inline struct atmel_mci *
+dma_client_to_atmel_mci(struct dma_client *client)
+{
+	return container_of(client, struct atmel_mci, dma.client);
+}
+
+static enum dma_state_client atmci_dma_event(struct dma_client *client,
+		struct dma_chan *chan, enum dma_state state)
+{
+	struct atmel_mci	*host;
+	enum dma_state_client	ret = DMA_NAK;
+
+	host = dma_client_to_atmel_mci(client);
+
+	switch (state) {
+	case DMA_RESOURCE_AVAILABLE:
+		spin_lock_bh(&host->lock);
+		if (!host->dma.chan) {
+			host->dma.chan = chan;
+			ret = DMA_ACK;
+		}
+		spin_unlock_bh(&host->lock);
+
+		if (ret == DMA_ACK)
+			dev_info(&host->pdev->dev,
+					"Using %s for DMA transfers\n",
+					chan->dev.bus_id);
+		break;
+
+	case DMA_RESOURCE_REMOVED:
+		spin_lock_bh(&host->lock);
+		if (host->dma.chan == chan) {
+			host->dma.chan = NULL;
+			ret = DMA_ACK;
+		}
+		spin_unlock_bh(&host->lock);
+
+		if (ret == DMA_ACK)
+			dev_info(&host->pdev->dev,
+					"Lost %s, falling back to PIO\n",
+					chan->dev.bus_id);
+		break;
+
+	default:
+		break;
+	}
+
+
+	return ret;
+}
+#endif /* CONFIG_MMC_ATMELMCI_DMA */
+
 static int __init atmci_init_slot(struct atmel_mci *host,
 		struct mci_slot_pdata *slot_data, unsigned int id,
 		u32 sdc_reg)
@@ -1434,6 +1648,25 @@ static int __init atmci_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_request_irq;
 
+#ifdef CONFIG_MMC_ATMELMCI_DMA
+	if (pdata->dma_slave) {
+		struct dma_slave *slave = pdata->dma_slave;
+
+		slave->tx_reg = regs->start + MCI_TDR;
+		slave->rx_reg = regs->start + MCI_RDR;
+
+		/* Try to grab a DMA channel */
+		host->dma.client.event_callback = atmci_dma_event;
+		dma_cap_set(DMA_SLAVE, host->dma.client.cap_mask);
+		host->dma.client.slave = slave;
+
+		dma_async_client_register(&host->dma.client);
+		dma_async_client_chan_request(&host->dma.client);
+	} else {
+		dev_notice(&pdev->dev, "DMA not available, using PIO\n");
+	}
+#endif /* CONFIG_MMC_ATMELMCI_DMA */
+
 	platform_set_drvdata(pdev, host);
 
 	/* We need at least one slot to succeed */
@@ -1462,6 +1695,10 @@ static int __init atmci_probe(struct platform_device *pdev)
 	return 0;
 
 err_init_slot:
+#ifdef CONFIG_MMC_ATMELMCI_DMA
+	if (pdata->dma_slave)
+		dma_async_client_unregister(&host->dma.client);
+#endif
 	free_irq(irq, host);
 err_request_irq:
 	iounmap(host->regs);
@@ -1490,6 +1727,11 @@ static int __exit atmci_remove(struct platform_device *pdev)
 	mci_readl(host, SR);
 	clk_disable(host->mck);
 
+#ifdef CONFIG_MMC_ATMELMCI_DMA
+	if (host->dma.client.slave)
+		dma_async_client_unregister(&host->dma.client);
+#endif
+
 	free_irq(platform_get_irq(pdev, 0), host);
 	iounmap(host->regs);
 