Diffstat (limited to 'drivers/mmc/host/atmel-mci.c')
-rw-r--r--	drivers/mmc/host/atmel-mci.c	527
1 file changed, 409 insertions(+), 118 deletions(-)
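Before the diff itself: this patch drops the compile-time CONFIG_MMC_ATMELMCI_DMA split, reads the controller version at probe time to fill a struct atmel_mci_caps, and then routes data transfers through three per-host function pointers (prepare_data, submit_data, stop_transfer). The sketch below is only an editor's illustration of that dispatch pattern in plain userspace C; apart from those three hook names and the caps fields, the type names and bodies are hypothetical stand-ins, not the driver's real register handling.

```c
#include <stdio.h>

/* Hypothetical stand-ins; the real driver works on struct mmc_data and MMIO. */
struct mmc_data { unsigned int blocks, blksz; };

struct atmel_mci_caps { int has_dma, has_pdc; };

struct atmel_mci {
	struct atmel_mci_caps caps;
	/* Per-host hooks, mirroring the fields the patch adds to struct atmel_mci. */
	unsigned int (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*stop_transfer)(struct atmel_mci *host);
};

/* PIO stand-ins (the fallback path when neither DMA nor PDC is available). */
static unsigned int pio_prepare(struct atmel_mci *h, struct mmc_data *d)
{
	printf("prepare %u bytes for PIO\n", d->blocks * d->blksz);
	return 0;	/* would return the interrupt mask to enable */
}
static void pio_submit(struct atmel_mci *h, struct mmc_data *d) { printf("submit via PIO\n"); }
static void pio_stop(struct atmel_mci *h) { printf("stop PIO transfer\n"); }

/* Probe-time selection, shaped like the if/else chain the patch adds to atmci_probe(). */
static void bind_transfer_ops(struct atmel_mci *host)
{
	if (host->caps.has_dma)
		printf("using DMA\n");	/* DMA hooks would be bound here */
	else if (host->caps.has_pdc)
		printf("using PDC\n");	/* PDC hooks would be bound here */
	else
		printf("no DMA, no PDC\n");
	/* For this sketch every branch falls back to the PIO stand-ins. */
	host->prepare_data = pio_prepare;
	host->submit_data = pio_submit;
	host->stop_transfer = pio_stop;
}

int main(void)
{
	struct atmel_mci host = { .caps = { .has_dma = 0, .has_pdc = 1 } };
	struct mmc_data data = { .blocks = 8, .blksz = 512 };

	bind_transfer_ops(&host);
	host.prepare_data(&host, &data);
	host.submit_data(&host, &data);
	host.stop_transfer(&host);
	return 0;
}
```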
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 87eeccc6d9a4..57596782061b 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -30,6 +30,7 @@
 
 #include <mach/atmel-mci.h>
 #include <linux/atmel-mci.h>
+#include <linux/atmel_pdc.h>
 
 #include <asm/io.h>
 #include <asm/unaligned.h>
@@ -58,18 +59,35 @@ enum atmel_mci_state {
 	STATE_DATA_ERROR,
 };
 
+enum atmci_xfer_dir {
+	XFER_RECEIVE = 0,
+	XFER_TRANSMIT,
+};
+
+enum atmci_pdc_buf {
+	PDC_FIRST_BUF = 0,
+	PDC_SECOND_BUF,
+};
+
+struct atmel_mci_caps {
+	bool	has_dma;
+	bool	has_pdc;
+	bool	has_cfg_reg;
+	bool	has_cstor_reg;
+	bool	has_highspeed;
+	bool	has_rwproof;
+};
+
 struct atmel_mci_dma {
-#ifdef CONFIG_MMC_ATMELMCI_DMA
 	struct dma_chan			*chan;
 	struct dma_async_tx_descriptor	*data_desc;
-#endif
 };
 
 /**
  * struct atmel_mci - MMC controller state shared between all slots
  * @lock: Spinlock protecting the queue and associated data.
  * @regs: Pointer to MMIO registers.
- * @sg: Scatterlist entry currently being processed by PIO code, if any.
+ * @sg: Scatterlist entry currently being processed by PIO or PDC code.
  * @pio_offset: Offset into the current scatterlist entry.
  * @cur_slot: The slot which is currently using the controller.
  * @mrq: The request currently being processed on @cur_slot,
@@ -77,6 +95,7 @@ struct atmel_mci_dma {
  * @cmd: The command currently being sent to the card, or NULL.
  * @data: The data currently being transferred, or NULL if no data
  *	transfer is in progress.
+ * @data_size: just data->blocks * data->blksz.
  * @dma: DMA client state.
  * @data_chan: DMA channel being used for the current data transfer.
  * @cmd_status: Snapshot of SR taken upon completion of the current
@@ -103,6 +122,13 @@ struct atmel_mci_dma {
  * @mck: The peripheral bus clock hooked up to the MMC controller.
  * @pdev: Platform device associated with the MMC controller.
  * @slot: Slots sharing this MMC controller.
+ * @caps: MCI capabilities depending on MCI version.
+ * @prepare_data: function to set up MCI before data transfer which
+ *	depends on MCI capabilities.
+ * @submit_data: function to start data transfer which depends on MCI
+ *	capabilities.
+ * @stop_transfer: function to stop data transfer which depends on MCI
+ *	capabilities.
  *
  * Locking
  * =======
@@ -143,6 +169,7 @@ struct atmel_mci {
 	struct mmc_request	*mrq;
 	struct mmc_command	*cmd;
 	struct mmc_data		*data;
+	unsigned int		data_size;
 
 	struct atmel_mci_dma	dma;
 	struct dma_chan		*data_chan;
@@ -167,6 +194,12 @@ struct atmel_mci {
 	struct platform_device	*pdev;
 
 	struct atmel_mci_slot	*slot[ATMCI_MAX_NR_SLOTS];
+
+	struct atmel_mci_caps	caps;
+
+	u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
+	void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
+	void (*stop_transfer)(struct atmel_mci *host);
 };
 
 /**
@@ -220,31 +253,6 @@ struct atmel_mci_slot {
 	set_bit(event, &host->pending_events)
 
 /*
- * Enable or disable features/registers based on
- * whether the processor supports them
- */
-static bool atmci_has_rwproof(void)
-{
-	if (cpu_is_at91sam9261() || cpu_is_at91rm9200())
-		return false;
-	else
-		return true;
-}
-
-/*
- * The new MCI2 module isn't 100% compatible with the old MCI module,
- * and it has a few nice features which we want to use...
- */
-static inline bool atmci_is_mci2(void)
-{
-	if (cpu_is_at91sam9g45())
-		return true;
-
-	return false;
-}
-
-
-/*
  * The debugfs stuff below is mostly optimized away when
  * CONFIG_DEBUG_FS is not set.
  */
@@ -379,7 +387,7 @@ static int atmci_regs_show(struct seq_file *s, void *v)
 			buf[ATMCI_BLKR / 4],
 			buf[ATMCI_BLKR / 4] & 0xffff,
 			(buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
-	if (atmci_is_mci2())
+	if (host->caps.has_cstor_reg)
 		seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);
 
 	/* Don't read RSPR and RDR; it will consume the data there */
@@ -387,7 +395,7 @@ static int atmci_regs_show(struct seq_file *s, void *v)
 	atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
 	atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);
 
-	if (atmci_is_mci2()) {
+	if (host->caps.has_dma) {
 		u32 val;
 
 		val = buf[ATMCI_DMA / 4];
@@ -396,6 +404,9 @@ static int atmci_regs_show(struct seq_file *s, void *v)
 			((val >> 4) & 3) ?
 			1 << (((val >> 4) & 3) + 1) : 1,
 			val & ATMCI_DMAEN ? " DMAEN" : "");
+	}
+	if (host->caps.has_cfg_reg) {
+		u32 val;
 
 		val = buf[ATMCI_CFG / 4];
 		seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
@@ -574,33 +585,109 @@ static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
 	atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
 }
 
-#ifdef CONFIG_MMC_ATMELMCI_DMA
-static void atmci_dma_cleanup(struct atmel_mci *host)
+/*
+ * Configure given PDC buffer taking care of alignment issues.
+ * Update host->data_size and host->sg.
+ */
+static void atmci_pdc_set_single_buf(struct atmel_mci *host,
+	enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
 {
-	struct mmc_data			*data = host->data;
+	u32 pointer_reg, counter_reg;
+
+	if (dir == XFER_RECEIVE) {
+		pointer_reg = ATMEL_PDC_RPR;
+		counter_reg = ATMEL_PDC_RCR;
+	} else {
+		pointer_reg = ATMEL_PDC_TPR;
+		counter_reg = ATMEL_PDC_TCR;
+	}
+
+	if (buf_nb == PDC_SECOND_BUF) {
+		pointer_reg += 0x10;
+		counter_reg += 0x10;
+	}
+
+	atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
+	if (host->data_size <= PAGE_SIZE) {
+		if (host->data_size & 0x3) {
+			/* If size is not a multiple of 4, transfer in bytes */
+			atmci_writel(host, counter_reg, host->data_size);
+			atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
+		} else {
+			/* Else, transfer 32-bit words */
+			atmci_writel(host, counter_reg, host->data_size / 4);
+		}
+		host->data_size = 0;
+	} else {
+		/* We assume the size of a page is 32-bit aligned */
+		atmci_writel(host, counter_reg, PAGE_SIZE / 4);
+		host->data_size -= PAGE_SIZE;
+		if (host->data_size)
+			host->sg = sg_next(host->sg);
+	}
+}
+
+/*
+ * Configure PDC buffer according to the data size, i.e. configuring one or two
+ * buffers. Don't use this function if you want to configure only the second
+ * buffer. In this case, use atmci_pdc_set_single_buf.
+ */
+static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
+{
+	atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
+	if (host->data_size)
+		atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
+}
+
+/*
+ * Unmap sg lists, called when transfer is finished.
+ */
+static void atmci_pdc_cleanup(struct atmel_mci *host)
+{
+	struct mmc_data			*data = host->data;
 
 	if (data)
-		dma_unmap_sg(host->dma.chan->device->dev,
+		dma_unmap_sg(&host->pdev->dev,
 				data->sg, data->sg_len,
 				((data->flags & MMC_DATA_WRITE)
 				 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
 }
 
-static void atmci_stop_dma(struct atmel_mci *host)
+/*
+ * Disable PDC transfers. Update pending flags to EVENT_XFER_COMPLETE after
+ * having received ATMCI_TXBUFE or ATMCI_RXBUFF interrupt. Enable ATMCI_NOTBUSY
+ * interrupt needed for both transfer directions.
+ */
+static void atmci_pdc_complete(struct atmel_mci *host)
 {
-	struct dma_chan *chan = host->data_chan;
+	atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
+	atmci_pdc_cleanup(host);
 
-	if (chan) {
-		dmaengine_terminate_all(chan);
-		atmci_dma_cleanup(host);
-	} else {
-		/* Data transfer was stopped by the interrupt handler */
+	/*
+	 * If the card was removed, data will be NULL. No point trying
+	 * to send the stop command or waiting for NBUSY in this case.
+	 */
+	if (host->data) {
 		atmci_set_pending(host, EVENT_XFER_COMPLETE);
+		tasklet_schedule(&host->tasklet);
 		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
 	}
 }
 
-/* This function is called by the DMA driver from tasklet context. */
+static void atmci_dma_cleanup(struct atmel_mci *host)
+{
+	struct mmc_data			*data = host->data;
+
+	if (data)
+		dma_unmap_sg(host->dma.chan->device->dev,
+				data->sg, data->sg_len,
+				((data->flags & MMC_DATA_WRITE)
+				 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+}
+
+/*
+ * This function is called by the DMA driver from tasklet context.
+ */
 static void atmci_dma_complete(void *arg)
 {
 	struct atmel_mci	*host = arg;
@@ -608,7 +695,7 @@ static void atmci_dma_complete(void *arg)
 
 	dev_vdbg(&host->pdev->dev, "DMA complete\n");
 
-	if (atmci_is_mci2())
+	if (host->caps.has_dma)
 		/* Disable DMA hardware handshaking on MCI */
 		atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
 
@@ -646,7 +733,90 @@ static void atmci_dma_complete(void *arg)
 	}
 }
 
-static int
+/*
+ * Returns a mask of interrupt flags to be enabled after the whole
+ * request has been prepared.
+ */
+static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
+{
+	u32 iflags;
+
+	data->error = -EINPROGRESS;
+
+	host->sg = data->sg;
+	host->data = data;
+	host->data_chan = NULL;
+
+	iflags = ATMCI_DATA_ERROR_FLAGS;
+
+	/*
+	 * Errata: MMC data write operation with less than 12
+	 * bytes is impossible.
+	 *
+	 * Errata: MCI Transmit Data Register (TDR) FIFO
+	 * corruption when length is not multiple of 4.
+	 */
+	if (data->blocks * data->blksz < 12
+			|| (data->blocks * data->blksz) & 3)
+		host->need_reset = true;
+
+	host->pio_offset = 0;
+	if (data->flags & MMC_DATA_READ)
+		iflags |= ATMCI_RXRDY;
+	else
+		iflags |= ATMCI_TXRDY;
+
+	return iflags;
+}
+
+/*
+ * Set interrupt flags and set block length into the MCI mode register even
+ * if this value is also accessible in the MCI block register. It seems to be
+ * necessary before the High Speed MCI version. It also maps the sg list and
+ * configures the PDC registers.
+ */
+static u32
+atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
+{
+	u32 iflags, tmp;
+	unsigned int sg_len;
+	enum dma_data_direction dir;
+
+	data->error = -EINPROGRESS;
+
+	host->data = data;
+	host->sg = data->sg;
+	iflags = ATMCI_DATA_ERROR_FLAGS;
+
+	/* Enable pdc mode */
+	atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
+
+	if (data->flags & MMC_DATA_READ) {
+		dir = DMA_FROM_DEVICE;
+		iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
+	} else {
+		dir = DMA_TO_DEVICE;
+		iflags |= ATMCI_ENDTX | ATMCI_TXBUFE;
+	}
+
+	/* Set BLKLEN */
+	tmp = atmci_readl(host, ATMCI_MR);
+	tmp &= 0x0000ffff;
+	tmp |= ATMCI_BLKLEN(data->blksz);
+	atmci_writel(host, ATMCI_MR, tmp);
+
+	/* Configure PDC */
+	host->data_size = data->blocks * data->blksz;
+	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
+	BUG_ON(sg_len < host->data_size / PAGE_SIZE);
+	if (host->data_size)
+		atmci_pdc_set_both_buf(host,
+				((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
+
+	return iflags;
+}
+
+static u32
 atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 {
 	struct dma_chan			*chan;
@@ -655,20 +825,29 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 	unsigned int			i;
 	enum dma_data_direction		direction;
 	unsigned int			sglen;
+	u32 iflags;
+
+	data->error = -EINPROGRESS;
+
+	WARN_ON(host->data);
+	host->sg = NULL;
+	host->data = data;
+
+	iflags = ATMCI_DATA_ERROR_FLAGS;
 
 	/*
 	 * We don't do DMA on "complex" transfers, i.e. with
 	 * non-word-aligned buffers or lengths. Also, we don't bother
 	 * with all the DMA setup overhead for short transfers.
 	 */
 	if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
-		return -EINVAL;
+		return atmci_prepare_data(host, data);
 	if (data->blksz & 3)
-		return -EINVAL;
+		return atmci_prepare_data(host, data);
 
 	for_each_sg(data->sg, sg, data->sg_len, i) {
 		if (sg->offset & 3 || sg->length & 3)
-			return -EINVAL;
+			return atmci_prepare_data(host, data);
 	}
 
 	/* If we don't have a channel, we can't do DMA */
@@ -679,7 +858,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 	if (!chan)
 		return -ENODEV;
 
-	if (atmci_is_mci2())
+	if (host->caps.has_dma)
 		atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
 
 	if (data->flags & MMC_DATA_READ)
@@ -688,7 +867,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 		direction = DMA_TO_DEVICE;
 
 	sglen = dma_map_sg(chan->device->dev, data->sg,
-			data->sg_len, direction);
+			data->sg_len, direction);
 
 	desc = chan->device->device_prep_slave_sg(chan,
 			data->sg, sglen, direction,
@@ -700,13 +879,32 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
 	desc->callback = atmci_dma_complete;
 	desc->callback_param = host;
 
-	return 0;
+	return iflags;
 unmap_exit:
 	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
 	return -ENOMEM;
 }
 
-static void atmci_submit_data(struct atmel_mci *host)
+static void
+atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
+{
+	return;
+}
+
+/*
+ * Start PDC according to transfer direction.
+ */
+static void
+atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
+{
+	if (data->flags & MMC_DATA_READ)
+		atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
+	else
+		atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
+}
+
+static void
+atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
 {
 	struct dma_chan			*chan = host->data_chan;
 	struct dma_async_tx_descriptor	*desc = host->dma.data_desc;
@@ -717,64 +915,39 @@ static void atmci_submit_data(struct atmel_mci *host)
 	}
 }
 
-#else /* CONFIG_MMC_ATMELMCI_DMA */
-
-static int atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
+static void atmci_stop_transfer(struct atmel_mci *host)
 {
-	return -ENOSYS;
-}
-
-static void atmci_submit_data(struct atmel_mci *host) {}
-
-static void atmci_stop_dma(struct atmel_mci *host)
-{
-	/* Data transfer was stopped by the interrupt handler */
 	atmci_set_pending(host, EVENT_XFER_COMPLETE);
 	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
 }
 
-#endif /* CONFIG_MMC_ATMELMCI_DMA */
-
 /*
- * Returns a mask of interrupt flags to be enabled after the whole
- * request has been prepared.
+ * Stop data transfer because error(s) occurred.
  */
-static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
+static void atmci_stop_transfer_pdc(struct atmel_mci *host)
 {
-	u32 iflags;
-
-	data->error = -EINPROGRESS;
-
-	WARN_ON(host->data);
-	host->sg = NULL;
-	host->data = data;
-
-	iflags = ATMCI_DATA_ERROR_FLAGS;
-	if (atmci_prepare_data_dma(host, data)) {
-		host->data_chan = NULL;
+	atmci_set_pending(host, EVENT_XFER_COMPLETE);
+	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+}
 
-		/*
-		 * Errata: MMC data write operation with less than 12
-		 * bytes is impossible.
-		 *
-		 * Errata: MCI Transmit Data Register (TDR) FIFO
-		 * corruption when length is not multiple of 4.
-		 */
-		if (data->blocks * data->blksz < 12
-				|| (data->blocks * data->blksz) & 3)
-			host->need_reset = true;
+static void atmci_stop_transfer_dma(struct atmel_mci *host)
+{
+	struct dma_chan *chan = host->data_chan;
 
-		host->sg = data->sg;
-		host->pio_offset = 0;
-		if (data->flags & MMC_DATA_READ)
-			iflags |= ATMCI_RXRDY;
-		else
-			iflags |= ATMCI_TXRDY;
+	if (chan) {
+		dmaengine_terminate_all(chan);
+		atmci_dma_cleanup(host);
+	} else {
+		/* Data transfer was stopped by the interrupt handler */
+		atmci_set_pending(host, EVENT_XFER_COMPLETE);
+		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
 	}
-
-	return iflags;
 }
 
+/*
+ * Start a request: prepare data if needed, prepare the command and activate
+ * interrupts.
+ */
 static void atmci_start_request(struct atmel_mci *host,
 		struct atmel_mci_slot *slot)
 {
@@ -796,7 +969,7 @@ static void atmci_start_request(struct atmel_mci *host,
 		atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
 		atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
 		atmci_writel(host, ATMCI_MR, host->mode_reg);
-		if (atmci_is_mci2())
+		if (host->caps.has_cfg_reg)
 			atmci_writel(host, ATMCI_CFG, host->cfg_reg);
 		host->need_reset = false;
 	}
@@ -824,7 +997,7 @@ static void atmci_start_request(struct atmel_mci *host,
 		dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
 			ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));
 
-		iflags |= atmci_prepare_data(host, data);
+		iflags |= host->prepare_data(host, data);
 	}
 
 	iflags |= ATMCI_CMDRDY;
@@ -833,7 +1006,7 @@ static void atmci_start_request(struct atmel_mci *host,
 	atmci_start_command(host, cmd, cmdflags);
 
 	if (data)
-		atmci_submit_data(host);
+		host->submit_data(host, data);
 
 	if (mrq->stop) {
 		host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
@@ -929,7 +1102,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		clk_enable(host->mck);
 		atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
 		atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
-		if (atmci_is_mci2())
+		if (host->caps.has_cfg_reg)
 			atmci_writel(host, ATMCI_CFG, host->cfg_reg);
 	}
 
@@ -960,10 +1133,10 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		 * stopping the clock when the FIFO is full/empty.
 		 * This state is not expected to last for long.
 		 */
-		if (atmci_has_rwproof())
+		if (host->caps.has_rwproof)
 			host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
 
-		if (atmci_is_mci2()) {
+		if (host->caps.has_cfg_reg) {
 			/* setup High Speed mode in relation with card capacity */
 			if (ios->timing == MMC_TIMING_SD_HS)
 				host->cfg_reg |= ATMCI_CFG_HSMODE;
@@ -973,7 +1146,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
 	if (list_empty(&host->queue)) {
 		atmci_writel(host, ATMCI_MR, host->mode_reg);
-		if (atmci_is_mci2())
+		if (host->caps.has_cfg_reg)
 			atmci_writel(host, ATMCI_CFG, host->cfg_reg);
 	} else {
 		host->need_clock_update = true;
@@ -1088,7 +1261,7 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
 	 */
 	if (host->need_clock_update) {
 		atmci_writel(host, ATMCI_MR, host->mode_reg);
-		if (atmci_is_mci2())
+		if (host->caps.has_cfg_reg)
 			atmci_writel(host, ATMCI_CFG, host->cfg_reg);
 	}
 
@@ -1137,7 +1310,7 @@ static void atmci_command_complete(struct atmel_mci *host,
1137 "command error: status=0x%08x\n", status); 1310 "command error: status=0x%08x\n", status);
1138 1311
1139 if (cmd->data) { 1312 if (cmd->data) {
1140 atmci_stop_dma(host); 1313 host->stop_transfer(host);
1141 host->data = NULL; 1314 host->data = NULL;
1142 atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY 1315 atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY
1143 | ATMCI_TXRDY | ATMCI_RXRDY 1316 | ATMCI_TXRDY | ATMCI_RXRDY
@@ -1195,7 +1368,7 @@ static void atmci_detect_change(unsigned long data)
 			atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
 			atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
 			atmci_writel(host, ATMCI_MR, host->mode_reg);
-			if (atmci_is_mci2())
+			if (host->caps.has_cfg_reg)
 				atmci_writel(host, ATMCI_CFG, host->cfg_reg);
 
 			host->data = NULL;
@@ -1211,7 +1384,7 @@ static void atmci_detect_change(unsigned long data)
 				/* fall through */
 			case STATE_SENDING_DATA:
 				mrq->data->error = -ENOMEDIUM;
-				atmci_stop_dma(host);
+				host->stop_transfer(host);
 				break;
 			case STATE_DATA_BUSY:
 			case STATE_DATA_ERROR:
@@ -1290,7 +1463,7 @@ static void atmci_tasklet_func(unsigned long priv)
 		case STATE_SENDING_DATA:
 			if (atmci_test_and_clear_pending(host,
 						EVENT_DATA_ERROR)) {
-				atmci_stop_dma(host);
+				host->stop_transfer(host);
 				if (data->stop)
 					atmci_send_stop_cmd(host, data);
 				state = STATE_DATA_ERROR;
@@ -1556,6 +1729,56 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
 			atmci_set_pending(host, EVENT_DATA_ERROR);
 			tasklet_schedule(&host->tasklet);
 		}
+
+		if (pending & ATMCI_ENDTX) {
+			atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
+			if (host->data_size) {
+				atmci_pdc_set_single_buf(host,
+						XFER_TRANSMIT, PDC_SECOND_BUF);
+				atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
+			}
+		}
+
+		if (pending & ATMCI_TXBUFE) {
+			atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
+			/*
+			 * We can receive this interrupt before having configured
+			 * the second pdc buffer, so we need to reconfigure first and
+			 * second buffers again.
+			 */
+			if (host->data_size) {
+				atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
+				atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
+			} else {
+				atmci_pdc_complete(host);
+			}
+		}
+
+		if (pending & ATMCI_ENDRX) {
+			atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
+
+			if (host->data_size) {
+				atmci_pdc_set_single_buf(host,
+						XFER_RECEIVE, PDC_SECOND_BUF);
+				atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
+			}
+		}
+
+		if (pending & ATMCI_RXBUFF) {
+			atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
+			/*
+			 * We can receive this interrupt before having configured
+			 * the second pdc buffer, so we need to reconfigure first and
+			 * second buffers again.
+			 */
+			if (host->data_size) {
+				atmci_pdc_set_both_buf(host, XFER_RECEIVE);
+				atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
+			} else {
+				atmci_pdc_complete(host);
+			}
+		}
+
 		if (pending & ATMCI_NOTBUSY) {
 			atmci_writel(host, ATMCI_IDR,
 					ATMCI_DATA_ERROR_FLAGS | ATMCI_NOTBUSY);
@@ -1622,7 +1845,7 @@ static int __init atmci_init_slot(struct atmel_mci *host,
 	mmc->ocr_avail	= MMC_VDD_32_33 | MMC_VDD_33_34;
 	if (sdio_irq)
 		mmc->caps |= MMC_CAP_SDIO_IRQ;
-	if (atmci_is_mci2())
+	if (host->caps.has_highspeed)
 		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
 	if (slot_data->bus_width >= 4)
 		mmc->caps |= MMC_CAP_4_BIT_DATA;
@@ -1705,7 +1928,6 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
 	mmc_free_host(slot->mmc);
 }
 
-#ifdef CONFIG_MMC_ATMELMCI_DMA
 static bool atmci_filter(struct dma_chan *chan, void *slave)
 {
 	struct mci_dma_data	*sl = slave;
@@ -1747,9 +1969,60 @@ static void atmci_configure_dma(struct atmel_mci *host)
1747 "Using %s for DMA transfers\n", 1969 "Using %s for DMA transfers\n",
1748 dma_chan_name(host->dma.chan)); 1970 dma_chan_name(host->dma.chan));
1749} 1971}
1972
1973static inline unsigned int atmci_get_version(struct atmel_mci *host)
1974{
1975 return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
1976}
1977
1978/*
1979 * HSMCI (High Speed MCI) module is not fully compatible with MCI module.
1980 * HSMCI provides DMA support and a new config register but no more supports
1981 * PDC.
1982 */
1983static void __init atmci_get_cap(struct atmel_mci *host)
1984{
1985 unsigned int version;
1986
1987 version = atmci_get_version(host);
1988 dev_info(&host->pdev->dev,
1989 "version: 0x%x\n", version);
1990
1991 host->caps.has_dma = 0;
1992 host->caps.has_pdc = 0;
1993 host->caps.has_cfg_reg = 0;
1994 host->caps.has_cstor_reg = 0;
1995 host->caps.has_highspeed = 0;
1996 host->caps.has_rwproof = 0;
1997
1998 /* keep only major version number */
1999 switch (version & 0xf00) {
2000 case 0x100:
2001 case 0x200:
2002 host->caps.has_pdc = 1;
2003 host->caps.has_rwproof = 1;
2004 break;
2005 case 0x300:
2006 case 0x400:
2007 case 0x500:
2008#ifdef CONFIG_AT_HDMAC
2009 host->caps.has_dma = 1;
1750#else 2010#else
1751static void atmci_configure_dma(struct atmel_mci *host) {} 2011 host->caps.has_dma = 0;
2012 dev_info(&host->pdev->dev,
2013 "has dma capability but dma engine is not selected, then use pio\n");
1752#endif 2014#endif
2015 host->caps.has_cfg_reg = 1;
2016 host->caps.has_cstor_reg = 1;
2017 host->caps.has_highspeed = 1;
2018 host->caps.has_rwproof = 1;
2019 break;
2020 default:
2021 dev_warn(&host->pdev->dev,
2022 "Unmanaged mci version, set minimum capabilities\n");
2023 break;
2024 }
2025}
1753 2026
1754static int __init atmci_probe(struct platform_device *pdev) 2027static int __init atmci_probe(struct platform_device *pdev)
1755{ 2028{
@@ -1802,7 +2075,27 @@ static int __init atmci_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_request_irq;
 
-	atmci_configure_dma(host);
+	/* Get MCI capabilities and set operations according to them */
+	atmci_get_cap(host);
+	if (host->caps.has_dma) {
+		dev_info(&pdev->dev, "using DMA\n");
+		host->prepare_data = &atmci_prepare_data_dma;
+		host->submit_data = &atmci_submit_data_dma;
+		host->stop_transfer = &atmci_stop_transfer_dma;
+	} else if (host->caps.has_pdc) {
+		dev_info(&pdev->dev, "using PDC\n");
+		host->prepare_data = &atmci_prepare_data_pdc;
+		host->submit_data = &atmci_submit_data_pdc;
+		host->stop_transfer = &atmci_stop_transfer_pdc;
+	} else {
+		dev_info(&pdev->dev, "no DMA, no PDC\n");
+		host->prepare_data = &atmci_prepare_data;
+		host->submit_data = &atmci_submit_data;
+		host->stop_transfer = &atmci_stop_transfer;
+	}
+
+	if (host->caps.has_dma)
+		atmci_configure_dma(host);
 
 	platform_set_drvdata(pdev, host);
 
@@ -1834,10 +2127,8 @@ static int __init atmci_probe(struct platform_device *pdev)
 	return 0;
 
 err_init_slot:
-#ifdef CONFIG_MMC_ATMELMCI_DMA
 	if (host->dma.chan)
 		dma_release_channel(host->dma.chan);
-#endif
 	free_irq(irq, host);
 err_request_irq:
 	iounmap(host->regs);