aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc
diff options
context:
space:
mode:
authorShawn Lin <shawn.lin@rock-chips.com>2015-09-16 02:41:23 -0400
committerUlf Hansson <ulf.hansson@linaro.org>2015-10-26 11:00:16 -0400
commit3fc7eaef44dbcbcd602b6bcd0ac6efba7a30b108 (patch)
treea3e33791bb0ed5bd2fd328f7e5204584cbb47eaf /drivers/mmc
parent9e4703df3aa9829a84d6ccf19c6062ba19a8de71 (diff)
mmc: dw_mmc: Add external dma interface support
DesignWare MMC Controller can support two types of DMA mode: external dma and internal dma. We have a RK312x platform that integrates dw_mmc and an ARM pl330 dma controller. This patch adds edmac ops to support these platforms. I've tested it on RK31xx platform with edmac mode and RK3288 platform with idmac mode. Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com> Signed-off-by: Jaehoon Chung <jh80.chung@samsung.com> Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Diffstat (limited to 'drivers/mmc')
-rw-r--r--drivers/mmc/host/Kconfig11
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c268
-rw-r--r--drivers/mmc/host/dw_mmc.h6
4 files changed, 225 insertions, 62 deletions
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 56f5b1487008..ef54084315cc 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -615,15 +615,7 @@ config MMC_DW
615 help 615 help
616 This selects support for the Synopsys DesignWare Mobile Storage IP 616 This selects support for the Synopsys DesignWare Mobile Storage IP
617 block, this provides host support for SD and MMC interfaces, in both 617 block, this provides host support for SD and MMC interfaces, in both
618 PIO and external DMA modes. 618 PIO, internal DMA mode and external DMA mode.
619
620config MMC_DW_IDMAC
621 bool "Internal DMAC interface"
622 depends on MMC_DW
623 help
624 This selects support for the internal DMAC block within the Synopsys
625 Designware Mobile Storage IP block. This disables the external DMA
626 interface.
627 619
628config MMC_DW_PLTFM 620config MMC_DW_PLTFM
629 tristate "Synopsys Designware MCI Support as platform device" 621 tristate "Synopsys Designware MCI Support as platform device"
@@ -652,7 +644,6 @@ config MMC_DW_K3
652 tristate "K3 specific extensions for Synopsys DW Memory Card Interface" 644 tristate "K3 specific extensions for Synopsys DW Memory Card Interface"
653 depends on MMC_DW 645 depends on MMC_DW
654 select MMC_DW_PLTFM 646 select MMC_DW_PLTFM
655 select MMC_DW_IDMAC
656 help 647 help
657 This selects support for Hisilicon K3 SoC specific extensions to the 648 This selects support for Hisilicon K3 SoC specific extensions to the
658 Synopsys DesignWare Memory Card Interface driver. Select this option 649 Synopsys DesignWare Memory Card Interface driver. Select this option
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index ec6dbcdec693..7e1d13b68b06 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -59,6 +59,8 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
59 host->pdata = pdev->dev.platform_data; 59 host->pdata = pdev->dev.platform_data;
60 60
61 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 61 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
62 /* Get registers' physical base address */
63 host->phy_regs = (void *)(regs->start);
62 host->regs = devm_ioremap_resource(&pdev->dev, regs); 64 host->regs = devm_ioremap_resource(&pdev->dev, regs);
63 if (IS_ERR(host->regs)) 65 if (IS_ERR(host->regs))
64 return PTR_ERR(host->regs); 66 return PTR_ERR(host->regs);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index b1b7e7fa072c..7fe0315142e6 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -56,7 +56,6 @@
56#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */ 56#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
57#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */ 57#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
58 58
59#ifdef CONFIG_MMC_DW_IDMAC
60#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ 59#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
61 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \ 60 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
62 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \ 61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
@@ -102,7 +101,6 @@ struct idmac_desc {
102 101
103/* Each descriptor can transfer up to 4KB of data in chained mode */ 102/* Each descriptor can transfer up to 4KB of data in chained mode */
104#define DW_MCI_DESC_DATA_LENGTH 0x1000 103#define DW_MCI_DESC_DATA_LENGTH 0x1000
105#endif /* CONFIG_MMC_DW_IDMAC */
106 104
107static bool dw_mci_reset(struct dw_mci *host); 105static bool dw_mci_reset(struct dw_mci *host);
108static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset); 106static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
@@ -407,7 +405,6 @@ static int dw_mci_get_dma_dir(struct mmc_data *data)
407 return DMA_FROM_DEVICE; 405 return DMA_FROM_DEVICE;
408} 406}
409 407
410#ifdef CONFIG_MMC_DW_IDMAC
411static void dw_mci_dma_cleanup(struct dw_mci *host) 408static void dw_mci_dma_cleanup(struct dw_mci *host)
412{ 409{
413 struct mmc_data *data = host->data; 410 struct mmc_data *data = host->data;
@@ -445,12 +442,21 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host)
445 mci_writel(host, BMOD, temp); 442 mci_writel(host, BMOD, temp);
446} 443}
447 444
448static void dw_mci_idmac_complete_dma(struct dw_mci *host) 445static void dw_mci_dmac_complete_dma(void *arg)
449{ 446{
447 struct dw_mci *host = arg;
450 struct mmc_data *data = host->data; 448 struct mmc_data *data = host->data;
451 449
452 dev_vdbg(host->dev, "DMA complete\n"); 450 dev_vdbg(host->dev, "DMA complete\n");
453 451
452 if ((host->use_dma == TRANS_MODE_EDMAC) &&
453 data && (data->flags & MMC_DATA_READ))
454 /* Invalidate cache after read */
455 dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
456 data->sg,
457 data->sg_len,
458 DMA_FROM_DEVICE);
459
454 host->dma_ops->cleanup(host); 460 host->dma_ops->cleanup(host);
455 461
456 /* 462 /*
@@ -564,7 +570,7 @@ static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
564 wmb(); /* drain writebuffer */ 570 wmb(); /* drain writebuffer */
565} 571}
566 572
567static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) 573static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
568{ 574{
569 u32 temp; 575 u32 temp;
570 576
@@ -589,6 +595,8 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
589 595
590 /* Start it running */ 596 /* Start it running */
591 mci_writel(host, PLDMND, 1); 597 mci_writel(host, PLDMND, 1);
598
599 return 0;
592} 600}
593 601
594static int dw_mci_idmac_init(struct dw_mci *host) 602static int dw_mci_idmac_init(struct dw_mci *host)
@@ -669,10 +677,112 @@ static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
669 .init = dw_mci_idmac_init, 677 .init = dw_mci_idmac_init,
670 .start = dw_mci_idmac_start_dma, 678 .start = dw_mci_idmac_start_dma,
671 .stop = dw_mci_idmac_stop_dma, 679 .stop = dw_mci_idmac_stop_dma,
672 .complete = dw_mci_idmac_complete_dma, 680 .complete = dw_mci_dmac_complete_dma,
681 .cleanup = dw_mci_dma_cleanup,
682};
683
684static void dw_mci_edmac_stop_dma(struct dw_mci *host)
685{
686 dmaengine_terminate_all(host->dms->ch);
687}
688
689static int dw_mci_edmac_start_dma(struct dw_mci *host,
690 unsigned int sg_len)
691{
692 struct dma_slave_config cfg;
693 struct dma_async_tx_descriptor *desc = NULL;
694 struct scatterlist *sgl = host->data->sg;
695 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
696 u32 sg_elems = host->data->sg_len;
697 u32 fifoth_val;
698 u32 fifo_offset = host->fifo_reg - host->regs;
699 int ret = 0;
700
701 /* Set external dma config: burst size, burst width */
702 cfg.dst_addr = (dma_addr_t)(host->phy_regs + fifo_offset);
703 cfg.src_addr = cfg.dst_addr;
704 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
705 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
706
707 /* Match burst msize with external dma config */
708 fifoth_val = mci_readl(host, FIFOTH);
709 cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
710 cfg.src_maxburst = cfg.dst_maxburst;
711
712 if (host->data->flags & MMC_DATA_WRITE)
713 cfg.direction = DMA_MEM_TO_DEV;
714 else
715 cfg.direction = DMA_DEV_TO_MEM;
716
717 ret = dmaengine_slave_config(host->dms->ch, &cfg);
718 if (ret) {
719 dev_err(host->dev, "Failed to config edmac.\n");
720 return -EBUSY;
721 }
722
723 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
724 sg_len, cfg.direction,
725 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
726 if (!desc) {
727 dev_err(host->dev, "Can't prepare slave sg.\n");
728 return -EBUSY;
729 }
730
731 /* Set dw_mci_dmac_complete_dma as callback */
732 desc->callback = dw_mci_dmac_complete_dma;
733 desc->callback_param = (void *)host;
734 dmaengine_submit(desc);
735
736 /* Flush cache before write */
737 if (host->data->flags & MMC_DATA_WRITE)
738 dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
739 sg_elems, DMA_TO_DEVICE);
740
741 dma_async_issue_pending(host->dms->ch);
742
743 return 0;
744}
745
746static int dw_mci_edmac_init(struct dw_mci *host)
747{
748 /* Request external dma channel */
749 host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
750 if (!host->dms)
751 return -ENOMEM;
752
753 host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
754 if (!host->dms->ch) {
755 dev_err(host->dev,
756 "Failed to get external DMA channel %d\n",
757 host->dms->ch->chan_id);
758 kfree(host->dms);
759 host->dms = NULL;
760 return -ENXIO;
761 }
762
763 return 0;
764}
765
766static void dw_mci_edmac_exit(struct dw_mci *host)
767{
768 if (host->dms) {
769 if (host->dms->ch) {
770 dma_release_channel(host->dms->ch);
771 host->dms->ch = NULL;
772 }
773 kfree(host->dms);
774 host->dms = NULL;
775 }
776}
777
778static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
779 .init = dw_mci_edmac_init,
780 .exit = dw_mci_edmac_exit,
781 .start = dw_mci_edmac_start_dma,
782 .stop = dw_mci_edmac_stop_dma,
783 .complete = dw_mci_dmac_complete_dma,
673 .cleanup = dw_mci_dma_cleanup, 784 .cleanup = dw_mci_dma_cleanup,
674}; 785};
675#endif /* CONFIG_MMC_DW_IDMAC */
676 786
677static int dw_mci_pre_dma_transfer(struct dw_mci *host, 787static int dw_mci_pre_dma_transfer(struct dw_mci *host,
678 struct mmc_data *data, 788 struct mmc_data *data,
@@ -752,7 +862,6 @@ static void dw_mci_post_req(struct mmc_host *mmc,
752 862
753static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) 863static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
754{ 864{
755#ifdef CONFIG_MMC_DW_IDMAC
756 unsigned int blksz = data->blksz; 865 unsigned int blksz = data->blksz;
757 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; 866 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
758 u32 fifo_width = 1 << host->data_shift; 867 u32 fifo_width = 1 << host->data_shift;
@@ -760,6 +869,10 @@ static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
760 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; 869 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
761 int idx = ARRAY_SIZE(mszs) - 1; 870 int idx = ARRAY_SIZE(mszs) - 1;
762 871
 872 /* pio should skip this scenario */
873 if (!host->use_dma)
874 return;
875
763 tx_wmark = (host->fifo_depth) / 2; 876 tx_wmark = (host->fifo_depth) / 2;
764 tx_wmark_invers = host->fifo_depth - tx_wmark; 877 tx_wmark_invers = host->fifo_depth - tx_wmark;
765 878
@@ -788,7 +901,6 @@ static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
788done: 901done:
789 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark); 902 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
790 mci_writel(host, FIFOTH, fifoth_val); 903 mci_writel(host, FIFOTH, fifoth_val);
791#endif
792} 904}
793 905
794static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data) 906static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
@@ -850,10 +962,12 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
850 962
851 host->using_dma = 1; 963 host->using_dma = 1;
852 964
853 dev_vdbg(host->dev, 965 if (host->use_dma == TRANS_MODE_IDMAC)
854 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 966 dev_vdbg(host->dev,
855 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, 967 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
856 sg_len); 968 (unsigned long)host->sg_cpu,
969 (unsigned long)host->sg_dma,
970 sg_len);
857 971
858 /* 972 /*
859 * Decide the MSIZE and RX/TX Watermark. 973 * Decide the MSIZE and RX/TX Watermark.
@@ -875,7 +989,11 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
875 mci_writel(host, INTMASK, temp); 989 mci_writel(host, INTMASK, temp);
876 spin_unlock_irqrestore(&host->irq_lock, irqflags); 990 spin_unlock_irqrestore(&host->irq_lock, irqflags);
877 991
878 host->dma_ops->start(host, sg_len); 992 if (host->dma_ops->start(host, sg_len)) {
993 /* We can't do DMA */
994 dev_err(host->dev, "%s: failed to start DMA.\n", __func__);
995 return -ENODEV;
996 }
879 997
880 return 0; 998 return 0;
881} 999}
@@ -2338,15 +2456,17 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2338 2456
2339 } 2457 }
2340 2458
2341#ifdef CONFIG_MMC_DW_IDMAC 2459 if (host->use_dma != TRANS_MODE_IDMAC)
2342 /* Handle DMA interrupts */ 2460 return IRQ_HANDLED;
2461
2462 /* Handle IDMA interrupts */
2343 if (host->dma_64bit_address == 1) { 2463 if (host->dma_64bit_address == 1) {
2344 pending = mci_readl(host, IDSTS64); 2464 pending = mci_readl(host, IDSTS64);
2345 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 2465 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2346 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | 2466 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2347 SDMMC_IDMAC_INT_RI); 2467 SDMMC_IDMAC_INT_RI);
2348 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI); 2468 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2349 host->dma_ops->complete(host); 2469 host->dma_ops->complete((void *)host);
2350 } 2470 }
2351 } else { 2471 } else {
2352 pending = mci_readl(host, IDSTS); 2472 pending = mci_readl(host, IDSTS);
@@ -2354,10 +2474,9 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2354 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | 2474 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2355 SDMMC_IDMAC_INT_RI); 2475 SDMMC_IDMAC_INT_RI);
2356 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 2476 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2357 host->dma_ops->complete(host); 2477 host->dma_ops->complete((void *)host);
2358 } 2478 }
2359 } 2479 }
2360#endif
2361 2480
2362 return IRQ_HANDLED; 2481 return IRQ_HANDLED;
2363} 2482}
@@ -2466,13 +2585,21 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2466 goto err_host_allocated; 2585 goto err_host_allocated;
2467 2586
2468 /* Useful defaults if platform data is unset. */ 2587 /* Useful defaults if platform data is unset. */
2469 if (host->use_dma) { 2588 if (host->use_dma == TRANS_MODE_IDMAC) {
2470 mmc->max_segs = host->ring_size; 2589 mmc->max_segs = host->ring_size;
2471 mmc->max_blk_size = 65536; 2590 mmc->max_blk_size = 65536;
2472 mmc->max_seg_size = 0x1000; 2591 mmc->max_seg_size = 0x1000;
2473 mmc->max_req_size = mmc->max_seg_size * host->ring_size; 2592 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2474 mmc->max_blk_count = mmc->max_req_size / 512; 2593 mmc->max_blk_count = mmc->max_req_size / 512;
2594 } else if (host->use_dma == TRANS_MODE_EDMAC) {
2595 mmc->max_segs = 64;
2596 mmc->max_blk_size = 65536;
2597 mmc->max_blk_count = 65535;
2598 mmc->max_req_size =
2599 mmc->max_blk_size * mmc->max_blk_count;
2600 mmc->max_seg_size = mmc->max_req_size;
2475 } else { 2601 } else {
2602 /* TRANS_MODE_PIO */
2476 mmc->max_segs = 64; 2603 mmc->max_segs = 64;
2477 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ 2604 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2478 mmc->max_blk_count = 512; 2605 mmc->max_blk_count = 512;
@@ -2512,38 +2639,74 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2512static void dw_mci_init_dma(struct dw_mci *host) 2639static void dw_mci_init_dma(struct dw_mci *host)
2513{ 2640{
2514 int addr_config; 2641 int addr_config;
2515 /* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */ 2642 struct device *dev = host->dev;
2516 addr_config = (mci_readl(host, HCON) >> 27) & 0x01; 2643 struct device_node *np = dev->of_node;
2517
2518 if (addr_config == 1) {
2519 /* host supports IDMAC in 64-bit address mode */
2520 host->dma_64bit_address = 1;
2521 dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
2522 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
2523 dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
2524 } else {
2525 /* host supports IDMAC in 32-bit address mode */
2526 host->dma_64bit_address = 0;
2527 dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
2528 }
2529 2644
2530 /* Alloc memory for sg translation */ 2645 /*
 2531 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE, 2646 * Check transfer mode from HCON[17:16]
2532 &host->sg_dma, GFP_KERNEL); 2647 * Clear the ambiguous description of dw_mmc databook:
2533 if (!host->sg_cpu) { 2648 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
2534 dev_err(host->dev, "%s: could not alloc DMA memory\n", 2649 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
2535 __func__); 2650 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
2651 * 2b'11: Non DW DMA Interface -> pio only
2652 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
2653 * simpler request/acknowledge handshake mechanism and both of them
2654 * are regarded as external dma master for dw_mmc.
2655 */
2656 host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
2657 if (host->use_dma == DMA_INTERFACE_IDMA) {
2658 host->use_dma = TRANS_MODE_IDMAC;
2659 } else if (host->use_dma == DMA_INTERFACE_DWDMA ||
2660 host->use_dma == DMA_INTERFACE_GDMA) {
2661 host->use_dma = TRANS_MODE_EDMAC;
2662 } else {
2536 goto no_dma; 2663 goto no_dma;
2537 } 2664 }
2538 2665
2539 /* Determine which DMA interface to use */ 2666 /* Determine which DMA interface to use */
2540#ifdef CONFIG_MMC_DW_IDMAC 2667 if (host->use_dma == TRANS_MODE_IDMAC) {
2541 host->dma_ops = &dw_mci_idmac_ops; 2668 /*
2542 dev_info(host->dev, "Using internal DMA controller.\n"); 2669 * Check ADDR_CONFIG bit in HCON to find
2543#endif 2670 * IDMAC address bus width
2671 */
2672 addr_config = (mci_readl(host, HCON) >> 27) & 0x01;
2673
2674 if (addr_config == 1) {
2675 /* host supports IDMAC in 64-bit address mode */
2676 host->dma_64bit_address = 1;
2677 dev_info(host->dev,
2678 "IDMAC supports 64-bit address mode.\n");
2679 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
2680 dma_set_coherent_mask(host->dev,
2681 DMA_BIT_MASK(64));
2682 } else {
2683 /* host supports IDMAC in 32-bit address mode */
2684 host->dma_64bit_address = 0;
2685 dev_info(host->dev,
2686 "IDMAC supports 32-bit address mode.\n");
2687 }
2544 2688
2545 if (!host->dma_ops) 2689 /* Alloc memory for sg translation */
2546 goto no_dma; 2690 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2691 &host->sg_dma, GFP_KERNEL);
2692 if (!host->sg_cpu) {
2693 dev_err(host->dev,
2694 "%s: could not alloc DMA memory\n",
2695 __func__);
2696 goto no_dma;
2697 }
2698
2699 host->dma_ops = &dw_mci_idmac_ops;
2700 dev_info(host->dev, "Using internal DMA controller.\n");
2701 } else {
2702 /* TRANS_MODE_EDMAC: check dma bindings again */
2703 if ((of_property_count_strings(np, "dma-names") < 0) ||
2704 (!of_find_property(np, "dmas", NULL))) {
2705 goto no_dma;
2706 }
2707 host->dma_ops = &dw_mci_edmac_ops;
2708 dev_info(host->dev, "Using external DMA controller.\n");
2709 }
2547 2710
2548 if (host->dma_ops->init && host->dma_ops->start && 2711 if (host->dma_ops->init && host->dma_ops->start &&
2549 host->dma_ops->stop && host->dma_ops->cleanup) { 2712 host->dma_ops->stop && host->dma_ops->cleanup) {
@@ -2557,12 +2720,11 @@ static void dw_mci_init_dma(struct dw_mci *host)
2557 goto no_dma; 2720 goto no_dma;
2558 } 2721 }
2559 2722
2560 host->use_dma = 1;
2561 return; 2723 return;
2562 2724
2563no_dma: 2725no_dma:
2564 dev_info(host->dev, "Using PIO mode.\n"); 2726 dev_info(host->dev, "Using PIO mode.\n");
2565 host->use_dma = 0; 2727 host->use_dma = TRANS_MODE_PIO;
2566} 2728}
2567 2729
2568static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) 2730static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
@@ -2645,10 +2807,9 @@ static bool dw_mci_reset(struct dw_mci *host)
2645 } 2807 }
2646 } 2808 }
2647 2809
2648#if IS_ENABLED(CONFIG_MMC_DW_IDMAC) 2810 if (host->use_dma == TRANS_MODE_IDMAC)
2649 /* It is also recommended that we reset and reprogram idmac */ 2811 /* It is also recommended that we reset and reprogram idmac */
2650 dw_mci_idmac_reset(host); 2812 dw_mci_idmac_reset(host);
2651#endif
2652 2813
2653 ret = true; 2814 ret = true;
2654 2815
@@ -3062,6 +3223,9 @@ EXPORT_SYMBOL(dw_mci_remove);
3062 */ 3223 */
3063int dw_mci_suspend(struct dw_mci *host) 3224int dw_mci_suspend(struct dw_mci *host)
3064{ 3225{
3226 if (host->use_dma && host->dma_ops->exit)
3227 host->dma_ops->exit(host);
3228
3065 return 0; 3229 return 0;
3066} 3230}
3067EXPORT_SYMBOL(dw_mci_suspend); 3231EXPORT_SYMBOL(dw_mci_suspend);
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 8ce4674730a6..811d4673a1c5 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -148,6 +148,12 @@
148#define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \ 148#define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \
149 ((r) & 0xFFF) << 16 | \ 149 ((r) & 0xFFF) << 16 | \
150 ((t) & 0xFFF)) 150 ((t) & 0xFFF))
151/* HCON register defines */
152#define DMA_INTERFACE_IDMA (0x0)
153#define DMA_INTERFACE_DWDMA (0x1)
154#define DMA_INTERFACE_GDMA (0x2)
155#define DMA_INTERFACE_NODMA (0x3)
156#define SDMMC_GET_TRANS_MODE(x) (((x)>>16) & 0x3)
151/* Internal DMAC interrupt defines */ 157/* Internal DMAC interrupt defines */
152#define SDMMC_IDMAC_INT_AI BIT(9) 158#define SDMMC_IDMAC_INT_AI BIT(9)
153#define SDMMC_IDMAC_INT_NI BIT(8) 159#define SDMMC_IDMAC_INT_NI BIT(8)