author     Linus Torvalds <torvalds@linux-foundation.org>  2012-08-01 19:41:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-08-01 19:41:07 -0400
commit     a6dc77254b3c3eb0307b372b77b861d5cd2ead08 (patch)
tree       5770a808b0527eebeff43f16508ea8f03e459b58 /drivers/mmc
parent     02a6ec6a24077ffda33b99cb193e8a536b90711d (diff)
parent     0e52d987c0b242fe3fe4c8e9732bd663cce0e50b (diff)
Merge branch 'dmaengine' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM DMA engine updates from Russell King:
 "This looks scary at first glance, but what it is is:

   - a rework of the sa11x0 DMA engine driver merged during the
     previous cycle, to extract a common set of helper functions for
     DMA engine implementations.

   - conversion of amba-pl08x.c to use these helper functions.

   - addition of OMAP DMA engine driver (using these helper functions),
     and conversion of some of the OMAP DMA users to use DMA engine.

  Nothing in the helper functions is ARM specific, so I hope that other
  implementations can consolidate some of their code by making use of
  these helpers.

  This has been sitting in linux-next most of the merge cycle, and has
  been tested by several OMAP folk.  I've tested it on sa11x0 platforms,
  and given it my best shot on my broken platforms which have the
  amba-pl08x controller.

  The last point is the addition to feature-removal-schedule.txt, which
  will have a merge conflict.  Between myself and TI, we're planning to
  remove the old TI DMA implementation next year."

Fix up trivial add/add conflicts in Documentation/feature-removal-schedule.txt
and drivers/dma/{Kconfig,Makefile}

* 'dmaengine' of git://git.linaro.org/people/rmk/linux-arm: (53 commits)
  ARM: 7481/1: OMAP2+: omap2plus_defconfig: enable OMAP DMA engine
  ARM: 7464/1: mmc: omap_hsmmc: ensure probe returns error if DMA channel request fails
  Add feature removal of old OMAP private DMA implementation
  mtd: omap2: remove private DMA API implementation
  mtd: omap2: add DMA engine support
  spi: omap2-mcspi: remove private DMA API implementation
  spi: omap2-mcspi: add DMA engine support
  ARM: omap: remove mmc platform data dma_mask and initialization
  mmc: omap: remove private DMA API implementation
  mmc: omap: add DMA engine support
  mmc: omap_hsmmc: remove private DMA API implementation
  mmc: omap_hsmmc: add DMA engine support
  dmaengine: omap: add support for cyclic DMA
  dmaengine: omap: add support for setting fi
  dmaengine: omap: add support for returning residue in tx_state method
  dmaengine: add OMAP DMA engine driver
  dmaengine: sa11x0-dma: add cyclic DMA support
  dmaengine: sa11x0-dma: fix DMA residue support
  dmaengine: PL08x: ensure all descriptors are freed when channel is released
  dmaengine: PL08x: get rid of write only pool_ctr and free_txd locking
  ...
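For readers who have not used the slave dmaengine API that these conversions adopt, the sketch below strings together the request/configure/map/prep/submit/issue sequence that both MMC hosts in this diff now follow, plus the matching teardown. It is illustrative only, not code from the patch: the function names mmc_dma_rx_once() and mmc_dma_rx_done(), the FIFO address parameter, and the burst width are placeholder assumptions, while the dmaengine calls themselves (dma_request_channel(), dmaengine_slave_config(), dmaengine_prep_slave_sg(), dmaengine_submit(), dma_async_issue_pending(), dma_release_channel()) are the real API visible in the hunks below.

	#include <linux/completion.h>
	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>
	#include <linux/omap-dma.h>

	/* Completion callback; runs in tasklet context when the sg list is done. */
	static void mmc_dma_rx_done(void *param)
	{
		complete(param);
	}

	/* One device-to-memory transfer; 'sig' is the OMAP DMA request line. */
	static int mmc_dma_rx_once(struct scatterlist *sg, unsigned int sg_len,
				   dma_addr_t fifo, unsigned int sig)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		struct dma_async_tx_descriptor *tx;
		struct dma_slave_config cfg = {
			.src_addr	= fifo,	/* device FIFO, constant address */
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 16,	/* placeholder burst size */
		};
		struct dma_chan *chan;
		dma_cap_mask_t mask;
		int mapped, ret = 0;

		/* 1. Ask the dmaengine core for a slave channel on this line. */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
		if (!chan)
			return -ENODEV;

		/* 2. Describe the device end of the transfer. */
		ret = dmaengine_slave_config(chan, &cfg);
		if (ret)
			goto release;

		/* 3. Map the buffer, then prepare and submit a descriptor. */
		mapped = dma_map_sg(chan->device->dev, sg, sg_len, DMA_FROM_DEVICE);
		if (!mapped) {
			ret = -ENOMEM;
			goto release;
		}
		tx = dmaengine_prep_slave_sg(chan, sg, mapped, DMA_DEV_TO_MEM,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx) {
			ret = -EIO;
			goto unmap;
		}
		tx->callback = mmc_dma_rx_done;
		tx->callback_param = &done;
		dmaengine_submit(tx);

		/* 4. Kick the engine and wait for the callback. */
		dma_async_issue_pending(chan);
		wait_for_completion(&done);
	unmap:
		dma_unmap_sg(chan->device->dev, sg, sg_len, DMA_FROM_DEVICE);
	release:
		dma_release_channel(chan);
		return ret;
	}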
Diffstat (limited to 'drivers/mmc')
-rw-r--r--	drivers/mmc/host/omap.c		368
-rw-r--r--	drivers/mmc/host/omap_hsmmc.c	204
2 files changed, 265 insertions(+), 307 deletions(-)
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 3e8dcf8d2e05..50e08f03aa65 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -17,10 +17,12 @@
 #include <linux/ioport.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
+#include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
 #include <linux/timer.h>
+#include <linux/omap-dma.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
 #include <linux/clk.h>
@@ -128,6 +130,10 @@ struct mmc_omap_host {
 	unsigned char		id; /* 16xx chips have 2 MMC blocks */
 	struct clk *		iclk;
 	struct clk *		fclk;
+	struct dma_chan		*dma_rx;
+	u32			dma_rx_burst;
+	struct dma_chan		*dma_tx;
+	u32			dma_tx_burst;
 	struct resource		*mem_res;
 	void __iomem		*virt_base;
 	unsigned int		phys_base;
@@ -153,12 +159,8 @@ struct mmc_omap_host {
 
 	unsigned		use_dma:1;
 	unsigned		brs_received:1, dma_done:1;
-	unsigned		dma_is_read:1;
 	unsigned		dma_in_use:1;
-	int			dma_ch;
 	spinlock_t		dma_lock;
-	struct timer_list	dma_timer;
-	unsigned		dma_len;
 
 	struct mmc_omap_slot    *slots[OMAP_MMC_MAX_SLOTS];
 	struct mmc_omap_slot    *current_slot;
@@ -406,18 +408,25 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
 		      int abort)
 {
 	enum dma_data_direction dma_data_dir;
+	struct device *dev = mmc_dev(host->mmc);
+	struct dma_chan *c;
 
-	BUG_ON(host->dma_ch < 0);
-	if (data->error)
-		omap_stop_dma(host->dma_ch);
-	/* Release DMA channel lazily */
-	mod_timer(&host->dma_timer, jiffies + HZ);
-	if (data->flags & MMC_DATA_WRITE)
+	if (data->flags & MMC_DATA_WRITE) {
 		dma_data_dir = DMA_TO_DEVICE;
-	else
+		c = host->dma_tx;
+	} else {
 		dma_data_dir = DMA_FROM_DEVICE;
-	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
-		     dma_data_dir);
+		c = host->dma_rx;
+	}
+	if (c) {
+		if (data->error) {
+			dmaengine_terminate_all(c);
+			/* Claim nothing transferred on error... */
+			data->bytes_xfered = 0;
+		}
+		dev = c->device->dev;
+	}
+	dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
 }
 
 static void mmc_omap_send_stop_work(struct work_struct *work)
@@ -525,16 +534,6 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
 }
 
 static void
-mmc_omap_dma_timer(unsigned long data)
-{
-	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
-
-	BUG_ON(host->dma_ch < 0);
-	omap_free_dma(host->dma_ch);
-	host->dma_ch = -1;
-}
-
-static void
 mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
 {
 	unsigned long flags;
@@ -891,159 +890,15 @@ static void mmc_omap_cover_handler(unsigned long param)
 		  jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
 }
 
-/* Prepare to transfer the next segment of a scatterlist */
-static void
-mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
+static void mmc_omap_dma_callback(void *priv)
 {
-	int dma_ch = host->dma_ch;
-	unsigned long data_addr;
-	u16 buf, frame;
-	u32 count;
-	struct scatterlist *sg = &data->sg[host->sg_idx];
-	int src_port = 0;
-	int dst_port = 0;
-	int sync_dev = 0;
-
-	data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
-	frame = data->blksz;
-	count = sg_dma_len(sg);
-
-	if ((data->blocks == 1) && (count > data->blksz))
-		count = frame;
-
-	host->dma_len = count;
-
-	/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
-	 * Use 16 or 32 word frames when the blocksize is at least that large.
-	 * Blocksize is usually 512 bytes; but not for some SD reads.
-	 */
-	if (cpu_is_omap15xx() && frame > 32)
-		frame = 32;
-	else if (frame > 64)
-		frame = 64;
-	count /= frame;
-	frame >>= 1;
-
-	if (!(data->flags & MMC_DATA_WRITE)) {
-		buf = 0x800f | ((frame - 1) << 8);
-
-		if (cpu_class_is_omap1()) {
-			src_port = OMAP_DMA_PORT_TIPB;
-			dst_port = OMAP_DMA_PORT_EMIFF;
-		}
-		if (cpu_is_omap24xx())
-			sync_dev = OMAP24XX_DMA_MMC1_RX;
-
-		omap_set_dma_src_params(dma_ch, src_port,
-					OMAP_DMA_AMODE_CONSTANT,
-					data_addr, 0, 0);
-		omap_set_dma_dest_params(dma_ch, dst_port,
-					 OMAP_DMA_AMODE_POST_INC,
-					 sg_dma_address(sg), 0, 0);
-		omap_set_dma_dest_data_pack(dma_ch, 1);
-		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
-	} else {
-		buf = 0x0f80 | ((frame - 1) << 0);
-
-		if (cpu_class_is_omap1()) {
-			src_port = OMAP_DMA_PORT_EMIFF;
-			dst_port = OMAP_DMA_PORT_TIPB;
-		}
-		if (cpu_is_omap24xx())
-			sync_dev = OMAP24XX_DMA_MMC1_TX;
-
-		omap_set_dma_dest_params(dma_ch, dst_port,
-					 OMAP_DMA_AMODE_CONSTANT,
-					 data_addr, 0, 0);
-		omap_set_dma_src_params(dma_ch, src_port,
-					OMAP_DMA_AMODE_POST_INC,
-					sg_dma_address(sg), 0, 0);
-		omap_set_dma_src_data_pack(dma_ch, 1);
-		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
-	}
+	struct mmc_omap_host *host = priv;
+	struct mmc_data *data = host->data;
 
-	/* Max limit for DMA frame count is 0xffff */
-	BUG_ON(count > 0xffff);
+	/* If we got to the end of DMA, assume everything went well */
+	data->bytes_xfered += data->blocks * data->blksz;
 
-	OMAP_MMC_WRITE(host, BUF, buf);
-	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
-				     frame, count, OMAP_DMA_SYNC_FRAME,
-				     sync_dev, 0);
-}
-
-/* A scatterlist segment completed */
-static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
-{
-	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
-	struct mmc_data *mmcdat = host->data;
-
-	if (unlikely(host->dma_ch < 0)) {
-		dev_err(mmc_dev(host->mmc),
-			"DMA callback while DMA not enabled\n");
-		return;
-	}
-	/* FIXME: We really should do something to _handle_ the errors */
-	if (ch_status & OMAP1_DMA_TOUT_IRQ) {
-		dev_err(mmc_dev(host->mmc),"DMA timeout\n");
-		return;
-	}
-	if (ch_status & OMAP_DMA_DROP_IRQ) {
-		dev_err(mmc_dev(host->mmc), "DMA sync error\n");
-		return;
-	}
-	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
-		return;
-	}
-	mmcdat->bytes_xfered += host->dma_len;
-	host->sg_idx++;
-	if (host->sg_idx < host->sg_len) {
-		mmc_omap_prepare_dma(host, host->data);
-		omap_start_dma(host->dma_ch);
-	} else
-		mmc_omap_dma_done(host, host->data);
-}
-
-static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
-{
-	const char *dma_dev_name;
-	int sync_dev, dma_ch, is_read, r;
-
-	is_read = !(data->flags & MMC_DATA_WRITE);
-	del_timer_sync(&host->dma_timer);
-	if (host->dma_ch >= 0) {
-		if (is_read == host->dma_is_read)
-			return 0;
-		omap_free_dma(host->dma_ch);
-		host->dma_ch = -1;
-	}
-
-	if (is_read) {
-		if (host->id == 0) {
-			sync_dev = OMAP_DMA_MMC_RX;
-			dma_dev_name = "MMC1 read";
-		} else {
-			sync_dev = OMAP_DMA_MMC2_RX;
-			dma_dev_name = "MMC2 read";
-		}
-	} else {
-		if (host->id == 0) {
-			sync_dev = OMAP_DMA_MMC_TX;
-			dma_dev_name = "MMC1 write";
-		} else {
-			sync_dev = OMAP_DMA_MMC2_TX;
-			dma_dev_name = "MMC2 write";
-		}
-	}
-	r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb,
-			     host, &dma_ch);
-	if (r != 0) {
-		dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
-		return r;
-	}
-	host->dma_ch = dma_ch;
-	host->dma_is_read = is_read;
-
-	return 0;
+	mmc_omap_dma_done(host, data);
 }
 
 static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
@@ -1118,33 +973,85 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
 
 	host->sg_idx = 0;
 	if (use_dma) {
-		if (mmc_omap_get_dma_channel(host, data) == 0) {
-			enum dma_data_direction dma_data_dir;
-
-			if (data->flags & MMC_DATA_WRITE)
-				dma_data_dir = DMA_TO_DEVICE;
-			else
-				dma_data_dir = DMA_FROM_DEVICE;
-
-			host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
-						sg_len, dma_data_dir);
-			host->total_bytes_left = 0;
-			mmc_omap_prepare_dma(host, req->data);
-			host->brs_received = 0;
-			host->dma_done = 0;
-			host->dma_in_use = 1;
-		} else
-			use_dma = 0;
+		enum dma_data_direction dma_data_dir;
+		struct dma_async_tx_descriptor *tx;
+		struct dma_chan *c;
+		u32 burst, *bp;
+		u16 buf;
+
+		/*
+		 * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx
+		 * and 24xx. Use 16 or 32 word frames when the
+		 * blocksize is at least that large. Blocksize is
+		 * usually 512 bytes; but not for some SD reads.
+		 */
+		burst = cpu_is_omap15xx() ? 32 : 64;
+		if (burst > data->blksz)
+			burst = data->blksz;
+
+		burst >>= 1;
+
+		if (data->flags & MMC_DATA_WRITE) {
+			c = host->dma_tx;
+			bp = &host->dma_tx_burst;
+			buf = 0x0f80 | (burst - 1) << 0;
+			dma_data_dir = DMA_TO_DEVICE;
+		} else {
+			c = host->dma_rx;
+			bp = &host->dma_rx_burst;
+			buf = 0x800f | (burst - 1) << 8;
+			dma_data_dir = DMA_FROM_DEVICE;
+		}
+
+		if (!c)
+			goto use_pio;
+
+		/* Only reconfigure if we have a different burst size */
+		if (*bp != burst) {
+			struct dma_slave_config cfg;
+
+			cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
+			cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
+			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+			cfg.src_maxburst = burst;
+			cfg.dst_maxburst = burst;
+
+			if (dmaengine_slave_config(c, &cfg))
+				goto use_pio;
+
+			*bp = burst;
+		}
+
+		host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
+					  dma_data_dir);
+		if (host->sg_len == 0)
+			goto use_pio;
+
+		tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
+			data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!tx)
+			goto use_pio;
+
+		OMAP_MMC_WRITE(host, BUF, buf);
+
+		tx->callback = mmc_omap_dma_callback;
+		tx->callback_param = host;
+		dmaengine_submit(tx);
+		host->brs_received = 0;
+		host->dma_done = 0;
+		host->dma_in_use = 1;
+		return;
 	}
+use_pio:
 
 	/* Revert to PIO? */
-	if (!use_dma) {
-		OMAP_MMC_WRITE(host, BUF, 0x1f1f);
-		host->total_bytes_left = data->blocks * block_size;
-		host->sg_len = sg_len;
-		mmc_omap_sg_to_buf(host);
-		host->dma_in_use = 0;
-	}
+	OMAP_MMC_WRITE(host, BUF, 0x1f1f);
+	host->total_bytes_left = data->blocks * block_size;
+	host->sg_len = sg_len;
+	mmc_omap_sg_to_buf(host);
+	host->dma_in_use = 0;
 }
 
 static void mmc_omap_start_request(struct mmc_omap_host *host,
@@ -1157,8 +1064,12 @@ static void mmc_omap_start_request(struct mmc_omap_host *host,
 	/* only touch fifo AFTER the controller readies it */
 	mmc_omap_prepare_data(host, req);
 	mmc_omap_start_command(host, req->cmd);
-	if (host->dma_in_use)
-		omap_start_dma(host->dma_ch);
+	if (host->dma_in_use) {
+		struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
+			host->dma_tx : host->dma_rx;
+
+		dma_async_issue_pending(c);
+	}
 }
 
 static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
@@ -1400,6 +1311,8 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
 	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
 	struct mmc_omap_host *host = NULL;
 	struct resource *res;
+	dma_cap_mask_t mask;
+	unsigned sig;
 	int i, ret = 0;
 	int irq;
 
@@ -1439,7 +1352,6 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
 	setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
 
 	spin_lock_init(&host->dma_lock);
-	setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
 	spin_lock_init(&host->slot_lock);
 	init_waitqueue_head(&host->slot_wq);
 
@@ -1450,11 +1362,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
 	host->id = pdev->id;
 	host->mem_res = res;
 	host->irq = irq;
-
 	host->use_dma = 1;
-	host->dev->dma_mask = &pdata->dma_mask;
-	host->dma_ch = -1;
-
 	host->irq = irq;
 	host->phys_base = host->mem_res->start;
 	host->virt_base = ioremap(res->start, resource_size(res));
@@ -1474,9 +1382,48 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
 		goto err_free_iclk;
 	}
 
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	host->dma_tx_burst = -1;
+	host->dma_rx_burst = -1;
+
+	if (cpu_is_omap24xx())
+		sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX;
+	else
+		sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX;
+	host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+#if 0
+	if (!host->dma_tx) {
+		dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n",
+			sig);
+		goto err_dma;
+	}
+#else
+	if (!host->dma_tx)
+		dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
+			sig);
+#endif
+	if (cpu_is_omap24xx())
+		sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX;
+	else
+		sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX;
+	host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+#if 0
+	if (!host->dma_rx) {
+		dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n",
+			sig);
+		goto err_dma;
+	}
+#else
+	if (!host->dma_rx)
+		dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
+			sig);
+#endif
+
 	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
 	if (ret)
-		goto err_free_fclk;
+		goto err_free_dma;
 
 	if (pdata->init != NULL) {
 		ret = pdata->init(&pdev->dev);
@@ -1510,7 +1457,11 @@ err_plat_cleanup:
 	pdata->cleanup(&pdev->dev);
 err_free_irq:
 	free_irq(host->irq, host);
-err_free_fclk:
+err_free_dma:
+	if (host->dma_tx)
+		dma_release_channel(host->dma_tx);
+	if (host->dma_rx)
+		dma_release_channel(host->dma_rx);
 	clk_put(host->fclk);
 err_free_iclk:
 	clk_disable(host->iclk);
@@ -1545,6 +1496,11 @@ static int __devexit mmc_omap_remove(struct platform_device *pdev)
 	clk_disable(host->iclk);
 	clk_put(host->iclk);
 
+	if (host->dma_tx)
+		dma_release_channel(host->dma_tx);
+	if (host->dma_rx)
+		dma_release_channel(host->dma_rx);
+
 	iounmap(host->virt_base);
 	release_mem_region(pdev->resource[0].start,
 			   pdev->resource[0].end - pdev->resource[0].start + 1);
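The omap_hsmmc.c conversion below follows the same prep/submit/issue flow. The shared error-path idiom, seen in mmc_omap_release_dma() above and omap_hsmmc_dma_cleanup() below, is to cancel the channel with dmaengine_terminate_all() before unmapping the buffers. A hedged sketch (the helper name mmc_dma_abort() is invented; the dmaengine calls are real):

	static void mmc_dma_abort(struct dma_chan *chan, struct scatterlist *sg,
				  unsigned int sg_len, enum dma_data_direction dir)
	{
		/* Cancel in-flight descriptors first so the engine no longer
		 * touches the buffers, then release the DMA mapping. */
		dmaengine_terminate_all(chan);
		dma_unmap_sg(chan->device->dev, sg, sg_len, dir);
	}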
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index bc28627af66b..3a09f93cc3b6 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/debugfs.h>
+#include <linux/dmaengine.h>
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
@@ -29,6 +30,7 @@
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 #include <linux/of_device.h>
+#include <linux/omap-dma.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/core.h>
 #include <linux/mmc/mmc.h>
@@ -37,7 +39,6 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
-#include <plat/dma.h>
 #include <mach/hardware.h>
 #include <plat/board.h>
 #include <plat/mmc.h>
@@ -166,7 +167,8 @@ struct omap_hsmmc_host {
 	int			suspended;
 	int			irq;
 	int			use_dma, dma_ch;
-	int			dma_line_tx, dma_line_rx;
+	struct dma_chan		*tx_chan;
+	struct dma_chan		*rx_chan;
 	int			slot_id;
 	int			response_busy;
 	int			context_loss;
@@ -797,6 +799,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
 		return DMA_FROM_DEVICE;
 }
 
+static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
+	struct mmc_data *data)
+{
+	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
+}
+
 static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
 {
 	int dma_ch;
@@ -889,10 +897,13 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 	spin_unlock_irqrestore(&host->irq_lock, flags);
 
 	if (host->use_dma && dma_ch != -1) {
-		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
-			host->data->sg_len,
+		struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
+
+		dmaengine_terminate_all(chan);
+		dma_unmap_sg(chan->device->dev,
+			host->data->sg, host->data->sg_len,
 			omap_hsmmc_get_dma_dir(host, host->data));
-		omap_free_dma(dma_ch);
+
 		host->data->host_cookie = 0;
 	}
 	host->data = NULL;
@@ -1190,90 +1201,29 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host,
-	struct mmc_data *data)
-{
-	int sync_dev;
-
-	if (data->flags & MMC_DATA_WRITE)
-		sync_dev = host->dma_line_tx;
-	else
-		sync_dev = host->dma_line_rx;
-	return sync_dev;
-}
-
-static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
-					struct mmc_data *data,
-					struct scatterlist *sgl)
-{
-	int blksz, nblk, dma_ch;
-
-	dma_ch = host->dma_ch;
-	if (data->flags & MMC_DATA_WRITE) {
-		omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
-			(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
-		omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
-			sg_dma_address(sgl), 0, 0);
-	} else {
-		omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
-			(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
-		omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
-			sg_dma_address(sgl), 0, 0);
-	}
-
-	blksz = host->data->blksz;
-	nblk = sg_dma_len(sgl) / blksz;
-
-	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
-			blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
-			omap_hsmmc_get_dma_sync_dev(host, data),
-			!(data->flags & MMC_DATA_WRITE));
-
-	omap_start_dma(dma_ch);
-}
-
-/*
- * DMA call back function
- */
-static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
+static void omap_hsmmc_dma_callback(void *param)
 {
-	struct omap_hsmmc_host *host = cb_data;
+	struct omap_hsmmc_host *host = param;
+	struct dma_chan *chan;
 	struct mmc_data *data;
-	int dma_ch, req_in_progress;
-	unsigned long flags;
-
-	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
-		dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
-			ch_status);
-		return;
-	}
+	int req_in_progress;
 
-	spin_lock_irqsave(&host->irq_lock, flags);
+	spin_lock_irq(&host->irq_lock);
 	if (host->dma_ch < 0) {
-		spin_unlock_irqrestore(&host->irq_lock, flags);
+		spin_unlock_irq(&host->irq_lock);
 		return;
 	}
 
 	data = host->mrq->data;
-	host->dma_sg_idx++;
-	if (host->dma_sg_idx < host->dma_len) {
-		/* Fire up the next transfer. */
-		omap_hsmmc_config_dma_params(host, data,
-					     data->sg + host->dma_sg_idx);
-		spin_unlock_irqrestore(&host->irq_lock, flags);
-		return;
-	}
-
+	chan = omap_hsmmc_get_dma_chan(host, data);
 	if (!data->host_cookie)
-		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+		dma_unmap_sg(chan->device->dev,
+			     data->sg, data->sg_len,
 			     omap_hsmmc_get_dma_dir(host, data));
 
 	req_in_progress = host->req_in_progress;
-	dma_ch = host->dma_ch;
 	host->dma_ch = -1;
-	spin_unlock_irqrestore(&host->irq_lock, flags);
-
-	omap_free_dma(dma_ch);
+	spin_unlock_irq(&host->irq_lock);
 
 	/* If DMA has finished after TC, complete the request */
 	if (!req_in_progress) {
@@ -1286,7 +1236,8 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
 
 static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 				       struct mmc_data *data,
-				       struct omap_hsmmc_next *next)
+				       struct omap_hsmmc_next *next,
+				       struct dma_chan *chan)
 {
 	int dma_len;
 
@@ -1301,8 +1252,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 	/* Check if next job is already prepared */
 	if (next ||
 	    (!next && data->host_cookie != host->next_data.cookie)) {
-		dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
-				     data->sg_len,
+		dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
 				     omap_hsmmc_get_dma_dir(host, data));
 
 	} else {
@@ -1329,8 +1279,11 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
 					struct mmc_request *req)
 {
-	int dma_ch = 0, ret = 0, i;
+	struct dma_slave_config cfg;
+	struct dma_async_tx_descriptor *tx;
+	int ret = 0, i;
 	struct mmc_data *data = req->data;
+	struct dma_chan *chan;
 
 	/* Sanity check: all the SG entries must be aligned by block size. */
 	for (i = 0; i < data->sg_len; i++) {
@@ -1348,22 +1301,41 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
 
 	BUG_ON(host->dma_ch != -1);
 
-	ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
-			       "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
-	if (ret != 0) {
-		dev_err(mmc_dev(host->mmc),
-			"%s: omap_request_dma() failed with %d\n",
-			mmc_hostname(host->mmc), ret);
+	chan = omap_hsmmc_get_dma_chan(host, data);
+
+	cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
+	cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
+	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.src_maxburst = data->blksz / 4;
+	cfg.dst_maxburst = data->blksz / 4;
+
+	ret = dmaengine_slave_config(chan, &cfg);
+	if (ret)
 		return ret;
-	}
-	ret = omap_hsmmc_pre_dma_transfer(host, data, NULL);
+
+	ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
 	if (ret)
 		return ret;
 
-	host->dma_ch = dma_ch;
-	host->dma_sg_idx = 0;
+	tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
+		data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!tx) {
+		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
+		/* FIXME: cleanup */
+		return -1;
+	}
+
+	tx->callback = omap_hsmmc_dma_callback;
+	tx->callback_param = host;
 
-	omap_hsmmc_config_dma_params(host, data, data->sg);
+	/* Does not fail */
+	dmaengine_submit(tx);
+
+	host->dma_ch = 1;
+
+	dma_async_issue_pending(chan);
 
 	return 0;
 }
@@ -1445,11 +1417,11 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
 	struct omap_hsmmc_host *host = mmc_priv(mmc);
 	struct mmc_data *data = mrq->data;
 
-	if (host->use_dma) {
-		if (data->host_cookie)
-			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
-				     data->sg_len,
+	if (host->use_dma && data->host_cookie) {
+		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
+
+		dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
 			     omap_hsmmc_get_dma_dir(host, data));
 		data->host_cookie = 0;
 	}
 }
@@ -1464,10 +1436,13 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
 		return ;
 	}
 
-	if (host->use_dma)
+	if (host->use_dma) {
+		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
+
 		if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
-						&host->next_data))
+						&host->next_data, c))
 			mrq->data->host_cookie = 0;
+	}
 }
 
 /*
@@ -1800,6 +1775,8 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 	struct resource *res;
 	int ret, irq;
 	const struct of_device_id *match;
+	dma_cap_mask_t mask;
+	unsigned tx_req, rx_req;
 
 	match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
 	if (match) {
@@ -1844,7 +1821,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 	host->pdata	= pdata;
 	host->dev	= &pdev->dev;
 	host->use_dma	= 1;
-	host->dev->dma_mask = &pdata->dma_mask;
 	host->dma_ch	= -1;
 	host->irq	= irq;
 	host->slot_id	= 0;
@@ -1934,7 +1910,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 		ret = -ENXIO;
 		goto err_irq;
 	}
-	host->dma_line_tx = res->start;
+	tx_req = res->start;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
 	if (!res) {
@@ -1942,7 +1918,24 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 		ret = -ENXIO;
 		goto err_irq;
 	}
-	host->dma_line_rx = res->start;
+	rx_req = res->start;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
+	if (!host->rx_chan) {
+		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
+		ret = -ENXIO;
+		goto err_irq;
+	}
+
+	host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
+	if (!host->tx_chan) {
+		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
+		ret = -ENXIO;
+		goto err_irq;
+	}
 
 	/* Request IRQ for MMC operations */
 	ret = request_irq(host->irq, omap_hsmmc_irq, 0,
@@ -2021,6 +2014,10 @@ err_reg:
err_irq_cd_init:
 	free_irq(host->irq, host);
err_irq:
+	if (host->tx_chan)
+		dma_release_channel(host->tx_chan);
+	if (host->rx_chan)
+		dma_release_channel(host->rx_chan);
 	pm_runtime_put_sync(host->dev);
 	pm_runtime_disable(host->dev);
 	clk_put(host->fclk);
@@ -2056,6 +2053,11 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
 	if (mmc_slot(host).card_detect_irq)
 		free_irq(mmc_slot(host).card_detect_irq, host);
 
+	if (host->tx_chan)
+		dma_release_channel(host->tx_chan);
+	if (host->rx_chan)
+		dma_release_channel(host->rx_chan);
+
 	pm_runtime_put_sync(host->dev);
 	pm_runtime_disable(host->dev);
 	clk_put(host->fclk);