Diffstat (limited to 'drivers/mmc/host/omap_hsmmc.c'):
 drivers/mmc/host/omap_hsmmc.c | 192 +++++++++++++++++++++++++++++++++--------
 1 file changed, 165 insertions(+), 27 deletions(-)
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 389a3eedfc24..effe328c1c14 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/debugfs.h>
+#include <linux/dmaengine.h>
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
@@ -165,7 +166,9 @@ struct omap_hsmmc_host {
         u32                     bytesleft;
         int                     suspended;
         int                     irq;
-        int                     use_dma, dma_ch;
+        int                     use_dma, dma_ch, dma2;
+        struct dma_chan         *tx_chan;
+        struct dma_chan         *rx_chan;
         int                     dma_line_tx, dma_line_rx;
         int                     slot_id;
         int                     response_busy;
@@ -797,19 +800,26 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
                 return DMA_FROM_DEVICE;
 }
 
+static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
+                                                struct mmc_data *data)
+{
+        return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
+}
+
 static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
 {
-        int dma_ch;
+        int dma_ch, dma2;
         unsigned long flags;
 
         spin_lock_irqsave(&host->irq_lock, flags);
         host->req_in_progress = 0;
         dma_ch = host->dma_ch;
+        dma2 = host->dma2;
         spin_unlock_irqrestore(&host->irq_lock, flags);
 
         omap_hsmmc_disable_irq(host);
         /* Do not complete the request if DMA is still in progress */
-        if (mrq->data && host->use_dma && dma_ch != -1)
+        if (mrq->data && host->use_dma && (dma_ch != -1 || dma2 != -1))
                 return;
         host->mrq = NULL;
         mmc_request_done(host->mmc, mrq);
@@ -878,7 +888,7 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
  */
 static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 {
-        int dma_ch;
+        int dma_ch, dma2;
         unsigned long flags;
 
         host->data->error = errno;
@@ -886,8 +896,20 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
         spin_lock_irqsave(&host->irq_lock, flags);
         dma_ch = host->dma_ch;
         host->dma_ch = -1;
+        dma2 = host->dma2;
+        host->dma2 = -1;
         spin_unlock_irqrestore(&host->irq_lock, flags);
 
+        if (host->use_dma && dma2 != -1) {
+                struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
+
+                dmaengine_terminate_all(chan);
+                dma_unmap_sg(chan->device->dev,
+                             host->data->sg, host->data->sg_len,
+                             omap_hsmmc_get_dma_dir(host, host->data));
+
+                host->data->host_cookie = 0;
+        }
         if (host->use_dma && dma_ch != -1) {
                 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
                              host->data->sg_len,
@@ -1284,9 +1306,43 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
         }
 }
 
+static void omap_hsmmc_dma_callback(void *param)
+{
+        struct omap_hsmmc_host *host = param;
+        struct dma_chan *chan;
+        struct mmc_data *data;
+        int req_in_progress;
+
+        spin_lock_irq(&host->irq_lock);
+        if (host->dma2 < 0) {
+                spin_unlock_irq(&host->irq_lock);
+                return;
+        }
+
+        data = host->mrq->data;
+        chan = omap_hsmmc_get_dma_chan(host, data);
+        if (!data->host_cookie)
+                dma_unmap_sg(chan->device->dev,
+                             data->sg, data->sg_len,
+                             omap_hsmmc_get_dma_dir(host, data));
+
+        req_in_progress = host->req_in_progress;
+        host->dma2 = -1;
+        spin_unlock_irq(&host->irq_lock);
+
+        /* If DMA has finished after TC, complete the request */
+        if (!req_in_progress) {
+                struct mmc_request *mrq = host->mrq;
+
+                host->mrq = NULL;
+                mmc_request_done(host->mmc, mrq);
+        }
+}
+
 static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
                                        struct mmc_data *data,
-                                       struct omap_hsmmc_next *next)
+                                       struct omap_hsmmc_next *next,
+                                       struct device *dev)
 {
         int dma_len;
 
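
Note on completion ordering: the dmaengine callback added above and the controller's transfer-complete (TC) interrupt may finish in either order, so each path records its own completion and the request is finished by whichever side sees that the other is already done. A simplified sketch of that handshake, with hypothetical names (struct example_host and its fields are illustrative, not the driver's actual types; assumes <linux/mmc/host.h> and <linux/spinlock.h>):

struct example_host {
        spinlock_t              lock;
        bool                    dma_pending;    /* dmaengine transfer still running */
        bool                    irq_pending;    /* TC interrupt not yet handled */
        struct mmc_host         *mmc;
        struct mmc_request      *mrq;
};

/* Runs from the dmaengine completion callback (tasklet context). */
static void example_dma_done(struct example_host *host)
{
        struct mmc_request *mrq;
        bool finish;

        spin_lock_irq(&host->lock);
        host->dma_pending = false;
        finish = !host->irq_pending;    /* TC already handled: we finish the request */
        mrq = host->mrq;
        if (finish)
                host->mrq = NULL;
        spin_unlock_irq(&host->lock);

        if (finish)
                mmc_request_done(host->mmc, mrq);
}
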
@@ -1301,8 +1357,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
         /* Check if next job is already prepared */
         if (next ||
             (!next && data->host_cookie != host->next_data.cookie)) {
-                dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
-                                     data->sg_len,
+                dma_len = dma_map_sg(dev, data->sg, data->sg_len,
                                      omap_hsmmc_get_dma_dir(host, data));
 
         } else {
@@ -1331,6 +1386,7 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
 {
         int dma_ch = 0, ret = 0, i;
         struct mmc_data *data = req->data;
+        struct dma_chan *chan;
 
         /* Sanity check: all the SG entries must be aligned by block size. */
         for (i = 0; i < data->sg_len; i++) {
@@ -1346,24 +1402,66 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
                  */
                 return -EINVAL;
 
-        BUG_ON(host->dma_ch != -1);
+        BUG_ON(host->dma_ch != -1 || host->dma2 != -1);
 
-        ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
-                               "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
-        if (ret != 0) {
-                dev_err(mmc_dev(host->mmc),
-                        "%s: omap_request_dma() failed with %d\n",
-                        mmc_hostname(host->mmc), ret);
-                return ret;
-        }
-        ret = omap_hsmmc_pre_dma_transfer(host, data, NULL);
-        if (ret)
-                return ret;
-
-        host->dma_ch = dma_ch;
-        host->dma_sg_idx = 0;
-
-        omap_hsmmc_config_dma_params(host, data, data->sg);
+        chan = omap_hsmmc_get_dma_chan(host, data);
+        if (!chan) {
+                ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
+                                       "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
+                if (ret != 0) {
+                        dev_err(mmc_dev(host->mmc),
+                                "%s: omap_request_dma() failed with %d\n",
+                                mmc_hostname(host->mmc), ret);
+                        return ret;
+                }
+                ret = omap_hsmmc_pre_dma_transfer(host, data, NULL,
+                                                  mmc_dev(host->mmc));
+                if (ret)
+                        return ret;
+
+                host->dma_ch = dma_ch;
+                host->dma_sg_idx = 0;
+
+                omap_hsmmc_config_dma_params(host, data, data->sg);
+        } else {
+                struct dma_slave_config cfg;
+                struct dma_async_tx_descriptor *tx;
+
+                cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
+                cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
+                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+                cfg.src_maxburst = data->blksz / 4;
+                cfg.dst_maxburst = data->blksz / 4;
+
+                ret = dmaengine_slave_config(chan, &cfg);
+                if (ret)
+                        return ret;
+
+                ret = omap_hsmmc_pre_dma_transfer(host, data, NULL,
+                                                  chan->device->dev);
+                if (ret)
+                        return ret;
+
+                tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
+                        data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+                if (!tx) {
+                        dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
+                        /* FIXME: cleanup */
+                        return -1;
+                }
+
+                tx->callback = omap_hsmmc_dma_callback;
+                tx->callback_param = host;
+
+                /* Does not fail */
+                dmaengine_submit(tx);
+
+                host->dma2 = 1;
+
+                dma_async_issue_pending(chan);
+        }
 
         return 0;
 }
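
For reference, the dmaengine branch added above follows the usual slave-DMA sequence: configure the channel for the device FIFO, prepare a descriptor for the scatterlist, submit it, then issue pending transfers. A minimal sketch of the same sequence, assuming the caller already owns a slave channel; example_start_sg(), its fifo parameter and the -EIO fallback are illustrative, not part of this patch (assumes <linux/dmaengine.h> and <linux/scatterlist.h>):

static int example_start_sg(struct dma_chan *chan, struct scatterlist *sg,
                            unsigned int sg_len, bool write, dma_addr_t fifo,
                            dma_async_tx_callback done, void *done_param)
{
        struct dma_slave_config cfg = {
                .src_addr       = fifo,
                .dst_addr       = fifo,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        };
        struct dma_async_tx_descriptor *tx;
        int ret;

        /* 1. Describe the device side of the transfer. */
        ret = dmaengine_slave_config(chan, &cfg);
        if (ret)
                return ret;

        /* 2. Build one descriptor covering the whole scatterlist. */
        tx = dmaengine_prep_slave_sg(chan, sg, sg_len,
                                     write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                                     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx)
                return -EIO;

        /* 3. Register the completion callback and queue the descriptor. */
        tx->callback = done;
        tx->callback_param = done_param;
        dmaengine_submit(tx);

        /* 4. Kick the engine; nothing moves until this is called. */
        dma_async_issue_pending(chan);
        return 0;
}
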
@@ -1446,9 +1544,12 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
         struct mmc_data *data = mrq->data;
 
         if (host->use_dma) {
+                struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
+                struct device *dev = c ? c->device->dev : mmc_dev(mmc);
+
                 if (data->host_cookie)
-                        dma_unmap_sg(mmc_dev(host->mmc), data->sg,
-                                     data->sg_len,
+                        dma_unmap_sg(dev,
+                                     data->sg, data->sg_len,
                                      omap_hsmmc_get_dma_dir(host, data));
                 data->host_cookie = 0;
         }
@@ -1464,10 +1565,14 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
                 return ;
         }
 
-        if (host->use_dma)
+        if (host->use_dma) {
+                struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
+                struct device *dev = c ? c->device->dev : mmc_dev(mmc);
+
                 if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
-                                                &host->next_data))
+                                                &host->next_data, dev))
                         mrq->data->host_cookie = 0;
+        }
 }
 
 /*
@@ -1479,7 +1584,7 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
         int err;
 
         BUG_ON(host->req_in_progress);
-        BUG_ON(host->dma_ch != -1);
+        BUG_ON(host->dma_ch != -1 || host->dma2 != -1);
         if (host->protect_card) {
                 if (host->reqs_blocked < 3) {
                         /*
@@ -1846,6 +1951,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
         host->use_dma = 1;
         host->dev->dma_mask = &pdata->dma_mask;
         host->dma_ch = -1;
+        host->dma2 = -1;
         host->irq = irq;
         host->slot_id = 0;
         host->mapbase = res->start + pdata->reg_offset;
@@ -1942,6 +2048,29 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
         }
         host->dma_line_rx = res->start;
 
+        {
+                dma_cap_mask_t mask;
+                unsigned sig;
+                extern bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
+
+                dma_cap_zero(mask);
+                dma_cap_set(DMA_SLAVE, mask);
+#if 1
+                sig = host->dma_line_rx;
+                host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+                if (!host->rx_chan) {
+                        dev_warn(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", sig);
+                }
+#endif
+#if 1
+                sig = host->dma_line_tx;
+                host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+                if (!host->tx_chan) {
+                        dev_warn(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", sig);
+                }
+#endif
+        }
+
         /* Request IRQ for MMC operations */
         ret = request_irq(host->irq, omap_hsmmc_irq, 0,
                           mmc_hostname(mmc), host);
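
The probe-time block above obtains the channels through the generic dmaengine allocator instead of the OMAP-private DMA API. A minimal sketch of dma_request_channel() with a platform filter function, reusing the omap_dma_filter_fn the patch declares; example_get_chan() and its handling of the request line are illustrative only (assumes <linux/dmaengine.h>; omap_dma_filter_fn() is provided by the OMAP DMA code):

static struct dma_chan *example_get_chan(unsigned int req_line)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /*
         * The filter is called for each candidate channel and binds the
         * matching one to the requested DMA line; a NULL return means no
         * suitable channel was available.
         */
        return dma_request_channel(mask, omap_dma_filter_fn, &req_line);
}
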
@@ -2019,6 +2148,10 @@ err_reg:
 err_irq_cd_init:
         free_irq(host->irq, host);
 err_irq:
+        if (host->tx_chan)
+                dma_release_channel(host->tx_chan);
+        if (host->rx_chan)
+                dma_release_channel(host->rx_chan);
         pm_runtime_put_sync(host->dev);
         pm_runtime_disable(host->dev);
         clk_put(host->fclk);
@@ -2054,6 +2187,11 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
         if (mmc_slot(host).card_detect_irq)
                 free_irq(mmc_slot(host).card_detect_irq, host);
 
+        if (host->tx_chan)
+                dma_release_channel(host->tx_chan);
+        if (host->rx_chan)
+                dma_release_channel(host->rx_chan);
+
         pm_runtime_put_sync(host->dev);
         pm_runtime_disable(host->dev);
         clk_put(host->fclk);