author		Russell King <rmk+kernel@arm.linux.org.uk>	2012-04-13 07:27:37 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-07-31 07:06:22 -0400
commit		26b88520b80695a6fa5fd95b5d97c03f4daf87e0 (patch)
tree		6ac37cd764314fd7c0f84f401648f1a473b3edc3 /drivers
parent		c5c98927d74cd6853579e1a5b56cd6eb0d4885f0 (diff)
mmc: omap_hsmmc: remove private DMA API implementation
Remove the private DMA API implementation from omap_hsmmc, making it
use entirely the DMA engine API.

Tested-by: Tony Lindgren <tony@atomide.com>
Tested-by: Venkatraman S <svenkatr@ti.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/mmc/host/omap_hsmmc.c | 263
1 file changed, 63 insertions(+), 200 deletions(-)
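The new code paths in this patch all follow the standard dmaengine slave-transfer sequence: grab a channel through a filter function, describe the peripheral side with dmaengine_slave_config(), map the buffer, build a descriptor with dmaengine_prep_slave_sg(), then submit it and issue the pending work. The sketch below shows that sequence in isolation as a rough orientation aid; it is not code from omap_hsmmc, and the names my_issue_write and my_dma_done, the fifo_addr/dma_req_line parameters and the burst size of 16 are illustrative placeholders.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* The OMAP filter is what this driver uses; any dmaengine filter works here. */
extern bool omap_dma_filter_fn(struct dma_chan *chan, void *param);

/* Completion callback, run from the dmaengine tasklet once the transfer ends. */
static void my_dma_done(void *param)
{
}

static int my_issue_write(dma_addr_t fifo_addr, unsigned int dma_req_line,
			  struct scatterlist *sg, unsigned int sg_len)
{
	struct dma_slave_config cfg = {
		.dst_addr	= fifo_addr,			/* peripheral FIFO */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,				/* placeholder burst */
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int ret, mapped;

	/*
	 * 1. Take a slave channel wired to the peripheral's DMA request line.
	 * A real driver would request this once at probe time and keep it.
	 */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, omap_dma_filter_fn, &dma_req_line);
	if (!chan)
		return -ENODEV;

	/* 2. Describe the device end of the transfer. */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto release;

	/* 3. Map the scatterlist and build a mem-to-device descriptor. */
	mapped = dma_map_sg(chan->device->dev, sg, sg_len, DMA_TO_DEVICE);
	if (!mapped) {
		ret = -ENOMEM;
		goto release;
	}
	tx = dmaengine_prep_slave_sg(chan, sg, mapped, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		ret = -EIO;
		goto unmap;
	}
	tx->callback = my_dma_done;
	tx->callback_param = NULL;

	/* 4. Queue the descriptor and kick the engine. */
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;

unmap:
	dma_unmap_sg(chan->device->dev, sg, sg_len, DMA_TO_DEVICE);
release:
	dma_release_channel(chan);
	return ret;
}

The patch applies the same shape inside omap_hsmmc_start_dma_transfer(), except that the channels are requested once at probe time via omap_dma_filter_fn rather than per transfer, and completion is signalled through omap_hsmmc_dma_callback().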
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index effe328c1c14..2b2c98773f15 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -38,7 +38,6 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
-#include <plat/dma.h>
 #include <mach/hardware.h>
 #include <plat/board.h>
 #include <plat/mmc.h>
@@ -166,10 +165,9 @@ struct omap_hsmmc_host {
 	u32			bytesleft;
 	int			suspended;
 	int			irq;
-	int			use_dma, dma_ch, dma2;
+	int			use_dma, dma_ch;
 	struct dma_chan		*tx_chan;
 	struct dma_chan		*rx_chan;
-	int			dma_line_tx, dma_line_rx;
 	int			slot_id;
 	int			response_busy;
 	int			context_loss;
@@ -808,18 +806,17 @@ static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
 
 static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
 {
-	int dma_ch, dma2;
+	int dma_ch;
 	unsigned long flags;
 
 	spin_lock_irqsave(&host->irq_lock, flags);
 	host->req_in_progress = 0;
 	dma_ch = host->dma_ch;
-	dma2 = host->dma2;
 	spin_unlock_irqrestore(&host->irq_lock, flags);
 
 	omap_hsmmc_disable_irq(host);
 	/* Do not complete the request if DMA is still in progress */
-	if (mrq->data && host->use_dma && (dma_ch != -1 || dma2 != -1))
+	if (mrq->data && host->use_dma && dma_ch != -1)
 		return;
 	host->mrq = NULL;
 	mmc_request_done(host->mmc, mrq);
@@ -888,7 +885,7 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
  */
 static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 {
-	int dma_ch, dma2;
+	int dma_ch;
 	unsigned long flags;
 
 	host->data->error = errno;
@@ -896,11 +893,9 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 	spin_lock_irqsave(&host->irq_lock, flags);
 	dma_ch = host->dma_ch;
 	host->dma_ch = -1;
-	dma2 = host->dma2;
-	host->dma2 = -1;
 	spin_unlock_irqrestore(&host->irq_lock, flags);
 
-	if (host->use_dma && dma2 != -1) {
+	if (host->use_dma && dma_ch != -1) {
 		struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
 
 		dmaengine_terminate_all(chan);
@@ -910,13 +905,6 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 
 		host->data->host_cookie = 0;
 	}
-	if (host->use_dma && dma_ch != -1) {
-		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
-			     host->data->sg_len,
-			     omap_hsmmc_get_dma_dir(host, host->data));
-		omap_free_dma(dma_ch);
-		host->data->host_cookie = 0;
-	}
 	host->data = NULL;
 }
 
922 910
@@ -1212,100 +1200,6 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host,
-				       struct mmc_data *data)
-{
-	int sync_dev;
-
-	if (data->flags & MMC_DATA_WRITE)
-		sync_dev = host->dma_line_tx;
-	else
-		sync_dev = host->dma_line_rx;
-	return sync_dev;
-}
-
-static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
-					 struct mmc_data *data,
-					 struct scatterlist *sgl)
-{
-	int blksz, nblk, dma_ch;
-
-	dma_ch = host->dma_ch;
-	if (data->flags & MMC_DATA_WRITE) {
-		omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
-			(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
-		omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
-			sg_dma_address(sgl), 0, 0);
-	} else {
-		omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
-			(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
-		omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
-			sg_dma_address(sgl), 0, 0);
-	}
-
-	blksz = host->data->blksz;
-	nblk = sg_dma_len(sgl) / blksz;
-
-	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
-			blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
-			omap_hsmmc_get_dma_sync_dev(host, data),
-			!(data->flags & MMC_DATA_WRITE));
-
-	omap_start_dma(dma_ch);
-}
-
-/*
- * DMA call back function
- */
-static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
-{
-	struct omap_hsmmc_host *host = cb_data;
-	struct mmc_data *data;
-	int dma_ch, req_in_progress;
-	unsigned long flags;
-
-	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
-		dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
-			ch_status);
-		return;
-	}
-
-	spin_lock_irqsave(&host->irq_lock, flags);
-	if (host->dma_ch < 0) {
-		spin_unlock_irqrestore(&host->irq_lock, flags);
-		return;
-	}
-
-	data = host->mrq->data;
-	host->dma_sg_idx++;
-	if (host->dma_sg_idx < host->dma_len) {
-		/* Fire up the next transfer. */
-		omap_hsmmc_config_dma_params(host, data,
-					     data->sg + host->dma_sg_idx);
-		spin_unlock_irqrestore(&host->irq_lock, flags);
-		return;
-	}
-
-	if (!data->host_cookie)
-		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			     omap_hsmmc_get_dma_dir(host, data));
-
-	req_in_progress = host->req_in_progress;
-	dma_ch = host->dma_ch;
-	host->dma_ch = -1;
-	spin_unlock_irqrestore(&host->irq_lock, flags);
-
-	omap_free_dma(dma_ch);
-
-	/* If DMA has finished after TC, complete the request */
-	if (!req_in_progress) {
-		struct mmc_request *mrq = host->mrq;
-
-		host->mrq = NULL;
-		mmc_request_done(host->mmc, mrq);
-	}
-}
-
 static void omap_hsmmc_dma_callback(void *param)
 {
 	struct omap_hsmmc_host *host = param;
@@ -1314,7 +1208,7 @@ static void omap_hsmmc_dma_callback(void *param)
 	int req_in_progress;
 
 	spin_lock_irq(&host->irq_lock);
-	if (host->dma2 < 0) {
+	if (host->dma_ch < 0) {
 		spin_unlock_irq(&host->irq_lock);
 		return;
 	}
@@ -1327,7 +1221,7 @@ static void omap_hsmmc_dma_callback(void *param)
 			     omap_hsmmc_get_dma_dir(host, data));
 
 	req_in_progress = host->req_in_progress;
-	host->dma2 = -1;
+	host->dma_ch = -1;
 	spin_unlock_irq(&host->irq_lock);
 
 	/* If DMA has finished after TC, complete the request */
@@ -1342,7 +1236,7 @@ static void omap_hsmmc_dma_callback(void *param)
 static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 				       struct mmc_data *data,
 				       struct omap_hsmmc_next *next,
-				       struct device *dev)
+				       struct dma_chan *chan)
 {
 	int dma_len;
 
@@ -1357,7 +1251,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 	/* Check if next job is already prepared */
 	if (next ||
 	    (!next && data->host_cookie != host->next_data.cookie)) {
-		dma_len = dma_map_sg(dev, data->sg, data->sg_len,
+		dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
 				     omap_hsmmc_get_dma_dir(host, data));
 
 	} else {
@@ -1384,7 +1278,9 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
 					 struct mmc_request *req)
 {
-	int dma_ch = 0, ret = 0, i;
+	struct dma_slave_config cfg;
+	struct dma_async_tx_descriptor *tx;
+	int ret = 0, i;
 	struct mmc_data *data = req->data;
 	struct dma_chan *chan;
 
@@ -1402,66 +1298,43 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
 	 */
 		return -EINVAL;
 
-	BUG_ON(host->dma_ch != -1 || host->dma2 != -1);
+	BUG_ON(host->dma_ch != -1);
 
 	chan = omap_hsmmc_get_dma_chan(host, data);
-	if (!chan) {
-		ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
-				       "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
-		if (ret != 0) {
-			dev_err(mmc_dev(host->mmc),
-				"%s: omap_request_dma() failed with %d\n",
-				mmc_hostname(host->mmc), ret);
-			return ret;
-		}
-		ret = omap_hsmmc_pre_dma_transfer(host, data, NULL,
-						  mmc_dev(host->mmc));
-		if (ret)
-			return ret;
-
-		host->dma_ch = dma_ch;
-		host->dma_sg_idx = 0;
 
-		omap_hsmmc_config_dma_params(host, data, data->sg);
-	} else {
-		struct dma_slave_config cfg;
-		struct dma_async_tx_descriptor *tx;
+	cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
+	cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
+	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.src_maxburst = data->blksz / 4;
+	cfg.dst_maxburst = data->blksz / 4;
 
-		cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
-		cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
-		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-		cfg.src_maxburst = data->blksz / 4;
-		cfg.dst_maxburst = data->blksz / 4;
-
-		ret = dmaengine_slave_config(chan, &cfg);
-		if (ret)
-			return ret;
+	ret = dmaengine_slave_config(chan, &cfg);
+	if (ret)
+		return ret;
 
-		ret = omap_hsmmc_pre_dma_transfer(host, data, NULL,
-						  chan->device->dev);
-		if (ret)
-			return ret;
+	ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
+	if (ret)
+		return ret;
 
-		tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
-			data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
-			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-		if (!tx) {
-			dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
-			/* FIXME: cleanup */
-			return -1;
-		}
+	tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
+		data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!tx) {
+		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
+		/* FIXME: cleanup */
+		return -1;
+	}
 
-		tx->callback = omap_hsmmc_dma_callback;
-		tx->callback_param = host;
+	tx->callback = omap_hsmmc_dma_callback;
+	tx->callback_param = host;
 
-		/* Does not fail */
-		dmaengine_submit(tx);
+	/* Does not fail */
+	dmaengine_submit(tx);
 
-		host->dma2 = 1;
+	host->dma_ch = 1;
 
-		dma_async_issue_pending(chan);
-	}
+	dma_async_issue_pending(chan);
 
 	return 0;
 }
@@ -1543,14 +1416,11 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
 	struct omap_hsmmc_host *host = mmc_priv(mmc);
 	struct mmc_data *data = mrq->data;
 
-	if (host->use_dma) {
+	if (host->use_dma && data->host_cookie) {
 		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
-		struct device *dev = c ? c->device->dev : mmc_dev(mmc);
 
-		if (data->host_cookie)
-			dma_unmap_sg(dev,
-				     data->sg, data->sg_len,
-				     omap_hsmmc_get_dma_dir(host, data));
+		dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
+			     omap_hsmmc_get_dma_dir(host, data));
 		data->host_cookie = 0;
 	}
 }
@@ -1567,10 +1437,9 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
 
 	if (host->use_dma) {
 		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
-		struct device *dev = c ? c->device->dev : mmc_dev(mmc);
 
 		if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
-						&host->next_data, dev))
+						&host->next_data, c))
 			mrq->data->host_cookie = 0;
 	}
 }
@@ -1584,7 +1453,7 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
 	int err;
 
 	BUG_ON(host->req_in_progress);
-	BUG_ON(host->dma_ch != -1 || host->dma2 != -1);
+	BUG_ON(host->dma_ch != -1);
 	if (host->protect_card) {
 		if (host->reqs_blocked < 3) {
 			/*
@@ -1897,6 +1766,8 @@ static inline struct omap_mmc_platform_data
 }
 #endif
 
+extern bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
+
 static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 {
 	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
@@ -1905,6 +1776,8 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 	struct resource *res;
 	int ret, irq;
 	const struct of_device_id *match;
+	dma_cap_mask_t mask;
+	unsigned tx_req, rx_req;
 
 	match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
 	if (match) {
@@ -1949,9 +1822,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 	host->pdata	= pdata;
 	host->dev	= &pdev->dev;
 	host->use_dma	= 1;
-	host->dev->dma_mask = &pdata->dma_mask;
 	host->dma_ch	= -1;
-	host->dma2	= -1;
 	host->irq	= irq;
 	host->slot_id	= 0;
 	host->mapbase	= res->start + pdata->reg_offset;
@@ -2039,36 +1910,28 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 		dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
 		goto err_irq;
 	}
-	host->dma_line_tx = res->start;
+	tx_req = res->start;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
 	if (!res) {
 		dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
 		goto err_irq;
 	}
-	host->dma_line_rx = res->start;
+	rx_req = res->start;
 
-	{
-		dma_cap_mask_t mask;
-		unsigned sig;
-		extern bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
-
-		dma_cap_zero(mask);
-		dma_cap_set(DMA_SLAVE, mask);
-#if 1
-		sig = host->dma_line_rx;
-		host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
-		if (!host->rx_chan) {
-			dev_warn(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", sig);
-		}
-#endif
-#if 1
-		sig = host->dma_line_tx;
-		host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
-		if (!host->tx_chan) {
-			dev_warn(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", sig);
-		}
-#endif
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
+	if (!host->rx_chan) {
+		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
+		goto err_irq;
+	}
+
+	host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
+	if (!host->tx_chan) {
+		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
+		goto err_irq;
 	}
 
 	/* Request IRQ for MMC operations */