Diffstat (limited to 'drivers/dma/at_hdmac.c')
-rw-r--r--  drivers/dma/at_hdmac.c  159
1 file changed, 125 insertions(+), 34 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 6a483eac7b3f..3b99dc62874b 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 {
 	struct at_desc *desc, *_desc;
 	struct at_desc *ret = NULL;
+	unsigned long flags;
 	unsigned int i = 0;
 	LIST_HEAD(tmp_list);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
 		i++;
 		if (async_tx_test_ack(&desc->txd)) {
@@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 			dev_dbg(chan2dev(&atchan->chan_common),
 					"desc %p not ACKed\n", desc);
 	}
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"scanned %u descriptors on freelist\n", i);
 
@@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 	if (!ret) {
 		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
 		if (ret) {
-			spin_lock_bh(&atchan->lock);
+			spin_lock_irqsave(&atchan->lock, flags);
 			atchan->descs_allocated++;
-			spin_unlock_bh(&atchan->lock);
+			spin_unlock_irqrestore(&atchan->lock, flags);
 		} else {
 			dev_err(chan2dev(&atchan->chan_common),
 					"not enough descriptors available\n");
@@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 {
 	if (desc) {
 		struct at_desc *child;
+		unsigned long flags;
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 		list_for_each_entry(child, &desc->tx_list, desc_node)
 			dev_vdbg(chan2dev(&atchan->chan_common),
 					"moving child desc %p to freelist\n",
@@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 		dev_vdbg(chan2dev(&atchan->chan_common),
 			 "moving desc %p to freelist\n", desc);
 		list_add(&desc->desc_node, &atchan->free_list);
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	}
 }
 
@@ -299,7 +301,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 
 	/* for cyclic transfers,
 	 * no need to replay callback function while stopping */
-	if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+	if (!atc_chan_is_cyclic(atchan)) {
 		dma_async_tx_callback callback = txd->callback;
 		void *param = txd->callback_param;
 
@@ -471,16 +473,17 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
 static void atc_tasklet(unsigned long data)
 {
 	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
+	unsigned long flags;
 
-	spin_lock(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
 		atc_handle_error(atchan);
-	else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+	else if (atc_chan_is_cyclic(atchan))
 		atc_handle_cyclic(atchan);
 	else
 		atc_advance_work(atchan);
 
-	spin_unlock(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -539,8 +542,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct at_desc *desc = txd_to_at_desc(tx);
 	struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
 	dma_cookie_t cookie;
+	unsigned long flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	cookie = atc_assign_cookie(atchan, desc);
 
 	if (list_empty(&atchan->active_list)) {
@@ -554,7 +558,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 		list_add_tail(&desc->desc_node, &atchan->queue);
 	}
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return cookie;
 }
@@ -927,28 +931,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
 	struct at_dma *atdma = to_at_dma(chan->device);
 	int chan_id = atchan->chan_common.chan_id;
+	unsigned long flags;
 
 	LIST_HEAD(list);
 
 	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
 
 	if (cmd == DMA_PAUSE) {
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 
 		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
 		set_bit(ATC_IS_PAUSED, &atchan->status);
 
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	} else if (cmd == DMA_RESUME) {
-		if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+		if (!atc_chan_is_paused(atchan))
 			return 0;
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 
 		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
 		clear_bit(ATC_IS_PAUSED, &atchan->status);
 
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	} else if (cmd == DMA_TERMINATE_ALL) {
 		struct at_desc *desc, *_desc;
 		/*
@@ -957,7 +962,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		 * channel. We still have to poll the channel enable bit due
 		 * to AHB/HSB limitations.
 		 */
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 
 		/* disabling channel: must also remove suspend state */
 		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
@@ -978,7 +983,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		/* if channel dedicated to cyclic operations, free it */
 		clear_bit(ATC_IS_CYCLIC, &atchan->status);
 
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	} else {
 		return -ENXIO;
 	}
@@ -1004,9 +1009,10 @@ atc_tx_status(struct dma_chan *chan,
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
 	dma_cookie_t last_used;
 	dma_cookie_t last_complete;
+	unsigned long flags;
 	enum dma_status ret;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	last_complete = atchan->completed_cookie;
 	last_used = chan->cookie;
@@ -1021,7 +1027,7 @@ atc_tx_status(struct dma_chan *chan,
 		ret = dma_async_is_complete(cookie, last_complete, last_used);
 	}
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	if (ret != DMA_SUCCESS)
 		dma_set_tx_state(txstate, last_complete, last_used,
@@ -1029,7 +1035,7 @@ atc_tx_status(struct dma_chan *chan,
 	else
 		dma_set_tx_state(txstate, last_complete, last_used, 0);
 
-	if (test_bit(ATC_IS_PAUSED, &atchan->status))
+	if (atc_chan_is_paused(atchan))
 		ret = DMA_PAUSED;
 
 	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
@@ -1046,18 +1052,19 @@ atc_tx_status(struct dma_chan *chan,
 static void atc_issue_pending(struct dma_chan *chan)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(chan), "issue_pending\n");
 
 	/* Not needed for cyclic transfers */
-	if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+	if (atc_chan_is_cyclic(atchan))
 		return;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	if (!atc_chan_is_enabled(atchan)) {
 		atc_advance_work(atchan);
 	}
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**
@@ -1073,6 +1080,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 	struct at_dma *atdma = to_at_dma(chan->device);
 	struct at_desc *desc;
 	struct at_dma_slave *atslave;
+	unsigned long flags;
 	int i;
 	u32 cfg;
 	LIST_HEAD(tmp_list);
@@ -1116,11 +1124,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 		list_add_tail(&desc->desc_node, &tmp_list);
 	}
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	atchan->descs_allocated = i;
 	list_splice(&tmp_list, &atchan->free_list);
 	atchan->completed_cookie = chan->cookie = 1;
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	/* channel parameters */
 	channel_writel(atchan, CFG, cfg);
@@ -1293,15 +1301,13 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
 
-	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
+	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
-
-	if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
+		/* controller can do slave DMA: can trigger cyclic transfers */
+		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
 		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
-
-	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
-	    dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_control = atc_control;
+	}
 
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
 
@@ -1377,27 +1383,112 @@ static void at_dma_shutdown(struct platform_device *pdev)
 	clk_disable(atdma->clk);
 }
 
+static int at_dma_prepare(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct at_dma *atdma = platform_get_drvdata(pdev);
+	struct dma_chan *chan, *_chan;
+
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+			device_node) {
+		struct at_dma_chan *atchan = to_at_dma_chan(chan);
+		/* wait for transaction completion (except in cyclic case) */
+		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
+			return -EAGAIN;
+	}
+	return 0;
+}
+
+static void atc_suspend_cyclic(struct at_dma_chan *atchan)
+{
+	struct dma_chan *chan = &atchan->chan_common;
+
+	/* Channel should be paused by user
+	 * do it anyway even if it is not done already */
+	if (!atc_chan_is_paused(atchan)) {
+		dev_warn(chan2dev(chan),
+		"cyclic channel not paused, should be done by channel user\n");
+		atc_control(chan, DMA_PAUSE, 0);
+	}
+
+	/* now preserve additional data for cyclic operations */
+	/* next descriptor address in the cyclic list */
+	atchan->save_dscr = channel_readl(atchan, DSCR);
+
+	vdbg_dump_regs(atchan);
+}
+
 static int at_dma_suspend_noirq(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct at_dma *atdma = platform_get_drvdata(pdev);
+	struct dma_chan *chan, *_chan;
 
-	at_dma_off(platform_get_drvdata(pdev));
+	/* preserve data */
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+			device_node) {
+		struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+		if (atc_chan_is_cyclic(atchan))
+			atc_suspend_cyclic(atchan);
+		atchan->save_cfg = channel_readl(atchan, CFG);
+	}
+	atdma->save_imr = dma_readl(atdma, EBCIMR);
+
+	/* disable DMA controller */
+	at_dma_off(atdma);
 	clk_disable(atdma->clk);
 	return 0;
 }
 
+static void atc_resume_cyclic(struct at_dma_chan *atchan)
+{
+	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
+
+	/* restore channel status for cyclic descriptors list:
+	 * next descriptor in the cyclic list at the time of suspend */
+	channel_writel(atchan, SADDR, 0);
+	channel_writel(atchan, DADDR, 0);
+	channel_writel(atchan, CTRLA, 0);
+	channel_writel(atchan, CTRLB, 0);
+	channel_writel(atchan, DSCR, atchan->save_dscr);
+	dma_writel(atdma, CHER, atchan->mask);
+
+	/* channel pause status should be removed by channel user
+	 * We cannot take the initiative to do it here */
+
+	vdbg_dump_regs(atchan);
+}
+
 static int at_dma_resume_noirq(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct at_dma *atdma = platform_get_drvdata(pdev);
+	struct dma_chan *chan, *_chan;
 
+	/* bring back DMA controller */
 	clk_enable(atdma->clk);
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
+
+	/* clear any pending interrupt */
+	while (dma_readl(atdma, EBCISR))
+		cpu_relax();
+
+	/* restore saved data */
+	dma_writel(atdma, EBCIER, atdma->save_imr);
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+			device_node) {
+		struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+		channel_writel(atchan, CFG, atchan->save_cfg);
+		if (atc_chan_is_cyclic(atchan))
+			atc_resume_cyclic(atchan);
+	}
 	return 0;
 }
 
 static const struct dev_pm_ops at_dma_dev_pm_ops = {
+	.prepare = at_dma_prepare,
 	.suspend_noirq = at_dma_suspend_noirq,
 	.resume_noirq = at_dma_resume_noirq,
 };