path: root/drivers/dma/dw_dmac.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2011-05-28 15:35:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-28 15:35:15 -0400
commit		4cb865deec59ef31d966622d1ec87411ae32dfab (patch)
tree		e060d515f62e4f334aded38c9079485d50166693 /drivers/dma/dw_dmac.c
parent		55f08e1baa3ef11c952b626dbc7ef9e3e8332a63 (diff)
parent		19d78a61be6dd707dcec298c486303d4ba2c840a (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (33 commits)
  x86: poll waiting for I/OAT DMA channel status
  maintainers: add dma engine tree details
  dmaengine: add TODO items for future work on dma drivers
  dmaengine: Add API documentation for slave dma usage
  dmaengine/dw_dmac: Update maintainer-ship
  dmaengine: move link order
  dmaengine/dw_dmac: implement pause and resume in dwc_control
  dmaengine/dw_dmac: Replace spin_lock* with irqsave variants and enable submission from callback
  dmaengine/dw_dmac: Divide one sg to many desc, if sg len is greater than DWC_MAX_COUNT
  dmaengine/dw_dmac: set residue as total len in dwc_tx_status if status is !DMA_SUCCESS
  dmaengine/dw_dmac: don't call callback routine in case dmaengine_terminate_all() is called
  dmaengine: at_hdmac: pause: no need to wait for FIFO empty
  pch_dma: modify pci device table definition
  pch_dma: Support new device ML7223 IOH
  pch_dma: Support I2S for ML7213 IOH
  pch_dma: Fix DMA setting issue
  pch_dma: modify for checkpatch
  pch_dma: fix dma direction issue for ML7213 IOH video-in
  dmaengine: at_hdmac: use descriptor chaining help function
  dmaengine: at_hdmac: implement pause and resume in atc_control
  ...

Fix up trivial conflict in drivers/dma/dw_dmac.c
Diffstat (limited to 'drivers/dma/dw_dmac.c')
-rw-r--r--	drivers/dma/dw_dmac.c	272
1 file changed, 181 insertions(+), 91 deletions(-)
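
A recurring change in the diff below is the replacement of spin_lock_bh()/spin_lock() with the irqsave variants, together with dropping the channel lock before any user callback runs, so that a completion callback may submit the next transfer without deadlocking (the point of "Replace spin_lock* with irqsave variants and enable submission from callback"). A minimal sketch of that pattern, with a hypothetical demo_chan and demo_complete() standing in for struct dw_dma_chan and dwc_descriptor_complete():

	#include <linux/spinlock.h>
	#include <linux/list.h>

	/* Hypothetical channel, standing in for struct dw_dma_chan. */
	struct demo_chan {
		spinlock_t		lock;
		struct list_head	queue;
	};

	/*
	 * Take the lock with interrupts disabled (safe from both tasklet
	 * and process context), then release it before invoking the
	 * callback: the callback may call back into the driver and
	 * retake the lock to queue another descriptor.
	 */
	static void demo_complete(struct demo_chan *dc,
				  void (*callback)(void *param), void *param)
	{
		unsigned long flags;

		spin_lock_irqsave(&dc->lock, flags);
		/* ... update channel state and lists under the lock ... */
		spin_unlock_irqrestore(&dc->lock, flags);

		if (callback)
			callback(param);	/* may submit a new descriptor */
	}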
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 2a2e2fa00e9..4d180ca9a1d 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -3,6 +3,7 @@
  * AVR32 systems.)
  *
  * Copyright (C) 2007-2008 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -93,8 +94,9 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 	struct dw_desc *desc, *_desc;
 	struct dw_desc *ret = NULL;
 	unsigned int i = 0;
+	unsigned long flags;
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
 		if (async_tx_test_ack(&desc->txd)) {
 			list_del(&desc->desc_node);
@@ -104,7 +106,7 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
 		i++;
 	}
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
 
@@ -130,12 +132,14 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
  */
 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 {
+	unsigned long flags;
+
 	if (desc) {
 		struct dw_desc *child;
 
 		dwc_sync_desc_for_cpu(dwc, desc);
 
-		spin_lock_bh(&dwc->lock);
+		spin_lock_irqsave(&dwc->lock, flags);
 		list_for_each_entry(child, &desc->tx_list, desc_node)
 			dev_vdbg(chan2dev(&dwc->chan),
 				 "moving child desc %p to freelist\n",
@@ -143,7 +147,7 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 		list_splice_init(&desc->tx_list, &dwc->free_list);
 		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
 		list_add(&desc->desc_node, &dwc->free_list);
-		spin_unlock_bh(&dwc->lock);
+		spin_unlock_irqrestore(&dwc->lock, flags);
 	}
 }
 
@@ -195,18 +199,23 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 /*----------------------------------------------------------------------*/
 
 static void
-dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
+dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
+		bool callback_required)
 {
-	dma_async_tx_callback callback;
-	void *param;
+	dma_async_tx_callback callback = NULL;
+	void *param = NULL;
 	struct dma_async_tx_descriptor *txd = &desc->txd;
 	struct dw_desc *child;
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
 
+	spin_lock_irqsave(&dwc->lock, flags);
 	dwc->completed = txd->cookie;
-	callback = txd->callback;
-	param = txd->callback_param;
+	if (callback_required) {
+		callback = txd->callback;
+		param = txd->callback_param;
+	}
@@ -238,11 +247,9 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
 		}
 	}
 
-	/*
-	 * The API requires that no submissions are done from a
-	 * callback, so we don't need to drop the lock here
-	 */
-	if (callback)
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	if (callback_required && callback)
 		callback(param);
 }
 
@@ -250,7 +257,9 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 {
 	struct dw_desc *desc, *_desc;
 	LIST_HEAD(list);
+	unsigned long flags;
 
+	spin_lock_irqsave(&dwc->lock, flags);
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
 		dev_err(chan2dev(&dwc->chan),
 			"BUG: XFER bit set, but channel not idle!\n");
@@ -271,8 +280,10 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		dwc_dostart(dwc, dwc_first_active(dwc));
 	}
 
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		dwc_descriptor_complete(dwc, desc);
+		dwc_descriptor_complete(dwc, desc, true);
 }
 
 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -281,7 +292,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	struct dw_desc *desc, *_desc;
 	struct dw_desc *child;
 	u32 status_xfer;
+	unsigned long flags;
 
+	spin_lock_irqsave(&dwc->lock, flags);
 	/*
 	 * Clear block interrupt flag before scanning so that we don't
 	 * miss any, and read LLP before RAW_XFER to ensure it is
@@ -294,30 +307,47 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	if (status_xfer & dwc->mask) {
 		/* Everything we've submitted is done */
 		dma_writel(dw, CLEAR.XFER, dwc->mask);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+
 		dwc_complete_all(dw, dwc);
 		return;
 	}
 
-	if (list_empty(&dwc->active_list))
+	if (list_empty(&dwc->active_list)) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
 		return;
+	}
 
 	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
 
 	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
-		if (desc->lli.llp == llp)
+		/* check first descriptors addr */
+		if (desc->txd.phys == llp) {
+			spin_unlock_irqrestore(&dwc->lock, flags);
+			return;
+		}
+
+		/* check first descriptors llp */
+		if (desc->lli.llp == llp) {
 			/* This one is currently in progress */
+			spin_unlock_irqrestore(&dwc->lock, flags);
 			return;
+		}
 
 		list_for_each_entry(child, &desc->tx_list, desc_node)
-			if (child->lli.llp == llp)
+			if (child->lli.llp == llp) {
 				/* Currently in progress */
+				spin_unlock_irqrestore(&dwc->lock, flags);
 				return;
+			}
 
 		/*
 		 * No descriptors so far seem to be in progress, i.e.
 		 * this one must be done.
 		 */
-		dwc_descriptor_complete(dwc, desc);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		dwc_descriptor_complete(dwc, desc, true);
+		spin_lock_irqsave(&dwc->lock, flags);
 	}
 
 	dev_err(chan2dev(&dwc->chan),
@@ -332,6 +362,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		list_move(dwc->queue.next, &dwc->active_list);
 		dwc_dostart(dwc, dwc_first_active(dwc));
 	}
+	spin_unlock_irqrestore(&dwc->lock, flags);
 }
 
 static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
@@ -346,9 +377,12 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 {
 	struct dw_desc *bad_desc;
 	struct dw_desc *child;
+	unsigned long flags;
 
 	dwc_scan_descriptors(dw, dwc);
 
+	spin_lock_irqsave(&dwc->lock, flags);
+
 	/*
 	 * The descriptor currently at the head of the active list is
 	 * borked. Since we don't have any way to report errors, we'll
@@ -378,8 +412,10 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
 		dwc_dump_lli(dwc, &child->lli);
 
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
 	/* Pretend the descriptor completed successfully */
-	dwc_descriptor_complete(dwc, bad_desc);
+	dwc_descriptor_complete(dwc, bad_desc, true);
 }
 
 /* --------------------- Cyclic DMA API extensions -------------------- */
@@ -402,6 +438,8 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 		u32 status_block, u32 status_err, u32 status_xfer)
 {
+	unsigned long flags;
+
 	if (status_block & dwc->mask) {
 		void (*callback)(void *param);
 		void *callback_param;
@@ -412,11 +450,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 
 		callback = dwc->cdesc->period_callback;
 		callback_param = dwc->cdesc->period_callback_param;
-		if (callback) {
-			spin_unlock(&dwc->lock);
+
+		if (callback)
 			callback(callback_param);
-			spin_lock(&dwc->lock);
-		}
 	}
 
 	/*
@@ -430,6 +466,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
 			"interrupt, stopping DMA transfer\n",
 			status_xfer ? "xfer" : "error");
+
+		spin_lock_irqsave(&dwc->lock, flags);
+
 		dev_err(chan2dev(&dwc->chan),
 			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
 			channel_readl(dwc, SAR),
@@ -453,6 +492,8 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 
 		for (i = 0; i < dwc->cdesc->periods; i++)
 			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
 	}
 }
 
@@ -476,7 +517,6 @@ static void dw_dma_tasklet(unsigned long data)
 
 	for (i = 0; i < dw->dma.chancnt; i++) {
 		dwc = &dw->chan[i];
-		spin_lock(&dwc->lock);
 		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
 			dwc_handle_cyclic(dw, dwc, status_block, status_err,
 					status_xfer);
@@ -484,7 +524,6 @@ static void dw_dma_tasklet(unsigned long data)
 			dwc_handle_error(dw, dwc);
 		else if ((status_block | status_xfer) & (1 << i))
 			dwc_scan_descriptors(dw, dwc);
-		spin_unlock(&dwc->lock);
 	}
 
 	/*
@@ -539,8 +578,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct dw_desc *desc = txd_to_dw_desc(tx);
 	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
 	dma_cookie_t cookie;
+	unsigned long flags;
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	cookie = dwc_assign_cookie(dwc, desc);
 
 	/*
@@ -560,7 +600,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
 		list_add_tail(&desc->desc_node, &dwc->queue);
 	}
 
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	return cookie;
 }
@@ -689,9 +729,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		reg = dws->tx_reg;
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc *desc;
-			u32 len;
-			u32 mem;
+			u32 len, dlen, mem;
+
+			mem = sg_phys(sg);
+			len = sg_dma_len(sg);
+			mem_width = 2;
+			if (unlikely(mem & 3 || len & 3))
+				mem_width = 0;
 
+slave_sg_todev_fill_desc:
 			desc = dwc_desc_get(dwc);
 			if (!desc) {
 				dev_err(chan2dev(chan),
@@ -699,16 +745,19 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 				goto err_desc_get;
 			}
 
-			mem = sg_phys(sg);
-			len = sg_dma_len(sg);
-			mem_width = 2;
-			if (unlikely(mem & 3 || len & 3))
-				mem_width = 0;
-
 			desc->lli.sar = mem;
 			desc->lli.dar = reg;
 			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
-			desc->lli.ctlhi = len >> mem_width;
+			if ((len >> mem_width) > DWC_MAX_COUNT) {
+				dlen = DWC_MAX_COUNT << mem_width;
+				mem += dlen;
+				len -= dlen;
+			} else {
+				dlen = len;
+				len = 0;
+			}
+
+			desc->lli.ctlhi = dlen >> mem_width;
 
 			if (!first) {
 				first = desc;
@@ -722,7 +771,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 						&first->tx_list);
 			}
 			prev = desc;
-			total_len += len;
+			total_len += dlen;
+
+			if (len)
+				goto slave_sg_todev_fill_desc;
 		}
 		break;
 	case DMA_FROM_DEVICE:
@@ -735,15 +787,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		reg = dws->rx_reg;
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc *desc;
-			u32 len;
-			u32 mem;
-
-			desc = dwc_desc_get(dwc);
-			if (!desc) {
-				dev_err(chan2dev(chan),
-					"not enough descriptors available\n");
-				goto err_desc_get;
-			}
+			u32 len, dlen, mem;
 
 			mem = sg_phys(sg);
 			len = sg_dma_len(sg);
@@ -751,10 +795,26 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			if (unlikely(mem & 3 || len & 3))
 				mem_width = 0;
 
+slave_sg_fromdev_fill_desc:
+			desc = dwc_desc_get(dwc);
+			if (!desc) {
+				dev_err(chan2dev(chan),
+						"not enough descriptors available\n");
+				goto err_desc_get;
+			}
+
 			desc->lli.sar = reg;
 			desc->lli.dar = mem;
 			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
-			desc->lli.ctlhi = len >> reg_width;
+			if ((len >> reg_width) > DWC_MAX_COUNT) {
+				dlen = DWC_MAX_COUNT << reg_width;
+				mem += dlen;
+				len -= dlen;
+			} else {
+				dlen = len;
+				len = 0;
+			}
+			desc->lli.ctlhi = dlen >> reg_width;
 
 			if (!first) {
 				first = desc;
@@ -768,7 +828,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 						&first->tx_list);
 			}
 			prev = desc;
-			total_len += len;
+			total_len += dlen;
+
+			if (len)
+				goto slave_sg_fromdev_fill_desc;
 		}
 		break;
 	default:
@@ -799,34 +862,51 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(chan->device);
 	struct dw_desc *desc, *_desc;
+	unsigned long flags;
+	u32 cfglo;
 	LIST_HEAD(list);
 
-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
+	if (cmd == DMA_PAUSE) {
+		spin_lock_irqsave(&dwc->lock, flags);
 
-	/*
-	 * This is only called when something went wrong elsewhere, so
-	 * we don't really care about the data. Just disable the
-	 * channel. We still have to poll the channel enable bit due
-	 * to AHB/HSB limitations.
-	 */
-	spin_lock_bh(&dwc->lock);
+		cfglo = channel_readl(dwc, CFG_LO);
+		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+			cpu_relax();
 
-	channel_clear_bit(dw, CH_EN, dwc->mask);
+		dwc->paused = true;
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	} else if (cmd == DMA_RESUME) {
+		if (!dwc->paused)
+			return 0;
 
-	while (dma_readl(dw, CH_EN) & dwc->mask)
-		cpu_relax();
+		spin_lock_irqsave(&dwc->lock, flags);
 
-	/* active_list entries will end up before queued entries */
-	list_splice_init(&dwc->queue, &list);
-	list_splice_init(&dwc->active_list, &list);
+		cfglo = channel_readl(dwc, CFG_LO);
+		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+		dwc->paused = false;
 
-	spin_unlock_bh(&dwc->lock);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	} else if (cmd == DMA_TERMINATE_ALL) {
+		spin_lock_irqsave(&dwc->lock, flags);
 
-	/* Flush all pending and queued descriptors */
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		dwc_descriptor_complete(dwc, desc);
+		channel_clear_bit(dw, CH_EN, dwc->mask);
+		while (dma_readl(dw, CH_EN) & dwc->mask)
+			cpu_relax();
+
+		dwc->paused = false;
+
+		/* active_list entries will end up before queued entries */
+		list_splice_init(&dwc->queue, &list);
+		list_splice_init(&dwc->active_list, &list);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+
+		/* Flush all pending and queued descriptors */
+		list_for_each_entry_safe(desc, _desc, &list, desc_node)
+			dwc_descriptor_complete(dwc, desc, false);
+	} else
+		return -ENXIO;
 
 	return 0;
 }
@@ -846,9 +926,7 @@ dwc_tx_status(struct dma_chan *chan,
 
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
 	if (ret != DMA_SUCCESS) {
-		spin_lock_bh(&dwc->lock);
 		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
-		spin_unlock_bh(&dwc->lock);
 
 		last_complete = dwc->completed;
 		last_used = chan->cookie;
@@ -856,7 +934,14 @@ dwc_tx_status(struct dma_chan *chan,
 		ret = dma_async_is_complete(cookie, last_complete, last_used);
 	}
 
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
+	if (ret != DMA_SUCCESS)
+		dma_set_tx_state(txstate, last_complete, last_used,
+				dwc_first_active(dwc)->len);
+	else
+		dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+	if (dwc->paused)
+		return DMA_PAUSED;
 
 	return ret;
 }
@@ -865,10 +950,8 @@ static void dwc_issue_pending(struct dma_chan *chan)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 
-	spin_lock_bh(&dwc->lock);
 	if (!list_empty(&dwc->queue))
 		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
-	spin_unlock_bh(&dwc->lock);
 }
 
 static int dwc_alloc_chan_resources(struct dma_chan *chan)
@@ -880,6 +963,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	int i;
 	u32 cfghi;
 	u32 cfglo;
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
 
@@ -917,16 +1001,16 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	 * doesn't mean what you think it means), and status writeback.
 	 */
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	i = dwc->descs_allocated;
 	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
-		spin_unlock_bh(&dwc->lock);
+		spin_unlock_irqrestore(&dwc->lock, flags);
 
 		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
 		if (!desc) {
 			dev_info(chan2dev(chan),
 				"only allocated %d descriptors\n", i);
-			spin_lock_bh(&dwc->lock);
+			spin_lock_irqsave(&dwc->lock, flags);
 			break;
 		}
 
@@ -938,7 +1022,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 				sizeof(desc->lli), DMA_TO_DEVICE);
 		dwc_desc_put(dwc, desc);
 
-		spin_lock_bh(&dwc->lock);
+		spin_lock_irqsave(&dwc->lock, flags);
 		i = ++dwc->descs_allocated;
 	}
 
@@ -947,7 +1031,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_set_bit(dw, MASK.ERROR, dwc->mask);
 
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	dev_dbg(chan2dev(chan),
 		"alloc_chan_resources allocated %d descriptors\n", i);
@@ -960,6 +1044,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(chan->device);
 	struct dw_desc *desc, *_desc;
+	unsigned long flags;
 	LIST_HEAD(list);
 
 	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
@@ -970,7 +1055,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	BUG_ON(!list_empty(&dwc->queue));
 	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	list_splice_init(&dwc->free_list, &list);
 	dwc->descs_allocated = 0;
 
@@ -979,7 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
 
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
 		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
@@ -1004,13 +1089,14 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	unsigned long flags;
 
 	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
 		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
 		return -ENODEV;
 	}
 
-	spin_lock(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 
 	/* assert channel is idle */
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
@@ -1023,7 +1109,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
 			channel_readl(dwc, LLP),
 			channel_readl(dwc, CTL_HI),
 			channel_readl(dwc, CTL_LO));
-		spin_unlock(&dwc->lock);
+		spin_unlock_irqrestore(&dwc->lock, flags);
 		return -EBUSY;
 	}
 
@@ -1038,7 +1124,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
 
 	channel_set_bit(dw, CH_EN, dwc->mask);
 
-	spin_unlock(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	return 0;
 }
@@ -1054,14 +1140,15 @@ void dw_dma_cyclic_stop(struct dma_chan *chan)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	unsigned long flags;
 
-	spin_lock(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 
 	channel_clear_bit(dw, CH_EN, dwc->mask);
 	while (dma_readl(dw, CH_EN) & dwc->mask)
 		cpu_relax();
 
-	spin_unlock(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 }
 EXPORT_SYMBOL(dw_dma_cyclic_stop);
 
@@ -1090,17 +1177,18 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 	unsigned int reg_width;
 	unsigned int periods;
 	unsigned int i;
+	unsigned long flags;
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
-		spin_unlock_bh(&dwc->lock);
+		spin_unlock_irqrestore(&dwc->lock, flags);
 		dev_dbg(chan2dev(&dwc->chan),
 			"queue and/or active list are not empty\n");
 		return ERR_PTR(-EBUSY);
 	}
 
 	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 	if (was_cyclic) {
 		dev_dbg(chan2dev(&dwc->chan),
 			"channel already prepared for cyclic DMA\n");
@@ -1214,13 +1302,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
 	struct dw_cyclic_desc *cdesc = dwc->cdesc;
 	int i;
+	unsigned long flags;
 
 	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
 
 	if (!cdesc)
 		return;
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 
 	channel_clear_bit(dw, CH_EN, dwc->mask);
 	while (dma_readl(dw, CH_EN) & dwc->mask)
@@ -1230,7 +1319,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
 	dma_writel(dw, CLEAR.XFER, dwc->mask);
 
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	for (i = 0; i < cdesc->periods; i++)
 		dwc_desc_put(dwc, cdesc->desc[i]);
@@ -1487,3 +1576,4 @@ module_exit(dw_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
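
For reference, the descriptor splitting added to dwc_prep_slave_sg() above bounds each hardware block by DWC_MAX_COUNT transfer words. Below is a self-contained plain-C sketch of that loop; MAX_BLOCK_TS and split_sg_entry() are hypothetical names, and 2047 is an arbitrary demo limit, not the driver's actual DWC_MAX_COUNT:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_BLOCK_TS 2047	/* demo value: block limit in words */

	static void split_sg_entry(uint32_t mem, uint32_t len,
				   unsigned int mem_width)
	{
		uint32_t dlen;

		do {
			if ((len >> mem_width) > MAX_BLOCK_TS)
				/* largest chunk the hardware allows, in bytes */
				dlen = MAX_BLOCK_TS << mem_width;
			else
				dlen = len;

			printf("desc: src=0x%08x block_ts=%u (%u bytes)\n",
			       (unsigned int)mem,
			       (unsigned int)(dlen >> mem_width),
			       (unsigned int)dlen);
			mem += dlen;	/* advance the source address */
			len -= dlen;	/* bytes left in this sg entry */
		} while (len);		/* one descriptor per iteration */
	}

	int main(void)
	{
		/* An 8 KiB entry at 32-bit width (mem_width = 2) is 2048
		 * words, so with a 2047-word limit it splits into two
		 * descriptors: 2047 words + 1 word. */
		split_sg_entry(0x80000000u, 8192, 2);
		return 0;
	}

The driver's version allocates a dw_desc per iteration via dwc_desc_get() and chains the pieces through the LLP field, which is why total_len now accumulates dlen rather than len.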