diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-29 18:34:57 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-29 18:34:57 -0400 |
commit | ef08e78268423fc4d7fbc3e54bd9a67fc8da7cc5 (patch) | |
tree | d0561d3ef89c9cd277a38168e33850666cbd33c4 /drivers/dma/timb_dma.c | |
parent | 71db34fc4330f7c784397acb9f1e6ee7f7b32eb2 (diff) | |
parent | 5b2e02e401deb44e7f5befe19404d8b2688efea4 (diff) |
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine update from Vinod Koul:
"This includes the cookie cleanup by Russell, the addition of context
parameter for dmaengine APIs, more arm dmaengine driver cleanup by
moving code to dmaengine, this time for imx by Javier and pl330 by
Boojin along with the usual driver fixes."
Fix up some fairly trivial conflicts with various other cleanups.
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (67 commits)
dmaengine: imx: fix the build failure on x86_64
dmaengine: i.MX: Fix merge of cookie branch.
dmaengine: i.MX: Add support for interleaved transfers.
dmaengine: imx-dma: use 'dev_dbg' and 'dev_warn' for messages.
dmaengine: imx-dma: remove 'imx_dmav1_baseaddr' and 'dma_clk'.
dmaengine: imx-dma: remove unused arg of imxdma_sg_next.
dmaengine: imx-dma: remove internal structure.
dmaengine: imx-dma: remove 'resbytes' field of 'internal' structure.
dmaengine: imx-dma: remove 'in_use' field of 'internal' structure.
dmaengine: imx-dma: remove sg member from internal structure.
dmaengine: imx-dma: remove 'imxdma_setup_sg_hw' function.
dmaengine: imx-dma: remove 'imxdma_config_channel_hw' function.
dmaengine: imx-dma: remove 'imxdma_setup_mem2mem_hw' function.
dmaengine: imx-dma: remove dma_mode member of internal structure.
dmaengine: imx-dma: remove data member from internal structure.
dmaengine: imx-dma: merge old dma-v1.c with imx-dma.c
dmaengine: at_hdmac: add slave config operation
dmaengine: add context parameter to prep_slave_sg and prep_dma_cyclic
dmaengine/dma_slave: introduce inline wrappers
dma: imx-sdma: Treat firmware messages as warnings instead of errors
...
Diffstat (limited to 'drivers/dma/timb_dma.c')
-rw-r--r-- | drivers/dma/timb_dma.c | 37 |
1 file changed, 11 insertions, 26 deletions
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index a6f9c1684a0f..4e0dff59901d 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
@@ -31,6 +31,8 @@ | |||
31 | 31 | ||
32 | #include <linux/timb_dma.h> | 32 | #include <linux/timb_dma.h> |
33 | 33 | ||
34 | #include "dmaengine.h" | ||
35 | |||
34 | #define DRIVER_NAME "timb-dma" | 36 | #define DRIVER_NAME "timb-dma" |
35 | 37 | ||
36 | /* Global DMA registers */ | 38 | /* Global DMA registers */ |
@@ -84,7 +86,6 @@ struct timb_dma_chan { | |||
84 | especially the lists and descriptors, | 86 | especially the lists and descriptors, |
85 | from races between the tasklet and calls | 87 | from races between the tasklet and calls |
86 | from above */ | 88 | from above */ |
87 | dma_cookie_t last_completed_cookie; | ||
88 | bool ongoing; | 89 | bool ongoing; |
89 | struct list_head active_list; | 90 | struct list_head active_list; |
90 | struct list_head queue; | 91 | struct list_head queue; |
@@ -284,7 +285,7 @@ static void __td_finish(struct timb_dma_chan *td_chan) | |||
284 | else | 285 | else |
285 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); | 286 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); |
286 | */ | 287 | */ |
287 | td_chan->last_completed_cookie = txd->cookie; | 288 | dma_cookie_complete(txd); |
288 | td_chan->ongoing = false; | 289 | td_chan->ongoing = false; |
289 | 290 | ||
290 | callback = txd->callback; | 291 | callback = txd->callback; |
@@ -349,12 +350,7 @@ static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd) | |||
349 | dma_cookie_t cookie; | 350 | dma_cookie_t cookie; |
350 | 351 | ||
351 | spin_lock_bh(&td_chan->lock); | 352 | spin_lock_bh(&td_chan->lock); |
352 | 353 | cookie = dma_cookie_assign(txd); | |
353 | cookie = txd->chan->cookie; | ||
354 | if (++cookie < 0) | ||
355 | cookie = 1; | ||
356 | txd->chan->cookie = cookie; | ||
357 | txd->cookie = cookie; | ||
358 | 354 | ||
359 | if (list_empty(&td_chan->active_list)) { | 355 | if (list_empty(&td_chan->active_list)) { |
360 | dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__, | 356 | dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__, |
@@ -481,8 +477,7 @@ static int td_alloc_chan_resources(struct dma_chan *chan) | |||
481 | } | 477 | } |
482 | 478 | ||
483 | spin_lock_bh(&td_chan->lock); | 479 | spin_lock_bh(&td_chan->lock); |
484 | td_chan->last_completed_cookie = 1; | 480 | dma_cookie_init(chan); |
485 | chan->cookie = 1; | ||
486 | spin_unlock_bh(&td_chan->lock); | 481 | spin_unlock_bh(&td_chan->lock); |
487 | 482 | ||
488 | return 0; | 483 | return 0; |
@@ -515,24 +510,13 @@ static void td_free_chan_resources(struct dma_chan *chan) | |||
515 | static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | 510 | static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie, |
516 | struct dma_tx_state *txstate) | 511 | struct dma_tx_state *txstate) |
517 | { | 512 | { |
518 | struct timb_dma_chan *td_chan = | 513 | enum dma_status ret; |
519 | container_of(chan, struct timb_dma_chan, chan); | ||
520 | dma_cookie_t last_used; | ||
521 | dma_cookie_t last_complete; | ||
522 | int ret; | ||
523 | 514 | ||
524 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); | 515 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); |
525 | 516 | ||
526 | last_complete = td_chan->last_completed_cookie; | 517 | ret = dma_cookie_status(chan, cookie, txstate); |
527 | last_used = chan->cookie; | ||
528 | |||
529 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
530 | |||
531 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
532 | 518 | ||
533 | dev_dbg(chan2dev(chan), | 519 | dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret); |
534 | "%s: exit, ret: %d, last_complete: %d, last_used: %d\n", | ||
535 | __func__, ret, last_complete, last_used); | ||
536 | 520 | ||
537 | return ret; | 521 | return ret; |
538 | } | 522 | } |
@@ -558,7 +542,8 @@ static void td_issue_pending(struct dma_chan *chan) | |||
558 | 542 | ||
559 | static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, | 543 | static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, |
560 | struct scatterlist *sgl, unsigned int sg_len, | 544 | struct scatterlist *sgl, unsigned int sg_len, |
561 | enum dma_transfer_direction direction, unsigned long flags) | 545 | enum dma_transfer_direction direction, unsigned long flags, |
546 | void *context) | ||
562 | { | 547 | { |
563 | struct timb_dma_chan *td_chan = | 548 | struct timb_dma_chan *td_chan = |
564 | container_of(chan, struct timb_dma_chan, chan); | 549 | container_of(chan, struct timb_dma_chan, chan); |
@@ -766,7 +751,7 @@ static int __devinit td_probe(struct platform_device *pdev) | |||
766 | } | 751 | } |
767 | 752 | ||
768 | td_chan->chan.device = &td->dma; | 753 | td_chan->chan.device = &td->dma; |
769 | td_chan->chan.cookie = 1; | 754 | dma_cookie_init(&td_chan->chan); |
770 | spin_lock_init(&td_chan->lock); | 755 | spin_lock_init(&td_chan->lock); |
771 | INIT_LIST_HEAD(&td_chan->active_list); | 756 | INIT_LIST_HEAD(&td_chan->active_list); |
772 | INIT_LIST_HEAD(&td_chan->queue); | 757 | INIT_LIST_HEAD(&td_chan->queue); |