aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma
diff options
context:
space:
mode:
authorLars-Peter Clausen <lars@metafoo.de>2014-01-11 14:08:38 -0500
committerVinod Koul <vinod.koul@intel.com>2014-01-20 03:20:49 -0500
commit04abf5daf7df852566e5a4782d5954daa40e2542 (patch)
tree8bee95188e65e2264633a8ec9ee8bb8bc898913f /drivers/dma
parentba07d812f58c0ec65fff981a085529ed88965d23 (diff)
dma: pl330: Differentiate between submitted and issued descriptors
The pl330 dmaengine driver currently does not differentiate between submitted and issued descriptors. It won't start transferring a newly submitted descriptor until issue_pending() is called, but only if it is idle. If it is active and a new descriptor is submitted before it goes idle it will happily start the newly submitted descriptor once all earlier submitted descriptors have been completed. This is not 100% correct with regard to the dmaengine interface semantics. A descriptor is not supposed to be started until the next issue_pending() call after the descriptor has been submitted. This patch adds a second per channel list that keeps track of the submitted descriptors. Once issue_pending() is called the submitted descriptors are moved to the working list and only descriptors on the working list are started. Signed-off-by: Lars-Peter Clausen <lars@metafoo.de> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/pl330.c24
1 file changed, 20 insertions, 4 deletions
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 7adaf3abffba..8e018a221f19 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -543,7 +543,9 @@ struct dma_pl330_chan {
543 /* DMA-Engine Channel */ 543 /* DMA-Engine Channel */
544 struct dma_chan chan; 544 struct dma_chan chan;
545 545
546 /* List of to be xfered descriptors */ 546 /* List of submitted descriptors */
547 struct list_head submitted_list;
548 /* List of issued descriptors */
547 struct list_head work_list; 549 struct list_head work_list;
548 /* List of completed descriptors */ 550 /* List of completed descriptors */
549 struct list_head completed_list; 551 struct list_head completed_list;
@@ -2388,6 +2390,11 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
2388 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); 2390 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
2389 2391
2390 /* Mark all desc done */ 2392 /* Mark all desc done */
2393 list_for_each_entry(desc, &pch->submitted_list, node) {
2394 desc->status = FREE;
2395 dma_cookie_complete(&desc->txd);
2396 }
2397
2391 list_for_each_entry(desc, &pch->work_list , node) { 2398 list_for_each_entry(desc, &pch->work_list , node) {
2392 desc->status = FREE; 2399 desc->status = FREE;
2393 dma_cookie_complete(&desc->txd); 2400 dma_cookie_complete(&desc->txd);
@@ -2398,6 +2405,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
2398 dma_cookie_complete(&desc->txd); 2405 dma_cookie_complete(&desc->txd);
2399 } 2406 }
2400 2407
2408 list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
2401 list_splice_tail_init(&pch->work_list, &pdmac->desc_pool); 2409 list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
2402 list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool); 2410 list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
2403 spin_unlock_irqrestore(&pch->lock, flags); 2411 spin_unlock_irqrestore(&pch->lock, flags);
@@ -2456,7 +2464,14 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2456 2464
2457static void pl330_issue_pending(struct dma_chan *chan) 2465static void pl330_issue_pending(struct dma_chan *chan)
2458{ 2466{
2459 pl330_tasklet((unsigned long) to_pchan(chan)); 2467 struct dma_pl330_chan *pch = to_pchan(chan);
2468 unsigned long flags;
2469
2470 spin_lock_irqsave(&pch->lock, flags);
2471 list_splice_tail_init(&pch->submitted_list, &pch->work_list);
2472 spin_unlock_irqrestore(&pch->lock, flags);
2473
2474 pl330_tasklet((unsigned long)pch);
2460} 2475}
2461 2476
2462/* 2477/*
@@ -2483,11 +2498,11 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2483 2498
2484 dma_cookie_assign(&desc->txd); 2499 dma_cookie_assign(&desc->txd);
2485 2500
2486 list_move_tail(&desc->node, &pch->work_list); 2501 list_move_tail(&desc->node, &pch->submitted_list);
2487 } 2502 }
2488 2503
2489 cookie = dma_cookie_assign(&last->txd); 2504 cookie = dma_cookie_assign(&last->txd);
2490 list_add_tail(&last->node, &pch->work_list); 2505 list_add_tail(&last->node, &pch->submitted_list);
2491 spin_unlock_irqrestore(&pch->lock, flags); 2506 spin_unlock_irqrestore(&pch->lock, flags);
2492 2507
2493 return cookie; 2508 return cookie;
@@ -2979,6 +2994,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2979 else 2994 else
2980 pch->chan.private = adev->dev.of_node; 2995 pch->chan.private = adev->dev.of_node;
2981 2996
2997 INIT_LIST_HEAD(&pch->submitted_list);
2982 INIT_LIST_HEAD(&pch->work_list); 2998 INIT_LIST_HEAD(&pch->work_list);
2983 INIT_LIST_HEAD(&pch->completed_list); 2999 INIT_LIST_HEAD(&pch->completed_list);
2984 spin_lock_init(&pch->lock); 3000 spin_lock_init(&pch->lock);