author    Daniel Mack <zonque@gmail.com>    2013-08-21 08:08:55 -0400
committer Vinod Koul <vinod.koul@intel.com>    2013-08-25 12:34:52 -0400
commit    0cd6156177a10d39fb5811bcd23e1d3b7e58f1c0 (patch)
tree      882750c60c5a5f094594a2289ae063665f4891d9
parent    b721f9e800571ca724eed6f1956e58e8f1d47d7d (diff)
dma: mmp_pdma: don't clear DCMD_ENDIRQEN at end of pending chain
In order to fully support multiple transactions per channel, we need to ensure we get an interrupt for each completed transaction. That flag bit (DCMD_ENDIRQEN) is also our only way to tell at which descriptor a transaction ends. So, remove the manual clearing of that bit, and inline the only remaining statement of append_pending_queue() for better readability.

Signed-off-by: Daniel Mack <zonque@gmail.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--  drivers/dma/mmp_pdma.c  22
1 file changed, 2 insertions(+), 20 deletions(-)
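For context, here is a minimal sketch of how a completion path can rely on DCMD_ENDIRQEN once it is no longer cleared: any descriptor on the running chain that still carries the bit marks the end of one transaction. This is illustrative only and not taken from mmp_pdma.c; the helper name and the chain_running/node members are assumptions beyond what the diff below shows.

/*
 * Illustrative sketch only -- not the driver's actual completion
 * handler.  desc.dcmd, DCMD_ENDIRQEN and async_tx appear in the diff
 * below; chain_running and node are assumed here for the example.
 */
static void complete_finished_transactions(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &chan->chain_running, node) {
		/*
		 * With DCMD_ENDIRQEN left set on the last descriptor of
		 * every transaction, this bit marks the boundary at which
		 * the transaction's cookie can be completed.
		 */
		if (!(desc->desc.dcmd & DCMD_ENDIRQEN))
			continue;

		dma_cookie_complete(&desc->async_tx);
		/* callback invocation and descriptor recycling elided */
	}
}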
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index b0a9c94bc0de..3676fdeac96d 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -279,25 +279,6 @@ static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
 	spin_unlock_irqrestore(&pdev->phy_lock, flags);
 }
 
-/* desc->tx_list ==> pending list */
-static void append_pending_queue(struct mmp_pdma_chan *chan,
-				 struct mmp_pdma_desc_sw *desc)
-{
-	struct mmp_pdma_desc_sw *tail =
-		to_mmp_pdma_desc(chan->chain_pending.prev);
-
-	if (list_empty(&chan->chain_pending))
-		goto out_splice;
-
-	/* one irq per queue, even appended */
-	tail->desc.ddadr = desc->async_tx.phys;
-	tail->desc.dcmd &= ~DCMD_ENDIRQEN;
-
-	/* softly link to pending list */
-out_splice:
-	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
-}
-
 /**
  * start_pending_queue - transfer any pending transactions
  * pending list ==> running list
@@ -360,7 +341,8 @@ static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
 		cookie = dma_cookie_assign(&child->async_tx);
 	}
 
-	append_pending_queue(chan, desc);
+	/* softly link to pending list - desc->tx_list ==> pending list */
+	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
 
 	spin_unlock_irqrestore(&chan->desc_lock, flags);
 