author:    Fancy Fang <B47543@freescale.com>    2013-12-17 03:58:26 -0500
committer: Nitin Garg <nitin.garg@freescale.com>    2014-04-16 09:47:30 -0400
commit:    dcb7a8c8d8330b16d0e8e2226023fee258dab1d1
tree:      6c9f3f256509744950252fb294283a2053d8da2e /drivers/dma
parent:    c046b4592119f876589e1adf41c3637e87611250
ENGR00292398 PXP: refine two spin locks usage in PXP dma driver
This patch provides the following refinements:

1. For the pxp channel lock, use spin_lock() instead of spin_lock_irqsave(), since this lock is never taken in any ISR. This also increases the driver's concurrency, because local interrupts stay enabled while the lock is held.

2. Narrow the pxp lock's locking range in pxp_issue_pending(). Since this lock is also taken in the PXP ISR, its hold time should be as short as possible to minimize the time local interrupts are disabled.

Signed-off-by: Fancy Fang <B47543@freescale.com>
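For context, here is a minimal sketch (not taken from this driver; all names below are hypothetical) of the two spinlock patterns the message describes: a lock never taken in interrupt context can use plain spin_lock(), while a lock shared with an ISR needs spin_lock_irqsave() and should be held as briefly as possible.

/*
 * Illustrative sketch only; chan_lock, isr_lock, prepare() and
 * process_one() are hypothetical, not part of the PXP driver.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(chan_lock);	/* never taken in an ISR */
static DEFINE_SPINLOCK(isr_lock);	/* also taken in an ISR */

static void prepare(void);
static void process_one(void);

/* Pattern 1: no ISR ever takes chan_lock, so plain spin_lock()
 * suffices; local interrupts stay enabled while it is held,
 * which improves concurrency. */
static void update_channel(void)
{
	spin_lock(&chan_lock);
	process_one();
	spin_unlock(&chan_lock);
}

/* Pattern 2: isr_lock is shared with an ISR, so taking it must
 * disable local interrupts; keep the critical section minimal by
 * doing preparatory work before taking the lock. */
static void queue_item(void)
{
	unsigned long flags;

	prepare();				/* outside the lock */
	spin_lock_irqsave(&isr_lock, flags);
	process_one();				/* shared-state update only */
	spin_unlock_irqrestore(&isr_lock, flags);
}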
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/pxp/pxp_dma_v2.c  42
1 file changed, 20 insertions(+), 22 deletions(-)
diff --git a/drivers/dma/pxp/pxp_dma_v2.c b/drivers/dma/pxp/pxp_dma_v2.c
index 113f6ae86cd2..aed3ca813672 100644
--- a/drivers/dma/pxp/pxp_dma_v2.c
+++ b/drivers/dma/pxp/pxp_dma_v2.c
@@ -1132,12 +1132,16 @@ static void pxpdma_dostart_work(struct pxps *pxp)
 	spin_unlock_irqrestore(&pxp->lock, flags);
 }
 
-static void pxpdma_dequeue(struct pxp_channel *pxp_chan, struct list_head *list)
+static void pxpdma_dequeue(struct pxp_channel *pxp_chan, struct pxps *pxp)
 {
+	unsigned long flags;
 	struct pxp_tx_desc *desc = NULL;
+
 	do {
 		desc = pxpdma_first_queued(pxp_chan);
-		list_move_tail(&desc->list, list);
+		spin_lock_irqsave(&pxp->lock, flags);
+		list_move_tail(&desc->list, &head);
+		spin_unlock_irqrestore(&pxp->lock, flags);
 	} while (!list_empty(&pxp_chan->queue));
 }
 
@@ -1146,12 +1150,11 @@ static dma_cookie_t pxp_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct pxp_tx_desc *desc = to_tx_desc(tx);
 	struct pxp_channel *pxp_chan = to_pxp_channel(tx->chan);
 	dma_cookie_t cookie;
-	unsigned long flags;
 
 	dev_dbg(&pxp_chan->dma_chan.dev->device, "received TX\n");
 
 	/* pxp_chan->lock can be taken under ichan->lock, but not v.v. */
-	spin_lock_irqsave(&pxp_chan->lock, flags);
+	spin_lock(&pxp_chan->lock);
 
 	cookie = pxp_chan->dma_chan.cookie;
 
@@ -1165,7 +1168,7 @@ static dma_cookie_t pxp_tx_submit(struct dma_async_tx_descriptor *tx)
 	/* Here we add the tx descriptor to our PxP task queue. */
 	list_add_tail(&desc->list, &pxp_chan->queue);
 
-	spin_unlock_irqrestore(&pxp_chan->lock, flags);
+	spin_unlock(&pxp_chan->lock);
 
 	dev_dbg(&pxp_chan->dma_chan.dev->device, "done TX\n");
 
@@ -1343,21 +1346,18 @@ static void pxp_issue_pending(struct dma_chan *chan)
 	struct pxp_channel *pxp_chan = to_pxp_channel(chan);
 	struct pxp_dma *pxp_dma = to_pxp_dma(chan->device);
 	struct pxps *pxp = to_pxp(pxp_dma);
-	unsigned long flags0, flags;
 
-	spin_lock_irqsave(&pxp->lock, flags0);
-	spin_lock_irqsave(&pxp_chan->lock, flags);
+	spin_lock(&pxp_chan->lock);
 
-	if (!list_empty(&pxp_chan->queue)) {
-		pxpdma_dequeue(pxp_chan, &head);
-		pxp_chan->status = PXP_CHANNEL_READY;
-	} else {
-		spin_unlock_irqrestore(&pxp_chan->lock, flags);
-		spin_unlock_irqrestore(&pxp->lock, flags0);
+	if (list_empty(&pxp_chan->queue)) {
+		spin_unlock(&pxp_chan->lock);
 		return;
 	}
-	spin_unlock_irqrestore(&pxp_chan->lock, flags);
-	spin_unlock_irqrestore(&pxp->lock, flags0);
+
+	pxpdma_dequeue(pxp_chan, pxp);
+	pxp_chan->status = PXP_CHANNEL_READY;
+
+	spin_unlock(&pxp_chan->lock);
 
 	pxp_clk_enable(pxp);
 	wake_up_interruptible(&pxp->thread_waitq);
@@ -1373,16 +1373,15 @@ static void __pxp_terminate_all(struct dma_chan *chan)
 static int pxp_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	unsigned long arg)
 {
-	unsigned long flags;
 	struct pxp_channel *pxp_chan = to_pxp_channel(chan);
 
 	/* Only supports DMA_TERMINATE_ALL */
 	if (cmd != DMA_TERMINATE_ALL)
 		return -ENXIO;
 
-	spin_lock_irqsave(&pxp_chan->lock, flags);
+	spin_lock(&pxp_chan->lock);
 	__pxp_terminate_all(chan);
-	spin_unlock_irqrestore(&pxp_chan->lock, flags);
+	spin_unlock(&pxp_chan->lock);
 
 	return 0;
 }
@@ -1418,16 +1417,15 @@ err_chan:
 
 static void pxp_free_chan_resources(struct dma_chan *chan)
 {
-	unsigned long flags;
 	struct pxp_channel *pxp_chan = to_pxp_channel(chan);
 
-	spin_lock_irqsave(&pxp_chan->lock, flags);
+	spin_lock(&pxp_chan->lock);
 
 	__pxp_terminate_all(chan);
 
 	pxp_chan->status = PXP_CHANNEL_FREE;
 
-	spin_unlock_irqrestore(&pxp_chan->lock, flags);
+	spin_unlock(&pxp_chan->lock);
 }
 
 static enum dma_status pxp_tx_status(struct dma_chan *chan,