about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorFancy Fang <B47543@freescale.com>2013-12-11 05:21:10 -0500
committerNitin Garg <nitin.garg@freescale.com>2014-04-16 09:47:28 -0400
commitf33d31279349cc94aa419f21b5a56f8b45882f12 (patch)
treea3ca8761b811c066604c8e4a7b4f8028092287b4
parent1747c87425716457223ff12a1df493bb73374206 (diff)
ENGR00291729 PXP: remove a mutex lock from pxp channel
This mutex lock is no longer necessary in the PXP dma driver. After the commit "ENGR00291400 PXP: Organize PXP task queue to be FIFO", the fields it protected can now be protected by the spin lock in the PXP channel.
Signed-off-by: Fancy Fang <B47543@freescale.com>
-rw-r--r--drivers/dma/pxp/pxp_dma_v2.c18
-rw-r--r--include/linux/pxp_dma.h5
2 files changed, 11 insertions, 12 deletions
diff --git a/drivers/dma/pxp/pxp_dma_v2.c b/drivers/dma/pxp/pxp_dma_v2.c
index 55f0a5a6adf9..18d8279425da 100644
--- a/drivers/dma/pxp/pxp_dma_v2.c
+++ b/drivers/dma/pxp/pxp_dma_v2.c
@@ -1155,7 +1155,8 @@ static dma_cookie_t pxp_tx_submit(struct dma_async_tx_descriptor *tx)
1155 1155
1156 dev_dbg(&pxp_chan->dma_chan.dev->device, "received TX\n"); 1156 dev_dbg(&pxp_chan->dma_chan.dev->device, "received TX\n");
1157 1157
1158 mutex_lock(&pxp_chan->chan_mutex); 1158 /* pxp_chan->lock can be taken under ichan->lock, but not v.v. */
1159 spin_lock_irqsave(&pxp_chan->lock, flags);
1159 1160
1160 cookie = pxp_chan->dma_chan.cookie; 1161 cookie = pxp_chan->dma_chan.cookie;
1161 1162
@@ -1166,9 +1167,6 @@ static dma_cookie_t pxp_tx_submit(struct dma_async_tx_descriptor *tx)
1166 pxp_chan->dma_chan.cookie = cookie; 1167 pxp_chan->dma_chan.cookie = cookie;
1167 tx->cookie = cookie; 1168 tx->cookie = cookie;
1168 1169
1169 /* pxp_chan->lock can be taken under ichan->lock, but not v.v. */
1170 spin_lock_irqsave(&pxp_chan->lock, flags);
1171
1172 /* Here we add the tx descriptor to our PxP task queue. */ 1170 /* Here we add the tx descriptor to our PxP task queue. */
1173 list_add_tail(&desc->list, &pxp_chan->queue); 1171 list_add_tail(&desc->list, &pxp_chan->queue);
1174 1172
@@ -1176,7 +1174,6 @@ static dma_cookie_t pxp_tx_submit(struct dma_async_tx_descriptor *tx)
1176 1174
1177 dev_dbg(&pxp_chan->dma_chan.dev->device, "done TX\n"); 1175 dev_dbg(&pxp_chan->dma_chan.dev->device, "done TX\n");
1178 1176
1179 mutex_unlock(&pxp_chan->chan_mutex);
1180 return cookie; 1177 return cookie;
1181} 1178}
1182 1179
@@ -1385,15 +1382,16 @@ static void __pxp_terminate_all(struct dma_chan *chan)
1385static int pxp_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1382static int pxp_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1386 unsigned long arg) 1383 unsigned long arg)
1387{ 1384{
1385 unsigned long flags;
1388 struct pxp_channel *pxp_chan = to_pxp_channel(chan); 1386 struct pxp_channel *pxp_chan = to_pxp_channel(chan);
1389 1387
1390 /* Only supports DMA_TERMINATE_ALL */ 1388 /* Only supports DMA_TERMINATE_ALL */
1391 if (cmd != DMA_TERMINATE_ALL) 1389 if (cmd != DMA_TERMINATE_ALL)
1392 return -ENXIO; 1390 return -ENXIO;
1393 1391
1394 mutex_lock(&pxp_chan->chan_mutex); 1392 spin_lock_irqsave(&pxp_chan->lock, flags);
1395 __pxp_terminate_all(chan); 1393 __pxp_terminate_all(chan);
1396 mutex_unlock(&pxp_chan->chan_mutex); 1394 spin_unlock_irqrestore(&pxp_chan->lock, flags);
1397 1395
1398 return 0; 1396 return 0;
1399} 1397}
@@ -1429,15 +1427,16 @@ err_chan:
1429 1427
1430static void pxp_free_chan_resources(struct dma_chan *chan) 1428static void pxp_free_chan_resources(struct dma_chan *chan)
1431{ 1429{
1430 unsigned long flags;
1432 struct pxp_channel *pxp_chan = to_pxp_channel(chan); 1431 struct pxp_channel *pxp_chan = to_pxp_channel(chan);
1433 1432
1434 mutex_lock(&pxp_chan->chan_mutex); 1433 spin_lock_irqsave(&pxp_chan->lock, flags);
1435 1434
1436 __pxp_terminate_all(chan); 1435 __pxp_terminate_all(chan);
1437 1436
1438 pxp_chan->status = PXP_CHANNEL_FREE; 1437 pxp_chan->status = PXP_CHANNEL_FREE;
1439 1438
1440 mutex_unlock(&pxp_chan->chan_mutex); 1439 spin_unlock_irqrestore(&pxp_chan->lock, flags);
1441} 1440}
1442 1441
1443static enum dma_status pxp_tx_status(struct dma_chan *chan, 1442static enum dma_status pxp_tx_status(struct dma_chan *chan,
@@ -1595,7 +1594,6 @@ static int pxp_dma_init(struct pxps *pxp)
1595 struct dma_chan *dma_chan = &pxp_chan->dma_chan; 1594 struct dma_chan *dma_chan = &pxp_chan->dma_chan;
1596 1595
1597 spin_lock_init(&pxp_chan->lock); 1596 spin_lock_init(&pxp_chan->lock);
1598 mutex_init(&pxp_chan->chan_mutex);
1599 1597
1600 /* Only one EOF IRQ for PxP, shared by all channels */ 1598 /* Only one EOF IRQ for PxP, shared by all channels */
1601 pxp_chan->eof_irq = pxp->irq; 1599 pxp_chan->eof_irq = pxp->irq;
diff --git a/include/linux/pxp_dma.h b/include/linux/pxp_dma.h
index 61b3e7de4c57..613d89f143f9 100644
--- a/include/linux/pxp_dma.h
+++ b/include/linux/pxp_dma.h
@@ -47,8 +47,9 @@ struct pxp_channel {
47 struct pxp_tx_desc *desc; /* allocated tx-descriptors */ 47 struct pxp_tx_desc *desc; /* allocated tx-descriptors */
48 struct list_head queue; /* queued tx-descriptors */ 48 struct list_head queue; /* queued tx-descriptors */
49 struct list_head list; /* track queued channel number */ 49 struct list_head list; /* track queued channel number */
50 spinlock_t lock; /* protects sg[0,1], queue */ 50 spinlock_t lock; /* protects sg[0,1], queue,
51 struct mutex chan_mutex; /* protects status, cookie, free_list */ 51 * status, cookie, free_list
52 */
52 int active_buffer; 53 int active_buffer;
53 unsigned int eof_irq; 54 unsigned int eof_irq;
54 char eof_name[16]; /* EOF IRQ name for request_irq() */ 55 char eof_name[16]; /* EOF IRQ name for request_irq() */