author    Russell King - ARM Linux <linux@arm.linux.org.uk>  2011-01-03 17:45:37 -0500
committer Dan Williams <dan.j.williams@intel.com>  2011-01-04 22:16:14 -0500
commit    c370e594efe2993620d24d41a78f325102e99d1c (patch)
tree      dfbb5c904bf6ca85c9aba1eec17070fe05c12ca4 /drivers/dma/amba-pl08x.c
parent    8087aacda040bdbf84940712d132ce80c30b9d5d (diff)
ARM: PL08x: fix locking between prepare function and submit function
The PL08x driver holds on to the channel lock, with interrupts disabled, between the prepare and the subsequent submit API functions. This means that the locking state when the prepare function returns depends on whether it succeeds or not. It did this to ensure that the physical channel wasn't released, and because it used to add the descriptor onto the pending list at prepare time rather than at submit time.

Now that we have reorganized the code to remove those reasons, we can safely release the spinlock at the end of preparation and reacquire it in our submit function.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Acked-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
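For readers unfamiliar with the pattern the patch moves to, the sketch below mirrors it in plain user-space C. It is a minimal illustration only, assuming a pthread mutex in place of the kernel spinlock; struct chan and the prepare()/submit() helpers are hypothetical stand-ins, not the driver's real API.

/*
 * Illustrative sketch only: a pthread mutex stands in for the kernel
 * spinlock, and struct chan, prepare() and submit() are hypothetical
 * names, not the PL08x driver's API.
 */
#include <pthread.h>
#include <stdio.h>

struct chan {
	pthread_mutex_t lock;
	int cookie;
};

static struct chan chan = { .lock = PTHREAD_MUTEX_INITIALIZER };

/*
 * Before the patch, prepare() returned with the lock still held on
 * success, so the caller's locking state depended on the return value.
 * After the patch, prepare() always drops the lock before returning.
 */
static int prepare(struct chan *c)
{
	pthread_mutex_lock(&c->lock);
	/* ... reserve channel resources, build the descriptor ... */
	pthread_mutex_unlock(&c->lock);	/* released before returning */
	return 0;
}

/* submit() now acquires the lock itself instead of inheriting it. */
static int submit(struct chan *c)
{
	int cookie;

	pthread_mutex_lock(&c->lock);
	cookie = ++c->cookie;	/* queue the work, hand back a cookie */
	pthread_mutex_unlock(&c->lock);
	return cookie;
}

int main(void)
{
	if (prepare(&chan) == 0)
		printf("submitted, cookie %d\n", submit(&chan));
	return 0;
}

The point is simply that each API function now brackets its own critical section, so the lock state at every function boundary is unambiguous regardless of success or failure.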
Diffstat (limited to 'drivers/dma/amba-pl08x.c')
-rw-r--r--	drivers/dma/amba-pl08x.c | 28 +++++++++-------------------
1 file changed, 9 insertions(+), 19 deletions(-)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index bf6f7d02c9f6..1c9f712520d6 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -981,6 +981,9 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
 	struct pl08x_txd *txd = to_pl08x_txd(tx);
+	unsigned long flags;
+
+	spin_lock_irqsave(&plchan->lock, flags);
 
 	plchan->chan.cookie += 1;
 	if (plchan->chan.cookie < 0)
@@ -1003,8 +1006,7 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 		plchan->phychan_hold--;
 	}
 
-	/* This unlock follows the lock in the prep() function */
-	spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+	spin_unlock_irqrestore(&plchan->lock, flags);
 
 	return tx->cookie;
 }
@@ -1225,9 +1227,9 @@ static void pl08x_issue_pending(struct dma_chan *chan)
 static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 	struct pl08x_txd *txd)
 {
-	int num_llis;
 	struct pl08x_driver_data *pl08x = plchan->host;
-	int ret;
+	unsigned long flags;
+	int num_llis, ret;
 
 	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
 	if (!num_llis) {
@@ -1235,7 +1237,7 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&plchan->lock, plchan->lockflags);
+	spin_lock_irqsave(&plchan->lock, flags);
 
 	/*
 	 * See if we already have a physical channel allocated,
@@ -1258,7 +1260,7 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 		if (plchan->slave) {
 			pl08x_free_txd_list(pl08x, plchan);
 			pl08x_free_txd(pl08x, txd);
-			spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+			spin_unlock_irqrestore(&plchan->lock, flags);
 			return -EBUSY;
 		}
 	} else
@@ -1272,11 +1274,7 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 	if (plchan->state == PL08X_CHAN_IDLE)
 		plchan->state = PL08X_CHAN_PAUSED;
 
-	/*
-	 * Notice that we leave plchan->lock locked on purpose:
-	 * it will be unlocked in the subsequent tx_submit()
-	 * call. This is a consequence of the current API.
-	 */
+	spin_unlock_irqrestore(&plchan->lock, flags);
 
 	return 0;
 }
@@ -1355,10 +1353,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	ret = pl08x_prep_channel_resources(plchan, txd);
 	if (ret)
 		return NULL;
-	/*
-	 * NB: the channel lock is held at this point so tx_submit()
-	 * must be called in direct succession.
-	 */
 
 	return &txd->tx;
 }
@@ -1444,10 +1438,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	ret = pl08x_prep_channel_resources(plchan, txd);
 	if (ret)
 		return NULL;
-	/*
-	 * NB: the channel lock is held at this point so tx_submit()
-	 * must be called in direct succession.
-	 */
 
 	return &txd->tx;
 }