author     Russell King - ARM Linux <linux@arm.linux.org.uk>   2011-01-03 17:44:57 -0500
committer  Dan Williams <dan.j.williams@intel.com>             2011-01-04 22:16:14 -0500
commit     501e67e82dee68d0a594ec0549f3d6a2943c91f5 (patch)
tree       c211a6a59ab79a8c83d432e2053f64586d5b159a /drivers/dma
parent     15c17232fbd1f7687c740c3c26f9e7f337bd9e36 (diff)
ARM: PL08x: put txd's on the pending list in pl08x_tx_submit()
Don't place TXDs on the pending list when they're prepared - place
them on the list when they're ready to be submitted. Also, only
place memcpy requests in the wait state when they're submitted and
don't have a physical channel associated.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Acked-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
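
For context, the change lines pl08x up with the standard dmaengine descriptor lifecycle: prep() only builds a descriptor, and nothing belongs on the channel's queue until the client actually submits it. A minimal client-side sketch of that flow (dst, src, len and flags are placeholder variables here; error handling omitted):

    struct dma_async_tx_descriptor *tx;
    dma_cookie_t cookie;

    /* prep builds the descriptor; after this patch the txd is no
     * longer added to pend_list at this point */
    tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);

    /* submit: pl08x_tx_submit() assigns the cookie, queues the txd on
     * pend_list and, for a memcpy with no physical channel yet, marks
     * the channel PL08X_CHAN_WAITING */
    cookie = tx->tx_submit(tx);

    /* start processing the pending queue */
    dma_async_issue_pending(chan);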
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/amba-pl08x.c	46
1 file changed, 32 insertions(+), 14 deletions(-)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 433b9e747f75..650e2bbc7aad 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -173,6 +173,11 @@ static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
 	return container_of(chan, struct pl08x_dma_chan, chan);
 }
 
+static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
+{
+	return container_of(tx, struct pl08x_txd, tx);
+}
+
 /*
  * Physical channel handling
  */
@@ -974,11 +979,27 @@ static void release_phy_channel(struct pl08x_dma_chan *plchan)
 static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
+	struct pl08x_txd *txd = to_pl08x_txd(tx);
 
 	plchan->chan.cookie += 1;
 	if (plchan->chan.cookie < 0)
 		plchan->chan.cookie = 1;
 	tx->cookie = plchan->chan.cookie;
+
+	/* Put this onto the pending list */
+	list_add_tail(&txd->node, &plchan->pend_list);
+
+	/*
+	 * If there was no physical channel available for this memcpy,
+	 * stack the request up and indicate that the channel is waiting
+	 * for a free physical channel.
+	 */
+	if (!plchan->slave && !plchan->phychan) {
+		/* Do this memcpy whenever there is a channel ready */
+		plchan->state = PL08X_CHAN_WAITING;
+		plchan->waiting = txd;
+	}
+
 	/* This unlock follows the lock in the prep() function */
 	spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
 
@@ -1213,8 +1234,6 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 
 	spin_lock_irqsave(&plchan->lock, plchan->lockflags);
 
-	list_add_tail(&txd->node, &plchan->pend_list);
-
 	/*
 	 * See if we already have a physical channel allocated,
 	 * else this is the time to try to get one.
@@ -1222,24 +1241,23 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 	ret = prep_phy_channel(plchan, txd);
 	if (ret) {
 		/*
-		 * No physical channel available, we will
-		 * stack up the memcpy channels until there is a channel
-		 * available to handle it whereas slave transfers may
-		 * have been denied due to platform channel muxing restrictions
-		 * and since there is no guarantee that this will ever be
-		 * resolved, and since the signal must be acquired AFTER
-		 * acquiring the physical channel, we will let them be NACK:ed
-		 * with -EBUSY here. The drivers can alway retry the prep()
-		 * call if they are eager on doing this using DMA.
+		 * No physical channel was available.
+		 *
+		 * memcpy transfers can be sorted out at submission time.
+		 *
+		 * Slave transfers may have been denied due to platform
+		 * channel muxing restrictions. Since there is no guarantee
+		 * that this will ever be resolved, and the signal must be
+		 * acquired AFTER acquiring the physical channel, we will let
+		 * them be NACK:ed with -EBUSY here. The drivers can retry
+		 * the prep() call if they are eager on doing this using DMA.
 		 */
 		if (plchan->slave) {
 			pl08x_free_txd_list(pl08x, plchan);
+			pl08x_free_txd(pl08x, txd);
 			spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
 			return -EBUSY;
 		}
-		/* Do this memcpy whenever there is a channel ready */
-		plchan->state = PL08X_CHAN_WAITING;
-		plchan->waiting = txd;
 	} else
 		/*
 		 * Else we're all set, paused and ready to roll,
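
Reassembled from the hunks above, the submit path after this patch reads as follows (the function tail past the unlock is outside the diff and elided):

    static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
    {
    	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
    	struct pl08x_txd *txd = to_pl08x_txd(tx);

    	plchan->chan.cookie += 1;
    	if (plchan->chan.cookie < 0)
    		plchan->chan.cookie = 1;
    	tx->cookie = plchan->chan.cookie;

    	/* Put this onto the pending list */
    	list_add_tail(&txd->node, &plchan->pend_list);

    	/*
    	 * If there was no physical channel available for this memcpy,
    	 * stack the request up and indicate that the channel is waiting
    	 * for a free physical channel.
    	 */
    	if (!plchan->slave && !plchan->phychan) {
    		/* Do this memcpy whenever there is a channel ready */
    		plchan->state = PL08X_CHAN_WAITING;
    		plchan->waiting = txd;
    	}

    	/* This unlock follows the lock in the prep() function */
    	spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);

    	/* ... remainder of the function is unchanged and not shown */
    }

Note the unusual locking contract this preserves: the spinlock is taken in pl08x_prep_channel_resources() and released here in tx_submit(), so the pend_list add and the WAITING transition happen under the same critical section that prep() opened.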