Diffstat (limited to 'drivers/dma/ste_dma40.c')
-rw-r--r--  drivers/dma/ste_dma40.c | 41
1 file changed, 13 insertions(+), 28 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index cc5ecbc067a3..bdd41d4bfa8d 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -21,6 +21,7 @@
 
 #include <plat/ste_dma40.h>
 
+#include "dmaengine.h"
 #include "ste_dma40_ll.h"
 
 #define D40_NAME "dma40"
@@ -220,8 +221,6 @@ struct d40_base;
  *
  * @lock: A spinlock to protect this struct.
  * @log_num: The logical number, if any of this channel.
- * @completed: Starts with 1, after first interrupt it is set to dma engine's
- * current cookie.
  * @pending_tx: The number of pending transfers. Used between interrupt handler
  * and tasklet.
  * @busy: Set to true when transfer is ongoing on this channel.
@@ -250,8 +249,6 @@ struct d40_base;
 struct d40_chan {
 	spinlock_t lock;
 	int log_num;
-	/* ID of the most recent completed transfer */
-	int completed;
 	int pending_tx;
 	bool busy;
 	struct d40_phy_res *phy_chan;
@@ -1223,21 +1220,14 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
 					     chan);
 	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
 	unsigned long flags;
+	dma_cookie_t cookie;
 
 	spin_lock_irqsave(&d40c->lock, flags);
-
-	d40c->chan.cookie++;
-
-	if (d40c->chan.cookie < 0)
-		d40c->chan.cookie = 1;
-
-	d40d->txd.cookie = d40c->chan.cookie;
-
+	cookie = dma_cookie_assign(tx);
 	d40_desc_queue(d40c, d40d);
-
 	spin_unlock_irqrestore(&d40c->lock, flags);
 
-	return tx->cookie;
+	return cookie;
 }
 
 static int d40_start(struct d40_chan *d40c)
@@ -1357,7 +1347,7 @@ static void dma_tasklet(unsigned long data)
 		goto err;
 
 	if (!d40d->cyclic)
-		d40c->completed = d40d->txd.cookie;
+		dma_cookie_complete(&d40d->txd);
 
 	/*
 	 * If terminating a channel pending_tx is set to zero.
@@ -2182,7 +2172,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 	bool is_free_phy;
 	spin_lock_irqsave(&d40c->lock, flags);
 
-	d40c->completed = chan->cookie = 1;
+	dma_cookie_init(chan);
 
 	/* If no dma configuration is set use default configuration (memcpy) */
 	if (!d40c->configured) {
@@ -2299,7 +2289,8 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 							  struct scatterlist *sgl,
 							  unsigned int sg_len,
 							  enum dma_transfer_direction direction,
-							  unsigned long dma_flags)
+							  unsigned long dma_flags,
+							  void *context)
 {
 	if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
 		return NULL;
@@ -2310,7 +2301,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 static struct dma_async_tx_descriptor *
 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 		     size_t buf_len, size_t period_len,
-		     enum dma_transfer_direction direction)
+		     enum dma_transfer_direction direction, void *context)
 {
 	unsigned int periods = buf_len / period_len;
 	struct dma_async_tx_descriptor *txd;
@@ -2342,25 +2333,19 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
 				     struct dma_tx_state *txstate)
 {
 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
-	int ret;
+	enum dma_status ret;
 
 	if (d40c->phy_chan == NULL) {
 		chan_err(d40c, "Cannot read status of unallocated channel\n");
 		return -EINVAL;
 	}
 
-	last_complete = d40c->completed;
-	last_used = chan->cookie;
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret != DMA_SUCCESS)
+		dma_set_residue(txstate, stedma40_residue(chan));
 
 	if (d40_is_paused(d40c))
 		ret = DMA_PAUSED;
-	else
-		ret = dma_async_is_complete(cookie, last_complete, last_used);
-
-	dma_set_tx_state(txstate, last_complete, last_used,
-			 stedma40_residue(chan));
 
 	return ret;
 }
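
The hunks above replace ste_dma40's open-coded cookie bookkeeping with the shared helpers from drivers/dma/dmaengine.h. As a reading aid, here is a minimal sketch of that helper pattern in a stripped-down driver skeleton; struct my_chan, the my_*() callbacks and the zero residue value are hypothetical stand-ins and not part of ste_dma40 -- only the dma_cookie_*() and dma_set_residue() calls that appear in the diff are assumed.

/*
 * Minimal sketch of the cookie-helper pattern adopted above, assuming the
 * internal helpers declared in drivers/dma/dmaengine.h (dma_cookie_init,
 * dma_cookie_assign, dma_cookie_complete, dma_cookie_status, dma_set_residue).
 * struct my_chan and the my_*() callbacks are hypothetical stand-ins.
 */
#include <linux/dmaengine.h>
#include <linux/spinlock.h>

#include "dmaengine.h"

struct my_chan {
	struct dma_chan chan;
	spinlock_t lock;
};

/* .device_alloc_chan_resources: reset cookie and completed_cookie. */
static int my_alloc_chan_resources(struct dma_chan *chan)
{
	dma_cookie_init(chan);
	return 0;
}

/* .tx_submit: assign the next cookie under the channel lock. */
static dma_cookie_t my_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct my_chan *mc = container_of(tx->chan, struct my_chan, chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&mc->lock, flags);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&mc->lock, flags);

	return cookie;
}

/* Completion path (tasklet/IRQ): record the descriptor as completed. */
static void my_desc_done(struct dma_async_tx_descriptor *txd)
{
	dma_cookie_complete(txd);
}

/* .device_tx_status: let the core compare cookies, then add the residue. */
static enum dma_status my_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	enum dma_status ret = dma_cookie_status(chan, cookie, txstate);

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, 0);	/* driver-specific residue here */

	return ret;
}

As the last hunk shows, the converted d40_tx_status() still reports a driver-specific residue via stedma40_residue() and still overrides the result with DMA_PAUSED when the channel is paused; the helpers only take over the cookie comparison and txstate bookkeeping.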