author		Guennadi Liakhovetski <g.liakhovetski@gmx.de>	2009-12-17 11:41:39 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2009-12-17 11:41:39 -0500
commit		3542a113ab2f5880f1b62e5909d754250fb57d6b (patch)
tree		30359d2425678ccc84b62c055c19e943a300ecf6 /drivers
parent		4297a462f455e38f08976df7b16c849614a287da (diff)
sh: fix DMA driver's descriptor chaining and cookie assignment
The SH DMA driver wrongly assigns negative cookies to transfer descriptors; also, its chaining of partial descriptors is broken. The latter problem is usually invisible, because the maximum transfer size per chunk is 16M, but if you artificially set this limit lower, the driver fails. Since cookies are also used in chunk management, both of these problems are fixed in one patch. As a side effect, a possible memory leak (descriptors prepared but never submitted) and multiple races have also been fixed.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Acked-by: Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/dma/shdma.c	324
-rw-r--r--	drivers/dma/shdma.h	9
2 files changed, 213 insertions(+), 120 deletions(-)
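Before the diff itself, here is a minimal, self-contained userspace C sketch of the idea the chaining changes implement: a transfer is split into chunks of at most SH_DMA_TCR_MAX + 1 bytes, every chunk of one logical transfer is stamped with the same positive cookie at submit time, and only the last chunk carries the completion callback. The struct and helper names below are hypothetical, for illustration only; they are not the driver's API.

#include <stdio.h>
#include <stddef.h>

#define SH_DMA_TCR_MAX 0x00FFFFFF	/* per-chunk limit, as in shdma.h */

struct chunk_desc {
	size_t len;	/* bytes covered by this chunk */
	int cookie;	/* all chunks of one transfer share a cookie */
	int last;	/* callback would run on this chunk only */
};

/* Hypothetical model of prep + submit: split 'len' and stamp a cookie */
static int submit_transfer(struct chunk_desc *descs, size_t len, int cookie)
{
	/* same rounding-up division the patch uses to compute 'chunks' */
	int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
	int i;

	for (i = 0; i < chunks; i++) {
		size_t copy = len < SH_DMA_TCR_MAX + 1 ?
			      len : (size_t)SH_DMA_TCR_MAX + 1;

		descs[i].len = copy;
		descs[i].cookie = cookie;	/* never negative after submit */
		descs[i].last = (i == chunks - 1);
		len -= copy;
	}
	return chunks;
}

int main(void)
{
	struct chunk_desc d[8];
	int n = submit_transfer(d, (size_t)40 << 20, 2);	/* 40MB -> 3 chunks */

	for (int i = 0; i < n; i++)
		printf("chunk %d: %zu bytes, cookie %d, callback %s\n",
		       i, d[i].len, d[i].cookie, d[i].last ? "yes" : "no");
	return 0;
}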
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 8d9acd751ed6..9546f5f356eb 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -23,16 +23,19 @@
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
 #include <linux/platform_device.h>
 #include <cpu/dma.h>
 #include <asm/dma-sh.h>
 #include "shdma.h"
 
 /* DMA descriptor control */
-#define DESC_LAST	(-1)
-#define DESC_COMP	(1)
-#define DESC_NCOMP	(0)
+enum sh_dmae_desc_status {
+	DESC_IDLE,
+	DESC_PREPARED,
+	DESC_SUBMITTED,
+	DESC_COMPLETED,	/* completed, have to call callback */
+	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
+};
 
 #define NR_DESCS_PER_CHANNEL 32
 /*
@@ -45,6 +48,8 @@
  */
 #define RS_DEFAULT  (RS_DUAL)
 
+static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
+
 #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
 {
@@ -106,11 +111,11 @@ static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
 	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
 }
 
-static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
+static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
 {
-	sh_dmae_writel(sh_chan, hw.sar, SAR);
-	sh_dmae_writel(sh_chan, hw.dar, DAR);
-	sh_dmae_writel(sh_chan, hw.tcr >> calc_xmit_shift(sh_chan), TCR);
+	sh_dmae_writel(sh_chan, hw->sar, SAR);
+	sh_dmae_writel(sh_chan, hw->dar, DAR);
+	sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
 }
 
 static void dmae_start(struct sh_dmae_chan *sh_chan)
@@ -184,8 +189,9 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 
 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct sh_desc *desc = tx_to_sh_desc(tx);
+	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
 	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
+	dma_async_tx_callback callback = tx->callback;
 	dma_cookie_t cookie;
 
 	spin_lock_bh(&sh_chan->desc_lock);
@@ -195,45 +201,53 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (cookie < 0)
 		cookie = 1;
 
-	/* If desc only in the case of 1 */
-	if (desc->async_tx.cookie != -EBUSY)
-		desc->async_tx.cookie = cookie;
-	sh_chan->common.cookie = desc->async_tx.cookie;
+	sh_chan->common.cookie = cookie;
+	tx->cookie = cookie;
+
+	/* Mark all chunks of this descriptor as submitted, move to the queue */
+	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
+		/*
+		 * All chunks are on the global ld_free, so, we have to find
+		 * the end of the chain ourselves
+		 */
+		if (chunk != desc && (chunk->mark == DESC_IDLE ||
+				      chunk->async_tx.cookie > 0 ||
+				      chunk->async_tx.cookie == -EBUSY ||
+				      &chunk->node == &sh_chan->ld_free))
+			break;
+		chunk->mark = DESC_SUBMITTED;
+		/* Callback goes to the last chunk */
+		chunk->async_tx.callback = NULL;
+		chunk->cookie = cookie;
+		list_move_tail(&chunk->node, &sh_chan->ld_queue);
+		last = chunk;
+	}
+
+	last->async_tx.callback = callback;
+	last->async_tx.callback_param = tx->callback_param;
 
-	list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev);
+	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
+		tx->cookie, &last->async_tx, sh_chan->id,
+		desc->hw.sar, desc->hw.tcr, desc->hw.dar);
 
 	spin_unlock_bh(&sh_chan->desc_lock);
 
 	return cookie;
 }
 
+/* Called with desc_lock held */
 static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
 {
-	struct sh_desc *desc, *_desc, *ret = NULL;
+	struct sh_desc *desc;
 
-	spin_lock_bh(&sh_chan->desc_lock);
-	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) {
-		if (async_tx_test_ack(&desc->async_tx)) {
+	list_for_each_entry(desc, &sh_chan->ld_free, node)
+		if (desc->mark != DESC_PREPARED) {
+			BUG_ON(desc->mark != DESC_IDLE);
 			list_del(&desc->node);
-			ret = desc;
-			break;
+			return desc;
 		}
-	}
-	spin_unlock_bh(&sh_chan->desc_lock);
-
-	return ret;
-}
-
-static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc)
-{
-	if (desc) {
-		spin_lock_bh(&sh_chan->desc_lock);
-
-		list_splice_init(&desc->tx_list, &sh_chan->ld_free);
-		list_add(&desc->node, &sh_chan->ld_free);
 
-		spin_unlock_bh(&sh_chan->desc_lock);
-	}
+	return NULL;
 }
 
 static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
@@ -252,11 +266,10 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 		dma_async_tx_descriptor_init(&desc->async_tx,
 					&sh_chan->common);
 		desc->async_tx.tx_submit = sh_dmae_tx_submit;
-		desc->async_tx.flags = DMA_CTRL_ACK;
-		INIT_LIST_HEAD(&desc->tx_list);
-		sh_dmae_put_desc(sh_chan, desc);
+		desc->mark = DESC_IDLE;
 
 		spin_lock_bh(&sh_chan->desc_lock);
+		list_add(&desc->node, &sh_chan->ld_free);
 		sh_chan->descs_allocated++;
 	}
 	spin_unlock_bh(&sh_chan->desc_lock);
@@ -273,7 +286,10 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	struct sh_desc *desc, *_desc;
 	LIST_HEAD(list);
 
-	BUG_ON(!list_empty(&sh_chan->ld_queue));
+	/* Prepared and not submitted descriptors can still be on the queue */
+	if (!list_empty(&sh_chan->ld_queue))
+		sh_dmae_chan_ld_cleanup(sh_chan, true);
+
 	spin_lock_bh(&sh_chan->desc_lock);
 
 	list_splice_init(&sh_chan->ld_free, &list);
@@ -292,6 +308,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	struct sh_dmae_chan *sh_chan;
 	struct sh_desc *first = NULL, *prev = NULL, *new;
 	size_t copy_size;
+	LIST_HEAD(tx_list);
+	int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
 
 	if (!chan)
 		return NULL;
@@ -301,108 +319,189 @@
 
 	sh_chan = to_sh_chan(chan);
 
+	/* Have to lock the whole loop to protect against concurrent release */
+	spin_lock_bh(&sh_chan->desc_lock);
+
+	/*
+	 * Chaining:
+	 * first descriptor is what user is dealing with in all API calls, its
+	 * cookie is at first set to -EBUSY, at tx-submit to a positive
+	 * number
+	 * if more than one chunk is needed further chunks have cookie = -EINVAL
+	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
+	 * all chunks are linked onto the tx_list head with their .node heads
+	 * only during this function, then they are immediately spliced
+	 * back onto the free list in form of a chain
+	 */
 	do {
-		/* Allocate the link descriptor from DMA pool */
+		/* Allocate the link descriptor from the free list */
 		new = sh_dmae_get_desc(sh_chan);
 		if (!new) {
 			dev_err(sh_chan->dev,
 					"No free memory for link descriptor\n");
-			goto err_get_desc;
+			list_for_each_entry(new, &tx_list, node)
+				new->mark = DESC_IDLE;
+			list_splice(&tx_list, &sh_chan->ld_free);
+			spin_unlock_bh(&sh_chan->desc_lock);
+			return NULL;
 		}
 
-		copy_size = min(len, (size_t)SH_DMA_TCR_MAX);
+		copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);
 
 		new->hw.sar = dma_src;
 		new->hw.dar = dma_dest;
 		new->hw.tcr = copy_size;
-		if (!first)
+		if (!first) {
+			/* First desc */
+			new->async_tx.cookie = -EBUSY;
 			first = new;
+		} else {
+			/* Other desc - invisible to the user */
+			new->async_tx.cookie = -EINVAL;
+		}
 
-		new->mark = DESC_NCOMP;
-		async_tx_ack(&new->async_tx);
+		dev_dbg(sh_chan->dev,
+			"chaining %u of %u with %p, dst %x, cookie %d\n",
+			copy_size, len, &new->async_tx, dma_dest,
+			new->async_tx.cookie);
+
+		new->mark = DESC_PREPARED;
+		new->async_tx.flags = flags;
+		new->chunks = chunks--;
 
 		prev = new;
 		len -= copy_size;
 		dma_src += copy_size;
 		dma_dest += copy_size;
 		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &first->tx_list);
+		list_add_tail(&new->node, &tx_list);
 	} while (len);
 
-	new->async_tx.flags = flags; /* client is in control of this ack */
-	new->async_tx.cookie = -EBUSY; /* Last desc */
+	if (new != first)
+		new->async_tx.cookie = -ENOSPC;
 
-	return &first->async_tx;
+	/* Put them back on the free list, so, they don't get lost */
+	list_splice_tail(&tx_list, &sh_chan->ld_free);
 
-err_get_desc:
-	sh_dmae_put_desc(sh_chan, first);
-	return NULL;
+	spin_unlock_bh(&sh_chan->desc_lock);
 
+	return &first->async_tx;
 }
 
-/*
- * sh_chan_ld_cleanup - Clean up link descriptors
- *
- * This function clean up the ld_queue of DMA channel.
- */
-static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan)
+static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
 {
 	struct sh_desc *desc, *_desc;
+	/* Is the "exposed" head of a chain acked? */
+	bool head_acked = false;
+	dma_cookie_t cookie = 0;
+	dma_async_tx_callback callback = NULL;
+	void *param = NULL;
 
 	spin_lock_bh(&sh_chan->desc_lock);
 	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
-		dma_async_tx_callback callback;
-		void *callback_param;
-
-		/* non send data */
-		if (desc->mark == DESC_NCOMP)
+		struct dma_async_tx_descriptor *tx = &desc->async_tx;
+
+		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
+		BUG_ON(desc->mark != DESC_SUBMITTED &&
+		       desc->mark != DESC_COMPLETED &&
+		       desc->mark != DESC_WAITING);
+
+		/*
+		 * queue is ordered, and we use this loop to (1) clean up all
+		 * completed descriptors, and to (2) update descriptor flags of
+		 * any chunks in a (partially) completed chain
+		 */
+		if (!all && desc->mark == DESC_SUBMITTED &&
+		    desc->cookie != cookie)
 			break;
 
-		/* send data sesc */
-		callback = desc->async_tx.callback;
-		callback_param = desc->async_tx.callback_param;
+		if (tx->cookie > 0)
+			cookie = tx->cookie;
 
-		/* Remove from ld_queue list */
-		list_splice_init(&desc->tx_list, &sh_chan->ld_free);
+		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
+			BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
+			sh_chan->completed_cookie = desc->cookie;
+		}
 
-		dev_dbg(sh_chan->dev, "link descriptor %p will be recycle.\n",
-				desc);
+		/* Call callback on the last chunk */
+		if (desc->mark == DESC_COMPLETED && tx->callback) {
+			desc->mark = DESC_WAITING;
+			callback = tx->callback;
+			param = tx->callback_param;
+			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
+				tx->cookie, tx, sh_chan->id);
+			BUG_ON(desc->chunks != 1);
+			break;
+		}
 
-		list_move(&desc->node, &sh_chan->ld_free);
-		/* Run the link descriptor callback function */
-		if (callback) {
-			spin_unlock_bh(&sh_chan->desc_lock);
-			dev_dbg(sh_chan->dev, "link descriptor %p callback\n",
-					desc);
-			callback(callback_param);
-			spin_lock_bh(&sh_chan->desc_lock);
+		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
+			if (desc->mark == DESC_COMPLETED) {
+				BUG_ON(tx->cookie < 0);
+				desc->mark = DESC_WAITING;
+			}
+			head_acked = async_tx_test_ack(tx);
+		} else {
+			switch (desc->mark) {
+			case DESC_COMPLETED:
+				desc->mark = DESC_WAITING;
+				/* Fall through */
+			case DESC_WAITING:
+				if (head_acked)
+					async_tx_ack(&desc->async_tx);
+			}
+		}
+
+		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
+			tx, tx->cookie);
+
+		if (((desc->mark == DESC_COMPLETED ||
+		      desc->mark == DESC_WAITING) &&
+		     async_tx_test_ack(&desc->async_tx)) || all) {
+			/* Remove from ld_queue list */
+			desc->mark = DESC_IDLE;
+			list_move(&desc->node, &sh_chan->ld_free);
 		}
 	}
 	spin_unlock_bh(&sh_chan->desc_lock);
+
+	if (callback)
+		callback(param);
+
+	return callback;
+}
+
+/*
+ * sh_chan_ld_cleanup - Clean up link descriptors
+ *
+ * This function cleans up the ld_queue of DMA channel.
+ */
+static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
+{
+	while (__ld_cleanup(sh_chan, all))
+		;
 }
 
 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 {
-	struct list_head *ld_node;
-	struct sh_dmae_regs hw;
+	struct sh_desc *sd;
 
+	spin_lock_bh(&sh_chan->desc_lock);
 	/* DMA work check */
-	if (dmae_is_busy(sh_chan))
+	if (dmae_is_busy(sh_chan)) {
+		spin_unlock_bh(&sh_chan->desc_lock);
 		return;
+	}
 
 	/* Find the first un-transfer desciptor */
-	for (ld_node = sh_chan->ld_queue.next;
-		(ld_node != &sh_chan->ld_queue)
-		&& (to_sh_desc(ld_node)->mark == DESC_COMP);
-		ld_node = ld_node->next)
-		cpu_relax();
-
-	if (ld_node != &sh_chan->ld_queue) {
-		/* Get the ld start address from ld_queue */
-		hw = to_sh_desc(ld_node)->hw;
-		dmae_set_reg(sh_chan, hw);
-		dmae_start(sh_chan);
-	}
+	list_for_each_entry(sd, &sh_chan->ld_queue, node)
+		if (sd->mark == DESC_SUBMITTED) {
+			/* Get the ld start address from ld_queue */
+			dmae_set_reg(sh_chan, &sd->hw);
+			dmae_start(sh_chan);
+			break;
+		}
+
+	spin_unlock_bh(&sh_chan->desc_lock);
 }
 
 static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
@@ -420,12 +519,11 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
 	dma_cookie_t last_used;
 	dma_cookie_t last_complete;
 
-	sh_dmae_chan_ld_cleanup(sh_chan);
+	sh_dmae_chan_ld_cleanup(sh_chan, false);
 
 	last_used = chan->cookie;
 	last_complete = sh_chan->completed_cookie;
-	if (last_complete == -EBUSY)
-		last_complete = last_used;
+	BUG_ON(last_complete < 0);
 
 	if (done)
 		*done = last_complete;
@@ -480,11 +578,13 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 		err = sh_dmae_rst(0);
 		if (err)
 			return err;
+#ifdef SH_DMAC_BASE1
 		if (shdev->pdata.mode & SHDMA_DMAOR1) {
 			err = sh_dmae_rst(1);
 			if (err)
 				return err;
 		}
+#endif
 		disable_irq(irq);
 		return IRQ_HANDLED;
 	}
@@ -494,35 +594,25 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 static void dmae_do_tasklet(unsigned long data)
 {
 	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
-	struct sh_desc *desc, *_desc, *cur_desc = NULL;
+	struct sh_desc *desc;
 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
 
-	list_for_each_entry_safe(desc, _desc,
-				 &sh_chan->ld_queue, node) {
-		if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
-			cur_desc = desc;
+	spin_lock(&sh_chan->desc_lock);
+	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
+		if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
+		    desc->mark == DESC_SUBMITTED) {
+			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
+				desc->async_tx.cookie, &desc->async_tx,
+				desc->hw.dar);
+			desc->mark = DESC_COMPLETED;
 			break;
 		}
 	}
+	spin_unlock(&sh_chan->desc_lock);
 
-	if (cur_desc) {
-		switch (cur_desc->async_tx.cookie) {
-		case 0: /* other desc data */
-			break;
-		case -EBUSY: /* last desc */
-			sh_chan->completed_cookie =
-				cur_desc->async_tx.cookie;
-			break;
-		default: /* first desc ( 0 < )*/
-			sh_chan->completed_cookie =
-				cur_desc->async_tx.cookie - 1;
-			break;
-		}
-		cur_desc->mark = DESC_COMP;
-	}
 	/* Next desc */
 	sh_chan_xfer_ld_queue(sh_chan);
-	sh_dmae_chan_ld_cleanup(sh_chan);
+	sh_dmae_chan_ld_cleanup(sh_chan, false);
 }
 
 static unsigned int get_dmae_irq(unsigned int id)
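The shdma.c changes above replace the old DESC_COMP/DESC_NCOMP flags with a small per-descriptor state machine. The following standalone C sketch models those transitions (idle -> prepared -> submitted -> completed -> waiting -> idle) purely for illustration; the enum values mirror the patch, but the advance() helper and the printed trace are hypothetical and not part of the driver.

#include <stdio.h>

/* Mirrors the states introduced in drivers/dma/shdma.c by this patch */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,	/* prep_memcpy() filled it in, not yet submitted */
	DESC_SUBMITTED,	/* tx_submit() stamped a cookie, on ld_queue */
	DESC_COMPLETED,	/* hardware finished, callback still pending */
	DESC_WAITING,	/* callback ran, waiting for the client's ack */
};

static const char *name(enum sh_dmae_desc_status s)
{
	static const char *n[] = {
		"IDLE", "PREPARED", "SUBMITTED", "COMPLETED", "WAITING"
	};
	return n[s];
}

/* Hypothetical helper: one legal step forward in the life cycle */
static enum sh_dmae_desc_status advance(enum sh_dmae_desc_status s, int acked)
{
	switch (s) {
	case DESC_IDLE:		return DESC_PREPARED;
	case DESC_PREPARED:	return DESC_SUBMITTED;
	case DESC_SUBMITTED:	return DESC_COMPLETED;
	case DESC_COMPLETED:	return DESC_WAITING;
	case DESC_WAITING:	return acked ? DESC_IDLE : DESC_WAITING;
	}
	return s;
}

int main(void)
{
	enum sh_dmae_desc_status s = DESC_IDLE;

	/* Walk one descriptor through a full transfer, ack at the end */
	for (int i = 0; i < 5; i++) {
		enum sh_dmae_desc_status next = advance(s, i == 4);
		printf("%s -> %s\n", name(s), name(next));
		s = next;
	}
	return 0;
}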
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 60b81e529b42..108f1cffb6f5 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -13,9 +13,9 @@
 #ifndef __DMA_SHDMA_H
 #define __DMA_SHDMA_H
 
-#include <linux/device.h>
-#include <linux/dmapool.h>
 #include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
 
 #define SH_DMA_TCR_MAX 0x00FFFFFF	/* 16MB */
 
@@ -26,13 +26,16 @@ struct sh_dmae_regs {
 };
 
 struct sh_desc {
-	struct list_head tx_list;
 	struct sh_dmae_regs hw;
 	struct list_head node;
 	struct dma_async_tx_descriptor async_tx;
+	dma_cookie_t cookie;
+	int chunks;
 	int mark;
 };
 
+struct device;
+
 struct sh_dmae_chan {
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */