author    Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:53:02 -0400
committer Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:53:02 -0400
commit    eda34234578fd822c950fd06b5c5ff7ac08b3001 (patch)
tree      860b3c9d347ddd57e6884f9f1e019370de4d45b1 /drivers
parent    e0bd0f8cb09cf3ccac1425f0f3a6705106c4d65c (diff)
fsldma: implement a private tx_list
Drop fsldma's use of tx_list from struct dma_async_tx_descriptor in
preparation for removal of this field.

Cc: Li Yang <leoli@freescale.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
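To make the shape of the change concrete: each fsl_desc_sw now owns the list
linking a transaction's descriptors, and tx_submit recovers the driver
descriptor from the generic tx via container_of instead of reaching into
tx->tx_list. Below is a minimal user-space sketch of that pattern; the list
helpers and struct layouts are simplified stand-ins for the kernel's real
definitions, not the driver's actual code.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's <linux/list.h> helpers. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for the generic descriptor: no tx_list field any more. */
struct dma_async_tx_descriptor { int cookie; };

/* Mirrors struct fsl_desc_sw: tx_list now lives in the driver's type. */
struct fsl_desc_sw {
	struct list_head node;
	struct list_head tx_list;	/* private chain of child descriptors */
	struct dma_async_tx_descriptor async_tx;
};

/* Same container_of recovery the driver's tx_to_fsl_desc() performs. */
static struct fsl_desc_sw *tx_to_fsl_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct fsl_desc_sw, async_tx);
}

int main(void)
{
	struct fsl_desc_sw first, child;

	/* prep_* routines: init the private list, chain children onto it. */
	INIT_LIST_HEAD(&first.tx_list);
	list_add_tail(&child.node, &first.tx_list);

	/* tx_submit receives only the generic tx, then recovers the driver
	 * descriptor, so it can walk the private list instead of the
	 * (soon to be removed) generic tx->tx_list. */
	struct dma_async_tx_descriptor *tx = &first.async_tx;
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);

	printf("child reachable: %d\n", desc->tx_list.next == &child.node);
	return 0;
}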
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/fsldma.c  16
-rw-r--r--  drivers/dma/fsldma.h   1
2 files changed, 10 insertions, 7 deletions
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index ef87a8984145..73dd74823195 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -326,7 +326,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
-	struct fsl_desc_sw *desc;
+	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
+	struct fsl_desc_sw *child;
 	unsigned long flags;
 	dma_cookie_t cookie;
 
@@ -334,7 +335,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
 
 	cookie = fsl_chan->common.cookie;
-	list_for_each_entry(desc, &tx->tx_list, node) {
+	list_for_each_entry(child, &desc->tx_list, node) {
 		cookie++;
 		if (cookie < 0)
 			cookie = 1;
@@ -343,8 +344,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	fsl_chan->common.cookie = cookie;
-	append_ld_queue(fsl_chan, tx_to_fsl_desc(tx));
-	list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev);
+	append_ld_queue(fsl_chan, desc);
+	list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);
 
 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
 
@@ -366,6 +367,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
 	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
 	if (desc_sw) {
 		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
+		INIT_LIST_HEAD(&desc_sw->tx_list);
 		dma_async_tx_descriptor_init(&desc_sw->async_tx,
 					&fsl_chan->common);
 		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
@@ -455,7 +457,7 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
 	new->async_tx.flags = flags;
 
 	/* Insert the link descriptor to the LD ring */
-	list_add_tail(&new->node, &new->async_tx.tx_list);
+	list_add_tail(&new->node, &new->tx_list);
 
 	/* Set End-of-link to the last link descriptor of new list*/
 	set_ld_eol(fsl_chan, new);
@@ -513,7 +515,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		dma_dest += copy;
 
 		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &first->async_tx.tx_list);
+		list_add_tail(&new->node, &first->tx_list);
 	} while (len);
 
 	new->async_tx.flags = flags; /* client is in control of this ack */
@@ -528,7 +530,7 @@ fail:
 	if (!first)
 		return NULL;
 
-	list = &first->async_tx.tx_list;
+	list = &first->tx_list;
 	list_for_each_entry_safe_reverse(new, prev, list, node) {
 		list_del(&new->node);
 		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index dc7f26865797..4493afed53f0 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -90,6 +90,7 @@ struct fsl_dma_ld_hw {
 struct fsl_desc_sw {
 	struct fsl_dma_ld_hw hw;
 	struct list_head node;
+	struct list_head tx_list;
 	struct dma_async_tx_descriptor async_tx;
 	struct list_head *ld;
 	void *priv;
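A side note on the submit path above: cookies are assigned per child
descriptor, and the counter wraps back to 1 rather than 0, since zero and
negative dma_cookie_t values are reserved in the dmaengine API (negative
values carry error codes). The driver's literal `cookie++; if (cookie < 0)
cookie = 1;` relies on the kernel's defined signed-overflow semantics
(-fwrapv / -fno-strict-overflow); this portable sketch of the same rule
makes the wrap explicit instead:

#include <limits.h>
#include <stdio.h>

typedef int dma_cookie_t;

/* Explicit-wrap version of the driver's cookie increment; positive
 * cookies only, since 0/negative are reserved in the dmaengine API. */
static dma_cookie_t next_cookie(dma_cookie_t cookie)
{
	if (cookie >= INT_MAX || cookie < 1)
		return 1;	/* wrap to the minimum valid cookie */
	return cookie + 1;
}

int main(void)
{
	printf("%d\n", next_cookie(41));      /* 42 */
	printf("%d\n", next_cookie(INT_MAX)); /* 1  */
	return 0;
}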