path: root/drivers/dma/sa11x0-dma.c
author	Russell King <rmk+kernel@arm.linux.org.uk>	2012-04-13 07:07:23 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-07-01 09:15:21 -0400
commit	50437bff7f7374f86837986f66e15e73a364f894 (patch)
tree	60f2ed3601d374dcc1bf074265e5ab891a3dee55 /drivers/dma/sa11x0-dma.c
parent	6887a4131da3adaab011613776d865f4bcfb5678 (diff)
dmaengine: split out virtual channel DMA support from sa11x0 driver
Split the virtual slave channel DMA support from the sa11x0 driver so this
code can be shared with other slave DMA engine drivers.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'drivers/dma/sa11x0-dma.c')
-rw-r--r--	drivers/dma/sa11x0-dma.c	249
1 file changed, 78 insertions(+), 171 deletions(-)
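
Before the diff itself, here is a minimal sketch of the pattern a slave DMA driver adopts once it is built on the virt-dma helpers this patch splits out. The foo_* names are placeholders invented for illustration; the virt_dma_* types, the vc.chan / vc.desc_free fields and the vchan_*() calls are the ones the diff below switches sa11x0-dma.c over to.

#include <linux/dmaengine.h>
#include <linux/slab.h>

#include "virt-dma.h"

struct foo_dma_desc {
	struct virt_dma_desc vd;	/* vd.tx is the dma_async_tx_descriptor, vd.node links the vchan lists */
	size_t size;
};

struct foo_dma_chan {
	struct virt_dma_chan vc;	/* embeds struct dma_chan plus the per-channel lock and descriptor lists */
};

static struct foo_dma_chan *to_foo_dma_chan(struct dma_chan *chan)
{
	/* the dma_chan now lives inside the virt_dma_chan */
	return container_of(chan, struct foo_dma_chan, vc.chan);
}

static void foo_dma_free_desc(struct virt_dma_desc *vd)
{
	/* called back by the virt-dma core once a descriptor is done with */
	kfree(container_of(vd, struct foo_dma_desc, vd));
}

static struct dma_async_tx_descriptor *foo_dma_prep(struct foo_dma_chan *c,
	size_t size, unsigned long flags)
{
	struct foo_dma_desc *txd = kzalloc(sizeof(*txd), GFP_ATOMIC);

	if (!txd)
		return NULL;
	txd->size = size;

	/* initialises vd.tx and hands submit/cookie handling to virt-dma */
	return vchan_tx_prep(&c->vc, &txd->vd, flags);
}

static void foo_dma_chan_setup(struct foo_dma_chan *c, struct dma_device *dmadev)
{
	c->vc.desc_free = foo_dma_free_desc;
	vchan_init(&c->vc, dmadev);	/* adds vc.chan to dmadev->channels */
}

With that in place, issue_pending only needs vchan_issue_pending() under c->vc.lock and completion reduces to vchan_cookie_complete(), as the hunks below show.
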
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index ec78ccef9132..5f1d2e670837 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -21,6 +21,8 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
+#include "virt-dma.h"
+
 #define NR_PHY_CHAN 6
 #define DMA_ALIGN 3
 #define DMA_MAX_SIZE 0x1fff
@@ -72,12 +74,11 @@ struct sa11x0_dma_sg {
 };
 
 struct sa11x0_dma_desc {
-	struct dma_async_tx_descriptor tx;
+	struct virt_dma_desc vd;
+
 	u32 ddar;
 	size_t size;
 
-	/* maybe protected by c->lock */
-	struct list_head node;
 	unsigned sglen;
 	struct sa11x0_dma_sg sg[0];
 };
@@ -85,15 +86,11 @@ struct sa11x0_dma_desc {
 struct sa11x0_dma_phy;
 
 struct sa11x0_dma_chan {
-	struct dma_chan chan;
-	spinlock_t lock;
-	dma_cookie_t lc;
+	struct virt_dma_chan vc;
 
-	/* protected by c->lock */
+	/* protected by c->vc.lock */
 	struct sa11x0_dma_phy *phy;
 	enum dma_status status;
-	struct list_head desc_submitted;
-	struct list_head desc_issued;
 
 	/* protected by d->lock */
 	struct list_head node;
@@ -109,7 +106,7 @@ struct sa11x0_dma_phy {
 
 	struct sa11x0_dma_chan *vchan;
 
-	/* Protected by c->lock */
+	/* Protected by c->vc.lock */
 	unsigned sg_load;
 	struct sa11x0_dma_desc *txd_load;
 	unsigned sg_done;
@@ -127,13 +124,12 @@ struct sa11x0_dma_dev {
 	spinlock_t lock;
 	struct tasklet_struct task;
 	struct list_head chan_pending;
-	struct list_head desc_complete;
 	struct sa11x0_dma_phy phy[NR_PHY_CHAN];
 };
 
 static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
 {
-	return container_of(chan, struct sa11x0_dma_chan, chan);
+	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
 }
 
 static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
@@ -141,27 +137,26 @@ static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
 	return container_of(dmadev, struct sa11x0_dma_dev, slave);
 }
 
-static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx)
+static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
 {
-	return container_of(tx, struct sa11x0_dma_desc, tx);
+	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
 }
 
-static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
+static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
 {
-	if (list_empty(&c->desc_issued))
-		return NULL;
-
-	return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node);
+	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
 }
 
 static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
 {
-	list_del(&txd->node);
+	list_del(&txd->vd.node);
 	p->txd_load = txd;
 	p->sg_load = 0;
 
 	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
-		p->num, txd, txd->tx.cookie, txd->ddar);
+		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
 }
 
 static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
@@ -229,21 +224,13 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
 	struct sa11x0_dma_desc *txd = p->txd_done;
 
 	if (++p->sg_done == txd->sglen) {
-		struct sa11x0_dma_dev *d = p->dev;
-
-		dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n",
-			p->num, p->txd_done, p->txd_done->tx.cookie);
-
-		c->lc = txd->tx.cookie;
-
-		spin_lock(&d->lock);
-		list_add_tail(&txd->node, &d->desc_complete);
-		spin_unlock(&d->lock);
+		vchan_cookie_complete(&txd->vd);
 
 		p->sg_done = 0;
 		p->txd_done = p->txd_load;
 
-		tasklet_schedule(&d->task);
+		if (!p->txd_done)
+			tasklet_schedule(&p->dev->task);
 	}
 
 	sa11x0_dma_start_sg(p, c);
@@ -280,7 +267,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
 	if (c) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&c->lock, flags);
+		spin_lock_irqsave(&c->vc.lock, flags);
 		/*
 		 * Now that we're holding the lock, check that the vchan
 		 * really is associated with this pchan before touching the
@@ -294,7 +281,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
 			if (dcsr & DCSR_DONEB)
 				sa11x0_dma_complete(p, c);
 		}
-		spin_unlock_irqrestore(&c->lock, flags);
+		spin_unlock_irqrestore(&c->vc.lock, flags);
 	}
 
 	return IRQ_HANDLED;
@@ -332,28 +319,15 @@ static void sa11x0_dma_tasklet(unsigned long arg)
 	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
 	struct sa11x0_dma_phy *p;
 	struct sa11x0_dma_chan *c;
-	struct sa11x0_dma_desc *txd, *txn;
-	LIST_HEAD(head);
 	unsigned pch, pch_alloc = 0;
 
 	dev_dbg(d->slave.dev, "tasklet enter\n");
 
-	/* Get the completed tx descriptors */
-	spin_lock_irq(&d->lock);
-	list_splice_init(&d->desc_complete, &head);
-	spin_unlock_irq(&d->lock);
-
-	list_for_each_entry(txd, &head, node) {
-		c = to_sa11x0_dma_chan(txd->tx.chan);
-
-		dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n",
-			c, txd, txd->tx.cookie);
-
-		spin_lock_irq(&c->lock);
+	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
+		spin_lock_irq(&c->vc.lock);
 		p = c->phy;
-		if (p) {
-			if (!p->txd_done)
-				sa11x0_dma_start_txd(c);
+		if (p && !p->txd_done) {
+			sa11x0_dma_start_txd(c);
 			if (!p->txd_done) {
 				/* No current txd associated with this channel */
 				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
@@ -363,7 +337,7 @@ static void sa11x0_dma_tasklet(unsigned long arg)
 				p->vchan = NULL;
 			}
 		}
-		spin_unlock_irq(&c->lock);
+		spin_unlock_irq(&c->vc.lock);
 	}
 
 	spin_lock_irq(&d->lock);
@@ -380,7 +354,7 @@ static void sa11x0_dma_tasklet(unsigned long arg)
 			/* Mark this channel allocated */
 			p->vchan = c;
 
-			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c);
+			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
 		}
 	}
 	spin_unlock_irq(&d->lock);
@@ -390,42 +364,18 @@ static void sa11x0_dma_tasklet(unsigned long arg)
 			p = &d->phy[pch];
 			c = p->vchan;
 
-			spin_lock_irq(&c->lock);
+			spin_lock_irq(&c->vc.lock);
 			c->phy = p;
 
 			sa11x0_dma_start_txd(c);
-			spin_unlock_irq(&c->lock);
+			spin_unlock_irq(&c->vc.lock);
 		}
 	}
 
-	/* Now free the completed tx descriptor, and call their callbacks */
-	list_for_each_entry_safe(txd, txn, &head, node) {
-		dma_async_tx_callback callback = txd->tx.callback;
-		void *callback_param = txd->tx.callback_param;
-
-		dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n",
-			txd, txd->tx.cookie);
-
-		kfree(txd);
-
-		if (callback)
-			callback(callback_param);
-	}
-
 	dev_dbg(d->slave.dev, "tasklet exit\n");
 }
 
 
-static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head)
-{
-	struct sa11x0_dma_desc *txd, *txn;
-
-	list_for_each_entry_safe(txd, txn, head, node) {
-		dev_dbg(d->slave.dev, "txd %p: freeing\n", txd);
-		kfree(txd);
-	}
-}
-
 static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	return 0;
@@ -436,18 +386,12 @@ static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
 	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 	unsigned long flags;
-	LIST_HEAD(head);
 
-	spin_lock_irqsave(&c->lock, flags);
-	spin_lock(&d->lock);
+	spin_lock_irqsave(&d->lock, flags);
 	list_del_init(&c->node);
-	spin_unlock(&d->lock);
-
-	list_splice_tail_init(&c->desc_submitted, &head);
-	list_splice_tail_init(&c->desc_issued, &head);
-	spin_unlock_irqrestore(&c->lock, flags);
+	spin_unlock_irqrestore(&d->lock, flags);
 
-	sa11x0_dma_desc_free(d, &head);
+	vchan_free_chan_resources(&c->vc);
 }
 
 static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
@@ -473,21 +417,15 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
 	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 	struct sa11x0_dma_phy *p;
 	struct sa11x0_dma_desc *txd;
-	dma_cookie_t last_used, last_complete;
 	unsigned long flags;
 	enum dma_status ret;
 	size_t bytes = 0;
 
-	last_used = c->chan.cookie;
-	last_complete = c->lc;
-
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
-	if (ret == DMA_SUCCESS) {
-		dma_set_tx_state(state, last_complete, last_used, 0);
+	ret = dma_cookie_status(&c->vc.chan, cookie, state);
+	if (ret == DMA_SUCCESS)
 		return ret;
-	}
 
-	spin_lock_irqsave(&c->lock, flags);
+	spin_lock_irqsave(&c->vc.lock, flags);
 	p = c->phy;
 	ret = c->status;
 	if (p) {
@@ -524,12 +462,13 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
 		if (txd != p->txd_load && p->txd_load)
 			bytes += p->txd_load->size;
 	}
-	list_for_each_entry(txd, &c->desc_issued, node) {
+	list_for_each_entry(txd, &c->vc.desc_issued, vd.node) {
 		bytes += txd->size;
 	}
-	spin_unlock_irqrestore(&c->lock, flags);
+	spin_unlock_irqrestore(&c->vc.lock, flags);
 
-	dma_set_tx_state(state, last_complete, last_used, bytes);
+	if (state)
+		state->residue = bytes;
 
 	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);
 
@@ -547,40 +486,20 @@ static void sa11x0_dma_issue_pending(struct dma_chan *chan)
 	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 	unsigned long flags;
 
-	spin_lock_irqsave(&c->lock, flags);
-	list_splice_tail_init(&c->desc_submitted, &c->desc_issued);
-	if (!list_empty(&c->desc_issued)) {
-		spin_lock(&d->lock);
-		if (!c->phy && list_empty(&c->node)) {
-			list_add_tail(&c->node, &d->chan_pending);
-			tasklet_schedule(&d->task);
-			dev_dbg(d->slave.dev, "vchan %p: issued\n", c);
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (vchan_issue_pending(&c->vc)) {
+		if (!c->phy) {
+			spin_lock(&d->lock);
+			if (list_empty(&c->node)) {
+				list_add_tail(&c->node, &d->chan_pending);
+				tasklet_schedule(&d->task);
+				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
+			}
+			spin_unlock(&d->lock);
 		}
-		spin_unlock(&d->lock);
 	} else
-		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c);
-	spin_unlock_irqrestore(&c->lock, flags);
-}
-
-static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan);
-	struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx);
-	unsigned long flags;
-
-	spin_lock_irqsave(&c->lock, flags);
-	c->chan.cookie += 1;
-	if (c->chan.cookie < 0)
-		c->chan.cookie = 1;
-	txd->tx.cookie = c->chan.cookie;
-
-	list_add_tail(&txd->node, &c->desc_submitted);
-	spin_unlock_irqrestore(&c->lock, flags);
-
-	dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n",
-		c, txd, txd->tx.cookie);
-
-	return txd->tx.cookie;
+		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
+	spin_unlock_irqrestore(&c->vc.lock, flags);
 }
 
 static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
@@ -596,7 +515,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
 	/* SA11x0 channels can only operate in their native direction */
 	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
 		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
-			c, c->ddar, dir);
+			&c->vc, c->ddar, dir);
 		return NULL;
 	}
 
@@ -612,14 +531,14 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
 		j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
 		if (addr & DMA_ALIGN) {
 			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
-				c, addr);
+				&c->vc, addr);
 			return NULL;
 		}
 	}
 
 	txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
 	if (!txd) {
-		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c);
+		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
 		return NULL;
 	}
 
@@ -655,17 +574,14 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
 		} while (len);
 	}
 
-	dma_async_tx_descriptor_init(&txd->tx, &c->chan);
-	txd->tx.flags = flags;
-	txd->tx.tx_submit = sa11x0_dma_tx_submit;
 	txd->ddar = c->ddar;
 	txd->size = size;
 	txd->sglen = j;
 
 	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
-		c, txd, txd->size, txd->sglen);
+		&c->vc, &txd->vd, txd->size, txd->sglen);
 
-	return &txd->tx;
+	return vchan_tx_prep(&c->vc, &txd->vd, flags);
 }
 
 static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
@@ -695,8 +611,8 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c
 	if (maxburst == 8)
 		ddar |= DDAR_BS;
 
-	dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
-		c, addr, width, maxburst);
+	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
+		&c->vc, addr, width, maxburst);
 
 	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
 
@@ -718,16 +634,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
 
 	case DMA_TERMINATE_ALL:
-		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c);
+		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
 		/* Clear the tx descriptor lists */
-		spin_lock_irqsave(&c->lock, flags);
-		list_splice_tail_init(&c->desc_submitted, &head);
-		list_splice_tail_init(&c->desc_issued, &head);
+		spin_lock_irqsave(&c->vc.lock, flags);
+		vchan_get_all_descriptors(&c->vc, &head);
 
 		p = c->phy;
 		if (p) {
-			struct sa11x0_dma_desc *txd, *txn;
-
 			dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
 			/* vchan is assigned to a pchan - stop the channel */
 			writel(DCSR_RUN | DCSR_IE |
@@ -735,17 +648,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 				DCSR_STRTB | DCSR_DONEB,
 				p->base + DMA_DCSR_C);
 
-			list_for_each_entry_safe(txd, txn, &d->desc_complete, node)
-				if (txd->tx.chan == &c->chan)
-					list_move(&txd->node, &head);
-
 			if (p->txd_load) {
 				if (p->txd_load != p->txd_done)
-					list_add_tail(&p->txd_load->node, &head);
+					list_add_tail(&p->txd_load->vd.node, &head);
 				p->txd_load = NULL;
 			}
 			if (p->txd_done) {
-				list_add_tail(&p->txd_done->node, &head);
+				list_add_tail(&p->txd_done->vd.node, &head);
 				p->txd_done = NULL;
 			}
 			c->phy = NULL;
@@ -754,14 +663,14 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 			spin_unlock(&d->lock);
 			tasklet_schedule(&d->task);
 		}
-		spin_unlock_irqrestore(&c->lock, flags);
-		sa11x0_dma_desc_free(d, &head);
+		spin_unlock_irqrestore(&c->vc.lock, flags);
+		vchan_dma_desc_free_list(&c->vc, &head);
 		ret = 0;
 		break;
 
 	case DMA_PAUSE:
-		dev_dbg(d->slave.dev, "vchan %p: pause\n", c);
-		spin_lock_irqsave(&c->lock, flags);
+		dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+		spin_lock_irqsave(&c->vc.lock, flags);
 		if (c->status == DMA_IN_PROGRESS) {
 			c->status = DMA_PAUSED;
 
@@ -774,26 +683,26 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 				spin_unlock(&d->lock);
 			}
 		}
-		spin_unlock_irqrestore(&c->lock, flags);
+		spin_unlock_irqrestore(&c->vc.lock, flags);
 		ret = 0;
 		break;
 
 	case DMA_RESUME:
-		dev_dbg(d->slave.dev, "vchan %p: resume\n", c);
-		spin_lock_irqsave(&c->lock, flags);
+		dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+		spin_lock_irqsave(&c->vc.lock, flags);
 		if (c->status == DMA_PAUSED) {
 			c->status = DMA_IN_PROGRESS;
 
 			p = c->phy;
 			if (p) {
 				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
-			} else if (!list_empty(&c->desc_issued)) {
+			} else if (!list_empty(&c->vc.desc_issued)) {
 				spin_lock(&d->lock);
 				list_add_tail(&c->node, &d->chan_pending);
 				spin_unlock(&d->lock);
 			}
 		}
-		spin_unlock_irqrestore(&c->lock, flags);
+		spin_unlock_irqrestore(&c->vc.lock, flags);
 		ret = 0;
 		break;
 
@@ -853,15 +762,13 @@ static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
 			return -ENOMEM;
 		}
 
-		c->chan.device = dmadev;
 		c->status = DMA_IN_PROGRESS;
 		c->ddar = chan_desc[i].ddar;
 		c->name = chan_desc[i].name;
-		spin_lock_init(&c->lock);
-		INIT_LIST_HEAD(&c->desc_submitted);
-		INIT_LIST_HEAD(&c->desc_issued);
 		INIT_LIST_HEAD(&c->node);
-		list_add_tail(&c->chan.device_node, &dmadev->channels);
+
+		c->vc.desc_free = sa11x0_dma_free_desc;
+		vchan_init(&c->vc, dmadev);
 	}
 
 	return dma_async_device_register(dmadev);
@@ -890,8 +797,9 @@ static void sa11x0_dma_free_channels(struct dma_device *dmadev)
 {
 	struct sa11x0_dma_chan *c, *cn;
 
-	list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) {
-		list_del(&c->chan.device_node);
+	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
+		list_del(&c->vc.chan.device_node);
+		tasklet_kill(&c->vc.task);
 		kfree(c);
 	}
 }
@@ -915,7 +823,6 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
 
 	spin_lock_init(&d->lock);
 	INIT_LIST_HEAD(&d->chan_pending);
-	INIT_LIST_HEAD(&d->desc_complete);
 
 	d->base = ioremap(res->start, resource_size(res));
 	if (!d->base) {
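
As a closing note, the completion and terminate-all flow the driver is left with can be condensed as follows. This is an illustration based only on the calls visible in the hunks above (vchan_cookie_complete(), vchan_get_all_descriptors(), vchan_dma_desc_free_list()), not the literal post-patch functions; the foo_* names are again placeholders and reuse the types from the sketch before the diff.

/* Completion, from the IRQ path with c->vc.lock held: virt-dma completes the
 * cookie and later runs the client callback and vc.desc_free from its own
 * tasklet, so the driver no longer needs its own desc_complete list.
 */
static void foo_dma_complete(struct foo_dma_chan *c, struct foo_dma_desc *txd)
{
	vchan_cookie_complete(&txd->vd);
}

/* DMA_TERMINATE_ALL: collect every queued descriptor under vc.lock, stop the
 * hardware, then free the collected list outside the lock.
 */
static void foo_dma_terminate_all(struct foo_dma_chan *c)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	/* ... stop and release the physical channel here ... */
	spin_unlock_irqrestore(&c->vc.lock, flags);

	vchan_dma_desc_free_list(&c->vc, &head);
}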