path: root/drivers/dma/fsldma.c
Diffstat (limited to 'drivers/dma/fsldma.c')
-rw-r--r--	drivers/dma/fsldma.c	551
1 file changed, 301 insertions(+), 250 deletions(-)
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index e3854a8f0de0..6b396759e7f5 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,35 +37,16 @@
 
 #include "fsldma.h"
 
-static const char msg_ld_oom[] = "No free memory for link descriptor\n";
+#define chan_dbg(chan, fmt, arg...)					\
+	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
+#define chan_err(chan, fmt, arg...)					\
+	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
 
-static void dma_init(struct fsldma_chan *chan)
-{
-	/* Reset the channel */
-	DMA_OUT(chan, &chan->regs->mr, 0, 32);
+static const char msg_ld_oom[] = "No free memory for link descriptor";
 
-	switch (chan->feature & FSL_DMA_IP_MASK) {
-	case FSL_DMA_IP_85XX:
-		/* Set the channel to below modes:
-		 * EIE - Error interrupt enable
-		 * EOSIE - End of segments interrupt enable (basic mode)
-		 * EOLNIE - End of links interrupt enable
-		 * BWC - Bandwidth sharing among channels
-		 */
-		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
-				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE
-				| FSL_DMA_MR_EOSIE, 32);
-		break;
-	case FSL_DMA_IP_83XX:
-		/* Set the channel to below modes:
-		 *	EOTIE - End-of-transfer interrupt enable
-		 *	PRC_RM - PCI read multiple
-		 */
-		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
-				| FSL_DMA_MR_PRC_RM, 32);
-		break;
-	}
-}
+/*
+ * Register Helpers
+ */
 
 static void set_sr(struct fsldma_chan *chan, u32 val)
 {
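[Sketch, not part of the commit] The new chan_dbg()/chan_err() macros prefix every message with the channel name. The "##arg" token is the GNU comma-swallowing extension, so invocations with no varargs still compile. A call such as

	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

expands to roughly

	dev_dbg(chan->dev, "%s: " "irq: stat = 0x%x\n", chan->name, stat);

where the compiler concatenates the adjacent string literals.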
@@ -77,14 +58,38 @@ static u32 get_sr(struct fsldma_chan *chan)
 	return DMA_IN(chan, &chan->regs->sr, 32);
 }
 
+static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
+{
+	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
+}
+
+static dma_addr_t get_cdar(struct fsldma_chan *chan)
+{
+	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
+}
+
+static u32 get_bcr(struct fsldma_chan *chan)
+{
+	return DMA_IN(chan, &chan->regs->bcr, 32);
+}
+
+/*
+ * Descriptor Helpers
+ */
+
 static void set_desc_cnt(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, u32 count)
 {
 	hw->count = CPU_TO_DMA(chan, count, 32);
 }
 
+static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
+{
+	return DMA_TO_CPU(chan, desc->hw.count, 32);
+}
+
 static void set_desc_src(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
 	u64 snoop_bits;
 
@@ -93,8 +98,18 @@ static void set_desc_src(struct fsldma_chan *chan,
 	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }
 
+static dma_addr_t get_desc_src(struct fsldma_chan *chan,
+			       struct fsl_desc_sw *desc)
+{
+	u64 snoop_bits;
+
+	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
+	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
+}
+
 static void set_desc_dst(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
 	u64 snoop_bits;
 
@@ -103,8 +118,18 @@ static void set_desc_dst(struct fsldma_chan *chan,
 	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 }
 
+static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
+			       struct fsl_desc_sw *desc)
+{
+	u64 snoop_bits;
+
+	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
+	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
+}
+
 static void set_desc_next(struct fsldma_chan *chan,
 			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
 	u64 snoop_bits;
 
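[Sketch, not part of the commit] The new getters must strip the snoop attribute bits that the setters fold into the upper word of the 64-bit address fields; otherwise the returned dma_addr_t would carry SATR/DATR attribute bits. The round trip, in outline:

	u64 snoop = (u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32;

	hw->src_addr = CPU_TO_DMA(chan, snoop | src, 64);	/* setter */
	src = DMA_TO_CPU(chan, hw->src_addr, 64) & ~snoop;	/* getter */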
@@ -113,24 +138,46 @@ static void set_desc_next(struct fsldma_chan *chan,
 	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
 }
 
-static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
-{
-	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
-}
+static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
+{
+	u64 snoop_bits;
 
-static dma_addr_t get_cdar(struct fsldma_chan *chan)
-{
-	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
-}
+	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+		? FSL_DMA_SNEN : 0;
 
-static dma_addr_t get_ndar(struct fsldma_chan *chan)
-{
-	return DMA_IN(chan, &chan->regs->ndar, 64);
-}
+	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
+		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+			| snoop_bits, 64);
+}
 
-static u32 get_bcr(struct fsldma_chan *chan)
-{
-	return DMA_IN(chan, &chan->regs->bcr, 32);
+/*
+ * DMA Engine Hardware Control Helpers
+ */
+
+static void dma_init(struct fsldma_chan *chan)
+{
+	/* Reset the channel */
+	DMA_OUT(chan, &chan->regs->mr, 0, 32);
+
+	switch (chan->feature & FSL_DMA_IP_MASK) {
+	case FSL_DMA_IP_85XX:
+		/* Set the channel to below modes:
+		 * EIE - Error interrupt enable
+		 * EOLNIE - End of links interrupt enable
+		 * BWC - Bandwidth sharing among channels
+		 */
+		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
+				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
+		break;
+	case FSL_DMA_IP_83XX:
+		/* Set the channel to below modes:
+		 *	EOTIE - End-of-transfer interrupt enable
+		 *	PRC_RM - PCI read multiple
+		 */
+		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
+				| FSL_DMA_MR_PRC_RM, 32);
+		break;
+	}
 }
 
 static int dma_is_idle(struct fsldma_chan *chan)
@@ -139,25 +186,32 @@ static int dma_is_idle(struct fsldma_chan *chan)
 	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
 }
 
+/*
+ * Start the DMA controller
+ *
+ * Preconditions:
+ * - the CDAR register must point to the start descriptor
+ * - the MRn[CS] bit must be cleared
+ */
 static void dma_start(struct fsldma_chan *chan)
 {
 	u32 mode;
 
 	mode = DMA_IN(chan, &chan->regs->mr, 32);
 
-	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
-		if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
-			DMA_OUT(chan, &chan->regs->bcr, 0, 32);
-			mode |= FSL_DMA_MR_EMP_EN;
-		} else {
-			mode &= ~FSL_DMA_MR_EMP_EN;
-		}
+	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
+		DMA_OUT(chan, &chan->regs->bcr, 0, 32);
+		mode |= FSL_DMA_MR_EMP_EN;
+	} else {
+		mode &= ~FSL_DMA_MR_EMP_EN;
 	}
 
-	if (chan->feature & FSL_DMA_CHAN_START_EXT)
+	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
 		mode |= FSL_DMA_MR_EMS_EN;
-	else
+	} else {
+		mode &= ~FSL_DMA_MR_EMS_EN;
 		mode |= FSL_DMA_MR_CS;
+	}
 
 	DMA_OUT(chan, &chan->regs->mr, mode, 32);
 }
@@ -167,13 +221,26 @@ static void dma_halt(struct fsldma_chan *chan)
 	u32 mode;
 	int i;
 
+	/* read the mode register */
 	mode = DMA_IN(chan, &chan->regs->mr, 32);
-	mode |= FSL_DMA_MR_CA;
-	DMA_OUT(chan, &chan->regs->mr, mode, 32);
 
-	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
+	/*
+	 * The 85xx controller supports channel abort, which will stop
+	 * the current transfer. On 83xx, this bit is the transfer error
+	 * mask bit, which should not be changed.
+	 */
+	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+		mode |= FSL_DMA_MR_CA;
+		DMA_OUT(chan, &chan->regs->mr, mode, 32);
+
+		mode &= ~FSL_DMA_MR_CA;
+	}
+
+	/* stop the DMA controller */
+	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
 	DMA_OUT(chan, &chan->regs->mr, mode, 32);
 
+	/* wait for the DMA controller to become idle */
 	for (i = 0; i < 100; i++) {
 		if (dma_is_idle(chan))
 			return;
@@ -182,20 +249,7 @@ static void dma_halt(struct fsldma_chan *chan)
 	}
 
 	if (!dma_is_idle(chan))
-		dev_err(chan->dev, "DMA halt timeout!\n");
-}
-
-static void set_ld_eol(struct fsldma_chan *chan,
-		       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
-		? FSL_DMA_SNEN : 0;
-
-	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
-		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
-			| snoop_bits, 64);
+		chan_err(chan, "DMA halt timeout!\n");
 }
 
 /**
@@ -321,8 +375,7 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
 		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
 }
 
-static void append_ld_queue(struct fsldma_chan *chan,
-			    struct fsl_desc_sw *desc)
+static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
 {
 	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
 
@@ -363,8 +416,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	cookie = chan->common.cookie;
 	list_for_each_entry(child, &desc->tx_list, node) {
 		cookie++;
-		if (cookie < 0)
-			cookie = 1;
+		if (cookie < DMA_MIN_COOKIE)
+			cookie = DMA_MIN_COOKIE;
 
 		child->async_tx.cookie = cookie;
 	}
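[Sketch, not part of the commit] dma_cookie_t is a signed 32-bit counter in which values below DMA_MIN_COOKIE are reserved so that negative cookies can carry error codes such as -EBUSY. The wrap guard this hunk switches to, written as a hypothetical helper:

	static dma_cookie_t next_cookie(dma_cookie_t cookie)
	{
		if (++cookie < DMA_MIN_COOKIE)
			cookie = DMA_MIN_COOKIE;
		return cookie;
	}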
@@ -385,15 +438,14 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
  *
  * Return - The descriptor allocated. NULL for failed.
  */
-static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
-	struct fsldma_chan *chan)
+static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
 {
 	struct fsl_desc_sw *desc;
 	dma_addr_t pdesc;
 
 	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
 	if (!desc) {
-		dev_dbg(chan->dev, "out of memory for link desc\n");
+		chan_dbg(chan, "out of memory for link descriptor\n");
 		return NULL;
 	}
 
@@ -403,10 +455,13 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
 	desc->async_tx.tx_submit = fsl_dma_tx_submit;
 	desc->async_tx.phys = pdesc;
 
+#ifdef FSL_DMA_LD_DEBUG
+	chan_dbg(chan, "LD %p allocated\n", desc);
+#endif
+
 	return desc;
 }
 
-
 /**
  * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
  * @chan : Freescale DMA channel
@@ -427,13 +482,11 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
 	 * We need the descriptor to be aligned to 32bytes
 	 * for meeting FSL DMA specification requirement.
 	 */
-	chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
-					  chan->dev,
+	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
 					  sizeof(struct fsl_desc_sw),
 					  __alignof__(struct fsl_desc_sw), 0);
 	if (!chan->desc_pool) {
-		dev_err(chan->dev, "unable to allocate channel %d "
-			"descriptor pool\n", chan->id);
+		chan_err(chan, "unable to allocate descriptor pool\n");
 		return -ENOMEM;
 	}
 
@@ -455,6 +508,9 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan,
 
 	list_for_each_entry_safe(desc, _desc, list, node) {
 		list_del(&desc->node);
+#ifdef FSL_DMA_LD_DEBUG
+		chan_dbg(chan, "LD %p free\n", desc);
+#endif
 		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
 	}
 }
@@ -466,6 +522,9 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
 
 	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
 		list_del(&desc->node);
+#ifdef FSL_DMA_LD_DEBUG
+		chan_dbg(chan, "LD %p free\n", desc);
+#endif
 		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
 	}
 }
@@ -479,7 +538,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
 	unsigned long flags;
 
-	dev_dbg(chan->dev, "Free all channel resources.\n");
+	chan_dbg(chan, "free all channel resources\n");
 	spin_lock_irqsave(&chan->desc_lock, flags);
 	fsldma_free_desc_list(chan, &chan->ld_pending);
 	fsldma_free_desc_list(chan, &chan->ld_running);
@@ -502,7 +561,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
 
 	new = fsl_dma_alloc_descriptor(chan);
 	if (!new) {
-		dev_err(chan->dev, msg_ld_oom);
+		chan_err(chan, "%s\n", msg_ld_oom);
 		return NULL;
 	}
 
@@ -512,14 +571,15 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
 	/* Insert the link descriptor to the LD ring */
 	list_add_tail(&new->node, &new->tx_list);
 
-	/* Set End-of-link to the last link descriptor of new list*/
+	/* Set End-of-link to the last link descriptor of new list */
 	set_ld_eol(chan, new);
 
 	return &new->async_tx;
 }
 
-static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
-	struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
+static struct dma_async_tx_descriptor *
+fsl_dma_prep_memcpy(struct dma_chan *dchan,
+	dma_addr_t dma_dst, dma_addr_t dma_src,
 	size_t len, unsigned long flags)
 {
 	struct fsldma_chan *chan;
@@ -539,12 +599,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		/* Allocate the link descriptor from DMA pool */
 		new = fsl_dma_alloc_descriptor(chan);
 		if (!new) {
-			dev_err(chan->dev, msg_ld_oom);
+			chan_err(chan, "%s\n", msg_ld_oom);
 			goto fail;
 		}
-#ifdef FSL_DMA_LD_DEBUG
-		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
-#endif
 
 		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
 
@@ -572,7 +629,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 	new->async_tx.flags = flags; /* client is in control of this ack */
 	new->async_tx.cookie = -EBUSY;
 
-	/* Set End-of-link to the last link descriptor of new list*/
+	/* Set End-of-link to the last link descriptor of new list */
 	set_ld_eol(chan, new);
 
 	return &first->async_tx;
@@ -627,12 +684,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
 		/* allocate and populate the descriptor */
 		new = fsl_dma_alloc_descriptor(chan);
 		if (!new) {
-			dev_err(chan->dev, msg_ld_oom);
+			chan_err(chan, "%s\n", msg_ld_oom);
 			goto fail;
 		}
-#ifdef FSL_DMA_LD_DEBUG
-		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
-#endif
 
 		set_desc_cnt(chan, &new->hw, len);
 		set_desc_src(chan, &new->hw, src);
@@ -744,14 +798,15 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
 
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
+		spin_lock_irqsave(&chan->desc_lock, flags);
+
 		/* Halt the DMA engine */
 		dma_halt(chan);
 
-		spin_lock_irqsave(&chan->desc_lock, flags);
-
 		/* Remove and free all of the descriptors in the LD queue */
 		fsldma_free_desc_list(chan, &chan->ld_pending);
 		fsldma_free_desc_list(chan, &chan->ld_running);
+		chan->idle = true;
 
 		spin_unlock_irqrestore(&chan->desc_lock, flags);
 		return 0;
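[Sketch, not part of the commit] The lock is now taken before dma_halt(), so stopping the hardware, emptying both descriptor lists, and marking the channel idle happen atomically with respect to the interrupt handler and tasklet:

	spin_lock_irqsave(&chan->desc_lock, flags);
	dma_halt(chan);					/* stop hardware under the lock */
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	chan->idle = true;				/* software state now matches hardware */
	spin_unlock_irqrestore(&chan->desc_lock, flags);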
@@ -789,140 +844,87 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
 }
 
 /**
- * fsl_dma_update_completed_cookie - Update the completed cookie.
- * @chan : Freescale DMA channel
- *
- * CONTEXT: hardirq
- */
-static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
-{
-	struct fsl_desc_sw *desc;
-	unsigned long flags;
-	dma_cookie_t cookie;
-
-	spin_lock_irqsave(&chan->desc_lock, flags);
-
-	if (list_empty(&chan->ld_running)) {
-		dev_dbg(chan->dev, "no running descriptors\n");
-		goto out_unlock;
-	}
-
-	/* Get the last descriptor, update the cookie to that */
-	desc = to_fsl_desc(chan->ld_running.prev);
-	if (dma_is_idle(chan))
-		cookie = desc->async_tx.cookie;
-	else {
-		cookie = desc->async_tx.cookie - 1;
-		if (unlikely(cookie < DMA_MIN_COOKIE))
-			cookie = DMA_MAX_COOKIE;
-	}
-
-	chan->completed_cookie = cookie;
-
-out_unlock:
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
-}
-
-/**
- * fsldma_desc_status - Check the status of a descriptor
+ * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
  * @chan: Freescale DMA channel
- * @desc: DMA SW descriptor
- *
- * This function will return the status of the given descriptor
- */
-static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
-					  struct fsl_desc_sw *desc)
-{
-	return dma_async_is_complete(desc->async_tx.cookie,
-				     chan->completed_cookie,
-				     chan->common.cookie);
-}
-
-/**
- * fsl_chan_ld_cleanup - Clean up link descriptors
- * @chan : Freescale DMA channel
+ * @desc: descriptor to cleanup and free
  *
- * This function clean up the ld_queue of DMA channel.
+ * This function is used on a descriptor which has been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies, and then
+ * free the descriptor.
  */
-static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
+static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
+				      struct fsl_desc_sw *desc)
 {
-	struct fsl_desc_sw *desc, *_desc;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->desc_lock, flags);
-
-	dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
-	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
-		dma_async_tx_callback callback;
-		void *callback_param;
-
-		if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
-			break;
+	struct dma_async_tx_descriptor *txd = &desc->async_tx;
+	struct device *dev = chan->common.device->dev;
+	dma_addr_t src = get_desc_src(chan, desc);
+	dma_addr_t dst = get_desc_dst(chan, desc);
+	u32 len = get_desc_cnt(chan, desc);
+
+	/* Run the link descriptor callback function */
+	if (txd->callback) {
+#ifdef FSL_DMA_LD_DEBUG
+		chan_dbg(chan, "LD %p callback\n", desc);
+#endif
+		txd->callback(txd->callback_param);
+	}
 
-		/* Remove from the list of running transactions */
-		list_del(&desc->node);
+	/* Run any dependencies */
+	dma_run_dependencies(txd);
 
-		/* Run the link descriptor callback function */
-		callback = desc->async_tx.callback;
-		callback_param = desc->async_tx.callback_param;
-		if (callback) {
-			spin_unlock_irqrestore(&chan->desc_lock, flags);
-			dev_dbg(chan->dev, "LD %p callback\n", desc);
-			callback(callback_param);
-			spin_lock_irqsave(&chan->desc_lock, flags);
-		}
+	/* Unmap the dst buffer, if requested */
+	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+			dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
+		else
+			dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
+	}
 
-		/* Run any dependencies, then free the descriptor */
-		dma_run_dependencies(&desc->async_tx);
-		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+	/* Unmap the src buffer, if requested */
+	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+			dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
+		else
+			dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
 	}
 
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
+#ifdef FSL_DMA_LD_DEBUG
+	chan_dbg(chan, "LD %p free\n", desc);
+#endif
+	dma_pool_free(chan->desc_pool, desc, txd->phys);
 }
 
 /**
  * fsl_chan_xfer_ld_queue - transfer any pending transactions
  * @chan : Freescale DMA channel
  *
- * This will make sure that any pending transactions will be run.
- * If the DMA controller is idle, it will be started. Otherwise,
- * the DMA controller's interrupt handler will start any pending
- * transactions when it becomes idle.
+ * HARDWARE STATE: idle
+ * LOCKING: must hold chan->desc_lock
  */
 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 {
 	struct fsl_desc_sw *desc;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->desc_lock, flags);
 
 	/*
 	 * If the list of pending descriptors is empty, then we
 	 * don't need to do any work at all
 	 */
 	if (list_empty(&chan->ld_pending)) {
-		dev_dbg(chan->dev, "no pending LDs\n");
-		goto out_unlock;
+		chan_dbg(chan, "no pending LDs\n");
+		return;
 	}
 
 	/*
-	 * The DMA controller is not idle, which means the interrupt
-	 * handler will start any queued transactions when it runs
-	 * at the end of the current transaction
+	 * The DMA controller is not idle, which means that the interrupt
+	 * handler will start any queued transactions when it runs after
+	 * this transaction finishes
 	 */
-	if (!dma_is_idle(chan)) {
-		dev_dbg(chan->dev, "DMA controller still busy\n");
-		goto out_unlock;
+	if (!chan->idle) {
+		chan_dbg(chan, "DMA controller still busy\n");
+		return;
 	}
 
 	/*
-	 * TODO:
-	 * make sure the dma_halt() function really un-wedges the
-	 * controller as much as possible
-	 */
-	dma_halt(chan);
-
-	/*
 	 * If there are some link descriptors which have not been
 	 * transferred, we need to start the controller
 	 */
@@ -931,18 +933,32 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 	 * Move all elements from the queue of pending transactions
 	 * onto the list of running transactions
 	 */
+	chan_dbg(chan, "idle, starting controller\n");
 	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
 	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
 
 	/*
+	 * The 85xx DMA controller doesn't clear the channel start bit
+	 * automatically at the end of a transfer. Therefore we must clear
+	 * it in software before starting the transfer.
+	 */
+	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+		u32 mode;
+
+		mode = DMA_IN(chan, &chan->regs->mr, 32);
+		mode &= ~FSL_DMA_MR_CS;
+		DMA_OUT(chan, &chan->regs->mr, mode, 32);
+	}
+
+	/*
 	 * Program the descriptor's address into the DMA controller,
 	 * then start the DMA transaction
 	 */
 	set_cdar(chan, desc->async_tx.phys);
-	dma_start(chan);
+	get_cdar(chan);
 
-out_unlock:
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
+	dma_start(chan);
+	chan->idle = false;
 }
 
 /**
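[Sketch, not part of the commit] The bare get_cdar(chan) after set_cdar() discards its result; the likely intent (an assumption, not stated in this diff) is a read-back that forces the posted CDAR write to complete before the start bit is set:

	set_cdar(chan, desc->async_tx.phys);	/* program the start descriptor */
	get_cdar(chan);				/* read back to flush the write (assumed intent) */
	dma_start(chan);			/* only now set MR[CS] */
	chan->idle = false;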
@@ -952,7 +968,11 @@ out_unlock:
 static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
 {
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->desc_lock, flags);
 	fsl_chan_xfer_ld_queue(chan);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 
 /**
@@ -964,16 +984,18 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
 					struct dma_tx_state *txstate)
 {
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
-	dma_cookie_t last_used;
 	dma_cookie_t last_complete;
+	dma_cookie_t last_used;
+	unsigned long flags;
 
-	fsl_chan_ld_cleanup(chan);
+	spin_lock_irqsave(&chan->desc_lock, flags);
 
-	last_used = dchan->cookie;
 	last_complete = chan->completed_cookie;
+	last_used = dchan->cookie;
 
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
 
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
 	return dma_async_is_complete(cookie, last_complete, last_used);
 }
 
@@ -984,21 +1006,20 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
 static irqreturn_t fsldma_chan_irq(int irq, void *data)
 {
 	struct fsldma_chan *chan = data;
-	int update_cookie = 0;
-	int xfer_ld_q = 0;
 	u32 stat;
 
 	/* save and clear the status register */
 	stat = get_sr(chan);
 	set_sr(chan, stat);
-	dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);
+	chan_dbg(chan, "irq: stat = 0x%x\n", stat);
 
+	/* check that this was really our device */
 	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
 	if (!stat)
 		return IRQ_NONE;
 
 	if (stat & FSL_DMA_SR_TE)
-		dev_err(chan->dev, "Transfer Error!\n");
+		chan_err(chan, "Transfer Error!\n");
 
 	/*
 	 * Programming Error
@@ -1006,29 +1027,10 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	 * triger a PE interrupt.
 	 */
 	if (stat & FSL_DMA_SR_PE) {
-		dev_dbg(chan->dev, "irq: Programming Error INT\n");
-		if (get_bcr(chan) == 0) {
-			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
-			 * Now, update the completed cookie, and continue the
-			 * next uncompleted transfer.
-			 */
-			update_cookie = 1;
-			xfer_ld_q = 1;
-		}
+		chan_dbg(chan, "irq: Programming Error INT\n");
 		stat &= ~FSL_DMA_SR_PE;
-	}
-
-	/*
-	 * If the link descriptor segment transfer finishes,
-	 * we will recycle the used descriptor.
-	 */
-	if (stat & FSL_DMA_SR_EOSI) {
-		dev_dbg(chan->dev, "irq: End-of-segments INT\n");
-		dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
-			(unsigned long long)get_cdar(chan),
-			(unsigned long long)get_ndar(chan));
-		stat &= ~FSL_DMA_SR_EOSI;
-		update_cookie = 1;
+		if (get_bcr(chan) != 0)
+			chan_err(chan, "Programming Error!\n");
 	}
 
 	/*
@@ -1036,10 +1038,8 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	 * and start the next transfer if it exist.
 	 */
 	if (stat & FSL_DMA_SR_EOCDI) {
-		dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
+		chan_dbg(chan, "irq: End-of-Chain link INT\n");
 		stat &= ~FSL_DMA_SR_EOCDI;
-		update_cookie = 1;
-		xfer_ld_q = 1;
 	}
 
 	/*
@@ -1048,27 +1048,79 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	 * prepare next transfer.
 	 */
 	if (stat & FSL_DMA_SR_EOLNI) {
-		dev_dbg(chan->dev, "irq: End-of-link INT\n");
+		chan_dbg(chan, "irq: End-of-link INT\n");
 		stat &= ~FSL_DMA_SR_EOLNI;
-		xfer_ld_q = 1;
 	}
 
-	if (update_cookie)
-		fsl_dma_update_completed_cookie(chan);
-	if (xfer_ld_q)
-		fsl_chan_xfer_ld_queue(chan);
+	/* check that the DMA controller is really idle */
+	if (!dma_is_idle(chan))
+		chan_err(chan, "irq: controller not idle!\n");
+
+	/* check that we handled all of the bits */
 	if (stat)
-		dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);
+		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);
 
-	dev_dbg(chan->dev, "irq: Exit\n");
+	/*
+	 * Schedule the tasklet to handle all cleanup of the current
+	 * transaction. It will start a new transaction if there is
+	 * one pending.
+	 */
 	tasklet_schedule(&chan->tasklet);
+	chan_dbg(chan, "irq: Exit\n");
 	return IRQ_HANDLED;
 }
 
 static void dma_do_tasklet(unsigned long data)
 {
 	struct fsldma_chan *chan = (struct fsldma_chan *)data;
-	fsl_chan_ld_cleanup(chan);
+	struct fsl_desc_sw *desc, *_desc;
+	LIST_HEAD(ld_cleanup);
+	unsigned long flags;
+
+	chan_dbg(chan, "tasklet entry\n");
+
+	spin_lock_irqsave(&chan->desc_lock, flags);
+
+	/* update the cookie if we have some descriptors to cleanup */
+	if (!list_empty(&chan->ld_running)) {
+		dma_cookie_t cookie;
+
+		desc = to_fsl_desc(chan->ld_running.prev);
+		cookie = desc->async_tx.cookie;
+
+		chan->completed_cookie = cookie;
+		chan_dbg(chan, "completed_cookie=%d\n", cookie);
+	}
+
+	/*
+	 * move the descriptors to a temporary list so we can drop the lock
+	 * during the entire cleanup operation
+	 */
+	list_splice_tail_init(&chan->ld_running, &ld_cleanup);
+
+	/* the hardware is now idle and ready for more */
+	chan->idle = true;
+
+	/*
+	 * Start any pending transactions automatically
+	 *
+	 * In the ideal case, we keep the DMA controller busy while we go
+	 * ahead and free the descriptors below.
+	 */
+	fsl_chan_xfer_ld_queue(chan);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	/* Run the callback for each descriptor, in order */
+	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
+
+		/* Remove from the list of transactions */
+		list_del(&desc->node);
+
+		/* Run all cleanup for this descriptor */
+		fsldma_cleanup_descriptor(chan, desc);
+	}
+
+	chan_dbg(chan, "tasklet exit\n");
 }
 
 static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
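[Sketch, not part of the commit] The rewritten tasklet uses a standard splice-then-unlock pattern: under chan->desc_lock it records the completed cookie, takes ownership of the finished descriptors, and restarts the hardware; the potentially slow descriptor callbacks then run without the lock held. Condensed:

	LIST_HEAD(ld_cleanup);

	spin_lock_irqsave(&chan->desc_lock, flags);
	list_splice_tail_init(&chan->ld_running, &ld_cleanup);	/* take ownership */
	chan->idle = true;
	fsl_chan_xfer_ld_queue(chan);		/* keep the controller busy */
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
		list_del(&desc->node);
		fsldma_cleanup_descriptor(chan, desc);	/* callbacks run unlocked */
	}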
@@ -1116,7 +1168,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev)
 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
 		chan = fdev->chan[i];
 		if (chan && chan->irq != NO_IRQ) {
-			dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
+			chan_dbg(chan, "free per-channel IRQ\n");
 			free_irq(chan->irq, chan);
 		}
 	}
@@ -1143,19 +1195,16 @@ static int fsldma_request_irqs(struct fsldma_device *fdev)
 			continue;
 
 		if (chan->irq == NO_IRQ) {
-			dev_err(fdev->dev, "no interrupts property defined for "
-					   "DMA channel %d. Please fix your "
-					   "device tree\n", chan->id);
+			chan_err(chan, "interrupts property missing in device tree\n");
 			ret = -ENODEV;
 			goto out_unwind;
 		}
 
-		dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
+		chan_dbg(chan, "request per-channel IRQ\n");
 		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
 				  "fsldma-chan", chan);
 		if (ret) {
-			dev_err(fdev->dev, "unable to request IRQ for DMA "
-					   "channel %d\n", chan->id);
+			chan_err(chan, "unable to request per-channel IRQ\n");
 			goto out_unwind;
 		}
 	}
@@ -1230,6 +1279,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
 
 	fdev->chan[chan->id] = chan;
 	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);
 
 	/* Initialize the channel */
 	dma_init(chan);
@@ -1250,6 +1300,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
 	spin_lock_init(&chan->desc_lock);
 	INIT_LIST_HEAD(&chan->ld_pending);
 	INIT_LIST_HEAD(&chan->ld_running);
+	chan->idle = true;
 
 	chan->common.device = &fdev->common;
 