author     Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/dma/fsldma.c
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/dma/fsldma.c')
-rw-r--r--  drivers/dma/fsldma.c  871
1 file changed, 451 insertions(+), 420 deletions(-)
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index cea08bed9cf9..8a781540590c 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Freescale MPC85xx, MPC83xx DMA Engine support | 2 | * Freescale MPC85xx, MPC83xx DMA Engine support |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | 4 | * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * Author: | 6 | * Author: |
7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | 7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 |
@@ -35,34 +35,18 @@ | |||
35 | #include <linux/dmapool.h> | 35 | #include <linux/dmapool.h> |
36 | #include <linux/of_platform.h> | 36 | #include <linux/of_platform.h> |
37 | 37 | ||
38 | #include <asm/fsldma.h> | ||
39 | #include "fsldma.h" | 38 | #include "fsldma.h" |
40 | 39 | ||
41 | static void dma_init(struct fsldma_chan *chan) | 40 | #define chan_dbg(chan, fmt, arg...) \ |
42 | { | 41 | dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg) |
43 | /* Reset the channel */ | 42 | #define chan_err(chan, fmt, arg...) \ |
44 | DMA_OUT(chan, &chan->regs->mr, 0, 32); | 43 | dev_err(chan->dev, "%s: " fmt, chan->name, ##arg) |
45 | 44 | ||
46 | switch (chan->feature & FSL_DMA_IP_MASK) { | 45 | static const char msg_ld_oom[] = "No free memory for link descriptor"; |
47 | case FSL_DMA_IP_85XX: | 46 | |
48 | /* Set the channel to below modes: | 47 | /* |
49 | * EIE - Error interrupt enable | 48 | * Register Helpers |
50 | * EOSIE - End of segments interrupt enable (basic mode) | 49 | */ |
51 | * EOLNIE - End of links interrupt enable | ||
52 | */ | ||
53 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE | ||
54 | | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); | ||
55 | break; | ||
56 | case FSL_DMA_IP_83XX: | ||
57 | /* Set the channel to below modes: | ||
58 | * EOTIE - End-of-transfer interrupt enable | ||
59 | * PRC_RM - PCI read multiple | ||
60 | */ | ||
61 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE | ||
62 | | FSL_DMA_MR_PRC_RM, 32); | ||
63 | break; | ||
64 | } | ||
65 | } | ||
66 | 50 | ||
67 | static void set_sr(struct fsldma_chan *chan, u32 val) | 51 | static void set_sr(struct fsldma_chan *chan, u32 val) |
68 | { | 52 | { |
@@ -74,14 +58,38 @@ static u32 get_sr(struct fsldma_chan *chan) | |||
74 | return DMA_IN(chan, &chan->regs->sr, 32); | 58 | return DMA_IN(chan, &chan->regs->sr, 32); |
75 | } | 59 | } |
76 | 60 | ||
61 | static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) | ||
62 | { | ||
63 | DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); | ||
64 | } | ||
65 | |||
66 | static dma_addr_t get_cdar(struct fsldma_chan *chan) | ||
67 | { | ||
68 | return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; | ||
69 | } | ||
70 | |||
71 | static u32 get_bcr(struct fsldma_chan *chan) | ||
72 | { | ||
73 | return DMA_IN(chan, &chan->regs->bcr, 32); | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * Descriptor Helpers | ||
78 | */ | ||
79 | |||
77 | static void set_desc_cnt(struct fsldma_chan *chan, | 80 | static void set_desc_cnt(struct fsldma_chan *chan, |
78 | struct fsl_dma_ld_hw *hw, u32 count) | 81 | struct fsl_dma_ld_hw *hw, u32 count) |
79 | { | 82 | { |
80 | hw->count = CPU_TO_DMA(chan, count, 32); | 83 | hw->count = CPU_TO_DMA(chan, count, 32); |
81 | } | 84 | } |
82 | 85 | ||
86 | static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc) | ||
87 | { | ||
88 | return DMA_TO_CPU(chan, desc->hw.count, 32); | ||
89 | } | ||
90 | |||
83 | static void set_desc_src(struct fsldma_chan *chan, | 91 | static void set_desc_src(struct fsldma_chan *chan, |
84 | struct fsl_dma_ld_hw *hw, dma_addr_t src) | 92 | struct fsl_dma_ld_hw *hw, dma_addr_t src) |
85 | { | 93 | { |
86 | u64 snoop_bits; | 94 | u64 snoop_bits; |
87 | 95 | ||
@@ -90,8 +98,18 @@ static void set_desc_src(struct fsldma_chan *chan, | |||
90 | hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); | 98 | hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); |
91 | } | 99 | } |
92 | 100 | ||
101 | static dma_addr_t get_desc_src(struct fsldma_chan *chan, | ||
102 | struct fsl_desc_sw *desc) | ||
103 | { | ||
104 | u64 snoop_bits; | ||
105 | |||
106 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
107 | ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; | ||
108 | return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits; | ||
109 | } | ||
110 | |||
93 | static void set_desc_dst(struct fsldma_chan *chan, | 111 | static void set_desc_dst(struct fsldma_chan *chan, |
94 | struct fsl_dma_ld_hw *hw, dma_addr_t dst) | 112 | struct fsl_dma_ld_hw *hw, dma_addr_t dst) |
95 | { | 113 | { |
96 | u64 snoop_bits; | 114 | u64 snoop_bits; |
97 | 115 | ||
@@ -100,8 +118,18 @@ static void set_desc_dst(struct fsldma_chan *chan, | |||
100 | hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); | 118 | hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); |
101 | } | 119 | } |
102 | 120 | ||
121 | static dma_addr_t get_desc_dst(struct fsldma_chan *chan, | ||
122 | struct fsl_desc_sw *desc) | ||
123 | { | ||
124 | u64 snoop_bits; | ||
125 | |||
126 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
127 | ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; | ||
128 | return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits; | ||
129 | } | ||
130 | |||
103 | static void set_desc_next(struct fsldma_chan *chan, | 131 | static void set_desc_next(struct fsldma_chan *chan, |
104 | struct fsl_dma_ld_hw *hw, dma_addr_t next) | 132 | struct fsl_dma_ld_hw *hw, dma_addr_t next) |
105 | { | 133 | { |
106 | u64 snoop_bits; | 134 | u64 snoop_bits; |
107 | 135 | ||
@@ -110,24 +138,46 @@ static void set_desc_next(struct fsldma_chan *chan, | |||
110 | hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); | 138 | hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); |
111 | } | 139 | } |
112 | 140 | ||
113 | static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) | 141 | static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc) |
114 | { | 142 | { |
115 | DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); | 143 | u64 snoop_bits; |
116 | } | ||
117 | 144 | ||
118 | static dma_addr_t get_cdar(struct fsldma_chan *chan) | 145 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) |
119 | { | 146 | ? FSL_DMA_SNEN : 0; |
120 | return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; | ||
121 | } | ||
122 | 147 | ||
123 | static dma_addr_t get_ndar(struct fsldma_chan *chan) | 148 | desc->hw.next_ln_addr = CPU_TO_DMA(chan, |
124 | { | 149 | DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL |
125 | return DMA_IN(chan, &chan->regs->ndar, 64); | 150 | | snoop_bits, 64); |
126 | } | 151 | } |
127 | 152 | ||
128 | static u32 get_bcr(struct fsldma_chan *chan) | 153 | /* |
154 | * DMA Engine Hardware Control Helpers | ||
155 | */ | ||
156 | |||
157 | static void dma_init(struct fsldma_chan *chan) | ||
129 | { | 158 | { |
130 | return DMA_IN(chan, &chan->regs->bcr, 32); | 159 | /* Reset the channel */ |
160 | DMA_OUT(chan, &chan->regs->mr, 0, 32); | ||
161 | |||
162 | switch (chan->feature & FSL_DMA_IP_MASK) { | ||
163 | case FSL_DMA_IP_85XX: | ||
164 | /* Set the channel to below modes: | ||
165 | * EIE - Error interrupt enable | ||
166 | * EOLNIE - End of links interrupt enable | ||
167 | * BWC - Bandwidth sharing among channels | ||
168 | */ | ||
169 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC | ||
170 | | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32); | ||
171 | break; | ||
172 | case FSL_DMA_IP_83XX: | ||
173 | /* Set the channel to below modes: | ||
174 | * EOTIE - End-of-transfer interrupt enable | ||
175 | * PRC_RM - PCI read multiple | ||
176 | */ | ||
177 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE | ||
178 | | FSL_DMA_MR_PRC_RM, 32); | ||
179 | break; | ||
180 | } | ||
131 | } | 181 | } |
132 | 182 | ||
133 | static int dma_is_idle(struct fsldma_chan *chan) | 183 | static int dma_is_idle(struct fsldma_chan *chan) |
@@ -136,25 +186,32 @@ static int dma_is_idle(struct fsldma_chan *chan) | |||
136 | return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); | 186 | return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); |
137 | } | 187 | } |
138 | 188 | ||
189 | /* | ||
190 | * Start the DMA controller | ||
191 | * | ||
192 | * Preconditions: | ||
193 | * - the CDAR register must point to the start descriptor | ||
194 | * - the MRn[CS] bit must be cleared | ||
195 | */ | ||
139 | static void dma_start(struct fsldma_chan *chan) | 196 | static void dma_start(struct fsldma_chan *chan) |
140 | { | 197 | { |
141 | u32 mode; | 198 | u32 mode; |
142 | 199 | ||
143 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 200 | mode = DMA_IN(chan, &chan->regs->mr, 32); |
144 | 201 | ||
145 | if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { | 202 | if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { |
146 | if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { | 203 | DMA_OUT(chan, &chan->regs->bcr, 0, 32); |
147 | DMA_OUT(chan, &chan->regs->bcr, 0, 32); | 204 | mode |= FSL_DMA_MR_EMP_EN; |
148 | mode |= FSL_DMA_MR_EMP_EN; | 205 | } else { |
149 | } else { | 206 | mode &= ~FSL_DMA_MR_EMP_EN; |
150 | mode &= ~FSL_DMA_MR_EMP_EN; | ||
151 | } | ||
152 | } | 207 | } |
153 | 208 | ||
154 | if (chan->feature & FSL_DMA_CHAN_START_EXT) | 209 | if (chan->feature & FSL_DMA_CHAN_START_EXT) { |
155 | mode |= FSL_DMA_MR_EMS_EN; | 210 | mode |= FSL_DMA_MR_EMS_EN; |
156 | else | 211 | } else { |
212 | mode &= ~FSL_DMA_MR_EMS_EN; | ||
157 | mode |= FSL_DMA_MR_CS; | 213 | mode |= FSL_DMA_MR_CS; |
214 | } | ||
158 | 215 | ||
159 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 216 | DMA_OUT(chan, &chan->regs->mr, mode, 32); |
160 | } | 217 | } |
@@ -164,13 +221,26 @@ static void dma_halt(struct fsldma_chan *chan) | |||
164 | u32 mode; | 221 | u32 mode; |
165 | int i; | 222 | int i; |
166 | 223 | ||
224 | /* read the mode register */ | ||
167 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 225 | mode = DMA_IN(chan, &chan->regs->mr, 32); |
168 | mode |= FSL_DMA_MR_CA; | ||
169 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | ||
170 | 226 | ||
171 | mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA); | 227 | /* |
228 | * The 85xx controller supports channel abort, which will stop | ||
229 | * the current transfer. On 83xx, this bit is the transfer error | ||
230 | * mask bit, which should not be changed. | ||
231 | */ | ||
232 | if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { | ||
233 | mode |= FSL_DMA_MR_CA; | ||
234 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | ||
235 | |||
236 | mode &= ~FSL_DMA_MR_CA; | ||
237 | } | ||
238 | |||
239 | /* stop the DMA controller */ | ||
240 | mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN); | ||
172 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 241 | DMA_OUT(chan, &chan->regs->mr, mode, 32); |
173 | 242 | ||
243 | /* wait for the DMA controller to become idle */ | ||
174 | for (i = 0; i < 100; i++) { | 244 | for (i = 0; i < 100; i++) { |
175 | if (dma_is_idle(chan)) | 245 | if (dma_is_idle(chan)) |
176 | return; | 246 | return; |
@@ -179,20 +249,7 @@ static void dma_halt(struct fsldma_chan *chan) | |||
179 | } | 249 | } |
180 | 250 | ||
181 | if (!dma_is_idle(chan)) | 251 | if (!dma_is_idle(chan)) |
182 | dev_err(chan->dev, "DMA halt timeout!\n"); | 252 | chan_err(chan, "DMA halt timeout!\n"); |
183 | } | ||
184 | |||
185 | static void set_ld_eol(struct fsldma_chan *chan, | ||
186 | struct fsl_desc_sw *desc) | ||
187 | { | ||
188 | u64 snoop_bits; | ||
189 | |||
190 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) | ||
191 | ? FSL_DMA_SNEN : 0; | ||
192 | |||
193 | desc->hw.next_ln_addr = CPU_TO_DMA(chan, | ||
194 | DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL | ||
195 | | snoop_bits, 64); | ||
196 | } | 253 | } |
197 | 254 | ||
198 | /** | 255 | /** |
@@ -318,8 +375,7 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) | |||
318 | chan->feature &= ~FSL_DMA_CHAN_START_EXT; | 375 | chan->feature &= ~FSL_DMA_CHAN_START_EXT; |
319 | } | 376 | } |
320 | 377 | ||
321 | static void append_ld_queue(struct fsldma_chan *chan, | 378 | static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc) |
322 | struct fsl_desc_sw *desc) | ||
323 | { | 379 | { |
324 | struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); | 380 | struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); |
325 | 381 | ||
@@ -360,8 +416,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
360 | cookie = chan->common.cookie; | 416 | cookie = chan->common.cookie; |
361 | list_for_each_entry(child, &desc->tx_list, node) { | 417 | list_for_each_entry(child, &desc->tx_list, node) { |
362 | cookie++; | 418 | cookie++; |
363 | if (cookie < 0) | 419 | if (cookie < DMA_MIN_COOKIE) |
364 | cookie = 1; | 420 | cookie = DMA_MIN_COOKIE; |
365 | 421 | ||
366 | child->async_tx.cookie = cookie; | 422 | child->async_tx.cookie = cookie; |
367 | } | 423 | } |
@@ -382,15 +438,14 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
382 | * | 438 | * |
383 | * Return - The descriptor allocated. NULL for failed. | 439 | * Return - The descriptor allocated. NULL for failed. |
384 | */ | 440 | */ |
385 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | 441 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) |
386 | struct fsldma_chan *chan) | ||
387 | { | 442 | { |
388 | struct fsl_desc_sw *desc; | 443 | struct fsl_desc_sw *desc; |
389 | dma_addr_t pdesc; | 444 | dma_addr_t pdesc; |
390 | 445 | ||
391 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); | 446 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); |
392 | if (!desc) { | 447 | if (!desc) { |
393 | dev_dbg(chan->dev, "out of memory for link desc\n"); | 448 | chan_dbg(chan, "out of memory for link descriptor\n"); |
394 | return NULL; | 449 | return NULL; |
395 | } | 450 | } |
396 | 451 | ||
@@ -400,10 +455,13 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | |||
400 | desc->async_tx.tx_submit = fsl_dma_tx_submit; | 455 | desc->async_tx.tx_submit = fsl_dma_tx_submit; |
401 | desc->async_tx.phys = pdesc; | 456 | desc->async_tx.phys = pdesc; |
402 | 457 | ||
458 | #ifdef FSL_DMA_LD_DEBUG | ||
459 | chan_dbg(chan, "LD %p allocated\n", desc); | ||
460 | #endif | ||
461 | |||
403 | return desc; | 462 | return desc; |
404 | } | 463 | } |
405 | 464 | ||
406 | |||
407 | /** | 465 | /** |
408 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. | 466 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. |
409 | * @chan : Freescale DMA channel | 467 | * @chan : Freescale DMA channel |
@@ -424,13 +482,11 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan) | |||
424 | * We need the descriptor to be aligned to 32bytes | 482 | * We need the descriptor to be aligned to 32bytes |
425 | * for meeting FSL DMA specification requirement. | 483 | * for meeting FSL DMA specification requirement. |
426 | */ | 484 | */ |
427 | chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", | 485 | chan->desc_pool = dma_pool_create(chan->name, chan->dev, |
428 | chan->dev, | ||
429 | sizeof(struct fsl_desc_sw), | 486 | sizeof(struct fsl_desc_sw), |
430 | __alignof__(struct fsl_desc_sw), 0); | 487 | __alignof__(struct fsl_desc_sw), 0); |
431 | if (!chan->desc_pool) { | 488 | if (!chan->desc_pool) { |
432 | dev_err(chan->dev, "unable to allocate channel %d " | 489 | chan_err(chan, "unable to allocate descriptor pool\n"); |
433 | "descriptor pool\n", chan->id); | ||
434 | return -ENOMEM; | 490 | return -ENOMEM; |
435 | } | 491 | } |
436 | 492 | ||
@@ -452,6 +508,9 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan, | |||
452 | 508 | ||
453 | list_for_each_entry_safe(desc, _desc, list, node) { | 509 | list_for_each_entry_safe(desc, _desc, list, node) { |
454 | list_del(&desc->node); | 510 | list_del(&desc->node); |
511 | #ifdef FSL_DMA_LD_DEBUG | ||
512 | chan_dbg(chan, "LD %p free\n", desc); | ||
513 | #endif | ||
455 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | 514 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); |
456 | } | 515 | } |
457 | } | 516 | } |
@@ -463,6 +522,9 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, | |||
463 | 522 | ||
464 | list_for_each_entry_safe_reverse(desc, _desc, list, node) { | 523 | list_for_each_entry_safe_reverse(desc, _desc, list, node) { |
465 | list_del(&desc->node); | 524 | list_del(&desc->node); |
525 | #ifdef FSL_DMA_LD_DEBUG | ||
526 | chan_dbg(chan, "LD %p free\n", desc); | ||
527 | #endif | ||
466 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | 528 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); |
467 | } | 529 | } |
468 | } | 530 | } |
@@ -476,7 +538,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan) | |||
476 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 538 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
477 | unsigned long flags; | 539 | unsigned long flags; |
478 | 540 | ||
479 | dev_dbg(chan->dev, "Free all channel resources.\n"); | 541 | chan_dbg(chan, "free all channel resources\n"); |
480 | spin_lock_irqsave(&chan->desc_lock, flags); | 542 | spin_lock_irqsave(&chan->desc_lock, flags); |
481 | fsldma_free_desc_list(chan, &chan->ld_pending); | 543 | fsldma_free_desc_list(chan, &chan->ld_pending); |
482 | fsldma_free_desc_list(chan, &chan->ld_running); | 544 | fsldma_free_desc_list(chan, &chan->ld_running); |
@@ -499,7 +561,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) | |||
499 | 561 | ||
500 | new = fsl_dma_alloc_descriptor(chan); | 562 | new = fsl_dma_alloc_descriptor(chan); |
501 | if (!new) { | 563 | if (!new) { |
502 | dev_err(chan->dev, "No free memory for link descriptor\n"); | 564 | chan_err(chan, "%s\n", msg_ld_oom); |
503 | return NULL; | 565 | return NULL; |
504 | } | 566 | } |
505 | 567 | ||
@@ -509,14 +571,15 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) | |||
509 | /* Insert the link descriptor to the LD ring */ | 571 | /* Insert the link descriptor to the LD ring */ |
510 | list_add_tail(&new->node, &new->tx_list); | 572 | list_add_tail(&new->node, &new->tx_list); |
511 | 573 | ||
512 | /* Set End-of-link to the last link descriptor of new list*/ | 574 | /* Set End-of-link to the last link descriptor of new list */ |
513 | set_ld_eol(chan, new); | 575 | set_ld_eol(chan, new); |
514 | 576 | ||
515 | return &new->async_tx; | 577 | return &new->async_tx; |
516 | } | 578 | } |
517 | 579 | ||
518 | static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | 580 | static struct dma_async_tx_descriptor * |
519 | struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, | 581 | fsl_dma_prep_memcpy(struct dma_chan *dchan, |
582 | dma_addr_t dma_dst, dma_addr_t dma_src, | ||
520 | size_t len, unsigned long flags) | 583 | size_t len, unsigned long flags) |
521 | { | 584 | { |
522 | struct fsldma_chan *chan; | 585 | struct fsldma_chan *chan; |
@@ -536,13 +599,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | |||
536 | /* Allocate the link descriptor from DMA pool */ | 599 | /* Allocate the link descriptor from DMA pool */ |
537 | new = fsl_dma_alloc_descriptor(chan); | 600 | new = fsl_dma_alloc_descriptor(chan); |
538 | if (!new) { | 601 | if (!new) { |
539 | dev_err(chan->dev, | 602 | chan_err(chan, "%s\n", msg_ld_oom); |
540 | "No free memory for link descriptor\n"); | ||
541 | goto fail; | 603 | goto fail; |
542 | } | 604 | } |
543 | #ifdef FSL_DMA_LD_DEBUG | ||
544 | dev_dbg(chan->dev, "new link desc alloc %p\n", new); | ||
545 | #endif | ||
546 | 605 | ||
547 | copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); | 606 | copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); |
548 | 607 | ||
@@ -570,7 +629,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | |||
570 | new->async_tx.flags = flags; /* client is in control of this ack */ | 629 | new->async_tx.flags = flags; /* client is in control of this ack */ |
571 | new->async_tx.cookie = -EBUSY; | 630 | new->async_tx.cookie = -EBUSY; |
572 | 631 | ||
573 | /* Set End-of-link to the last link descriptor of new list*/ | 632 | /* Set End-of-link to the last link descriptor of new list */ |
574 | set_ld_eol(chan, new); | 633 | set_ld_eol(chan, new); |
575 | 634 | ||
576 | return &first->async_tx; | 635 | return &first->async_tx; |
@@ -583,362 +642,289 @@ fail: | |||
583 | return NULL; | 642 | return NULL; |
584 | } | 643 | } |
585 | 644 | ||
586 | /** | 645 | static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan, |
587 | * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction | 646 | struct scatterlist *dst_sg, unsigned int dst_nents, |
588 | * @chan: DMA channel | 647 | struct scatterlist *src_sg, unsigned int src_nents, |
589 | * @sgl: scatterlist to transfer to/from | 648 | unsigned long flags) |
590 | * @sg_len: number of entries in @scatterlist | ||
591 | * @direction: DMA direction | ||
592 | * @flags: DMAEngine flags | ||
593 | * | ||
594 | * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the | ||
595 | * DMA_SLAVE API, this gets the device-specific information from the | ||
596 | * chan->private variable. | ||
597 | */ | ||
598 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | ||
599 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | ||
600 | enum dma_data_direction direction, unsigned long flags) | ||
601 | { | 649 | { |
602 | struct fsldma_chan *chan; | ||
603 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; | 650 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; |
604 | struct fsl_dma_slave *slave; | 651 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
605 | size_t copy; | 652 | size_t dst_avail, src_avail; |
606 | 653 | dma_addr_t dst, src; | |
607 | int i; | 654 | size_t len; |
608 | struct scatterlist *sg; | ||
609 | size_t sg_used; | ||
610 | size_t hw_used; | ||
611 | struct fsl_dma_hw_addr *hw; | ||
612 | dma_addr_t dma_dst, dma_src; | ||
613 | 655 | ||
614 | if (!dchan) | 656 | /* basic sanity checks */ |
657 | if (dst_nents == 0 || src_nents == 0) | ||
615 | return NULL; | 658 | return NULL; |
616 | 659 | ||
617 | if (!dchan->private) | 660 | if (dst_sg == NULL || src_sg == NULL) |
618 | return NULL; | 661 | return NULL; |
619 | 662 | ||
620 | chan = to_fsl_chan(dchan); | 663 | /* |
621 | slave = dchan->private; | 664 | * TODO: should we check that both scatterlists have the same |
665 | * TODO: number of bytes in total? Is that really an error? | ||
666 | */ | ||
622 | 667 | ||
623 | if (list_empty(&slave->addresses)) | 668 | /* get prepared for the loop */ |
624 | return NULL; | 669 | dst_avail = sg_dma_len(dst_sg); |
670 | src_avail = sg_dma_len(src_sg); | ||
625 | 671 | ||
626 | hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry); | 672 | /* run until we are out of scatterlist entries */ |
627 | hw_used = 0; | 673 | while (true) { |
628 | 674 | ||
629 | /* | 675 | /* create the largest transaction possible */ |
630 | * Build the hardware transaction to copy from the scatterlist to | 676 | len = min_t(size_t, src_avail, dst_avail); |
631 | * the hardware, or from the hardware to the scatterlist | 677 | len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT); |
632 | * | 678 | if (len == 0) |
633 | * If you are copying from the hardware to the scatterlist and it | 679 | goto fetch; |
634 | * takes two hardware entries to fill an entire page, then both | ||
635 | * hardware entries will be coalesced into the same page | ||
636 | * | ||
637 | * If you are copying from the scatterlist to the hardware and a | ||
638 | * single page can fill two hardware entries, then the data will | ||
639 | * be read out of the page into the first hardware entry, and so on | ||
640 | */ | ||
641 | for_each_sg(sgl, sg, sg_len, i) { | ||
642 | sg_used = 0; | ||
643 | |||
644 | /* Loop until the entire scatterlist entry is used */ | ||
645 | while (sg_used < sg_dma_len(sg)) { | ||
646 | |||
647 | /* | ||
648 | * If we've used up the current hardware address/length | ||
649 | * pair, we need to load a new one | ||
650 | * | ||
651 | * This is done in a while loop so that descriptors with | ||
652 | * length == 0 will be skipped | ||
653 | */ | ||
654 | while (hw_used >= hw->length) { | ||
655 | |||
656 | /* | ||
657 | * If the current hardware entry is the last | ||
658 | * entry in the list, we're finished | ||
659 | */ | ||
660 | if (list_is_last(&hw->entry, &slave->addresses)) | ||
661 | goto finished; | ||
662 | |||
663 | /* Get the next hardware address/length pair */ | ||
664 | hw = list_entry(hw->entry.next, | ||
665 | struct fsl_dma_hw_addr, entry); | ||
666 | hw_used = 0; | ||
667 | } | ||
668 | |||
669 | /* Allocate the link descriptor from DMA pool */ | ||
670 | new = fsl_dma_alloc_descriptor(chan); | ||
671 | if (!new) { | ||
672 | dev_err(chan->dev, "No free memory for " | ||
673 | "link descriptor\n"); | ||
674 | goto fail; | ||
675 | } | ||
676 | #ifdef FSL_DMA_LD_DEBUG | ||
677 | dev_dbg(chan->dev, "new link desc alloc %p\n", new); | ||
678 | #endif | ||
679 | 680 | ||
680 | /* | 681 | dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail; |
681 | * Calculate the maximum number of bytes to transfer, | 682 | src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail; |
682 | * making sure it is less than the DMA controller limit | 683 | |
683 | */ | 684 | /* allocate and populate the descriptor */ |
684 | copy = min_t(size_t, sg_dma_len(sg) - sg_used, | 685 | new = fsl_dma_alloc_descriptor(chan); |
685 | hw->length - hw_used); | 686 | if (!new) { |
686 | copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT); | 687 | chan_err(chan, "%s\n", msg_ld_oom); |
687 | 688 | goto fail; | |
688 | /* | ||
689 | * DMA_FROM_DEVICE | ||
690 | * from the hardware to the scatterlist | ||
691 | * | ||
692 | * DMA_TO_DEVICE | ||
693 | * from the scatterlist to the hardware | ||
694 | */ | ||
695 | if (direction == DMA_FROM_DEVICE) { | ||
696 | dma_src = hw->address + hw_used; | ||
697 | dma_dst = sg_dma_address(sg) + sg_used; | ||
698 | } else { | ||
699 | dma_src = sg_dma_address(sg) + sg_used; | ||
700 | dma_dst = hw->address + hw_used; | ||
701 | } | ||
702 | |||
703 | /* Fill in the descriptor */ | ||
704 | set_desc_cnt(chan, &new->hw, copy); | ||
705 | set_desc_src(chan, &new->hw, dma_src); | ||
706 | set_desc_dst(chan, &new->hw, dma_dst); | ||
707 | |||
708 | /* | ||
709 | * If this is not the first descriptor, chain the | ||
710 | * current descriptor after the previous descriptor | ||
711 | */ | ||
712 | if (!first) { | ||
713 | first = new; | ||
714 | } else { | ||
715 | set_desc_next(chan, &prev->hw, | ||
716 | new->async_tx.phys); | ||
717 | } | ||
718 | |||
719 | new->async_tx.cookie = 0; | ||
720 | async_tx_ack(&new->async_tx); | ||
721 | |||
722 | prev = new; | ||
723 | sg_used += copy; | ||
724 | hw_used += copy; | ||
725 | |||
726 | /* Insert the link descriptor into the LD ring */ | ||
727 | list_add_tail(&new->node, &first->tx_list); | ||
728 | } | 689 | } |
729 | } | ||
730 | 690 | ||
731 | finished: | 691 | set_desc_cnt(chan, &new->hw, len); |
692 | set_desc_src(chan, &new->hw, src); | ||
693 | set_desc_dst(chan, &new->hw, dst); | ||
732 | 694 | ||
733 | /* All of the hardware address/length pairs had length == 0 */ | 695 | if (!first) |
734 | if (!first || !new) | 696 | first = new; |
735 | return NULL; | 697 | else |
698 | set_desc_next(chan, &prev->hw, new->async_tx.phys); | ||
736 | 699 | ||
737 | new->async_tx.flags = flags; | 700 | new->async_tx.cookie = 0; |
738 | new->async_tx.cookie = -EBUSY; | 701 | async_tx_ack(&new->async_tx); |
702 | prev = new; | ||
739 | 703 | ||
740 | /* Set End-of-link to the last link descriptor of new list */ | 704 | /* Insert the link descriptor to the LD ring */ |
741 | set_ld_eol(chan, new); | 705 | list_add_tail(&new->node, &first->tx_list); |
742 | 706 | ||
743 | /* Enable extra controller features */ | 707 | /* update metadata */ |
744 | if (chan->set_src_loop_size) | 708 | dst_avail -= len; |
745 | chan->set_src_loop_size(chan, slave->src_loop_size); | 709 | src_avail -= len; |
746 | 710 | ||
747 | if (chan->set_dst_loop_size) | 711 | fetch: |
748 | chan->set_dst_loop_size(chan, slave->dst_loop_size); | 712 | /* fetch the next dst scatterlist entry */ |
713 | if (dst_avail == 0) { | ||
749 | 714 | ||
750 | if (chan->toggle_ext_start) | 715 | /* no more entries: we're done */ |
751 | chan->toggle_ext_start(chan, slave->external_start); | 716 | if (dst_nents == 0) |
717 | break; | ||
718 | |||
719 | /* fetch the next entry: if there are no more: done */ | ||
720 | dst_sg = sg_next(dst_sg); | ||
721 | if (dst_sg == NULL) | ||
722 | break; | ||
723 | |||
724 | dst_nents--; | ||
725 | dst_avail = sg_dma_len(dst_sg); | ||
726 | } | ||
752 | 727 | ||
753 | if (chan->toggle_ext_pause) | 728 | /* fetch the next src scatterlist entry */ |
754 | chan->toggle_ext_pause(chan, slave->external_pause); | 729 | if (src_avail == 0) { |
755 | 730 | ||
756 | if (chan->set_request_count) | 731 | /* no more entries: we're done */ |
757 | chan->set_request_count(chan, slave->request_count); | 732 | if (src_nents == 0) |
733 | break; | ||
734 | |||
735 | /* fetch the next entry: if there are no more: done */ | ||
736 | src_sg = sg_next(src_sg); | ||
737 | if (src_sg == NULL) | ||
738 | break; | ||
739 | |||
740 | src_nents--; | ||
741 | src_avail = sg_dma_len(src_sg); | ||
742 | } | ||
743 | } | ||
744 | |||
745 | new->async_tx.flags = flags; /* client is in control of this ack */ | ||
746 | new->async_tx.cookie = -EBUSY; | ||
747 | |||
748 | /* Set End-of-link to the last link descriptor of new list */ | ||
749 | set_ld_eol(chan, new); | ||
758 | 750 | ||
759 | return &first->async_tx; | 751 | return &first->async_tx; |
760 | 752 | ||
761 | fail: | 753 | fail: |
762 | /* If first was not set, then we failed to allocate the very first | ||
763 | * descriptor, and we're done */ | ||
764 | if (!first) | 754 | if (!first) |
765 | return NULL; | 755 | return NULL; |
766 | 756 | ||
757 | fsldma_free_desc_list_reverse(chan, &first->tx_list); | ||
758 | return NULL; | ||
759 | } | ||
760 | |||
761 | /** | ||
762 | * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction | ||
763 | * @chan: DMA channel | ||
764 | * @sgl: scatterlist to transfer to/from | ||
765 | * @sg_len: number of entries in @scatterlist | ||
766 | * @direction: DMA direction | ||
767 | * @flags: DMAEngine flags | ||
768 | * | ||
769 | * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the | ||
770 | * DMA_SLAVE API, this gets the device-specific information from the | ||
771 | * chan->private variable. | ||
772 | */ | ||
773 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | ||
774 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | ||
775 | enum dma_data_direction direction, unsigned long flags) | ||
776 | { | ||
767 | /* | 777 | /* |
768 | * First is set, so all of the descriptors we allocated have been added | 778 | * This operation is not supported on the Freescale DMA controller |
769 | * to first->tx_list, INCLUDING "first" itself. Therefore we | ||
770 | * must traverse the list backwards freeing each descriptor in turn | ||
771 | * | 779 | * |
772 | * We're re-using variables for the loop, oh well | 780 | * However, we need to provide the function pointer to allow the |
781 | * device_control() method to work. | ||
773 | */ | 782 | */ |
774 | fsldma_free_desc_list_reverse(chan, &first->tx_list); | ||
775 | return NULL; | 783 | return NULL; |
776 | } | 784 | } |
777 | 785 | ||
778 | static int fsl_dma_device_control(struct dma_chan *dchan, | 786 | static int fsl_dma_device_control(struct dma_chan *dchan, |
779 | enum dma_ctrl_cmd cmd, unsigned long arg) | 787 | enum dma_ctrl_cmd cmd, unsigned long arg) |
780 | { | 788 | { |
789 | struct dma_slave_config *config; | ||
781 | struct fsldma_chan *chan; | 790 | struct fsldma_chan *chan; |
782 | unsigned long flags; | 791 | unsigned long flags; |
783 | 792 | int size; | |
784 | /* Only supports DMA_TERMINATE_ALL */ | ||
785 | if (cmd != DMA_TERMINATE_ALL) | ||
786 | return -ENXIO; | ||
787 | 793 | ||
788 | if (!dchan) | 794 | if (!dchan) |
789 | return -EINVAL; | 795 | return -EINVAL; |
790 | 796 | ||
791 | chan = to_fsl_chan(dchan); | 797 | chan = to_fsl_chan(dchan); |
792 | 798 | ||
793 | /* Halt the DMA engine */ | 799 | switch (cmd) { |
794 | dma_halt(chan); | 800 | case DMA_TERMINATE_ALL: |
801 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
795 | 802 | ||
796 | spin_lock_irqsave(&chan->desc_lock, flags); | 803 | /* Halt the DMA engine */ |
804 | dma_halt(chan); | ||
797 | 805 | ||
798 | /* Remove and free all of the descriptors in the LD queue */ | 806 | /* Remove and free all of the descriptors in the LD queue */ |
799 | fsldma_free_desc_list(chan, &chan->ld_pending); | 807 | fsldma_free_desc_list(chan, &chan->ld_pending); |
800 | fsldma_free_desc_list(chan, &chan->ld_running); | 808 | fsldma_free_desc_list(chan, &chan->ld_running); |
809 | chan->idle = true; | ||
801 | 810 | ||
802 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 811 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
812 | return 0; | ||
803 | 813 | ||
804 | return 0; | 814 | case DMA_SLAVE_CONFIG: |
805 | } | 815 | config = (struct dma_slave_config *)arg; |
806 | 816 | ||
807 | /** | 817 | /* make sure the channel supports setting burst size */ |
808 | * fsl_dma_update_completed_cookie - Update the completed cookie. | 818 | if (!chan->set_request_count) |
809 | * @chan : Freescale DMA channel | 819 | return -ENXIO; |
810 | * | ||
811 | * CONTEXT: hardirq | ||
812 | */ | ||
813 | static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) | ||
814 | { | ||
815 | struct fsl_desc_sw *desc; | ||
816 | unsigned long flags; | ||
817 | dma_cookie_t cookie; | ||
818 | 820 | ||
819 | spin_lock_irqsave(&chan->desc_lock, flags); | 821 | /* we set the controller burst size depending on direction */ |
822 | if (config->direction == DMA_TO_DEVICE) | ||
823 | size = config->dst_addr_width * config->dst_maxburst; | ||
824 | else | ||
825 | size = config->src_addr_width * config->src_maxburst; | ||
820 | 826 | ||
821 | if (list_empty(&chan->ld_running)) { | 827 | chan->set_request_count(chan, size); |
822 | dev_dbg(chan->dev, "no running descriptors\n"); | 828 | return 0; |
823 | goto out_unlock; | ||
824 | } | ||
825 | 829 | ||
826 | /* Get the last descriptor, update the cookie to that */ | 830 | case FSLDMA_EXTERNAL_START: |
827 | desc = to_fsl_desc(chan->ld_running.prev); | ||
828 | if (dma_is_idle(chan)) | ||
829 | cookie = desc->async_tx.cookie; | ||
830 | else { | ||
831 | cookie = desc->async_tx.cookie - 1; | ||
832 | if (unlikely(cookie < DMA_MIN_COOKIE)) | ||
833 | cookie = DMA_MAX_COOKIE; | ||
834 | } | ||
835 | 831 | ||
836 | chan->completed_cookie = cookie; | 832 | /* make sure the channel supports external start */ |
833 | if (!chan->toggle_ext_start) | ||
834 | return -ENXIO; | ||
837 | 835 | ||
838 | out_unlock: | 836 | chan->toggle_ext_start(chan, arg); |
839 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 837 | return 0; |
840 | } | ||
841 | 838 | ||
842 | /** | 839 | default: |
843 | * fsldma_desc_status - Check the status of a descriptor | 840 | return -ENXIO; |
844 | * @chan: Freescale DMA channel | 841 | } |
845 | * @desc: DMA SW descriptor | 842 | |
846 | * | 843 | return 0; |
847 | * This function will return the status of the given descriptor | ||
848 | */ | ||
849 | static enum dma_status fsldma_desc_status(struct fsldma_chan *chan, | ||
850 | struct fsl_desc_sw *desc) | ||
851 | { | ||
852 | return dma_async_is_complete(desc->async_tx.cookie, | ||
853 | chan->completed_cookie, | ||
854 | chan->common.cookie); | ||
855 | } | 844 | } |
856 | 845 | ||
857 | /** | 846 | /** |
858 | * fsl_chan_ld_cleanup - Clean up link descriptors | 847 | * fsldma_cleanup_descriptor - cleanup and free a single link descriptor |
859 | * @chan : Freescale DMA channel | 848 | * @chan: Freescale DMA channel |
849 | * @desc: descriptor to cleanup and free | ||
860 | * | 850 | * |
861 | * This function clean up the ld_queue of DMA channel. | 851 | * This function is used on a descriptor which has been executed by the DMA |
852 | * controller. It will run any callbacks, submit any dependencies, and then | ||
853 | * free the descriptor. | ||
862 | */ | 854 | */ |
863 | static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) | 855 | static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, |
856 | struct fsl_desc_sw *desc) | ||
864 | { | 857 | { |
865 | struct fsl_desc_sw *desc, *_desc; | 858 | struct dma_async_tx_descriptor *txd = &desc->async_tx; |
866 | unsigned long flags; | 859 | struct device *dev = chan->common.device->dev; |
867 | 860 | dma_addr_t src = get_desc_src(chan, desc); | |
868 | spin_lock_irqsave(&chan->desc_lock, flags); | 861 | dma_addr_t dst = get_desc_dst(chan, desc); |
869 | 862 | u32 len = get_desc_cnt(chan, desc); | |
870 | dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie); | 863 | |
871 | list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { | 864 | /* Run the link descriptor callback function */ |
872 | dma_async_tx_callback callback; | 865 | if (txd->callback) { |
873 | void *callback_param; | 866 | #ifdef FSL_DMA_LD_DEBUG |
874 | 867 | chan_dbg(chan, "LD %p callback\n", desc); | |
875 | if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS) | 868 | #endif |
876 | break; | 869 | txd->callback(txd->callback_param); |
870 | } | ||
877 | 871 | ||
878 | /* Remove from the list of running transactions */ | 872 | /* Run any dependencies */ |
879 | list_del(&desc->node); | 873 | dma_run_dependencies(txd); |
880 | 874 | ||
881 | /* Run the link descriptor callback function */ | 875 | /* Unmap the dst buffer, if requested */ |
882 | callback = desc->async_tx.callback; | 876 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
883 | callback_param = desc->async_tx.callback_param; | 877 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) |
884 | if (callback) { | 878 | dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE); |
885 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 879 | else |
886 | dev_dbg(chan->dev, "LD %p callback\n", desc); | 880 | dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE); |
887 | callback(callback_param); | 881 | } |
888 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
889 | } | ||
890 | 882 | ||
891 | /* Run any dependencies, then free the descriptor */ | 883 | /* Unmap the src buffer, if requested */ |
892 | dma_run_dependencies(&desc->async_tx); | 884 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { |
893 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | 885 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) |
886 | dma_unmap_single(dev, src, len, DMA_TO_DEVICE); | ||
887 | else | ||
888 | dma_unmap_page(dev, src, len, DMA_TO_DEVICE); | ||
894 | } | 889 | } |
895 | 890 | ||
896 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 891 | #ifdef FSL_DMA_LD_DEBUG |
892 | chan_dbg(chan, "LD %p free\n", desc); | ||
893 | #endif | ||
894 | dma_pool_free(chan->desc_pool, desc, txd->phys); | ||
897 | } | 895 | } |
898 | 896 | ||
899 | /** | 897 | /** |
900 | * fsl_chan_xfer_ld_queue - transfer any pending transactions | 898 | * fsl_chan_xfer_ld_queue - transfer any pending transactions |
901 | * @chan : Freescale DMA channel | 899 | * @chan : Freescale DMA channel |
902 | * | 900 | * |
903 | * This will make sure that any pending transactions will be run. | 901 | * HARDWARE STATE: idle |
904 | * If the DMA controller is idle, it will be started. Otherwise, | 902 | * LOCKING: must hold chan->desc_lock |
905 | * the DMA controller's interrupt handler will start any pending | ||
906 | * transactions when it becomes idle. | ||
907 | */ | 903 | */ |
908 | static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) | 904 | static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) |
909 | { | 905 | { |
910 | struct fsl_desc_sw *desc; | 906 | struct fsl_desc_sw *desc; |
911 | unsigned long flags; | ||
912 | |||
913 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
914 | 907 | ||
915 | /* | 908 | /* |
916 | * If the list of pending descriptors is empty, then we | 909 | * If the list of pending descriptors is empty, then we |
917 | * don't need to do any work at all | 910 | * don't need to do any work at all |
918 | */ | 911 | */ |
919 | if (list_empty(&chan->ld_pending)) { | 912 | if (list_empty(&chan->ld_pending)) { |
920 | dev_dbg(chan->dev, "no pending LDs\n"); | 913 | chan_dbg(chan, "no pending LDs\n"); |
921 | goto out_unlock; | 914 | return; |
922 | } | 915 | } |
923 | 916 | ||
924 | /* | 917 | /* |
925 | * The DMA controller is not idle, which means the interrupt | 918 | * The DMA controller is not idle, which means that the interrupt |
926 | * handler will start any queued transactions when it runs | 919 | * handler will start any queued transactions when it runs after |
927 | * at the end of the current transaction | 920 | * this transaction finishes |
928 | */ | 921 | */ |
929 | if (!dma_is_idle(chan)) { | 922 | if (!chan->idle) { |
930 | dev_dbg(chan->dev, "DMA controller still busy\n"); | 923 | chan_dbg(chan, "DMA controller still busy\n"); |
931 | goto out_unlock; | 924 | return; |
932 | } | 925 | } |
933 | 926 | ||
934 | /* | 927 | /* |
935 | * TODO: | ||
936 | * make sure the dma_halt() function really un-wedges the | ||
937 | * controller as much as possible | ||
938 | */ | ||
939 | dma_halt(chan); | ||
940 | |||
941 | /* | ||
942 | * If there are some link descriptors which have not been | 928 | * If there are some link descriptors which have not been |
943 | * transferred, we need to start the controller | 929 | * transferred, we need to start the controller |
944 | */ | 930 | */ |
@@ -947,18 +933,32 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) | |||
947 | * Move all elements from the queue of pending transactions | 933 | * Move all elements from the queue of pending transactions |
948 | * onto the list of running transactions | 934 | * onto the list of running transactions |
949 | */ | 935 | */ |
936 | chan_dbg(chan, "idle, starting controller\n"); | ||
950 | desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); | 937 | desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); |
951 | list_splice_tail_init(&chan->ld_pending, &chan->ld_running); | 938 | list_splice_tail_init(&chan->ld_pending, &chan->ld_running); |
952 | 939 | ||
953 | /* | 940 | /* |
941 | * The 85xx DMA controller doesn't clear the channel start bit | ||
942 | * automatically at the end of a transfer. Therefore we must clear | ||
943 | * it in software before starting the transfer. | ||
944 | */ | ||
945 | if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { | ||
946 | u32 mode; | ||
947 | |||
948 | mode = DMA_IN(chan, &chan->regs->mr, 32); | ||
949 | mode &= ~FSL_DMA_MR_CS; | ||
950 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | ||
951 | } | ||
952 | |||
953 | /* | ||
954 | * Program the descriptor's address into the DMA controller, | 954 | * Program the descriptor's address into the DMA controller, |
955 | * then start the DMA transaction | 955 | * then start the DMA transaction |
956 | */ | 956 | */ |
957 | set_cdar(chan, desc->async_tx.phys); | 957 | set_cdar(chan, desc->async_tx.phys); |
958 | dma_start(chan); | 958 | get_cdar(chan); |
959 | 959 | ||
960 | out_unlock: | 960 | dma_start(chan); |
961 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 961 | chan->idle = false; |
962 | } | 962 | } |
963 | 963 | ||
964 | /** | 964 | /** |
@@ -968,7 +968,11 @@ out_unlock: | |||
968 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) | 968 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) |
969 | { | 969 | { |
970 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 970 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
971 | unsigned long flags; | ||
972 | |||
973 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
971 | fsl_chan_xfer_ld_queue(chan); | 974 | fsl_chan_xfer_ld_queue(chan); |
975 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
972 | } | 976 | } |
973 | 977 | ||
974 | /** | 978 | /** |
@@ -980,16 +984,18 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, | |||
980 | struct dma_tx_state *txstate) | 984 | struct dma_tx_state *txstate) |
981 | { | 985 | { |
982 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 986 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
983 | dma_cookie_t last_used; | ||
984 | dma_cookie_t last_complete; | 987 | dma_cookie_t last_complete; |
988 | dma_cookie_t last_used; | ||
989 | unsigned long flags; | ||
985 | 990 | ||
986 | fsl_chan_ld_cleanup(chan); | 991 | spin_lock_irqsave(&chan->desc_lock, flags); |
987 | 992 | ||
988 | last_used = dchan->cookie; | ||
989 | last_complete = chan->completed_cookie; | 993 | last_complete = chan->completed_cookie; |
994 | last_used = dchan->cookie; | ||
990 | 995 | ||
991 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 996 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
992 | 997 | ||
998 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
993 | return dma_async_is_complete(cookie, last_complete, last_used); | 999 | return dma_async_is_complete(cookie, last_complete, last_used); |
994 | } | 1000 | } |
995 | 1001 | ||
@@ -1000,21 +1006,20 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, | |||
1000 | static irqreturn_t fsldma_chan_irq(int irq, void *data) | 1006 | static irqreturn_t fsldma_chan_irq(int irq, void *data) |
1001 | { | 1007 | { |
1002 | struct fsldma_chan *chan = data; | 1008 | struct fsldma_chan *chan = data; |
1003 | int update_cookie = 0; | ||
1004 | int xfer_ld_q = 0; | ||
1005 | u32 stat; | 1009 | u32 stat; |
1006 | 1010 | ||
1007 | /* save and clear the status register */ | 1011 | /* save and clear the status register */ |
1008 | stat = get_sr(chan); | 1012 | stat = get_sr(chan); |
1009 | set_sr(chan, stat); | 1013 | set_sr(chan, stat); |
1010 | dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat); | 1014 | chan_dbg(chan, "irq: stat = 0x%x\n", stat); |
1011 | 1015 | ||
1016 | /* check that this was really our device */ | ||
1012 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); | 1017 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); |
1013 | if (!stat) | 1018 | if (!stat) |
1014 | return IRQ_NONE; | 1019 | return IRQ_NONE; |
1015 | 1020 | ||
1016 | if (stat & FSL_DMA_SR_TE) | 1021 | if (stat & FSL_DMA_SR_TE) |
1017 | dev_err(chan->dev, "Transfer Error!\n"); | 1022 | chan_err(chan, "Transfer Error!\n"); |
1018 | 1023 | ||
1019 | /* | 1024 | /* |
1020 | * Programming Error | 1025 | * Programming Error |
@@ -1022,29 +1027,10 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) | |||
1022 | * triger a PE interrupt. | 1027 | * triger a PE interrupt. |
1023 | */ | 1028 | */ |
1024 | if (stat & FSL_DMA_SR_PE) { | 1029 | if (stat & FSL_DMA_SR_PE) { |
1025 | dev_dbg(chan->dev, "irq: Programming Error INT\n"); | 1030 | chan_dbg(chan, "irq: Programming Error INT\n"); |
1026 | if (get_bcr(chan) == 0) { | ||
1027 | /* BCR register is 0, this is a DMA_INTERRUPT async_tx. | ||
1028 | * Now, update the completed cookie, and continue the | ||
1029 | * next uncompleted transfer. | ||
1030 | */ | ||
1031 | update_cookie = 1; | ||
1032 | xfer_ld_q = 1; | ||
1033 | } | ||
1034 | stat &= ~FSL_DMA_SR_PE; | 1031 | stat &= ~FSL_DMA_SR_PE; |
1035 | } | 1032 | if (get_bcr(chan) != 0) |
1036 | 1033 | chan_err(chan, "Programming Error!\n"); | |
1037 | /* | ||
1038 | * If the link descriptor segment transfer finishes, | ||
1039 | * we will recycle the used descriptor. | ||
1040 | */ | ||
1041 | if (stat & FSL_DMA_SR_EOSI) { | ||
1042 | dev_dbg(chan->dev, "irq: End-of-segments INT\n"); | ||
1043 | dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n", | ||
1044 | (unsigned long long)get_cdar(chan), | ||
1045 | (unsigned long long)get_ndar(chan)); | ||
1046 | stat &= ~FSL_DMA_SR_EOSI; | ||
1047 | update_cookie = 1; | ||
1048 | } | 1034 | } |
1049 | 1035 | ||
1050 | /* | 1036 | /* |
@@ -1052,10 +1038,8 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) | |||
1052 | * and start the next transfer if it exist. | 1038 | * and start the next transfer if it exist. |
1053 | */ | 1039 | */ |
1054 | if (stat & FSL_DMA_SR_EOCDI) { | 1040 | if (stat & FSL_DMA_SR_EOCDI) { |
1055 | dev_dbg(chan->dev, "irq: End-of-Chain link INT\n"); | 1041 | chan_dbg(chan, "irq: End-of-Chain link INT\n"); |
1056 | stat &= ~FSL_DMA_SR_EOCDI; | 1042 | stat &= ~FSL_DMA_SR_EOCDI; |
1057 | update_cookie = 1; | ||
1058 | xfer_ld_q = 1; | ||
1059 | } | 1043 | } |
1060 | 1044 | ||
1061 | /* | 1045 | /* |
@@ -1064,27 +1048,79 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) | |||
1064 | * prepare next transfer. | 1048 | * prepare next transfer. |
1065 | */ | 1049 | */ |
1066 | if (stat & FSL_DMA_SR_EOLNI) { | 1050 | if (stat & FSL_DMA_SR_EOLNI) { |
1067 | dev_dbg(chan->dev, "irq: End-of-link INT\n"); | 1051 | chan_dbg(chan, "irq: End-of-link INT\n"); |
1068 | stat &= ~FSL_DMA_SR_EOLNI; | 1052 | stat &= ~FSL_DMA_SR_EOLNI; |
1069 | xfer_ld_q = 1; | ||
1070 | } | 1053 | } |
1071 | 1054 | ||
1072 | if (update_cookie) | 1055 | /* check that the DMA controller is really idle */ |
1073 | fsl_dma_update_completed_cookie(chan); | 1056 | if (!dma_is_idle(chan)) |
1074 | if (xfer_ld_q) | 1057 | chan_err(chan, "irq: controller not idle!\n"); |
1075 | fsl_chan_xfer_ld_queue(chan); | 1058 | |
1059 | /* check that we handled all of the bits */ | ||
1076 | if (stat) | 1060 | if (stat) |
1077 | dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat); | 1061 | chan_err(chan, "irq: unhandled sr 0x%08x\n", stat); |
1078 | 1062 | ||
1079 | dev_dbg(chan->dev, "irq: Exit\n"); | 1063 | /* |
1064 | * Schedule the tasklet to handle all cleanup of the current | ||
1065 | * transaction. It will start a new transaction if there is | ||
1066 | * one pending. | ||
1067 | */ | ||
1080 | tasklet_schedule(&chan->tasklet); | 1068 | tasklet_schedule(&chan->tasklet); |
1069 | chan_dbg(chan, "irq: Exit\n"); | ||
1081 | return IRQ_HANDLED; | 1070 | return IRQ_HANDLED; |
1082 | } | 1071 | } |
1083 | 1072 | ||
1084 | static void dma_do_tasklet(unsigned long data) | 1073 | static void dma_do_tasklet(unsigned long data) |
1085 | { | 1074 | { |
1086 | struct fsldma_chan *chan = (struct fsldma_chan *)data; | 1075 | struct fsldma_chan *chan = (struct fsldma_chan *)data; |
1087 | fsl_chan_ld_cleanup(chan); | 1076 | struct fsl_desc_sw *desc, *_desc; |
1077 | LIST_HEAD(ld_cleanup); | ||
1078 | unsigned long flags; | ||
1079 | |||
1080 | chan_dbg(chan, "tasklet entry\n"); | ||
1081 | |||
1082 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
1083 | |||
1084 | /* update the cookie if we have some descriptors to cleanup */ | ||
1085 | if (!list_empty(&chan->ld_running)) { | ||
1086 | dma_cookie_t cookie; | ||
1087 | |||
1088 | desc = to_fsl_desc(chan->ld_running.prev); | ||
1089 | cookie = desc->async_tx.cookie; | ||
1090 | |||
1091 | chan->completed_cookie = cookie; | ||
1092 | chan_dbg(chan, "completed_cookie=%d\n", cookie); | ||
1093 | } | ||
1094 | |||
1095 | /* | ||
1096 | * move the descriptors to a temporary list so we can drop the lock | ||
1097 | * during the entire cleanup operation | ||
1098 | */ | ||
1099 | list_splice_tail_init(&chan->ld_running, &ld_cleanup); | ||
1100 | |||
1101 | /* the hardware is now idle and ready for more */ | ||
1102 | chan->idle = true; | ||
1103 | |||
1104 | /* | ||
1105 | * Start any pending transactions automatically | ||
1106 | * | ||
1107 | * In the ideal case, we keep the DMA controller busy while we go | ||
1108 | * ahead and free the descriptors below. | ||
1109 | */ | ||
1110 | fsl_chan_xfer_ld_queue(chan); | ||
1111 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
1112 | |||
1113 | /* Run the callback for each descriptor, in order */ | ||
1114 | list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) { | ||
1115 | |||
1116 | /* Remove from the list of transactions */ | ||
1117 | list_del(&desc->node); | ||
1118 | |||
1119 | /* Run all cleanup for this descriptor */ | ||
1120 | fsldma_cleanup_descriptor(chan, desc); | ||
1121 | } | ||
1122 | |||
1123 | chan_dbg(chan, "tasklet exit\n"); | ||
1088 | } | 1124 | } |
1089 | 1125 | ||
1090 | static irqreturn_t fsldma_ctrl_irq(int irq, void *data) | 1126 | static irqreturn_t fsldma_ctrl_irq(int irq, void *data) |
@@ -1132,7 +1168,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev) | |||
1132 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { | 1168 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { |
1133 | chan = fdev->chan[i]; | 1169 | chan = fdev->chan[i]; |
1134 | if (chan && chan->irq != NO_IRQ) { | 1170 | if (chan && chan->irq != NO_IRQ) { |
1135 | dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id); | 1171 | chan_dbg(chan, "free per-channel IRQ\n"); |
1136 | free_irq(chan->irq, chan); | 1172 | free_irq(chan->irq, chan); |
1137 | } | 1173 | } |
1138 | } | 1174 | } |
@@ -1159,19 +1195,16 @@ static int fsldma_request_irqs(struct fsldma_device *fdev) | |||
1159 | continue; | 1195 | continue; |
1160 | 1196 | ||
1161 | if (chan->irq == NO_IRQ) { | 1197 | if (chan->irq == NO_IRQ) { |
1162 | dev_err(fdev->dev, "no interrupts property defined for " | 1198 | chan_err(chan, "interrupts property missing in device tree\n"); |
1163 | "DMA channel %d. Please fix your " | ||
1164 | "device tree\n", chan->id); | ||
1165 | ret = -ENODEV; | 1199 | ret = -ENODEV; |
1166 | goto out_unwind; | 1200 | goto out_unwind; |
1167 | } | 1201 | } |
1168 | 1202 | ||
1169 | dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id); | 1203 | chan_dbg(chan, "request per-channel IRQ\n"); |
1170 | ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, | 1204 | ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, |
1171 | "fsldma-chan", chan); | 1205 | "fsldma-chan", chan); |
1172 | if (ret) { | 1206 | if (ret) { |
1173 | dev_err(fdev->dev, "unable to request IRQ for DMA " | 1207 | chan_err(chan, "unable to request per-channel IRQ\n"); |
1174 | "channel %d\n", chan->id); | ||
1175 | goto out_unwind; | 1208 | goto out_unwind; |
1176 | } | 1209 | } |
1177 | } | 1210 | } |
@@ -1246,6 +1279,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, | |||
1246 | 1279 | ||
1247 | fdev->chan[chan->id] = chan; | 1280 | fdev->chan[chan->id] = chan; |
1248 | tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); | 1281 | tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); |
1282 | snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id); | ||
1249 | 1283 | ||
1250 | /* Initialize the channel */ | 1284 | /* Initialize the channel */ |
1251 | dma_init(chan); | 1285 | dma_init(chan); |
@@ -1266,6 +1300,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, | |||
1266 | spin_lock_init(&chan->desc_lock); | 1300 | spin_lock_init(&chan->desc_lock); |
1267 | INIT_LIST_HEAD(&chan->ld_pending); | 1301 | INIT_LIST_HEAD(&chan->ld_pending); |
1268 | INIT_LIST_HEAD(&chan->ld_running); | 1302 | INIT_LIST_HEAD(&chan->ld_running); |
1303 | chan->idle = true; | ||
1269 | 1304 | ||
1270 | chan->common.device = &fdev->common; | 1305 | chan->common.device = &fdev->common; |
1271 | 1306 | ||
@@ -1297,8 +1332,7 @@ static void fsl_dma_chan_remove(struct fsldma_chan *chan) | |||
1297 | kfree(chan); | 1332 | kfree(chan); |
1298 | } | 1333 | } |
1299 | 1334 | ||
1300 | static int __devinit fsldma_of_probe(struct platform_device *op, | 1335 | static int __devinit fsldma_of_probe(struct platform_device *op) |
1301 | const struct of_device_id *match) | ||
1302 | { | 1336 | { |
1303 | struct fsldma_device *fdev; | 1337 | struct fsldma_device *fdev; |
1304 | struct device_node *child; | 1338 | struct device_node *child; |
@@ -1327,17 +1361,21 @@ static int __devinit fsldma_of_probe(struct platform_device *op, | |||
1327 | 1361 | ||
1328 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); | 1362 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); |
1329 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); | 1363 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); |
1364 | dma_cap_set(DMA_SG, fdev->common.cap_mask); | ||
1330 | dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); | 1365 | dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); |
1331 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; | 1366 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; |
1332 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; | 1367 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; |
1333 | fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; | 1368 | fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; |
1334 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; | 1369 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; |
1370 | fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; | ||
1335 | fdev->common.device_tx_status = fsl_tx_status; | 1371 | fdev->common.device_tx_status = fsl_tx_status; |
1336 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; | 1372 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; |
1337 | fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; | 1373 | fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; |
1338 | fdev->common.device_control = fsl_dma_device_control; | 1374 | fdev->common.device_control = fsl_dma_device_control; |
1339 | fdev->common.dev = &op->dev; | 1375 | fdev->common.dev = &op->dev; |
1340 | 1376 | ||
1377 | dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); | ||
1378 | |||
1341 | dev_set_drvdata(&op->dev, fdev); | 1379 | dev_set_drvdata(&op->dev, fdev); |
1342 | 1380 | ||
1343 | /* | 1381 | /* |
@@ -1410,7 +1448,7 @@ static const struct of_device_id fsldma_of_ids[] = { | |||
1410 | {} | 1448 | {} |
1411 | }; | 1449 | }; |
1412 | 1450 | ||
1413 | static struct of_platform_driver fsldma_of_driver = { | 1451 | static struct platform_driver fsldma_of_driver = { |
1414 | .driver = { | 1452 | .driver = { |
1415 | .name = "fsl-elo-dma", | 1453 | .name = "fsl-elo-dma", |
1416 | .owner = THIS_MODULE, | 1454 | .owner = THIS_MODULE, |
@@ -1426,20 +1464,13 @@ static struct of_platform_driver fsldma_of_driver = { | |||
1426 | 1464 | ||
1427 | static __init int fsldma_init(void) | 1465 | static __init int fsldma_init(void) |
1428 | { | 1466 | { |
1429 | int ret; | ||
1430 | |||
1431 | pr_info("Freescale Elo / Elo Plus DMA driver\n"); | 1467 | pr_info("Freescale Elo / Elo Plus DMA driver\n"); |
1432 | 1468 | return platform_driver_register(&fsldma_of_driver); | |
1433 | ret = of_register_platform_driver(&fsldma_of_driver); | ||
1434 | if (ret) | ||
1435 | pr_err("fsldma: failed to register platform driver\n"); | ||
1436 | |||
1437 | return ret; | ||
1438 | } | 1469 | } |
1439 | 1470 | ||
1440 | static void __exit fsldma_exit(void) | 1471 | static void __exit fsldma_exit(void) |
1441 | { | 1472 | { |
1442 | of_unregister_platform_driver(&fsldma_of_driver); | 1473 | platform_driver_unregister(&fsldma_of_driver); |
1443 | } | 1474 | } |
1444 | 1475 | ||
1445 | subsys_initcall(fsldma_init); | 1476 | subsys_initcall(fsldma_init); |
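
The largest new piece in this diff is fsl_dma_prep_sg(), which walks a destination and a source scatterlist in lockstep, emitting one link descriptor per chunk and capping each chunk at FSL_DMA_BCR_MAX_CNT. Below is a minimal, hypothetical user-space sketch of just that chunking loop, not the driver code itself: struct seg stands in for a scatterlist entry, MAX_XFER is an arbitrary stand-in for FSL_DMA_BCR_MAX_CNT, and a printf stands in for filling a hardware descriptor.

/* Sketch of the lockstep src/dst chunking used by fsl_dma_prep_sg(). */
#include <stdio.h>
#include <stddef.h>

#define MAX_XFER 64	/* assumed cap; the real limit is FSL_DMA_BCR_MAX_CNT */

struct seg { size_t addr; size_t len; };	/* stands in for a scatterlist entry */

static void prep_sg(struct seg *dst, size_t dst_n, struct seg *src, size_t src_n)
{
	size_t dst_avail = dst_n ? dst[0].len : 0;
	size_t src_avail = src_n ? src[0].len : 0;
	size_t di = 0, si = 0;

	while (1) {
		/* create the largest transaction possible */
		size_t len = src_avail < dst_avail ? src_avail : dst_avail;
		if (len > MAX_XFER)
			len = MAX_XFER;

		if (len) {
			/* offsets into the current entries, measured from the end */
			size_t d = dst[di].addr + dst[di].len - dst_avail;
			size_t s = src[si].addr + src[si].len - src_avail;

			/* here the driver would allocate and fill one link descriptor */
			printf("copy %zu bytes: src=%zu -> dst=%zu\n", len, s, d);

			dst_avail -= len;
			src_avail -= len;
		}

		/* fetch the next destination entry; stop when the list is exhausted */
		if (dst_avail == 0) {
			if (++di >= dst_n)
				break;
			dst_avail = dst[di].len;
		}

		/* fetch the next source entry; stop when the list is exhausted */
		if (src_avail == 0) {
			if (++si >= src_n)
				break;
			src_avail = src[si].len;
		}
	}
}

int main(void)
{
	struct seg dst[] = { { 0x1000, 100 }, { 0x2000, 60 } };
	struct seg src[] = { { 0x9000, 160 } };

	prep_sg(dst, 2, src, 1);
	return 0;
}

With the sample lists above (160 bytes on each side and a 64-byte cap) the sketch emits three pieces of 64, 36 and 60 bytes, which mirrors how the driver splits a transfer across scatterlist boundaries and the controller's per-descriptor byte-count limit.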