author     Sudhakar Rajashekhara <sudhakar.raj@ti.com>   2009-05-21 07:41:35 -0400
committer  Kevin Hilman <khilman@deeprootsystems.com>    2009-08-26 03:56:56 -0400
commit     60902a2cb12c3c1682ee7a04ad7448ec16dc0c29
tree       ba754bff7fadd7106dc9f8549136a514177d0fd1  /arch/arm/mach-davinci/dma.c
parent     4c5adde7943b982d22a7bf711654fbb5cb810667
davinci: EDMA: multiple CCs, channel mapping and API changes
- restructure to support multiple channel controllers by using
additional struct resources for each CC (see the resource-layout
sketch below)
- interface changes visible to EDMA clients
Introduce macros to build IDs from controller and channel number,
and to extract them. Modify the edma_alloc_slot function to take an
extra argument for the controller.
Also update the ASoC drivers to use the new API (see the ID-macro
sketch below). The ASoC changes are
Acked-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
- Move queue related mappings to dm<soc>.c
EDMA in DM355 and DM644x has two transfer controllers while DM646x
has four transfer controllers. Moving the queue-to-TC mapping and
queue priority mapping into dm<soc>.c lets these mappings be supplied
through the platform device, so the machine_is_* tests can be avoided
(a sketch of such tables follows the diffstat below).
- add channel mapping logic
Channel mapping logic is introduced for the dm646x EDMA. There is
no longer a fixed association between a channel number and a
parameter entry number: using the DMA channel mapping registers
(DCHMAPn), any PaRAM entry can be mapped to any channel (see the
DCHMAP sketch below). On dm644x and dm355, by contrast, the mapping
between EDMA channel and PaRAM entry number is fixed.
Signed-off-by: Naresh Medisetty <naresh@ti.com>
Signed-off-by: Sudhakar Rajashekhara <sudhakar.raj@ti.com>
Reviewed-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Kevin Hilman <khilman@deeprootsystems.com>
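
For reference, a minimal sketch of how a SoC file might describe one
channel controller to the restructured driver. The resource names
("edma_cc0", "edma0", "edma0_err") follow the sprintf() patterns in the
new probe code below; the base address and IRQ numbers are placeholders,
not values from any particular SoC.

	static struct resource edma_resources[] = {
		{
			.name	= "edma_cc0",		/* CC0 register window */
			.start	= 0x01c00000,		/* placeholder base address */
			.end	= 0x01c0ffff,		/* placeholder end address */
			.flags	= IORESOURCE_MEM,
		},
		{
			.name	= "edma0",		/* CC0 transfer-complete IRQ */
			.start	= 16,			/* placeholder IRQ number */
			.flags	= IORESOURCE_IRQ,
		},
		{
			.name	= "edma0_err",		/* CC0 error IRQ */
			.start	= 17,			/* placeholder IRQ number */
			.flags	= IORESOURCE_IRQ,
		},
		/* a second controller would add "edma_cc1", "edma1", "edma1_err" */
	};

A platform device carrying these resources, plus an array of struct
edma_soc_info as platform_data, is what edma_probe() below iterates
over, one pass per channel controller.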
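
Likewise, a sketch of the new controller/channel ID macros that clients
use with the changed API. Their real definitions live in the EDMA
header, which is not part of this file's diff, so the bit layout shown
here is an assumption; only the macro names (EDMA_CTLR, EDMA_CHAN_SLOT,
EDMA_CTLR_CHAN) and the two-argument edma_alloc_slot() call are taken
from the patch.

	/* Assumed encoding: controller number in the upper bits,
	 * channel or slot number in the lower bits.
	 */
	#define EDMA_CTLR_CHAN(ctlr, chan)	(((ctlr) << 16) | (chan))
	#define EDMA_CTLR(i)			((i) >> 16)
	#define EDMA_CHAN_SLOT(i)		((i) & 0xffff)

	/* Example client call after the API change (inside a driver) */
	static int example_request_link_slot(void)
	{
		/* any free PaRAM slot on controller 0; the return value is
		 * the combined EDMA_CTLR_CHAN() ID, so it can be passed
		 * straight to edma_set_transfer_params(), edma_link(), etc.
		 */
		return edma_alloc_slot(0, -1);
	}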
Diffstat (limited to 'arch/arm/mach-davinci/dma.c')
-rw-r--r--  arch/arm/mach-davinci/dma.c | 811
1 file changed, 522 insertions(+), 289 deletions(-)
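
As a companion to the queue-mapping bullet above, a sketch of the
tables that move out of dma.c into dm<soc>.c and reach the driver
through struct edma_soc_info. The two-queue values are the ones deleted
from dma.c in this patch; the variable names and the counts are
illustrative, and only the edma_soc_info field names used by the new
probe code are taken from the patch.

	static const s8 dm644x_queue_tc_mapping[][2] = {
		/* {event queue no, TC no} */
		{0, 0},
		{1, 1},
		{-1, -1},
	};

	static const s8 dm644x_queue_priority_mapping[][2] = {
		/* {event queue no, Priority} */
		{0, 3},
		{1, 7},
		{-1, -1},
	};

	static struct edma_soc_info dm644x_edma_info[] = {
		{
			.n_channel		= 64,	/* illustrative counts */
			.n_region		= 4,
			.n_slot			= 128,
			.n_cc			= 1,
			.queue_tc_mapping	= dm644x_queue_tc_mapping,
			.queue_priority_mapping	= dm644x_queue_priority_mapping,
		},
	};

A dm646x file would supply the same structures with four {queue, TC}
pairs and its own priorities, which is what makes the machine_is_*
checks unnecessary in the EDMA core.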
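
Finally, a short sketch of what the dm646x channel-mapping logic boils
down to. The (param_no << 5) shift is taken from map_dmach_param() in
the patch; the assumption that this targets the PAENTRY field of
DCHMAPn is mine, and the helper name below is hypothetical.

	/* Point DMA channel 'channel' of controller 'ctlr' at PaRAM entry
	 * 'param_no'.  map_dmach_param() issues the same write with
	 * param_no == channel to restore the classic 1:1 mapping.
	 */
	static void edma_map_channel_to_param(unsigned ctlr, unsigned channel,
					      unsigned param_no)
	{
		edma_write_array(ctlr, EDMA_DCHMAP, channel, param_no << 5);
	}

	/* e.g. edma_map_channel_to_param(0, 12, 100) would route events on
	 * channel 12 of CC0 through PaRAM entry 100.
	 */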
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index 15e9eb158bb7..5908f7717b29 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -100,132 +100,157 @@ | |||
100 | #define EDMA_SHADOW0 0x2000 /* 4 regions shadowing global channels */ | 100 | #define EDMA_SHADOW0 0x2000 /* 4 regions shadowing global channels */ |
101 | #define EDMA_PARM 0x4000 /* 128 param entries */ | 101 | #define EDMA_PARM 0x4000 /* 128 param entries */ |
102 | 102 | ||
103 | #define DAVINCI_DMA_3PCC_BASE 0x01C00000 | ||
104 | |||
105 | #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5)) | 103 | #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5)) |
106 | 104 | ||
105 | #define EDMA_DCHMAP 0x0100 /* 64 registers */ | ||
106 | #define CHMAP_EXIST BIT(24) | ||
107 | |||
107 | #define EDMA_MAX_DMACH 64 | 108 | #define EDMA_MAX_DMACH 64 |
108 | #define EDMA_MAX_PARAMENTRY 512 | 109 | #define EDMA_MAX_PARAMENTRY 512 |
109 | #define EDMA_MAX_EVQUE 2 /* FIXME too small */ | 110 | #define EDMA_MAX_CC 2 |
110 | 111 | ||
111 | 112 | ||
112 | /*****************************************************************************/ | 113 | /*****************************************************************************/ |
113 | 114 | ||
114 | static void __iomem *edmacc_regs_base; | 115 | static void __iomem *edmacc_regs_base[EDMA_MAX_CC]; |
115 | 116 | ||
116 | static inline unsigned int edma_read(int offset) | 117 | static inline unsigned int edma_read(unsigned ctlr, int offset) |
117 | { | 118 | { |
118 | return (unsigned int)__raw_readl(edmacc_regs_base + offset); | 119 | return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset); |
119 | } | 120 | } |
120 | 121 | ||
121 | static inline void edma_write(int offset, int val) | 122 | static inline void edma_write(unsigned ctlr, int offset, int val) |
122 | { | 123 | { |
123 | __raw_writel(val, edmacc_regs_base + offset); | 124 | __raw_writel(val, edmacc_regs_base[ctlr] + offset); |
124 | } | 125 | } |
125 | static inline void edma_modify(int offset, unsigned and, unsigned or) | 126 | static inline void edma_modify(unsigned ctlr, int offset, unsigned and, |
127 | unsigned or) | ||
126 | { | 128 | { |
127 | unsigned val = edma_read(offset); | 129 | unsigned val = edma_read(ctlr, offset); |
128 | val &= and; | 130 | val &= and; |
129 | val |= or; | 131 | val |= or; |
130 | edma_write(offset, val); | 132 | edma_write(ctlr, offset, val); |
131 | } | 133 | } |
132 | static inline void edma_and(int offset, unsigned and) | 134 | static inline void edma_and(unsigned ctlr, int offset, unsigned and) |
133 | { | 135 | { |
134 | unsigned val = edma_read(offset); | 136 | unsigned val = edma_read(ctlr, offset); |
135 | val &= and; | 137 | val &= and; |
136 | edma_write(offset, val); | 138 | edma_write(ctlr, offset, val); |
137 | } | 139 | } |
138 | static inline void edma_or(int offset, unsigned or) | 140 | static inline void edma_or(unsigned ctlr, int offset, unsigned or) |
139 | { | 141 | { |
140 | unsigned val = edma_read(offset); | 142 | unsigned val = edma_read(ctlr, offset); |
141 | val |= or; | 143 | val |= or; |
142 | edma_write(offset, val); | 144 | edma_write(ctlr, offset, val); |
143 | } | 145 | } |
144 | static inline unsigned int edma_read_array(int offset, int i) | 146 | static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i) |
145 | { | 147 | { |
146 | return edma_read(offset + (i << 2)); | 148 | return edma_read(ctlr, offset + (i << 2)); |
147 | } | 149 | } |
148 | static inline void edma_write_array(int offset, int i, unsigned val) | 150 | static inline void edma_write_array(unsigned ctlr, int offset, int i, |
151 | unsigned val) | ||
149 | { | 152 | { |
150 | edma_write(offset + (i << 2), val); | 153 | edma_write(ctlr, offset + (i << 2), val); |
151 | } | 154 | } |
152 | static inline void edma_modify_array(int offset, int i, | 155 | static inline void edma_modify_array(unsigned ctlr, int offset, int i, |
153 | unsigned and, unsigned or) | 156 | unsigned and, unsigned or) |
154 | { | 157 | { |
155 | edma_modify(offset + (i << 2), and, or); | 158 | edma_modify(ctlr, offset + (i << 2), and, or); |
156 | } | 159 | } |
157 | static inline void edma_or_array(int offset, int i, unsigned or) | 160 | static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or) |
158 | { | 161 | { |
159 | edma_or(offset + (i << 2), or); | 162 | edma_or(ctlr, offset + (i << 2), or); |
160 | } | 163 | } |
161 | static inline void edma_or_array2(int offset, int i, int j, unsigned or) | 164 | static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j, |
165 | unsigned or) | ||
162 | { | 166 | { |
163 | edma_or(offset + ((i*2 + j) << 2), or); | 167 | edma_or(ctlr, offset + ((i*2 + j) << 2), or); |
164 | } | 168 | } |
165 | static inline void edma_write_array2(int offset, int i, int j, unsigned val) | 169 | static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j, |
170 | unsigned val) | ||
166 | { | 171 | { |
167 | edma_write(offset + ((i*2 + j) << 2), val); | 172 | edma_write(ctlr, offset + ((i*2 + j) << 2), val); |
168 | } | 173 | } |
169 | static inline unsigned int edma_shadow0_read(int offset) | 174 | static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset) |
170 | { | 175 | { |
171 | return edma_read(EDMA_SHADOW0 + offset); | 176 | return edma_read(ctlr, EDMA_SHADOW0 + offset); |
172 | } | 177 | } |
173 | static inline unsigned int edma_shadow0_read_array(int offset, int i) | 178 | static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset, |
179 | int i) | ||
174 | { | 180 | { |
175 | return edma_read(EDMA_SHADOW0 + offset + (i << 2)); | 181 | return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2)); |
176 | } | 182 | } |
177 | static inline void edma_shadow0_write(int offset, unsigned val) | 183 | static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val) |
178 | { | 184 | { |
179 | edma_write(EDMA_SHADOW0 + offset, val); | 185 | edma_write(ctlr, EDMA_SHADOW0 + offset, val); |
180 | } | 186 | } |
181 | static inline void edma_shadow0_write_array(int offset, int i, unsigned val) | 187 | static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i, |
188 | unsigned val) | ||
182 | { | 189 | { |
183 | edma_write(EDMA_SHADOW0 + offset + (i << 2), val); | 190 | edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val); |
184 | } | 191 | } |
185 | static inline unsigned int edma_parm_read(int offset, int param_no) | 192 | static inline unsigned int edma_parm_read(unsigned ctlr, int offset, |
193 | int param_no) | ||
186 | { | 194 | { |
187 | return edma_read(EDMA_PARM + offset + (param_no << 5)); | 195 | return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5)); |
188 | } | 196 | } |
189 | static inline void edma_parm_write(int offset, int param_no, unsigned val) | 197 | static inline void edma_parm_write(unsigned ctlr, int offset, int param_no, |
198 | unsigned val) | ||
190 | { | 199 | { |
191 | edma_write(EDMA_PARM + offset + (param_no << 5), val); | 200 | edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val); |
192 | } | 201 | } |
193 | static inline void edma_parm_modify(int offset, int param_no, | 202 | static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no, |
194 | unsigned and, unsigned or) | 203 | unsigned and, unsigned or) |
195 | { | 204 | { |
196 | edma_modify(EDMA_PARM + offset + (param_no << 5), and, or); | 205 | edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or); |
197 | } | 206 | } |
198 | static inline void edma_parm_and(int offset, int param_no, unsigned and) | 207 | static inline void edma_parm_and(unsigned ctlr, int offset, int param_no, |
208 | unsigned and) | ||
199 | { | 209 | { |
200 | edma_and(EDMA_PARM + offset + (param_no << 5), and); | 210 | edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and); |
201 | } | 211 | } |
202 | static inline void edma_parm_or(int offset, int param_no, unsigned or) | 212 | static inline void edma_parm_or(unsigned ctlr, int offset, int param_no, |
213 | unsigned or) | ||
203 | { | 214 | { |
204 | edma_or(EDMA_PARM + offset + (param_no << 5), or); | 215 | edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or); |
205 | } | 216 | } |
206 | 217 | ||
207 | /*****************************************************************************/ | 218 | /*****************************************************************************/ |
208 | 219 | ||
209 | /* actual number of DMA channels and slots on this silicon */ | 220 | /* actual number of DMA channels and slots on this silicon */ |
210 | static unsigned num_channels; | 221 | struct edma { |
211 | static unsigned num_slots; | 222 | /* how many dma resources of each type */ |
223 | unsigned num_channels; | ||
224 | unsigned num_region; | ||
225 | unsigned num_slots; | ||
226 | unsigned num_tc; | ||
227 | unsigned num_cc; | ||
228 | |||
229 | /* list of channels with no even trigger; terminated by "-1" */ | ||
230 | const s8 *noevent; | ||
231 | |||
232 | /* The edma_inuse bit for each PaRAM slot is clear unless the | ||
233 | * channel is in use ... by ARM or DSP, for QDMA, or whatever. | ||
234 | */ | ||
235 | DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY); | ||
212 | 236 | ||
213 | static struct dma_interrupt_data { | 237 | /* The edma_noevent bit for each channel is clear unless |
214 | void (*callback)(unsigned channel, unsigned short ch_status, | 238 | * it doesn't trigger DMA events on this platform. It uses a |
215 | void *data); | 239 | * bit of SOC-specific initialization code. |
216 | void *data; | 240 | */ |
217 | } intr_data[EDMA_MAX_DMACH]; | 241 | DECLARE_BITMAP(edma_noevent, EDMA_MAX_DMACH); |
218 | 242 | ||
219 | /* The edma_inuse bit for each PaRAM slot is clear unless the | 243 | unsigned irq_res_start; |
220 | * channel is in use ... by ARM or DSP, for QDMA, or whatever. | 244 | unsigned irq_res_end; |
221 | */ | ||
222 | static DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY); | ||
223 | 245 | ||
224 | /* The edma_noevent bit for each channel is clear unless | 246 | struct dma_interrupt_data { |
225 | * it doesn't trigger DMA events on this platform. It uses a | 247 | void (*callback)(unsigned channel, unsigned short ch_status, |
226 | * bit of SOC-specific initialization code. | 248 | void *data); |
227 | */ | 249 | void *data; |
228 | static DECLARE_BITMAP(edma_noevent, EDMA_MAX_DMACH); | 250 | } intr_data[EDMA_MAX_DMACH]; |
251 | }; | ||
252 | |||
253 | static struct edma *edma_info[EDMA_MAX_CC]; | ||
229 | 254 | ||
230 | /* dummy param set used to (re)initialize parameter RAM slots */ | 255 | /* dummy param set used to (re)initialize parameter RAM slots */ |
231 | static const struct edmacc_param dummy_paramset = { | 256 | static const struct edmacc_param dummy_paramset = { |
@@ -233,25 +258,10 @@ static const struct edmacc_param dummy_paramset = { | |||
233 | .ccnt = 1, | 258 | .ccnt = 1, |
234 | }; | 259 | }; |
235 | 260 | ||
236 | static const int __initconst | ||
237 | queue_tc_mapping[EDMA_MAX_EVQUE + 1][2] = { | ||
238 | /* {event queue no, TC no} */ | ||
239 | {0, 0}, | ||
240 | {1, 1}, | ||
241 | {-1, -1} | ||
242 | }; | ||
243 | |||
244 | static const int __initconst | ||
245 | queue_priority_mapping[EDMA_MAX_EVQUE + 1][2] = { | ||
246 | /* {event queue no, Priority} */ | ||
247 | {0, 3}, | ||
248 | {1, 7}, | ||
249 | {-1, -1} | ||
250 | }; | ||
251 | |||
252 | /*****************************************************************************/ | 261 | /*****************************************************************************/ |
253 | 262 | ||
254 | static void map_dmach_queue(unsigned ch_no, enum dma_event_q queue_no) | 263 | static void map_dmach_queue(unsigned ctlr, unsigned ch_no, |
264 | enum dma_event_q queue_no) | ||
255 | { | 265 | { |
256 | int bit = (ch_no & 0x7) * 4; | 266 | int bit = (ch_no & 0x7) * 4; |
257 | 267 | ||
@@ -260,20 +270,40 @@ static void map_dmach_queue(unsigned ch_no, enum dma_event_q queue_no) | |||
260 | queue_no = EVENTQ_1; | 270 | queue_no = EVENTQ_1; |
261 | 271 | ||
262 | queue_no &= 7; | 272 | queue_no &= 7; |
263 | edma_modify_array(EDMA_DMAQNUM, (ch_no >> 3), | 273 | edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3), |
264 | ~(0x7 << bit), queue_no << bit); | 274 | ~(0x7 << bit), queue_no << bit); |
265 | } | 275 | } |
266 | 276 | ||
267 | static void __init map_queue_tc(int queue_no, int tc_no) | 277 | static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no) |
268 | { | 278 | { |
269 | int bit = queue_no * 4; | 279 | int bit = queue_no * 4; |
270 | edma_modify(EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit)); | 280 | edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit)); |
271 | } | 281 | } |
272 | 282 | ||
273 | static void __init assign_priority_to_queue(int queue_no, int priority) | 283 | static void __init assign_priority_to_queue(unsigned ctlr, int queue_no, |
284 | int priority) | ||
274 | { | 285 | { |
275 | int bit = queue_no * 4; | 286 | int bit = queue_no * 4; |
276 | edma_modify(EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit)); | 287 | edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit), |
288 | ((priority & 0x7) << bit)); | ||
289 | } | ||
290 | |||
291 | /** | ||
292 | * map_dmach_param - Maps channel number to param entry number | ||
293 | * | ||
294 | * This maps the dma channel number to param entry numberter. In | ||
295 | * other words using the DMA channel mapping registers a param entry | ||
296 | * can be mapped to any channel | ||
297 | * | ||
298 | * Callers are responsible for ensuring the channel mapping logic is | ||
299 | * included in that particular EDMA variant (Eg : dm646x) | ||
300 | * | ||
301 | */ | ||
302 | static void __init map_dmach_param(unsigned ctlr) | ||
303 | { | ||
304 | int i; | ||
305 | for (i = 0; i < EDMA_MAX_DMACH; i++) | ||
306 | edma_write_array(ctlr, EDMA_DCHMAP , i , (i << 5)); | ||
277 | } | 307 | } |
278 | 308 | ||
279 | static inline void | 309 | static inline void |
@@ -281,22 +311,39 @@ setup_dma_interrupt(unsigned lch, | |||
281 | void (*callback)(unsigned channel, u16 ch_status, void *data), | 311 | void (*callback)(unsigned channel, u16 ch_status, void *data), |
282 | void *data) | 312 | void *data) |
283 | { | 313 | { |
314 | unsigned ctlr; | ||
315 | |||
316 | ctlr = EDMA_CTLR(lch); | ||
317 | lch = EDMA_CHAN_SLOT(lch); | ||
318 | |||
284 | if (!callback) { | 319 | if (!callback) { |
285 | edma_shadow0_write_array(SH_IECR, lch >> 5, | 320 | edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5, |
286 | (1 << (lch & 0x1f))); | 321 | (1 << (lch & 0x1f))); |
287 | } | 322 | } |
288 | 323 | ||
289 | intr_data[lch].callback = callback; | 324 | edma_info[ctlr]->intr_data[lch].callback = callback; |
290 | intr_data[lch].data = data; | 325 | edma_info[ctlr]->intr_data[lch].data = data; |
291 | 326 | ||
292 | if (callback) { | 327 | if (callback) { |
293 | edma_shadow0_write_array(SH_ICR, lch >> 5, | 328 | edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5, |
294 | (1 << (lch & 0x1f))); | 329 | (1 << (lch & 0x1f))); |
295 | edma_shadow0_write_array(SH_IESR, lch >> 5, | 330 | edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5, |
296 | (1 << (lch & 0x1f))); | 331 | (1 << (lch & 0x1f))); |
297 | } | 332 | } |
298 | } | 333 | } |
299 | 334 | ||
335 | static int irq2ctlr(int irq) | ||
336 | { | ||
337 | if (irq >= edma_info[0]->irq_res_start && | ||
338 | irq <= edma_info[0]->irq_res_end) | ||
339 | return 0; | ||
340 | else if (irq >= edma_info[1]->irq_res_start && | ||
341 | irq <= edma_info[1]->irq_res_end) | ||
342 | return 1; | ||
343 | |||
344 | return -1; | ||
345 | } | ||
346 | |||
300 | /****************************************************************************** | 347 | /****************************************************************************** |
301 | * | 348 | * |
302 | * DMA interrupt handler | 349 | * DMA interrupt handler |
@@ -305,32 +352,39 @@ setup_dma_interrupt(unsigned lch, | |||
305 | static irqreturn_t dma_irq_handler(int irq, void *data) | 352 | static irqreturn_t dma_irq_handler(int irq, void *data) |
306 | { | 353 | { |
307 | int i; | 354 | int i; |
355 | unsigned ctlr; | ||
308 | unsigned int cnt = 0; | 356 | unsigned int cnt = 0; |
309 | 357 | ||
358 | ctlr = irq2ctlr(irq); | ||
359 | |||
310 | dev_dbg(data, "dma_irq_handler\n"); | 360 | dev_dbg(data, "dma_irq_handler\n"); |
311 | 361 | ||
312 | if ((edma_shadow0_read_array(SH_IPR, 0) == 0) | 362 | if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0) |
313 | && (edma_shadow0_read_array(SH_IPR, 1) == 0)) | 363 | && (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0)) |
314 | return IRQ_NONE; | 364 | return IRQ_NONE; |
315 | 365 | ||
316 | while (1) { | 366 | while (1) { |
317 | int j; | 367 | int j; |
318 | if (edma_shadow0_read_array(SH_IPR, 0)) | 368 | if (edma_shadow0_read_array(ctlr, SH_IPR, 0)) |
319 | j = 0; | 369 | j = 0; |
320 | else if (edma_shadow0_read_array(SH_IPR, 1)) | 370 | else if (edma_shadow0_read_array(ctlr, SH_IPR, 1)) |
321 | j = 1; | 371 | j = 1; |
322 | else | 372 | else |
323 | break; | 373 | break; |
324 | dev_dbg(data, "IPR%d %08x\n", j, | 374 | dev_dbg(data, "IPR%d %08x\n", j, |
325 | edma_shadow0_read_array(SH_IPR, j)); | 375 | edma_shadow0_read_array(ctlr, SH_IPR, j)); |
326 | for (i = 0; i < 32; i++) { | 376 | for (i = 0; i < 32; i++) { |
327 | int k = (j << 5) + i; | 377 | int k = (j << 5) + i; |
328 | if (edma_shadow0_read_array(SH_IPR, j) & (1 << i)) { | 378 | if (edma_shadow0_read_array(ctlr, SH_IPR, j) & |
379 | (1 << i)) { | ||
329 | /* Clear the corresponding IPR bits */ | 380 | /* Clear the corresponding IPR bits */ |
330 | edma_shadow0_write_array(SH_ICR, j, (1 << i)); | 381 | edma_shadow0_write_array(ctlr, SH_ICR, j, |
331 | if (intr_data[k].callback) { | 382 | (1 << i)); |
332 | intr_data[k].callback(k, DMA_COMPLETE, | 383 | if (edma_info[ctlr]->intr_data[k].callback) { |
333 | intr_data[k].data); | 384 | edma_info[ctlr]->intr_data[k].callback( |
385 | k, DMA_COMPLETE, | ||
386 | edma_info[ctlr]->intr_data[k]. | ||
387 | data); | ||
334 | } | 388 | } |
335 | } | 389 | } |
336 | } | 390 | } |
@@ -338,7 +392,7 @@ static irqreturn_t dma_irq_handler(int irq, void *data) | |||
338 | if (cnt > 10) | 392 | if (cnt > 10) |
339 | break; | 393 | break; |
340 | } | 394 | } |
341 | edma_shadow0_write(SH_IEVAL, 1); | 395 | edma_shadow0_write(ctlr, SH_IEVAL, 1); |
342 | return IRQ_HANDLED; | 396 | return IRQ_HANDLED; |
343 | } | 397 | } |
344 | 398 | ||
@@ -350,78 +404,87 @@ static irqreturn_t dma_irq_handler(int irq, void *data) | |||
350 | static irqreturn_t dma_ccerr_handler(int irq, void *data) | 404 | static irqreturn_t dma_ccerr_handler(int irq, void *data) |
351 | { | 405 | { |
352 | int i; | 406 | int i; |
407 | unsigned ctlr; | ||
353 | unsigned int cnt = 0; | 408 | unsigned int cnt = 0; |
354 | 409 | ||
410 | ctlr = irq2ctlr(irq); | ||
411 | |||
355 | dev_dbg(data, "dma_ccerr_handler\n"); | 412 | dev_dbg(data, "dma_ccerr_handler\n"); |
356 | 413 | ||
357 | if ((edma_read_array(EDMA_EMR, 0) == 0) && | 414 | if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) && |
358 | (edma_read_array(EDMA_EMR, 1) == 0) && | 415 | (edma_read_array(ctlr, EDMA_EMR, 1) == 0) && |
359 | (edma_read(EDMA_QEMR) == 0) && (edma_read(EDMA_CCERR) == 0)) | 416 | (edma_read(ctlr, EDMA_QEMR) == 0) && |
417 | (edma_read(ctlr, EDMA_CCERR) == 0)) | ||
360 | return IRQ_NONE; | 418 | return IRQ_NONE; |
361 | 419 | ||
362 | while (1) { | 420 | while (1) { |
363 | int j = -1; | 421 | int j = -1; |
364 | if (edma_read_array(EDMA_EMR, 0)) | 422 | if (edma_read_array(ctlr, EDMA_EMR, 0)) |
365 | j = 0; | 423 | j = 0; |
366 | else if (edma_read_array(EDMA_EMR, 1)) | 424 | else if (edma_read_array(ctlr, EDMA_EMR, 1)) |
367 | j = 1; | 425 | j = 1; |
368 | if (j >= 0) { | 426 | if (j >= 0) { |
369 | dev_dbg(data, "EMR%d %08x\n", j, | 427 | dev_dbg(data, "EMR%d %08x\n", j, |
370 | edma_read_array(EDMA_EMR, j)); | 428 | edma_read_array(ctlr, EDMA_EMR, j)); |
371 | for (i = 0; i < 32; i++) { | 429 | for (i = 0; i < 32; i++) { |
372 | int k = (j << 5) + i; | 430 | int k = (j << 5) + i; |
373 | if (edma_read_array(EDMA_EMR, j) & (1 << i)) { | 431 | if (edma_read_array(ctlr, EDMA_EMR, j) & |
432 | (1 << i)) { | ||
374 | /* Clear the corresponding EMR bits */ | 433 | /* Clear the corresponding EMR bits */ |
375 | edma_write_array(EDMA_EMCR, j, 1 << i); | 434 | edma_write_array(ctlr, EDMA_EMCR, j, |
435 | 1 << i); | ||
376 | /* Clear any SER */ | 436 | /* Clear any SER */ |
377 | edma_shadow0_write_array(SH_SECR, j, | 437 | edma_shadow0_write_array(ctlr, SH_SECR, |
378 | (1 << i)); | 438 | j, (1 << i)); |
379 | if (intr_data[k].callback) { | 439 | if (edma_info[ctlr]->intr_data[k]. |
380 | intr_data[k].callback(k, | 440 | callback) { |
381 | DMA_CC_ERROR, | 441 | edma_info[ctlr]->intr_data[k]. |
382 | intr_data | 442 | callback(k, |
383 | [k].data); | 443 | DMA_CC_ERROR, |
444 | edma_info[ctlr]->intr_data | ||
445 | [k].data); | ||
384 | } | 446 | } |
385 | } | 447 | } |
386 | } | 448 | } |
387 | } else if (edma_read(EDMA_QEMR)) { | 449 | } else if (edma_read(ctlr, EDMA_QEMR)) { |
388 | dev_dbg(data, "QEMR %02x\n", | 450 | dev_dbg(data, "QEMR %02x\n", |
389 | edma_read(EDMA_QEMR)); | 451 | edma_read(ctlr, EDMA_QEMR)); |
390 | for (i = 0; i < 8; i++) { | 452 | for (i = 0; i < 8; i++) { |
391 | if (edma_read(EDMA_QEMR) & (1 << i)) { | 453 | if (edma_read(ctlr, EDMA_QEMR) & (1 << i)) { |
392 | /* Clear the corresponding IPR bits */ | 454 | /* Clear the corresponding IPR bits */ |
393 | edma_write(EDMA_QEMCR, 1 << i); | 455 | edma_write(ctlr, EDMA_QEMCR, 1 << i); |
394 | edma_shadow0_write(SH_QSECR, (1 << i)); | 456 | edma_shadow0_write(ctlr, SH_QSECR, |
457 | (1 << i)); | ||
395 | 458 | ||
396 | /* NOTE: not reported!! */ | 459 | /* NOTE: not reported!! */ |
397 | } | 460 | } |
398 | } | 461 | } |
399 | } else if (edma_read(EDMA_CCERR)) { | 462 | } else if (edma_read(ctlr, EDMA_CCERR)) { |
400 | dev_dbg(data, "CCERR %08x\n", | 463 | dev_dbg(data, "CCERR %08x\n", |
401 | edma_read(EDMA_CCERR)); | 464 | edma_read(ctlr, EDMA_CCERR)); |
402 | /* FIXME: CCERR.BIT(16) ignored! much better | 465 | /* FIXME: CCERR.BIT(16) ignored! much better |
403 | * to just write CCERRCLR with CCERR value... | 466 | * to just write CCERRCLR with CCERR value... |
404 | */ | 467 | */ |
405 | for (i = 0; i < 8; i++) { | 468 | for (i = 0; i < 8; i++) { |
406 | if (edma_read(EDMA_CCERR) & (1 << i)) { | 469 | if (edma_read(ctlr, EDMA_CCERR) & (1 << i)) { |
407 | /* Clear the corresponding IPR bits */ | 470 | /* Clear the corresponding IPR bits */ |
408 | edma_write(EDMA_CCERRCLR, 1 << i); | 471 | edma_write(ctlr, EDMA_CCERRCLR, 1 << i); |
409 | 472 | ||
410 | /* NOTE: not reported!! */ | 473 | /* NOTE: not reported!! */ |
411 | } | 474 | } |
412 | } | 475 | } |
413 | } | 476 | } |
414 | if ((edma_read_array(EDMA_EMR, 0) == 0) | 477 | if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) |
415 | && (edma_read_array(EDMA_EMR, 1) == 0) | 478 | && (edma_read_array(ctlr, EDMA_EMR, 1) == 0) |
416 | && (edma_read(EDMA_QEMR) == 0) | 479 | && (edma_read(ctlr, EDMA_QEMR) == 0) |
417 | && (edma_read(EDMA_CCERR) == 0)) { | 480 | && (edma_read(ctlr, EDMA_CCERR) == 0)) { |
418 | break; | 481 | break; |
419 | } | 482 | } |
420 | cnt++; | 483 | cnt++; |
421 | if (cnt > 10) | 484 | if (cnt > 10) |
422 | break; | 485 | break; |
423 | } | 486 | } |
424 | edma_write(EDMA_EEVAL, 1); | 487 | edma_write(ctlr, EDMA_EEVAL, 1); |
425 | return IRQ_HANDLED; | 488 | return IRQ_HANDLED; |
426 | } | 489 | } |
427 | 490 | ||
@@ -484,35 +547,53 @@ int edma_alloc_channel(int channel, | |||
484 | void *data, | 547 | void *data, |
485 | enum dma_event_q eventq_no) | 548 | enum dma_event_q eventq_no) |
486 | { | 549 | { |
550 | unsigned i, done, ctlr = 0; | ||
551 | |||
552 | if (channel >= 0) { | ||
553 | ctlr = EDMA_CTLR(channel); | ||
554 | channel = EDMA_CHAN_SLOT(channel); | ||
555 | } | ||
556 | |||
487 | if (channel < 0) { | 557 | if (channel < 0) { |
488 | channel = 0; | 558 | for (i = 0; i < EDMA_MAX_CC; i++) { |
489 | for (;;) { | 559 | channel = 0; |
490 | channel = find_next_bit(edma_noevent, | 560 | for (;;) { |
491 | num_channels, channel); | 561 | channel = find_next_bit(edma_info[i]-> |
492 | if (channel == num_channels) | 562 | edma_noevent, |
493 | return -ENOMEM; | 563 | edma_info[i]->num_channels, |
494 | if (!test_and_set_bit(channel, edma_inuse)) | 564 | channel); |
565 | if (channel == edma_info[i]->num_channels) | ||
566 | return -ENOMEM; | ||
567 | if (!test_and_set_bit(channel, | ||
568 | edma_info[i]->edma_inuse)) { | ||
569 | done = 1; | ||
570 | ctlr = i; | ||
571 | break; | ||
572 | } | ||
573 | channel++; | ||
574 | } | ||
575 | if (done) | ||
495 | break; | 576 | break; |
496 | channel++; | ||
497 | } | 577 | } |
498 | } else if (channel >= num_channels) { | 578 | } else if (channel >= edma_info[ctlr]->num_channels) { |
499 | return -EINVAL; | 579 | return -EINVAL; |
500 | } else if (test_and_set_bit(channel, edma_inuse)) { | 580 | } else if (test_and_set_bit(channel, edma_info[ctlr]->edma_inuse)) { |
501 | return -EBUSY; | 581 | return -EBUSY; |
502 | } | 582 | } |
503 | 583 | ||
504 | /* ensure access through shadow region 0 */ | 584 | /* ensure access through shadow region 0 */ |
505 | edma_or_array2(EDMA_DRAE, 0, channel >> 5, 1 << (channel & 0x1f)); | 585 | edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, 1 << (channel & 0x1f)); |
506 | 586 | ||
507 | /* ensure no events are pending */ | 587 | /* ensure no events are pending */ |
508 | edma_stop(channel); | 588 | edma_stop(EDMA_CTLR_CHAN(ctlr, channel)); |
509 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(channel), | 589 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel), |
510 | &dummy_paramset, PARM_SIZE); | 590 | &dummy_paramset, PARM_SIZE); |
511 | 591 | ||
512 | if (callback) | 592 | if (callback) |
513 | setup_dma_interrupt(channel, callback, data); | 593 | setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel), |
594 | callback, data); | ||
514 | 595 | ||
515 | map_dmach_queue(channel, eventq_no); | 596 | map_dmach_queue(ctlr, channel, eventq_no); |
516 | 597 | ||
517 | return channel; | 598 | return channel; |
518 | } | 599 | } |
@@ -532,15 +613,20 @@ EXPORT_SYMBOL(edma_alloc_channel); | |||
532 | */ | 613 | */ |
533 | void edma_free_channel(unsigned channel) | 614 | void edma_free_channel(unsigned channel) |
534 | { | 615 | { |
535 | if (channel >= num_channels) | 616 | unsigned ctlr; |
617 | |||
618 | ctlr = EDMA_CTLR(channel); | ||
619 | channel = EDMA_CHAN_SLOT(channel); | ||
620 | |||
621 | if (channel >= edma_info[ctlr]->num_channels) | ||
536 | return; | 622 | return; |
537 | 623 | ||
538 | setup_dma_interrupt(channel, NULL, NULL); | 624 | setup_dma_interrupt(channel, NULL, NULL); |
539 | /* REVISIT should probably take out of shadow region 0 */ | 625 | /* REVISIT should probably take out of shadow region 0 */ |
540 | 626 | ||
541 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(channel), | 627 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel), |
542 | &dummy_paramset, PARM_SIZE); | 628 | &dummy_paramset, PARM_SIZE); |
543 | clear_bit(channel, edma_inuse); | 629 | clear_bit(channel, edma_info[ctlr]->edma_inuse); |
544 | } | 630 | } |
545 | EXPORT_SYMBOL(edma_free_channel); | 631 | EXPORT_SYMBOL(edma_free_channel); |
546 | 632 | ||
@@ -558,28 +644,33 @@ EXPORT_SYMBOL(edma_free_channel); | |||
558 | * | 644 | * |
559 | * Returns the number of the slot, else negative errno. | 645 | * Returns the number of the slot, else negative errno. |
560 | */ | 646 | */ |
561 | int edma_alloc_slot(int slot) | 647 | int edma_alloc_slot(unsigned ctlr, int slot) |
562 | { | 648 | { |
649 | if (slot >= 0) | ||
650 | slot = EDMA_CHAN_SLOT(slot); | ||
651 | |||
563 | if (slot < 0) { | 652 | if (slot < 0) { |
564 | slot = num_channels; | 653 | slot = edma_info[ctlr]->num_channels; |
565 | for (;;) { | 654 | for (;;) { |
566 | slot = find_next_zero_bit(edma_inuse, | 655 | slot = find_next_zero_bit(edma_info[ctlr]->edma_inuse, |
567 | num_slots, slot); | 656 | edma_info[ctlr]->num_slots, slot); |
568 | if (slot == num_slots) | 657 | if (slot == edma_info[ctlr]->num_slots) |
569 | return -ENOMEM; | 658 | return -ENOMEM; |
570 | if (!test_and_set_bit(slot, edma_inuse)) | 659 | if (!test_and_set_bit(slot, |
660 | edma_info[ctlr]->edma_inuse)) | ||
571 | break; | 661 | break; |
572 | } | 662 | } |
573 | } else if (slot < num_channels || slot >= num_slots) { | 663 | } else if (slot < edma_info[ctlr]->num_channels || |
664 | slot >= edma_info[ctlr]->num_slots) { | ||
574 | return -EINVAL; | 665 | return -EINVAL; |
575 | } else if (test_and_set_bit(slot, edma_inuse)) { | 666 | } else if (test_and_set_bit(slot, edma_info[ctlr]->edma_inuse)) { |
576 | return -EBUSY; | 667 | return -EBUSY; |
577 | } | 668 | } |
578 | 669 | ||
579 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot), | 670 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), |
580 | &dummy_paramset, PARM_SIZE); | 671 | &dummy_paramset, PARM_SIZE); |
581 | 672 | ||
582 | return slot; | 673 | return EDMA_CTLR_CHAN(ctlr, slot); |
583 | } | 674 | } |
584 | EXPORT_SYMBOL(edma_alloc_slot); | 675 | EXPORT_SYMBOL(edma_alloc_slot); |
585 | 676 | ||
@@ -593,12 +684,18 @@ EXPORT_SYMBOL(edma_alloc_slot); | |||
593 | */ | 684 | */ |
594 | void edma_free_slot(unsigned slot) | 685 | void edma_free_slot(unsigned slot) |
595 | { | 686 | { |
596 | if (slot < num_channels || slot >= num_slots) | 687 | unsigned ctlr; |
688 | |||
689 | ctlr = EDMA_CTLR(slot); | ||
690 | slot = EDMA_CHAN_SLOT(slot); | ||
691 | |||
692 | if (slot < edma_info[ctlr]->num_channels || | ||
693 | slot >= edma_info[ctlr]->num_slots) | ||
597 | return; | 694 | return; |
598 | 695 | ||
599 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot), | 696 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), |
600 | &dummy_paramset, PARM_SIZE); | 697 | &dummy_paramset, PARM_SIZE); |
601 | clear_bit(slot, edma_inuse); | 698 | clear_bit(slot, edma_info[ctlr]->edma_inuse); |
602 | } | 699 | } |
603 | EXPORT_SYMBOL(edma_free_slot); | 700 | EXPORT_SYMBOL(edma_free_slot); |
604 | 701 | ||
@@ -620,8 +717,13 @@ EXPORT_SYMBOL(edma_free_slot); | |||
620 | void edma_set_src(unsigned slot, dma_addr_t src_port, | 717 | void edma_set_src(unsigned slot, dma_addr_t src_port, |
621 | enum address_mode mode, enum fifo_width width) | 718 | enum address_mode mode, enum fifo_width width) |
622 | { | 719 | { |
623 | if (slot < num_slots) { | 720 | unsigned ctlr; |
624 | unsigned int i = edma_parm_read(PARM_OPT, slot); | 721 | |
722 | ctlr = EDMA_CTLR(slot); | ||
723 | slot = EDMA_CHAN_SLOT(slot); | ||
724 | |||
725 | if (slot < edma_info[ctlr]->num_slots) { | ||
726 | unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot); | ||
625 | 727 | ||
626 | if (mode) { | 728 | if (mode) { |
627 | /* set SAM and program FWID */ | 729 | /* set SAM and program FWID */ |
@@ -630,11 +732,11 @@ void edma_set_src(unsigned slot, dma_addr_t src_port, | |||
630 | /* clear SAM */ | 732 | /* clear SAM */ |
631 | i &= ~SAM; | 733 | i &= ~SAM; |
632 | } | 734 | } |
633 | edma_parm_write(PARM_OPT, slot, i); | 735 | edma_parm_write(ctlr, PARM_OPT, slot, i); |
634 | 736 | ||
635 | /* set the source port address | 737 | /* set the source port address |
636 | in source register of param structure */ | 738 | in source register of param structure */ |
637 | edma_parm_write(PARM_SRC, slot, src_port); | 739 | edma_parm_write(ctlr, PARM_SRC, slot, src_port); |
638 | } | 740 | } |
639 | } | 741 | } |
640 | EXPORT_SYMBOL(edma_set_src); | 742 | EXPORT_SYMBOL(edma_set_src); |
@@ -653,8 +755,13 @@ EXPORT_SYMBOL(edma_set_src); | |||
653 | void edma_set_dest(unsigned slot, dma_addr_t dest_port, | 755 | void edma_set_dest(unsigned slot, dma_addr_t dest_port, |
654 | enum address_mode mode, enum fifo_width width) | 756 | enum address_mode mode, enum fifo_width width) |
655 | { | 757 | { |
656 | if (slot < num_slots) { | 758 | unsigned ctlr; |
657 | unsigned int i = edma_parm_read(PARM_OPT, slot); | 759 | |
760 | ctlr = EDMA_CTLR(slot); | ||
761 | slot = EDMA_CHAN_SLOT(slot); | ||
762 | |||
763 | if (slot < edma_info[ctlr]->num_slots) { | ||
764 | unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot); | ||
658 | 765 | ||
659 | if (mode) { | 766 | if (mode) { |
660 | /* set DAM and program FWID */ | 767 | /* set DAM and program FWID */ |
@@ -663,10 +770,10 @@ void edma_set_dest(unsigned slot, dma_addr_t dest_port, | |||
663 | /* clear DAM */ | 770 | /* clear DAM */ |
664 | i &= ~DAM; | 771 | i &= ~DAM; |
665 | } | 772 | } |
666 | edma_parm_write(PARM_OPT, slot, i); | 773 | edma_parm_write(ctlr, PARM_OPT, slot, i); |
667 | /* set the destination port address | 774 | /* set the destination port address |
668 | in dest register of param structure */ | 775 | in dest register of param structure */ |
669 | edma_parm_write(PARM_DST, slot, dest_port); | 776 | edma_parm_write(ctlr, PARM_DST, slot, dest_port); |
670 | } | 777 | } |
671 | } | 778 | } |
672 | EXPORT_SYMBOL(edma_set_dest); | 779 | EXPORT_SYMBOL(edma_set_dest); |
@@ -683,8 +790,12 @@ EXPORT_SYMBOL(edma_set_dest); | |||
683 | void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst) | 790 | void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst) |
684 | { | 791 | { |
685 | struct edmacc_param temp; | 792 | struct edmacc_param temp; |
793 | unsigned ctlr; | ||
794 | |||
795 | ctlr = EDMA_CTLR(slot); | ||
796 | slot = EDMA_CHAN_SLOT(slot); | ||
686 | 797 | ||
687 | edma_read_slot(slot, &temp); | 798 | edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp); |
688 | if (src != NULL) | 799 | if (src != NULL) |
689 | *src = temp.src; | 800 | *src = temp.src; |
690 | if (dst != NULL) | 801 | if (dst != NULL) |
@@ -704,10 +815,15 @@ EXPORT_SYMBOL(edma_get_position); | |||
704 | */ | 815 | */ |
705 | void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx) | 816 | void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx) |
706 | { | 817 | { |
707 | if (slot < num_slots) { | 818 | unsigned ctlr; |
708 | edma_parm_modify(PARM_SRC_DST_BIDX, slot, | 819 | |
820 | ctlr = EDMA_CTLR(slot); | ||
821 | slot = EDMA_CHAN_SLOT(slot); | ||
822 | |||
823 | if (slot < edma_info[ctlr]->num_slots) { | ||
824 | edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot, | ||
709 | 0xffff0000, src_bidx); | 825 | 0xffff0000, src_bidx); |
710 | edma_parm_modify(PARM_SRC_DST_CIDX, slot, | 826 | edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot, |
711 | 0xffff0000, src_cidx); | 827 | 0xffff0000, src_cidx); |
712 | } | 828 | } |
713 | } | 829 | } |
@@ -725,10 +841,15 @@ EXPORT_SYMBOL(edma_set_src_index); | |||
725 | */ | 841 | */ |
726 | void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx) | 842 | void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx) |
727 | { | 843 | { |
728 | if (slot < num_slots) { | 844 | unsigned ctlr; |
729 | edma_parm_modify(PARM_SRC_DST_BIDX, slot, | 845 | |
846 | ctlr = EDMA_CTLR(slot); | ||
847 | slot = EDMA_CHAN_SLOT(slot); | ||
848 | |||
849 | if (slot < edma_info[ctlr]->num_slots) { | ||
850 | edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot, | ||
730 | 0x0000ffff, dest_bidx << 16); | 851 | 0x0000ffff, dest_bidx << 16); |
731 | edma_parm_modify(PARM_SRC_DST_CIDX, slot, | 852 | edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot, |
732 | 0x0000ffff, dest_cidx << 16); | 853 | 0x0000ffff, dest_cidx << 16); |
733 | } | 854 | } |
734 | } | 855 | } |
@@ -767,16 +888,21 @@ void edma_set_transfer_params(unsigned slot, | |||
767 | u16 acnt, u16 bcnt, u16 ccnt, | 888 | u16 acnt, u16 bcnt, u16 ccnt, |
768 | u16 bcnt_rld, enum sync_dimension sync_mode) | 889 | u16 bcnt_rld, enum sync_dimension sync_mode) |
769 | { | 890 | { |
770 | if (slot < num_slots) { | 891 | unsigned ctlr; |
771 | edma_parm_modify(PARM_LINK_BCNTRLD, slot, | 892 | |
893 | ctlr = EDMA_CTLR(slot); | ||
894 | slot = EDMA_CHAN_SLOT(slot); | ||
895 | |||
896 | if (slot < edma_info[ctlr]->num_slots) { | ||
897 | edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot, | ||
772 | 0x0000ffff, bcnt_rld << 16); | 898 | 0x0000ffff, bcnt_rld << 16); |
773 | if (sync_mode == ASYNC) | 899 | if (sync_mode == ASYNC) |
774 | edma_parm_and(PARM_OPT, slot, ~SYNCDIM); | 900 | edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM); |
775 | else | 901 | else |
776 | edma_parm_or(PARM_OPT, slot, SYNCDIM); | 902 | edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM); |
777 | /* Set the acount, bcount, ccount registers */ | 903 | /* Set the acount, bcount, ccount registers */ |
778 | edma_parm_write(PARM_A_B_CNT, slot, (bcnt << 16) | acnt); | 904 | edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt); |
779 | edma_parm_write(PARM_CCNT, slot, ccnt); | 905 | edma_parm_write(ctlr, PARM_CCNT, slot, ccnt); |
780 | } | 906 | } |
781 | } | 907 | } |
782 | EXPORT_SYMBOL(edma_set_transfer_params); | 908 | EXPORT_SYMBOL(edma_set_transfer_params); |
@@ -790,11 +916,19 @@ EXPORT_SYMBOL(edma_set_transfer_params); | |||
790 | */ | 916 | */ |
791 | void edma_link(unsigned from, unsigned to) | 917 | void edma_link(unsigned from, unsigned to) |
792 | { | 918 | { |
793 | if (from >= num_slots) | 919 | unsigned ctlr_from, ctlr_to; |
920 | |||
921 | ctlr_from = EDMA_CTLR(from); | ||
922 | from = EDMA_CHAN_SLOT(from); | ||
923 | ctlr_to = EDMA_CTLR(to); | ||
924 | to = EDMA_CHAN_SLOT(to); | ||
925 | |||
926 | if (from >= edma_info[ctlr_from]->num_slots) | ||
794 | return; | 927 | return; |
795 | if (to >= num_slots) | 928 | if (to >= edma_info[ctlr_to]->num_slots) |
796 | return; | 929 | return; |
797 | edma_parm_modify(PARM_LINK_BCNTRLD, from, 0xffff0000, PARM_OFFSET(to)); | 930 | edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000, |
931 | PARM_OFFSET(to)); | ||
798 | } | 932 | } |
799 | EXPORT_SYMBOL(edma_link); | 933 | EXPORT_SYMBOL(edma_link); |
800 | 934 | ||
@@ -807,9 +941,14 @@ EXPORT_SYMBOL(edma_link); | |||
807 | */ | 941 | */ |
808 | void edma_unlink(unsigned from) | 942 | void edma_unlink(unsigned from) |
809 | { | 943 | { |
810 | if (from >= num_slots) | 944 | unsigned ctlr; |
945 | |||
946 | ctlr = EDMA_CTLR(from); | ||
947 | from = EDMA_CHAN_SLOT(from); | ||
948 | |||
949 | if (from >= edma_info[ctlr]->num_slots) | ||
811 | return; | 950 | return; |
812 | edma_parm_or(PARM_LINK_BCNTRLD, from, 0xffff); | 951 | edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff); |
813 | } | 952 | } |
814 | EXPORT_SYMBOL(edma_unlink); | 953 | EXPORT_SYMBOL(edma_unlink); |
815 | 954 | ||
@@ -829,9 +968,15 @@ EXPORT_SYMBOL(edma_unlink); | |||
829 | */ | 968 | */ |
830 | void edma_write_slot(unsigned slot, const struct edmacc_param *param) | 969 | void edma_write_slot(unsigned slot, const struct edmacc_param *param) |
831 | { | 970 | { |
832 | if (slot >= num_slots) | 971 | unsigned ctlr; |
972 | |||
973 | ctlr = EDMA_CTLR(slot); | ||
974 | slot = EDMA_CHAN_SLOT(slot); | ||
975 | |||
976 | if (slot >= edma_info[ctlr]->num_slots) | ||
833 | return; | 977 | return; |
834 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot), param, PARM_SIZE); | 978 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param, |
979 | PARM_SIZE); | ||
835 | } | 980 | } |
836 | EXPORT_SYMBOL(edma_write_slot); | 981 | EXPORT_SYMBOL(edma_write_slot); |
837 | 982 | ||
@@ -845,9 +990,15 @@ EXPORT_SYMBOL(edma_write_slot); | |||
845 | */ | 990 | */ |
846 | void edma_read_slot(unsigned slot, struct edmacc_param *param) | 991 | void edma_read_slot(unsigned slot, struct edmacc_param *param) |
847 | { | 992 | { |
848 | if (slot >= num_slots) | 993 | unsigned ctlr; |
994 | |||
995 | ctlr = EDMA_CTLR(slot); | ||
996 | slot = EDMA_CHAN_SLOT(slot); | ||
997 | |||
998 | if (slot >= edma_info[ctlr]->num_slots) | ||
849 | return; | 999 | return; |
850 | memcpy_fromio(param, edmacc_regs_base + PARM_OFFSET(slot), PARM_SIZE); | 1000 | memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot), |
1001 | PARM_SIZE); | ||
851 | } | 1002 | } |
852 | EXPORT_SYMBOL(edma_read_slot); | 1003 | EXPORT_SYMBOL(edma_read_slot); |
853 | 1004 | ||
@@ -864,10 +1015,15 @@ EXPORT_SYMBOL(edma_read_slot); | |||
864 | */ | 1015 | */ |
865 | void edma_pause(unsigned channel) | 1016 | void edma_pause(unsigned channel) |
866 | { | 1017 | { |
867 | if (channel < num_channels) { | 1018 | unsigned ctlr; |
1019 | |||
1020 | ctlr = EDMA_CTLR(channel); | ||
1021 | channel = EDMA_CHAN_SLOT(channel); | ||
1022 | |||
1023 | if (channel < edma_info[ctlr]->num_channels) { | ||
868 | unsigned int mask = (1 << (channel & 0x1f)); | 1024 | unsigned int mask = (1 << (channel & 0x1f)); |
869 | 1025 | ||
870 | edma_shadow0_write_array(SH_EECR, channel >> 5, mask); | 1026 | edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask); |
871 | } | 1027 | } |
872 | } | 1028 | } |
873 | EXPORT_SYMBOL(edma_pause); | 1029 | EXPORT_SYMBOL(edma_pause); |
@@ -880,10 +1036,15 @@ EXPORT_SYMBOL(edma_pause); | |||
880 | */ | 1036 | */ |
881 | void edma_resume(unsigned channel) | 1037 | void edma_resume(unsigned channel) |
882 | { | 1038 | { |
883 | if (channel < num_channels) { | 1039 | unsigned ctlr; |
1040 | |||
1041 | ctlr = EDMA_CTLR(channel); | ||
1042 | channel = EDMA_CHAN_SLOT(channel); | ||
1043 | |||
1044 | if (channel < edma_info[ctlr]->num_channels) { | ||
884 | unsigned int mask = (1 << (channel & 0x1f)); | 1045 | unsigned int mask = (1 << (channel & 0x1f)); |
885 | 1046 | ||
886 | edma_shadow0_write_array(SH_EESR, channel >> 5, mask); | 1047 | edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask); |
887 | } | 1048 | } |
888 | } | 1049 | } |
889 | EXPORT_SYMBOL(edma_resume); | 1050 | EXPORT_SYMBOL(edma_resume); |
@@ -901,28 +1062,33 @@ EXPORT_SYMBOL(edma_resume); | |||
901 | */ | 1062 | */ |
902 | int edma_start(unsigned channel) | 1063 | int edma_start(unsigned channel) |
903 | { | 1064 | { |
904 | if (channel < num_channels) { | 1065 | unsigned ctlr; |
1066 | |||
1067 | ctlr = EDMA_CTLR(channel); | ||
1068 | channel = EDMA_CHAN_SLOT(channel); | ||
1069 | |||
1070 | if (channel < edma_info[ctlr]->num_channels) { | ||
905 | int j = channel >> 5; | 1071 | int j = channel >> 5; |
906 | unsigned int mask = (1 << (channel & 0x1f)); | 1072 | unsigned int mask = (1 << (channel & 0x1f)); |
907 | 1073 | ||
908 | /* EDMA channels without event association */ | 1074 | /* EDMA channels without event association */ |
909 | if (test_bit(channel, edma_noevent)) { | 1075 | if (test_bit(channel, edma_info[ctlr]->edma_noevent)) { |
910 | pr_debug("EDMA: ESR%d %08x\n", j, | 1076 | pr_debug("EDMA: ESR%d %08x\n", j, |
911 | edma_shadow0_read_array(SH_ESR, j)); | 1077 | edma_shadow0_read_array(ctlr, SH_ESR, j)); |
912 | edma_shadow0_write_array(SH_ESR, j, mask); | 1078 | edma_shadow0_write_array(ctlr, SH_ESR, j, mask); |
913 | return 0; | 1079 | return 0; |
914 | } | 1080 | } |
915 | 1081 | ||
916 | /* EDMA channel with event association */ | 1082 | /* EDMA channel with event association */ |
917 | pr_debug("EDMA: ER%d %08x\n", j, | 1083 | pr_debug("EDMA: ER%d %08x\n", j, |
918 | edma_shadow0_read_array(SH_ER, j)); | 1084 | edma_shadow0_read_array(ctlr, SH_ER, j)); |
919 | /* Clear any pending error */ | 1085 | /* Clear any pending error */ |
920 | edma_write_array(EDMA_EMCR, j, mask); | 1086 | edma_write_array(ctlr, EDMA_EMCR, j, mask); |
921 | /* Clear any SER */ | 1087 | /* Clear any SER */ |
922 | edma_shadow0_write_array(SH_SECR, j, mask); | 1088 | edma_shadow0_write_array(ctlr, SH_SECR, j, mask); |
923 | edma_shadow0_write_array(SH_EESR, j, mask); | 1089 | edma_shadow0_write_array(ctlr, SH_EESR, j, mask); |
924 | pr_debug("EDMA: EER%d %08x\n", j, | 1090 | pr_debug("EDMA: EER%d %08x\n", j, |
925 | edma_shadow0_read_array(SH_EER, j)); | 1091 | edma_shadow0_read_array(ctlr, SH_EER, j)); |
926 | return 0; | 1092 | return 0; |
927 | } | 1093 | } |
928 | 1094 | ||
@@ -941,17 +1107,22 @@ EXPORT_SYMBOL(edma_start); | |||
941 | */ | 1107 | */ |
942 | void edma_stop(unsigned channel) | 1108 | void edma_stop(unsigned channel) |
943 | { | 1109 | { |
944 | if (channel < num_channels) { | 1110 | unsigned ctlr; |
1111 | |||
1112 | ctlr = EDMA_CTLR(channel); | ||
1113 | channel = EDMA_CHAN_SLOT(channel); | ||
1114 | |||
1115 | if (channel < edma_info[ctlr]->num_channels) { | ||
945 | int j = channel >> 5; | 1116 | int j = channel >> 5; |
946 | unsigned int mask = (1 << (channel & 0x1f)); | 1117 | unsigned int mask = (1 << (channel & 0x1f)); |
947 | 1118 | ||
948 | edma_shadow0_write_array(SH_EECR, j, mask); | 1119 | edma_shadow0_write_array(ctlr, SH_EECR, j, mask); |
949 | edma_shadow0_write_array(SH_ECR, j, mask); | 1120 | edma_shadow0_write_array(ctlr, SH_ECR, j, mask); |
950 | edma_shadow0_write_array(SH_SECR, j, mask); | 1121 | edma_shadow0_write_array(ctlr, SH_SECR, j, mask); |
951 | edma_write_array(EDMA_EMCR, j, mask); | 1122 | edma_write_array(ctlr, EDMA_EMCR, j, mask); |
952 | 1123 | ||
953 | pr_debug("EDMA: EER%d %08x\n", j, | 1124 | pr_debug("EDMA: EER%d %08x\n", j, |
954 | edma_shadow0_read_array(SH_EER, j)); | 1125 | edma_shadow0_read_array(ctlr, SH_EER, j)); |
955 | 1126 | ||
956 | /* REVISIT: consider guarding against inappropriate event | 1127 | /* REVISIT: consider guarding against inappropriate event |
957 | * chaining by overwriting with dummy_paramset. | 1128 | * chaining by overwriting with dummy_paramset. |
@@ -975,18 +1146,23 @@ EXPORT_SYMBOL(edma_stop); | |||
975 | 1146 | ||
976 | void edma_clean_channel(unsigned channel) | 1147 | void edma_clean_channel(unsigned channel) |
977 | { | 1148 | { |
978 | if (channel < num_channels) { | 1149 | unsigned ctlr; |
1150 | |||
1151 | ctlr = EDMA_CTLR(channel); | ||
1152 | channel = EDMA_CHAN_SLOT(channel); | ||
1153 | |||
1154 | if (channel < edma_info[ctlr]->num_channels) { | ||
979 | int j = (channel >> 5); | 1155 | int j = (channel >> 5); |
980 | unsigned int mask = 1 << (channel & 0x1f); | 1156 | unsigned int mask = 1 << (channel & 0x1f); |
981 | 1157 | ||
982 | pr_debug("EDMA: EMR%d %08x\n", j, | 1158 | pr_debug("EDMA: EMR%d %08x\n", j, |
983 | edma_read_array(EDMA_EMR, j)); | 1159 | edma_read_array(ctlr, EDMA_EMR, j)); |
984 | edma_shadow0_write_array(SH_ECR, j, mask); | 1160 | edma_shadow0_write_array(ctlr, SH_ECR, j, mask); |
985 | /* Clear the corresponding EMR bits */ | 1161 | /* Clear the corresponding EMR bits */ |
986 | edma_write_array(EDMA_EMCR, j, mask); | 1162 | edma_write_array(ctlr, EDMA_EMCR, j, mask); |
987 | /* Clear any SER */ | 1163 | /* Clear any SER */ |
988 | edma_shadow0_write_array(SH_SECR, j, mask); | 1164 | edma_shadow0_write_array(ctlr, SH_SECR, j, mask); |
989 | edma_write(EDMA_CCERRCLR, (1 << 16) | 0x3); | 1165 | edma_write(ctlr, EDMA_CCERRCLR, (1 << 16) | 0x3); |
990 | } | 1166 | } |
991 | } | 1167 | } |
992 | EXPORT_SYMBOL(edma_clean_channel); | 1168 | EXPORT_SYMBOL(edma_clean_channel); |
@@ -998,12 +1174,17 @@ EXPORT_SYMBOL(edma_clean_channel); | |||
998 | */ | 1174 | */ |
999 | void edma_clear_event(unsigned channel) | 1175 | void edma_clear_event(unsigned channel) |
1000 | { | 1176 | { |
1001 | if (channel >= num_channels) | 1177 | unsigned ctlr; |
1178 | |||
1179 | ctlr = EDMA_CTLR(channel); | ||
1180 | channel = EDMA_CHAN_SLOT(channel); | ||
1181 | |||
1182 | if (channel >= edma_info[ctlr]->num_channels) | ||
1002 | return; | 1183 | return; |
1003 | if (channel < 32) | 1184 | if (channel < 32) |
1004 | edma_write(EDMA_ECR, 1 << channel); | 1185 | edma_write(ctlr, EDMA_ECR, 1 << channel); |
1005 | else | 1186 | else |
1006 | edma_write(EDMA_ECRH, 1 << (channel - 32)); | 1187 | edma_write(ctlr, EDMA_ECRH, 1 << (channel - 32)); |
1007 | } | 1188 | } |
1008 | EXPORT_SYMBOL(edma_clear_event); | 1189 | EXPORT_SYMBOL(edma_clear_event); |
1009 | 1190 | ||
@@ -1012,62 +1193,129 @@ EXPORT_SYMBOL(edma_clear_event); | |||
1012 | static int __init edma_probe(struct platform_device *pdev) | 1193 | static int __init edma_probe(struct platform_device *pdev) |
1013 | { | 1194 | { |
1014 | struct edma_soc_info *info = pdev->dev.platform_data; | 1195 | struct edma_soc_info *info = pdev->dev.platform_data; |
1015 | int i; | 1196 | const s8 (*queue_priority_mapping)[2]; |
1016 | int status; | 1197 | const s8 (*queue_tc_mapping)[2]; |
1198 | int i, j, found = 0; | ||
1199 | int status = -1; | ||
1017 | const s8 *noevent; | 1200 | const s8 *noevent; |
1018 | int irq = 0, err_irq = 0; | 1201 | int irq[EDMA_MAX_CC] = {0, 0}; |
1019 | struct resource *r; | 1202 | int err_irq[EDMA_MAX_CC] = {0, 0}; |
1020 | resource_size_t len; | 1203 | struct resource *r[EDMA_MAX_CC] = {NULL}; |
1204 | resource_size_t len[EDMA_MAX_CC]; | ||
1205 | char res_name[10]; | ||
1206 | char irq_name[10]; | ||
1021 | 1207 | ||
1022 | if (!info) | 1208 | if (!info) |
1023 | return -ENODEV; | 1209 | return -ENODEV; |
1024 | 1210 | ||
1025 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma_cc"); | 1211 | for (j = 0; j < EDMA_MAX_CC; j++) { |
1026 | if (!r) | 1212 | sprintf(res_name, "edma_cc%d", j); |
1027 | return -ENODEV; | 1213 | r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
1214 | res_name); | ||
1215 | if (!r[j]) { | ||
1216 | if (found) | ||
1217 | break; | ||
1218 | else | ||
1219 | return -ENODEV; | ||
1220 | } else | ||
1221 | found = 1; | ||
1222 | |||
1223 | len[j] = resource_size(r[j]); | ||
1224 | |||
1225 | r[j] = request_mem_region(r[j]->start, len[j], | ||
1226 | dev_name(&pdev->dev)); | ||
1227 | if (!r[j]) { | ||
1228 | status = -EBUSY; | ||
1229 | goto fail1; | ||
1230 | } | ||
1028 | 1231 | ||
1029 | len = r->end - r->start + 1; | 1232 | edmacc_regs_base[j] = ioremap(r[j]->start, len[j]); |
1233 | if (!edmacc_regs_base[j]) { | ||
1234 | status = -EBUSY; | ||
1235 | goto fail1; | ||
1236 | } | ||
1030 | 1237 | ||
1031 | r = request_mem_region(r->start, len, r->name); | 1238 | edma_info[j] = kmalloc(sizeof(struct edma), GFP_KERNEL); |
1032 | if (!r) | 1239 | if (!edma_info[j]) { |
1033 | return -EBUSY; | 1240 | status = -ENOMEM; |
1241 | goto fail1; | ||
1242 | } | ||
1243 | memset(edma_info[j], 0, sizeof(struct edma)); | ||
1244 | |||
1245 | edma_info[j]->num_channels = min_t(unsigned, info[j].n_channel, | ||
1246 | EDMA_MAX_DMACH); | ||
1247 | edma_info[j]->num_slots = min_t(unsigned, info[j].n_slot, | ||
1248 | EDMA_MAX_PARAMENTRY); | ||
1249 | edma_info[j]->num_cc = min_t(unsigned, info[j].n_cc, | ||
1250 | EDMA_MAX_CC); | ||
1251 | |||
1252 | dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n", | ||
1253 | edmacc_regs_base[j]); | ||
1254 | |||
1255 | for (i = 0; i < edma_info[j]->num_slots; i++) | ||
1256 | memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i), | ||
1257 | &dummy_paramset, PARM_SIZE); | ||
1258 | |||
1259 | noevent = info[j].noevent; | ||
1260 | if (noevent) { | ||
1261 | while (*noevent != -1) | ||
1262 | set_bit(*noevent++, edma_info[j]->edma_noevent); | ||
1263 | } | ||
1034 | 1264 | ||
1035 | edmacc_regs_base = ioremap(r->start, len); | 1265 | sprintf(irq_name, "edma%d", j); |
1036 | if (!edmacc_regs_base) { | 1266 | irq[j] = platform_get_irq_byname(pdev, irq_name); |
1037 | status = -EBUSY; | 1267 | edma_info[j]->irq_res_start = irq[j]; |
1038 | goto fail1; | 1268 | status = request_irq(irq[j], dma_irq_handler, 0, "edma", |
1039 | } | 1269 | &pdev->dev); |
1270 | if (status < 0) { | ||
1271 | dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n", | ||
1272 | irq[j], status); | ||
1273 | goto fail; | ||
1274 | } | ||
1040 | 1275 | ||
1041 | num_channels = min_t(unsigned, info->n_channel, EDMA_MAX_DMACH); | 1276 | sprintf(irq_name, "edma%d_err", j); |
1042 | num_slots = min_t(unsigned, info->n_slot, EDMA_MAX_PARAMENTRY); | 1277 | err_irq[j] = platform_get_irq_byname(pdev, irq_name); |
1278 | edma_info[j]->irq_res_end = err_irq[j]; | ||
1279 | status = request_irq(err_irq[j], dma_ccerr_handler, 0, | ||
1280 | "edma_error", &pdev->dev); | ||
1281 | if (status < 0) { | ||
1282 | dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n", | ||
1283 | err_irq[j], status); | ||
1284 | goto fail; | ||
1285 | } | ||
1043 | 1286 | ||
1044 | dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n", edmacc_regs_base); | 1287 | /* Everything lives on transfer controller 1 until otherwise |
1288 | * specified. This way, long transfers on the low priority queue | ||
1289 | * started by the codec engine will not cause audio defects. | ||
1290 | */ | ||
1291 | for (i = 0; i < edma_info[j]->num_channels; i++) | ||
1292 | map_dmach_queue(j, i, EVENTQ_1); | ||
1045 | 1293 | ||
1046 | for (i = 0; i < num_slots; i++) | 1294 | queue_tc_mapping = info[j].queue_tc_mapping; |
1047 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(i), | 1295 | queue_priority_mapping = info[j].queue_priority_mapping; |
1048 | &dummy_paramset, PARM_SIZE); | ||
1049 | 1296 | ||
1050 | noevent = info->noevent; | 1297 | /* Event queue to TC mapping */ |
1051 | if (noevent) { | 1298 | for (i = 0; queue_tc_mapping[i][0] != -1; i++) |
1052 | while (*noevent != -1) | 1299 | map_queue_tc(j, queue_tc_mapping[i][0], |
1053 | set_bit(*noevent++, edma_noevent); | 1300 | queue_tc_mapping[i][1]); |
1054 | } | ||
1055 | 1301 | ||
1056 | irq = platform_get_irq(pdev, 0); | 1302 | /* Event queue priority mapping */ |
1057 | status = request_irq(irq, dma_irq_handler, 0, "edma", &pdev->dev); | 1303 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) |
1058 | if (status < 0) { | 1304 | assign_priority_to_queue(j, |
1059 | dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n", | 1305 | queue_priority_mapping[i][0], |
1060 | irq, status); | 1306 | queue_priority_mapping[i][1]); |
1061 | goto fail; | 1307 | |
1062 | } | 1308 | /* Map the channel to param entry if channel mapping logic |
1309 | * exist | ||
1310 | */ | ||
1311 | if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST) | ||
1312 | map_dmach_param(j); | ||
1063 | 1313 | ||
1064 | err_irq = platform_get_irq(pdev, 1); | 1314 | for (i = 0; i < info[j].n_region; i++) { |
1065 | status = request_irq(err_irq, dma_ccerr_handler, 0, | 1315 | edma_write_array2(j, EDMA_DRAE, i, 0, 0x0); |
1066 | "edma_error", &pdev->dev); | 1316 | edma_write_array2(j, EDMA_DRAE, i, 1, 0x0); |
1067 | if (status < 0) { | 1317 | edma_write_array(j, EDMA_QRAE, i, 0x0); |
1068 | dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n", | 1318 | } |
1069 | err_irq, status); | ||
1070 | goto fail; | ||
1071 | } | 1319 | } |
1072 | 1320 | ||
1073 | if (tc_errs_handled) { | 1321 | if (tc_errs_handled) { |
@@ -1087,38 +1335,23 @@ static int __init edma_probe(struct platform_device *pdev) | |||
1087 | } | 1335 | } |
1088 | } | 1336 | } |
1089 | 1337 | ||
1090 | /* Everything lives on transfer controller 1 until otherwise specified. | ||
1091 | * This way, long transfers on the low priority queue | ||
1092 | * started by the codec engine will not cause audio defects. | ||
1093 | */ | ||
1094 | for (i = 0; i < num_channels; i++) | ||
1095 | map_dmach_queue(i, EVENTQ_1); | ||
1096 | |||
1097 | /* Event queue to TC mapping */ | ||
1098 | for (i = 0; queue_tc_mapping[i][0] != -1; i++) | ||
1099 | map_queue_tc(queue_tc_mapping[i][0], queue_tc_mapping[i][1]); | ||
1100 | |||
1101 | /* Event queue priority mapping */ | ||
1102 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) | ||
1103 | assign_priority_to_queue(queue_priority_mapping[i][0], | ||
1104 | queue_priority_mapping[i][1]); | ||
1105 | |||
1106 | for (i = 0; i < info->n_region; i++) { | ||
1107 | edma_write_array2(EDMA_DRAE, i, 0, 0x0); | ||
1108 | edma_write_array2(EDMA_DRAE, i, 1, 0x0); | ||
1109 | edma_write_array(EDMA_QRAE, i, 0x0); | ||
1110 | } | ||
1111 | |||
1112 | return 0; | 1338 | return 0; |
1113 | 1339 | ||
1114 | fail: | 1340 | fail: |
1115 | if (err_irq) | 1341 | for (i = 0; i < EDMA_MAX_CC; i++) { |
1116 | free_irq(err_irq, NULL); | 1342 | if (err_irq[i]) |
1117 | if (irq) | 1343 | free_irq(err_irq[i], &pdev->dev); |
1118 | free_irq(irq, NULL); | 1344 | if (irq[i]) |
1119 | iounmap(edmacc_regs_base); | 1345 | free_irq(irq[i], &pdev->dev); |
1346 | } | ||
1120 | fail1: | 1347 | fail1: |
1121 | release_mem_region(r->start, len); | 1348 | for (i = 0; i < EDMA_MAX_CC; i++) { |
1349 | if (r[i]) | ||
1350 | release_mem_region(r[i]->start, len[i]); | ||
1351 | if (edmacc_regs_base[i]) | ||
1352 | iounmap(edmacc_regs_base[i]); | ||
1353 | kfree(edma_info[i]); | ||
1354 | } | ||
1122 | return status; | 1355 | return status; |
1123 | } | 1356 | } |
1124 | 1357 | ||