Diffstat (limited to 'arch/arm/mach-davinci/dma.c')
-rw-r--r-- | arch/arm/mach-davinci/dma.c | 955
1 file changed, 665 insertions(+), 290 deletions(-)
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index 15e9eb158bb7..f2e57d272958 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -100,132 +100,158 @@
100 | #define EDMA_SHADOW0 0x2000 /* 4 regions shadowing global channels */ | 100 | #define EDMA_SHADOW0 0x2000 /* 4 regions shadowing global channels */ |
101 | #define EDMA_PARM 0x4000 /* 128 param entries */ | 101 | #define EDMA_PARM 0x4000 /* 128 param entries */ |
102 | 102 | ||
103 | #define DAVINCI_DMA_3PCC_BASE 0x01C00000 | ||
104 | |||
105 | #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5)) | 103 | #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5)) |
106 | 104 | ||
105 | #define EDMA_DCHMAP 0x0100 /* 64 registers */ | ||
106 | #define CHMAP_EXIST BIT(24) | ||
107 | |||
107 | #define EDMA_MAX_DMACH 64 | 108 | #define EDMA_MAX_DMACH 64 |
108 | #define EDMA_MAX_PARAMENTRY 512 | 109 | #define EDMA_MAX_PARAMENTRY 512 |
109 | #define EDMA_MAX_EVQUE 2 /* FIXME too small */ | 110 | #define EDMA_MAX_CC 2 |
110 | 111 | ||
111 | 112 | ||
112 | /*****************************************************************************/ | 113 | /*****************************************************************************/ |
113 | 114 | ||
114 | static void __iomem *edmacc_regs_base; | 115 | static void __iomem *edmacc_regs_base[EDMA_MAX_CC]; |
115 | 116 | ||
116 | static inline unsigned int edma_read(int offset) | 117 | static inline unsigned int edma_read(unsigned ctlr, int offset) |
117 | { | 118 | { |
118 | return (unsigned int)__raw_readl(edmacc_regs_base + offset); | 119 | return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset); |
119 | } | 120 | } |
120 | 121 | ||
121 | static inline void edma_write(int offset, int val) | 122 | static inline void edma_write(unsigned ctlr, int offset, int val) |
122 | { | 123 | { |
123 | __raw_writel(val, edmacc_regs_base + offset); | 124 | __raw_writel(val, edmacc_regs_base[ctlr] + offset); |
124 | } | 125 | } |
125 | static inline void edma_modify(int offset, unsigned and, unsigned or) | 126 | static inline void edma_modify(unsigned ctlr, int offset, unsigned and, |
127 | unsigned or) | ||
126 | { | 128 | { |
127 | unsigned val = edma_read(offset); | 129 | unsigned val = edma_read(ctlr, offset); |
128 | val &= and; | 130 | val &= and; |
129 | val |= or; | 131 | val |= or; |
130 | edma_write(offset, val); | 132 | edma_write(ctlr, offset, val); |
131 | } | 133 | } |
132 | static inline void edma_and(int offset, unsigned and) | 134 | static inline void edma_and(unsigned ctlr, int offset, unsigned and) |
133 | { | 135 | { |
134 | unsigned val = edma_read(offset); | 136 | unsigned val = edma_read(ctlr, offset); |
135 | val &= and; | 137 | val &= and; |
136 | edma_write(offset, val); | 138 | edma_write(ctlr, offset, val); |
137 | } | 139 | } |
138 | static inline void edma_or(int offset, unsigned or) | 140 | static inline void edma_or(unsigned ctlr, int offset, unsigned or) |
139 | { | 141 | { |
140 | unsigned val = edma_read(offset); | 142 | unsigned val = edma_read(ctlr, offset); |
141 | val |= or; | 143 | val |= or; |
142 | edma_write(offset, val); | 144 | edma_write(ctlr, offset, val); |
143 | } | 145 | } |
144 | static inline unsigned int edma_read_array(int offset, int i) | 146 | static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i) |
145 | { | 147 | { |
146 | return edma_read(offset + (i << 2)); | 148 | return edma_read(ctlr, offset + (i << 2)); |
147 | } | 149 | } |
148 | static inline void edma_write_array(int offset, int i, unsigned val) | 150 | static inline void edma_write_array(unsigned ctlr, int offset, int i, |
151 | unsigned val) | ||
149 | { | 152 | { |
150 | edma_write(offset + (i << 2), val); | 153 | edma_write(ctlr, offset + (i << 2), val); |
151 | } | 154 | } |
152 | static inline void edma_modify_array(int offset, int i, | 155 | static inline void edma_modify_array(unsigned ctlr, int offset, int i, |
153 | unsigned and, unsigned or) | 156 | unsigned and, unsigned or) |
154 | { | 157 | { |
155 | edma_modify(offset + (i << 2), and, or); | 158 | edma_modify(ctlr, offset + (i << 2), and, or); |
156 | } | 159 | } |
157 | static inline void edma_or_array(int offset, int i, unsigned or) | 160 | static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or) |
158 | { | 161 | { |
159 | edma_or(offset + (i << 2), or); | 162 | edma_or(ctlr, offset + (i << 2), or); |
160 | } | 163 | } |
161 | static inline void edma_or_array2(int offset, int i, int j, unsigned or) | 164 | static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j, |
165 | unsigned or) | ||
162 | { | 166 | { |
163 | edma_or(offset + ((i*2 + j) << 2), or); | 167 | edma_or(ctlr, offset + ((i*2 + j) << 2), or); |
164 | } | 168 | } |
165 | static inline void edma_write_array2(int offset, int i, int j, unsigned val) | 169 | static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j, |
170 | unsigned val) | ||
166 | { | 171 | { |
167 | edma_write(offset + ((i*2 + j) << 2), val); | 172 | edma_write(ctlr, offset + ((i*2 + j) << 2), val); |
168 | } | 173 | } |
169 | static inline unsigned int edma_shadow0_read(int offset) | 174 | static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset) |
170 | { | 175 | { |
171 | return edma_read(EDMA_SHADOW0 + offset); | 176 | return edma_read(ctlr, EDMA_SHADOW0 + offset); |
172 | } | 177 | } |
173 | static inline unsigned int edma_shadow0_read_array(int offset, int i) | 178 | static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset, |
179 | int i) | ||
174 | { | 180 | { |
175 | return edma_read(EDMA_SHADOW0 + offset + (i << 2)); | 181 | return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2)); |
176 | } | 182 | } |
177 | static inline void edma_shadow0_write(int offset, unsigned val) | 183 | static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val) |
178 | { | 184 | { |
179 | edma_write(EDMA_SHADOW0 + offset, val); | 185 | edma_write(ctlr, EDMA_SHADOW0 + offset, val); |
180 | } | 186 | } |
181 | static inline void edma_shadow0_write_array(int offset, int i, unsigned val) | 187 | static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i, |
188 | unsigned val) | ||
182 | { | 189 | { |
183 | edma_write(EDMA_SHADOW0 + offset + (i << 2), val); | 190 | edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val); |
184 | } | 191 | } |
185 | static inline unsigned int edma_parm_read(int offset, int param_no) | 192 | static inline unsigned int edma_parm_read(unsigned ctlr, int offset, |
193 | int param_no) | ||
186 | { | 194 | { |
187 | return edma_read(EDMA_PARM + offset + (param_no << 5)); | 195 | return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5)); |
188 | } | 196 | } |
189 | static inline void edma_parm_write(int offset, int param_no, unsigned val) | 197 | static inline void edma_parm_write(unsigned ctlr, int offset, int param_no, |
198 | unsigned val) | ||
190 | { | 199 | { |
191 | edma_write(EDMA_PARM + offset + (param_no << 5), val); | 200 | edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val); |
192 | } | 201 | } |
193 | static inline void edma_parm_modify(int offset, int param_no, | 202 | static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no, |
194 | unsigned and, unsigned or) | 203 | unsigned and, unsigned or) |
195 | { | 204 | { |
196 | edma_modify(EDMA_PARM + offset + (param_no << 5), and, or); | 205 | edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or); |
197 | } | 206 | } |
198 | static inline void edma_parm_and(int offset, int param_no, unsigned and) | 207 | static inline void edma_parm_and(unsigned ctlr, int offset, int param_no, |
208 | unsigned and) | ||
199 | { | 209 | { |
200 | edma_and(EDMA_PARM + offset + (param_no << 5), and); | 210 | edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and); |
201 | } | 211 | } |
202 | static inline void edma_parm_or(int offset, int param_no, unsigned or) | 212 | static inline void edma_parm_or(unsigned ctlr, int offset, int param_no, |
213 | unsigned or) | ||
203 | { | 214 | { |
204 | edma_or(EDMA_PARM + offset + (param_no << 5), or); | 215 | edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or); |
205 | } | 216 | } |
206 | 217 | ||
207 | /*****************************************************************************/ | 218 | /*****************************************************************************/ |
208 | 219 | ||
209 | /* actual number of DMA channels and slots on this silicon */ | 220 | /* actual number of DMA channels and slots on this silicon */ |
210 | static unsigned num_channels; | 221 | struct edma { |
211 | static unsigned num_slots; | 222 | /* how many dma resources of each type */ |
223 | unsigned num_channels; | ||
224 | unsigned num_region; | ||
225 | unsigned num_slots; | ||
226 | unsigned num_tc; | ||
227 | unsigned num_cc; | ||
228 | enum dma_event_q default_queue; | ||
229 | |||
230 | /* list of channels with no event trigger; terminated by "-1" */ | ||
231 | const s8 *noevent; | ||
232 | |||
233 | /* The edma_inuse bit for each PaRAM slot is clear unless the | ||
234 | * channel is in use ... by ARM or DSP, for QDMA, or whatever. | ||
235 | */ | ||
236 | DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY); | ||
212 | 237 | ||
213 | static struct dma_interrupt_data { | 238 | /* The edma_noevent bit for each channel is clear unless |
214 | void (*callback)(unsigned channel, unsigned short ch_status, | 239 | * it doesn't trigger DMA events on this platform. It uses a |
215 | void *data); | 240 | * bit of SOC-specific initialization code. |
216 | void *data; | 241 | */ |
217 | } intr_data[EDMA_MAX_DMACH]; | 242 | DECLARE_BITMAP(edma_noevent, EDMA_MAX_DMACH); |
218 | 243 | ||
219 | /* The edma_inuse bit for each PaRAM slot is clear unless the | 244 | unsigned irq_res_start; |
220 | * channel is in use ... by ARM or DSP, for QDMA, or whatever. | 245 | unsigned irq_res_end; |
221 | */ | ||
222 | static DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY); | ||
223 | 246 | ||
224 | /* The edma_noevent bit for each channel is clear unless | 247 | struct dma_interrupt_data { |
225 | * it doesn't trigger DMA events on this platform. It uses a | 248 | void (*callback)(unsigned channel, unsigned short ch_status, |
226 | * bit of SOC-specific initialization code. | 249 | void *data); |
227 | */ | 250 | void *data; |
228 | static DECLARE_BITMAP(edma_noevent, EDMA_MAX_DMACH); | 251 | } intr_data[EDMA_MAX_DMACH]; |
252 | }; | ||
253 | |||
254 | static struct edma *edma_info[EDMA_MAX_CC]; | ||
229 | 255 | ||
230 | /* dummy param set used to (re)initialize parameter RAM slots */ | 256 | /* dummy param set used to (re)initialize parameter RAM slots */ |
231 | static const struct edmacc_param dummy_paramset = { | 257 | static const struct edmacc_param dummy_paramset = { |
@@ -233,47 +259,52 @@ static const struct edmacc_param dummy_paramset = {
233 | .ccnt = 1, | 259 | .ccnt = 1, |
234 | }; | 260 | }; |
235 | 261 | ||
236 | static const int __initconst | ||
237 | queue_tc_mapping[EDMA_MAX_EVQUE + 1][2] = { | ||
238 | /* {event queue no, TC no} */ | ||
239 | {0, 0}, | ||
240 | {1, 1}, | ||
241 | {-1, -1} | ||
242 | }; | ||
243 | |||
244 | static const int __initconst | ||
245 | queue_priority_mapping[EDMA_MAX_EVQUE + 1][2] = { | ||
246 | /* {event queue no, Priority} */ | ||
247 | {0, 3}, | ||
248 | {1, 7}, | ||
249 | {-1, -1} | ||
250 | }; | ||
251 | |||
252 | /*****************************************************************************/ | 262 | /*****************************************************************************/ |
253 | 263 | ||
254 | static void map_dmach_queue(unsigned ch_no, enum dma_event_q queue_no) | 264 | static void map_dmach_queue(unsigned ctlr, unsigned ch_no, |
265 | enum dma_event_q queue_no) | ||
255 | { | 266 | { |
256 | int bit = (ch_no & 0x7) * 4; | 267 | int bit = (ch_no & 0x7) * 4; |
257 | 268 | ||
258 | /* default to low priority queue */ | 269 | /* default to low priority queue */ |
259 | if (queue_no == EVENTQ_DEFAULT) | 270 | if (queue_no == EVENTQ_DEFAULT) |
260 | queue_no = EVENTQ_1; | 271 | queue_no = edma_info[ctlr]->default_queue; |
261 | 272 | ||
262 | queue_no &= 7; | 273 | queue_no &= 7; |
263 | edma_modify_array(EDMA_DMAQNUM, (ch_no >> 3), | 274 | edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3), |
264 | ~(0x7 << bit), queue_no << bit); | 275 | ~(0x7 << bit), queue_no << bit); |
265 | } | 276 | } |
266 | 277 | ||
267 | static void __init map_queue_tc(int queue_no, int tc_no) | 278 | static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no) |
268 | { | 279 | { |
269 | int bit = queue_no * 4; | 280 | int bit = queue_no * 4; |
270 | edma_modify(EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit)); | 281 | edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit)); |
271 | } | 282 | } |
272 | 283 | ||
273 | static void __init assign_priority_to_queue(int queue_no, int priority) | 284 | static void __init assign_priority_to_queue(unsigned ctlr, int queue_no, |
285 | int priority) | ||
274 | { | 286 | { |
275 | int bit = queue_no * 4; | 287 | int bit = queue_no * 4; |
276 | edma_modify(EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit)); | 288 | edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit), |
289 | ((priority & 0x7) << bit)); | ||
290 | } | ||
291 | |||
292 | /** | ||
293 | * map_dmach_param - Maps channel number to param entry number | ||
294 | * | ||
295 | * This maps the DMA channel number to a param entry number. In | ||
296 | * other words, using the DMA channel mapping registers, a param entry | ||
297 | * can be mapped to any channel. | ||
298 | * | ||
299 | * Callers are responsible for ensuring the channel mapping logic is | ||
300 | * included in that particular EDMA variant (e.g. dm646x). | ||
301 | * | ||
302 | */ | ||
303 | static void __init map_dmach_param(unsigned ctlr) | ||
304 | { | ||
305 | int i; | ||
306 | for (i = 0; i < EDMA_MAX_DMACH; i++) | ||
307 | edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5)); | ||
277 | } | 308 | } |
278 | 309 | ||
279 | static inline void | 310 | static inline void |
@@ -281,22 +312,39 @@ setup_dma_interrupt(unsigned lch,
281 | void (*callback)(unsigned channel, u16 ch_status, void *data), | 312 | void (*callback)(unsigned channel, u16 ch_status, void *data), |
282 | void *data) | 313 | void *data) |
283 | { | 314 | { |
315 | unsigned ctlr; | ||
316 | |||
317 | ctlr = EDMA_CTLR(lch); | ||
318 | lch = EDMA_CHAN_SLOT(lch); | ||
319 | |||
284 | if (!callback) { | 320 | if (!callback) { |
285 | edma_shadow0_write_array(SH_IECR, lch >> 5, | 321 | edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5, |
286 | (1 << (lch & 0x1f))); | 322 | (1 << (lch & 0x1f))); |
287 | } | 323 | } |
288 | 324 | ||
289 | intr_data[lch].callback = callback; | 325 | edma_info[ctlr]->intr_data[lch].callback = callback; |
290 | intr_data[lch].data = data; | 326 | edma_info[ctlr]->intr_data[lch].data = data; |
291 | 327 | ||
292 | if (callback) { | 328 | if (callback) { |
293 | edma_shadow0_write_array(SH_ICR, lch >> 5, | 329 | edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5, |
294 | (1 << (lch & 0x1f))); | 330 | (1 << (lch & 0x1f))); |
295 | edma_shadow0_write_array(SH_IESR, lch >> 5, | 331 | edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5, |
296 | (1 << (lch & 0x1f))); | 332 | (1 << (lch & 0x1f))); |
297 | } | 333 | } |
298 | } | 334 | } |
299 | 335 | ||
336 | static int irq2ctlr(int irq) | ||
337 | { | ||
338 | if (irq >= edma_info[0]->irq_res_start && | ||
339 | irq <= edma_info[0]->irq_res_end) | ||
340 | return 0; | ||
341 | else if (irq >= edma_info[1]->irq_res_start && | ||
342 | irq <= edma_info[1]->irq_res_end) | ||
343 | return 1; | ||
344 | |||
345 | return -1; | ||
346 | } | ||
347 | |||
300 | /****************************************************************************** | 348 | /****************************************************************************** |
301 | * | 349 | * |
302 | * DMA interrupt handler | 350 | * DMA interrupt handler |
@@ -305,32 +353,39 @@ setup_dma_interrupt(unsigned lch,
305 | static irqreturn_t dma_irq_handler(int irq, void *data) | 353 | static irqreturn_t dma_irq_handler(int irq, void *data) |
306 | { | 354 | { |
307 | int i; | 355 | int i; |
356 | unsigned ctlr; | ||
308 | unsigned int cnt = 0; | 357 | unsigned int cnt = 0; |
309 | 358 | ||
359 | ctlr = irq2ctlr(irq); | ||
360 | |||
310 | dev_dbg(data, "dma_irq_handler\n"); | 361 | dev_dbg(data, "dma_irq_handler\n"); |
311 | 362 | ||
312 | if ((edma_shadow0_read_array(SH_IPR, 0) == 0) | 363 | if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0) |
313 | && (edma_shadow0_read_array(SH_IPR, 1) == 0)) | 364 | && (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0)) |
314 | return IRQ_NONE; | 365 | return IRQ_NONE; |
315 | 366 | ||
316 | while (1) { | 367 | while (1) { |
317 | int j; | 368 | int j; |
318 | if (edma_shadow0_read_array(SH_IPR, 0)) | 369 | if (edma_shadow0_read_array(ctlr, SH_IPR, 0)) |
319 | j = 0; | 370 | j = 0; |
320 | else if (edma_shadow0_read_array(SH_IPR, 1)) | 371 | else if (edma_shadow0_read_array(ctlr, SH_IPR, 1)) |
321 | j = 1; | 372 | j = 1; |
322 | else | 373 | else |
323 | break; | 374 | break; |
324 | dev_dbg(data, "IPR%d %08x\n", j, | 375 | dev_dbg(data, "IPR%d %08x\n", j, |
325 | edma_shadow0_read_array(SH_IPR, j)); | 376 | edma_shadow0_read_array(ctlr, SH_IPR, j)); |
326 | for (i = 0; i < 32; i++) { | 377 | for (i = 0; i < 32; i++) { |
327 | int k = (j << 5) + i; | 378 | int k = (j << 5) + i; |
328 | if (edma_shadow0_read_array(SH_IPR, j) & (1 << i)) { | 379 | if (edma_shadow0_read_array(ctlr, SH_IPR, j) & |
380 | (1 << i)) { | ||
329 | /* Clear the corresponding IPR bits */ | 381 | /* Clear the corresponding IPR bits */ |
330 | edma_shadow0_write_array(SH_ICR, j, (1 << i)); | 382 | edma_shadow0_write_array(ctlr, SH_ICR, j, |
331 | if (intr_data[k].callback) { | 383 | (1 << i)); |
332 | intr_data[k].callback(k, DMA_COMPLETE, | 384 | if (edma_info[ctlr]->intr_data[k].callback) { |
333 | intr_data[k].data); | 385 | edma_info[ctlr]->intr_data[k].callback( |
386 | k, DMA_COMPLETE, | ||
387 | edma_info[ctlr]->intr_data[k]. | ||
388 | data); | ||
334 | } | 389 | } |
335 | } | 390 | } |
336 | } | 391 | } |
@@ -338,7 +393,7 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
338 | if (cnt > 10) | 393 | if (cnt > 10) |
339 | break; | 394 | break; |
340 | } | 395 | } |
341 | edma_shadow0_write(SH_IEVAL, 1); | 396 | edma_shadow0_write(ctlr, SH_IEVAL, 1); |
342 | return IRQ_HANDLED; | 397 | return IRQ_HANDLED; |
343 | } | 398 | } |
344 | 399 | ||
@@ -350,78 +405,87 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
350 | static irqreturn_t dma_ccerr_handler(int irq, void *data) | 405 | static irqreturn_t dma_ccerr_handler(int irq, void *data) |
351 | { | 406 | { |
352 | int i; | 407 | int i; |
408 | unsigned ctlr; | ||
353 | unsigned int cnt = 0; | 409 | unsigned int cnt = 0; |
354 | 410 | ||
411 | ctlr = irq2ctlr(irq); | ||
412 | |||
355 | dev_dbg(data, "dma_ccerr_handler\n"); | 413 | dev_dbg(data, "dma_ccerr_handler\n"); |
356 | 414 | ||
357 | if ((edma_read_array(EDMA_EMR, 0) == 0) && | 415 | if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) && |
358 | (edma_read_array(EDMA_EMR, 1) == 0) && | 416 | (edma_read_array(ctlr, EDMA_EMR, 1) == 0) && |
359 | (edma_read(EDMA_QEMR) == 0) && (edma_read(EDMA_CCERR) == 0)) | 417 | (edma_read(ctlr, EDMA_QEMR) == 0) && |
418 | (edma_read(ctlr, EDMA_CCERR) == 0)) | ||
360 | return IRQ_NONE; | 419 | return IRQ_NONE; |
361 | 420 | ||
362 | while (1) { | 421 | while (1) { |
363 | int j = -1; | 422 | int j = -1; |
364 | if (edma_read_array(EDMA_EMR, 0)) | 423 | if (edma_read_array(ctlr, EDMA_EMR, 0)) |
365 | j = 0; | 424 | j = 0; |
366 | else if (edma_read_array(EDMA_EMR, 1)) | 425 | else if (edma_read_array(ctlr, EDMA_EMR, 1)) |
367 | j = 1; | 426 | j = 1; |
368 | if (j >= 0) { | 427 | if (j >= 0) { |
369 | dev_dbg(data, "EMR%d %08x\n", j, | 428 | dev_dbg(data, "EMR%d %08x\n", j, |
370 | edma_read_array(EDMA_EMR, j)); | 429 | edma_read_array(ctlr, EDMA_EMR, j)); |
371 | for (i = 0; i < 32; i++) { | 430 | for (i = 0; i < 32; i++) { |
372 | int k = (j << 5) + i; | 431 | int k = (j << 5) + i; |
373 | if (edma_read_array(EDMA_EMR, j) & (1 << i)) { | 432 | if (edma_read_array(ctlr, EDMA_EMR, j) & |
433 | (1 << i)) { | ||
374 | /* Clear the corresponding EMR bits */ | 434 | /* Clear the corresponding EMR bits */ |
375 | edma_write_array(EDMA_EMCR, j, 1 << i); | 435 | edma_write_array(ctlr, EDMA_EMCR, j, |
436 | 1 << i); | ||
376 | /* Clear any SER */ | 437 | /* Clear any SER */ |
377 | edma_shadow0_write_array(SH_SECR, j, | 438 | edma_shadow0_write_array(ctlr, SH_SECR, |
378 | (1 << i)); | 439 | j, (1 << i)); |
379 | if (intr_data[k].callback) { | 440 | if (edma_info[ctlr]->intr_data[k]. |
380 | intr_data[k].callback(k, | 441 | callback) { |
381 | DMA_CC_ERROR, | 442 | edma_info[ctlr]->intr_data[k]. |
382 | intr_data | 443 | callback(k, |
383 | [k].data); | 444 | DMA_CC_ERROR, |
445 | edma_info[ctlr]->intr_data | ||
446 | [k].data); | ||
384 | } | 447 | } |
385 | } | 448 | } |
386 | } | 449 | } |
387 | } else if (edma_read(EDMA_QEMR)) { | 450 | } else if (edma_read(ctlr, EDMA_QEMR)) { |
388 | dev_dbg(data, "QEMR %02x\n", | 451 | dev_dbg(data, "QEMR %02x\n", |
389 | edma_read(EDMA_QEMR)); | 452 | edma_read(ctlr, EDMA_QEMR)); |
390 | for (i = 0; i < 8; i++) { | 453 | for (i = 0; i < 8; i++) { |
391 | if (edma_read(EDMA_QEMR) & (1 << i)) { | 454 | if (edma_read(ctlr, EDMA_QEMR) & (1 << i)) { |
392 | /* Clear the corresponding IPR bits */ | 455 | /* Clear the corresponding IPR bits */ |
393 | edma_write(EDMA_QEMCR, 1 << i); | 456 | edma_write(ctlr, EDMA_QEMCR, 1 << i); |
394 | edma_shadow0_write(SH_QSECR, (1 << i)); | 457 | edma_shadow0_write(ctlr, SH_QSECR, |
458 | (1 << i)); | ||
395 | 459 | ||
396 | /* NOTE: not reported!! */ | 460 | /* NOTE: not reported!! */ |
397 | } | 461 | } |
398 | } | 462 | } |
399 | } else if (edma_read(EDMA_CCERR)) { | 463 | } else if (edma_read(ctlr, EDMA_CCERR)) { |
400 | dev_dbg(data, "CCERR %08x\n", | 464 | dev_dbg(data, "CCERR %08x\n", |
401 | edma_read(EDMA_CCERR)); | 465 | edma_read(ctlr, EDMA_CCERR)); |
402 | /* FIXME: CCERR.BIT(16) ignored! much better | 466 | /* FIXME: CCERR.BIT(16) ignored! much better |
403 | * to just write CCERRCLR with CCERR value... | 467 | * to just write CCERRCLR with CCERR value... |
404 | */ | 468 | */ |
405 | for (i = 0; i < 8; i++) { | 469 | for (i = 0; i < 8; i++) { |
406 | if (edma_read(EDMA_CCERR) & (1 << i)) { | 470 | if (edma_read(ctlr, EDMA_CCERR) & (1 << i)) { |
407 | /* Clear the corresponding IPR bits */ | 471 | /* Clear the corresponding IPR bits */ |
408 | edma_write(EDMA_CCERRCLR, 1 << i); | 472 | edma_write(ctlr, EDMA_CCERRCLR, 1 << i); |
409 | 473 | ||
410 | /* NOTE: not reported!! */ | 474 | /* NOTE: not reported!! */ |
411 | } | 475 | } |
412 | } | 476 | } |
413 | } | 477 | } |
414 | if ((edma_read_array(EDMA_EMR, 0) == 0) | 478 | if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) |
415 | && (edma_read_array(EDMA_EMR, 1) == 0) | 479 | && (edma_read_array(ctlr, EDMA_EMR, 1) == 0) |
416 | && (edma_read(EDMA_QEMR) == 0) | 480 | && (edma_read(ctlr, EDMA_QEMR) == 0) |
417 | && (edma_read(EDMA_CCERR) == 0)) { | 481 | && (edma_read(ctlr, EDMA_CCERR) == 0)) { |
418 | break; | 482 | break; |
419 | } | 483 | } |
420 | cnt++; | 484 | cnt++; |
421 | if (cnt > 10) | 485 | if (cnt > 10) |
422 | break; | 486 | break; |
423 | } | 487 | } |
424 | edma_write(EDMA_EEVAL, 1); | 488 | edma_write(ctlr, EDMA_EEVAL, 1); |
425 | return IRQ_HANDLED; | 489 | return IRQ_HANDLED; |
426 | } | 490 | } |
427 | 491 | ||
@@ -445,6 +509,45 @@ static irqreturn_t dma_tc1err_handler(int irq, void *data)
445 | return IRQ_HANDLED; | 509 | return IRQ_HANDLED; |
446 | } | 510 | } |
447 | 511 | ||
512 | static int reserve_contiguous_params(int ctlr, unsigned int id, | ||
513 | unsigned int num_params, | ||
514 | unsigned int start_param) | ||
515 | { | ||
516 | int i, j; | ||
517 | unsigned int count = num_params; | ||
518 | |||
519 | for (i = start_param; i < edma_info[ctlr]->num_slots; ++i) { | ||
520 | j = EDMA_CHAN_SLOT(i); | ||
521 | if (!test_and_set_bit(j, edma_info[ctlr]->edma_inuse)) | ||
522 | count--; | ||
523 | if (count == 0) | ||
524 | break; | ||
525 | else if (id == EDMA_CONT_PARAMS_FIXED_EXACT) | ||
526 | break; | ||
527 | else | ||
528 | count = num_params; | ||
529 | } | ||
530 | |||
531 | /* | ||
532 | * We have to clear any bits that we set | ||
533 | * if we run out of parameter RAMs, i.e. we do find a set | ||
534 | * of contiguous parameter RAMs but do not find the exact number | ||
535 | * requested as we may reach the total number of parameter RAMs | ||
536 | */ | ||
537 | if (count) { | ||
538 | for (j = i - num_params + count + 1; j <= i ; ++j) | ||
539 | clear_bit(j, edma_info[ctlr]->edma_inuse); | ||
540 | |||
541 | return -EBUSY; | ||
542 | } | ||
543 | |||
544 | for (j = i - num_params + 1; j <= i; ++j) | ||
545 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j), | ||
546 | &dummy_paramset, PARM_SIZE); | ||
547 | |||
548 | return EDMA_CTLR_CHAN(ctlr, i - num_params + 1); | ||
549 | } | ||
550 | |||
448 | /*-----------------------------------------------------------------------*/ | 551 | /*-----------------------------------------------------------------------*/ |
449 | 552 | ||
450 | /* Resource alloc/free: dma channels, parameter RAM slots */ | 553 | /* Resource alloc/free: dma channels, parameter RAM slots */ |
@@ -484,35 +587,53 @@ int edma_alloc_channel(int channel,
484 | void *data, | 587 | void *data, |
485 | enum dma_event_q eventq_no) | 588 | enum dma_event_q eventq_no) |
486 | { | 589 | { |
590 | unsigned i, done, ctlr = 0; | ||
591 | |||
592 | if (channel >= 0) { | ||
593 | ctlr = EDMA_CTLR(channel); | ||
594 | channel = EDMA_CHAN_SLOT(channel); | ||
595 | } | ||
596 | |||
487 | if (channel < 0) { | 597 | if (channel < 0) { |
488 | channel = 0; | 598 | for (i = 0; i < EDMA_MAX_CC; i++) { |
489 | for (;;) { | 599 | channel = 0; |
490 | channel = find_next_bit(edma_noevent, | 600 | for (;;) { |
491 | num_channels, channel); | 601 | channel = find_next_bit(edma_info[i]-> |
492 | if (channel == num_channels) | 602 | edma_noevent, |
493 | return -ENOMEM; | 603 | edma_info[i]->num_channels, |
494 | if (!test_and_set_bit(channel, edma_inuse)) | 604 | channel); |
605 | if (channel == edma_info[i]->num_channels) | ||
606 | return -ENOMEM; | ||
607 | if (!test_and_set_bit(channel, | ||
608 | edma_info[i]->edma_inuse)) { | ||
609 | done = 1; | ||
610 | ctlr = i; | ||
611 | break; | ||
612 | } | ||
613 | channel++; | ||
614 | } | ||
615 | if (done) | ||
495 | break; | 616 | break; |
496 | channel++; | ||
497 | } | 617 | } |
498 | } else if (channel >= num_channels) { | 618 | } else if (channel >= edma_info[ctlr]->num_channels) { |
499 | return -EINVAL; | 619 | return -EINVAL; |
500 | } else if (test_and_set_bit(channel, edma_inuse)) { | 620 | } else if (test_and_set_bit(channel, edma_info[ctlr]->edma_inuse)) { |
501 | return -EBUSY; | 621 | return -EBUSY; |
502 | } | 622 | } |
503 | 623 | ||
504 | /* ensure access through shadow region 0 */ | 624 | /* ensure access through shadow region 0 */ |
505 | edma_or_array2(EDMA_DRAE, 0, channel >> 5, 1 << (channel & 0x1f)); | 625 | edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, 1 << (channel & 0x1f)); |
506 | 626 | ||
507 | /* ensure no events are pending */ | 627 | /* ensure no events are pending */ |
508 | edma_stop(channel); | 628 | edma_stop(EDMA_CTLR_CHAN(ctlr, channel)); |
509 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(channel), | 629 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel), |
510 | &dummy_paramset, PARM_SIZE); | 630 | &dummy_paramset, PARM_SIZE); |
511 | 631 | ||
512 | if (callback) | 632 | if (callback) |
513 | setup_dma_interrupt(channel, callback, data); | 633 | setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel), |
634 | callback, data); | ||
514 | 635 | ||
515 | map_dmach_queue(channel, eventq_no); | 636 | map_dmach_queue(ctlr, channel, eventq_no); |
516 | 637 | ||
517 | return channel; | 638 | return channel; |
518 | } | 639 | } |
@@ -532,15 +653,20 @@ EXPORT_SYMBOL(edma_alloc_channel);
532 | */ | 653 | */ |
533 | void edma_free_channel(unsigned channel) | 654 | void edma_free_channel(unsigned channel) |
534 | { | 655 | { |
535 | if (channel >= num_channels) | 656 | unsigned ctlr; |
657 | |||
658 | ctlr = EDMA_CTLR(channel); | ||
659 | channel = EDMA_CHAN_SLOT(channel); | ||
660 | |||
661 | if (channel >= edma_info[ctlr]->num_channels) | ||
536 | return; | 662 | return; |
537 | 663 | ||
538 | setup_dma_interrupt(channel, NULL, NULL); | 664 | setup_dma_interrupt(channel, NULL, NULL); |
539 | /* REVISIT should probably take out of shadow region 0 */ | 665 | /* REVISIT should probably take out of shadow region 0 */ |
540 | 666 | ||
541 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(channel), | 667 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel), |
542 | &dummy_paramset, PARM_SIZE); | 668 | &dummy_paramset, PARM_SIZE); |
543 | clear_bit(channel, edma_inuse); | 669 | clear_bit(channel, edma_info[ctlr]->edma_inuse); |
544 | } | 670 | } |
545 | EXPORT_SYMBOL(edma_free_channel); | 671 | EXPORT_SYMBOL(edma_free_channel); |
546 | 672 | ||
@@ -558,28 +684,33 @@ EXPORT_SYMBOL(edma_free_channel);
558 | * | 684 | * |
559 | * Returns the number of the slot, else negative errno. | 685 | * Returns the number of the slot, else negative errno. |
560 | */ | 686 | */ |
561 | int edma_alloc_slot(int slot) | 687 | int edma_alloc_slot(unsigned ctlr, int slot) |
562 | { | 688 | { |
689 | if (slot >= 0) | ||
690 | slot = EDMA_CHAN_SLOT(slot); | ||
691 | |||
563 | if (slot < 0) { | 692 | if (slot < 0) { |
564 | slot = num_channels; | 693 | slot = edma_info[ctlr]->num_channels; |
565 | for (;;) { | 694 | for (;;) { |
566 | slot = find_next_zero_bit(edma_inuse, | 695 | slot = find_next_zero_bit(edma_info[ctlr]->edma_inuse, |
567 | num_slots, slot); | 696 | edma_info[ctlr]->num_slots, slot); |
568 | if (slot == num_slots) | 697 | if (slot == edma_info[ctlr]->num_slots) |
569 | return -ENOMEM; | 698 | return -ENOMEM; |
570 | if (!test_and_set_bit(slot, edma_inuse)) | 699 | if (!test_and_set_bit(slot, |
700 | edma_info[ctlr]->edma_inuse)) | ||
571 | break; | 701 | break; |
572 | } | 702 | } |
573 | } else if (slot < num_channels || slot >= num_slots) { | 703 | } else if (slot < edma_info[ctlr]->num_channels || |
704 | slot >= edma_info[ctlr]->num_slots) { | ||
574 | return -EINVAL; | 705 | return -EINVAL; |
575 | } else if (test_and_set_bit(slot, edma_inuse)) { | 706 | } else if (test_and_set_bit(slot, edma_info[ctlr]->edma_inuse)) { |
576 | return -EBUSY; | 707 | return -EBUSY; |
577 | } | 708 | } |
578 | 709 | ||
579 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot), | 710 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), |
580 | &dummy_paramset, PARM_SIZE); | 711 | &dummy_paramset, PARM_SIZE); |
581 | 712 | ||
582 | return slot; | 713 | return EDMA_CTLR_CHAN(ctlr, slot); |
583 | } | 714 | } |
584 | EXPORT_SYMBOL(edma_alloc_slot); | 715 | EXPORT_SYMBOL(edma_alloc_slot); |
585 | 716 | ||
@@ -593,15 +724,119 @@ EXPORT_SYMBOL(edma_alloc_slot);
593 | */ | 724 | */ |
594 | void edma_free_slot(unsigned slot) | 725 | void edma_free_slot(unsigned slot) |
595 | { | 726 | { |
596 | if (slot < num_channels || slot >= num_slots) | 727 | unsigned ctlr; |
728 | |||
729 | ctlr = EDMA_CTLR(slot); | ||
730 | slot = EDMA_CHAN_SLOT(slot); | ||
731 | |||
732 | if (slot < edma_info[ctlr]->num_channels || | ||
733 | slot >= edma_info[ctlr]->num_slots) | ||
597 | return; | 734 | return; |
598 | 735 | ||
599 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot), | 736 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), |
600 | &dummy_paramset, PARM_SIZE); | 737 | &dummy_paramset, PARM_SIZE); |
601 | clear_bit(slot, edma_inuse); | 738 | clear_bit(slot, edma_info[ctlr]->edma_inuse); |
602 | } | 739 | } |
603 | EXPORT_SYMBOL(edma_free_slot); | 740 | EXPORT_SYMBOL(edma_free_slot); |
604 | 741 | ||
742 | |||
743 | /** | ||
744 | * edma_alloc_cont_slots - allocate contiguous parameter RAM slots | ||
745 | * The API will return the starting point of a set of | ||
746 | * contiguous PARAMs that have been requested | ||
747 | * | ||
748 | * @id: can only be EDMA_CONT_PARAMS_ANY or EDMA_CONT_PARAMS_FIXED_EXACT | ||
749 | * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT | ||
750 | * @count: number of contiguous Parameter RAMs | ||
751 | * @param: the start value of Parameter RAM that should be passed if id | ||
752 | * is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT | ||
753 | * | ||
754 | * If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of | ||
755 | * contiguous Parameter RAMs from parameter RAM 64 in the case of DaVinci SOCs | ||
756 | * and 32 in the case of Primus | ||
757 | * | ||
758 | * If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a | ||
759 | * set of contiguous parameter RAMs from the "param" that is passed as an | ||
760 | * argument to the API. | ||
761 | * | ||
762 | * If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially | ||
763 | * starts looking for a set of contiguous parameter RAMs from the "param" | ||
764 | * that is passed as an argument to the API. On failure the API will try to | ||
765 | * find a set of contiguous Parameter RAMs in the remaining Parameter RAMs | ||
766 | */ | ||
767 | int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count) | ||
768 | { | ||
769 | /* | ||
770 | * The start slot requested should be greater than | ||
771 | * the number of channels and lesser than the total number | ||
772 | * of slots | ||
773 | */ | ||
774 | if (slot < edma_info[ctlr]->num_channels || | ||
775 | slot >= edma_info[ctlr]->num_slots) | ||
776 | return -EINVAL; | ||
777 | |||
778 | /* | ||
779 | * The number of parameter RAMs requested cannot be less than 1 | ||
780 | * and cannot be more than the number of slots minus the number of | ||
781 | * channels | ||
782 | */ | ||
783 | if (count < 1 || count > | ||
784 | (edma_info[ctlr]->num_slots - edma_info[ctlr]->num_channels)) | ||
785 | return -EINVAL; | ||
786 | |||
787 | switch (id) { | ||
788 | case EDMA_CONT_PARAMS_ANY: | ||
789 | return reserve_contiguous_params(ctlr, id, count, | ||
790 | edma_info[ctlr]->num_channels); | ||
791 | case EDMA_CONT_PARAMS_FIXED_EXACT: | ||
792 | case EDMA_CONT_PARAMS_FIXED_NOT_EXACT: | ||
793 | return reserve_contiguous_params(ctlr, id, count, slot); | ||
794 | default: | ||
795 | return -EINVAL; | ||
796 | } | ||
797 | |||
798 | } | ||
799 | EXPORT_SYMBOL(edma_alloc_cont_slots); | ||
800 | |||
801 | /** | ||
802 | * edma_free_cont_slots - deallocate DMA parameter RAMs | ||
803 | * @slot: first parameter RAM of a set of parameter RAMs to be freed | ||
804 | * @count: the number of contiguous parameter RAMs to be freed | ||
805 | * | ||
806 | * This deallocates the parameter RAM slots allocated by | ||
807 | * edma_alloc_cont_slots. | ||
808 | * Callers/applications need to keep track of sets of contiguous | ||
809 | * parameter RAMs that have been allocated using the edma_alloc_cont_slots | ||
810 | * API. | ||
811 | * Callers are responsible for ensuring the slots are inactive, and will | ||
812 | * not be activated. | ||
813 | */ | ||
814 | int edma_free_cont_slots(unsigned slot, int count) | ||
815 | { | ||
816 | unsigned ctlr; | ||
817 | int i; | ||
818 | |||
819 | ctlr = EDMA_CTLR(slot); | ||
820 | slot = EDMA_CHAN_SLOT(slot); | ||
821 | |||
822 | if (slot < edma_info[ctlr]->num_channels || | ||
823 | slot >= edma_info[ctlr]->num_slots || | ||
824 | count < 1) | ||
825 | return -EINVAL; | ||
826 | |||
827 | for (i = slot; i < slot + count; ++i) { | ||
828 | ctlr = EDMA_CTLR(i); | ||
829 | slot = EDMA_CHAN_SLOT(i); | ||
830 | |||
831 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), | ||
832 | &dummy_paramset, PARM_SIZE); | ||
833 | clear_bit(slot, edma_info[ctlr]->edma_inuse); | ||
834 | } | ||
835 | |||
836 | return 0; | ||
837 | } | ||
838 | EXPORT_SYMBOL(edma_free_cont_slots); | ||
839 | |||
605 | /*-----------------------------------------------------------------------*/ | 840 | /*-----------------------------------------------------------------------*/ |
606 | 841 | ||
607 | /* Parameter RAM operations (i) -- read/write partial slots */ | 842 | /* Parameter RAM operations (i) -- read/write partial slots */ |
@@ -620,8 +855,13 @@ EXPORT_SYMBOL(edma_free_slot);
620 | void edma_set_src(unsigned slot, dma_addr_t src_port, | 855 | void edma_set_src(unsigned slot, dma_addr_t src_port, |
621 | enum address_mode mode, enum fifo_width width) | 856 | enum address_mode mode, enum fifo_width width) |
622 | { | 857 | { |
623 | if (slot < num_slots) { | 858 | unsigned ctlr; |
624 | unsigned int i = edma_parm_read(PARM_OPT, slot); | 859 | |
860 | ctlr = EDMA_CTLR(slot); | ||
861 | slot = EDMA_CHAN_SLOT(slot); | ||
862 | |||
863 | if (slot < edma_info[ctlr]->num_slots) { | ||
864 | unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot); | ||
625 | 865 | ||
626 | if (mode) { | 866 | if (mode) { |
627 | /* set SAM and program FWID */ | 867 | /* set SAM and program FWID */ |
@@ -630,11 +870,11 @@ void edma_set_src(unsigned slot, dma_addr_t src_port,
630 | /* clear SAM */ | 870 | /* clear SAM */ |
631 | i &= ~SAM; | 871 | i &= ~SAM; |
632 | } | 872 | } |
633 | edma_parm_write(PARM_OPT, slot, i); | 873 | edma_parm_write(ctlr, PARM_OPT, slot, i); |
634 | 874 | ||
635 | /* set the source port address | 875 | /* set the source port address |
636 | in source register of param structure */ | 876 | in source register of param structure */ |
637 | edma_parm_write(PARM_SRC, slot, src_port); | 877 | edma_parm_write(ctlr, PARM_SRC, slot, src_port); |
638 | } | 878 | } |
639 | } | 879 | } |
640 | EXPORT_SYMBOL(edma_set_src); | 880 | EXPORT_SYMBOL(edma_set_src); |
@@ -653,8 +893,13 @@ EXPORT_SYMBOL(edma_set_src);
653 | void edma_set_dest(unsigned slot, dma_addr_t dest_port, | 893 | void edma_set_dest(unsigned slot, dma_addr_t dest_port, |
654 | enum address_mode mode, enum fifo_width width) | 894 | enum address_mode mode, enum fifo_width width) |
655 | { | 895 | { |
656 | if (slot < num_slots) { | 896 | unsigned ctlr; |
657 | unsigned int i = edma_parm_read(PARM_OPT, slot); | 897 | |
898 | ctlr = EDMA_CTLR(slot); | ||
899 | slot = EDMA_CHAN_SLOT(slot); | ||
900 | |||
901 | if (slot < edma_info[ctlr]->num_slots) { | ||
902 | unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot); | ||
658 | 903 | ||
659 | if (mode) { | 904 | if (mode) { |
660 | /* set DAM and program FWID */ | 905 | /* set DAM and program FWID */ |
@@ -663,10 +908,10 @@ void edma_set_dest(unsigned slot, dma_addr_t dest_port,
663 | /* clear DAM */ | 908 | /* clear DAM */ |
664 | i &= ~DAM; | 909 | i &= ~DAM; |
665 | } | 910 | } |
666 | edma_parm_write(PARM_OPT, slot, i); | 911 | edma_parm_write(ctlr, PARM_OPT, slot, i); |
667 | /* set the destination port address | 912 | /* set the destination port address |
668 | in dest register of param structure */ | 913 | in dest register of param structure */ |
669 | edma_parm_write(PARM_DST, slot, dest_port); | 914 | edma_parm_write(ctlr, PARM_DST, slot, dest_port); |
670 | } | 915 | } |
671 | } | 916 | } |
672 | EXPORT_SYMBOL(edma_set_dest); | 917 | EXPORT_SYMBOL(edma_set_dest); |
@@ -683,8 +928,12 @@ EXPORT_SYMBOL(edma_set_dest);
683 | void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst) | 928 | void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst) |
684 | { | 929 | { |
685 | struct edmacc_param temp; | 930 | struct edmacc_param temp; |
931 | unsigned ctlr; | ||
932 | |||
933 | ctlr = EDMA_CTLR(slot); | ||
934 | slot = EDMA_CHAN_SLOT(slot); | ||
686 | 935 | ||
687 | edma_read_slot(slot, &temp); | 936 | edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp); |
688 | if (src != NULL) | 937 | if (src != NULL) |
689 | *src = temp.src; | 938 | *src = temp.src; |
690 | if (dst != NULL) | 939 | if (dst != NULL) |
@@ -704,10 +953,15 @@ EXPORT_SYMBOL(edma_get_position);
704 | */ | 953 | */ |
705 | void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx) | 954 | void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx) |
706 | { | 955 | { |
707 | if (slot < num_slots) { | 956 | unsigned ctlr; |
708 | edma_parm_modify(PARM_SRC_DST_BIDX, slot, | 957 | |
958 | ctlr = EDMA_CTLR(slot); | ||
959 | slot = EDMA_CHAN_SLOT(slot); | ||
960 | |||
961 | if (slot < edma_info[ctlr]->num_slots) { | ||
962 | edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot, | ||
709 | 0xffff0000, src_bidx); | 963 | 0xffff0000, src_bidx); |
710 | edma_parm_modify(PARM_SRC_DST_CIDX, slot, | 964 | edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot, |
711 | 0xffff0000, src_cidx); | 965 | 0xffff0000, src_cidx); |
712 | } | 966 | } |
713 | } | 967 | } |
@@ -725,10 +979,15 @@ EXPORT_SYMBOL(edma_set_src_index);
725 | */ | 979 | */ |
726 | void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx) | 980 | void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx) |
727 | { | 981 | { |
728 | if (slot < num_slots) { | 982 | unsigned ctlr; |
729 | edma_parm_modify(PARM_SRC_DST_BIDX, slot, | 983 | |
984 | ctlr = EDMA_CTLR(slot); | ||
985 | slot = EDMA_CHAN_SLOT(slot); | ||
986 | |||
987 | if (slot < edma_info[ctlr]->num_slots) { | ||
988 | edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot, | ||
730 | 0x0000ffff, dest_bidx << 16); | 989 | 0x0000ffff, dest_bidx << 16); |
731 | edma_parm_modify(PARM_SRC_DST_CIDX, slot, | 990 | edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot, |
732 | 0x0000ffff, dest_cidx << 16); | 991 | 0x0000ffff, dest_cidx << 16); |
733 | } | 992 | } |
734 | } | 993 | } |
@@ -767,16 +1026,21 @@ void edma_set_transfer_params(unsigned slot,
767 | u16 acnt, u16 bcnt, u16 ccnt, | 1026 | u16 acnt, u16 bcnt, u16 ccnt, |
768 | u16 bcnt_rld, enum sync_dimension sync_mode) | 1027 | u16 bcnt_rld, enum sync_dimension sync_mode) |
769 | { | 1028 | { |
770 | if (slot < num_slots) { | 1029 | unsigned ctlr; |
771 | edma_parm_modify(PARM_LINK_BCNTRLD, slot, | 1030 | |
1031 | ctlr = EDMA_CTLR(slot); | ||
1032 | slot = EDMA_CHAN_SLOT(slot); | ||
1033 | |||
1034 | if (slot < edma_info[ctlr]->num_slots) { | ||
1035 | edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot, | ||
772 | 0x0000ffff, bcnt_rld << 16); | 1036 | 0x0000ffff, bcnt_rld << 16); |
773 | if (sync_mode == ASYNC) | 1037 | if (sync_mode == ASYNC) |
774 | edma_parm_and(PARM_OPT, slot, ~SYNCDIM); | 1038 | edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM); |
775 | else | 1039 | else |
776 | edma_parm_or(PARM_OPT, slot, SYNCDIM); | 1040 | edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM); |
777 | /* Set the acount, bcount, ccount registers */ | 1041 | /* Set the acount, bcount, ccount registers */ |
778 | edma_parm_write(PARM_A_B_CNT, slot, (bcnt << 16) | acnt); | 1042 | edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt); |
779 | edma_parm_write(PARM_CCNT, slot, ccnt); | 1043 | edma_parm_write(ctlr, PARM_CCNT, slot, ccnt); |
780 | } | 1044 | } |
781 | } | 1045 | } |
782 | EXPORT_SYMBOL(edma_set_transfer_params); | 1046 | EXPORT_SYMBOL(edma_set_transfer_params); |
@@ -790,11 +1054,19 @@ EXPORT_SYMBOL(edma_set_transfer_params);
790 | */ | 1054 | */ |
791 | void edma_link(unsigned from, unsigned to) | 1055 | void edma_link(unsigned from, unsigned to) |
792 | { | 1056 | { |
793 | if (from >= num_slots) | 1057 | unsigned ctlr_from, ctlr_to; |
1058 | |||
1059 | ctlr_from = EDMA_CTLR(from); | ||
1060 | from = EDMA_CHAN_SLOT(from); | ||
1061 | ctlr_to = EDMA_CTLR(to); | ||
1062 | to = EDMA_CHAN_SLOT(to); | ||
1063 | |||
1064 | if (from >= edma_info[ctlr_from]->num_slots) | ||
794 | return; | 1065 | return; |
795 | if (to >= num_slots) | 1066 | if (to >= edma_info[ctlr_to]->num_slots) |
796 | return; | 1067 | return; |
797 | edma_parm_modify(PARM_LINK_BCNTRLD, from, 0xffff0000, PARM_OFFSET(to)); | 1068 | edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000, |
1069 | PARM_OFFSET(to)); | ||
798 | } | 1070 | } |
799 | EXPORT_SYMBOL(edma_link); | 1071 | EXPORT_SYMBOL(edma_link); |
800 | 1072 | ||
@@ -807,9 +1079,14 @@ EXPORT_SYMBOL(edma_link);
807 | */ | 1079 | */ |
808 | void edma_unlink(unsigned from) | 1080 | void edma_unlink(unsigned from) |
809 | { | 1081 | { |
810 | if (from >= num_slots) | 1082 | unsigned ctlr; |
1083 | |||
1084 | ctlr = EDMA_CTLR(from); | ||
1085 | from = EDMA_CHAN_SLOT(from); | ||
1086 | |||
1087 | if (from >= edma_info[ctlr]->num_slots) | ||
811 | return; | 1088 | return; |
812 | edma_parm_or(PARM_LINK_BCNTRLD, from, 0xffff); | 1089 | edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff); |
813 | } | 1090 | } |
814 | EXPORT_SYMBOL(edma_unlink); | 1091 | EXPORT_SYMBOL(edma_unlink); |
815 | 1092 | ||
@@ -829,9 +1106,15 @@ EXPORT_SYMBOL(edma_unlink);
829 | */ | 1106 | */ |
830 | void edma_write_slot(unsigned slot, const struct edmacc_param *param) | 1107 | void edma_write_slot(unsigned slot, const struct edmacc_param *param) |
831 | { | 1108 | { |
832 | if (slot >= num_slots) | 1109 | unsigned ctlr; |
1110 | |||
1111 | ctlr = EDMA_CTLR(slot); | ||
1112 | slot = EDMA_CHAN_SLOT(slot); | ||
1113 | |||
1114 | if (slot >= edma_info[ctlr]->num_slots) | ||
833 | return; | 1115 | return; |
834 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot), param, PARM_SIZE); | 1116 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param, |
1117 | PARM_SIZE); | ||
835 | } | 1118 | } |
836 | EXPORT_SYMBOL(edma_write_slot); | 1119 | EXPORT_SYMBOL(edma_write_slot); |
837 | 1120 | ||
@@ -845,9 +1128,15 @@ EXPORT_SYMBOL(edma_write_slot);
845 | */ | 1128 | */ |
846 | void edma_read_slot(unsigned slot, struct edmacc_param *param) | 1129 | void edma_read_slot(unsigned slot, struct edmacc_param *param) |
847 | { | 1130 | { |
848 | if (slot >= num_slots) | 1131 | unsigned ctlr; |
1132 | |||
1133 | ctlr = EDMA_CTLR(slot); | ||
1134 | slot = EDMA_CHAN_SLOT(slot); | ||
1135 | |||
1136 | if (slot >= edma_info[ctlr]->num_slots) | ||
849 | return; | 1137 | return; |
850 | memcpy_fromio(param, edmacc_regs_base + PARM_OFFSET(slot), PARM_SIZE); | 1138 | memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot), |
1139 | PARM_SIZE); | ||
851 | } | 1140 | } |
852 | EXPORT_SYMBOL(edma_read_slot); | 1141 | EXPORT_SYMBOL(edma_read_slot); |
853 | 1142 | ||
@@ -864,10 +1153,15 @@ EXPORT_SYMBOL(edma_read_slot);
864 | */ | 1153 | */ |
865 | void edma_pause(unsigned channel) | 1154 | void edma_pause(unsigned channel) |
866 | { | 1155 | { |
867 | if (channel < num_channels) { | 1156 | unsigned ctlr; |
1157 | |||
1158 | ctlr = EDMA_CTLR(channel); | ||
1159 | channel = EDMA_CHAN_SLOT(channel); | ||
1160 | |||
1161 | if (channel < edma_info[ctlr]->num_channels) { | ||
868 | unsigned int mask = (1 << (channel & 0x1f)); | 1162 | unsigned int mask = (1 << (channel & 0x1f)); |
869 | 1163 | ||
870 | edma_shadow0_write_array(SH_EECR, channel >> 5, mask); | 1164 | edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask); |
871 | } | 1165 | } |
872 | } | 1166 | } |
873 | EXPORT_SYMBOL(edma_pause); | 1167 | EXPORT_SYMBOL(edma_pause); |
@@ -880,10 +1174,15 @@ EXPORT_SYMBOL(edma_pause);
880 | */ | 1174 | */ |
881 | void edma_resume(unsigned channel) | 1175 | void edma_resume(unsigned channel) |
882 | { | 1176 | { |
883 | if (channel < num_channels) { | 1177 | unsigned ctlr; |
1178 | |||
1179 | ctlr = EDMA_CTLR(channel); | ||
1180 | channel = EDMA_CHAN_SLOT(channel); | ||
1181 | |||
1182 | if (channel < edma_info[ctlr]->num_channels) { | ||
884 | unsigned int mask = (1 << (channel & 0x1f)); | 1183 | unsigned int mask = (1 << (channel & 0x1f)); |
885 | 1184 | ||
886 | edma_shadow0_write_array(SH_EESR, channel >> 5, mask); | 1185 | edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask); |
887 | } | 1186 | } |
888 | } | 1187 | } |
889 | EXPORT_SYMBOL(edma_resume); | 1188 | EXPORT_SYMBOL(edma_resume); |
@@ -901,28 +1200,33 @@ EXPORT_SYMBOL(edma_resume);
901 | */ | 1200 | */ |
902 | int edma_start(unsigned channel) | 1201 | int edma_start(unsigned channel) |
903 | { | 1202 | { |
904 | if (channel < num_channels) { | 1203 | unsigned ctlr; |
1204 | |||
1205 | ctlr = EDMA_CTLR(channel); | ||
1206 | channel = EDMA_CHAN_SLOT(channel); | ||
1207 | |||
1208 | if (channel < edma_info[ctlr]->num_channels) { | ||
905 | int j = channel >> 5; | 1209 | int j = channel >> 5; |
906 | unsigned int mask = (1 << (channel & 0x1f)); | 1210 | unsigned int mask = (1 << (channel & 0x1f)); |
907 | 1211 | ||
908 | /* EDMA channels without event association */ | 1212 | /* EDMA channels without event association */ |
909 | if (test_bit(channel, edma_noevent)) { | 1213 | if (test_bit(channel, edma_info[ctlr]->edma_noevent)) { |
910 | pr_debug("EDMA: ESR%d %08x\n", j, | 1214 | pr_debug("EDMA: ESR%d %08x\n", j, |
911 | edma_shadow0_read_array(SH_ESR, j)); | 1215 | edma_shadow0_read_array(ctlr, SH_ESR, j)); |
912 | edma_shadow0_write_array(SH_ESR, j, mask); | 1216 | edma_shadow0_write_array(ctlr, SH_ESR, j, mask); |
913 | return 0; | 1217 | return 0; |
914 | } | 1218 | } |
915 | 1219 | ||
916 | /* EDMA channel with event association */ | 1220 | /* EDMA channel with event association */ |
917 | pr_debug("EDMA: ER%d %08x\n", j, | 1221 | pr_debug("EDMA: ER%d %08x\n", j, |
918 | edma_shadow0_read_array(SH_ER, j)); | 1222 | edma_shadow0_read_array(ctlr, SH_ER, j)); |
919 | /* Clear any pending error */ | 1223 | /* Clear any pending error */ |
920 | edma_write_array(EDMA_EMCR, j, mask); | 1224 | edma_write_array(ctlr, EDMA_EMCR, j, mask); |
921 | /* Clear any SER */ | 1225 | /* Clear any SER */ |
922 | edma_shadow0_write_array(SH_SECR, j, mask); | 1226 | edma_shadow0_write_array(ctlr, SH_SECR, j, mask); |
923 | edma_shadow0_write_array(SH_EESR, j, mask); | 1227 | edma_shadow0_write_array(ctlr, SH_EESR, j, mask); |
924 | pr_debug("EDMA: EER%d %08x\n", j, | 1228 | pr_debug("EDMA: EER%d %08x\n", j, |
925 | edma_shadow0_read_array(SH_EER, j)); | 1229 | edma_shadow0_read_array(ctlr, SH_EER, j)); |
926 | return 0; | 1230 | return 0; |
927 | } | 1231 | } |
928 | 1232 | ||
@@ -941,17 +1245,22 @@ EXPORT_SYMBOL(edma_start);
941 | */ | 1245 | */ |
942 | void edma_stop(unsigned channel) | 1246 | void edma_stop(unsigned channel) |
943 | { | 1247 | { |
944 | if (channel < num_channels) { | 1248 | unsigned ctlr; |
1249 | |||
1250 | ctlr = EDMA_CTLR(channel); | ||
1251 | channel = EDMA_CHAN_SLOT(channel); | ||
1252 | |||
1253 | if (channel < edma_info[ctlr]->num_channels) { | ||
945 | int j = channel >> 5; | 1254 | int j = channel >> 5; |
946 | unsigned int mask = (1 << (channel & 0x1f)); | 1255 | unsigned int mask = (1 << (channel & 0x1f)); |
947 | 1256 | ||
948 | edma_shadow0_write_array(SH_EECR, j, mask); | 1257 | edma_shadow0_write_array(ctlr, SH_EECR, j, mask); |
949 | edma_shadow0_write_array(SH_ECR, j, mask); | 1258 | edma_shadow0_write_array(ctlr, SH_ECR, j, mask); |
950 | edma_shadow0_write_array(SH_SECR, j, mask); | 1259 | edma_shadow0_write_array(ctlr, SH_SECR, j, mask); |
951 | edma_write_array(EDMA_EMCR, j, mask); | 1260 | edma_write_array(ctlr, EDMA_EMCR, j, mask); |
952 | 1261 | ||
953 | pr_debug("EDMA: EER%d %08x\n", j, | 1262 | pr_debug("EDMA: EER%d %08x\n", j, |
954 | edma_shadow0_read_array(SH_EER, j)); | 1263 | edma_shadow0_read_array(ctlr, SH_EER, j)); |
955 | 1264 | ||
956 | /* REVISIT: consider guarding against inappropriate event | 1265 | /* REVISIT: consider guarding against inappropriate event |
957 | * chaining by overwriting with dummy_paramset. | 1266 | * chaining by overwriting with dummy_paramset. |
@@ -975,18 +1284,23 @@ EXPORT_SYMBOL(edma_stop);
975 | 1284 | ||
976 | void edma_clean_channel(unsigned channel) | 1285 | void edma_clean_channel(unsigned channel) |
977 | { | 1286 | { |
978 | if (channel < num_channels) { | 1287 | unsigned ctlr; |
1288 | |||
1289 | ctlr = EDMA_CTLR(channel); | ||
1290 | channel = EDMA_CHAN_SLOT(channel); | ||
1291 | |||
1292 | if (channel < edma_info[ctlr]->num_channels) { | ||
979 | int j = (channel >> 5); | 1293 | int j = (channel >> 5); |
980 | unsigned int mask = 1 << (channel & 0x1f); | 1294 | unsigned int mask = 1 << (channel & 0x1f); |
981 | 1295 | ||
982 | pr_debug("EDMA: EMR%d %08x\n", j, | 1296 | pr_debug("EDMA: EMR%d %08x\n", j, |
983 | edma_read_array(EDMA_EMR, j)); | 1297 | edma_read_array(ctlr, EDMA_EMR, j)); |
984 | edma_shadow0_write_array(SH_ECR, j, mask); | 1298 | edma_shadow0_write_array(ctlr, SH_ECR, j, mask); |
985 | /* Clear the corresponding EMR bits */ | 1299 | /* Clear the corresponding EMR bits */ |
986 | edma_write_array(EDMA_EMCR, j, mask); | 1300 | edma_write_array(ctlr, EDMA_EMCR, j, mask); |
987 | /* Clear any SER */ | 1301 | /* Clear any SER */ |
988 | edma_shadow0_write_array(SH_SECR, j, mask); | 1302 | edma_shadow0_write_array(ctlr, SH_SECR, j, mask); |
989 | edma_write(EDMA_CCERRCLR, (1 << 16) | 0x3); | 1303 | edma_write(ctlr, EDMA_CCERRCLR, (1 << 16) | 0x3); |
990 | } | 1304 | } |
991 | } | 1305 | } |
992 | EXPORT_SYMBOL(edma_clean_channel); | 1306 | EXPORT_SYMBOL(edma_clean_channel); |
@@ -998,12 +1312,17 @@ EXPORT_SYMBOL(edma_clean_channel);
998 | */ | 1312 | */ |
999 | void edma_clear_event(unsigned channel) | 1313 | void edma_clear_event(unsigned channel) |
1000 | { | 1314 | { |
1001 | if (channel >= num_channels) | 1315 | unsigned ctlr; |
1316 | |||
1317 | ctlr = EDMA_CTLR(channel); | ||
1318 | channel = EDMA_CHAN_SLOT(channel); | ||
1319 | |||
1320 | if (channel >= edma_info[ctlr]->num_channels) | ||
1002 | return; | 1321 | return; |
1003 | if (channel < 32) | 1322 | if (channel < 32) |
1004 | edma_write(EDMA_ECR, 1 << channel); | 1323 | edma_write(ctlr, EDMA_ECR, 1 << channel); |
1005 | else | 1324 | else |
1006 | edma_write(EDMA_ECRH, 1 << (channel - 32)); | 1325 | edma_write(ctlr, EDMA_ECRH, 1 << (channel - 32)); |
1007 | } | 1326 | } |
1008 | EXPORT_SYMBOL(edma_clear_event); | 1327 | EXPORT_SYMBOL(edma_clear_event); |
1009 | 1328 | ||
@@ -1012,62 +1331,133 @@ EXPORT_SYMBOL(edma_clear_event);
1012 | static int __init edma_probe(struct platform_device *pdev) | 1331 | static int __init edma_probe(struct platform_device *pdev) |
1013 | { | 1332 | { |
1014 | struct edma_soc_info *info = pdev->dev.platform_data; | 1333 | struct edma_soc_info *info = pdev->dev.platform_data; |
1015 | int i; | 1334 | const s8 (*queue_priority_mapping)[2]; |
1016 | int status; | 1335 | const s8 (*queue_tc_mapping)[2]; |
1336 | int i, j, found = 0; | ||
1337 | int status = -1; | ||
1017 | const s8 *noevent; | 1338 | const s8 *noevent; |
1018 | int irq = 0, err_irq = 0; | 1339 | int irq[EDMA_MAX_CC] = {0, 0}; |
1019 | struct resource *r; | 1340 | int err_irq[EDMA_MAX_CC] = {0, 0}; |
1020 | resource_size_t len; | 1341 | struct resource *r[EDMA_MAX_CC] = {NULL}; |
1342 | resource_size_t len[EDMA_MAX_CC]; | ||
1343 | char res_name[10]; | ||
1344 | char irq_name[10]; | ||
1021 | 1345 | ||
1022 | if (!info) | 1346 | if (!info) |
1023 | return -ENODEV; | 1347 | return -ENODEV; |
1024 | 1348 | ||
1025 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma_cc"); | 1349 | for (j = 0; j < EDMA_MAX_CC; j++) { |
1026 | if (!r) | 1350 | sprintf(res_name, "edma_cc%d", j); |
1027 | return -ENODEV; | 1351 | r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
1352 | res_name); | ||
1353 | if (!r[j]) { | ||
1354 | if (found) | ||
1355 | break; | ||
1356 | else | ||
1357 | return -ENODEV; | ||
1358 | } else | ||
1359 | found = 1; | ||
1360 | |||
1361 | len[j] = resource_size(r[j]); | ||
1362 | |||
1363 | r[j] = request_mem_region(r[j]->start, len[j], | ||
1364 | dev_name(&pdev->dev)); | ||
1365 | if (!r[j]) { | ||
1366 | status = -EBUSY; | ||
1367 | goto fail1; | ||
1368 | } | ||
1028 | 1369 | ||
1029 | len = r->end - r->start + 1; | 1370 | edmacc_regs_base[j] = ioremap(r[j]->start, len[j]); |
1371 | if (!edmacc_regs_base[j]) { | ||
1372 | status = -EBUSY; | ||
1373 | goto fail1; | ||
1374 | } | ||
1030 | 1375 | ||
1031 | r = request_mem_region(r->start, len, r->name); | 1376 | edma_info[j] = kmalloc(sizeof(struct edma), GFP_KERNEL); |
1032 | if (!r) | 1377 | if (!edma_info[j]) { |
1033 | return -EBUSY; | 1378 | status = -ENOMEM; |
1379 | goto fail1; | ||
1380 | } | ||
1381 | memset(edma_info[j], 0, sizeof(struct edma)); | ||
1382 | |||
1383 | edma_info[j]->num_channels = min_t(unsigned, info[j].n_channel, | ||
1384 | EDMA_MAX_DMACH); | ||
1385 | edma_info[j]->num_slots = min_t(unsigned, info[j].n_slot, | ||
1386 | EDMA_MAX_PARAMENTRY); | ||
1387 | edma_info[j]->num_cc = min_t(unsigned, info[j].n_cc, | ||
1388 | EDMA_MAX_CC); | ||
1389 | |||
1390 | edma_info[j]->default_queue = info[j].default_queue; | ||
1391 | if (!edma_info[j]->default_queue) | ||
1392 | edma_info[j]->default_queue = EVENTQ_1; | ||
1393 | |||
1394 | dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n", | ||
1395 | edmacc_regs_base[j]); | ||
1396 | |||
1397 | for (i = 0; i < edma_info[j]->num_slots; i++) | ||
1398 | memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i), | ||
1399 | &dummy_paramset, PARM_SIZE); | ||
1400 | |||
1401 | noevent = info[j].noevent; | ||
1402 | if (noevent) { | ||
1403 | while (*noevent != -1) | ||
1404 | set_bit(*noevent++, edma_info[j]->edma_noevent); | ||
1405 | } | ||
1034 | 1406 | ||
1035 | edmacc_regs_base = ioremap(r->start, len); | 1407 | sprintf(irq_name, "edma%d", j); |
1036 | if (!edmacc_regs_base) { | 1408 | irq[j] = platform_get_irq_byname(pdev, irq_name); |
1037 | status = -EBUSY; | 1409 | edma_info[j]->irq_res_start = irq[j]; |
1038 | goto fail1; | 1410 | status = request_irq(irq[j], dma_irq_handler, 0, "edma", |
1039 | } | 1411 | &pdev->dev); |
1412 | if (status < 0) { | ||
1413 | dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n", | ||
1414 | irq[j], status); | ||
1415 | goto fail; | ||
1416 | } | ||
1040 | 1417 | ||
1041 | num_channels = min_t(unsigned, info->n_channel, EDMA_MAX_DMACH); | 1418 | sprintf(irq_name, "edma%d_err", j); |
1042 | num_slots = min_t(unsigned, info->n_slot, EDMA_MAX_PARAMENTRY); | 1419 | err_irq[j] = platform_get_irq_byname(pdev, irq_name); |
1420 | edma_info[j]->irq_res_end = err_irq[j]; | ||
1421 | status = request_irq(err_irq[j], dma_ccerr_handler, 0, | ||
1422 | "edma_error", &pdev->dev); | ||
1423 | if (status < 0) { | ||
1424 | dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n", | ||
1425 | err_irq[j], status); | ||
1426 | goto fail; | ||
1427 | } | ||
1043 | 1428 | ||
1044 | dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n", edmacc_regs_base); | 1429 | /* Everything lives on transfer controller 1 until otherwise |
1430 | * specified. This way, long transfers on the low priority queue | ||
1431 | * started by the codec engine will not cause audio defects. | ||
1432 | */ | ||
1433 | for (i = 0; i < edma_info[j]->num_channels; i++) | ||
1434 | map_dmach_queue(j, i, EVENTQ_1); | ||
1045 | 1435 | ||
1046 | for (i = 0; i < num_slots; i++) | 1436 | queue_tc_mapping = info[j].queue_tc_mapping; |
1047 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(i), | 1437 | queue_priority_mapping = info[j].queue_priority_mapping; |
1048 | &dummy_paramset, PARM_SIZE); | ||
1049 | 1438 | ||
1050 | noevent = info->noevent; | 1439 | /* Event queue to TC mapping */ |
1051 | if (noevent) { | 1440 | for (i = 0; queue_tc_mapping[i][0] != -1; i++) |
1052 | while (*noevent != -1) | 1441 | map_queue_tc(j, queue_tc_mapping[i][0], |
1053 | set_bit(*noevent++, edma_noevent); | 1442 | queue_tc_mapping[i][1]); |
1054 | } | ||
1055 | 1443 | ||
1056 | irq = platform_get_irq(pdev, 0); | 1444 | /* Event queue priority mapping */ |
1057 | status = request_irq(irq, dma_irq_handler, 0, "edma", &pdev->dev); | 1445 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) |
1058 | if (status < 0) { | 1446 | assign_priority_to_queue(j, |
1059 | dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n", | 1447 | queue_priority_mapping[i][0], |
1060 | irq, status); | 1448 | queue_priority_mapping[i][1]); |
1061 | goto fail; | 1449 | |
1062 | } | 1450 | /* Map the channel to param entry if channel mapping logic |
1451 | * exist | ||
1452 | */ | ||
1453 | if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST) | ||
1454 | map_dmach_param(j); | ||
1063 | 1455 | ||
1064 | err_irq = platform_get_irq(pdev, 1); | 1456 | for (i = 0; i < info[j].n_region; i++) { |
1065 | status = request_irq(err_irq, dma_ccerr_handler, 0, | 1457 | edma_write_array2(j, EDMA_DRAE, i, 0, 0x0); |
1066 | "edma_error", &pdev->dev); | 1458 | edma_write_array2(j, EDMA_DRAE, i, 1, 0x0); |
1067 | if (status < 0) { | 1459 | edma_write_array(j, EDMA_QRAE, i, 0x0); |
1068 | dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n", | 1460 | } |
1069 | err_irq, status); | ||
1070 | goto fail; | ||
1071 | } | 1461 | } |
1072 | 1462 | ||
1073 | if (tc_errs_handled) { | 1463 | if (tc_errs_handled) { |
@@ -1087,38 +1477,23 @@ static int __init edma_probe(struct platform_device *pdev)
1087 | } | 1477 | } |
1088 | } | 1478 | } |
1089 | 1479 | ||
1090 | /* Everything lives on transfer controller 1 until otherwise specified. | ||
1091 | * This way, long transfers on the low priority queue | ||
1092 | * started by the codec engine will not cause audio defects. | ||
1093 | */ | ||
1094 | for (i = 0; i < num_channels; i++) | ||
1095 | map_dmach_queue(i, EVENTQ_1); | ||
1096 | |||
1097 | /* Event queue to TC mapping */ | ||
1098 | for (i = 0; queue_tc_mapping[i][0] != -1; i++) | ||
1099 | map_queue_tc(queue_tc_mapping[i][0], queue_tc_mapping[i][1]); | ||
1100 | |||
1101 | /* Event queue priority mapping */ | ||
1102 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) | ||
1103 | assign_priority_to_queue(queue_priority_mapping[i][0], | ||
1104 | queue_priority_mapping[i][1]); | ||
1105 | |||
1106 | for (i = 0; i < info->n_region; i++) { | ||
1107 | edma_write_array2(EDMA_DRAE, i, 0, 0x0); | ||
1108 | edma_write_array2(EDMA_DRAE, i, 1, 0x0); | ||
1109 | edma_write_array(EDMA_QRAE, i, 0x0); | ||
1110 | } | ||
1111 | |||
1112 | return 0; | 1480 | return 0; |
1113 | 1481 | ||
1114 | fail: | 1482 | fail: |
1115 | if (err_irq) | 1483 | for (i = 0; i < EDMA_MAX_CC; i++) { |
1116 | free_irq(err_irq, NULL); | 1484 | if (err_irq[i]) |
1117 | if (irq) | 1485 | free_irq(err_irq[i], &pdev->dev); |
1118 | free_irq(irq, NULL); | 1486 | if (irq[i]) |
1119 | iounmap(edmacc_regs_base); | 1487 | free_irq(irq[i], &pdev->dev); |
1488 | } | ||
1120 | fail1: | 1489 | fail1: |
1121 | release_mem_region(r->start, len); | 1490 | for (i = 0; i < EDMA_MAX_CC; i++) { |
1491 | if (r[i]) | ||
1492 | release_mem_region(r[i]->start, len[i]); | ||
1493 | if (edmacc_regs_base[i]) | ||
1494 | iounmap(edmacc_regs_base[i]); | ||
1495 | kfree(edma_info[i]); | ||
1496 | } | ||
1122 | return status; | 1497 | return status; |
1123 | } | 1498 | } |
1124 | 1499 | ||