diff options
author | Javier Martin <javier.martin@vista-silicon.com> | 2012-03-22 09:54:12 -0400 |
---|---|---|
committer | Vinod Koul <vinod.koul@linux.intel.com> | 2012-03-26 02:01:32 -0400 |
commit | cd5cf9da020293118800864641e09b71e23ba41c (patch) | |
tree | c26fe39e278a0df1b7ac74476be85f34596dceb1 /drivers/dma | |
parent | a6cbb2d87d20817e555a6ffa3131bfa1cdd9ab73 (diff) |
dmaengine: imx-dma: remove 'imx_dmav1_baseaddr' and 'dma_clk'.
These global variables are integrated into the dmaengine structure.
Signed-off-by: Javier Martin <javier.martin@vista-silicon.com>
Acked-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/imx-dma.c | 169 |
1 file changed, 93 insertions, 76 deletions
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 628b0f61ab38..cdca95a5666c 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -160,6 +160,8 @@ struct imxdma_engine { | |||
160 | struct device *dev; | 160 | struct device *dev; |
161 | struct device_dma_parameters dma_parms; | 161 | struct device_dma_parameters dma_parms; |
162 | struct dma_device dma_device; | 162 | struct dma_device dma_device; |
163 | void __iomem *base; | ||
164 | struct clk *dma_clk; | ||
163 | struct imxdma_channel channel[IMX_DMA_CHANNELS]; | 165 | struct imxdma_channel channel[IMX_DMA_CHANNELS]; |
164 | }; | 166 | }; |
165 | 167 | ||
@@ -181,18 +183,17 @@ static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac) | |||
181 | return false; | 183 | return false; |
182 | } | 184 | } |
183 | 185 | ||
184 | /* TODO: put this inside any struct */ | ||
185 | static void __iomem *imx_dmav1_baseaddr; | ||
186 | static struct clk *dma_clk; | ||
187 | 186 | ||
188 | static void imx_dmav1_writel(unsigned val, unsigned offset) | 187 | |
188 | static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val, | ||
189 | unsigned offset) | ||
189 | { | 190 | { |
190 | __raw_writel(val, imx_dmav1_baseaddr + offset); | 191 | __raw_writel(val, imxdma->base + offset); |
191 | } | 192 | } |
192 | 193 | ||
193 | static unsigned imx_dmav1_readl(unsigned offset) | 194 | static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset) |
194 | { | 195 | { |
195 | return __raw_readl(imx_dmav1_baseaddr + offset); | 196 | return __raw_readl(imxdma->base + offset); |
196 | } | 197 | } |
197 | 198 | ||
198 | static int imxdma_hw_chain(struct imxdma_channel *imxdmac) | 199 | static int imxdma_hw_chain(struct imxdma_channel *imxdmac) |
@@ -209,6 +210,7 @@ static int imxdma_hw_chain(struct imxdma_channel *imxdmac) | |||
209 | static inline int imxdma_sg_next(struct imxdma_desc *d) | 210 | static inline int imxdma_sg_next(struct imxdma_desc *d) |
210 | { | 211 | { |
211 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); | 212 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); |
213 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
212 | struct scatterlist *sg = d->sg; | 214 | struct scatterlist *sg = d->sg; |
213 | unsigned long now; | 215 | unsigned long now; |
214 | 216 | ||
@@ -217,17 +219,19 @@ static inline int imxdma_sg_next(struct imxdma_desc *d) | |||
217 | d->len -= now; | 219 | d->len -= now; |
218 | 220 | ||
219 | if (d->direction == DMA_DEV_TO_MEM) | 221 | if (d->direction == DMA_DEV_TO_MEM) |
220 | imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel)); | 222 | imx_dmav1_writel(imxdma, sg->dma_address, |
223 | DMA_DAR(imxdmac->channel)); | ||
221 | else | 224 | else |
222 | imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel)); | 225 | imx_dmav1_writel(imxdma, sg->dma_address, |
226 | DMA_SAR(imxdmac->channel)); | ||
223 | 227 | ||
224 | imx_dmav1_writel(now, DMA_CNTR(imxdmac->channel)); | 228 | imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel)); |
225 | 229 | ||
226 | pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, " | 230 | pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, " |
227 | "size 0x%08x\n", imxdmac->channel, | 231 | "size 0x%08x\n", imxdmac->channel, |
228 | imx_dmav1_readl(DMA_DAR(imxdmac->channel)), | 232 | imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)), |
229 | imx_dmav1_readl(DMA_SAR(imxdmac->channel)), | 233 | imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)), |
230 | imx_dmav1_readl(DMA_CNTR(imxdmac->channel))); | 234 | imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel))); |
231 | 235 | ||
232 | return now; | 236 | return now; |
233 | } | 237 | } |
@@ -235,6 +239,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d) | |||
235 | static void imxdma_enable_hw(struct imxdma_desc *d) | 239 | static void imxdma_enable_hw(struct imxdma_desc *d) |
236 | { | 240 | { |
237 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); | 241 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); |
242 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
238 | int channel = imxdmac->channel; | 243 | int channel = imxdmac->channel; |
239 | unsigned long flags; | 244 | unsigned long flags; |
240 | 245 | ||
@@ -242,10 +247,11 @@ static void imxdma_enable_hw(struct imxdma_desc *d) | |||
242 | 247 | ||
243 | local_irq_save(flags); | 248 | local_irq_save(flags); |
244 | 249 | ||
245 | imx_dmav1_writel(1 << channel, DMA_DISR); | 250 | imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR); |
246 | imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR); | 251 | imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) & |
247 | imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN | | 252 | ~(1 << channel), DMA_DIMR); |
248 | CCR_ACRPT, DMA_CCR(channel)); | 253 | imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) | |
254 | CCR_CEN | CCR_ACRPT, DMA_CCR(channel)); | ||
249 | 255 | ||
250 | if ((cpu_is_mx21() || cpu_is_mx27()) && | 256 | if ((cpu_is_mx21() || cpu_is_mx27()) && |
251 | d->sg && imxdma_hw_chain(imxdmac)) { | 257 | d->sg && imxdma_hw_chain(imxdmac)) { |
@@ -253,9 +259,9 @@ static void imxdma_enable_hw(struct imxdma_desc *d) | |||
253 | if (d->sg) { | 259 | if (d->sg) { |
254 | u32 tmp; | 260 | u32 tmp; |
255 | imxdma_sg_next(d); | 261 | imxdma_sg_next(d); |
256 | tmp = imx_dmav1_readl(DMA_CCR(channel)); | 262 | tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel)); |
257 | imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT, | 263 | imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT, |
258 | DMA_CCR(channel)); | 264 | DMA_CCR(channel)); |
259 | } | 265 | } |
260 | } | 266 | } |
261 | 267 | ||
@@ -264,6 +270,7 @@ static void imxdma_enable_hw(struct imxdma_desc *d) | |||
264 | 270 | ||
265 | static void imxdma_disable_hw(struct imxdma_channel *imxdmac) | 271 | static void imxdma_disable_hw(struct imxdma_channel *imxdmac) |
266 | { | 272 | { |
273 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
267 | int channel = imxdmac->channel; | 274 | int channel = imxdmac->channel; |
268 | unsigned long flags; | 275 | unsigned long flags; |
269 | 276 | ||
@@ -273,19 +280,21 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac) | |||
273 | del_timer(&imxdmac->watchdog); | 280 | del_timer(&imxdmac->watchdog); |
274 | 281 | ||
275 | local_irq_save(flags); | 282 | local_irq_save(flags); |
276 | imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR); | 283 | imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) | |
277 | imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN, | 284 | (1 << channel), DMA_DIMR); |
278 | DMA_CCR(channel)); | 285 | imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) & |
279 | imx_dmav1_writel(1 << channel, DMA_DISR); | 286 | ~CCR_CEN, DMA_CCR(channel)); |
287 | imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR); | ||
280 | local_irq_restore(flags); | 288 | local_irq_restore(flags); |
281 | } | 289 | } |
282 | 290 | ||
283 | static void imxdma_watchdog(unsigned long data) | 291 | static void imxdma_watchdog(unsigned long data) |
284 | { | 292 | { |
285 | struct imxdma_channel *imxdmac = (struct imxdma_channel *)data; | 293 | struct imxdma_channel *imxdmac = (struct imxdma_channel *)data; |
294 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
286 | int channel = imxdmac->channel; | 295 | int channel = imxdmac->channel; |
287 | 296 | ||
288 | imx_dmav1_writel(0, DMA_CCR(channel)); | 297 | imx_dmav1_writel(imxdma, 0, DMA_CCR(channel)); |
289 | 298 | ||
290 | /* Tasklet watchdog error handler */ | 299 | /* Tasklet watchdog error handler */ |
291 | tasklet_schedule(&imxdmac->dma_tasklet); | 300 | tasklet_schedule(&imxdmac->dma_tasklet); |
@@ -299,37 +308,37 @@ static irqreturn_t imxdma_err_handler(int irq, void *dev_id) | |||
299 | int i, disr; | 308 | int i, disr; |
300 | int errcode; | 309 | int errcode; |
301 | 310 | ||
302 | disr = imx_dmav1_readl(DMA_DISR); | 311 | disr = imx_dmav1_readl(imxdma, DMA_DISR); |
303 | 312 | ||
304 | err_mask = imx_dmav1_readl(DMA_DBTOSR) | | 313 | err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) | |
305 | imx_dmav1_readl(DMA_DRTOSR) | | 314 | imx_dmav1_readl(imxdma, DMA_DRTOSR) | |
306 | imx_dmav1_readl(DMA_DSESR) | | 315 | imx_dmav1_readl(imxdma, DMA_DSESR) | |
307 | imx_dmav1_readl(DMA_DBOSR); | 316 | imx_dmav1_readl(imxdma, DMA_DBOSR); |
308 | 317 | ||
309 | if (!err_mask) | 318 | if (!err_mask) |
310 | return IRQ_HANDLED; | 319 | return IRQ_HANDLED; |
311 | 320 | ||
312 | imx_dmav1_writel(disr & err_mask, DMA_DISR); | 321 | imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR); |
313 | 322 | ||
314 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { | 323 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { |
315 | if (!(err_mask & (1 << i))) | 324 | if (!(err_mask & (1 << i))) |
316 | continue; | 325 | continue; |
317 | errcode = 0; | 326 | errcode = 0; |
318 | 327 | ||
319 | if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) { | 328 | if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) { |
320 | imx_dmav1_writel(1 << i, DMA_DBTOSR); | 329 | imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR); |
321 | errcode |= IMX_DMA_ERR_BURST; | 330 | errcode |= IMX_DMA_ERR_BURST; |
322 | } | 331 | } |
323 | if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) { | 332 | if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) { |
324 | imx_dmav1_writel(1 << i, DMA_DRTOSR); | 333 | imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR); |
325 | errcode |= IMX_DMA_ERR_REQUEST; | 334 | errcode |= IMX_DMA_ERR_REQUEST; |
326 | } | 335 | } |
327 | if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) { | 336 | if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) { |
328 | imx_dmav1_writel(1 << i, DMA_DSESR); | 337 | imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR); |
329 | errcode |= IMX_DMA_ERR_TRANSFER; | 338 | errcode |= IMX_DMA_ERR_TRANSFER; |
330 | } | 339 | } |
331 | if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) { | 340 | if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) { |
332 | imx_dmav1_writel(1 << i, DMA_DBOSR); | 341 | imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR); |
333 | errcode |= IMX_DMA_ERR_BUFFER; | 342 | errcode |= IMX_DMA_ERR_BUFFER; |
334 | } | 343 | } |
335 | /* Tasklet error handler */ | 344 | /* Tasklet error handler */ |
@@ -347,6 +356,7 @@ static irqreturn_t imxdma_err_handler(int irq, void *dev_id) | |||
347 | 356 | ||
348 | static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) | 357 | static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) |
349 | { | 358 | { |
359 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
350 | int chno = imxdmac->channel; | 360 | int chno = imxdmac->channel; |
351 | struct imxdma_desc *desc; | 361 | struct imxdma_desc *desc; |
352 | 362 | ||
@@ -368,7 +378,7 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) | |||
368 | if (desc->sg) { | 378 | if (desc->sg) { |
369 | imxdma_sg_next(desc); | 379 | imxdma_sg_next(desc); |
370 | 380 | ||
371 | tmp = imx_dmav1_readl(DMA_CCR(chno)); | 381 | tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno)); |
372 | 382 | ||
373 | if (imxdma_hw_chain(imxdmac)) { | 383 | if (imxdma_hw_chain(imxdmac)) { |
374 | /* FIXME: The timeout should probably be | 384 | /* FIXME: The timeout should probably be |
@@ -378,13 +388,14 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) | |||
378 | jiffies + msecs_to_jiffies(500)); | 388 | jiffies + msecs_to_jiffies(500)); |
379 | 389 | ||
380 | tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT; | 390 | tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT; |
381 | imx_dmav1_writel(tmp, DMA_CCR(chno)); | 391 | imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno)); |
382 | } else { | 392 | } else { |
383 | imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno)); | 393 | imx_dmav1_writel(imxdma, tmp & ~CCR_CEN, |
394 | DMA_CCR(chno)); | ||
384 | tmp |= CCR_CEN; | 395 | tmp |= CCR_CEN; |
385 | } | 396 | } |
386 | 397 | ||
387 | imx_dmav1_writel(tmp, DMA_CCR(chno)); | 398 | imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno)); |
388 | 399 | ||
389 | if (imxdma_chan_is_doing_cyclic(imxdmac)) | 400 | if (imxdma_chan_is_doing_cyclic(imxdmac)) |
390 | /* Tasklet progression */ | 401 | /* Tasklet progression */ |
@@ -400,7 +411,7 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) | |||
400 | } | 411 | } |
401 | 412 | ||
402 | out: | 413 | out: |
403 | imx_dmav1_writel(0, DMA_CCR(chno)); | 414 | imx_dmav1_writel(imxdma, 0, DMA_CCR(chno)); |
404 | /* Tasklet irq */ | 415 | /* Tasklet irq */ |
405 | tasklet_schedule(&imxdmac->dma_tasklet); | 416 | tasklet_schedule(&imxdmac->dma_tasklet); |
406 | } | 417 | } |
@@ -413,12 +424,12 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id) | |||
413 | if (cpu_is_mx21() || cpu_is_mx27()) | 424 | if (cpu_is_mx21() || cpu_is_mx27()) |
414 | imxdma_err_handler(irq, dev_id); | 425 | imxdma_err_handler(irq, dev_id); |
415 | 426 | ||
416 | disr = imx_dmav1_readl(DMA_DISR); | 427 | disr = imx_dmav1_readl(imxdma, DMA_DISR); |
417 | 428 | ||
418 | pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n", | 429 | pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n", |
419 | disr); | 430 | disr); |
420 | 431 | ||
421 | imx_dmav1_writel(disr, DMA_DISR); | 432 | imx_dmav1_writel(imxdma, disr, DMA_DISR); |
422 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { | 433 | for (i = 0; i < IMX_DMA_CHANNELS; i++) { |
423 | if (disr & (1 << i)) | 434 | if (disr & (1 << i)) |
424 | dma_irq_handle_channel(&imxdma->channel[i]); | 435 | dma_irq_handle_channel(&imxdma->channel[i]); |
@@ -435,12 +446,12 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) | |||
435 | /* Configure and enable */ | 446 | /* Configure and enable */ |
436 | switch (d->type) { | 447 | switch (d->type) { |
437 | case IMXDMA_DESC_MEMCPY: | 448 | case IMXDMA_DESC_MEMCPY: |
438 | imx_dmav1_writel(d->src, DMA_SAR(imxdmac->channel)); | 449 | imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel)); |
439 | imx_dmav1_writel(d->dest, DMA_DAR(imxdmac->channel)); | 450 | imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel)); |
440 | imx_dmav1_writel(d->config_mem | (d->config_port << 2), | 451 | imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2), |
441 | DMA_CCR(imxdmac->channel)); | 452 | DMA_CCR(imxdmac->channel)); |
442 | 453 | ||
443 | imx_dmav1_writel(d->len, DMA_CNTR(imxdmac->channel)); | 454 | imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel)); |
444 | 455 | ||
445 | dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x " | 456 | dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x " |
446 | "dma_length=%d\n", __func__, imxdmac->channel, | 457 | "dma_length=%d\n", __func__, imxdmac->channel, |
@@ -451,9 +462,9 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) | |||
451 | case IMXDMA_DESC_CYCLIC: | 462 | case IMXDMA_DESC_CYCLIC: |
452 | case IMXDMA_DESC_SLAVE_SG: | 463 | case IMXDMA_DESC_SLAVE_SG: |
453 | if (d->direction == DMA_DEV_TO_MEM) { | 464 | if (d->direction == DMA_DEV_TO_MEM) { |
454 | imx_dmav1_writel(imxdmac->per_address, | 465 | imx_dmav1_writel(imxdma, imxdmac->per_address, |
455 | DMA_SAR(imxdmac->channel)); | 466 | DMA_SAR(imxdmac->channel)); |
456 | imx_dmav1_writel(imxdmac->ccr_from_device, | 467 | imx_dmav1_writel(imxdma, imxdmac->ccr_from_device, |
457 | DMA_CCR(imxdmac->channel)); | 468 | DMA_CCR(imxdmac->channel)); |
458 | 469 | ||
459 | dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " | 470 | dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " |
@@ -461,9 +472,9 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) | |||
461 | __func__, imxdmac->channel, d->sg, d->sgcount, | 472 | __func__, imxdmac->channel, d->sg, d->sgcount, |
462 | d->len, imxdmac->per_address); | 473 | d->len, imxdmac->per_address); |
463 | } else if (d->direction == DMA_MEM_TO_DEV) { | 474 | } else if (d->direction == DMA_MEM_TO_DEV) { |
464 | imx_dmav1_writel(imxdmac->per_address, | 475 | imx_dmav1_writel(imxdma, imxdmac->per_address, |
465 | DMA_DAR(imxdmac->channel)); | 476 | DMA_DAR(imxdmac->channel)); |
466 | imx_dmav1_writel(imxdmac->ccr_to_device, | 477 | imx_dmav1_writel(imxdma, imxdmac->ccr_to_device, |
467 | DMA_CCR(imxdmac->channel)); | 478 | DMA_CCR(imxdmac->channel)); |
468 | 479 | ||
469 | dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " | 480 | dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " |
@@ -528,6 +539,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
528 | { | 539 | { |
529 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 540 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
530 | struct dma_slave_config *dmaengine_cfg = (void *)arg; | 541 | struct dma_slave_config *dmaengine_cfg = (void *)arg; |
542 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
531 | unsigned long flags; | 543 | unsigned long flags; |
532 | unsigned int mode = 0; | 544 | unsigned int mode = 0; |
533 | 545 | ||
@@ -573,12 +585,12 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
573 | imxdmac->ccr_to_device = | 585 | imxdmac->ccr_to_device = |
574 | (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) | | 586 | (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) | |
575 | ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN; | 587 | ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN; |
576 | imx_dmav1_writel(imxdmac->dma_request, | 588 | imx_dmav1_writel(imxdma, imxdmac->dma_request, |
577 | DMA_RSSR(imxdmac->channel)); | 589 | DMA_RSSR(imxdmac->channel)); |
578 | 590 | ||
579 | /* Set burst length */ | 591 | /* Set burst length */ |
580 | imx_dmav1_writel(imxdmac->watermark_level * imxdmac->word_size, | 592 | imx_dmav1_writel(imxdma, imxdmac->watermark_level * |
581 | DMA_BLR(imxdmac->channel)); | 593 | imxdmac->word_size, DMA_BLR(imxdmac->channel)); |
582 | 594 | ||
583 | return 0; | 595 | return 0; |
584 | default: | 596 | default: |
@@ -836,27 +848,35 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
836 | struct imxdma_engine *imxdma; | 848 | struct imxdma_engine *imxdma; |
837 | int ret, i; | 849 | int ret, i; |
838 | 850 | ||
839 | if (cpu_is_mx1()) | 851 | |
840 | imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR); | 852 | imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL); |
841 | else if (cpu_is_mx21()) | 853 | if (!imxdma) |
842 | imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR); | 854 | return -ENOMEM; |
843 | else if (cpu_is_mx27()) | 855 | |
844 | imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR); | 856 | if (cpu_is_mx1()) { |
845 | else | 857 | imxdma->base = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR); |
858 | } else if (cpu_is_mx21()) { | ||
859 | imxdma->base = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR); | ||
860 | } else if (cpu_is_mx27()) { | ||
861 | imxdma->base = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR); | ||
862 | } else { | ||
863 | kfree(imxdma); | ||
846 | return 0; | 864 | return 0; |
865 | } | ||
847 | 866 | ||
848 | dma_clk = clk_get(NULL, "dma"); | 867 | imxdma->dma_clk = clk_get(NULL, "dma"); |
849 | if (IS_ERR(dma_clk)) | 868 | if (IS_ERR(imxdma->dma_clk)) |
850 | return PTR_ERR(dma_clk); | 869 | return PTR_ERR(imxdma->dma_clk); |
851 | clk_enable(dma_clk); | 870 | clk_enable(imxdma->dma_clk); |
852 | 871 | ||
853 | /* reset DMA module */ | 872 | /* reset DMA module */ |
854 | imx_dmav1_writel(DCR_DRST, DMA_DCR); | 873 | imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR); |
855 | 874 | ||
856 | if (cpu_is_mx1()) { | 875 | if (cpu_is_mx1()) { |
857 | ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma); | 876 | ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma); |
858 | if (ret) { | 877 | if (ret) { |
859 | pr_crit("Can't register IRQ for DMA\n"); | 878 | pr_crit("Can't register IRQ for DMA\n"); |
879 | kfree(imxdma); | ||
860 | return ret; | 880 | return ret; |
861 | } | 881 | } |
862 | 882 | ||
@@ -864,22 +884,19 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
864 | if (ret) { | 884 | if (ret) { |
865 | pr_crit("Can't register ERRIRQ for DMA\n"); | 885 | pr_crit("Can't register ERRIRQ for DMA\n"); |
866 | free_irq(MX1_DMA_INT, NULL); | 886 | free_irq(MX1_DMA_INT, NULL); |
887 | kfree(imxdma); | ||
867 | return ret; | 888 | return ret; |
868 | } | 889 | } |
869 | } | 890 | } |
870 | 891 | ||
871 | /* enable DMA module */ | 892 | /* enable DMA module */ |
872 | imx_dmav1_writel(DCR_DEN, DMA_DCR); | 893 | imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR); |
873 | 894 | ||
874 | /* clear all interrupts */ | 895 | /* clear all interrupts */ |
875 | imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR); | 896 | imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR); |
876 | 897 | ||
877 | /* disable interrupts */ | 898 | /* disable interrupts */ |
878 | imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR); | 899 | imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR); |
879 | |||
880 | imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL); | ||
881 | if (!imxdma) | ||
882 | return -ENOMEM; | ||
883 | 900 | ||
884 | INIT_LIST_HEAD(&imxdma->dma_device.channels); | 901 | INIT_LIST_HEAD(&imxdma->dma_device.channels); |
885 | 902 | ||