diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-29 18:34:57 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-29 18:34:57 -0400 |
commit | ef08e78268423fc4d7fbc3e54bd9a67fc8da7cc5 (patch) | |
tree | d0561d3ef89c9cd277a38168e33850666cbd33c4 /drivers/dma/imx-sdma.c | |
parent | 71db34fc4330f7c784397acb9f1e6ee7f7b32eb2 (diff) | |
parent | 5b2e02e401deb44e7f5befe19404d8b2688efea4 (diff) |
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine update from Vinod Koul:
"This includes the cookie cleanup by Russell, the addition of context
parameter for dmaengine APIs, more arm dmaengine driver cleanup by
moving code to dmaengine, this time for imx by Javier and pl330 by
Boojin along with the usual driver fixes."
Fix up some fairly trivial conflicts with various other cleanups.
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (67 commits)
dmaengine: imx: fix the build failure on x86_64
dmaengine: i.MX: Fix merge of cookie branch.
dmaengine: i.MX: Add support for interleaved transfers.
dmaengine: imx-dma: use 'dev_dbg' and 'dev_warn' for messages.
dmaengine: imx-dma: remove 'imx_dmav1_baseaddr' and 'dma_clk'.
dmaengine: imx-dma: remove unused arg of imxdma_sg_next.
dmaengine: imx-dma: remove internal structure.
dmaengine: imx-dma: remove 'resbytes' field of 'internal' structure.
dmaengine: imx-dma: remove 'in_use' field of 'internal' structure.
dmaengine: imx-dma: remove sg member from internal structure.
dmaengine: imx-dma: remove 'imxdma_setup_sg_hw' function.
dmaengine: imx-dma: remove 'imxdma_config_channel_hw' function.
dmaengine: imx-dma: remove 'imxdma_setup_mem2mem_hw' function.
dmaengine: imx-dma: remove dma_mode member of internal structure.
dmaengine: imx-dma: remove data member from internal structure.
dmaengine: imx-dma: merge old dma-v1.c with imx-dma.c
dmaengine: at_hdmac: add slave config operation
dmaengine: add context parameter to prep_slave_sg and prep_dma_cyclic
dmaengine/dma_slave: introduce inline wrappers
dma: imx-sdma: Treat firmware messages as warnings instead of errors
...
Diffstat (limited to 'drivers/dma/imx-sdma.c')
-rw-r--r-- | drivers/dma/imx-sdma.c | 187 |
1 file changed, 88 insertions, 99 deletions
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 63540d3e2153..d3e38e28bb6b 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/types.h> | 22 | #include <linux/types.h> |
23 | #include <linux/bitops.h> | ||
23 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
24 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
25 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
@@ -41,6 +42,8 @@ | |||
41 | #include <mach/dma.h> | 42 | #include <mach/dma.h> |
42 | #include <mach/hardware.h> | 43 | #include <mach/hardware.h> |
43 | 44 | ||
45 | #include "dmaengine.h" | ||
46 | |||
44 | /* SDMA registers */ | 47 | /* SDMA registers */ |
45 | #define SDMA_H_C0PTR 0x000 | 48 | #define SDMA_H_C0PTR 0x000 |
46 | #define SDMA_H_INTR 0x004 | 49 | #define SDMA_H_INTR 0x004 |
@@ -259,19 +262,18 @@ struct sdma_channel { | |||
259 | unsigned int pc_from_device, pc_to_device; | 262 | unsigned int pc_from_device, pc_to_device; |
260 | unsigned long flags; | 263 | unsigned long flags; |
261 | dma_addr_t per_address; | 264 | dma_addr_t per_address; |
262 | u32 event_mask0, event_mask1; | 265 | unsigned long event_mask[2]; |
263 | u32 watermark_level; | 266 | unsigned long watermark_level; |
264 | u32 shp_addr, per_addr; | 267 | u32 shp_addr, per_addr; |
265 | struct dma_chan chan; | 268 | struct dma_chan chan; |
266 | spinlock_t lock; | 269 | spinlock_t lock; |
267 | struct dma_async_tx_descriptor desc; | 270 | struct dma_async_tx_descriptor desc; |
268 | dma_cookie_t last_completed; | ||
269 | enum dma_status status; | 271 | enum dma_status status; |
270 | unsigned int chn_count; | 272 | unsigned int chn_count; |
271 | unsigned int chn_real_count; | 273 | unsigned int chn_real_count; |
272 | }; | 274 | }; |
273 | 275 | ||
274 | #define IMX_DMA_SG_LOOP (1 << 0) | 276 | #define IMX_DMA_SG_LOOP BIT(0) |
275 | 277 | ||
276 | #define MAX_DMA_CHANNELS 32 | 278 | #define MAX_DMA_CHANNELS 32 |
277 | #define MXC_SDMA_DEFAULT_PRIORITY 1 | 279 | #define MXC_SDMA_DEFAULT_PRIORITY 1 |
@@ -345,9 +347,9 @@ static const struct of_device_id sdma_dt_ids[] = { | |||
345 | }; | 347 | }; |
346 | MODULE_DEVICE_TABLE(of, sdma_dt_ids); | 348 | MODULE_DEVICE_TABLE(of, sdma_dt_ids); |
347 | 349 | ||
348 | #define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */ | 350 | #define SDMA_H_CONFIG_DSPDMA BIT(12) /* indicates if the DSPDMA is used */ |
349 | #define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */ | 351 | #define SDMA_H_CONFIG_RTD_PINS BIT(11) /* indicates if Real-Time Debug pins are enabled */ |
350 | #define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */ | 352 | #define SDMA_H_CONFIG_ACR BIT(4) /* indicates if AHB freq /core freq = 2 or 1 */ |
351 | #define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/ | 353 | #define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/ |
352 | 354 | ||
353 | static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) | 355 | static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) |
@@ -362,37 +364,42 @@ static int sdma_config_ownership(struct sdma_channel *sdmac, | |||
362 | { | 364 | { |
363 | struct sdma_engine *sdma = sdmac->sdma; | 365 | struct sdma_engine *sdma = sdmac->sdma; |
364 | int channel = sdmac->channel; | 366 | int channel = sdmac->channel; |
365 | u32 evt, mcu, dsp; | 367 | unsigned long evt, mcu, dsp; |
366 | 368 | ||
367 | if (event_override && mcu_override && dsp_override) | 369 | if (event_override && mcu_override && dsp_override) |
368 | return -EINVAL; | 370 | return -EINVAL; |
369 | 371 | ||
370 | evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR); | 372 | evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR); |
371 | mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR); | 373 | mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR); |
372 | dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR); | 374 | dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR); |
373 | 375 | ||
374 | if (dsp_override) | 376 | if (dsp_override) |
375 | dsp &= ~(1 << channel); | 377 | __clear_bit(channel, &dsp); |
376 | else | 378 | else |
377 | dsp |= (1 << channel); | 379 | __set_bit(channel, &dsp); |
378 | 380 | ||
379 | if (event_override) | 381 | if (event_override) |
380 | evt &= ~(1 << channel); | 382 | __clear_bit(channel, &evt); |
381 | else | 383 | else |
382 | evt |= (1 << channel); | 384 | __set_bit(channel, &evt); |
383 | 385 | ||
384 | if (mcu_override) | 386 | if (mcu_override) |
385 | mcu &= ~(1 << channel); | 387 | __clear_bit(channel, &mcu); |
386 | else | 388 | else |
387 | mcu |= (1 << channel); | 389 | __set_bit(channel, &mcu); |
388 | 390 | ||
389 | __raw_writel(evt, sdma->regs + SDMA_H_EVTOVR); | 391 | writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR); |
390 | __raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR); | 392 | writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR); |
391 | __raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR); | 393 | writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR); |
392 | 394 | ||
393 | return 0; | 395 | return 0; |
394 | } | 396 | } |
395 | 397 | ||
398 | static void sdma_enable_channel(struct sdma_engine *sdma, int channel) | ||
399 | { | ||
400 | writel(BIT(channel), sdma->regs + SDMA_H_START); | ||
401 | } | ||
402 | |||
396 | /* | 403 | /* |
397 | * sdma_run_channel - run a channel and wait till it's done | 404 | * sdma_run_channel - run a channel and wait till it's done |
398 | */ | 405 | */ |
@@ -404,7 +411,7 @@ static int sdma_run_channel(struct sdma_channel *sdmac) | |||
404 | 411 | ||
405 | init_completion(&sdmac->done); | 412 | init_completion(&sdmac->done); |
406 | 413 | ||
407 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); | 414 | sdma_enable_channel(sdma, channel); |
408 | 415 | ||
409 | ret = wait_for_completion_timeout(&sdmac->done, HZ); | 416 | ret = wait_for_completion_timeout(&sdmac->done, HZ); |
410 | 417 | ||
@@ -451,12 +458,12 @@ static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event) | |||
451 | { | 458 | { |
452 | struct sdma_engine *sdma = sdmac->sdma; | 459 | struct sdma_engine *sdma = sdmac->sdma; |
453 | int channel = sdmac->channel; | 460 | int channel = sdmac->channel; |
454 | u32 val; | 461 | unsigned long val; |
455 | u32 chnenbl = chnenbl_ofs(sdma, event); | 462 | u32 chnenbl = chnenbl_ofs(sdma, event); |
456 | 463 | ||
457 | val = __raw_readl(sdma->regs + chnenbl); | 464 | val = readl_relaxed(sdma->regs + chnenbl); |
458 | val |= (1 << channel); | 465 | __set_bit(channel, &val); |
459 | __raw_writel(val, sdma->regs + chnenbl); | 466 | writel_relaxed(val, sdma->regs + chnenbl); |
460 | } | 467 | } |
461 | 468 | ||
462 | static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) | 469 | static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) |
@@ -464,11 +471,11 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) | |||
464 | struct sdma_engine *sdma = sdmac->sdma; | 471 | struct sdma_engine *sdma = sdmac->sdma; |
465 | int channel = sdmac->channel; | 472 | int channel = sdmac->channel; |
466 | u32 chnenbl = chnenbl_ofs(sdma, event); | 473 | u32 chnenbl = chnenbl_ofs(sdma, event); |
467 | u32 val; | 474 | unsigned long val; |
468 | 475 | ||
469 | val = __raw_readl(sdma->regs + chnenbl); | 476 | val = readl_relaxed(sdma->regs + chnenbl); |
470 | val &= ~(1 << channel); | 477 | __clear_bit(channel, &val); |
471 | __raw_writel(val, sdma->regs + chnenbl); | 478 | writel_relaxed(val, sdma->regs + chnenbl); |
472 | } | 479 | } |
473 | 480 | ||
474 | static void sdma_handle_channel_loop(struct sdma_channel *sdmac) | 481 | static void sdma_handle_channel_loop(struct sdma_channel *sdmac) |
@@ -522,7 +529,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) | |||
522 | else | 529 | else |
523 | sdmac->status = DMA_SUCCESS; | 530 | sdmac->status = DMA_SUCCESS; |
524 | 531 | ||
525 | sdmac->last_completed = sdmac->desc.cookie; | 532 | dma_cookie_complete(&sdmac->desc); |
526 | if (sdmac->desc.callback) | 533 | if (sdmac->desc.callback) |
527 | sdmac->desc.callback(sdmac->desc.callback_param); | 534 | sdmac->desc.callback(sdmac->desc.callback_param); |
528 | } | 535 | } |
@@ -544,10 +551,10 @@ static void mxc_sdma_handle_channel(struct sdma_channel *sdmac) | |||
544 | static irqreturn_t sdma_int_handler(int irq, void *dev_id) | 551 | static irqreturn_t sdma_int_handler(int irq, void *dev_id) |
545 | { | 552 | { |
546 | struct sdma_engine *sdma = dev_id; | 553 | struct sdma_engine *sdma = dev_id; |
547 | u32 stat; | 554 | unsigned long stat; |
548 | 555 | ||
549 | stat = __raw_readl(sdma->regs + SDMA_H_INTR); | 556 | stat = readl_relaxed(sdma->regs + SDMA_H_INTR); |
550 | __raw_writel(stat, sdma->regs + SDMA_H_INTR); | 557 | writel_relaxed(stat, sdma->regs + SDMA_H_INTR); |
551 | 558 | ||
552 | while (stat) { | 559 | while (stat) { |
553 | int channel = fls(stat) - 1; | 560 | int channel = fls(stat) - 1; |
@@ -555,7 +562,7 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id) | |||
555 | 562 | ||
556 | mxc_sdma_handle_channel(sdmac); | 563 | mxc_sdma_handle_channel(sdmac); |
557 | 564 | ||
558 | stat &= ~(1 << channel); | 565 | __clear_bit(channel, &stat); |
559 | } | 566 | } |
560 | 567 | ||
561 | return IRQ_HANDLED; | 568 | return IRQ_HANDLED; |
@@ -663,11 +670,11 @@ static int sdma_load_context(struct sdma_channel *sdmac) | |||
663 | return load_address; | 670 | return load_address; |
664 | 671 | ||
665 | dev_dbg(sdma->dev, "load_address = %d\n", load_address); | 672 | dev_dbg(sdma->dev, "load_address = %d\n", load_address); |
666 | dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level); | 673 | dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level); |
667 | dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr); | 674 | dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr); |
668 | dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr); | 675 | dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr); |
669 | dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0); | 676 | dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]); |
670 | dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1); | 677 | dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]); |
671 | 678 | ||
672 | mutex_lock(&sdma->channel_0_lock); | 679 | mutex_lock(&sdma->channel_0_lock); |
673 | 680 | ||
@@ -677,8 +684,8 @@ static int sdma_load_context(struct sdma_channel *sdmac) | |||
677 | /* Send by context the event mask,base address for peripheral | 684 | /* Send by context the event mask,base address for peripheral |
678 | * and watermark level | 685 | * and watermark level |
679 | */ | 686 | */ |
680 | context->gReg[0] = sdmac->event_mask1; | 687 | context->gReg[0] = sdmac->event_mask[1]; |
681 | context->gReg[1] = sdmac->event_mask0; | 688 | context->gReg[1] = sdmac->event_mask[0]; |
682 | context->gReg[2] = sdmac->per_addr; | 689 | context->gReg[2] = sdmac->per_addr; |
683 | context->gReg[6] = sdmac->shp_addr; | 690 | context->gReg[6] = sdmac->shp_addr; |
684 | context->gReg[7] = sdmac->watermark_level; | 691 | context->gReg[7] = sdmac->watermark_level; |
@@ -701,7 +708,7 @@ static void sdma_disable_channel(struct sdma_channel *sdmac) | |||
701 | struct sdma_engine *sdma = sdmac->sdma; | 708 | struct sdma_engine *sdma = sdmac->sdma; |
702 | int channel = sdmac->channel; | 709 | int channel = sdmac->channel; |
703 | 710 | ||
704 | __raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP); | 711 | writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP); |
705 | sdmac->status = DMA_ERROR; | 712 | sdmac->status = DMA_ERROR; |
706 | } | 713 | } |
707 | 714 | ||
@@ -711,13 +718,13 @@ static int sdma_config_channel(struct sdma_channel *sdmac) | |||
711 | 718 | ||
712 | sdma_disable_channel(sdmac); | 719 | sdma_disable_channel(sdmac); |
713 | 720 | ||
714 | sdmac->event_mask0 = 0; | 721 | sdmac->event_mask[0] = 0; |
715 | sdmac->event_mask1 = 0; | 722 | sdmac->event_mask[1] = 0; |
716 | sdmac->shp_addr = 0; | 723 | sdmac->shp_addr = 0; |
717 | sdmac->per_addr = 0; | 724 | sdmac->per_addr = 0; |
718 | 725 | ||
719 | if (sdmac->event_id0) { | 726 | if (sdmac->event_id0) { |
720 | if (sdmac->event_id0 > 32) | 727 | if (sdmac->event_id0 >= sdmac->sdma->num_events) |
721 | return -EINVAL; | 728 | return -EINVAL; |
722 | sdma_event_enable(sdmac, sdmac->event_id0); | 729 | sdma_event_enable(sdmac, sdmac->event_id0); |
723 | } | 730 | } |
@@ -740,15 +747,14 @@ static int sdma_config_channel(struct sdma_channel *sdmac) | |||
740 | (sdmac->peripheral_type != IMX_DMATYPE_DSP)) { | 747 | (sdmac->peripheral_type != IMX_DMATYPE_DSP)) { |
741 | /* Handle multiple event channels differently */ | 748 | /* Handle multiple event channels differently */ |
742 | if (sdmac->event_id1) { | 749 | if (sdmac->event_id1) { |
743 | sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32); | 750 | sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32); |
744 | if (sdmac->event_id1 > 31) | 751 | if (sdmac->event_id1 > 31) |
745 | sdmac->watermark_level |= 1 << 31; | 752 | __set_bit(31, &sdmac->watermark_level); |
746 | sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32); | 753 | sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32); |
747 | if (sdmac->event_id0 > 31) | 754 | if (sdmac->event_id0 > 31) |
748 | sdmac->watermark_level |= 1 << 30; | 755 | __set_bit(30, &sdmac->watermark_level); |
749 | } else { | 756 | } else { |
750 | sdmac->event_mask0 = 1 << sdmac->event_id0; | 757 | __set_bit(sdmac->event_id0, sdmac->event_mask); |
751 | sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32); | ||
752 | } | 758 | } |
753 | /* Watermark Level */ | 759 | /* Watermark Level */ |
754 | sdmac->watermark_level |= sdmac->watermark_level; | 760 | sdmac->watermark_level |= sdmac->watermark_level; |
@@ -774,7 +780,7 @@ static int sdma_set_channel_priority(struct sdma_channel *sdmac, | |||
774 | return -EINVAL; | 780 | return -EINVAL; |
775 | } | 781 | } |
776 | 782 | ||
777 | __raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel); | 783 | writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel); |
778 | 784 | ||
779 | return 0; | 785 | return 0; |
780 | } | 786 | } |
@@ -796,8 +802,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac) | |||
796 | sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; | 802 | sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; |
797 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; | 803 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; |
798 | 804 | ||
799 | clk_enable(sdma->clk); | ||
800 | |||
801 | sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); | 805 | sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); |
802 | 806 | ||
803 | init_completion(&sdmac->done); | 807 | init_completion(&sdmac->done); |
@@ -810,24 +814,6 @@ out: | |||
810 | return ret; | 814 | return ret; |
811 | } | 815 | } |
812 | 816 | ||
813 | static void sdma_enable_channel(struct sdma_engine *sdma, int channel) | ||
814 | { | ||
815 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); | ||
816 | } | ||
817 | |||
818 | static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac) | ||
819 | { | ||
820 | dma_cookie_t cookie = sdmac->chan.cookie; | ||
821 | |||
822 | if (++cookie < 0) | ||
823 | cookie = 1; | ||
824 | |||
825 | sdmac->chan.cookie = cookie; | ||
826 | sdmac->desc.cookie = cookie; | ||
827 | |||
828 | return cookie; | ||
829 | } | ||
830 | |||
831 | static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) | 817 | static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) |
832 | { | 818 | { |
833 | return container_of(chan, struct sdma_channel, chan); | 819 | return container_of(chan, struct sdma_channel, chan); |
@@ -837,14 +823,11 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
837 | { | 823 | { |
838 | unsigned long flags; | 824 | unsigned long flags; |
839 | struct sdma_channel *sdmac = to_sdma_chan(tx->chan); | 825 | struct sdma_channel *sdmac = to_sdma_chan(tx->chan); |
840 | struct sdma_engine *sdma = sdmac->sdma; | ||
841 | dma_cookie_t cookie; | 826 | dma_cookie_t cookie; |
842 | 827 | ||
843 | spin_lock_irqsave(&sdmac->lock, flags); | 828 | spin_lock_irqsave(&sdmac->lock, flags); |
844 | 829 | ||
845 | cookie = sdma_assign_cookie(sdmac); | 830 | cookie = dma_cookie_assign(tx); |
846 | |||
847 | sdma_enable_channel(sdma, sdmac->channel); | ||
848 | 831 | ||
849 | spin_unlock_irqrestore(&sdmac->lock, flags); | 832 | spin_unlock_irqrestore(&sdmac->lock, flags); |
850 | 833 | ||
@@ -875,11 +858,14 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan) | |||
875 | 858 | ||
876 | sdmac->peripheral_type = data->peripheral_type; | 859 | sdmac->peripheral_type = data->peripheral_type; |
877 | sdmac->event_id0 = data->dma_request; | 860 | sdmac->event_id0 = data->dma_request; |
878 | ret = sdma_set_channel_priority(sdmac, prio); | 861 | |
862 | clk_enable(sdmac->sdma->clk); | ||
863 | |||
864 | ret = sdma_request_channel(sdmac); | ||
879 | if (ret) | 865 | if (ret) |
880 | return ret; | 866 | return ret; |
881 | 867 | ||
882 | ret = sdma_request_channel(sdmac); | 868 | ret = sdma_set_channel_priority(sdmac, prio); |
883 | if (ret) | 869 | if (ret) |
884 | return ret; | 870 | return ret; |
885 | 871 | ||
@@ -916,7 +902,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan) | |||
916 | static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | 902 | static struct dma_async_tx_descriptor *sdma_prep_slave_sg( |
917 | struct dma_chan *chan, struct scatterlist *sgl, | 903 | struct dma_chan *chan, struct scatterlist *sgl, |
918 | unsigned int sg_len, enum dma_transfer_direction direction, | 904 | unsigned int sg_len, enum dma_transfer_direction direction, |
919 | unsigned long flags) | 905 | unsigned long flags, void *context) |
920 | { | 906 | { |
921 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 907 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
922 | struct sdma_engine *sdma = sdmac->sdma; | 908 | struct sdma_engine *sdma = sdmac->sdma; |
@@ -1014,7 +1000,8 @@ err_out: | |||
1014 | 1000 | ||
1015 | static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | 1001 | static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( |
1016 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 1002 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
1017 | size_t period_len, enum dma_transfer_direction direction) | 1003 | size_t period_len, enum dma_transfer_direction direction, |
1004 | void *context) | ||
1018 | { | 1005 | { |
1019 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1006 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1020 | struct sdma_engine *sdma = sdmac->sdma; | 1007 | struct sdma_engine *sdma = sdmac->sdma; |
@@ -1128,7 +1115,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, | |||
1128 | 1115 | ||
1129 | last_used = chan->cookie; | 1116 | last_used = chan->cookie; |
1130 | 1117 | ||
1131 | dma_set_tx_state(txstate, sdmac->last_completed, last_used, | 1118 | dma_set_tx_state(txstate, chan->completed_cookie, last_used, |
1132 | sdmac->chn_count - sdmac->chn_real_count); | 1119 | sdmac->chn_count - sdmac->chn_real_count); |
1133 | 1120 | ||
1134 | return sdmac->status; | 1121 | return sdmac->status; |
@@ -1136,9 +1123,11 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, | |||
1136 | 1123 | ||
1137 | static void sdma_issue_pending(struct dma_chan *chan) | 1124 | static void sdma_issue_pending(struct dma_chan *chan) |
1138 | { | 1125 | { |
1139 | /* | 1126 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1140 | * Nothing to do. We only have a single descriptor | 1127 | struct sdma_engine *sdma = sdmac->sdma; |
1141 | */ | 1128 | |
1129 | if (sdmac->status == DMA_IN_PROGRESS) | ||
1130 | sdma_enable_channel(sdma, sdmac->channel); | ||
1142 | } | 1131 | } |
1143 | 1132 | ||
1144 | #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 | 1133 | #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 |
@@ -1230,7 +1219,7 @@ static int __init sdma_init(struct sdma_engine *sdma) | |||
1230 | clk_enable(sdma->clk); | 1219 | clk_enable(sdma->clk); |
1231 | 1220 | ||
1232 | /* Be sure SDMA has not started yet */ | 1221 | /* Be sure SDMA has not started yet */ |
1233 | __raw_writel(0, sdma->regs + SDMA_H_C0PTR); | 1222 | writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); |
1234 | 1223 | ||
1235 | sdma->channel_control = dma_alloc_coherent(NULL, | 1224 | sdma->channel_control = dma_alloc_coherent(NULL, |
1236 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + | 1225 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + |
@@ -1253,11 +1242,11 @@ static int __init sdma_init(struct sdma_engine *sdma) | |||
1253 | 1242 | ||
1254 | /* disable all channels */ | 1243 | /* disable all channels */ |
1255 | for (i = 0; i < sdma->num_events; i++) | 1244 | for (i = 0; i < sdma->num_events; i++) |
1256 | __raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i)); | 1245 | writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i)); |
1257 | 1246 | ||
1258 | /* All channels have priority 0 */ | 1247 | /* All channels have priority 0 */ |
1259 | for (i = 0; i < MAX_DMA_CHANNELS; i++) | 1248 | for (i = 0; i < MAX_DMA_CHANNELS; i++) |
1260 | __raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); | 1249 | writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); |
1261 | 1250 | ||
1262 | ret = sdma_request_channel(&sdma->channel[0]); | 1251 | ret = sdma_request_channel(&sdma->channel[0]); |
1263 | if (ret) | 1252 | if (ret) |
@@ -1266,16 +1255,16 @@ static int __init sdma_init(struct sdma_engine *sdma) | |||
1266 | sdma_config_ownership(&sdma->channel[0], false, true, false); | 1255 | sdma_config_ownership(&sdma->channel[0], false, true, false); |
1267 | 1256 | ||
1268 | /* Set Command Channel (Channel Zero) */ | 1257 | /* Set Command Channel (Channel Zero) */ |
1269 | __raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR); | 1258 | writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR); |
1270 | 1259 | ||
1271 | /* Set bits of CONFIG register but with static context switching */ | 1260 | /* Set bits of CONFIG register but with static context switching */ |
1272 | /* FIXME: Check whether to set ACR bit depending on clock ratios */ | 1261 | /* FIXME: Check whether to set ACR bit depending on clock ratios */ |
1273 | __raw_writel(0, sdma->regs + SDMA_H_CONFIG); | 1262 | writel_relaxed(0, sdma->regs + SDMA_H_CONFIG); |
1274 | 1263 | ||
1275 | __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR); | 1264 | writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); |
1276 | 1265 | ||
1277 | /* Set bits of CONFIG register with given context switching mode */ | 1266 | /* Set bits of CONFIG register with given context switching mode */ |
1278 | __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); | 1267 | writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); |
1279 | 1268 | ||
1280 | /* Initializes channel's priorities */ | 1269 | /* Initializes channel's priorities */ |
1281 | sdma_set_channel_priority(&sdma->channel[0], 7); | 1270 | sdma_set_channel_priority(&sdma->channel[0], 7); |
@@ -1367,6 +1356,7 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1367 | spin_lock_init(&sdmac->lock); | 1356 | spin_lock_init(&sdmac->lock); |
1368 | 1357 | ||
1369 | sdmac->chan.device = &sdma->dma_device; | 1358 | sdmac->chan.device = &sdma->dma_device; |
1359 | dma_cookie_init(&sdmac->chan); | ||
1370 | sdmac->channel = i; | 1360 | sdmac->channel = i; |
1371 | 1361 | ||
1372 | /* | 1362 | /* |
@@ -1387,7 +1377,9 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1387 | sdma_add_scripts(sdma, pdata->script_addrs); | 1377 | sdma_add_scripts(sdma, pdata->script_addrs); |
1388 | 1378 | ||
1389 | if (pdata) { | 1379 | if (pdata) { |
1390 | sdma_get_firmware(sdma, pdata->fw_name); | 1380 | ret = sdma_get_firmware(sdma, pdata->fw_name); |
1381 | if (ret) | ||
1382 | dev_warn(&pdev->dev, "failed to get firmware from platform data\n"); | ||
1391 | } else { | 1383 | } else { |
1392 | /* | 1384 | /* |
1393 | * Because that device tree does not encode ROM script address, | 1385 | * Because that device tree does not encode ROM script address, |
@@ -1396,15 +1388,12 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1396 | */ | 1388 | */ |
1397 | ret = of_property_read_string(np, "fsl,sdma-ram-script-name", | 1389 | ret = of_property_read_string(np, "fsl,sdma-ram-script-name", |
1398 | &fw_name); | 1390 | &fw_name); |
1399 | if (ret) { | 1391 | if (ret) |
1400 | dev_err(&pdev->dev, "failed to get firmware name\n"); | 1392 | dev_warn(&pdev->dev, "failed to get firmware name\n"); |
1401 | goto err_init; | 1393 | else { |
1402 | } | 1394 | ret = sdma_get_firmware(sdma, fw_name); |
1403 | 1395 | if (ret) | |
1404 | ret = sdma_get_firmware(sdma, fw_name); | 1396 | dev_warn(&pdev->dev, "failed to get firmware from device tree\n"); |
1405 | if (ret) { | ||
1406 | dev_err(&pdev->dev, "failed to get firmware\n"); | ||
1407 | goto err_init; | ||
1408 | } | 1397 | } |
1409 | } | 1398 | } |
1410 | 1399 | ||