author		Linus Torvalds <torvalds@linux-foundation.org>	2012-01-17 21:40:24 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-17 21:40:24 -0500
commit		57f2685c16fa8e0cb86e4bc7c8ac33bfed943819 (patch)
tree		96a42fe632687c8486c250c4805bf1d4c9c34d19 /drivers/dma/mxs-dma.c
parent		488a9d018256dc9f29e041c0360445b6d25eea9a (diff)
parent		e08b881a69d638175bfa99b5af4d72b731633ea7 (diff)
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (53 commits)
ARM: mach-shmobile: specify CHCLR registers on SH7372
dma: shdma: fix runtime PM: clear channel buffers on reset
dma/imx-sdma: save irq flags when use spin_lock in sdma_tx_submit
dmaengine/ste_dma40: clear LNK on channel startup
dmaengine: intel_mid_dma: remove legacy pm interface
ASoC: mxs: correct 'direction' of device_prep_dma_cyclic
dmaengine: intel_mid_dma: error path fix
dmaengine: intel_mid_dma: locking and freeing fixes
mtd: gpmi-nand: move to dma_transfer_direction
mtd: fix compile error for gpmi-nand
mmc: mxs-mmc: fix the dma_transfer_direction migration
dmaengine: add DMA_TRANS_NONE to dma_transfer_direction
dma: mxs-dma: Don't use CLKGATE bits in CTRL0 to disable DMA channels
dma: mxs-dma: make mxs_dma_prep_slave_sg() multi user safe
dma: mxs-dma: Always leave mxs_dma_init() with the clock disabled.
dma: mxs-dma: fix a typo in comment
DMA: PL330: Remove pm_runtime_xxx calls from pl330 probe/remove
video i.MX IPU: Fix display connections
i.MX IPU DMA: Fix wrong burstsize settings
dmaengine/ste_dma40: allow fixed physical channel
...
Fix up conflicts in drivers/dma/{Kconfig,mxs-dma.c,pl330.c}
The conflicts looked pretty trivial, but I'll ask people to verify them.
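
Several of the commits merged above move slave-DMA users from enum dma_data_direction to the new enum dma_transfer_direction (and add DMA_TRANS_NONE), which is also what the mxs-dma diff below does. As a rough orientation only, here is a minimal, hypothetical client-side sketch of the new direction values in use; the function name and the flags value are illustrative and not taken from this merge:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical example: prepare and kick off a peripheral-to-memory
 * slave transfer.  DMA_DEV_TO_MEM replaces the old DMA_FROM_DEVICE
 * value in the prep callbacks (DMA_MEM_TO_DEV replaces DMA_TO_DEVICE).
 */
static int example_start_rx(struct dma_chan *chan,
			    struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;

	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_DEV_TO_MEM, 0);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* start the engine */
	return 0;
}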
Diffstat (limited to 'drivers/dma/mxs-dma.c')
-rw-r--r--	drivers/dma/mxs-dma.c	53
1 files changed, 13 insertions, 40 deletions
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index fc903c0ed234..b06cd4ca626f 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -44,7 +44,6 @@
 #define HW_APBHX_CTRL0				0x000
 #define BM_APBH_CTRL0_APB_BURST8_EN		(1 << 29)
 #define BM_APBH_CTRL0_APB_BURST_EN		(1 << 28)
-#define BP_APBH_CTRL0_CLKGATE_CHANNEL		8
 #define BP_APBH_CTRL0_RESET_CHANNEL		16
 #define HW_APBHX_CTRL1				0x010
 #define HW_APBHX_CTRL2				0x020
@@ -111,6 +110,7 @@ struct mxs_dma_chan {
 	int				chan_irq;
 	struct mxs_dma_ccw		*ccw;
 	dma_addr_t			ccw_phys;
+	int				desc_count;
 	dma_cookie_t			last_completed;
 	enum dma_status			status;
 	unsigned int			flags;
@@ -130,23 +130,6 @@ struct mxs_dma_engine {
 	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
 };
 
-static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
-{
-	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-	int chan_id = mxs_chan->chan.chan_id;
-	int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
-
-	/* enable apbh channel clock */
-	if (dma_is_apbh()) {
-		if (apbh_is_old())
-			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
-				mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
-		else
-			writel(1 << chan_id,
-				mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
-	}
-}
-
 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
 {
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -165,9 +148,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int chan_id = mxs_chan->chan.chan_id;
 
-	/* clkgate needs to be enabled before writing other registers */
-	mxs_dma_clkgate(mxs_chan, 1);
-
 	/* set cmd_addr up */
 	writel(mxs_chan->ccw_phys,
 		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
@@ -178,9 +158,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
 
 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
 {
-	/* disable apbh channel clock */
-	mxs_dma_clkgate(mxs_chan, 0);
-
 	mxs_chan->status = DMA_SUCCESS;
 }
 
@@ -268,7 +245,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
 	/*
 	 * When both completion and error of termination bits set at the
 	 * same time, we do not take it as an error.  IOW, it only becomes
-	 * an error we need to handler here in case of ether it's (1) an bus
+	 * an error we need to handle here in case of either it's (1) a bus
 	 * error or (2) a termination error with no completion.
 	 */
 	stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
@@ -338,10 +315,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
 	if (ret)
 		goto err_clk;
 
-	/* clkgate needs to be enabled for reset to finish */
-	mxs_dma_clkgate(mxs_chan, 1);
 	mxs_dma_reset_chan(mxs_chan);
-	mxs_dma_clkgate(mxs_chan, 0);
 
 	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
 	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
@@ -377,7 +351,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 		struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_data_direction direction,
+		unsigned int sg_len, enum dma_transfer_direction direction,
 		unsigned long append)
 {
 	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
@@ -386,7 +360,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 	struct scatterlist *sg;
 	int i, j;
 	u32 *pio;
-	static int idx;
+	int idx = append ? mxs_chan->desc_count : 0;
 
 	if (mxs_chan->status == DMA_IN_PROGRESS && !append)
 		return NULL;
@@ -417,7 +391,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 		idx = 0;
 	}
 
-	if (direction == DMA_NONE) {
+	if (direction == DMA_TRANS_NONE) {
 		ccw = &mxs_chan->ccw[idx++];
 		pio = (u32 *) sgl;
 
@@ -450,7 +424,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 			ccw->bits |= CCW_CHAIN;
 			ccw->bits |= CCW_HALT_ON_TERM;
 			ccw->bits |= CCW_TERM_FLUSH;
-			ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+			ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
 					MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
 					COMMAND);
 
@@ -462,6 +436,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 			}
 		}
 	}
+	mxs_chan->desc_count = idx;
 
 	return &mxs_chan->desc;
 
@@ -472,7 +447,7 @@ err_out:
 
 static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
 		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
-		size_t period_len, enum dma_data_direction direction)
+		size_t period_len, enum dma_transfer_direction direction)
 {
 	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -515,7 +490,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
 		ccw->bits |= CCW_IRQ;
 		ccw->bits |= CCW_HALT_ON_TERM;
 		ccw->bits |= CCW_TERM_FLUSH;
-		ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+		ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
 				MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
 
 		dma_addr += period_len;
@@ -523,6 +498,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
 
 		i++;
 	}
+	mxs_chan->desc_count = i;
 
 	return &mxs_chan->desc;
 
@@ -539,8 +515,8 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
-		mxs_dma_disable_chan(mxs_chan);
 		mxs_dma_reset_chan(mxs_chan);
+		mxs_dma_disable_chan(mxs_chan);
 		break;
 	case DMA_PAUSE:
 		mxs_dma_pause_chan(mxs_chan);
@@ -580,7 +556,7 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
 
 	ret = clk_prepare_enable(mxs_dma->clk);
 	if (ret)
-		goto err_out;
+		return ret;
 
 	ret = mxs_reset_block(mxs_dma->base);
 	if (ret)
@@ -604,11 +580,8 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
 	writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
 		mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
 
-	clk_disable_unprepare(mxs_dma->clk);
-
-	return 0;
-
 err_out:
+	clk_disable_unprepare(mxs_dma->clk);
 	return ret;
 }
 
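
For context on the mxs_dma_prep_slave_sg() change above: the old function-level "static int idx" was a single index shared by every channel that called into the driver, so concurrent users could corrupt each other's descriptor chains; the fix keeps the running index in the per-channel desc_count field instead. A stripped-down, hypothetical sketch of the two patterns (toy types and names, not the driver code itself):

struct toy_chan {
	int desc_count;			/* per-channel descriptor index, as in the fix */
};

/* Broken pattern: one static index shared by every channel calling this. */
static int next_index_shared(int append)
{
	static int idx;			/* single copy for all callers */

	if (!append)
		idx = 0;
	return idx++;
}

/* Fixed pattern: each channel carries its own running index across calls. */
static int next_index_per_chan(struct toy_chan *chan, int append)
{
	int idx = append ? chan->desc_count : 0;

	chan->desc_count = idx + 1;
	return idx;
}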