author     Linus Torvalds <torvalds@linux-foundation.org>  2012-05-25 12:31:59 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-25 12:31:59 -0400
commit     d5adf235adc8d8d67c10afd43922c92753f6be3c (patch)
tree       18c3cdcbc9a50a8cd00b03d83ec76bad7c7594f8 /drivers/dma/imx-sdma.c
parent     d484864dd96e1830e7689510597707c1df8cd681 (diff)
parent     1dd1ea8eb46a71201943148cc0ed3182cd04e288 (diff)
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
"Nothing exciting this time, odd fixes in a bunch of drivers"
* 'next' of git://git.infradead.org/users/vkoul/slave-dma:
dmaengine: at_hdmac: take maxburst from slave configuration
dmaengine: at_hdmac: remove ATC_DEFAULT_CTRLA constant
dmaengine: at_hdmac: remove some at_dma_slave comments
dma: imx-sdma: make channel0 operations atomic
dmaengine: Fixup dmaengine_prep_slave_single() to be actually useful
dmaengine: Use dma_sg_len(sg) instead of sg->length
dmaengine: Use sg_dma_address instead of sg_phys
DMA: PL330: Remove duplicate header file inclusion
dma: imx-sdma: keep the callbacks invoked in the tasklet
dmaengine: dw_dma: add Device Tree probing capability
dmaengine: dw_dmac: Add clk_{un}prepare() support
dma/amba-pl08x: add support for the Nomadik variant
dma/amba-pl08x: check for terminal count status only
Diffstat (limited to 'drivers/dma/imx-sdma.c')
-rw-r--r--  drivers/dma/imx-sdma.c | 68
1 file changed, 39 insertions(+), 29 deletions(-)
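The imx-sdma portion of this pull (the only file shown here) makes the channel 0 command path atomic: the mutex guarding channel 0 becomes a spinlock, and the completion-based wait in sdma_run_channel() is replaced by polling the SDMA_H_INTR status bit with a bounded busy-wait. Condensed from the new sdma_run_channel0() in the diff below (simplified only by dropping surrounding context; all names are the driver's own), the pattern is:

    /* Poll channel 0 completion instead of sleeping on a completion object,
     * so callers may hold channel_0_lock (now a spinlock) with IRQs off. */
    static int sdma_run_channel0(struct sdma_engine *sdma)
    {
    	int ret;
    	unsigned long timeout = 500;

    	sdma_enable_channel(sdma, 0);

    	/* Busy-wait, roughly 500us, for the channel 0 interrupt status bit. */
    	while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
    		if (timeout-- <= 0)
    			break;
    		udelay(1);
    	}

    	if (ret)
    		writel_relaxed(ret, sdma->regs + SDMA_H_INTR);	/* clear status */
    	else
    		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");

    	return ret ? 0 : -ETIMEDOUT;
    }

Because this wait no longer sleeps, sdma_load_script() and sdma_load_context() can program buffer descriptor 0 under spin_lock_irqsave() rather than a mutex, as the hunks below show.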
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d3e38e28bb6b..a472a29d8497 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,7 +24,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
-#include <linux/wait.h>
+#include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/spinlock.h>
@@ -271,6 +271,7 @@ struct sdma_channel {
 	enum dma_status status;
 	unsigned int chn_count;
 	unsigned int chn_real_count;
+	struct tasklet_struct tasklet;
 };
 
 #define IMX_DMA_SG_LOOP BIT(0)
@@ -323,7 +324,7 @@ struct sdma_engine {
 	dma_addr_t context_phys;
 	struct dma_device dma_device;
 	struct clk *clk;
-	struct mutex channel_0_lock;
+	spinlock_t channel_0_lock;
 	struct sdma_script_start_addrs *script_addrs;
 };
 
@@ -401,19 +402,27 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 }
 
 /*
- * sdma_run_channel - run a channel and wait till it's done
+ * sdma_run_channel0 - run a channel and wait till it's done
  */
-static int sdma_run_channel(struct sdma_channel *sdmac)
+static int sdma_run_channel0(struct sdma_engine *sdma)
 {
-	struct sdma_engine *sdma = sdmac->sdma;
-	int channel = sdmac->channel;
 	int ret;
+	unsigned long timeout = 500;
 
-	init_completion(&sdmac->done);
+	sdma_enable_channel(sdma, 0);
 
-	sdma_enable_channel(sdma, channel);
+	while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
+		if (timeout-- <= 0)
+			break;
+		udelay(1);
+	}
 
-	ret = wait_for_completion_timeout(&sdmac->done, HZ);
+	if (ret) {
+		/* Clear the interrupt status */
+		writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
+	} else {
+		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
+	}
 
 	return ret ? 0 : -ETIMEDOUT;
 }
@@ -425,17 +434,17 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 	void *buf_virt;
 	dma_addr_t buf_phys;
 	int ret;
-
-	mutex_lock(&sdma->channel_0_lock);
+	unsigned long flags;
 
 	buf_virt = dma_alloc_coherent(NULL,
 			size,
 			&buf_phys, GFP_KERNEL);
 	if (!buf_virt) {
-		ret = -ENOMEM;
-		goto err_out;
+		return -ENOMEM;
 	}
 
+	spin_lock_irqsave(&sdma->channel_0_lock, flags);
+
 	bd0->mode.command = C0_SETPM;
 	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
 	bd0->mode.count = size / 2;
@@ -444,12 +453,11 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 
 	memcpy(buf_virt, buf, size);
 
-	ret = sdma_run_channel(&sdma->channel[0]);
+	ret = sdma_run_channel0(sdma);
 
-	dma_free_coherent(NULL, size, buf_virt, buf_phys);
+	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
-err_out:
-	mutex_unlock(&sdma->channel_0_lock);
+	dma_free_coherent(NULL, size, buf_virt, buf_phys);
 
 	return ret;
 }
@@ -534,13 +542,11 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 		sdmac->desc.callback(sdmac->desc.callback_param);
 }
 
-static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
+static void sdma_tasklet(unsigned long data)
 {
-	complete(&sdmac->done);
+	struct sdma_channel *sdmac = (struct sdma_channel *) data;
 
-	/* not interested in channel 0 interrupts */
-	if (sdmac->channel == 0)
-		return;
+	complete(&sdmac->done);
 
 	if (sdmac->flags & IMX_DMA_SG_LOOP)
 		sdma_handle_channel_loop(sdmac);
@@ -554,13 +560,15 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 	unsigned long stat;
 
 	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+	/* not interested in channel 0 interrupts */
+	stat &= ~1;
 	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
 
 	while (stat) {
 		int channel = fls(stat) - 1;
 		struct sdma_channel *sdmac = &sdma->channel[channel];
 
-		mxc_sdma_handle_channel(sdmac);
+		tasklet_schedule(&sdmac->tasklet);
 
 		__clear_bit(channel, &stat);
 	}
@@ -659,6 +667,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	struct sdma_context_data *context = sdma->context;
 	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
 	int ret;
+	unsigned long flags;
 
 	if (sdmac->direction == DMA_DEV_TO_MEM) {
 		load_address = sdmac->pc_from_device;
@@ -676,7 +685,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
 	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
 
-	mutex_lock(&sdma->channel_0_lock);
+	spin_lock_irqsave(&sdma->channel_0_lock, flags);
 
 	memset(context, 0, sizeof(*context));
 	context->channel_state.pc = load_address;
@@ -695,10 +704,9 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	bd0->mode.count = sizeof(*context) / 4;
 	bd0->buffer_addr = sdma->context_phys;
 	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
+	ret = sdma_run_channel0(sdma);
 
-	ret = sdma_run_channel(&sdma->channel[0]);
-
-	mutex_unlock(&sdma->channel_0_lock);
+	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
 	return ret;
 }
@@ -938,7 +946,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
 		bd->buffer_addr = sg->dma_address;
 
-		count = sg->length;
+		count = sg_dma_len(sg);
 
 		if (count > 0xffff) {
 			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
@@ -1297,7 +1305,7 @@ static int __init sdma_probe(struct platform_device *pdev)
 	if (!sdma)
 		return -ENOMEM;
 
-	mutex_init(&sdma->channel_0_lock);
+	spin_lock_init(&sdma->channel_0_lock);
 
 	sdma->dev = &pdev->dev;
 
@@ -1359,6 +1367,8 @@ static int __init sdma_probe(struct platform_device *pdev)
 		dma_cookie_init(&sdmac->chan);
 		sdmac->channel = i;
 
+		tasklet_init(&sdmac->tasklet, sdma_tasklet,
+			     (unsigned long) sdmac);
 		/*
 		 * Add the channel to the DMAC list. Do not add channel 0 though
 		 * because we need it internally in the SDMA driver. This also means
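For the interrupt path, the net effect of the hunks above is that per-channel completion work moves out of hard IRQ context into a tasklet: channel 0 is masked off and handled synchronously by the polling shown earlier, while every other channel's status bit only schedules that channel's tasklet. A condensed view, assembled from the sdma_probe() and sdma_int_handler() hunks above (simplified, surrounding code and error handling omitted):

    /* Probe: give each channel its own tasklet running sdma_tasklet(). */
    tasklet_init(&sdmac->tasklet, sdma_tasklet, (unsigned long) sdmac);

    /* Hard IRQ handler: acknowledge status bits and defer the real work. */
    stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
    stat &= ~1;			/* not interested in channel 0 interrupts */
    writel_relaxed(stat, sdma->regs + SDMA_H_INTR);

    while (stat) {
    	int channel = fls(stat) - 1;

    	tasklet_schedule(&sdma->channel[channel].tasklet);
    	__clear_bit(channel, &stat);
    }

The descriptor callback itself then runs later from sdma_tasklet(), which completes sdmac->done and dispatches to the loop or normal handler.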