path: root/drivers/dma/imx-sdma.c
author    Richard Zhao <richard.zhao@freescale.com>  2012-05-11 03:14:27 -0400
committer Vinod Koul <vinod.koul@linux.intel.com>    2012-05-11 04:30:38 -0400
commit    2ccaef0520d18d0072153f090d4110b4075c332c (patch)
tree      c4fcc9cbac1f7448263701ae5c5926f28351935c /drivers/dma/imx-sdma.c
parent    922ee08baad2052d0759f100e026d49798c51fef (diff)
dma: imx-sdma: make channel0 operations atomic
device_prep_dma_cyclic may be called from an audio trigger function, which is
atomic context, so make it atomic too:
- change the channel0 lock to a spinlock;
- use polling to wait for channel0 to finish running.

Signed-off-by: Richard Zhao <richard.zhao@freescale.com>
Acked-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
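Editor's note: the heart of the change is swapping a sleeping wait (completion plus mutex) for a busy-wait that is legal in atomic context. Below is a minimal sketch of that pattern; poll_cmd_done, status_reg, and done_bit are hypothetical names, not the driver's, and only the shape of the loop mirrors the patch:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/delay.h>

/*
 * Illustrative sketch only: busy-poll a status bit instead of sleeping
 * on a completion, so the caller may hold a spinlock with IRQs off.
 */
static int poll_cmd_done(void __iomem *status_reg, u32 done_bit)
{
	unsigned long timeout = 500;		/* ~500 us worst case */
	u32 stat;

	while (!((stat = readl_relaxed(status_reg)) & done_bit)) {
		if (!timeout--)
			return -ETIMEDOUT;	/* give up; never schedule */
		udelay(1);			/* spin, do not sleep */
	}

	/* write-1-to-clear acknowledge of the done bit */
	writel_relaxed(stat & done_bit, status_reg);
	return 0;
}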
Diffstat (limited to 'drivers/dma/imx-sdma.c')
-rw-r--r--	drivers/dma/imx-sdma.c	57
1 file changed, 31 insertions(+), 26 deletions(-)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index cd0619a897ff..a472a29d8497 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,7 +24,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
-#include <linux/wait.h>
+#include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/spinlock.h>
@@ -324,7 +324,7 @@ struct sdma_engine {
 	dma_addr_t context_phys;
 	struct dma_device dma_device;
 	struct clk *clk;
-	struct mutex channel_0_lock;
+	spinlock_t channel_0_lock;
 	struct sdma_script_start_addrs *script_addrs;
 };
 
@@ -402,19 +402,27 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 }
 
 /*
- * sdma_run_channel - run a channel and wait till it's done
+ * sdma_run_channel0 - run a channel and wait till it's done
  */
-static int sdma_run_channel(struct sdma_channel *sdmac)
+static int sdma_run_channel0(struct sdma_engine *sdma)
 {
-	struct sdma_engine *sdma = sdmac->sdma;
-	int channel = sdmac->channel;
 	int ret;
+	unsigned long timeout = 500;
 
-	init_completion(&sdmac->done);
+	sdma_enable_channel(sdma, 0);
 
-	sdma_enable_channel(sdma, channel);
+	while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
+		if (timeout-- <= 0)
+			break;
+		udelay(1);
+	}
 
-	ret = wait_for_completion_timeout(&sdmac->done, HZ);
+	if (ret) {
+		/* Clear the interrupt status */
+		writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
+	} else {
+		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
+	}
 
 	return ret ? 0 : -ETIMEDOUT;
 }
@@ -426,17 +434,17 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 	void *buf_virt;
 	dma_addr_t buf_phys;
 	int ret;
-
-	mutex_lock(&sdma->channel_0_lock);
+	unsigned long flags;
 
 	buf_virt = dma_alloc_coherent(NULL,
 			size,
 			&buf_phys, GFP_KERNEL);
 	if (!buf_virt) {
-		ret = -ENOMEM;
-		goto err_out;
+		return -ENOMEM;
 	}
 
+	spin_lock_irqsave(&sdma->channel_0_lock, flags);
+
 	bd0->mode.command = C0_SETPM;
 	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
 	bd0->mode.count = size / 2;
@@ -445,12 +453,11 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 
 	memcpy(buf_virt, buf, size);
 
-	ret = sdma_run_channel(&sdma->channel[0]);
+	ret = sdma_run_channel0(sdma);
 
-	dma_free_coherent(NULL, size, buf_virt, buf_phys);
+	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
-err_out:
-	mutex_unlock(&sdma->channel_0_lock);
+	dma_free_coherent(NULL, size, buf_virt, buf_phys);
 
 	return ret;
 }
@@ -541,10 +548,6 @@ static void sdma_tasklet(unsigned long data)
 
 	complete(&sdmac->done);
 
-	/* not interested in channel 0 interrupts */
-	if (sdmac->channel == 0)
-		return;
-
 	if (sdmac->flags & IMX_DMA_SG_LOOP)
 		sdma_handle_channel_loop(sdmac);
 	else
@@ -557,6 +560,8 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 	unsigned long stat;
 
 	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+	/* not interested in channel 0 interrupts */
+	stat &= ~1;
 	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
 
 	while (stat) {
@@ -662,6 +667,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	struct sdma_context_data *context = sdma->context;
 	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
 	int ret;
+	unsigned long flags;
 
 	if (sdmac->direction == DMA_DEV_TO_MEM) {
 		load_address = sdmac->pc_from_device;
@@ -679,7 +685,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
 	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
 
-	mutex_lock(&sdma->channel_0_lock);
+	spin_lock_irqsave(&sdma->channel_0_lock, flags);
 
 	memset(context, 0, sizeof(*context));
 	context->channel_state.pc = load_address;
@@ -698,10 +704,9 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	bd0->mode.count = sizeof(*context) / 4;
 	bd0->buffer_addr = sdma->context_phys;
 	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
+	ret = sdma_run_channel0(sdma);
 
-	ret = sdma_run_channel(&sdma->channel[0]);
-
-	mutex_unlock(&sdma->channel_0_lock);
+	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
 	return ret;
 }
@@ -1300,7 +1305,7 @@ static int __init sdma_probe(struct platform_device *pdev)
 	if (!sdma)
 		return -ENOMEM;
 
-	mutex_init(&sdma->channel_0_lock);
+	spin_lock_init(&sdma->channel_0_lock);
 
 	sdma->dev = &pdev->dev;
 
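Editor's note: taken together, the patch puts channel-0 programming and the poll for its completion inside a single spin_lock_irqsave section, which a mutex could not provide on the atomic device_prep_dma_cyclic path. A condensed restatement follows; run_channel0_locked is a hypothetical helper, while struct sdma_engine and sdma_run_channel0() are the definitions from the patch above:

#include <linux/spinlock.h>

/* Sketch of the resulting channel-0 critical section (not patch code). */
static int run_channel0_locked(struct sdma_engine *sdma)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&sdma->channel_0_lock, flags);
	/* ... program channel 0's buffer descriptor here ... */
	ret = sdma_run_channel0(sdma);	/* polls with udelay(), never sleeps */
	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	return ret;
}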