path: root/drivers/dma/imx-sdma.c
author    Richard Zhao <richard.zhao@linaro.org>    2012-01-12 22:10:01 -0500
committer Vinod Koul <vinod.koul@linux.intel.com>   2012-01-30 22:33:16 -0500
commit    0bbc1413028e93629a2ecd5588cc427fa834404b (patch)
tree      dd9744524a814b549cd207dd9a10e02f04d2ee32 /drivers/dma/imx-sdma.c
parent    b78bd91f47b28ba1290a7eb95d8cf48a357e1b90 (diff)
dma/imx-sdma: convert to use bit ops
We don't need an extra lock, so we use non-atomic bit ops to set and clear bits. event_mask0 and event_mask1 are merged into an array, which makes the bit ops straightforward. This also fixes the following issue:

	sdmac->event_mask0 = 1 << sdmac->event_id0;
	sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32);

If event_id0 < 32, the second statement shifts by a negative count; if event_id0 >= 32, the first shifts by a count greater than or equal to the width of int. In both cases the behavior is undefined.

Signed-off-by: Richard Zhao <richard.zhao@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
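For illustration only (not part of the patch), here is a minimal standalone sketch of the word/bit split that the kernel's __set_bit() performs on an array, which is what removes the out-of-range shifts. The helper name set_event_bit() and the fixed 32-bit word size are assumptions chosen to mirror the two 32-bit SDMA event-mask words:

	#include <stdio.h>
	#include <inttypes.h>

	/*
	 * Set bit event_id in a two-word mask (hypothetical helper).
	 * The shift count is always event_id % 32, i.e. in [0, 31], so it
	 * is never negative and never reaches the width of the type --
	 * unlike the old "1 << event_id0" / "1 << (event_id0 - 32)" pair.
	 */
	static void set_event_bit(uint32_t mask[2], unsigned int event_id)
	{
		mask[event_id / 32] |= UINT32_C(1) << (event_id % 32);
	}

	int main(void)
	{
		uint32_t event_mask[2] = { 0, 0 };

		set_event_bit(event_mask, 5);	/* bit 5 of word 0 */
		set_event_bit(event_mask, 35);	/* bit 3 of word 1, no UB */

		printf("mask[0]=%#" PRIx32 " mask[1]=%#" PRIx32 "\n",
		       event_mask[0], event_mask[1]);
		return 0;
	}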
Diffstat (limited to 'drivers/dma/imx-sdma.c')
-rw-r--r--  drivers/dma/imx-sdma.c  68
1 file changed, 34 insertions, 34 deletions
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 5eb96b53e6da..f380e79fd4d1 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/bitops.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
@@ -260,8 +261,8 @@ struct sdma_channel {
 	unsigned int pc_from_device, pc_to_device;
 	unsigned long flags;
 	dma_addr_t per_address;
-	u32 event_mask0, event_mask1;
-	u32 watermark_level;
+	unsigned long event_mask[2];
+	unsigned long watermark_level;
 	u32 shp_addr, per_addr;
 	struct dma_chan chan;
 	spinlock_t lock;
@@ -272,7 +273,7 @@ struct sdma_channel {
 	unsigned int chn_real_count;
 };
 
-#define IMX_DMA_SG_LOOP (1 << 0)
+#define IMX_DMA_SG_LOOP BIT(0)
 
 #define MAX_DMA_CHANNELS 32
 #define MXC_SDMA_DEFAULT_PRIORITY 1
@@ -346,9 +347,9 @@ static const struct of_device_id sdma_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, sdma_dt_ids);
 
-#define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */
-#define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */
-#define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */
+#define SDMA_H_CONFIG_DSPDMA BIT(12) /* indicates if the DSPDMA is used */
+#define SDMA_H_CONFIG_RTD_PINS BIT(11) /* indicates if Real-Time Debug pins are enabled */
+#define SDMA_H_CONFIG_ACR BIT(4) /* indicates if AHB freq /core freq = 2 or 1 */
 #define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/
 
 static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
@@ -363,7 +364,7 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
 {
 	struct sdma_engine *sdma = sdmac->sdma;
 	int channel = sdmac->channel;
-	u32 evt, mcu, dsp;
+	unsigned long evt, mcu, dsp;
 
 	if (event_override && mcu_override && dsp_override)
 		return -EINVAL;
@@ -373,19 +374,19 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
 	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
 
 	if (dsp_override)
-		dsp &= ~(1 << channel);
+		__clear_bit(channel, &dsp);
 	else
-		dsp |= (1 << channel);
+		__set_bit(channel, &dsp);
 
 	if (event_override)
-		evt &= ~(1 << channel);
+		__clear_bit(channel, &evt);
 	else
-		evt |= (1 << channel);
+		__set_bit(channel, &evt);
 
 	if (mcu_override)
-		mcu &= ~(1 << channel);
+		__clear_bit(channel, &mcu);
 	else
-		mcu |= (1 << channel);
+		__set_bit(channel, &mcu);
 
 	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
 	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
@@ -396,7 +397,7 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
 
 static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 {
-	writel(1 << channel, sdma->regs + SDMA_H_START);
+	writel(BIT(channel), sdma->regs + SDMA_H_START);
 }
 
 /*
@@ -457,11 +458,11 @@ static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
 {
 	struct sdma_engine *sdma = sdmac->sdma;
 	int channel = sdmac->channel;
-	u32 val;
+	unsigned long val;
 	u32 chnenbl = chnenbl_ofs(sdma, event);
 
 	val = readl_relaxed(sdma->regs + chnenbl);
-	val |= (1 << channel);
+	__set_bit(channel, &val);
 	writel_relaxed(val, sdma->regs + chnenbl);
 }
 
@@ -470,10 +471,10 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 	struct sdma_engine *sdma = sdmac->sdma;
 	int channel = sdmac->channel;
 	u32 chnenbl = chnenbl_ofs(sdma, event);
-	u32 val;
+	unsigned long val;
 
 	val = readl_relaxed(sdma->regs + chnenbl);
-	val &= ~(1 << channel);
+	__clear_bit(channel, &val);
 	writel_relaxed(val, sdma->regs + chnenbl);
 }
 
@@ -550,7 +551,7 @@ static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 {
 	struct sdma_engine *sdma = dev_id;
-	u32 stat;
+	unsigned long stat;
 
 	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
 	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
@@ -561,7 +562,7 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 
 		mxc_sdma_handle_channel(sdmac);
 
-		stat &= ~(1 << channel);
+		__clear_bit(channel, &stat);
 	}
 
 	return IRQ_HANDLED;
@@ -669,11 +670,11 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 		return load_address;
 
 	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
-	dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level);
+	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
 	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
 	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
-	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
-	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
+	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
+	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
 
 	mutex_lock(&sdma->channel_0_lock);
 
@@ -683,8 +684,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	/* Send by context the event mask,base address for peripheral
 	 * and watermark level
 	 */
-	context->gReg[0] = sdmac->event_mask1;
-	context->gReg[1] = sdmac->event_mask0;
+	context->gReg[0] = sdmac->event_mask[1];
+	context->gReg[1] = sdmac->event_mask[0];
 	context->gReg[2] = sdmac->per_addr;
 	context->gReg[6] = sdmac->shp_addr;
 	context->gReg[7] = sdmac->watermark_level;
@@ -707,7 +708,7 @@ static void sdma_disable_channel(struct sdma_channel *sdmac)
 	struct sdma_engine *sdma = sdmac->sdma;
 	int channel = sdmac->channel;
 
-	writel_relaxed(1 << channel, sdma->regs + SDMA_H_STATSTOP);
+	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
 	sdmac->status = DMA_ERROR;
 }
 
@@ -717,8 +718,8 @@ static int sdma_config_channel(struct sdma_channel *sdmac)
 
 	sdma_disable_channel(sdmac);
 
-	sdmac->event_mask0 = 0;
-	sdmac->event_mask1 = 0;
+	sdmac->event_mask[0] = 0;
+	sdmac->event_mask[1] = 0;
 	sdmac->shp_addr = 0;
 	sdmac->per_addr = 0;
 
@@ -746,15 +747,14 @@ static int sdma_config_channel(struct sdma_channel *sdmac)
 	    (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
 		/* Handle multiple event channels differently */
 		if (sdmac->event_id1) {
-			sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32);
+			sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
 			if (sdmac->event_id1 > 31)
-				sdmac->watermark_level |= 1 << 31;
-			sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32);
+				__set_bit(31, &sdmac->watermark_level);
+			sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
 			if (sdmac->event_id0 > 31)
-				sdmac->watermark_level |= 1 << 30;
+				__set_bit(30, &sdmac->watermark_level);
 		} else {
-			sdmac->event_mask0 = 1 << sdmac->event_id0;
-			sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32);
+			__set_bit(sdmac->event_id0, sdmac->event_mask);
 		}
 		/* Watermark Level */
 		sdmac->watermark_level |= sdmac->watermark_level;