author     Andy Shevchenko <andriy.shevchenko@linux.intel.com>  2015-07-09 06:25:37 -0400
committer  Vinod Koul <vinod.koul@intel.com>                    2015-07-16 09:00:46 -0400
commit     03734485b71129a954861f298825a490bcade986 (patch)
tree       c49f64ac63e13dc5ed0bc88fa33601c6e840921f
parent     b6c52c634506d52b3a2dc18503980d717e478739 (diff)
dmaengine: hsu: remove excessive lock
All hardware accesses are done under the virtual channel lock. The channel-specific lock is therefore excessive and can be removed safely.

This has been tested on Intel Medfield and Merrifield.

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
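The reasoning is easiest to see in the pause path: after the patch, the hardware is touched only while vchan.lock is held, so the inner per-channel lock that hsu_dma_chan_deactivate() used to take protected nothing extra. The one helper that still locks on its own, hsu_dma_chan_get_sr(), is switched over to vchan.lock so a single lock guards the hardware. A minimal sketch of hsu_dma_pause() as it reads after the change, reconstructed from the hunks below (the flags declaration and the trailing return are assumed from unchanged context; the rest is taken from the diff):

static int hsu_dma_pause(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	/* vchan.lock already serializes every hardware access ... */
	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
		/* ... so the channel can be disabled without hsuc->lock */
		hsu_chan_disable(hsuc);
		hsuc->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;	/* assumed: unchanged context outside the hunk */
}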
-rw-r--r--  drivers/dma/hsu/hsu.c | 39
-rw-r--r--  drivers/dma/hsu/hsu.h |  1
2 files changed, 4 insertions(+), 36 deletions(-)
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index f42f71e37e73..7669c7dd1e34 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -99,21 +99,13 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
 
 static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
 	hsu_chan_disable(hsuc);
 	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
 }
 
 static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
 	hsu_dma_chan_start(hsuc);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
 }
 
 static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
@@ -139,9 +131,9 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
 	unsigned long flags;
 	u32 sr;
 
-	spin_lock_irqsave(&hsuc->lock, flags);
+	spin_lock_irqsave(&hsuc->vchan.lock, flags);
 	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
+	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
 	return sr;
 }
@@ -273,14 +265,11 @@ static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
 	struct hsu_dma_desc *desc = hsuc->desc;
 	size_t bytes = hsu_dma_desc_size(desc);
 	int i;
-	unsigned long flags;
 
-	spin_lock_irqsave(&hsuc->lock, flags);
 	i = desc->active % HSU_DMA_CHAN_NR_DESC;
 	do {
 		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
 	} while (--i >= 0);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
 
 	return bytes;
 }
@@ -327,24 +316,6 @@ static int hsu_dma_slave_config(struct dma_chan *chan,
 	return 0;
 }
 
-static void hsu_dma_chan_deactivate(struct hsu_dma_chan *hsuc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
-	hsu_chan_disable(hsuc);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
-}
-
-static void hsu_dma_chan_activate(struct hsu_dma_chan *hsuc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
-	hsu_chan_enable(hsuc);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
-}
-
 static int hsu_dma_pause(struct dma_chan *chan)
 {
 	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
@@ -352,7 +323,7 @@ static int hsu_dma_pause(struct dma_chan *chan)
 
 	spin_lock_irqsave(&hsuc->vchan.lock, flags);
 	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
-		hsu_dma_chan_deactivate(hsuc);
+		hsu_chan_disable(hsuc);
 		hsuc->desc->status = DMA_PAUSED;
 	}
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
@@ -368,7 +339,7 @@ static int hsu_dma_resume(struct dma_chan *chan)
 	spin_lock_irqsave(&hsuc->vchan.lock, flags);
 	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
 		hsuc->desc->status = DMA_IN_PROGRESS;
-		hsu_dma_chan_activate(hsuc);
+		hsu_chan_enable(hsuc);
 	}
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
@@ -441,8 +412,6 @@ int hsu_dma_probe(struct hsu_dma_chip *chip)
 
 		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
 		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
-
-		spin_lock_init(&hsuc->lock);
 	}
 
 	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 0275233cf550..eeb9fff66967 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -78,7 +78,6 @@ struct hsu_dma_chan {
 	struct virt_dma_chan vchan;
 
 	void __iomem *reg;
-	spinlock_t lock;
 
 	/* hardware configuration */
 	enum dma_transfer_direction direction;
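
For reference, a sketch of the head of struct hsu_dma_chan after this patch, reconstructed from the hunk above (members past direction are unchanged and omitted here):

struct hsu_dma_chan {
	struct virt_dma_chan vchan;	/* its lock now covers all hardware access */

	void __iomem *reg;

	/* hardware configuration */
	enum dma_transfer_direction direction;
	/* ... remaining members unchanged ... */
};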