author	Viresh Kumar <viresh.kumar@st.com>	2011-04-15 06:33:35 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2011-05-13 10:10:07 -0400
commit	69cea5a00d3135677939fce1fefe54ed522055a0 (patch)
tree	2fa4752e87447f9d03828e0d11e04e37f41f0380 /drivers/dma
parent	69dc14b51c1aad9d82afd8f96bf4e4835089bffc (diff)
dmaengine/dw_dmac: Replace spin_lock* with irqsave variants and enable submission from callback
dmaengine routines can be called from interrupt context and with interrupts disabled, whereas spin_unlock_bh() must not be called from such contexts. This patch therefore converts all spin_*_bh() calls to their irqsave variants. The plain spin_lock() calls used in the tasklet are converted to irqsave variants as well, since the tasklet can be interrupted and DMA requests issued from those interrupts may also take the same lock.

The dmaengine framework now permits descriptor submission from callbacks, so no locks may be held while a callback runs. Dropping locks that were taken by a parent routine just before invoking the callback is not clean, so each routine now takes the lock itself wherever it is needed, and dwc_descriptor_complete() is always called without the lock held.

Signed-off-by: Viresh Kumar <viresh.kumar@st.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
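The locking pattern the patch adopts can be summarised in a short sketch (illustrative only; my_chan, my_desc and my_complete_one() are hypothetical names, not structures from dw_dmac.c): take the channel lock with the irqsave variant so the code stays correct even when the caller already runs with interrupts disabled, and drop the lock before invoking the client callback so the callback may legally submit new descriptors.

#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical, simplified channel/descriptor types for illustration. */
struct my_desc {
	struct dma_async_tx_descriptor txd;
	struct list_head node;
};

struct my_chan {
	spinlock_t lock;
	struct list_head free_list;
};

static void my_complete_one(struct my_chan *mc, struct my_desc *md)
{
	dma_async_tx_callback callback;
	void *param;
	unsigned long flags;

	/* irqsave variant: safe even if the caller disabled interrupts */
	spin_lock_irqsave(&mc->lock, flags);
	callback = md->txd.callback;
	param = md->txd.callback_param;
	list_move(&md->node, &mc->free_list);	/* bookkeeping under the lock */
	spin_unlock_irqrestore(&mc->lock, flags);

	/* Lock dropped before the callback runs, so it may submit new work */
	if (callback)
		callback(param);
}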
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/dw_dmac.c	116
1 file changed, 77 insertions(+), 39 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a9755e3dd603..442b98b81e7c 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -93,8 +93,9 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 	struct dw_desc *desc, *_desc;
 	struct dw_desc *ret = NULL;
 	unsigned int i = 0;
+	unsigned long flags;
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
 		if (async_tx_test_ack(&desc->txd)) {
 			list_del(&desc->desc_node);
@@ -104,7 +105,7 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
 		i++;
 	}
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
 
@@ -130,12 +131,14 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
  */
 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 {
+	unsigned long flags;
+
 	if (desc) {
 		struct dw_desc *child;
 
 		dwc_sync_desc_for_cpu(dwc, desc);
 
-		spin_lock_bh(&dwc->lock);
+		spin_lock_irqsave(&dwc->lock, flags);
 		list_for_each_entry(child, &desc->tx_list, desc_node)
 			dev_vdbg(chan2dev(&dwc->chan),
 				"moving child desc %p to freelist\n",
@@ -143,7 +146,7 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 		list_splice_init(&desc->tx_list, &dwc->free_list);
 		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
 		list_add(&desc->desc_node, &dwc->free_list);
-		spin_unlock_bh(&dwc->lock);
+		spin_unlock_irqrestore(&dwc->lock, flags);
 	}
 }
 
@@ -202,9 +205,11 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 	void *param = NULL;
 	struct dma_async_tx_descriptor *txd = &desc->txd;
 	struct dw_desc *child;
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
 
+	spin_lock_irqsave(&dwc->lock, flags);
 	dwc->completed = txd->cookie;
 	if (callback_required) {
 		callback = txd->callback;
@@ -241,6 +246,8 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 		}
 	}
 
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
 	if (callback_required && callback)
 		callback(param);
 }
@@ -249,7 +256,9 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 {
 	struct dw_desc *desc, *_desc;
 	LIST_HEAD(list);
+	unsigned long flags;
 
+	spin_lock_irqsave(&dwc->lock, flags);
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
 		dev_err(chan2dev(&dwc->chan),
 			"BUG: XFER bit set, but channel not idle!\n");
@@ -270,6 +279,8 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		dwc_dostart(dwc, dwc_first_active(dwc));
 	}
 
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		dwc_descriptor_complete(dwc, desc, true);
 }
@@ -280,7 +291,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	struct dw_desc *desc, *_desc;
 	struct dw_desc *child;
 	u32 status_xfer;
+	unsigned long flags;
 
+	spin_lock_irqsave(&dwc->lock, flags);
 	/*
 	 * Clear block interrupt flag before scanning so that we don't
 	 * miss any, and read LLP before RAW_XFER to ensure it is
@@ -293,35 +306,47 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	if (status_xfer & dwc->mask) {
 		/* Everything we've submitted is done */
 		dma_writel(dw, CLEAR.XFER, dwc->mask);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+
 		dwc_complete_all(dw, dwc);
 		return;
 	}
 
-	if (list_empty(&dwc->active_list))
+	if (list_empty(&dwc->active_list)) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
 		return;
+	}
 
 	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
 
 	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
 		/* check first descriptors addr */
-		if (desc->txd.phys == llp)
+		if (desc->txd.phys == llp) {
+			spin_unlock_irqrestore(&dwc->lock, flags);
 			return;
+		}
 
 		/* check first descriptors llp */
-		if (desc->lli.llp == llp)
+		if (desc->lli.llp == llp) {
 			/* This one is currently in progress */
+			spin_unlock_irqrestore(&dwc->lock, flags);
 			return;
+		}
 
 		list_for_each_entry(child, &desc->tx_list, desc_node)
-			if (child->lli.llp == llp)
+			if (child->lli.llp == llp) {
 				/* Currently in progress */
+				spin_unlock_irqrestore(&dwc->lock, flags);
 				return;
+			}
 
 		/*
 		 * No descriptors so far seem to be in progress, i.e.
 		 * this one must be done.
 		 */
+		spin_unlock_irqrestore(&dwc->lock, flags);
 		dwc_descriptor_complete(dwc, desc, true);
+		spin_lock_irqsave(&dwc->lock, flags);
 	}
 
 	dev_err(chan2dev(&dwc->chan),
@@ -336,6 +361,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		list_move(dwc->queue.next, &dwc->active_list);
 		dwc_dostart(dwc, dwc_first_active(dwc));
 	}
+	spin_unlock_irqrestore(&dwc->lock, flags);
 }
 
 static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
@@ -350,9 +376,12 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 {
 	struct dw_desc *bad_desc;
 	struct dw_desc *child;
+	unsigned long flags;
 
 	dwc_scan_descriptors(dw, dwc);
 
+	spin_lock_irqsave(&dwc->lock, flags);
+
 	/*
 	 * The descriptor currently at the head of the active list is
 	 * borked. Since we don't have any way to report errors, we'll
@@ -382,6 +411,8 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
 		dwc_dump_lli(dwc, &child->lli);
 
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
 	/* Pretend the descriptor completed successfully */
 	dwc_descriptor_complete(dwc, bad_desc, true);
 }
@@ -406,6 +437,8 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 		u32 status_block, u32 status_err, u32 status_xfer)
 {
+	unsigned long flags;
+
 	if (status_block & dwc->mask) {
 		void (*callback)(void *param);
 		void *callback_param;
@@ -416,11 +449,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 
 		callback = dwc->cdesc->period_callback;
 		callback_param = dwc->cdesc->period_callback_param;
-		if (callback) {
-			spin_unlock(&dwc->lock);
-			callback(callback_param);
-			spin_lock(&dwc->lock);
-		}
+
+		if (callback)
+			callback(callback_param);
 	}
 
 	/*
@@ -434,6 +465,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
 			"interrupt, stopping DMA transfer\n",
 			status_xfer ? "xfer" : "error");
+
+		spin_lock_irqsave(&dwc->lock, flags);
+
 		dev_err(chan2dev(&dwc->chan),
 			" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
 			channel_readl(dwc, SAR),
@@ -457,6 +491,8 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 
 		for (i = 0; i < dwc->cdesc->periods; i++)
 			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
 	}
 }
 
@@ -480,7 +516,6 @@ static void dw_dma_tasklet(unsigned long data)
 
 	for (i = 0; i < dw->dma.chancnt; i++) {
 		dwc = &dw->chan[i];
-		spin_lock(&dwc->lock);
 		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
 			dwc_handle_cyclic(dw, dwc, status_block, status_err,
 					status_xfer);
@@ -488,7 +523,6 @@ static void dw_dma_tasklet(unsigned long data)
 			dwc_handle_error(dw, dwc);
 		else if ((status_block | status_xfer) & (1 << i))
 			dwc_scan_descriptors(dw, dwc);
-		spin_unlock(&dwc->lock);
 	}
 
 	/*
@@ -543,8 +577,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct dw_desc *desc = txd_to_dw_desc(tx);
 	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
 	dma_cookie_t cookie;
+	unsigned long flags;
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	cookie = dwc_assign_cookie(dwc, desc);
 
 	/*
@@ -564,7 +599,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
 		list_add_tail(&desc->desc_node, &dwc->queue);
 	}
 
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	return cookie;
 }
@@ -826,6 +861,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(chan->device);
 	struct dw_desc *desc, *_desc;
+	unsigned long flags;
 	LIST_HEAD(list);
 
 	/* Only supports DMA_TERMINATE_ALL */
@@ -838,7 +874,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	 * channel. We still have to poll the channel enable bit due
 	 * to AHB/HSB limitations.
 	 */
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 
 	channel_clear_bit(dw, CH_EN, dwc->mask);
 
@@ -849,7 +885,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	list_splice_init(&dwc->queue, &list);
 	list_splice_init(&dwc->active_list, &list);
 
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	/* Flush all pending and queued descriptors */
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
@@ -873,9 +909,7 @@ dwc_tx_status(struct dma_chan *chan,
 
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
 	if (ret != DMA_SUCCESS) {
-		spin_lock_bh(&dwc->lock);
 		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
-		spin_unlock_bh(&dwc->lock);
 
 		last_complete = dwc->completed;
 		last_used = chan->cookie;
@@ -896,10 +930,8 @@ static void dwc_issue_pending(struct dma_chan *chan)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 
-	spin_lock_bh(&dwc->lock);
 	if (!list_empty(&dwc->queue))
 		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
-	spin_unlock_bh(&dwc->lock);
 }
 
 static int dwc_alloc_chan_resources(struct dma_chan *chan)
@@ -911,6 +943,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	int i;
 	u32 cfghi;
 	u32 cfglo;
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
 
@@ -948,16 +981,16 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	 * doesn't mean what you think it means), and status writeback.
 	 */
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	i = dwc->descs_allocated;
 	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
-		spin_unlock_bh(&dwc->lock);
+		spin_unlock_irqrestore(&dwc->lock, flags);
 
 		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
 		if (!desc) {
 			dev_info(chan2dev(chan),
 				"only allocated %d descriptors\n", i);
-			spin_lock_bh(&dwc->lock);
+			spin_lock_irqsave(&dwc->lock, flags);
 			break;
 		}
 
@@ -969,7 +1002,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 				sizeof(desc->lli), DMA_TO_DEVICE);
 		dwc_desc_put(dwc, desc);
 
-		spin_lock_bh(&dwc->lock);
+		spin_lock_irqsave(&dwc->lock, flags);
 		i = ++dwc->descs_allocated;
 	}
 
@@ -978,7 +1011,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_set_bit(dw, MASK.ERROR, dwc->mask);
 
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	dev_dbg(chan2dev(chan),
 		"alloc_chan_resources allocated %d descriptors\n", i);
@@ -991,6 +1024,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(chan->device);
 	struct dw_desc *desc, *_desc;
+	unsigned long flags;
 	LIST_HEAD(list);
 
 	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
@@ -1001,7 +1035,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	BUG_ON(!list_empty(&dwc->queue));
 	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	list_splice_init(&dwc->free_list, &list);
 	dwc->descs_allocated = 0;
 
@@ -1010,7 +1044,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
 
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
 		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
@@ -1035,13 +1069,14 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	unsigned long flags;
 
 	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
 		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
 		return -ENODEV;
 	}
 
-	spin_lock(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 
 	/* assert channel is idle */
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
@@ -1054,7 +1089,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
 			channel_readl(dwc, LLP),
 			channel_readl(dwc, CTL_HI),
 			channel_readl(dwc, CTL_LO));
-		spin_unlock(&dwc->lock);
+		spin_unlock_irqrestore(&dwc->lock, flags);
 		return -EBUSY;
 	}
 
@@ -1069,7 +1104,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
 
 	channel_set_bit(dw, CH_EN, dwc->mask);
 
-	spin_unlock(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	return 0;
 }
@@ -1085,14 +1120,15 @@ void dw_dma_cyclic_stop(struct dma_chan *chan)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	unsigned long flags;
 
-	spin_lock(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 
 	channel_clear_bit(dw, CH_EN, dwc->mask);
 	while (dma_readl(dw, CH_EN) & dwc->mask)
 		cpu_relax();
 
-	spin_unlock(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 }
 EXPORT_SYMBOL(dw_dma_cyclic_stop);
 
@@ -1121,17 +1157,18 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 	unsigned int reg_width;
 	unsigned int periods;
 	unsigned int i;
+	unsigned long flags;
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
-		spin_unlock_bh(&dwc->lock);
+		spin_unlock_irqrestore(&dwc->lock, flags);
 		dev_dbg(chan2dev(&dwc->chan),
 			"queue and/or active list are not empty\n");
 		return ERR_PTR(-EBUSY);
 	}
 
 	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 	if (was_cyclic) {
 		dev_dbg(chan2dev(&dwc->chan),
 			"channel already prepared for cyclic DMA\n");
@@ -1245,13 +1282,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
 	struct dw_cyclic_desc *cdesc = dwc->cdesc;
 	int i;
+	unsigned long flags;
 
 	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
 
 	if (!cdesc)
 		return;
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 
 	channel_clear_bit(dw, CH_EN, dwc->mask);
 	while (dma_readl(dw, CH_EN) & dwc->mask)
@@ -1261,7 +1299,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
 	dma_writel(dw, CLEAR.XFER, dwc->mask);
 
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	for (i = 0; i < cdesc->periods; i++)
 		dwc_desc_put(dwc, cdesc->desc[i]);