aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGuennadi Liakhovetski <g.liakhovetski@gmx.de>2011-09-25 10:12:18 -0400
committerVinod Koul <vinod.koul@linux.intel.com>2011-09-28 00:37:25 -0400
commitb4dae6e1adaedc9c343b5f00332312d649600bdc (patch)
treea872c8264821ea355520661d40e74893cfebc0ca
parent0745c9a5e3b64ee03784bc58ba5b127418d78b4e (diff)
dmaengine: shdma: protect against the IRQ handler
The IRQ handler of the shdma driver accesses common hardware registers, that are also accessed from other contexts. Therefore access to them has to be performed with interrupts disabled, not only with disabled bottom halves. Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de> Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
-rw-r--r--drivers/dma/shdma.c55
1 file changed, 28 insertions, 27 deletions
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 7f49235d14b9..e7bb7479b187 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -265,8 +265,9 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
265 struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); 265 struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
266 dma_async_tx_callback callback = tx->callback; 266 dma_async_tx_callback callback = tx->callback;
267 dma_cookie_t cookie; 267 dma_cookie_t cookie;
268 unsigned long flags;
268 269
269 spin_lock_bh(&sh_chan->desc_lock); 270 spin_lock_irqsave(&sh_chan->desc_lock, flags);
270 271
271 cookie = sh_chan->common.cookie; 272 cookie = sh_chan->common.cookie;
272 cookie++; 273 cookie++;
@@ -302,7 +303,7 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
302 tx->cookie, &last->async_tx, sh_chan->id, 303 tx->cookie, &last->async_tx, sh_chan->id,
303 desc->hw.sar, desc->hw.tcr, desc->hw.dar); 304 desc->hw.sar, desc->hw.tcr, desc->hw.dar);
304 305
305 spin_unlock_bh(&sh_chan->desc_lock); 306 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
306 307
307 return cookie; 308 return cookie;
308} 309}
@@ -374,24 +375,18 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
374 dmae_init(sh_chan); 375 dmae_init(sh_chan);
375 } 376 }
376 377
377 spin_lock_bh(&sh_chan->desc_lock);
378 while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { 378 while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
379 spin_unlock_bh(&sh_chan->desc_lock);
380 desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL); 379 desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
381 if (!desc) { 380 if (!desc)
382 spin_lock_bh(&sh_chan->desc_lock);
383 break; 381 break;
384 }
385 dma_async_tx_descriptor_init(&desc->async_tx, 382 dma_async_tx_descriptor_init(&desc->async_tx,
386 &sh_chan->common); 383 &sh_chan->common);
387 desc->async_tx.tx_submit = sh_dmae_tx_submit; 384 desc->async_tx.tx_submit = sh_dmae_tx_submit;
388 desc->mark = DESC_IDLE; 385 desc->mark = DESC_IDLE;
389 386
390 spin_lock_bh(&sh_chan->desc_lock);
391 list_add(&desc->node, &sh_chan->ld_free); 387 list_add(&desc->node, &sh_chan->ld_free);
392 sh_chan->descs_allocated++; 388 sh_chan->descs_allocated++;
393 } 389 }
394 spin_unlock_bh(&sh_chan->desc_lock);
395 390
396 if (!sh_chan->descs_allocated) { 391 if (!sh_chan->descs_allocated) {
397 ret = -ENOMEM; 392 ret = -ENOMEM;
@@ -405,6 +400,7 @@ edescalloc:
405 clear_bit(param->slave_id, sh_dmae_slave_used); 400 clear_bit(param->slave_id, sh_dmae_slave_used);
406etestused: 401etestused:
407efindslave: 402efindslave:
403 chan->private = NULL;
408 pm_runtime_put(sh_chan->dev); 404 pm_runtime_put(sh_chan->dev);
409 return ret; 405 return ret;
410} 406}
@@ -437,12 +433,12 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
437 chan->private = NULL; 433 chan->private = NULL;
438 } 434 }
439 435
440 spin_lock_bh(&sh_chan->desc_lock); 436 spin_lock_irq(&sh_chan->desc_lock);
441 437
442 list_splice_init(&sh_chan->ld_free, &list); 438 list_splice_init(&sh_chan->ld_free, &list);
443 sh_chan->descs_allocated = 0; 439 sh_chan->descs_allocated = 0;
444 440
445 spin_unlock_bh(&sh_chan->desc_lock); 441 spin_unlock_irq(&sh_chan->desc_lock);
446 442
447 if (descs > 0) 443 if (descs > 0)
448 pm_runtime_put(sh_chan->dev); 444 pm_runtime_put(sh_chan->dev);
@@ -534,6 +530,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
534 struct sh_desc *first = NULL, *new = NULL /* compiler... */; 530 struct sh_desc *first = NULL, *new = NULL /* compiler... */;
535 LIST_HEAD(tx_list); 531 LIST_HEAD(tx_list);
536 int chunks = 0; 532 int chunks = 0;
533 unsigned long irq_flags;
537 int i; 534 int i;
538 535
539 if (!sg_len) 536 if (!sg_len)
@@ -544,7 +541,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
544 (SH_DMA_TCR_MAX + 1); 541 (SH_DMA_TCR_MAX + 1);
545 542
546 /* Have to lock the whole loop to protect against concurrent release */ 543 /* Have to lock the whole loop to protect against concurrent release */
547 spin_lock_bh(&sh_chan->desc_lock); 544 spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);
548 545
549 /* 546 /*
550 * Chaining: 547 * Chaining:
@@ -590,7 +587,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
590 /* Put them back on the free list, so, they don't get lost */ 587 /* Put them back on the free list, so, they don't get lost */
591 list_splice_tail(&tx_list, &sh_chan->ld_free); 588 list_splice_tail(&tx_list, &sh_chan->ld_free);
592 589
593 spin_unlock_bh(&sh_chan->desc_lock); 590 spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
594 591
595 return &first->async_tx; 592 return &first->async_tx;
596 593
@@ -599,7 +596,7 @@ err_get_desc:
599 new->mark = DESC_IDLE; 596 new->mark = DESC_IDLE;
600 list_splice(&tx_list, &sh_chan->ld_free); 597 list_splice(&tx_list, &sh_chan->ld_free);
601 598
602 spin_unlock_bh(&sh_chan->desc_lock); 599 spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
603 600
604 return NULL; 601 return NULL;
605} 602}
@@ -661,6 +658,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
661 unsigned long arg) 658 unsigned long arg)
662{ 659{
663 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 660 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
661 unsigned long flags;
664 662
665 /* Only supports DMA_TERMINATE_ALL */ 663 /* Only supports DMA_TERMINATE_ALL */
666 if (cmd != DMA_TERMINATE_ALL) 664 if (cmd != DMA_TERMINATE_ALL)
@@ -669,7 +667,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
669 if (!chan) 667 if (!chan)
670 return -EINVAL; 668 return -EINVAL;
671 669
672 spin_lock_bh(&sh_chan->desc_lock); 670 spin_lock_irqsave(&sh_chan->desc_lock, flags);
673 dmae_halt(sh_chan); 671 dmae_halt(sh_chan);
674 672
675 if (!list_empty(&sh_chan->ld_queue)) { 673 if (!list_empty(&sh_chan->ld_queue)) {
@@ -680,7 +678,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
680 sh_chan->xmit_shift; 678 sh_chan->xmit_shift;
681 679
682 } 680 }
683 spin_unlock_bh(&sh_chan->desc_lock); 681 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
684 682
685 sh_dmae_chan_ld_cleanup(sh_chan, true); 683 sh_dmae_chan_ld_cleanup(sh_chan, true);
686 684
@@ -695,8 +693,9 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
695 dma_cookie_t cookie = 0; 693 dma_cookie_t cookie = 0;
696 dma_async_tx_callback callback = NULL; 694 dma_async_tx_callback callback = NULL;
697 void *param = NULL; 695 void *param = NULL;
696 unsigned long flags;
698 697
699 spin_lock_bh(&sh_chan->desc_lock); 698 spin_lock_irqsave(&sh_chan->desc_lock, flags);
700 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { 699 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
701 struct dma_async_tx_descriptor *tx = &desc->async_tx; 700 struct dma_async_tx_descriptor *tx = &desc->async_tx;
702 701
@@ -773,7 +772,7 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
773 */ 772 */
774 sh_chan->completed_cookie = sh_chan->common.cookie; 773 sh_chan->completed_cookie = sh_chan->common.cookie;
775 774
776 spin_unlock_bh(&sh_chan->desc_lock); 775 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
777 776
778 if (callback) 777 if (callback)
779 callback(param); 778 callback(param);
@@ -796,10 +795,12 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
796{ 795{
797 struct sh_desc *desc; 796 struct sh_desc *desc;
798 797
799 spin_lock_bh(&sh_chan->desc_lock); 798 spin_lock_irq(&sh_chan->desc_lock);
800 /* DMA work check */ 799 /* DMA work check */
801 if (dmae_is_busy(sh_chan)) 800 if (dmae_is_busy(sh_chan)) {
802 goto sh_chan_xfer_ld_queue_end; 801 spin_unlock_irq(&sh_chan->desc_lock);
802 return;
803 }
803 804
804 /* Find the first not transferred descriptor */ 805 /* Find the first not transferred descriptor */
805 list_for_each_entry(desc, &sh_chan->ld_queue, node) 806 list_for_each_entry(desc, &sh_chan->ld_queue, node)
@@ -813,8 +814,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
813 break; 814 break;
814 } 815 }
815 816
816sh_chan_xfer_ld_queue_end: 817 spin_unlock_irq(&sh_chan->desc_lock);
817 spin_unlock_bh(&sh_chan->desc_lock);
818} 818}
819 819
820static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) 820static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
@@ -831,6 +831,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
831 dma_cookie_t last_used; 831 dma_cookie_t last_used;
832 dma_cookie_t last_complete; 832 dma_cookie_t last_complete;
833 enum dma_status status; 833 enum dma_status status;
834 unsigned long flags;
834 835
835 sh_dmae_chan_ld_cleanup(sh_chan, false); 836 sh_dmae_chan_ld_cleanup(sh_chan, false);
836 837
@@ -841,7 +842,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
841 BUG_ON(last_complete < 0); 842 BUG_ON(last_complete < 0);
842 dma_set_tx_state(txstate, last_complete, last_used, 0); 843 dma_set_tx_state(txstate, last_complete, last_used, 0);
843 844
844 spin_lock_bh(&sh_chan->desc_lock); 845 spin_lock_irqsave(&sh_chan->desc_lock, flags);
845 846
846 status = dma_async_is_complete(cookie, last_complete, last_used); 847 status = dma_async_is_complete(cookie, last_complete, last_used);
847 848
@@ -859,7 +860,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
859 } 860 }
860 } 861 }
861 862
862 spin_unlock_bh(&sh_chan->desc_lock); 863 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
863 864
864 return status; 865 return status;
865} 866}
@@ -952,7 +953,7 @@ static void dmae_do_tasklet(unsigned long data)
952 u32 sar_buf = sh_dmae_readl(sh_chan, SAR); 953 u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
953 u32 dar_buf = sh_dmae_readl(sh_chan, DAR); 954 u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
954 955
955 spin_lock(&sh_chan->desc_lock); 956 spin_lock_irq(&sh_chan->desc_lock);
956 list_for_each_entry(desc, &sh_chan->ld_queue, node) { 957 list_for_each_entry(desc, &sh_chan->ld_queue, node) {
957 if (desc->mark == DESC_SUBMITTED && 958 if (desc->mark == DESC_SUBMITTED &&
958 ((desc->direction == DMA_FROM_DEVICE && 959 ((desc->direction == DMA_FROM_DEVICE &&
@@ -965,7 +966,7 @@ static void dmae_do_tasklet(unsigned long data)
965 break; 966 break;
966 } 967 }
967 } 968 }
968 spin_unlock(&sh_chan->desc_lock); 969 spin_unlock_irq(&sh_chan->desc_lock);
969 970
970 /* Next desc */ 971 /* Next desc */
971 sh_chan_xfer_ld_queue(sh_chan); 972 sh_chan_xfer_ld_queue(sh_chan);