author		Ludovic Desroches <ludovic.desroches@atmel.com>	2015-06-08 04:33:14 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2015-06-08 06:57:09 -0400
commit		4c374fc7ce944024936a6d9804daec85207d9384 (patch)
tree		75b88bef0cd9e323ac9c96259a3ad2445c15585a
parent		d4a4f75cd8f29cd9464a5a32e9224a91571d6649 (diff)
dmaengine: at_xdmac: lock fixes
Using the _bh variant for spin locks causes this kind of warning:
Starting logging: ------------[ cut here ]------------
WARNING: CPU: 0 PID: 3 at /ssd_drive/linux/kernel/softirq.c:151
__local_bh_enable_ip+0xe8/0xf4()
Modules linked in:
CPU: 0 PID: 3 Comm: ksoftirqd/0 Not tainted 4.1.0-rc2+ #94
Hardware name: Atmel SAMA5
[<c0013c04>] (unwind_backtrace) from [<c00118a4>] (show_stack+0x10/0x14)
[<c00118a4>] (show_stack) from [<c001bbcc>]
(warn_slowpath_common+0x80/0xac)
[<c001bbcc>] (warn_slowpath_common) from [<c001bc14>]
(warn_slowpath_null+0x1c/0x24)
[<c001bc14>] (warn_slowpath_null) from [<c001e28c>]
(__local_bh_enable_ip+0xe8/0xf4)
[<c001e28c>] (__local_bh_enable_ip) from [<c01fdbd0>]
(at_xdmac_device_terminate_all+0xf4/0x100)
[<c01fdbd0>] (at_xdmac_device_terminate_all) from [<c02221a4>]
(atmel_complete_tx_dma+0x34/0xf4)
[<c02221a4>] (atmel_complete_tx_dma) from [<c01fe4ac>]
(at_xdmac_tasklet+0x14c/0x1ac)
[<c01fe4ac>] (at_xdmac_tasklet) from [<c001de58>]
(tasklet_action+0x68/0xb4)
[<c001de58>] (tasklet_action) from [<c001dfdc>]
(__do_softirq+0xfc/0x238)
[<c001dfdc>] (__do_softirq) from [<c001e140>] (run_ksoftirqd+0x28/0x34)
[<c001e140>] (run_ksoftirqd) from [<c0033a3c>]
(smpboot_thread_fn+0x138/0x18c)
[<c0033a3c>] (smpboot_thread_fn) from [<c0030e7c>] (kthread+0xdc/0xf0)
[<c0030e7c>] (kthread) from [<c000f480>] (ret_from_fork+0x14/0x34)
---[ end trace b57b14a99c1d8812 ]---
It comes from the fact that devices can call into the DMA controller
code with irqs disabled. The _bh variant is not intended for this case,
since unlocking re-enables bottom halves, which is not allowed while
hard irqs are off. Switch to the irqsave/irqrestore variants to avoid
this situation.
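For context, a minimal sketch of the two idioms (hypothetical lock and
functions, not the driver code): spin_unlock_bh() unconditionally
re-enables bottom halves via __local_bh_enable_ip(), which warns when
hard irqs are disabled, whereas spin_unlock_irqrestore() puts the irq
state back exactly as spin_lock_irqsave() found it, so that pair is
safe from any calling context.

/* Minimal sketch, hypothetical lock and callers (not driver code). */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Broken pattern: if this runs with hard irqs disabled (as when
 * another driver calls in while holding an irqsave lock),
 * spin_unlock_bh() re-enables bottom halves and triggers the
 * warning shown in the trace above. */
static void demo_bh(void)
{
	spin_lock_bh(&demo_lock);
	/* ... critical section ... */
	spin_unlock_bh(&demo_lock);
}

/* Fixed pattern: the caller's irq state is saved into 'flags' and
 * restored verbatim, so it works whether irqs were on or off. */
static void demo_irqsave(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

This is why the patch below converts every lock/unlock pair in the
driver rather than only the one in the warning backtrace.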
Signed-off-by: Ludovic Desroches <ludovic.desroches@atmel.com>
Cc: stable@vger.kernel.org # 4.0 and later
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--	drivers/dma/at_xdmac.c	75
1 file changed, 43 insertions(+), 32 deletions(-)
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 933e4b338459..0dcc9a7a90af 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -415,8 +415,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct at_xdmac_desc	*desc = txd_to_at_desc(tx);
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(tx->chan);
 	dma_cookie_t		cookie;
+	unsigned long		irqflags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, irqflags);
 	cookie = dma_cookie_assign(tx);
 
 	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
@@ -425,7 +426,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (list_is_singular(&atchan->xfers_list))
 		at_xdmac_start_xfer(atchan, desc);
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, irqflags);
 	return cookie;
 }
 
@@ -563,6 +564,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct scatterlist	*sg;
 	int			i;
 	unsigned int		xfer_size = 0;
+	unsigned long		irqflags;
+	struct dma_async_tx_descriptor	*ret = NULL;
 
 	if (!sgl)
 		return NULL;
@@ -578,7 +581,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		 flags);
 
 	/* Protect dma_sconfig field that can be modified by set_slave_conf. */
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, irqflags);
 
 	/* Prepare descriptors. */
 	for_each_sg(sgl, sg, sg_len, i) {
@@ -589,8 +592,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		mem = sg_dma_address(sg);
 		if (unlikely(!len)) {
 			dev_err(chan2dev(chan), "sg data length is zero\n");
-			spin_unlock_bh(&atchan->lock);
-			return NULL;
+			goto spin_unlock;
 		}
 		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
 			__func__, i, len, mem);
@@ -600,8 +602,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
-			spin_unlock_bh(&atchan->lock);
-			return NULL;
+			goto spin_unlock;
 		}
 
 		/* Linked list descriptor setup. */
@@ -645,13 +646,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		xfer_size += len;
 	}
 
-	spin_unlock_bh(&atchan->lock);
 
 	first->tx_dma_desc.flags = flags;
 	first->xfer_size = xfer_size;
 	first->direction = direction;
+	ret = &first->tx_dma_desc;
 
-	return &first->tx_dma_desc;
+spin_unlock:
+	spin_unlock_irqrestore(&atchan->lock, irqflags);
+	return ret;
 }
 
 static struct dma_async_tx_descriptor *
@@ -664,6 +667,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 	struct at_xdmac_desc	*first = NULL, *prev = NULL;
 	unsigned int		periods = buf_len / period_len;
 	int			i;
+	unsigned long		irqflags;
 
 	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
 		__func__, &buf_addr, buf_len, period_len,
@@ -682,16 +686,16 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 	for (i = 0; i < periods; i++) {
 		struct at_xdmac_desc	*desc = NULL;
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, irqflags);
 		desc = at_xdmac_get_desc(atchan);
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
-			spin_unlock_bh(&atchan->lock);
+			spin_unlock_irqrestore(&atchan->lock, irqflags);
 			return NULL;
 		}
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, irqflags);
 		dev_dbg(chan2dev(chan),
 			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
 			__func__, desc, &desc->tx_dma_desc.phys);
@@ -766,6 +770,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 					| AT_XDMAC_CC_SIF(0)
 					| AT_XDMAC_CC_MBSIZE_SIXTEEN
 					| AT_XDMAC_CC_TYPE_MEM_TRAN;
+	unsigned long		irqflags;
 
 	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
 		__func__, &src, &dest, len, flags);
@@ -798,9 +803,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
 		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, irqflags);
 		desc = at_xdmac_get_desc(atchan);
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, irqflags);
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
@@ -886,6 +891,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	int			residue;
 	u32			cur_nda, mask, value;
 	u8			dwidth = 0;
+	unsigned long		flags;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_COMPLETE)
@@ -894,7 +900,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	if (!txstate)
 		return ret;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
 
@@ -904,8 +910,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	 */
 	if (!desc->active_xfer) {
 		dma_set_residue(txstate, desc->xfer_size);
-		spin_unlock_bh(&atchan->lock);
-		return ret;
+		goto spin_unlock;
 	}
 
 	residue = desc->xfer_size;
@@ -936,14 +941,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	}
 	residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
 
-	spin_unlock_bh(&atchan->lock);
-
 	dma_set_residue(txstate, residue);
 
 	dev_dbg(chan2dev(chan),
 		"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
 		__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
 
+spin_unlock:
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	return ret;
 }
 
@@ -964,8 +969,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 {
 	struct at_xdmac_desc	*desc;
+	unsigned long		flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	/*
 	 * If channel is enabled, do nothing, advance_work will be triggered
@@ -980,7 +986,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 		at_xdmac_start_xfer(atchan, desc);
 	}
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1116,12 +1122,13 @@ static int at_xdmac_device_config(struct dma_chan *chan,
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	int ret;
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	ret = at_xdmac_set_slave_config(chan, config);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return ret;
 }
@@ -1130,18 +1137,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
 	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
 		return 0;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
 	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
 	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
 		cpu_relax();
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1150,18 +1158,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	if (!at_xdmac_chan_is_paused(atchan)) {
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 		return 0;
 	}
 
 	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
 	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1171,10 +1180,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
 	struct at_xdmac_desc	*desc, *_desc;
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
 	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
 		cpu_relax();
@@ -1184,7 +1194,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
 		at_xdmac_remove_xfer(atchan, desc);
 
 	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
@@ -1194,8 +1204,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac_desc	*desc;
 	int			i;
+	unsigned long		flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	if (at_xdmac_chan_is_enabled(atchan)) {
 		dev_err(chan2dev(chan),
@@ -1226,7 +1237,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
 	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
 
 spin_unlock:
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	return i;
 }
 