author | Nicolas Ferre <nicolas.ferre@atmel.com> | 2011-07-27 08:21:28 -0400
---|---|---
committer | Vinod Koul <vinod.koul@intel.com> | 2011-08-19 09:09:48 -0400
commit | d8cb04b070c2a55f7201714d231cff4f8f9fbd16 (patch) |
tree | cf5e6ee10c1e05ef0c61cc3bd2fa2538dfba2756 /drivers/dma/at_hdmac.c |
parent | ef298c21c0d9c06ed89ea2fa724c3a018acfff39 (diff) |
dmaengine: at_hdmac: replace spin_lock* with irqsave variants
dmaengine routines can be called from interrupt context and with
interrupts disabled, whereas spin_unlock_bh() must not be called from
such contexts. This patch therefore converts all spin_lock*() calls in
this driver to their irqsave/irqrestore variants.
The plain spin_lock() used in the tasklet is converted as well: a
tasklet can be interrupted by a hardware interrupt, and a DMA request
issued from that interrupt may try to take the same lock on the same
CPU, which would deadlock.
Idea from dw_dmac patch by Viresh Kumar.
Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
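As background for the conversion (an editor's sketch, not part of the patch): spin_unlock_bh() unconditionally re-enables bottom-half processing, which is invalid when the caller runs in hard-IRQ context or with local interrupts disabled, while spin_lock_irqsave()/spin_unlock_irqrestore() save the current interrupt state and restore exactly that state on unlock. A minimal sketch of the pattern, with a hypothetical lock and counter (`demo_lock` and `demo_count` are invented names, not from the driver):

```c
#include <linux/spinlock.h>

/* Hypothetical lock and state, invented for illustration. */
static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_count;

static void demo_update(void)
{
	unsigned long flags;

	/*
	 * spin_lock_bh()/spin_unlock_bh() would be unsafe here if this
	 * function could run in hard-IRQ context or with interrupts
	 * disabled: the unlock re-enables bottom halves and may run
	 * pending softirqs at a point where that is not allowed.
	 */
	spin_lock_irqsave(&demo_lock, flags);	/* disable IRQs, save state */
	demo_count++;				/* the protected update */
	spin_unlock_irqrestore(&demo_lock, flags); /* restore saved state */
}
```

The irqsave form is also the conservative choice: it works in process, softirq, and hard-IRQ context alike, at the cost of keeping interrupts off for the short critical section.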
Diffstat (limited to 'drivers/dma/at_hdmac.c')
-rw-r--r-- | drivers/dma/at_hdmac.c | 52
1 file changed, 30 insertions(+), 22 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 6a483eac7b3f..fd87b9690e1b 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 {
         struct at_desc *desc, *_desc;
         struct at_desc *ret = NULL;
+        unsigned long flags;
         unsigned int i = 0;
         LIST_HEAD(tmp_list);

-        spin_lock_bh(&atchan->lock);
+        spin_lock_irqsave(&atchan->lock, flags);
         list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
                 i++;
                 if (async_tx_test_ack(&desc->txd)) {
@@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
                 dev_dbg(chan2dev(&atchan->chan_common),
                                 "desc %p not ACKed\n", desc);
         }
-        spin_unlock_bh(&atchan->lock);
+        spin_unlock_irqrestore(&atchan->lock, flags);
         dev_vdbg(chan2dev(&atchan->chan_common),
                 "scanned %u descriptors on freelist\n", i);

@@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
         if (!ret) {
                 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
                 if (ret) {
-                        spin_lock_bh(&atchan->lock);
+                        spin_lock_irqsave(&atchan->lock, flags);
                         atchan->descs_allocated++;
-                        spin_unlock_bh(&atchan->lock);
+                        spin_unlock_irqrestore(&atchan->lock, flags);
                 } else {
                         dev_err(chan2dev(&atchan->chan_common),
                                 "not enough descriptors available\n");
@@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 {
         if (desc) {
                 struct at_desc *child;
+                unsigned long flags;

-                spin_lock_bh(&atchan->lock);
+                spin_lock_irqsave(&atchan->lock, flags);
                 list_for_each_entry(child, &desc->tx_list, desc_node)
                         dev_vdbg(chan2dev(&atchan->chan_common),
                                         "moving child desc %p to freelist\n",
@@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
                 dev_vdbg(chan2dev(&atchan->chan_common),
                         "moving desc %p to freelist\n", desc);
                 list_add(&desc->desc_node, &atchan->free_list);
-                spin_unlock_bh(&atchan->lock);
+                spin_unlock_irqrestore(&atchan->lock, flags);
         }
 }

@@ -471,8 +473,9 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
 static void atc_tasklet(unsigned long data)
 {
         struct at_dma_chan *atchan = (struct at_dma_chan *)data;
+        unsigned long flags;

-        spin_lock(&atchan->lock);
+        spin_lock_irqsave(&atchan->lock, flags);
         if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
                 atc_handle_error(atchan);
         else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
@@ -480,7 +483,7 @@ static void atc_tasklet(unsigned long data)
         else
                 atc_advance_work(atchan);

-        spin_unlock(&atchan->lock);
+        spin_unlock_irqrestore(&atchan->lock, flags);
 }

 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -539,8 +542,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
         struct at_desc *desc = txd_to_at_desc(tx);
         struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
         dma_cookie_t cookie;
+        unsigned long flags;

-        spin_lock_bh(&atchan->lock);
+        spin_lock_irqsave(&atchan->lock, flags);
         cookie = atc_assign_cookie(atchan, desc);

         if (list_empty(&atchan->active_list)) {
@@ -554,7 +558,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
                 list_add_tail(&desc->desc_node, &atchan->queue);
         }

-        spin_unlock_bh(&atchan->lock);
+        spin_unlock_irqrestore(&atchan->lock, flags);

         return cookie;
 }
@@ -927,28 +931,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
         struct at_dma_chan *atchan = to_at_dma_chan(chan);
         struct at_dma *atdma = to_at_dma(chan->device);
         int chan_id = atchan->chan_common.chan_id;
+        unsigned long flags;

         LIST_HEAD(list);

         dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

         if (cmd == DMA_PAUSE) {
-                spin_lock_bh(&atchan->lock);
+                spin_lock_irqsave(&atchan->lock, flags);

                 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
                 set_bit(ATC_IS_PAUSED, &atchan->status);

-                spin_unlock_bh(&atchan->lock);
+                spin_unlock_irqrestore(&atchan->lock, flags);
         } else if (cmd == DMA_RESUME) {
                 if (!test_bit(ATC_IS_PAUSED, &atchan->status))
                         return 0;

-                spin_lock_bh(&atchan->lock);
+                spin_lock_irqsave(&atchan->lock, flags);

                 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
                 clear_bit(ATC_IS_PAUSED, &atchan->status);

-                spin_unlock_bh(&atchan->lock);
+                spin_unlock_irqrestore(&atchan->lock, flags);
         } else if (cmd == DMA_TERMINATE_ALL) {
                 struct at_desc *desc, *_desc;
                 /*
@@ -957,7 +962,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                  * channel. We still have to poll the channel enable bit due
                  * to AHB/HSB limitations.
                  */
-                spin_lock_bh(&atchan->lock);
+                spin_lock_irqsave(&atchan->lock, flags);

                 /* disabling channel: must also remove suspend state */
                 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
@@ -978,7 +983,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                 /* if channel dedicated to cyclic operations, free it */
                 clear_bit(ATC_IS_CYCLIC, &atchan->status);

-                spin_unlock_bh(&atchan->lock);
+                spin_unlock_irqrestore(&atchan->lock, flags);
         } else {
                 return -ENXIO;
         }
@@ -1004,9 +1009,10 @@ atc_tx_status(struct dma_chan *chan,
         struct at_dma_chan *atchan = to_at_dma_chan(chan);
         dma_cookie_t last_used;
         dma_cookie_t last_complete;
+        unsigned long flags;
         enum dma_status ret;

-        spin_lock_bh(&atchan->lock);
+        spin_lock_irqsave(&atchan->lock, flags);

         last_complete = atchan->completed_cookie;
         last_used = chan->cookie;
@@ -1021,7 +1027,7 @@ atc_tx_status(struct dma_chan *chan,
                 ret = dma_async_is_complete(cookie, last_complete, last_used);
         }

-        spin_unlock_bh(&atchan->lock);
+        spin_unlock_irqrestore(&atchan->lock, flags);

         if (ret != DMA_SUCCESS)
                 dma_set_tx_state(txstate, last_complete, last_used,
@@ -1046,6 +1052,7 @@ atc_tx_status(struct dma_chan *chan,
 static void atc_issue_pending(struct dma_chan *chan)
 {
         struct at_dma_chan *atchan = to_at_dma_chan(chan);
+        unsigned long flags;

         dev_vdbg(chan2dev(chan), "issue_pending\n");

@@ -1053,11 +1060,11 @@ static void atc_issue_pending(struct dma_chan *chan)
         if (test_bit(ATC_IS_CYCLIC, &atchan->status))
                 return;

-        spin_lock_bh(&atchan->lock);
+        spin_lock_irqsave(&atchan->lock, flags);
         if (!atc_chan_is_enabled(atchan)) {
                 atc_advance_work(atchan);
         }
-        spin_unlock_bh(&atchan->lock);
+        spin_unlock_irqrestore(&atchan->lock, flags);
 }

 /**
@@ -1073,6 +1080,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
         struct at_dma *atdma = to_at_dma(chan->device);
         struct at_desc *desc;
         struct at_dma_slave *atslave;
+        unsigned long flags;
         int i;
         u32 cfg;
         LIST_HEAD(tmp_list);
@@ -1116,11 +1124,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
                 list_add_tail(&desc->desc_node, &tmp_list);
         }

-        spin_lock_bh(&atchan->lock);
+        spin_lock_irqsave(&atchan->lock, flags);
         atchan->descs_allocated = i;
         list_splice(&tmp_list, &atchan->free_list);
         atchan->completed_cookie = chan->cookie = 1;
-        spin_unlock_bh(&atchan->lock);
+        spin_unlock_irqrestore(&atchan->lock, flags);

         /* channel parameters */
         channel_writel(atchan, CFG, cfg);
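The tasklet hunk above is worth a closer look. Below is an editor's sketch of the deadlock it prevents, using invented stand-ins (`example_chan`, `example_tasklet`) rather than the real driver types:

```c
#include <linux/spinlock.h>

/* Invented stand-in for the real channel structure. */
struct example_chan {
	spinlock_t lock;
	unsigned long status;
};

/* Runs in softirq (tasklet) context. */
static void example_tasklet(unsigned long data)
{
	struct example_chan *chan = (struct example_chan *)data;
	unsigned long flags;

	/*
	 * With plain spin_lock(&chan->lock), a hardware interrupt could
	 * arrive right after the lock is taken; if its handler (directly,
	 * or via a client callback that submits a new DMA request) tries
	 * to take chan->lock on the same CPU, it spins forever, since the
	 * tasklet holding the lock can never run again to release it.
	 * Disabling local interrupts for the critical section closes
	 * that window.
	 */
	spin_lock_irqsave(&chan->lock, flags);
	/* ... advance the descriptor queue, complete finished cookies ... */
	spin_unlock_irqrestore(&chan->lock, flags);
}
```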