diff options
author | Russell King - ARM Linux <linux@arm.linux.org.uk> | 2012-03-06 17:34:46 -0500 |
---|---|---|
committer | Vinod Koul <vinod.koul@linux.intel.com> | 2012-03-13 02:06:52 -0400 |
commit | 884485e1f12dcd39390f042e772cdbefc9ebb750 (patch) | |
tree | a35fccb601c48ae1ea839aa6d62e4f102f7b66c3 /drivers/dma | |
parent | d2ebfb335b0426deb1a4fb14e4e926d81ecd8235 (diff) |
dmaengine: consolidate assignment of DMA cookies
Everyone deals with assigning DMA cookies in the same way (it's part of
the API so they should be), so let's consolidate the common code into a
helper function to avoid this duplication.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Jassi Brar <jassisinghbrar@gmail.com>
[imx-sdma.c & mxs-dma.c]
Tested-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/amba-pl08x.c | 9 | ||||
-rw-r--r-- | drivers/dma/at_hdmac.c | 23 | ||||
-rw-r--r-- | drivers/dma/coh901318.c | 20 | ||||
-rw-r--r-- | drivers/dma/dmaengine.h | 20 | ||||
-rw-r--r-- | drivers/dma/dw_dmac.c | 17 | ||||
-rw-r--r-- | drivers/dma/ep93xx_dma.c | 9 | ||||
-rw-r--r-- | drivers/dma/fsldma.c | 9 | ||||
-rw-r--r-- | drivers/dma/imx-dma.c | 15 | ||||
-rw-r--r-- | drivers/dma/imx-sdma.c | 16 | ||||
-rw-r--r-- | drivers/dma/intel_mid_dma.c | 9 | ||||
-rw-r--r-- | drivers/dma/ioat/dma.c | 7 | ||||
-rw-r--r-- | drivers/dma/ioat/dma_v2.c | 8 | ||||
-rw-r--r-- | drivers/dma/iop-adma.c | 14 | ||||
-rw-r--r-- | drivers/dma/ipu/ipu_idmac.c | 9 | ||||
-rw-r--r-- | drivers/dma/mpc512x_dma.c | 8 | ||||
-rw-r--r-- | drivers/dma/mv_xor.c | 14 | ||||
-rw-r--r-- | drivers/dma/mxs-dma.c | 15 | ||||
-rw-r--r-- | drivers/dma/pch_dma.c | 16 | ||||
-rw-r--r-- | drivers/dma/pl330.c | 14 | ||||
-rw-r--r-- | drivers/dma/ppc4xx/adma.c | 19 | ||||
-rw-r--r-- | drivers/dma/shdma.c | 8 | ||||
-rw-r--r-- | drivers/dma/sirf-dma.c | 8 | ||||
-rw-r--r-- | drivers/dma/ste_dma40.c | 13 | ||||
-rw-r--r-- | drivers/dma/timb_dma.c | 7 | ||||
-rw-r--r-- | drivers/dma/txx9dmac.c | 17 |
25 files changed, 52 insertions, 272 deletions
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 45f5e66e1c84..d8d3dc273f29 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c | |||
@@ -921,13 +921,10 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) | |||
921 | struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); | 921 | struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); |
922 | struct pl08x_txd *txd = to_pl08x_txd(tx); | 922 | struct pl08x_txd *txd = to_pl08x_txd(tx); |
923 | unsigned long flags; | 923 | unsigned long flags; |
924 | dma_cookie_t cookie; | ||
924 | 925 | ||
925 | spin_lock_irqsave(&plchan->lock, flags); | 926 | spin_lock_irqsave(&plchan->lock, flags); |
926 | 927 | cookie = dma_cookie_assign(tx); | |
927 | plchan->chan.cookie += 1; | ||
928 | if (plchan->chan.cookie < 0) | ||
929 | plchan->chan.cookie = 1; | ||
930 | tx->cookie = plchan->chan.cookie; | ||
931 | 928 | ||
932 | /* Put this onto the pending list */ | 929 | /* Put this onto the pending list */ |
933 | list_add_tail(&txd->node, &plchan->pend_list); | 930 | list_add_tail(&txd->node, &plchan->pend_list); |
@@ -947,7 +944,7 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) | |||
947 | 944 | ||
948 | spin_unlock_irqrestore(&plchan->lock, flags); | 945 | spin_unlock_irqrestore(&plchan->lock, flags); |
949 | 946 | ||
950 | return tx->cookie; | 947 | return cookie; |
951 | } | 948 | } |
952 | 949 | ||
953 | static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( | 950 | static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index ce26ba381144..df47e7d6164b 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -193,27 +193,6 @@ static void atc_desc_chain(struct at_desc **first, struct at_desc **prev, | |||
193 | } | 193 | } |
194 | 194 | ||
195 | /** | 195 | /** |
196 | * atc_assign_cookie - compute and assign new cookie | ||
197 | * @atchan: channel we work on | ||
198 | * @desc: descriptor to assign cookie for | ||
199 | * | ||
200 | * Called with atchan->lock held and bh disabled | ||
201 | */ | ||
202 | static dma_cookie_t | ||
203 | atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc) | ||
204 | { | ||
205 | dma_cookie_t cookie = atchan->chan_common.cookie; | ||
206 | |||
207 | if (++cookie < 0) | ||
208 | cookie = 1; | ||
209 | |||
210 | atchan->chan_common.cookie = cookie; | ||
211 | desc->txd.cookie = cookie; | ||
212 | |||
213 | return cookie; | ||
214 | } | ||
215 | |||
216 | /** | ||
217 | * atc_dostart - starts the DMA engine for real | 196 | * atc_dostart - starts the DMA engine for real |
218 | * @atchan: the channel we want to start | 197 | * @atchan: the channel we want to start |
219 | * @first: first descriptor in the list we want to begin with | 198 | * @first: first descriptor in the list we want to begin with |
@@ -548,7 +527,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
548 | unsigned long flags; | 527 | unsigned long flags; |
549 | 528 | ||
550 | spin_lock_irqsave(&atchan->lock, flags); | 529 | spin_lock_irqsave(&atchan->lock, flags); |
551 | cookie = atc_assign_cookie(atchan, desc); | 530 | cookie = dma_cookie_assign(tx); |
552 | 531 | ||
553 | if (list_empty(&atchan->active_list)) { | 532 | if (list_empty(&atchan->active_list)) { |
554 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", | 533 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", |
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index fb0d1245ade5..843a1a3b8a81 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c | |||
@@ -318,20 +318,6 @@ static int coh901318_prep_linked_list(struct coh901318_chan *cohc, | |||
318 | 318 | ||
319 | return 0; | 319 | return 0; |
320 | } | 320 | } |
321 | static dma_cookie_t | ||
322 | coh901318_assign_cookie(struct coh901318_chan *cohc, | ||
323 | struct coh901318_desc *cohd) | ||
324 | { | ||
325 | dma_cookie_t cookie = cohc->chan.cookie; | ||
326 | |||
327 | if (++cookie < 0) | ||
328 | cookie = 1; | ||
329 | |||
330 | cohc->chan.cookie = cookie; | ||
331 | cohd->desc.cookie = cookie; | ||
332 | |||
333 | return cookie; | ||
334 | } | ||
335 | 321 | ||
336 | static struct coh901318_desc * | 322 | static struct coh901318_desc * |
337 | coh901318_desc_get(struct coh901318_chan *cohc) | 323 | coh901318_desc_get(struct coh901318_chan *cohc) |
@@ -966,16 +952,16 @@ coh901318_tx_submit(struct dma_async_tx_descriptor *tx) | |||
966 | desc); | 952 | desc); |
967 | struct coh901318_chan *cohc = to_coh901318_chan(tx->chan); | 953 | struct coh901318_chan *cohc = to_coh901318_chan(tx->chan); |
968 | unsigned long flags; | 954 | unsigned long flags; |
955 | dma_cookie_t cookie; | ||
969 | 956 | ||
970 | spin_lock_irqsave(&cohc->lock, flags); | 957 | spin_lock_irqsave(&cohc->lock, flags); |
971 | 958 | cookie = dma_cookie_assign(tx); | |
972 | tx->cookie = coh901318_assign_cookie(cohc, cohd); | ||
973 | 959 | ||
974 | coh901318_desc_queue(cohc, cohd); | 960 | coh901318_desc_queue(cohc, cohd); |
975 | 961 | ||
976 | spin_unlock_irqrestore(&cohc->lock, flags); | 962 | spin_unlock_irqrestore(&cohc->lock, flags); |
977 | 963 | ||
978 | return tx->cookie; | 964 | return cookie; |
979 | } | 965 | } |
980 | 966 | ||
981 | static struct dma_async_tx_descriptor * | 967 | static struct dma_async_tx_descriptor * |
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h index 968570dde2eb..7692c8644045 100644 --- a/drivers/dma/dmaengine.h +++ b/drivers/dma/dmaengine.h | |||
@@ -7,4 +7,24 @@ | |||
7 | 7 | ||
8 | #include <linux/dmaengine.h> | 8 | #include <linux/dmaengine.h> |
9 | 9 | ||
10 | /** | ||
11 | * dma_cookie_assign - assign a DMA engine cookie to the descriptor | ||
12 | * @tx: descriptor needing cookie | ||
13 | * | ||
14 | * Assign a unique non-zero per-channel cookie to the descriptor. | ||
15 | * Note: caller is expected to hold a lock to prevent concurrency. | ||
16 | */ | ||
17 | static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx) | ||
18 | { | ||
19 | struct dma_chan *chan = tx->chan; | ||
20 | dma_cookie_t cookie; | ||
21 | |||
22 | cookie = chan->cookie + 1; | ||
23 | if (cookie < DMA_MIN_COOKIE) | ||
24 | cookie = DMA_MIN_COOKIE; | ||
25 | tx->cookie = chan->cookie = cookie; | ||
26 | |||
27 | return cookie; | ||
28 | } | ||
29 | |||
10 | #endif | 30 | #endif |
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index b279e1920725..3a4ca67ace02 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c | |||
@@ -157,21 +157,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
157 | } | 157 | } |
158 | } | 158 | } |
159 | 159 | ||
160 | /* Called with dwc->lock held and bh disabled */ | ||
161 | static dma_cookie_t | ||
162 | dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc) | ||
163 | { | ||
164 | dma_cookie_t cookie = dwc->chan.cookie; | ||
165 | |||
166 | if (++cookie < 0) | ||
167 | cookie = 1; | ||
168 | |||
169 | dwc->chan.cookie = cookie; | ||
170 | desc->txd.cookie = cookie; | ||
171 | |||
172 | return cookie; | ||
173 | } | ||
174 | |||
175 | static void dwc_initialize(struct dw_dma_chan *dwc) | 160 | static void dwc_initialize(struct dw_dma_chan *dwc) |
176 | { | 161 | { |
177 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 162 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
@@ -603,7 +588,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
603 | unsigned long flags; | 588 | unsigned long flags; |
604 | 589 | ||
605 | spin_lock_irqsave(&dwc->lock, flags); | 590 | spin_lock_irqsave(&dwc->lock, flags); |
606 | cookie = dwc_assign_cookie(dwc, desc); | 591 | cookie = dma_cookie_assign(tx); |
607 | 592 | ||
608 | /* | 593 | /* |
609 | * REVISIT: We should attempt to chain as many descriptors as | 594 | * REVISIT: We should attempt to chain as many descriptors as |
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 326019832a13..e5aaae87ddfb 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c | |||
@@ -783,17 +783,10 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
783 | unsigned long flags; | 783 | unsigned long flags; |
784 | 784 | ||
785 | spin_lock_irqsave(&edmac->lock, flags); | 785 | spin_lock_irqsave(&edmac->lock, flags); |
786 | 786 | cookie = dma_cookie_assign(tx); | |
787 | cookie = edmac->chan.cookie; | ||
788 | |||
789 | if (++cookie < 0) | ||
790 | cookie = 1; | ||
791 | 787 | ||
792 | desc = container_of(tx, struct ep93xx_dma_desc, txd); | 788 | desc = container_of(tx, struct ep93xx_dma_desc, txd); |
793 | 789 | ||
794 | edmac->chan.cookie = cookie; | ||
795 | desc->txd.cookie = cookie; | ||
796 | |||
797 | /* | 790 | /* |
798 | * If nothing is currently prosessed, we push this descriptor | 791 | * If nothing is currently prosessed, we push this descriptor |
799 | * directly to the hardware. Otherwise we put the descriptor | 792 | * directly to the hardware. Otherwise we put the descriptor |
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 2ebbe572f9e0..04b4347ba4e9 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -414,17 +414,10 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
414 | * assign cookies to all of the software descriptors | 414 | * assign cookies to all of the software descriptors |
415 | * that make up this transaction | 415 | * that make up this transaction |
416 | */ | 416 | */ |
417 | cookie = chan->common.cookie; | ||
418 | list_for_each_entry(child, &desc->tx_list, node) { | 417 | list_for_each_entry(child, &desc->tx_list, node) { |
419 | cookie++; | 418 | cookie = dma_cookie_assign(&child->async_tx); |
420 | if (cookie < DMA_MIN_COOKIE) | ||
421 | cookie = DMA_MIN_COOKIE; | ||
422 | |||
423 | child->async_tx.cookie = cookie; | ||
424 | } | 419 | } |
425 | 420 | ||
426 | chan->common.cookie = cookie; | ||
427 | |||
428 | /* put this transaction onto the tail of the pending queue */ | 421 | /* put this transaction onto the tail of the pending queue */ |
429 | append_ld_queue(chan, desc); | 422 | append_ld_queue(chan, desc); |
430 | 423 | ||
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index cead5e4bd38c..687fc687aaf6 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -165,19 +165,6 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan, | |||
165 | return ret; | 165 | return ret; |
166 | } | 166 | } |
167 | 167 | ||
168 | static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma) | ||
169 | { | ||
170 | dma_cookie_t cookie = imxdma->chan.cookie; | ||
171 | |||
172 | if (++cookie < 0) | ||
173 | cookie = 1; | ||
174 | |||
175 | imxdma->chan.cookie = cookie; | ||
176 | imxdma->desc.cookie = cookie; | ||
177 | |||
178 | return cookie; | ||
179 | } | ||
180 | |||
181 | static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) | 168 | static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) |
182 | { | 169 | { |
183 | struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan); | 170 | struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan); |
@@ -185,7 +172,7 @@ static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
185 | 172 | ||
186 | spin_lock_irq(&imxdmac->lock); | 173 | spin_lock_irq(&imxdmac->lock); |
187 | 174 | ||
188 | cookie = imxdma_assign_cookie(imxdmac); | 175 | cookie = dma_cookie_assign(tx); |
189 | 176 | ||
190 | spin_unlock_irq(&imxdmac->lock); | 177 | spin_unlock_irq(&imxdmac->lock); |
191 | 178 | ||
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 48a791f93adc..3f0c002933f3 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -815,19 +815,6 @@ out: | |||
815 | return ret; | 815 | return ret; |
816 | } | 816 | } |
817 | 817 | ||
818 | static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac) | ||
819 | { | ||
820 | dma_cookie_t cookie = sdmac->chan.cookie; | ||
821 | |||
822 | if (++cookie < 0) | ||
823 | cookie = 1; | ||
824 | |||
825 | sdmac->chan.cookie = cookie; | ||
826 | sdmac->desc.cookie = cookie; | ||
827 | |||
828 | return cookie; | ||
829 | } | ||
830 | |||
831 | static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) | 818 | static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) |
832 | { | 819 | { |
833 | return container_of(chan, struct sdma_channel, chan); | 820 | return container_of(chan, struct sdma_channel, chan); |
@@ -841,7 +828,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
841 | 828 | ||
842 | spin_lock_irqsave(&sdmac->lock, flags); | 829 | spin_lock_irqsave(&sdmac->lock, flags); |
843 | 830 | ||
844 | cookie = sdma_assign_cookie(sdmac); | 831 | cookie = dma_cookie_assign(tx); |
845 | 832 | ||
846 | spin_unlock_irqrestore(&sdmac->lock, flags); | 833 | spin_unlock_irqrestore(&sdmac->lock, flags); |
847 | 834 | ||
@@ -1140,7 +1127,6 @@ static void sdma_issue_pending(struct dma_chan *chan) | |||
1140 | struct sdma_engine *sdma = sdmac->sdma; | 1127 | struct sdma_engine *sdma = sdmac->sdma; |
1141 | 1128 | ||
1142 | if (sdmac->status == DMA_IN_PROGRESS) | 1129 | if (sdmac->status == DMA_IN_PROGRESS) |
1143 | sdma_enable_channel(sdma, sdmac->channel); | ||
1144 | } | 1130 | } |
1145 | 1131 | ||
1146 | #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 | 1132 | #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 |
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index 55d0451670b0..e9217c390b76 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
@@ -436,14 +436,7 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
436 | dma_cookie_t cookie; | 436 | dma_cookie_t cookie; |
437 | 437 | ||
438 | spin_lock_bh(&midc->lock); | 438 | spin_lock_bh(&midc->lock); |
439 | cookie = midc->chan.cookie; | 439 | cookie = dma_cookie_assign(tx); |
440 | |||
441 | if (++cookie < 0) | ||
442 | cookie = 1; | ||
443 | |||
444 | midc->chan.cookie = cookie; | ||
445 | desc->txd.cookie = cookie; | ||
446 | |||
447 | 440 | ||
448 | if (list_empty(&midc->active_list)) | 441 | if (list_empty(&midc->active_list)) |
449 | list_add_tail(&desc->desc_node, &midc->active_list); | 442 | list_add_tail(&desc->desc_node, &midc->active_list); |
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index dfe411b2014f..5c06117ac682 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
@@ -237,12 +237,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) | |||
237 | 237 | ||
238 | spin_lock_bh(&ioat->desc_lock); | 238 | spin_lock_bh(&ioat->desc_lock); |
239 | /* cookie incr and addition to used_list must be atomic */ | 239 | /* cookie incr and addition to used_list must be atomic */ |
240 | cookie = c->cookie; | 240 | cookie = dma_cookie_assign(tx); |
241 | cookie++; | ||
242 | if (cookie < 0) | ||
243 | cookie = 1; | ||
244 | c->cookie = cookie; | ||
245 | tx->cookie = cookie; | ||
246 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); | 241 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); |
247 | 242 | ||
248 | /* write address into NextDescriptor field of last desc in chain */ | 243 | /* write address into NextDescriptor field of last desc in chain */ |
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 6c1e6754d9bd..17ecacb70d40 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c | |||
@@ -400,13 +400,9 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) | |||
400 | struct dma_chan *c = tx->chan; | 400 | struct dma_chan *c = tx->chan; |
401 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | 401 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); |
402 | struct ioat_chan_common *chan = &ioat->base; | 402 | struct ioat_chan_common *chan = &ioat->base; |
403 | dma_cookie_t cookie = c->cookie; | 403 | dma_cookie_t cookie; |
404 | 404 | ||
405 | cookie++; | 405 | cookie = dma_cookie_assign(tx); |
406 | if (cookie < 0) | ||
407 | cookie = 1; | ||
408 | tx->cookie = cookie; | ||
409 | c->cookie = cookie; | ||
410 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); | 406 | dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); |
411 | 407 | ||
412 | if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) | 408 | if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) |
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 650bf1e185e8..f2392d59568d 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -440,18 +440,6 @@ retry: | |||
440 | return NULL; | 440 | return NULL; |
441 | } | 441 | } |
442 | 442 | ||
443 | static dma_cookie_t | ||
444 | iop_desc_assign_cookie(struct iop_adma_chan *iop_chan, | ||
445 | struct iop_adma_desc_slot *desc) | ||
446 | { | ||
447 | dma_cookie_t cookie = iop_chan->common.cookie; | ||
448 | cookie++; | ||
449 | if (cookie < 0) | ||
450 | cookie = 1; | ||
451 | iop_chan->common.cookie = desc->async_tx.cookie = cookie; | ||
452 | return cookie; | ||
453 | } | ||
454 | |||
455 | static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan) | 443 | static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan) |
456 | { | 444 | { |
457 | dev_dbg(iop_chan->device->common.dev, "pending: %d\n", | 445 | dev_dbg(iop_chan->device->common.dev, "pending: %d\n", |
@@ -479,7 +467,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
479 | slots_per_op = grp_start->slots_per_op; | 467 | slots_per_op = grp_start->slots_per_op; |
480 | 468 | ||
481 | spin_lock_bh(&iop_chan->lock); | 469 | spin_lock_bh(&iop_chan->lock); |
482 | cookie = iop_desc_assign_cookie(iop_chan, sw_desc); | 470 | cookie = dma_cookie_assign(tx); |
483 | 471 | ||
484 | old_chain_tail = list_entry(iop_chan->chain.prev, | 472 | old_chain_tail = list_entry(iop_chan->chain.prev, |
485 | struct iop_adma_desc_slot, chain_node); | 473 | struct iop_adma_desc_slot, chain_node); |
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 0fcff6508fb1..d4620c53fd2d 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -867,14 +867,7 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) | |||
867 | 867 | ||
868 | dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]); | 868 | dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]); |
869 | 869 | ||
870 | cookie = ichan->dma_chan.cookie; | 870 | cookie = dma_cookie_assign(tx); |
871 | |||
872 | if (++cookie < 0) | ||
873 | cookie = 1; | ||
874 | |||
875 | /* from dmaengine.h: "last cookie value returned to client" */ | ||
876 | ichan->dma_chan.cookie = cookie; | ||
877 | tx->cookie = cookie; | ||
878 | 871 | ||
879 | /* ipu->lock can be taken under ichan->lock, but not v.v. */ | 872 | /* ipu->lock can be taken under ichan->lock, but not v.v. */ |
880 | spin_lock_irqsave(&ichan->lock, flags); | 873 | spin_lock_irqsave(&ichan->lock, flags); |
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index c56b3fe5d3fa..0253d5aecdb1 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
@@ -439,13 +439,7 @@ static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd) | |||
439 | mpc_dma_execute(mchan); | 439 | mpc_dma_execute(mchan); |
440 | 440 | ||
441 | /* Update cookie */ | 441 | /* Update cookie */ |
442 | cookie = mchan->chan.cookie + 1; | 442 | cookie = dma_cookie_assign(txd); |
443 | if (cookie <= 0) | ||
444 | cookie = 1; | ||
445 | |||
446 | mchan->chan.cookie = cookie; | ||
447 | mdesc->desc.cookie = cookie; | ||
448 | |||
449 | spin_unlock_irqrestore(&mchan->lock, flags); | 443 | spin_unlock_irqrestore(&mchan->lock, flags); |
450 | 444 | ||
451 | return cookie; | 445 | return cookie; |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index ee61778ba8a2..d9810ce3794c 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -536,18 +536,6 @@ retry: | |||
536 | return NULL; | 536 | return NULL; |
537 | } | 537 | } |
538 | 538 | ||
539 | static dma_cookie_t | ||
540 | mv_desc_assign_cookie(struct mv_xor_chan *mv_chan, | ||
541 | struct mv_xor_desc_slot *desc) | ||
542 | { | ||
543 | dma_cookie_t cookie = mv_chan->common.cookie; | ||
544 | |||
545 | if (++cookie < 0) | ||
546 | cookie = 1; | ||
547 | mv_chan->common.cookie = desc->async_tx.cookie = cookie; | ||
548 | return cookie; | ||
549 | } | ||
550 | |||
551 | /************************ DMA engine API functions ****************************/ | 539 | /************************ DMA engine API functions ****************************/ |
552 | static dma_cookie_t | 540 | static dma_cookie_t |
553 | mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | 541 | mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) |
@@ -565,7 +553,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | |||
565 | grp_start = sw_desc->group_head; | 553 | grp_start = sw_desc->group_head; |
566 | 554 | ||
567 | spin_lock_bh(&mv_chan->lock); | 555 | spin_lock_bh(&mv_chan->lock); |
568 | cookie = mv_desc_assign_cookie(mv_chan, sw_desc); | 556 | cookie = dma_cookie_assign(tx); |
569 | 557 | ||
570 | if (list_empty(&mv_chan->chain)) | 558 | if (list_empty(&mv_chan->chain)) |
571 | list_splice_init(&sw_desc->tx_list, &mv_chan->chain); | 559 | list_splice_init(&sw_desc->tx_list, &mv_chan->chain); |
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index daa84ee2a187..4d3b6ff3050f 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -194,19 +194,6 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) | |||
194 | mxs_chan->status = DMA_IN_PROGRESS; | 194 | mxs_chan->status = DMA_IN_PROGRESS; |
195 | } | 195 | } |
196 | 196 | ||
197 | static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan) | ||
198 | { | ||
199 | dma_cookie_t cookie = mxs_chan->chan.cookie; | ||
200 | |||
201 | if (++cookie < 0) | ||
202 | cookie = 1; | ||
203 | |||
204 | mxs_chan->chan.cookie = cookie; | ||
205 | mxs_chan->desc.cookie = cookie; | ||
206 | |||
207 | return cookie; | ||
208 | } | ||
209 | |||
210 | static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) | 197 | static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) |
211 | { | 198 | { |
212 | return container_of(chan, struct mxs_dma_chan, chan); | 199 | return container_of(chan, struct mxs_dma_chan, chan); |
@@ -218,7 +205,7 @@ static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
218 | 205 | ||
219 | mxs_dma_enable_chan(mxs_chan); | 206 | mxs_dma_enable_chan(mxs_chan); |
220 | 207 | ||
221 | return mxs_dma_assign_cookie(mxs_chan); | 208 | return dma_cookie_assign(tx); |
222 | } | 209 | } |
223 | 210 | ||
224 | static void mxs_dma_tasklet(unsigned long data) | 211 | static void mxs_dma_tasklet(unsigned long data) |
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 2b3479d850c9..5218e48aed0e 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -417,20 +417,6 @@ static void pdc_advance_work(struct pch_dma_chan *pd_chan) | |||
417 | } | 417 | } |
418 | } | 418 | } |
419 | 419 | ||
420 | static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan, | ||
421 | struct pch_dma_desc *desc) | ||
422 | { | ||
423 | dma_cookie_t cookie = pd_chan->chan.cookie; | ||
424 | |||
425 | if (++cookie < 0) | ||
426 | cookie = 1; | ||
427 | |||
428 | pd_chan->chan.cookie = cookie; | ||
429 | desc->txd.cookie = cookie; | ||
430 | |||
431 | return cookie; | ||
432 | } | ||
433 | |||
434 | static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) | 420 | static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) |
435 | { | 421 | { |
436 | struct pch_dma_desc *desc = to_pd_desc(txd); | 422 | struct pch_dma_desc *desc = to_pd_desc(txd); |
@@ -438,7 +424,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) | |||
438 | dma_cookie_t cookie; | 424 | dma_cookie_t cookie; |
439 | 425 | ||
440 | spin_lock(&pd_chan->lock); | 426 | spin_lock(&pd_chan->lock); |
441 | cookie = pdc_assign_cookie(pd_chan, desc); | 427 | cookie = dma_cookie_assign(txd); |
442 | 428 | ||
443 | if (list_empty(&pd_chan->active_list)) { | 429 | if (list_empty(&pd_chan->active_list)) { |
444 | list_add_tail(&desc->desc_node, &pd_chan->active_list); | 430 | list_add_tail(&desc->desc_node, &pd_chan->active_list); |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 2db70748403f..644eb789958b 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -429,26 +429,16 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) | |||
429 | spin_lock_irqsave(&pch->lock, flags); | 429 | spin_lock_irqsave(&pch->lock, flags); |
430 | 430 | ||
431 | /* Assign cookies to all nodes */ | 431 | /* Assign cookies to all nodes */ |
432 | cookie = tx->chan->cookie; | ||
433 | |||
434 | while (!list_empty(&last->node)) { | 432 | while (!list_empty(&last->node)) { |
435 | desc = list_entry(last->node.next, struct dma_pl330_desc, node); | 433 | desc = list_entry(last->node.next, struct dma_pl330_desc, node); |
436 | 434 | ||
437 | if (++cookie < 0) | 435 | dma_cookie_assign(&desc->txd); |
438 | cookie = 1; | ||
439 | desc->txd.cookie = cookie; | ||
440 | 436 | ||
441 | list_move_tail(&desc->node, &pch->work_list); | 437 | list_move_tail(&desc->node, &pch->work_list); |
442 | } | 438 | } |
443 | 439 | ||
444 | if (++cookie < 0) | 440 | cookie = dma_cookie_assign(&last->txd); |
445 | cookie = 1; | ||
446 | last->txd.cookie = cookie; | ||
447 | |||
448 | list_add_tail(&last->node, &pch->work_list); | 441 | list_add_tail(&last->node, &pch->work_list); |
449 | |||
450 | tx->chan->cookie = cookie; | ||
451 | |||
452 | spin_unlock_irqrestore(&pch->lock, flags); | 442 | spin_unlock_irqrestore(&pch->lock, flags); |
453 | 443 | ||
454 | return cookie; | 444 | return cookie; |
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 40082ec8326c..12e94dd6fc3d 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
@@ -2151,22 +2151,6 @@ static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan) | |||
2151 | } | 2151 | } |
2152 | 2152 | ||
2153 | /** | 2153 | /** |
2154 | * ppc440spe_desc_assign_cookie - assign a cookie | ||
2155 | */ | ||
2156 | static dma_cookie_t ppc440spe_desc_assign_cookie( | ||
2157 | struct ppc440spe_adma_chan *chan, | ||
2158 | struct ppc440spe_adma_desc_slot *desc) | ||
2159 | { | ||
2160 | dma_cookie_t cookie = chan->common.cookie; | ||
2161 | |||
2162 | cookie++; | ||
2163 | if (cookie < 0) | ||
2164 | cookie = 1; | ||
2165 | chan->common.cookie = desc->async_tx.cookie = cookie; | ||
2166 | return cookie; | ||
2167 | } | ||
2168 | |||
2169 | /** | ||
2170 | * ppc440spe_rxor_set_region_data - | 2154 | * ppc440spe_rxor_set_region_data - |
2171 | */ | 2155 | */ |
2172 | static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc, | 2156 | static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc, |
@@ -2236,8 +2220,7 @@ static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
2236 | slots_per_op = group_start->slots_per_op; | 2220 | slots_per_op = group_start->slots_per_op; |
2237 | 2221 | ||
2238 | spin_lock_bh(&chan->lock); | 2222 | spin_lock_bh(&chan->lock); |
2239 | 2223 | cookie = dma_cookie_assign(tx); | |
2240 | cookie = ppc440spe_desc_assign_cookie(chan, sw_desc); | ||
2241 | 2224 | ||
2242 | if (unlikely(list_empty(&chan->chain))) { | 2225 | if (unlikely(list_empty(&chan->chain))) { |
2243 | /* first peer */ | 2226 | /* first peer */ |
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index c2914330b8fc..96d0a4fe8dd9 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c | |||
@@ -298,13 +298,7 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) | |||
298 | else | 298 | else |
299 | power_up = false; | 299 | power_up = false; |
300 | 300 | ||
301 | cookie = sh_chan->common.cookie; | 301 | cookie = dma_cookie_assign(tx); |
302 | cookie++; | ||
303 | if (cookie < 0) | ||
304 | cookie = 1; | ||
305 | |||
306 | sh_chan->common.cookie = cookie; | ||
307 | tx->cookie = cookie; | ||
308 | 302 | ||
309 | /* Mark all chunks of this descriptor as submitted, move to the queue */ | 303 | /* Mark all chunks of this descriptor as submitted, move to the queue */ |
310 | list_for_each_entry_safe(chunk, c, desc->node.prev, node) { | 304 | list_for_each_entry_safe(chunk, c, desc->node.prev, node) { |
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 60473f00cf1c..7bb154a85332 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c | |||
@@ -257,13 +257,7 @@ static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd) | |||
257 | /* Move descriptor to queue */ | 257 | /* Move descriptor to queue */ |
258 | list_move_tail(&sdesc->node, &schan->queued); | 258 | list_move_tail(&sdesc->node, &schan->queued); |
259 | 259 | ||
260 | /* Update cookie */ | 260 | cookie = dma_cookie_assign(txd); |
261 | cookie = schan->chan.cookie + 1; | ||
262 | if (cookie <= 0) | ||
263 | cookie = 1; | ||
264 | |||
265 | schan->chan.cookie = cookie; | ||
266 | sdesc->desc.cookie = cookie; | ||
267 | 261 | ||
268 | spin_unlock_irqrestore(&schan->lock, flags); | 262 | spin_unlock_irqrestore(&schan->lock, flags); |
269 | 263 | ||
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 156b98f661a3..23e2edc4afd4 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -1220,21 +1220,14 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | |||
1220 | chan); | 1220 | chan); |
1221 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); | 1221 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); |
1222 | unsigned long flags; | 1222 | unsigned long flags; |
1223 | dma_cookie_t cookie; | ||
1223 | 1224 | ||
1224 | spin_lock_irqsave(&d40c->lock, flags); | 1225 | spin_lock_irqsave(&d40c->lock, flags); |
1225 | 1226 | cookie = dma_cookie_assign(tx); | |
1226 | d40c->chan.cookie++; | ||
1227 | |||
1228 | if (d40c->chan.cookie < 0) | ||
1229 | d40c->chan.cookie = 1; | ||
1230 | |||
1231 | d40d->txd.cookie = d40c->chan.cookie; | ||
1232 | |||
1233 | d40_desc_queue(d40c, d40d); | 1227 | d40_desc_queue(d40c, d40d); |
1234 | |||
1235 | spin_unlock_irqrestore(&d40c->lock, flags); | 1228 | spin_unlock_irqrestore(&d40c->lock, flags); |
1236 | 1229 | ||
1237 | return tx->cookie; | 1230 | return cookie; |
1238 | } | 1231 | } |
1239 | 1232 | ||
1240 | static int d40_start(struct d40_chan *d40c) | 1233 | static int d40_start(struct d40_chan *d40c) |
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 4b61879284d1..b6e83fc27c4e 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
@@ -350,12 +350,7 @@ static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd) | |||
350 | dma_cookie_t cookie; | 350 | dma_cookie_t cookie; |
351 | 351 | ||
352 | spin_lock_bh(&td_chan->lock); | 352 | spin_lock_bh(&td_chan->lock); |
353 | 353 | cookie = dma_cookie_assign(txd); | |
354 | cookie = txd->chan->cookie; | ||
355 | if (++cookie < 0) | ||
356 | cookie = 1; | ||
357 | txd->chan->cookie = cookie; | ||
358 | txd->cookie = cookie; | ||
359 | 354 | ||
360 | if (list_empty(&td_chan->active_list)) { | 355 | if (list_empty(&td_chan->active_list)) { |
361 | dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__, | 356 | dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__, |
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index db6d809d4d04..66f8fca1bd3c 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c | |||
@@ -281,21 +281,6 @@ static void txx9dmac_desc_put(struct txx9dmac_chan *dc, | |||
281 | } | 281 | } |
282 | } | 282 | } |
283 | 283 | ||
284 | /* Called with dc->lock held and bh disabled */ | ||
285 | static dma_cookie_t | ||
286 | txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) | ||
287 | { | ||
288 | dma_cookie_t cookie = dc->chan.cookie; | ||
289 | |||
290 | if (++cookie < 0) | ||
291 | cookie = 1; | ||
292 | |||
293 | dc->chan.cookie = cookie; | ||
294 | desc->txd.cookie = cookie; | ||
295 | |||
296 | return cookie; | ||
297 | } | ||
298 | |||
299 | /*----------------------------------------------------------------------*/ | 284 | /*----------------------------------------------------------------------*/ |
300 | 285 | ||
301 | static void txx9dmac_dump_regs(struct txx9dmac_chan *dc) | 286 | static void txx9dmac_dump_regs(struct txx9dmac_chan *dc) |
@@ -740,7 +725,7 @@ static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx) | |||
740 | dma_cookie_t cookie; | 725 | dma_cookie_t cookie; |
741 | 726 | ||
742 | spin_lock_bh(&dc->lock); | 727 | spin_lock_bh(&dc->lock); |
743 | cookie = txx9dmac_assign_cookie(dc, desc); | 728 | cookie = dma_cookie_assign(tx); |
744 | 729 | ||
745 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n", | 730 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n", |
746 | desc->txd.cookie, desc); | 731 | desc->txd.cookie, desc); |