author	Russell King - ARM Linux <linux@arm.linux.org.uk>	2012-03-06 17:35:27 -0500
committer	Vinod Koul <vinod.koul@linux.intel.com>	2012-03-13 02:07:14 -0400
commit	96a2af41c78b1fbb1f567a3486bdc63f7b31c5fd (patch)
tree	d977c6b2ff1a23dfd523e70315ebe976a3f3f079
parent	f7fbce07c6ce26a25b4e0cb5f241c361fde87901 (diff)
dmaengine: consolidate tx_status functions
Now that we have the completed cookie in the dma_chan structure, we can
consolidate the tx_status functions by providing a function to set the
txstate structure and returning the DMA status.  We also provide a
separate helper to set the residue for cookies which are still in
progress.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Jassi Brar <jassisinghbrar@gmail.com>
[imx-sdma.c & mxs-dma.c]
Tested-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
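For illustration only, a minimal sketch of the shape most drivers below take after
conversion to the two new helpers.  It is not lifted from any single driver in this
patch; foo_tx_status() and foo_get_residue() are hypothetical stand-ins for a
driver's status callback and its bytes-remaining routine.

/* Assumes the dma_cookie_status()/dma_set_residue() helpers added to
 * drivers/dma/dmaengine.h by this patch. */
static enum dma_status foo_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	enum dma_status ret;

	/* Fills txstate->last/used, zeroes the residue, reports completion */
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	/* Still in flight: report how many bytes remain, if the driver knows */
	dma_set_residue(txstate, foo_get_residue(chan));

	return ret;
}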
-rw-r--r--	drivers/dma/amba-pl08x.c	22
-rw-r--r--	drivers/dma/at_hdmac.c	18
-rw-r--r--	drivers/dma/coh901318.c	13
-rw-r--r--	drivers/dma/dmaengine.h	31
-rw-r--r--	drivers/dma/dw_dmac.c	19
-rw-r--r--	drivers/dma/ep93xx_dma.c	7
-rw-r--r--	drivers/dma/fsldma.c	11
-rw-r--r--	drivers/dma/imx-dma.c	11
-rw-r--r--	drivers/dma/intel_mid_dma.c	19
-rw-r--r--	drivers/dma/ioat/dma.c	8
-rw-r--r--	drivers/dma/ioat/dma.h	21
-rw-r--r--	drivers/dma/ioat/dma_v3.c	8
-rw-r--r--	drivers/dma/iop-adma.c	16
-rw-r--r--	drivers/dma/mpc512x_dma.c	9
-rw-r--r--	drivers/dma/mv_xor.c	14
-rw-r--r--	drivers/dma/pch_dma.c	11
-rw-r--r--	drivers/dma/pl330.c	13
-rw-r--r--	drivers/dma/ppc4xx/adma.c	16
-rw-r--r--	drivers/dma/shdma.c	11
-rw-r--r--	drivers/dma/sirf-dma.c	9
-rw-r--r--	drivers/dma/ste_dma40.c	14
-rw-r--r--	drivers/dma/timb_dma.c	11
-rw-r--r--	drivers/dma/txx9dmac.c	16
23 files changed, 93 insertions, 235 deletions
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 346327572cf..810f696eda4 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -964,31 +964,17 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
 		dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
 	enum dma_status ret;
-	u32 bytesleft = 0;
 
-	last_used = plchan->chan.cookie;
-	last_complete = plchan->chan.completed_cookie;
-
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
-	if (ret == DMA_SUCCESS) {
-		dma_set_tx_state(txstate, last_complete, last_used, 0);
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS)
 		return ret;
-	}
 
 	/*
 	 * This cookie not complete yet
+	 * Get number of bytes left in the active transactions and queue
 	 */
-	last_used = plchan->chan.cookie;
-	last_complete = plchan->chan.completed_cookie;
-
-	/* Get number of bytes left in the active transactions and queue */
-	bytesleft = pl08x_getbytes_chan(plchan);
-
-	dma_set_tx_state(txstate, last_complete, last_used,
-			 bytesleft);
+	dma_set_residue(txstate, pl08x_getbytes_chan(plchan));
 
 	if (plchan->state == PL08X_CHAN_PAUSED)
 		return DMA_PAUSED;
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index b2826304da2..8a3297418cf 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -996,26 +996,20 @@ atc_tx_status(struct dma_chan *chan,
 
 	spin_lock_irqsave(&atchan->lock, flags);
 
-	last_complete = chan->completed_cookie;
-	last_used = chan->cookie;
-
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret != DMA_SUCCESS) {
 		atc_cleanup_descriptors(atchan);
 
-		last_complete = chan->completed_cookie;
-		last_used = chan->cookie;
-
-		ret = dma_async_is_complete(cookie, last_complete, last_used);
+		ret = dma_cookie_status(chan, cookie, txstate);
 	}
 
+	last_complete = chan->completed_cookie;
+	last_used = chan->cookie;
+
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	if (ret != DMA_SUCCESS)
-		dma_set_tx_state(txstate, last_complete, last_used,
-			atc_first_active(atchan)->len);
-	else
-		dma_set_tx_state(txstate, last_complete, last_used, 0);
+		dma_set_residue(txstate, atc_first_active(atchan)->len);
 
 	if (atc_chan_is_paused(atchan))
 		ret = DMA_PAUSED;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 24837d70095..f3505178ff4 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1151,17 +1151,12 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		 struct dma_tx_state *txstate)
 {
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
-	int ret;
-
-	last_complete = chan->completed_cookie;
-	last_used = chan->cookie;
+	enum dma_status ret;
 
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	ret = dma_cookie_status(chan, cookie, txstate);
+	/* FIXME: should be conditional on ret != DMA_SUCCESS? */
+	dma_set_residue(txstate, coh901318_get_bytes_left(chan));
 
-	dma_set_tx_state(txstate, last_complete, last_used,
-			 coh901318_get_bytes_left(chan));
 	if (ret == DMA_IN_PROGRESS && cohc->stopped)
 		ret = DMA_PAUSED;
 
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
index 47e099772b8..1ca5e0e633f 100644
--- a/drivers/dma/dmaengine.h
+++ b/drivers/dma/dmaengine.h
@@ -45,4 +45,35 @@ static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
 	tx->cookie = 0;
 }
 
+/**
+ * dma_cookie_status - report cookie status
+ * @chan: dma channel
+ * @cookie: cookie we are interested in
+ * @state: dma_tx_state structure to return last/used cookies
+ *
+ * Report the status of the cookie, filling in the state structure if
+ * non-NULL.  No locking is required.
+ */
+static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
+	dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	dma_cookie_t used, complete;
+
+	used = chan->cookie;
+	complete = chan->completed_cookie;
+	barrier();
+	if (state) {
+		state->last = complete;
+		state->used = used;
+		state->residue = 0;
+	}
+	return dma_async_is_complete(cookie, complete, used);
+}
+
+static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
+{
+	if (state)
+		state->residue = residue;
+}
+
 #endif
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 12ea60b2a1b..33bde5da850 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -979,28 +979,17 @@ dwc_tx_status(struct dma_chan *chan,
 		struct dma_tx_state *txstate)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
-	int ret;
+	enum dma_status ret;
 
-	last_complete = chan->completed_cookie;
-	last_used = chan->cookie;
-
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret != DMA_SUCCESS) {
 		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
 
-		last_complete = chan->completed_cookie;
-		last_used = chan->cookie;
-
-		ret = dma_async_is_complete(cookie, last_complete, last_used);
+		ret = dma_cookie_status(chan, cookie, txstate);
 	}
 
 	if (ret != DMA_SUCCESS)
-		dma_set_tx_state(txstate, last_complete, last_used,
-				dwc_first_active(dwc)->len);
-	else
-		dma_set_tx_state(txstate, last_complete, last_used, 0);
+		dma_set_residue(txstate, dwc_first_active(dwc)->len);
 
 	if (dwc->paused)
 		return DMA_PAUSED;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 1c56f75d9fa..142ebf0cd31 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1241,18 +1241,13 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
 					    struct dma_tx_state *state)
 {
 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
-	dma_cookie_t last_used, last_completed;
 	enum dma_status ret;
 	unsigned long flags;
 
 	spin_lock_irqsave(&edmac->lock, flags);
-	last_used = chan->cookie;
-	last_completed = chan->completed_cookie;
+	ret = dma_cookie_status(chan, cookie, state);
 	spin_unlock_irqrestore(&edmac->lock, flags);
 
-	ret = dma_async_is_complete(cookie, last_completed, last_used);
-	dma_set_tx_state(state, last_completed, last_used, 0);
-
 	return ret;
 }
 
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index f36e8b18cba..2f6c806126e 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -978,19 +978,14 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
 					struct dma_tx_state *txstate)
 {
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
-	dma_cookie_t last_complete;
-	dma_cookie_t last_used;
+	enum dma_status ret;
 	unsigned long flags;
 
 	spin_lock_irqsave(&chan->desc_lock, flags);
-
-	last_complete = dchan->completed_cookie;
-	last_used = dchan->cookie;
-
+	ret = dma_cookie_status(dchan, cookie, txstate);
 	spin_unlock_irqrestore(&chan->desc_lock, flags);
 
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-	return dma_async_is_complete(cookie, last_complete, last_used);
+	return ret;
 }
 
 /*----------------------------------------------------------------------------*/
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 9a3cbac3d69..6731f1918c5 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -153,16 +153,7 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan,
 					    dma_cookie_t cookie,
 					    struct dma_tx_state *txstate)
 {
-	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
-	dma_cookie_t last_used;
-	enum dma_status ret;
-
-	last_used = chan->cookie;
-
-	ret = dma_async_is_complete(cookie, chan->completed_cookie, last_used);
-	dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
-
-	return ret;
+	return dma_cookie_status(chan, cookie, txstate);
 }
 
 static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index cee79f6e035..c9ab4ac18e4 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -477,30 +477,17 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
 					dma_cookie_t cookie,
 					struct dma_tx_state *txstate)
 {
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
-	int ret;
+	enum dma_status ret;
 
-	last_complete = chan->completed_cookie;
-	last_used = chan->cookie;
-
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret != DMA_SUCCESS) {
 		spin_lock_bh(&midc->lock);
 		midc_scan_descriptors(to_middma_device(chan->device), midc);
 		spin_unlock_bh(&midc->lock);
 
-		last_complete = chan->completed_cookie;
-		last_used = chan->cookie;
-
-		ret = dma_async_is_complete(cookie, last_complete, last_used);
+		ret = dma_cookie_status(chan, cookie, txstate);
 	}
 
-	if (txstate) {
-		txstate->last = last_complete;
-		txstate->used = last_used;
-		txstate->residue = 0;
-	}
 	return ret;
 }
 
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index b0517c86c1b..97e100ce43e 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -729,13 +729,15 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 {
 	struct ioat_chan_common *chan = to_chan_common(c);
 	struct ioatdma_device *device = chan->device;
+	enum dma_status ret;
 
-	if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
-		return DMA_SUCCESS;
+	ret = dma_cookie_status(c, cookie, txstate);
+	if (ret == DMA_SUCCESS)
+		return ret;
 
 	device->cleanup_fn((unsigned long) c);
 
-	return ioat_tx_status(c, cookie, txstate);
+	return dma_cookie_status(c, cookie, txstate);
 }
 
 static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 9653b6b6a71..c7888bccd97 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -142,27 +142,6 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
 	return container_of(chan, struct ioat_dma_chan, base);
 }
 
-/**
- * ioat_tx_status - poll the status of an ioat transaction
- * @c: channel handle
- * @cookie: transaction identifier
- * @txstate: if set, updated with the transaction state
- */
-static inline enum dma_status
-ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
-	       struct dma_tx_state *txstate)
-{
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
-
-	last_used = c->cookie;
-	last_complete = c->completed_cookie;
-
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-
-	return dma_async_is_complete(cookie, last_complete, last_used);
-}
-
 /* wrapper around hardware descriptor format + additional software fields */
 
 /**
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 1bda46c43bd..145eda241de 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -410,13 +410,15 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 		struct dma_tx_state *txstate)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+	enum dma_status ret;
 
-	if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
-		return DMA_SUCCESS;
+	ret = dma_cookie_status(c, cookie, txstate);
+	if (ret == DMA_SUCCESS)
+		return ret;
 
 	ioat3_cleanup(ioat);
 
-	return ioat_tx_status(c, cookie, txstate);
+	return dma_cookie_status(c, cookie, txstate);
 }
 
 static struct dma_async_tx_descriptor *
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index f2392d59568..b1e3be089c5 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -894,24 +894,14 @@ static enum dma_status iop_adma_status(struct dma_chan *chan,
 					struct dma_tx_state *txstate)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
-	enum dma_status ret;
-
-	last_used = chan->cookie;
-	last_complete = chan->completed_cookie;
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
+
+	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_SUCCESS)
 		return ret;
 
 	iop_adma_slot_cleanup(iop_chan);
 
-	last_used = chan->cookie;
-	last_complete = chan->completed_cookie;
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-
-	return dma_async_is_complete(cookie, last_complete, last_used);
+	return dma_cookie_status(chan, cookie, txstate);
 }
 
 static irqreturn_t iop_adma_eot_handler(int irq, void *data)
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 0253d5aecdb..138271591ae 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -557,17 +557,14 @@ mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	       struct dma_tx_state *txstate)
 {
 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+	enum dma_status ret;
 	unsigned long flags;
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
 
 	spin_lock_irqsave(&mchan->lock, flags);
-	last_used = mchan->chan.cookie;
-	last_complete = mchan->chan.completed_cookie;
+	ret = dma_cookie_status(chan, cookie, txstate);
 	spin_unlock_irqrestore(&mchan->lock, flags);
 
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-	return dma_async_is_complete(cookie, last_complete, last_used);
+	return ret;
 }
 
 /* Prepare descriptor for memory to memory copy */
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index d9810ce3794..486353e60a0 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -810,26 +810,16 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
 					  struct dma_tx_state *txstate)
 {
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
 	enum dma_status ret;
 
-	last_used = chan->cookie;
-	last_complete = chan->completed_cookie;
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_SUCCESS) {
 		mv_xor_clean_completed_slots(mv_chan);
 		return ret;
 	}
 	mv_xor_slot_cleanup(mv_chan);
 
-	last_used = chan->cookie;
-	last_complete = chan->completed_cookie;
-
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-	return dma_async_is_complete(cookie, last_complete, last_used);
+	return dma_cookie_status(chan, cookie, txstate);
 }
 
 static void mv_dump_xor_regs(struct mv_xor_chan *chan)
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 5218e48aed0..c30f63ee32c 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -565,19 +565,12 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 				    struct dma_tx_state *txstate)
 {
 	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_completed;
-	int ret;
+	enum dma_status ret;
 
 	spin_lock_irq(&pd_chan->lock);
-	last_completed = chan->completed_cookie;
-	last_used = chan->cookie;
+	ret = dma_cookie_status(chan, cookie, txstate);
 	spin_unlock_irq(&pd_chan->lock);
 
-	ret = dma_async_is_complete(cookie, last_completed, last_used);
-
-	dma_set_tx_state(txstate, last_completed, last_used, 0);
-
 	return ret;
 }
 
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index a81d0a5f819..d43019fc349 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -395,18 +395,7 @@ static enum dma_status
 pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		 struct dma_tx_state *txstate)
 {
-	struct dma_pl330_chan *pch = to_pchan(chan);
-	dma_cookie_t last_done, last_used;
-	int ret;
-
-	last_done = chan->completed_cookie;
-	last_used = chan->cookie;
-
-	ret = dma_async_is_complete(cookie, last_done, last_used);
-
-	dma_set_tx_state(txstate, last_done, last_used, 0);
-
-	return ret;
+	return dma_cookie_status(chan, cookie, txstate);
 }
 
 static void pl330_issue_pending(struct dma_chan *chan)
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 12e94dd6fc3..86239ea0189 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -3928,28 +3928,16 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
 			dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct ppc440spe_adma_chan *ppc440spe_chan;
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
 	enum dma_status ret;
 
 	ppc440spe_chan = to_ppc440spe_adma_chan(chan);
-	last_used = chan->cookie;
-	last_complete = chan->completed_cookie;
-
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_SUCCESS)
 		return ret;
 
 	ppc440spe_adma_slot_cleanup(ppc440spe_chan);
 
-	last_used = chan->cookie;
-	last_complete = chan->completed_cookie;
-
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-
-	return dma_async_is_complete(cookie, last_complete, last_used);
+	return dma_cookie_status(chan, cookie, txstate);
 }
 
 /**
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 96d0a4fe8dd..50510ef7db7 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -879,23 +879,14 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
 					struct dma_tx_state *txstate)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
 	enum dma_status status;
 	unsigned long flags;
 
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
 
-	/* First read completed cookie to avoid a skew */
-	last_complete = chan->completed_cookie;
-	rmb();
-	last_used = chan->cookie;
-	BUG_ON(last_complete < 0);
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-
 	spin_lock_irqsave(&sh_chan->desc_lock, flags);
 
-	status = dma_async_is_complete(cookie, last_complete, last_used);
+	status = dma_cookie_status(chan, cookie, txstate);
 
 	/*
 	 * If we don't find cookie on the queue, it has been aborted and we have
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 7bb154a8533..a760d981ece 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -407,16 +407,13 @@ sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 {
 	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 	unsigned long flags;
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
+	enum dma_status ret;
 
 	spin_lock_irqsave(&schan->lock, flags);
-	last_used = schan->chan.cookie;
-	last_complete = schan->chan.completed_cookie;
+	ret = dma_cookie_status(chan, cookie, txstate);
 	spin_unlock_irqrestore(&schan->lock, flags);
 
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-	return dma_async_is_complete(cookie, last_complete, last_used);
+	return ret;
 }
 
 static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index c2463758fed..07b82e367ff 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2332,25 +2332,19 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
 					     struct dma_tx_state *txstate)
 {
 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
-	int ret;
+	enum dma_status ret;
 
 	if (d40c->phy_chan == NULL) {
 		chan_err(d40c, "Cannot read status of unallocated channel\n");
 		return -EINVAL;
 	}
 
-	last_complete = chan->completed_cookie;
-	last_used = chan->cookie;
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret != DMA_SUCCESS)
+		dma_set_residue(txstate, stedma40_residue(chan));
 
 	if (d40_is_paused(d40c))
 		ret = DMA_PAUSED;
-	else
-		ret = dma_async_is_complete(cookie, last_complete, last_used);
-
-	dma_set_tx_state(txstate, last_complete, last_used,
-			 stedma40_residue(chan));
 
 	return ret;
 }
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 1845ac9d6e8..6383abbecce 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -513,18 +513,11 @@ static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 {
 	struct timb_dma_chan *td_chan =
 		container_of(chan, struct timb_dma_chan, chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
-	int ret;
+	enum dma_status ret;
 
 	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
 
-	last_complete = chan->completed_cookie;
-	last_used = chan->cookie;
-
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
-
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
+	ret = dma_cookie_status(chan, cookie, txstate);
 
 	dev_dbg(chan2dev(chan),
 		"%s: exit, ret: %d, last_complete: %d, last_used: %d\n",
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 8a5225bf9bc..bb7b3d96ac6 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -959,27 +959,17 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		   struct dma_tx_state *txstate)
 {
 	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
-	int ret;
+	enum dma_status ret;
 
-	last_complete = chan->completed_cookie;
-	last_used = chan->cookie;
-
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret != DMA_SUCCESS) {
 		spin_lock_bh(&dc->lock);
 		txx9dmac_scan_descriptors(dc);
 		spin_unlock_bh(&dc->lock);
 
-		last_complete = chan->completed_cookie;
-		last_used = chan->cookie;
-
-		ret = dma_async_is_complete(cookie, last_complete, last_used);
+		ret = dma_cookie_status(chan, cookie, txstate);
 	}
 
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-
 	return ret;
 }
 