aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma
diff options
context:
space:
mode:
authorRussell King - ARM Linux <linux@arm.linux.org.uk>2012-03-06 17:34:06 -0500
committerVinod Koul <vinod.koul@linux.intel.com>2012-03-13 02:06:06 -0400
commit4d4e58de32a192fea65ab84509d17d199bd291c8 (patch)
treebe35531778c9cc6bee73beb94d07e176a6f3599d /drivers/dma
parent08714f60b0fc6ea3a060b69b32e77139f14e6045 (diff)
dmaengine: move last completed cookie into generic dma_chan structure
Every DMA engine implementation declares a last completed dma cookie in their private dma channel structures. This is pointless, and forces driver-specific code. Move this out into the common dma_chan structure.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Jassi Brar <jassisinghbrar@gmail.com>
[imx-sdma.c & mxs-dma.c]
Tested-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/amba-pl08x.c8
-rw-r--r--drivers/dma/at_hdmac.c10
-rw-r--r--drivers/dma/at_hdmac_regs.h2
-rw-r--r--drivers/dma/coh901318.c7
-rw-r--r--drivers/dma/dw_dmac.c10
-rw-r--r--drivers/dma/dw_dmac_regs.h1
-rw-r--r--drivers/dma/ep93xx_dma.c8
-rw-r--r--drivers/dma/fsldma.c4
-rw-r--r--drivers/dma/fsldma.h1
-rw-r--r--drivers/dma/imx-dma.c7
-rw-r--r--drivers/dma/imx-sdma.c5
-rw-r--r--drivers/dma/intel_mid_dma.c9
-rw-r--r--drivers/dma/intel_mid_dma_regs.h2
-rw-r--r--drivers/dma/ioat/dma.c2
-rw-r--r--drivers/dma/ioat/dma.h4
-rw-r--r--drivers/dma/ioat/dma_v2.c2
-rw-r--r--drivers/dma/ioat/dma_v3.c2
-rw-r--r--drivers/dma/iop-adma.c10
-rw-r--r--drivers/dma/ipu/ipu_idmac.c10
-rw-r--r--drivers/dma/mpc512x_dma.c7
-rw-r--r--drivers/dma/mv_xor.c6
-rw-r--r--drivers/dma/mv_xor.h2
-rw-r--r--drivers/dma/mxs-dma.c5
-rw-r--r--drivers/dma/pch_dma.c5
-rw-r--r--drivers/dma/pl330.c9
-rw-r--r--drivers/dma/ppc4xx/adma.c10
-rw-r--r--drivers/dma/ppc4xx/adma.h2
-rw-r--r--drivers/dma/shdma.c10
-rw-r--r--drivers/dma/shdma.h1
-rw-r--r--drivers/dma/sirf-dma.c7
-rw-r--r--drivers/dma/ste_dma40.c10
-rw-r--r--drivers/dma/timb_dma.c7
-rw-r--r--drivers/dma/txx9dmac.c10
-rw-r--r--drivers/dma/txx9dmac.h1
34 files changed, 81 insertions, 115 deletions
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 513184b4fdd1..e510447a685a 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -971,7 +971,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
971 u32 bytesleft = 0; 971 u32 bytesleft = 0;
972 972
973 last_used = plchan->chan.cookie; 973 last_used = plchan->chan.cookie;
974 last_complete = plchan->lc; 974 last_complete = plchan->chan.completed_cookie;
975 975
976 ret = dma_async_is_complete(cookie, last_complete, last_used); 976 ret = dma_async_is_complete(cookie, last_complete, last_used);
977 if (ret == DMA_SUCCESS) { 977 if (ret == DMA_SUCCESS) {
@@ -983,7 +983,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
983 * This cookie not complete yet 983 * This cookie not complete yet
984 */ 984 */
985 last_used = plchan->chan.cookie; 985 last_used = plchan->chan.cookie;
986 last_complete = plchan->lc; 986 last_complete = plchan->chan.completed_cookie;
987 987
988 /* Get number of bytes left in the active transactions and queue */ 988 /* Get number of bytes left in the active transactions and queue */
989 bytesleft = pl08x_getbytes_chan(plchan); 989 bytesleft = pl08x_getbytes_chan(plchan);
@@ -1543,7 +1543,7 @@ static void pl08x_tasklet(unsigned long data)
1543 1543
1544 if (txd) { 1544 if (txd) {
1545 /* Update last completed */ 1545 /* Update last completed */
1546 plchan->lc = txd->tx.cookie; 1546 plchan->chan.completed_cookie = txd->tx.cookie;
1547 } 1547 }
1548 1548
1549 /* If a new descriptor is queued, set it up plchan->at is NULL here */ 1549 /* If a new descriptor is queued, set it up plchan->at is NULL here */
@@ -1725,7 +1725,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1725 1725
1726 chan->chan.device = dmadev; 1726 chan->chan.device = dmadev;
1727 chan->chan.cookie = 0; 1727 chan->chan.cookie = 0;
1728 chan->lc = 0; 1728 chan->chan.completed_cookie = 0;
1729 1729
1730 spin_lock_init(&chan->lock); 1730 spin_lock_init(&chan->lock);
1731 INIT_LIST_HEAD(&chan->pend_list); 1731 INIT_LIST_HEAD(&chan->pend_list);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index f4aed5fc2cb6..6baf5d717262 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -269,7 +269,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
269 dev_vdbg(chan2dev(&atchan->chan_common), 269 dev_vdbg(chan2dev(&atchan->chan_common),
270 "descriptor %u complete\n", txd->cookie); 270 "descriptor %u complete\n", txd->cookie);
271 271
272 atchan->completed_cookie = txd->cookie; 272 atchan->chan_common.completed_cookie = txd->cookie;
273 273
274 /* move children to free_list */ 274 /* move children to free_list */
275 list_splice_init(&desc->tx_list, &atchan->free_list); 275 list_splice_init(&desc->tx_list, &atchan->free_list);
@@ -1016,14 +1016,14 @@ atc_tx_status(struct dma_chan *chan,
1016 1016
1017 spin_lock_irqsave(&atchan->lock, flags); 1017 spin_lock_irqsave(&atchan->lock, flags);
1018 1018
1019 last_complete = atchan->completed_cookie; 1019 last_complete = chan->completed_cookie;
1020 last_used = chan->cookie; 1020 last_used = chan->cookie;
1021 1021
1022 ret = dma_async_is_complete(cookie, last_complete, last_used); 1022 ret = dma_async_is_complete(cookie, last_complete, last_used);
1023 if (ret != DMA_SUCCESS) { 1023 if (ret != DMA_SUCCESS) {
1024 atc_cleanup_descriptors(atchan); 1024 atc_cleanup_descriptors(atchan);
1025 1025
1026 last_complete = atchan->completed_cookie; 1026 last_complete = chan->completed_cookie;
1027 last_used = chan->cookie; 1027 last_used = chan->cookie;
1028 1028
1029 ret = dma_async_is_complete(cookie, last_complete, last_used); 1029 ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -1129,7 +1129,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
1129 spin_lock_irqsave(&atchan->lock, flags); 1129 spin_lock_irqsave(&atchan->lock, flags);
1130 atchan->descs_allocated = i; 1130 atchan->descs_allocated = i;
1131 list_splice(&tmp_list, &atchan->free_list); 1131 list_splice(&tmp_list, &atchan->free_list);
1132 atchan->completed_cookie = chan->cookie = 1; 1132 chan->completed_cookie = chan->cookie = 1;
1133 spin_unlock_irqrestore(&atchan->lock, flags); 1133 spin_unlock_irqrestore(&atchan->lock, flags);
1134 1134
1135 /* channel parameters */ 1135 /* channel parameters */
@@ -1329,7 +1329,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
1329 struct at_dma_chan *atchan = &atdma->chan[i]; 1329 struct at_dma_chan *atchan = &atdma->chan[i];
1330 1330
1331 atchan->chan_common.device = &atdma->dma_common; 1331 atchan->chan_common.device = &atdma->dma_common;
1332 atchan->chan_common.cookie = atchan->completed_cookie = 1; 1332 atchan->chan_common.cookie = atchan->chan_common.completed_cookie = 1;
1333 list_add_tail(&atchan->chan_common.device_node, 1333 list_add_tail(&atchan->chan_common.device_node,
1334 &atdma->dma_common.channels); 1334 &atdma->dma_common.channels);
1335 1335
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index a8d3277d60b5..08fd8a0ae797 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -208,7 +208,6 @@ enum atc_status {
208 * @save_dscr: for cyclic operations, preserve next descriptor address in 208 * @save_dscr: for cyclic operations, preserve next descriptor address in
209 * the cyclic list on suspend/resume cycle 209 * the cyclic list on suspend/resume cycle
210 * @lock: serializes enqueue/dequeue operations to descriptors lists 210 * @lock: serializes enqueue/dequeue operations to descriptors lists
211 * @completed_cookie: identifier for the most recently completed operation
212 * @active_list: list of descriptors dmaengine is being running on 211 * @active_list: list of descriptors dmaengine is being running on
213 * @queue: list of descriptors ready to be submitted to engine 212 * @queue: list of descriptors ready to be submitted to engine
214 * @free_list: list of descriptors usable by the channel 213 * @free_list: list of descriptors usable by the channel
@@ -227,7 +226,6 @@ struct at_dma_chan {
227 spinlock_t lock; 226 spinlock_t lock;
228 227
229 /* these other elements are all protected by lock */ 228 /* these other elements are all protected by lock */
230 dma_cookie_t completed_cookie;
231 struct list_head active_list; 229 struct list_head active_list;
232 struct list_head queue; 230 struct list_head queue;
233 struct list_head free_list; 231 struct list_head free_list;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index d65a718c0f9b..521434bc3130 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -59,7 +59,6 @@ struct coh901318_base {
59struct coh901318_chan { 59struct coh901318_chan {
60 spinlock_t lock; 60 spinlock_t lock;
61 int allocated; 61 int allocated;
62 int completed;
63 int id; 62 int id;
64 int stopped; 63 int stopped;
65 64
@@ -705,7 +704,7 @@ static void dma_tasklet(unsigned long data)
705 callback_param = cohd_fin->desc.callback_param; 704 callback_param = cohd_fin->desc.callback_param;
706 705
707 /* sign this job as completed on the channel */ 706 /* sign this job as completed on the channel */
708 cohc->completed = cohd_fin->desc.cookie; 707 cohc->chan.completed_cookie = cohd_fin->desc.cookie;
709 708
710 /* release the lli allocation and remove the descriptor */ 709 /* release the lli allocation and remove the descriptor */
711 coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli); 710 coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);
@@ -929,7 +928,7 @@ static int coh901318_alloc_chan_resources(struct dma_chan *chan)
929 coh901318_config(cohc, NULL); 928 coh901318_config(cohc, NULL);
930 929
931 cohc->allocated = 1; 930 cohc->allocated = 1;
932 cohc->completed = chan->cookie = 1; 931 chan->completed_cookie = chan->cookie = 1;
933 932
934 spin_unlock_irqrestore(&cohc->lock, flags); 933 spin_unlock_irqrestore(&cohc->lock, flags);
935 934
@@ -1169,7 +1168,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1169 dma_cookie_t last_complete; 1168 dma_cookie_t last_complete;
1170 int ret; 1169 int ret;
1171 1170
1172 last_complete = cohc->completed; 1171 last_complete = chan->completed_cookie;
1173 last_used = chan->cookie; 1172 last_used = chan->cookie;
1174 1173
1175 ret = dma_async_is_complete(cookie, last_complete, last_used); 1174 ret = dma_async_is_complete(cookie, last_complete, last_used);
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 0e4b5c6a2f86..5bd23006ff4a 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -249,7 +249,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
249 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); 249 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
250 250
251 spin_lock_irqsave(&dwc->lock, flags); 251 spin_lock_irqsave(&dwc->lock, flags);
252 dwc->completed = txd->cookie; 252 dwc->chan.completed_cookie = txd->cookie;
253 if (callback_required) { 253 if (callback_required) {
254 callback = txd->callback; 254 callback = txd->callback;
255 param = txd->callback_param; 255 param = txd->callback_param;
@@ -997,14 +997,14 @@ dwc_tx_status(struct dma_chan *chan,
997 dma_cookie_t last_complete; 997 dma_cookie_t last_complete;
998 int ret; 998 int ret;
999 999
1000 last_complete = dwc->completed; 1000 last_complete = chan->completed_cookie;
1001 last_used = chan->cookie; 1001 last_used = chan->cookie;
1002 1002
1003 ret = dma_async_is_complete(cookie, last_complete, last_used); 1003 ret = dma_async_is_complete(cookie, last_complete, last_used);
1004 if (ret != DMA_SUCCESS) { 1004 if (ret != DMA_SUCCESS) {
1005 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 1005 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1006 1006
1007 last_complete = dwc->completed; 1007 last_complete = chan->completed_cookie;
1008 last_used = chan->cookie; 1008 last_used = chan->cookie;
1009 1009
1010 ret = dma_async_is_complete(cookie, last_complete, last_used); 1010 ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -1046,7 +1046,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
1046 return -EIO; 1046 return -EIO;
1047 } 1047 }
1048 1048
1049 dwc->completed = chan->cookie = 1; 1049 chan->completed_cookie = chan->cookie = 1;
1050 1050
1051 /* 1051 /*
1052 * NOTE: some controllers may have additional features that we 1052 * NOTE: some controllers may have additional features that we
@@ -1474,7 +1474,7 @@ static int __init dw_probe(struct platform_device *pdev)
1474 struct dw_dma_chan *dwc = &dw->chan[i]; 1474 struct dw_dma_chan *dwc = &dw->chan[i];
1475 1475
1476 dwc->chan.device = &dw->dma; 1476 dwc->chan.device = &dw->dma;
1477 dwc->chan.cookie = dwc->completed = 1; 1477 dwc->chan.cookie = dwc->chan.completed_cookie = 1;
1478 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) 1478 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1479 list_add_tail(&dwc->chan.device_node, 1479 list_add_tail(&dwc->chan.device_node,
1480 &dw->dma.channels); 1480 &dw->dma.channels);
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index eec0481a12f7..f298f69ecbf9 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -158,7 +158,6 @@ struct dw_dma_chan {
158 158
159 /* these other elements are all protected by lock */ 159 /* these other elements are all protected by lock */
160 unsigned long flags; 160 unsigned long flags;
161 dma_cookie_t completed;
162 struct list_head active_list; 161 struct list_head active_list;
163 struct list_head queue; 162 struct list_head queue;
164 struct list_head free_list; 163 struct list_head free_list;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 59e7a965772b..bc457878cffd 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -122,7 +122,6 @@ struct ep93xx_dma_desc {
122 * @lock: lock protecting the fields following 122 * @lock: lock protecting the fields following
123 * @flags: flags for the channel 123 * @flags: flags for the channel
124 * @buffer: which buffer to use next (0/1) 124 * @buffer: which buffer to use next (0/1)
125 * @last_completed: last completed cookie value
126 * @active: flattened chain of descriptors currently being processed 125 * @active: flattened chain of descriptors currently being processed
127 * @queue: pending descriptors which are handled next 126 * @queue: pending descriptors which are handled next
128 * @free_list: list of free descriptors which can be used 127 * @free_list: list of free descriptors which can be used
@@ -157,7 +156,6 @@ struct ep93xx_dma_chan {
157#define EP93XX_DMA_IS_CYCLIC 0 156#define EP93XX_DMA_IS_CYCLIC 0
158 157
159 int buffer; 158 int buffer;
160 dma_cookie_t last_completed;
161 struct list_head active; 159 struct list_head active;
162 struct list_head queue; 160 struct list_head queue;
163 struct list_head free_list; 161 struct list_head free_list;
@@ -703,7 +701,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
703 desc = ep93xx_dma_get_active(edmac); 701 desc = ep93xx_dma_get_active(edmac);
704 if (desc) { 702 if (desc) {
705 if (desc->complete) { 703 if (desc->complete) {
706 edmac->last_completed = desc->txd.cookie; 704 edmac->chan.completed_cookie = desc->txd.cookie;
707 list_splice_init(&edmac->active, &list); 705 list_splice_init(&edmac->active, &list);
708 } 706 }
709 callback = desc->txd.callback; 707 callback = desc->txd.callback;
@@ -861,7 +859,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
861 goto fail_clk_disable; 859 goto fail_clk_disable;
862 860
863 spin_lock_irq(&edmac->lock); 861 spin_lock_irq(&edmac->lock);
864 edmac->last_completed = 1; 862 edmac->chan.completed_cookie = 1;
865 edmac->chan.cookie = 1; 863 edmac->chan.cookie = 1;
866 ret = edmac->edma->hw_setup(edmac); 864 ret = edmac->edma->hw_setup(edmac);
867 spin_unlock_irq(&edmac->lock); 865 spin_unlock_irq(&edmac->lock);
@@ -1254,7 +1252,7 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
1254 1252
1255 spin_lock_irqsave(&edmac->lock, flags); 1253 spin_lock_irqsave(&edmac->lock, flags);
1256 last_used = chan->cookie; 1254 last_used = chan->cookie;
1257 last_completed = edmac->last_completed; 1255 last_completed = chan->completed_cookie;
1258 spin_unlock_irqrestore(&edmac->lock, flags); 1256 spin_unlock_irqrestore(&edmac->lock, flags);
1259 1257
1260 ret = dma_async_is_complete(cookie, last_completed, last_used); 1258 ret = dma_async_is_complete(cookie, last_completed, last_used);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index b98070c33ca9..9b5cb8a43cfa 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -990,7 +990,7 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
990 990
991 spin_lock_irqsave(&chan->desc_lock, flags); 991 spin_lock_irqsave(&chan->desc_lock, flags);
992 992
993 last_complete = chan->completed_cookie; 993 last_complete = dchan->completed_cookie;
994 last_used = dchan->cookie; 994 last_used = dchan->cookie;
995 995
996 spin_unlock_irqrestore(&chan->desc_lock, flags); 996 spin_unlock_irqrestore(&chan->desc_lock, flags);
@@ -1088,7 +1088,7 @@ static void dma_do_tasklet(unsigned long data)
1088 desc = to_fsl_desc(chan->ld_running.prev); 1088 desc = to_fsl_desc(chan->ld_running.prev);
1089 cookie = desc->async_tx.cookie; 1089 cookie = desc->async_tx.cookie;
1090 1090
1091 chan->completed_cookie = cookie; 1091 chan->common.completed_cookie = cookie;
1092 chan_dbg(chan, "completed_cookie=%d\n", cookie); 1092 chan_dbg(chan, "completed_cookie=%d\n", cookie);
1093 } 1093 }
1094 1094
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 9cb5aa57c677..f5c38791fc74 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -137,7 +137,6 @@ struct fsldma_device {
137struct fsldma_chan { 137struct fsldma_chan {
138 char name[8]; /* Channel name */ 138 char name[8]; /* Channel name */
139 struct fsldma_chan_regs __iomem *regs; 139 struct fsldma_chan_regs __iomem *regs;
140 dma_cookie_t completed_cookie; /* The maximum cookie completed */
141 spinlock_t desc_lock; /* Descriptor operation lock */ 140 spinlock_t desc_lock; /* Descriptor operation lock */
142 struct list_head ld_pending; /* Link descriptors queue */ 141 struct list_head ld_pending; /* Link descriptors queue */
143 struct list_head ld_running; /* Link descriptors queue */ 142 struct list_head ld_running; /* Link descriptors queue */
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 3296a7337f25..d3ddcba87f81 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -41,7 +41,6 @@ struct imxdma_channel {
41 struct dma_chan chan; 41 struct dma_chan chan;
42 spinlock_t lock; 42 spinlock_t lock;
43 struct dma_async_tx_descriptor desc; 43 struct dma_async_tx_descriptor desc;
44 dma_cookie_t last_completed;
45 enum dma_status status; 44 enum dma_status status;
46 int dma_request; 45 int dma_request;
47 struct scatterlist *sg_list; 46 struct scatterlist *sg_list;
@@ -65,7 +64,7 @@ static void imxdma_handle(struct imxdma_channel *imxdmac)
65{ 64{
66 if (imxdmac->desc.callback) 65 if (imxdmac->desc.callback)
67 imxdmac->desc.callback(imxdmac->desc.callback_param); 66 imxdmac->desc.callback(imxdmac->desc.callback_param);
68 imxdmac->last_completed = imxdmac->desc.cookie; 67 imxdmac->chan.completed_cookie = imxdmac->desc.cookie;
69} 68}
70 69
71static void imxdma_irq_handler(int channel, void *data) 70static void imxdma_irq_handler(int channel, void *data)
@@ -158,8 +157,8 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan,
158 157
159 last_used = chan->cookie; 158 last_used = chan->cookie;
160 159
161 ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used); 160 ret = dma_async_is_complete(cookie, chan->completed_cookie, last_used);
162 dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0); 161 dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
163 162
164 return ret; 163 return ret;
165} 164}
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index bf736ad679ca..49aa4e876645 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -267,7 +267,6 @@ struct sdma_channel {
267 struct dma_chan chan; 267 struct dma_chan chan;
268 spinlock_t lock; 268 spinlock_t lock;
269 struct dma_async_tx_descriptor desc; 269 struct dma_async_tx_descriptor desc;
270 dma_cookie_t last_completed;
271 enum dma_status status; 270 enum dma_status status;
272 unsigned int chn_count; 271 unsigned int chn_count;
273 unsigned int chn_real_count; 272 unsigned int chn_real_count;
@@ -529,7 +528,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
529 else 528 else
530 sdmac->status = DMA_SUCCESS; 529 sdmac->status = DMA_SUCCESS;
531 530
532 sdmac->last_completed = sdmac->desc.cookie; 531 sdmac->chan.completed_cookie = sdmac->desc.cookie;
533 if (sdmac->desc.callback) 532 if (sdmac->desc.callback)
534 sdmac->desc.callback(sdmac->desc.callback_param); 533 sdmac->desc.callback(sdmac->desc.callback_param);
535} 534}
@@ -1127,7 +1126,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
1127 1126
1128 last_used = chan->cookie; 1127 last_used = chan->cookie;
1129 1128
1130 dma_set_tx_state(txstate, sdmac->last_completed, last_used, 1129 dma_set_tx_state(txstate, chan->completed_cookie, last_used,
1131 sdmac->chn_count - sdmac->chn_real_count); 1130 sdmac->chn_count - sdmac->chn_real_count);
1132 1131
1133 return sdmac->status; 1132 return sdmac->status;
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 923476d74a5d..40e47e6c7ed8 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -288,7 +288,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
288 struct intel_mid_dma_lli *llitem; 288 struct intel_mid_dma_lli *llitem;
289 void *param_txd = NULL; 289 void *param_txd = NULL;
290 290
291 midc->completed = txd->cookie; 291 midc->chan.completed_cookie = txd->cookie;
292 callback_txd = txd->callback; 292 callback_txd = txd->callback;
293 param_txd = txd->callback_param; 293 param_txd = txd->callback_param;
294 294
@@ -482,12 +482,11 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
482 dma_cookie_t cookie, 482 dma_cookie_t cookie,
483 struct dma_tx_state *txstate) 483 struct dma_tx_state *txstate)
484{ 484{
485 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
486 dma_cookie_t last_used; 485 dma_cookie_t last_used;
487 dma_cookie_t last_complete; 486 dma_cookie_t last_complete;
488 int ret; 487 int ret;
489 488
490 last_complete = midc->completed; 489 last_complete = chan->completed_cookie;
491 last_used = chan->cookie; 490 last_used = chan->cookie;
492 491
493 ret = dma_async_is_complete(cookie, last_complete, last_used); 492 ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -496,7 +495,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
496 midc_scan_descriptors(to_middma_device(chan->device), midc); 495 midc_scan_descriptors(to_middma_device(chan->device), midc);
497 spin_unlock_bh(&midc->lock); 496 spin_unlock_bh(&midc->lock);
498 497
499 last_complete = midc->completed; 498 last_complete = chan->completed_cookie;
500 last_used = chan->cookie; 499 last_used = chan->cookie;
501 500
502 ret = dma_async_is_complete(cookie, last_complete, last_used); 501 ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -886,7 +885,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
886 pm_runtime_put(&mid->pdev->dev); 885 pm_runtime_put(&mid->pdev->dev);
887 return -EIO; 886 return -EIO;
888 } 887 }
889 midc->completed = chan->cookie = 1; 888 chan->completed_cookie = chan->cookie = 1;
890 889
891 spin_lock_bh(&midc->lock); 890 spin_lock_bh(&midc->lock);
892 while (midc->descs_allocated < DESCS_PER_CHANNEL) { 891 while (midc->descs_allocated < DESCS_PER_CHANNEL) {
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index c83d35b97bd8..1bfa9268feaf 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -165,7 +165,6 @@ union intel_mid_dma_cfg_hi {
165 * @dma_base: MMIO register space DMA engine base pointer 165 * @dma_base: MMIO register space DMA engine base pointer
166 * @ch_id: DMA channel id 166 * @ch_id: DMA channel id
167 * @lock: channel spinlock 167 * @lock: channel spinlock
168 * @completed: DMA cookie
169 * @active_list: current active descriptors 168 * @active_list: current active descriptors
170 * @queue: current queued up descriptors 169 * @queue: current queued up descriptors
171 * @free_list: current free descriptors 170 * @free_list: current free descriptors
@@ -183,7 +182,6 @@ struct intel_mid_dma_chan {
183 void __iomem *dma_base; 182 void __iomem *dma_base;
184 int ch_id; 183 int ch_id;
185 spinlock_t lock; 184 spinlock_t lock;
186 dma_cookie_t completed;
187 struct list_head active_list; 185 struct list_head active_list;
188 struct list_head queue; 186 struct list_head queue;
189 struct list_head free_list; 187 struct list_head free_list;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index a4d6cb0c0343..fab440af1f9a 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -603,7 +603,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
603 */ 603 */
604 dump_desc_dbg(ioat, desc); 604 dump_desc_dbg(ioat, desc);
605 if (tx->cookie) { 605 if (tx->cookie) {
606 chan->completed_cookie = tx->cookie; 606 chan->common.completed_cookie = tx->cookie;
607 tx->cookie = 0; 607 tx->cookie = 0;
608 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); 608 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
609 ioat->active -= desc->hw->tx_cnt; 609 ioat->active -= desc->hw->tx_cnt;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 5216c8a92a21..9653b6b6a715 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -90,7 +90,6 @@ struct ioat_chan_common {
90 void __iomem *reg_base; 90 void __iomem *reg_base;
91 unsigned long last_completion; 91 unsigned long last_completion;
92 spinlock_t cleanup_lock; 92 spinlock_t cleanup_lock;
93 dma_cookie_t completed_cookie;
94 unsigned long state; 93 unsigned long state;
95 #define IOAT_COMPLETION_PENDING 0 94 #define IOAT_COMPLETION_PENDING 0
96 #define IOAT_COMPLETION_ACK 1 95 #define IOAT_COMPLETION_ACK 1
@@ -153,12 +152,11 @@ static inline enum dma_status
153ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, 152ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
154 struct dma_tx_state *txstate) 153 struct dma_tx_state *txstate)
155{ 154{
156 struct ioat_chan_common *chan = to_chan_common(c);
157 dma_cookie_t last_used; 155 dma_cookie_t last_used;
158 dma_cookie_t last_complete; 156 dma_cookie_t last_complete;
159 157
160 last_used = c->cookie; 158 last_used = c->cookie;
161 last_complete = chan->completed_cookie; 159 last_complete = c->completed_cookie;
162 160
163 dma_set_tx_state(txstate, last_complete, last_used, 0); 161 dma_set_tx_state(txstate, last_complete, last_used, 0);
164 162
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5d65f8377971..d3f0aff2c02a 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -147,7 +147,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
147 dump_desc_dbg(ioat, desc); 147 dump_desc_dbg(ioat, desc);
148 if (tx->cookie) { 148 if (tx->cookie) {
149 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); 149 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
150 chan->completed_cookie = tx->cookie; 150 chan->common.completed_cookie = tx->cookie;
151 tx->cookie = 0; 151 tx->cookie = 0;
152 if (tx->callback) { 152 if (tx->callback) {
153 tx->callback(tx->callback_param); 153 tx->callback(tx->callback_param);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index f519c93a61e7..d4afac741e8a 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -277,7 +277,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
277 dump_desc_dbg(ioat, desc); 277 dump_desc_dbg(ioat, desc);
278 tx = &desc->txd; 278 tx = &desc->txd;
279 if (tx->cookie) { 279 if (tx->cookie) {
280 chan->completed_cookie = tx->cookie; 280 chan->common.completed_cookie = tx->cookie;
281 ioat3_dma_unmap(ioat, desc, idx + i); 281 ioat3_dma_unmap(ioat, desc, idx + i);
282 tx->cookie = 0; 282 tx->cookie = 0;
283 if (tx->callback) { 283 if (tx->callback) {
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 04be90b645b8..d8027c2b42c0 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -317,7 +317,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
317 } 317 }
318 318
319 if (cookie > 0) { 319 if (cookie > 0) {
320 iop_chan->completed_cookie = cookie; 320 iop_chan->common.completed_cookie = cookie;
321 pr_debug("\tcompleted cookie %d\n", cookie); 321 pr_debug("\tcompleted cookie %d\n", cookie);
322 } 322 }
323} 323}
@@ -909,7 +909,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan,
909 enum dma_status ret; 909 enum dma_status ret;
910 910
911 last_used = chan->cookie; 911 last_used = chan->cookie;
912 last_complete = iop_chan->completed_cookie; 912 last_complete = chan->completed_cookie;
913 dma_set_tx_state(txstate, last_complete, last_used, 0); 913 dma_set_tx_state(txstate, last_complete, last_used, 0);
914 ret = dma_async_is_complete(cookie, last_complete, last_used); 914 ret = dma_async_is_complete(cookie, last_complete, last_used);
915 if (ret == DMA_SUCCESS) 915 if (ret == DMA_SUCCESS)
@@ -918,7 +918,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan,
918 iop_adma_slot_cleanup(iop_chan); 918 iop_adma_slot_cleanup(iop_chan);
919 919
920 last_used = chan->cookie; 920 last_used = chan->cookie;
921 last_complete = iop_chan->completed_cookie; 921 last_complete = chan->completed_cookie;
922 dma_set_tx_state(txstate, last_complete, last_used, 0); 922 dma_set_tx_state(txstate, last_complete, last_used, 0);
923 923
924 return dma_async_is_complete(cookie, last_complete, last_used); 924 return dma_async_is_complete(cookie, last_complete, last_used);
@@ -1650,7 +1650,7 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1650 /* initialize the completed cookie to be less than 1650 /* initialize the completed cookie to be less than
1651 * the most recently used cookie 1651 * the most recently used cookie
1652 */ 1652 */
1653 iop_chan->completed_cookie = cookie - 1; 1653 iop_chan->common.completed_cookie = cookie - 1;
1654 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie; 1654 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1655 1655
1656 /* channel should not be busy */ 1656 /* channel should not be busy */
@@ -1707,7 +1707,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1707 /* initialize the completed cookie to be less than 1707 /* initialize the completed cookie to be less than
1708 * the most recently used cookie 1708 * the most recently used cookie
1709 */ 1709 */
1710 iop_chan->completed_cookie = cookie - 1; 1710 iop_chan->common.completed_cookie = cookie - 1;
1711 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie; 1711 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1712 1712
1713 /* channel should not be busy */ 1713 /* channel should not be busy */
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 6212b16e8cf2..9149ade6a5d9 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1295,7 +1295,7 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1295 /* Flip the active buffer - even if update above failed */ 1295 /* Flip the active buffer - even if update above failed */
1296 ichan->active_buffer = !ichan->active_buffer; 1296 ichan->active_buffer = !ichan->active_buffer;
1297 if (done) 1297 if (done)
1298 ichan->completed = desc->txd.cookie; 1298 ichan->dma_chan.completed_cookie = desc->txd.cookie;
1299 1299
1300 callback = desc->txd.callback; 1300 callback = desc->txd.callback;
1301 callback_param = desc->txd.callback_param; 1301 callback_param = desc->txd.callback_param;
@@ -1511,7 +1511,7 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan)
1511 WARN_ON(ichan->status != IPU_CHANNEL_FREE); 1511 WARN_ON(ichan->status != IPU_CHANNEL_FREE);
1512 1512
1513 chan->cookie = 1; 1513 chan->cookie = 1;
1514 ichan->completed = -ENXIO; 1514 chan->completed_cookie = -ENXIO;
1515 1515
1516 ret = ipu_irq_map(chan->chan_id); 1516 ret = ipu_irq_map(chan->chan_id);
1517 if (ret < 0) 1517 if (ret < 0)
@@ -1600,9 +1600,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
1600static enum dma_status idmac_tx_status(struct dma_chan *chan, 1600static enum dma_status idmac_tx_status(struct dma_chan *chan,
1601 dma_cookie_t cookie, struct dma_tx_state *txstate) 1601 dma_cookie_t cookie, struct dma_tx_state *txstate)
1602{ 1602{
1603 struct idmac_channel *ichan = to_idmac_chan(chan); 1603 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
1604
1605 dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0);
1606 if (cookie != chan->cookie) 1604 if (cookie != chan->cookie)
1607 return DMA_ERROR; 1605 return DMA_ERROR;
1608 return DMA_SUCCESS; 1606 return DMA_SUCCESS;
@@ -1638,11 +1636,11 @@ static int __init ipu_idmac_init(struct ipu *ipu)
1638 1636
1639 ichan->status = IPU_CHANNEL_FREE; 1637 ichan->status = IPU_CHANNEL_FREE;
1640 ichan->sec_chan_en = false; 1638 ichan->sec_chan_en = false;
1641 ichan->completed = -ENXIO;
1642 snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i); 1639 snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i);
1643 1640
1644 dma_chan->device = &idmac->dma; 1641 dma_chan->device = &idmac->dma;
1645 dma_chan->cookie = 1; 1642 dma_chan->cookie = 1;
1643 dma_chan->completed_cookie = -ENXIO;
1646 dma_chan->chan_id = i; 1644 dma_chan->chan_id = i;
1647 list_add_tail(&dma_chan->device_node, &dma->channels); 1645 list_add_tail(&dma_chan->device_node, &dma->channels);
1648 } 1646 }
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4d6d4cf66949..39a5cde9f428 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -188,7 +188,6 @@ struct mpc_dma_chan {
188 struct list_head completed; 188 struct list_head completed;
189 struct mpc_dma_tcd *tcd; 189 struct mpc_dma_tcd *tcd;
190 dma_addr_t tcd_paddr; 190 dma_addr_t tcd_paddr;
191 dma_cookie_t completed_cookie;
192 191
193 /* Lock for this structure */ 192 /* Lock for this structure */
194 spinlock_t lock; 193 spinlock_t lock;
@@ -365,7 +364,7 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma)
365 /* Free descriptors */ 364 /* Free descriptors */
366 spin_lock_irqsave(&mchan->lock, flags); 365 spin_lock_irqsave(&mchan->lock, flags);
367 list_splice_tail_init(&list, &mchan->free); 366 list_splice_tail_init(&list, &mchan->free);
368 mchan->completed_cookie = last_cookie; 367 mchan->chan.completed_cookie = last_cookie;
369 spin_unlock_irqrestore(&mchan->lock, flags); 368 spin_unlock_irqrestore(&mchan->lock, flags);
370 } 369 }
371} 370}
@@ -568,7 +567,7 @@ mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
568 567
569 spin_lock_irqsave(&mchan->lock, flags); 568 spin_lock_irqsave(&mchan->lock, flags);
570 last_used = mchan->chan.cookie; 569 last_used = mchan->chan.cookie;
571 last_complete = mchan->completed_cookie; 570 last_complete = mchan->chan.completed_cookie;
572 spin_unlock_irqrestore(&mchan->lock, flags); 571 spin_unlock_irqrestore(&mchan->lock, flags);
573 572
574 dma_set_tx_state(txstate, last_complete, last_used, 0); 573 dma_set_tx_state(txstate, last_complete, last_used, 0);
@@ -742,7 +741,7 @@ static int __devinit mpc_dma_probe(struct platform_device *op)
742 741
743 mchan->chan.device = dma; 742 mchan->chan.device = dma;
744 mchan->chan.cookie = 1; 743 mchan->chan.cookie = 1;
745 mchan->completed_cookie = mchan->chan.cookie; 744 mchan->chan.completed_cookie = mchan->chan.cookie;
746 745
747 INIT_LIST_HEAD(&mchan->free); 746 INIT_LIST_HEAD(&mchan->free);
748 INIT_LIST_HEAD(&mchan->prepared); 747 INIT_LIST_HEAD(&mchan->prepared);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index ad7d03fe4cb4..c6a84dac112c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -435,7 +435,7 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
435 } 435 }
436 436
437 if (cookie > 0) 437 if (cookie > 0)
438 mv_chan->completed_cookie = cookie; 438 mv_chan->common.completed_cookie = cookie;
439} 439}
440 440
441static void 441static void
@@ -825,7 +825,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
825 enum dma_status ret; 825 enum dma_status ret;
826 826
827 last_used = chan->cookie; 827 last_used = chan->cookie;
828 last_complete = mv_chan->completed_cookie; 828 last_complete = chan->completed_cookie;
829 dma_set_tx_state(txstate, last_complete, last_used, 0); 829 dma_set_tx_state(txstate, last_complete, last_used, 0);
830 830
831 ret = dma_async_is_complete(cookie, last_complete, last_used); 831 ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -836,7 +836,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
836 mv_xor_slot_cleanup(mv_chan); 836 mv_xor_slot_cleanup(mv_chan);
837 837
838 last_used = chan->cookie; 838 last_used = chan->cookie;
839 last_complete = mv_chan->completed_cookie; 839 last_complete = chan->completed_cookie;
840 840
841 dma_set_tx_state(txstate, last_complete, last_used, 0); 841 dma_set_tx_state(txstate, last_complete, last_used, 0);
842 return dma_async_is_complete(cookie, last_complete, last_used); 842 return dma_async_is_complete(cookie, last_complete, last_used);
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index da04ac23def3..654876b7ba1d 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -78,7 +78,6 @@ struct mv_xor_device {
78/** 78/**
79 * struct mv_xor_chan - internal representation of a XOR channel 79 * struct mv_xor_chan - internal representation of a XOR channel
80 * @pending: allows batching of hardware operations 80 * @pending: allows batching of hardware operations
81 * @completed_cookie: identifier for the most recently completed operation
82 * @lock: serializes enqueue/dequeue operations to the descriptors pool 81 * @lock: serializes enqueue/dequeue operations to the descriptors pool
83 * @mmr_base: memory mapped register base 82 * @mmr_base: memory mapped register base
84 * @idx: the index of the xor channel 83 * @idx: the index of the xor channel
@@ -93,7 +92,6 @@ struct mv_xor_device {
93 */ 92 */
94struct mv_xor_chan { 93struct mv_xor_chan {
95 int pending; 94 int pending;
96 dma_cookie_t completed_cookie;
97 spinlock_t lock; /* protects the descriptor slot pool */ 95 spinlock_t lock; /* protects the descriptor slot pool */
98 void __iomem *mmr_base; 96 void __iomem *mmr_base;
99 unsigned int idx; 97 unsigned int idx;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index b06cd4ca626f..3696e6e4143a 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -111,7 +111,6 @@ struct mxs_dma_chan {
111 struct mxs_dma_ccw *ccw; 111 struct mxs_dma_ccw *ccw;
112 dma_addr_t ccw_phys; 112 dma_addr_t ccw_phys;
113 int desc_count; 113 int desc_count;
114 dma_cookie_t last_completed;
115 enum dma_status status; 114 enum dma_status status;
116 unsigned int flags; 115 unsigned int flags;
117#define MXS_DMA_SG_LOOP (1 << 0) 116#define MXS_DMA_SG_LOOP (1 << 0)
@@ -274,7 +273,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
274 stat1 &= ~(1 << channel); 273 stat1 &= ~(1 << channel);
275 274
276 if (mxs_chan->status == DMA_SUCCESS) 275 if (mxs_chan->status == DMA_SUCCESS)
277 mxs_chan->last_completed = mxs_chan->desc.cookie; 276 mxs_chan->chan.completed_cookie = mxs_chan->desc.cookie;
278 277
279 /* schedule tasklet on this channel */ 278 /* schedule tasklet on this channel */
280 tasklet_schedule(&mxs_chan->tasklet); 279 tasklet_schedule(&mxs_chan->tasklet);
@@ -538,7 +537,7 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
538 dma_cookie_t last_used; 537 dma_cookie_t last_used;
539 538
540 last_used = chan->cookie; 539 last_used = chan->cookie;
541 dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0); 540 dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
542 541
543 return mxs_chan->status; 542 return mxs_chan->status;
544} 543}
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 823f58179f9d..79a71858497c 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -105,7 +105,6 @@ struct pch_dma_chan {
105 105
106 spinlock_t lock; 106 spinlock_t lock;
107 107
108 dma_cookie_t completed_cookie;
109 struct list_head active_list; 108 struct list_head active_list;
110 struct list_head queue; 109 struct list_head queue;
111 struct list_head free_list; 110 struct list_head free_list;
@@ -544,7 +543,7 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
544 spin_lock_irq(&pd_chan->lock); 543 spin_lock_irq(&pd_chan->lock);
545 list_splice(&tmp_list, &pd_chan->free_list); 544 list_splice(&tmp_list, &pd_chan->free_list);
546 pd_chan->descs_allocated = i; 545 pd_chan->descs_allocated = i;
547 pd_chan->completed_cookie = chan->cookie = 1; 546 chan->completed_cookie = chan->cookie = 1;
548 spin_unlock_irq(&pd_chan->lock); 547 spin_unlock_irq(&pd_chan->lock);
549 548
550 pdc_enable_irq(chan, 1); 549 pdc_enable_irq(chan, 1);
@@ -583,7 +582,7 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
583 int ret; 582 int ret;
584 583
585 spin_lock_irq(&pd_chan->lock); 584 spin_lock_irq(&pd_chan->lock);
586 last_completed = pd_chan->completed_cookie; 585 last_completed = chan->completed_cookie;
587 last_used = chan->cookie; 586 last_used = chan->cookie;
588 spin_unlock_irq(&pd_chan->lock); 587 spin_unlock_irq(&pd_chan->lock);
589 588
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 84ebea9bc53a..99c31a76e74e 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -51,9 +51,6 @@ struct dma_pl330_chan {
51 /* DMA-Engine Channel */ 51 /* DMA-Engine Channel */
52 struct dma_chan chan; 52 struct dma_chan chan;
53 53
54 /* Last completed cookie */
55 dma_cookie_t completed;
56
57 /* List of to be xfered descriptors */ 54 /* List of to be xfered descriptors */
58 struct list_head work_list; 55 struct list_head work_list;
59 56
@@ -234,7 +231,7 @@ static void pl330_tasklet(unsigned long data)
234 /* Pick up ripe tomatoes */ 231 /* Pick up ripe tomatoes */
235 list_for_each_entry_safe(desc, _dt, &pch->work_list, node) 232 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
236 if (desc->status == DONE) { 233 if (desc->status == DONE) {
237 pch->completed = desc->txd.cookie; 234 pch->chan.completed_cookie = desc->txd.cookie;
238 list_move_tail(&desc->node, &list); 235 list_move_tail(&desc->node, &list);
239 } 236 }
240 237
@@ -305,7 +302,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
305 302
306 spin_lock_irqsave(&pch->lock, flags); 303 spin_lock_irqsave(&pch->lock, flags);
307 304
308 pch->completed = chan->cookie = 1; 305 chan->completed_cookie = chan->cookie = 1;
309 pch->cyclic = false; 306 pch->cyclic = false;
310 307
311 pch->pl330_chid = pl330_request_channel(&pdmac->pif); 308 pch->pl330_chid = pl330_request_channel(&pdmac->pif);
@@ -400,7 +397,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
400 dma_cookie_t last_done, last_used; 397 dma_cookie_t last_done, last_used;
401 int ret; 398 int ret;
402 399
403 last_done = pch->completed; 400 last_done = chan->completed_cookie;
404 last_used = chan->cookie; 401 last_used = chan->cookie;
405 402
406 ret = dma_async_is_complete(cookie, last_done, last_used); 403 ret = dma_async_is_complete(cookie, last_done, last_used);
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index fc457a7e8832..f878322ecbcb 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -1930,7 +1930,7 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
1930 if (end_of_chain && slot_cnt) { 1930 if (end_of_chain && slot_cnt) {
1931 /* Should wait for ZeroSum completion */ 1931 /* Should wait for ZeroSum completion */
1932 if (cookie > 0) 1932 if (cookie > 0)
1933 chan->completed_cookie = cookie; 1933 chan->common.completed_cookie = cookie;
1934 return; 1934 return;
1935 } 1935 }
1936 1936
@@ -1960,7 +1960,7 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
1960 BUG_ON(!seen_current); 1960 BUG_ON(!seen_current);
1961 1961
1962 if (cookie > 0) { 1962 if (cookie > 0) {
1963 chan->completed_cookie = cookie; 1963 chan->common.completed_cookie = cookie;
1964 pr_debug("\tcompleted cookie %d\n", cookie); 1964 pr_debug("\tcompleted cookie %d\n", cookie);
1965 } 1965 }
1966 1966
@@ -3950,7 +3950,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
3950 3950
3951 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 3951 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3952 last_used = chan->cookie; 3952 last_used = chan->cookie;
3953 last_complete = ppc440spe_chan->completed_cookie; 3953 last_complete = chan->completed_cookie;
3954 3954
3955 dma_set_tx_state(txstate, last_complete, last_used, 0); 3955 dma_set_tx_state(txstate, last_complete, last_used, 0);
3956 3956
@@ -3961,7 +3961,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
3961 ppc440spe_adma_slot_cleanup(ppc440spe_chan); 3961 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3962 3962
3963 last_used = chan->cookie; 3963 last_used = chan->cookie;
3964 last_complete = ppc440spe_chan->completed_cookie; 3964 last_complete = chan->completed_cookie;
3965 3965
3966 dma_set_tx_state(txstate, last_complete, last_used, 0); 3966 dma_set_tx_state(txstate, last_complete, last_used, 0);
3967 3967
@@ -4058,7 +4058,7 @@ static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan)
4058 /* initialize the completed cookie to be less than 4058 /* initialize the completed cookie to be less than
4059 * the most recently used cookie 4059 * the most recently used cookie
4060 */ 4060 */
4061 chan->completed_cookie = cookie - 1; 4061 chan->common.completed_cookie = cookie - 1;
4062 chan->common.cookie = sw_desc->async_tx.cookie = cookie; 4062 chan->common.cookie = sw_desc->async_tx.cookie = cookie;
4063 4063
4064 /* channel should not be busy */ 4064 /* channel should not be busy */
diff --git a/drivers/dma/ppc4xx/adma.h b/drivers/dma/ppc4xx/adma.h
index 8ada5a812e3b..26b7a5ed9ac7 100644
--- a/drivers/dma/ppc4xx/adma.h
+++ b/drivers/dma/ppc4xx/adma.h
@@ -81,7 +81,6 @@ struct ppc440spe_adma_device {
81 * @common: common dmaengine channel object members 81 * @common: common dmaengine channel object members
82 * @all_slots: complete domain of slots usable by the channel 82 * @all_slots: complete domain of slots usable by the channel
83 * @pending: allows batching of hardware operations 83 * @pending: allows batching of hardware operations
84 * @completed_cookie: identifier for the most recently completed operation
85 * @slots_allocated: records the actual size of the descriptor slot pool 84 * @slots_allocated: records the actual size of the descriptor slot pool
86 * @hw_chain_inited: h/w descriptor chain initialization flag 85 * @hw_chain_inited: h/w descriptor chain initialization flag
87 * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs 86 * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs
@@ -99,7 +98,6 @@ struct ppc440spe_adma_chan {
99 struct list_head all_slots; 98 struct list_head all_slots;
100 struct ppc440spe_adma_desc_slot *last_used; 99 struct ppc440spe_adma_desc_slot *last_used;
101 int pending; 100 int pending;
102 dma_cookie_t completed_cookie;
103 int slots_allocated; 101 int slots_allocated;
104 int hw_chain_inited; 102 int hw_chain_inited;
105 struct tasklet_struct irq_tasklet; 103 struct tasklet_struct irq_tasklet;
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 812fd76e9c18..ae84c12e3865 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -764,12 +764,12 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
764 cookie = tx->cookie; 764 cookie = tx->cookie;
765 765
766 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { 766 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
767 if (sh_chan->completed_cookie != desc->cookie - 1) 767 if (sh_chan->common.completed_cookie != desc->cookie - 1)
768 dev_dbg(sh_chan->dev, 768 dev_dbg(sh_chan->dev,
769 "Completing cookie %d, expected %d\n", 769 "Completing cookie %d, expected %d\n",
770 desc->cookie, 770 desc->cookie,
771 sh_chan->completed_cookie + 1); 771 sh_chan->common.completed_cookie + 1);
772 sh_chan->completed_cookie = desc->cookie; 772 sh_chan->common.completed_cookie = desc->cookie;
773 } 773 }
774 774
775 /* Call callback on the last chunk */ 775 /* Call callback on the last chunk */
@@ -823,7 +823,7 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
823 * Terminating and the loop completed normally: forgive 823 * Terminating and the loop completed normally: forgive
824 * uncompleted cookies 824 * uncompleted cookies
825 */ 825 */
826 sh_chan->completed_cookie = sh_chan->common.cookie; 826 sh_chan->common.completed_cookie = sh_chan->common.cookie;
827 827
828 spin_unlock_irqrestore(&sh_chan->desc_lock, flags); 828 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
829 829
@@ -891,7 +891,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
891 sh_dmae_chan_ld_cleanup(sh_chan, false); 891 sh_dmae_chan_ld_cleanup(sh_chan, false);
892 892
893 /* First read completed cookie to avoid a skew */ 893 /* First read completed cookie to avoid a skew */
894 last_complete = sh_chan->completed_cookie; 894 last_complete = chan->completed_cookie;
895 rmb(); 895 rmb();
896 last_used = chan->cookie; 896 last_used = chan->cookie;
897 BUG_ON(last_complete < 0); 897 BUG_ON(last_complete < 0);
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 2b55a276dc5b..0b1d2c105f02 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -30,7 +30,6 @@ enum dmae_pm_state {
30}; 30};
31 31
32struct sh_dmae_chan { 32struct sh_dmae_chan {
33 dma_cookie_t completed_cookie; /* The maximum cookie completed */
34 spinlock_t desc_lock; /* Descriptor operation lock */ 33 spinlock_t desc_lock; /* Descriptor operation lock */
35 struct list_head ld_queue; /* Link descriptors queue */ 34 struct list_head ld_queue; /* Link descriptors queue */
36 struct list_head ld_free; /* Link descriptors free */ 35 struct list_head ld_free; /* Link descriptors free */
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 2333810d1688..60473f00cf1c 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -59,7 +59,6 @@ struct sirfsoc_dma_chan {
59 struct list_head queued; 59 struct list_head queued;
60 struct list_head active; 60 struct list_head active;
61 struct list_head completed; 61 struct list_head completed;
62 dma_cookie_t completed_cookie;
63 unsigned long happened_cyclic; 62 unsigned long happened_cyclic;
64 unsigned long completed_cyclic; 63 unsigned long completed_cyclic;
65 64
@@ -208,7 +207,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
208 /* Free descriptors */ 207 /* Free descriptors */
209 spin_lock_irqsave(&schan->lock, flags); 208 spin_lock_irqsave(&schan->lock, flags);
210 list_splice_tail_init(&list, &schan->free); 209 list_splice_tail_init(&list, &schan->free);
211 schan->completed_cookie = last_cookie; 210 schan->chan.completed_cookie = last_cookie;
212 spin_unlock_irqrestore(&schan->lock, flags); 211 spin_unlock_irqrestore(&schan->lock, flags);
213 } else { 212 } else {
214 /* for cyclic channel, desc is always in active list */ 213 /* for cyclic channel, desc is always in active list */
@@ -419,7 +418,7 @@ sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
419 418
420 spin_lock_irqsave(&schan->lock, flags); 419 spin_lock_irqsave(&schan->lock, flags);
421 last_used = schan->chan.cookie; 420 last_used = schan->chan.cookie;
422 last_complete = schan->completed_cookie; 421 last_complete = schan->chan.completed_cookie;
423 spin_unlock_irqrestore(&schan->lock, flags); 422 spin_unlock_irqrestore(&schan->lock, flags);
424 423
425 dma_set_tx_state(txstate, last_complete, last_used, 0); 424 dma_set_tx_state(txstate, last_complete, last_used, 0);
@@ -636,7 +635,7 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op)
636 635
637 schan->chan.device = dma; 636 schan->chan.device = dma;
638 schan->chan.cookie = 1; 637 schan->chan.cookie = 1;
639 schan->completed_cookie = schan->chan.cookie; 638 schan->chan.completed_cookie = schan->chan.cookie;
640 639
641 INIT_LIST_HEAD(&schan->free); 640 INIT_LIST_HEAD(&schan->free);
642 INIT_LIST_HEAD(&schan->prepared); 641 INIT_LIST_HEAD(&schan->prepared);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index cc5ecbc067a3..cfca2a06d1af 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -220,8 +220,6 @@ struct d40_base;
220 * 220 *
221 * @lock: A spinlock to protect this struct. 221 * @lock: A spinlock to protect this struct.
222 * @log_num: The logical number, if any of this channel. 222 * @log_num: The logical number, if any of this channel.
223 * @completed: Starts with 1, after first interrupt it is set to dma engine's
224 * current cookie.
225 * @pending_tx: The number of pending transfers. Used between interrupt handler 223 * @pending_tx: The number of pending transfers. Used between interrupt handler
226 * and tasklet. 224 * and tasklet.
227 * @busy: Set to true when transfer is ongoing on this channel. 225 * @busy: Set to true when transfer is ongoing on this channel.
@@ -250,8 +248,6 @@ struct d40_base;
250struct d40_chan { 248struct d40_chan {
251 spinlock_t lock; 249 spinlock_t lock;
252 int log_num; 250 int log_num;
253 /* ID of the most recent completed transfer */
254 int completed;
255 int pending_tx; 251 int pending_tx;
256 bool busy; 252 bool busy;
257 struct d40_phy_res *phy_chan; 253 struct d40_phy_res *phy_chan;
@@ -1357,7 +1353,7 @@ static void dma_tasklet(unsigned long data)
1357 goto err; 1353 goto err;
1358 1354
1359 if (!d40d->cyclic) 1355 if (!d40d->cyclic)
1360 d40c->completed = d40d->txd.cookie; 1356 d40c->chan.completed_cookie = d40d->txd.cookie;
1361 1357
1362 /* 1358 /*
1363 * If terminating a channel pending_tx is set to zero. 1359 * If terminating a channel pending_tx is set to zero.
@@ -2182,7 +2178,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
2182 bool is_free_phy; 2178 bool is_free_phy;
2183 spin_lock_irqsave(&d40c->lock, flags); 2179 spin_lock_irqsave(&d40c->lock, flags);
2184 2180
2185 d40c->completed = chan->cookie = 1; 2181 chan->completed_cookie = chan->cookie = 1;
2186 2182
2187 /* If no dma configuration is set use default configuration (memcpy) */ 2183 /* If no dma configuration is set use default configuration (memcpy) */
2188 if (!d40c->configured) { 2184 if (!d40c->configured) {
@@ -2351,7 +2347,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
2351 return -EINVAL; 2347 return -EINVAL;
2352 } 2348 }
2353 2349
2354 last_complete = d40c->completed; 2350 last_complete = chan->completed_cookie;
2355 last_used = chan->cookie; 2351 last_used = chan->cookie;
2356 2352
2357 if (d40_is_paused(d40c)) 2353 if (d40_is_paused(d40c))
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index a6f9c1684a0f..a1d15598cf7e 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -84,7 +84,6 @@ struct timb_dma_chan {
84 especially the lists and descriptors, 84 especially the lists and descriptors,
85 from races between the tasklet and calls 85 from races between the tasklet and calls
86 from above */ 86 from above */
87 dma_cookie_t last_completed_cookie;
88 bool ongoing; 87 bool ongoing;
89 struct list_head active_list; 88 struct list_head active_list;
90 struct list_head queue; 89 struct list_head queue;
@@ -284,7 +283,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
284 else 283 else
285 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); 284 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
286*/ 285*/
287 td_chan->last_completed_cookie = txd->cookie; 286 td_chan->chan.completed_cookie = txd->cookie;
288 td_chan->ongoing = false; 287 td_chan->ongoing = false;
289 288
290 callback = txd->callback; 289 callback = txd->callback;
@@ -481,7 +480,7 @@ static int td_alloc_chan_resources(struct dma_chan *chan)
481 } 480 }
482 481
483 spin_lock_bh(&td_chan->lock); 482 spin_lock_bh(&td_chan->lock);
484 td_chan->last_completed_cookie = 1; 483 chan->completed_cookie = 1;
485 chan->cookie = 1; 484 chan->cookie = 1;
486 spin_unlock_bh(&td_chan->lock); 485 spin_unlock_bh(&td_chan->lock);
487 486
@@ -523,7 +522,7 @@ static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
523 522
524 dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); 523 dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
525 524
526 last_complete = td_chan->last_completed_cookie; 525 last_complete = chan->completed_cookie;
527 last_used = chan->cookie; 526 last_used = chan->cookie;
528 527
529 ret = dma_async_is_complete(cookie, last_complete, last_used); 528 ret = dma_async_is_complete(cookie, last_complete, last_used);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 6122c364cf11..a917b6723bad 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -424,7 +424,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
424 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", 424 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
425 txd->cookie, desc); 425 txd->cookie, desc);
426 426
427 dc->completed = txd->cookie; 427 dc->chan.completed_cookie = txd->cookie;
428 callback = txd->callback; 428 callback = txd->callback;
429 param = txd->callback_param; 429 param = txd->callback_param;
430 430
@@ -976,7 +976,7 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
976 dma_cookie_t last_complete; 976 dma_cookie_t last_complete;
977 int ret; 977 int ret;
978 978
979 last_complete = dc->completed; 979 last_complete = chan->completed_cookie;
980 last_used = chan->cookie; 980 last_used = chan->cookie;
981 981
982 ret = dma_async_is_complete(cookie, last_complete, last_used); 982 ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -985,7 +985,7 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
985 txx9dmac_scan_descriptors(dc); 985 txx9dmac_scan_descriptors(dc);
986 spin_unlock_bh(&dc->lock); 986 spin_unlock_bh(&dc->lock);
987 987
988 last_complete = dc->completed; 988 last_complete = chan->completed_cookie;
989 last_used = chan->cookie; 989 last_used = chan->cookie;
990 990
991 ret = dma_async_is_complete(cookie, last_complete, last_used); 991 ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -1057,7 +1057,7 @@ static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
1057 return -EIO; 1057 return -EIO;
1058 } 1058 }
1059 1059
1060 dc->completed = chan->cookie = 1; 1060 chan->completed_cookie = chan->cookie = 1;
1061 1061
1062 dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE; 1062 dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
1063 txx9dmac_chan_set_SMPCHN(dc); 1063 txx9dmac_chan_set_SMPCHN(dc);
@@ -1186,7 +1186,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
1186 dc->ddev->chan[ch] = dc; 1186 dc->ddev->chan[ch] = dc;
1187 dc->chan.device = &dc->dma; 1187 dc->chan.device = &dc->dma;
1188 list_add_tail(&dc->chan.device_node, &dc->chan.device->channels); 1188 list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
1189 dc->chan.cookie = dc->completed = 1; 1189 dc->chan.cookie = dc->chan.completed_cookie = 1;
1190 1190
1191 if (is_dmac64(dc)) 1191 if (is_dmac64(dc))
1192 dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch]; 1192 dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h
index 365d42366b9f..f5a760598882 100644
--- a/drivers/dma/txx9dmac.h
+++ b/drivers/dma/txx9dmac.h
@@ -172,7 +172,6 @@ struct txx9dmac_chan {
172 spinlock_t lock; 172 spinlock_t lock;
173 173
174 /* these other elements are all protected by lock */ 174 /* these other elements are all protected by lock */
175 dma_cookie_t completed;
176 struct list_head active_list; 175 struct list_head active_list;
177 struct list_head queue; 176 struct list_head queue;
178 struct list_head free_list; 177 struct list_head free_list;