author		Vinod Koul <vinod.koul@linux.intel.com>	2012-03-13 03:09:49 -0400
committer	Vinod Koul <vinod.koul@linux.intel.com>	2012-03-13 03:09:49 -0400
commit		1f3d6dc0be92f0caca937926cca16ec4fdd585a3 (patch)
tree		a12562009ae984dd16ed080af13d0036b88a93a7 /drivers
parent		5170c051a56244816d948c43592c1b2805ed4f3a (diff)
parent		949ff5b8d46b5e3435d21b2651ce3a2599208d44 (diff)

Merge branch 'rmk_cookie_fixes2' into next

Conflicts:
	drivers/dma/imx-dma.c
	drivers/dma/pl330.c

Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
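[Editor's note: every driver touched by this merge open-coded the same cookie bookkeeping; the series replaces those copies with the shared helpers introduced in the new drivers/dma/dmaengine.h below. As a rough sketch of the pattern being removed (a hypothetical foo_tx_submit, not any single driver verbatim):

/* Before: duplicated in each driver's tx_submit hook, under the channel lock */
static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
{
        dma_cookie_t cookie = tx->chan->cookie + 1;

        if (cookie < 0)                 /* dma_cookie_t is signed; wrap back to 1 */
                cookie = 1;
        tx->chan->cookie = cookie;      /* last cookie handed out on this channel */
        tx->cookie = cookie;            /* identifies this descriptor */
        /* ... queue the descriptor ... */
        return cookie;
}

/* After: one call to the common helper does the same bookkeeping */
static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
{
        dma_cookie_t cookie = dma_cookie_assign(tx);    /* still under the lock */

        /* ... queue the descriptor ... */
        return cookie;
}
]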
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/dma/amba-pl08x.c	38
-rw-r--r--	drivers/dma/at_hdmac.c	48
-rw-r--r--	drivers/dma/at_hdmac_regs.h	2
-rw-r--r--	drivers/dma/coh901318.c	39
-rw-r--r--	drivers/dma/dmaengine.h	89
-rw-r--r--	drivers/dma/dw_dmac.c	43
-rw-r--r--	drivers/dma/dw_dmac_regs.h	1
-rw-r--r--	drivers/dma/ep93xx_dma.c	25
-rw-r--r--	drivers/dma/fsldma.c	24
-rw-r--r--	drivers/dma/fsldma.h	1
-rw-r--r--	drivers/dma/imx-dma.c	39
-rw-r--r--	drivers/dma/imx-sdma.c	23
-rw-r--r--	drivers/dma/intel_mid_dma.c	38
-rw-r--r--	drivers/dma/intel_mid_dma_regs.h	2
-rw-r--r--	drivers/dma/ioat/dma.c	21
-rw-r--r--	drivers/dma/ioat/dma.h	23
-rw-r--r--	drivers/dma/ioat/dma_v2.c	13
-rw-r--r--	drivers/dma/ioat/dma_v3.c	12
-rw-r--r--	drivers/dma/iop-adma.c	52
-rw-r--r--	drivers/dma/ipu/ipu_idmac.c	22
-rw-r--r--	drivers/dma/mpc512x_dma.c	25
-rw-r--r--	drivers/dma/mv_xor.c	34
-rw-r--r--	drivers/dma/mv_xor.h	3
-rw-r--r--	drivers/dma/mxs-dma.c	23
-rw-r--r--	drivers/dma/pch_dma.c	34
-rw-r--r--	drivers/dma/pl330.c	37
-rw-r--r--	drivers/dma/ppc4xx/adma.c	49
-rw-r--r--	drivers/dma/ppc4xx/adma.h	2
-rw-r--r--	drivers/dma/shdma.c	30
-rw-r--r--	drivers/dma/shdma.h	1
-rw-r--r--	drivers/dma/sirf-dma.c	25
-rw-r--r--	drivers/dma/ste_dma40.c	36
-rw-r--r--	drivers/dma/timb_dma.c	34
-rw-r--r--	drivers/dma/txx9dmac.c	41
-rw-r--r--	drivers/dma/txx9dmac.h	1
35 files changed, 296 insertions, 634 deletions
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 513184b4fdd1..1b53f2605250 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -85,6 +85,8 @@
 #include <linux/slab.h>
 #include <asm/hardware/pl080.h>
 
+#include "dmaengine.h"
+
 #define DRIVER_NAME	"pl08xdmac"
 
 static struct amba_driver pl08x_amba_driver;
@@ -919,13 +921,10 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
 	struct pl08x_txd *txd = to_pl08x_txd(tx);
 	unsigned long flags;
+	dma_cookie_t cookie;
 
 	spin_lock_irqsave(&plchan->lock, flags);
-
-	plchan->chan.cookie += 1;
-	if (plchan->chan.cookie < 0)
-		plchan->chan.cookie = 1;
-	tx->cookie = plchan->chan.cookie;
+	cookie = dma_cookie_assign(tx);
 
 	/* Put this onto the pending list */
 	list_add_tail(&txd->node, &plchan->pend_list);
@@ -945,7 +944,7 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	spin_unlock_irqrestore(&plchan->lock, flags);
 
-	return tx->cookie;
+	return cookie;
 }
 
 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
@@ -965,31 +964,17 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
 	dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
 	enum dma_status ret;
-	u32 bytesleft = 0;
-
-	last_used = plchan->chan.cookie;
-	last_complete = plchan->lc;
 
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
-	if (ret == DMA_SUCCESS) {
-		dma_set_tx_state(txstate, last_complete, last_used, 0);
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS)
 		return ret;
-	}
 
 	/*
 	 * This cookie not complete yet
+	 * Get number of bytes left in the active transactions and queue
 	 */
-	last_used = plchan->chan.cookie;
-	last_complete = plchan->lc;
-
-	/* Get number of bytes left in the active transactions and queue */
-	bytesleft = pl08x_getbytes_chan(plchan);
-
-	dma_set_tx_state(txstate, last_complete, last_used,
-			 bytesleft);
+	dma_set_residue(txstate, pl08x_getbytes_chan(plchan));
 
 	if (plchan->state == PL08X_CHAN_PAUSED)
 		return DMA_PAUSED;
@@ -1543,7 +1528,7 @@ static void pl08x_tasklet(unsigned long data)
 
 	if (txd) {
 		/* Update last completed */
-		plchan->lc = txd->tx.cookie;
+		dma_cookie_complete(&txd->tx);
 	}
 
 	/* If a new descriptor is queued, set it up plchan->at is NULL here */
@@ -1724,8 +1709,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 			 chan->name);
 
 		chan->chan.device = dmadev;
-		chan->chan.cookie = 0;
-		chan->lc = 0;
+		dma_cookie_init(&chan->chan);
 
 		spin_lock_init(&chan->lock);
 		INIT_LIST_HEAD(&chan->pend_list);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index f4aed5fc2cb6..5d225ddc7698 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -27,6 +27,7 @@
 #include <linux/of_device.h>
 
 #include "at_hdmac_regs.h"
+#include "dmaengine.h"
 
 /*
  * Glossary
@@ -192,27 +193,6 @@ static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
 }
 
 /**
- * atc_assign_cookie - compute and assign new cookie
- * @atchan: channel we work on
- * @desc: descriptor to assign cookie for
- *
- * Called with atchan->lock held and bh disabled
- */
-static dma_cookie_t
-atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
-{
-	dma_cookie_t cookie = atchan->chan_common.cookie;
-
-	if (++cookie < 0)
-		cookie = 1;
-
-	atchan->chan_common.cookie = cookie;
-	desc->txd.cookie = cookie;
-
-	return cookie;
-}
-
-/**
  * atc_dostart - starts the DMA engine for real
  * @atchan: the channel we want to start
  * @first: first descriptor in the list we want to begin with
@@ -269,7 +249,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"descriptor %u complete\n", txd->cookie);
 
-	atchan->completed_cookie = txd->cookie;
+	dma_cookie_complete(txd);
 
 	/* move children to free_list */
 	list_splice_init(&desc->tx_list, &atchan->free_list);
@@ -547,7 +527,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 	unsigned long flags;
 
 	spin_lock_irqsave(&atchan->lock, flags);
-	cookie = atc_assign_cookie(atchan, desc);
+	cookie = dma_cookie_assign(tx);
 
 	if (list_empty(&atchan->active_list)) {
 		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
@@ -1016,26 +996,20 @@ atc_tx_status(struct dma_chan *chan,
 
 	spin_lock_irqsave(&atchan->lock, flags);
 
-	last_complete = atchan->completed_cookie;
-	last_used = chan->cookie;
-
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret != DMA_SUCCESS) {
 		atc_cleanup_descriptors(atchan);
 
-		last_complete = atchan->completed_cookie;
-		last_used = chan->cookie;
-
-		ret = dma_async_is_complete(cookie, last_complete, last_used);
+		ret = dma_cookie_status(chan, cookie, txstate);
 	}
 
+	last_complete = chan->completed_cookie;
+	last_used = chan->cookie;
+
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	if (ret != DMA_SUCCESS)
-		dma_set_tx_state(txstate, last_complete, last_used,
-			atc_first_active(atchan)->len);
-	else
-		dma_set_tx_state(txstate, last_complete, last_used, 0);
+		dma_set_residue(txstate, atc_first_active(atchan)->len);
 
 	if (atc_chan_is_paused(atchan))
 		ret = DMA_PAUSED;
@@ -1129,7 +1103,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 	spin_lock_irqsave(&atchan->lock, flags);
 	atchan->descs_allocated = i;
 	list_splice(&tmp_list, &atchan->free_list);
-	atchan->completed_cookie = chan->cookie = 1;
+	dma_cookie_init(chan);
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	/* channel parameters */
@@ -1329,7 +1303,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 		struct at_dma_chan	*atchan = &atdma->chan[i];
 
 		atchan->chan_common.device = &atdma->dma_common;
-		atchan->chan_common.cookie = atchan->completed_cookie = 1;
+		dma_cookie_init(&atchan->chan_common);
 		list_add_tail(&atchan->chan_common.device_node,
 				&atdma->dma_common.channels);
 
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index a8d3277d60b5..08fd8a0ae797 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -208,7 +208,6 @@ enum atc_status {
  * @save_dscr: for cyclic operations, preserve next descriptor address in
  *  the cyclic list on suspend/resume cycle
  * @lock: serializes enqueue/dequeue operations to descriptors lists
- * @completed_cookie: identifier for the most recently completed operation
  * @active_list: list of descriptors dmaengine is being running on
  * @queue: list of descriptors ready to be submitted to engine
  * @free_list: list of descriptors usable by the channel
@@ -227,7 +226,6 @@ struct at_dma_chan {
 	spinlock_t		lock;
 
 	/* these other elements are all protected by lock */
-	dma_cookie_t		completed_cookie;
 	struct list_head	active_list;
 	struct list_head	queue;
 	struct list_head	free_list;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index d65a718c0f9b..187bb9eef4a2 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -24,6 +24,7 @@
 #include <mach/coh901318.h>
 
 #include "coh901318_lli.h"
+#include "dmaengine.h"
 
 #define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
 
@@ -59,7 +60,6 @@ struct coh901318_base {
 struct coh901318_chan {
 	spinlock_t lock;
 	int allocated;
-	int completed;
 	int id;
 	int stopped;
 
@@ -318,20 +318,6 @@ static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
 
 	return 0;
 }
-static dma_cookie_t
-coh901318_assign_cookie(struct coh901318_chan *cohc,
-			struct coh901318_desc *cohd)
-{
-	dma_cookie_t cookie = cohc->chan.cookie;
-
-	if (++cookie < 0)
-		cookie = 1;
-
-	cohc->chan.cookie = cookie;
-	cohd->desc.cookie = cookie;
-
-	return cookie;
-}
 
 static struct coh901318_desc *
 coh901318_desc_get(struct coh901318_chan *cohc)
@@ -705,7 +691,7 @@ static void dma_tasklet(unsigned long data)
 	callback_param = cohd_fin->desc.callback_param;
 
 	/* sign this job as completed on the channel */
-	cohc->completed = cohd_fin->desc.cookie;
+	dma_cookie_complete(&cohd_fin->desc);
 
 	/* release the lli allocation and remove the descriptor */
 	coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);
@@ -929,7 +915,7 @@ static int coh901318_alloc_chan_resources(struct dma_chan *chan)
 	coh901318_config(cohc, NULL);
 
 	cohc->allocated = 1;
-	cohc->completed = chan->cookie = 1;
+	dma_cookie_init(chan);
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
 
@@ -966,16 +952,16 @@ coh901318_tx_submit(struct dma_async_tx_descriptor *tx)
 						   desc);
 	struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
 	unsigned long flags;
+	dma_cookie_t cookie;
 
 	spin_lock_irqsave(&cohc->lock, flags);
-
-	tx->cookie = coh901318_assign_cookie(cohc, cohd);
+	cookie = dma_cookie_assign(tx);
 
 	coh901318_desc_queue(cohc, cohd);
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
 
-	return tx->cookie;
+	return cookie;
 }
 
 static struct dma_async_tx_descriptor *
@@ -1165,17 +1151,12 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 				 struct dma_tx_state *txstate)
 {
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
-	int ret;
-
-	last_complete = cohc->completed;
-	last_used = chan->cookie;
+	enum dma_status ret;
 
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	ret = dma_cookie_status(chan, cookie, txstate);
+	/* FIXME: should be conditional on ret != DMA_SUCCESS? */
+	dma_set_residue(txstate, coh901318_get_bytes_left(chan));
 
-	dma_set_tx_state(txstate, last_complete, last_used,
-			 coh901318_get_bytes_left(chan));
 	if (ret == DMA_IN_PROGRESS && cohc->stopped)
 		ret = DMA_PAUSED;
 
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
new file mode 100644
index 000000000000..17f983a4e9ba
--- /dev/null
+++ b/drivers/dma/dmaengine.h
@@ -0,0 +1,89 @@
+/*
+ * The contents of this file are private to DMA engine drivers, and is not
+ * part of the API to be used by DMA engine users.
+ */
+#ifndef DMAENGINE_H
+#define DMAENGINE_H
+
+#include <linux/bug.h>
+#include <linux/dmaengine.h>
+
+/**
+ * dma_cookie_init - initialize the cookies for a DMA channel
+ * @chan: dma channel to initialize
+ */
+static inline void dma_cookie_init(struct dma_chan *chan)
+{
+	chan->cookie = DMA_MIN_COOKIE;
+	chan->completed_cookie = DMA_MIN_COOKIE;
+}
+
+/**
+ * dma_cookie_assign - assign a DMA engine cookie to the descriptor
+ * @tx: descriptor needing cookie
+ *
+ * Assign a unique non-zero per-channel cookie to the descriptor.
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
+{
+	struct dma_chan *chan = tx->chan;
+	dma_cookie_t cookie;
+
+	cookie = chan->cookie + 1;
+	if (cookie < DMA_MIN_COOKIE)
+		cookie = DMA_MIN_COOKIE;
+	tx->cookie = chan->cookie = cookie;
+
+	return cookie;
+}
+
+/**
+ * dma_cookie_complete - complete a descriptor
+ * @tx: descriptor to complete
+ *
+ * Mark this descriptor complete by updating the channels completed
+ * cookie marker.  Zero the descriptors cookie to prevent accidental
+ * repeated completions.
+ *
+ * Note: caller is expected to hold a lock to prevent concurrency.
+ */
+static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
+{
+	BUG_ON(tx->cookie < DMA_MIN_COOKIE);
+	tx->chan->completed_cookie = tx->cookie;
+	tx->cookie = 0;
+}
+
+/**
+ * dma_cookie_status - report cookie status
+ * @chan: dma channel
+ * @cookie: cookie we are interested in
+ * @state: dma_tx_state structure to return last/used cookies
+ *
+ * Report the status of the cookie, filling in the state structure if
+ * non-NULL.  No locking is required.
+ */
+static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
+	dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	dma_cookie_t used, complete;
+
+	used = chan->cookie;
+	complete = chan->completed_cookie;
+	barrier();
+	if (state) {
+		state->last = complete;
+		state->used = used;
+		state->residue = 0;
+	}
+	return dma_async_is_complete(cookie, complete, used);
+}
+
+static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
+{
+	if (state)
+		state->residue = residue;
+}
+
+#endif
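[Editor's note: the per-driver hunks that follow are all instances of the same conversion onto these four helpers. A condensed sketch of the resulting driver lifecycle (hypothetical foochan/foo_bytes_left names; locking rules as documented in the comments above):

/* probe/alloc_chan_resources: start cookie and completed_cookie together */
dma_cookie_init(&foochan->chan);

/* tx_submit: hand out the next cookie, holding the channel lock */
cookie = dma_cookie_assign(tx);

/* completion tasklet/IRQ: record the descriptor as done, under the lock */
dma_cookie_complete(&desc->txd);

/* tx_status: lock-free snapshot of used/completed, then report the residue */
ret = dma_cookie_status(chan, cookie, txstate);
if (ret != DMA_SUCCESS)
        dma_set_residue(txstate, foo_bytes_left(foochan));
]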
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 0e4b5c6a2f86..cb173bbdcfdf 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 
 #include "dw_dmac_regs.h"
+#include "dmaengine.h"
 
 /*
  * This supports the Synopsys "DesignWare AHB Central DMA Controller",
@@ -156,21 +157,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 	}
 }
 
-/* Called with dwc->lock held and bh disabled */
-static dma_cookie_t
-dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
-{
-	dma_cookie_t cookie = dwc->chan.cookie;
-
-	if (++cookie < 0)
-		cookie = 1;
-
-	dwc->chan.cookie = cookie;
-	desc->txd.cookie = cookie;
-
-	return cookie;
-}
-
 static void dwc_initialize(struct dw_dma_chan *dwc)
 {
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
@@ -249,7 +235,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
 
 	spin_lock_irqsave(&dwc->lock, flags);
-	dwc->completed = txd->cookie;
+	dma_cookie_complete(txd);
 	if (callback_required) {
 		callback = txd->callback;
 		param = txd->callback_param;
@@ -602,7 +588,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dwc->lock, flags);
-	cookie = dwc_assign_cookie(dwc, desc);
+	cookie = dma_cookie_assign(tx);
 
 	/*
 	 * REVISIT: We should attempt to chain as many descriptors as
@@ -993,28 +979,17 @@ dwc_tx_status(struct dma_chan *chan,
 		struct dma_tx_state *txstate)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	dma_cookie_t		last_used;
-	dma_cookie_t		last_complete;
-	int			ret;
-
-	last_complete = dwc->completed;
-	last_used = chan->cookie;
+	enum dma_status		ret;
 
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret != DMA_SUCCESS) {
 		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
 
-		last_complete = dwc->completed;
-		last_used = chan->cookie;
-
-		ret = dma_async_is_complete(cookie, last_complete, last_used);
+		ret = dma_cookie_status(chan, cookie, txstate);
 	}
 
 	if (ret != DMA_SUCCESS)
-		dma_set_tx_state(txstate, last_complete, last_used,
-				 dwc_first_active(dwc)->len);
-	else
-		dma_set_tx_state(txstate, last_complete, last_used, 0);
+		dma_set_residue(txstate, dwc_first_active(dwc)->len);
 
 	if (dwc->paused)
 		return DMA_PAUSED;
@@ -1046,7 +1021,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 		return -EIO;
 	}
 
-	dwc->completed = chan->cookie = 1;
+	dma_cookie_init(chan);
 
 	/*
 	 * NOTE: some controllers may have additional features that we
@@ -1474,7 +1449,7 @@ static int __init dw_probe(struct platform_device *pdev)
 		struct dw_dma_chan	*dwc = &dw->chan[i];
 
 		dwc->chan.device = &dw->dma;
-		dwc->chan.cookie = dwc->completed = 1;
+		dma_cookie_init(&dwc->chan);
 		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
 			list_add_tail(&dwc->chan.device_node,
 					&dw->dma.channels);
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index eec0481a12f7..f298f69ecbf9 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -158,7 +158,6 @@ struct dw_dma_chan {
 
 	/* these other elements are all protected by lock */
 	unsigned long		flags;
-	dma_cookie_t		completed;
 	struct list_head	active_list;
 	struct list_head	queue;
 	struct list_head	free_list;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 59e7a965772b..f25e83bf5678 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -28,6 +28,8 @@
 
 #include <mach/dma.h>
 
+#include "dmaengine.h"
+
 /* M2P registers */
 #define M2P_CONTROL			0x0000
 #define M2P_CONTROL_STALLINT		BIT(0)
@@ -122,7 +124,6 @@ struct ep93xx_dma_desc {
  * @lock: lock protecting the fields following
  * @flags: flags for the channel
  * @buffer: which buffer to use next (0/1)
- * @last_completed: last completed cookie value
  * @active: flattened chain of descriptors currently being processed
  * @queue: pending descriptors which are handled next
  * @free_list: list of free descriptors which can be used
@@ -157,7 +158,6 @@ struct ep93xx_dma_chan {
 #define EP93XX_DMA_IS_CYCLIC		0
 
 	int				buffer;
-	dma_cookie_t			last_completed;
 	struct list_head		active;
 	struct list_head		queue;
 	struct list_head		free_list;
@@ -703,7 +703,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
 	desc = ep93xx_dma_get_active(edmac);
 	if (desc) {
 		if (desc->complete) {
-			edmac->last_completed = desc->txd.cookie;
+			dma_cookie_complete(&desc->txd);
 			list_splice_init(&edmac->active, &list);
 		}
 		callback = desc->txd.callback;
@@ -783,17 +783,10 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	unsigned long flags;
 
 	spin_lock_irqsave(&edmac->lock, flags);
-
-	cookie = edmac->chan.cookie;
-
-	if (++cookie < 0)
-		cookie = 1;
+	cookie = dma_cookie_assign(tx);
 
 	desc = container_of(tx, struct ep93xx_dma_desc, txd);
 
-	edmac->chan.cookie = cookie;
-	desc->txd.cookie = cookie;
-
 	/*
 	 * If nothing is currently prosessed, we push this descriptor
 	 * directly to the hardware. Otherwise we put the descriptor
@@ -861,8 +854,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
 		goto fail_clk_disable;
 
 	spin_lock_irq(&edmac->lock);
-	edmac->last_completed = 1;
-	edmac->chan.cookie = 1;
+	dma_cookie_init(&edmac->chan);
 	ret = edmac->edma->hw_setup(edmac);
 	spin_unlock_irq(&edmac->lock);
 
@@ -1248,18 +1240,13 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
 					    struct dma_tx_state *state)
 {
 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
-	dma_cookie_t last_used, last_completed;
 	enum dma_status ret;
 	unsigned long flags;
 
 	spin_lock_irqsave(&edmac->lock, flags);
-	last_used = chan->cookie;
-	last_completed = edmac->last_completed;
+	ret = dma_cookie_status(chan, cookie, state);
 	spin_unlock_irqrestore(&edmac->lock, flags);
 
-	ret = dma_async_is_complete(cookie, last_completed, last_used);
-	dma_set_tx_state(state, last_completed, last_used, 0);
-
 	return ret;
 }
 
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index b98070c33ca9..7d7384b34621 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -35,6 +35,7 @@
 #include <linux/dmapool.h>
 #include <linux/of_platform.h>
 
+#include "dmaengine.h"
 #include "fsldma.h"
 
 #define chan_dbg(chan, fmt, arg...)					\
@@ -413,17 +414,10 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	 * assign cookies to all of the software descriptors
 	 * that make up this transaction
 	 */
-	cookie = chan->common.cookie;
 	list_for_each_entry(child, &desc->tx_list, node) {
-		cookie++;
-		if (cookie < DMA_MIN_COOKIE)
-			cookie = DMA_MIN_COOKIE;
-
-		child->async_tx.cookie = cookie;
+		cookie = dma_cookie_assign(&child->async_tx);
 	}
 
-	chan->common.cookie = cookie;
-
 	/* put this transaction onto the tail of the pending queue */
 	append_ld_queue(chan, desc);
 
@@ -984,19 +978,14 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
 					struct dma_tx_state *txstate)
 {
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
-	dma_cookie_t last_complete;
-	dma_cookie_t last_used;
+	enum dma_status ret;
 	unsigned long flags;
 
 	spin_lock_irqsave(&chan->desc_lock, flags);
-
-	last_complete = chan->completed_cookie;
-	last_used = dchan->cookie;
-
+	ret = dma_cookie_status(dchan, cookie, txstate);
 	spin_unlock_irqrestore(&chan->desc_lock, flags);
 
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-	return dma_async_is_complete(cookie, last_complete, last_used);
+	return ret;
 }
 
 /*----------------------------------------------------------------------------*/
@@ -1087,8 +1076,8 @@ static void dma_do_tasklet(unsigned long data)
 
 		desc = to_fsl_desc(chan->ld_running.prev);
 		cookie = desc->async_tx.cookie;
+		dma_cookie_complete(&desc->async_tx);
 
-		chan->completed_cookie = cookie;
 		chan_dbg(chan, "completed_cookie=%d\n", cookie);
 	}
 
@@ -1303,6 +1292,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
 	chan->idle = true;
 
 	chan->common.device = &fdev->common;
+	dma_cookie_init(&chan->common);
 
 	/* find the IRQ line, if it exists in the device tree */
 	chan->irq = irq_of_parse_and_map(node, 0);
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 9cb5aa57c677..f5c38791fc74 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -137,7 +137,6 @@ struct fsldma_device {
 struct fsldma_chan {
 	char name[8];			/* Channel name */
 	struct fsldma_chan_regs __iomem *regs;
-	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */
 	struct list_head ld_pending;	/* Link descriptors queue */
 	struct list_head ld_running;	/* Link descriptors queue */
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index c32103f04fb3..20c1565a7486 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -32,6 +32,7 @@
 #include <mach/dma-v1.h>
 #include <mach/hardware.h>
 
+#include "dmaengine.h"
 #define IMXDMA_MAX_CHAN_DESCRIPTORS	16
 
 enum  imxdma_prep_type {
@@ -77,7 +78,8 @@ struct imxdma_channel {
 	u32 watermark_level;
 	struct dma_chan chan;
 	spinlock_t lock;
-	dma_cookie_t last_completed;
+	struct dma_async_tx_descriptor desc;
+	enum dma_status status;
 	int dma_request;
 	struct scatterlist *sg_list;
 };
@@ -192,7 +194,7 @@ static void imxdma_tasklet(unsigned long data)
 	if (desc->desc.callback)
 		desc->desc.callback(desc->desc.callback_param);
 
-	imxdmac->last_completed = desc->desc.cookie;
+	dma_cookie_complete(&desc->desc);
 
 	/* If we are dealing with a cyclic descriptor keep it on ld_active */
 	if (imxdma_chan_is_doing_cyclic(imxdmac))
@@ -276,31 +278,7 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan,
 					    dma_cookie_t cookie,
 					    struct dma_tx_state *txstate)
 {
-	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
-	dma_cookie_t last_used;
-	enum dma_status ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&imxdmac->lock, flags);
-	last_used = chan->cookie;
-
-	ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
-	dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);
-	spin_unlock_irqrestore(&imxdmac->lock, flags);
-
-	return ret;
-}
-
-static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
-{
-	dma_cookie_t cookie = imxdma->chan.cookie;
-
-	if (++cookie < 0)
-		cookie = 1;
-
-	imxdma->chan.cookie = cookie;
-
-	return cookie;
+	return dma_cookie_status(chan, cookie, txstate);
 }
 
 static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
305 283
306static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) 284static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -310,11 +288,7 @@ static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
 	unsigned long flags;
 
 	spin_lock_irqsave(&imxdmac->lock, flags);
-
-	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
-	cookie = imxdma_assign_cookie(imxdmac);
-	tx->cookie = cookie;
-
+	cookie = dma_cookie_assign(tx);
 	spin_unlock_irqrestore(&imxdmac->lock, flags);
 
 	return cookie;
@@ -583,6 +557,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
 		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
 			     (unsigned long)imxdmac);
 		imxdmac->chan.device = &imxdma->dma_device;
+		dma_cookie_init(&imxdmac->chan);
 		imxdmac->channel = i;
 
 		/* Add the channel to the DMAC list */
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f0bfc0e07416..5da552d1f92d 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -43,6 +43,8 @@
 #include <mach/dma.h>
 #include <mach/hardware.h>
 
+#include "dmaengine.h"
+
 /* SDMA registers */
 #define SDMA_H_C0PTR		0x000
 #define SDMA_H_INTR		0x004
@@ -267,7 +269,6 @@ struct sdma_channel {
 	struct dma_chan			chan;
 	spinlock_t			lock;
 	struct dma_async_tx_descriptor	desc;
-	dma_cookie_t			last_completed;
 	enum dma_status			status;
 	unsigned int			chn_count;
 	unsigned int			chn_real_count;
@@ -529,7 +530,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 	else
 		sdmac->status = DMA_SUCCESS;
 
-	sdmac->last_completed = sdmac->desc.cookie;
+	dma_cookie_complete(&sdmac->desc);
 	if (sdmac->desc.callback)
 		sdmac->desc.callback(sdmac->desc.callback_param);
 }
@@ -814,19 +815,6 @@ out:
 	return ret;
 }
 
-static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
-{
-	dma_cookie_t cookie = sdmac->chan.cookie;
-
-	if (++cookie < 0)
-		cookie = 1;
-
-	sdmac->chan.cookie = cookie;
-	sdmac->desc.cookie = cookie;
-
-	return cookie;
-}
-
 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 {
 	return container_of(chan, struct sdma_channel, chan);
@@ -840,7 +828,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	spin_lock_irqsave(&sdmac->lock, flags);
 
-	cookie = sdma_assign_cookie(sdmac);
+	cookie = dma_cookie_assign(tx);
 
 	spin_unlock_irqrestore(&sdmac->lock, flags);
 
@@ -1127,7 +1115,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 
 	last_used = chan->cookie;
 
-	dma_set_tx_state(txstate, sdmac->last_completed, last_used,
+	dma_set_tx_state(txstate, chan->completed_cookie, last_used,
 			 sdmac->chn_count - sdmac->chn_real_count);
 
 	return sdmac->status;
@@ -1368,6 +1356,7 @@ static int __init sdma_probe(struct platform_device *pdev)
 		spin_lock_init(&sdmac->lock);
 
 		sdmac->chan.device = &sdma->dma_device;
+		dma_cookie_init(&sdmac->chan);
 		sdmac->channel = i;
 
 		/*
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 923476d74a5d..2449812f5464 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -29,6 +29,8 @@
 #include <linux/intel_mid_dma.h>
 #include <linux/module.h>
 
+#include "dmaengine.h"
+
 #define MAX_CHAN	4 /*max ch across controllers*/
 #include "intel_mid_dma_regs.h"
 
@@ -288,7 +290,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
 	struct intel_mid_dma_lli	*llitem;
 	void *param_txd = NULL;
 
-	midc->completed = txd->cookie;
+	dma_cookie_complete(txd);
 	callback_txd = txd->callback;
 	param_txd = txd->callback_param;
 
@@ -434,14 +436,7 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	dma_cookie_t cookie;
 
 	spin_lock_bh(&midc->lock);
-	cookie = midc->chan.cookie;
-
-	if (++cookie < 0)
-		cookie = 1;
-
-	midc->chan.cookie = cookie;
-	desc->txd.cookie = cookie;
-
+	cookie = dma_cookie_assign(tx);
 
 	if (list_empty(&midc->active_list))
 		list_add_tail(&desc->desc_node, &midc->active_list);
@@ -482,31 +477,18 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
 						dma_cookie_t cookie,
 						struct dma_tx_state *txstate)
 {
 	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
-	dma_cookie_t		last_used;
-	dma_cookie_t		last_complete;
-	int				ret;
-
-	last_complete = midc->completed;
-	last_used = chan->cookie;
+	enum dma_status ret;
 
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret != DMA_SUCCESS) {
 		spin_lock_bh(&midc->lock);
 		midc_scan_descriptors(to_middma_device(chan->device), midc);
 		spin_unlock_bh(&midc->lock);
 
-		last_complete = midc->completed;
-		last_used = chan->cookie;
-
-		ret = dma_async_is_complete(cookie, last_complete, last_used);
+		ret = dma_cookie_status(chan, cookie, txstate);
 	}
 
-	if (txstate) {
-		txstate->last = last_complete;
-		txstate->used = last_used;
-		txstate->residue = 0;
-	}
 	return ret;
 }
 
@@ -886,7 +868,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
 		pm_runtime_put(&mid->pdev->dev);
 		return -EIO;
 	}
-	midc->completed = chan->cookie = 1;
+	dma_cookie_init(chan);
 
 	spin_lock_bh(&midc->lock);
 	while (midc->descs_allocated < DESCS_PER_CHANNEL) {
@@ -1119,7 +1101,7 @@ static int mid_setup_dma(struct pci_dev *pdev)
 		struct intel_mid_dma_chan *midch = &dma->ch[i];
 
 		midch->chan.device = &dma->common;
-		midch->chan.cookie = 1;
+		dma_cookie_init(&midch->chan);
 		midch->ch_id = dma->chan_base + i;
 		pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
 
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index c83d35b97bd8..1bfa9268feaf 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -165,7 +165,6 @@ union intel_mid_dma_cfg_hi {
  * @dma_base: MMIO register space DMA engine base pointer
  * @ch_id: DMA channel id
  * @lock: channel spinlock
- * @completed: DMA cookie
  * @active_list: current active descriptors
  * @queue: current queued up descriptors
  * @free_list: current free descriptors
@@ -183,7 +182,6 @@ struct intel_mid_dma_chan {
 	void __iomem *dma_base;
 	int ch_id;
 	spinlock_t lock;
-	dma_cookie_t completed;
 	struct list_head active_list;
 	struct list_head queue;
 	struct list_head free_list;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index a4d6cb0c0343..31493d80e0e9 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -40,6 +40,8 @@
 #include "registers.h"
 #include "hw.h"
 
+#include "../dmaengine.h"
+
 int ioat_pending_level = 4;
 module_param(ioat_pending_level, int, 0644);
 MODULE_PARM_DESC(ioat_pending_level,
@@ -107,6 +109,7 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
 	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
 	spin_lock_init(&chan->cleanup_lock);
 	chan->common.device = dma;
+	dma_cookie_init(&chan->common);
 	list_add_tail(&chan->common.device_node, &dma->channels);
 	device->idx[idx] = chan;
 	init_timer(&chan->timer);
@@ -235,12 +238,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	spin_lock_bh(&ioat->desc_lock);
 	/* cookie incr and addition to used_list must be atomic */
-	cookie = c->cookie;
-	cookie++;
-	if (cookie < 0)
-		cookie = 1;
-	c->cookie = cookie;
-	tx->cookie = cookie;
+	cookie = dma_cookie_assign(tx);
 	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
 
 	/* write address into NextDescriptor field of last desc in chain */
@@ -603,8 +601,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
 		 */
 		dump_desc_dbg(ioat, desc);
 		if (tx->cookie) {
-			chan->completed_cookie = tx->cookie;
-			tx->cookie = 0;
+			dma_cookie_complete(tx);
 			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
 			ioat->active -= desc->hw->tx_cnt;
 			if (tx->callback) {
@@ -733,13 +730,15 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 {
 	struct ioat_chan_common *chan = to_chan_common(c);
 	struct ioatdma_device *device = chan->device;
+	enum dma_status ret;
 
-	if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
-		return DMA_SUCCESS;
+	ret = dma_cookie_status(c, cookie, txstate);
+	if (ret == DMA_SUCCESS)
+		return ret;
 
 	device->cleanup_fn((unsigned long) c);
 
-	return ioat_tx_status(c, cookie, txstate);
+	return dma_cookie_status(c, cookie, txstate);
 }
 
 static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 5216c8a92a21..c7888bccd974 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -90,7 +90,6 @@ struct ioat_chan_common {
 	void __iomem *reg_base;
 	unsigned long last_completion;
 	spinlock_t cleanup_lock;
-	dma_cookie_t completed_cookie;
 	unsigned long state;
 	#define IOAT_COMPLETION_PENDING 0
 	#define IOAT_COMPLETION_ACK 1
@@ -143,28 +142,6 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
 	return container_of(chan, struct ioat_dma_chan, base);
 }
 
-/**
- * ioat_tx_status - poll the status of an ioat transaction
- * @c: channel handle
- * @cookie: transaction identifier
- * @txstate: if set, updated with the transaction state
- */
-static inline enum dma_status
-ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
-		 struct dma_tx_state *txstate)
-{
-	struct ioat_chan_common *chan = to_chan_common(c);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
-
-	last_used = c->cookie;
-	last_complete = chan->completed_cookie;
-
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-
-	return dma_async_is_complete(cookie, last_complete, last_used);
-}
-
 /* wrapper around hardware descriptor format + additional software fields */
 
 /**
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5d65f8377971..e8e110ff3d96 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -41,6 +41,8 @@
 #include "registers.h"
 #include "hw.h"
 
+#include "../dmaengine.h"
+
 int ioat_ring_alloc_order = 8;
 module_param(ioat_ring_alloc_order, int, 0644);
 MODULE_PARM_DESC(ioat_ring_alloc_order,
@@ -147,8 +149,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 		dump_desc_dbg(ioat, desc);
 		if (tx->cookie) {
 			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
-			chan->completed_cookie = tx->cookie;
-			tx->cookie = 0;
+			dma_cookie_complete(tx);
 			if (tx->callback) {
 				tx->callback(tx->callback_param);
 				tx->callback = NULL;
@@ -398,13 +399,9 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
 	struct dma_chan *c = tx->chan;
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
 	struct ioat_chan_common *chan = &ioat->base;
-	dma_cookie_t cookie = c->cookie;
+	dma_cookie_t cookie;
 
-	cookie++;
-	if (cookie < 0)
-		cookie = 1;
-	tx->cookie = cookie;
-	c->cookie = cookie;
+	cookie = dma_cookie_assign(tx);
 	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
 
 	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index f519c93a61e7..2c4476c0e405 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -61,6 +61,7 @@
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/prefetch.h>
+#include "../dmaengine.h"
 #include "registers.h"
 #include "hw.h"
 #include "dma.h"
@@ -277,9 +278,8 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 		dump_desc_dbg(ioat, desc);
 		tx = &desc->txd;
 		if (tx->cookie) {
-			chan->completed_cookie = tx->cookie;
+			dma_cookie_complete(tx);
 			ioat3_dma_unmap(ioat, desc, idx + i);
-			tx->cookie = 0;
 			if (tx->callback) {
 				tx->callback(tx->callback_param);
 				tx->callback = NULL;
@@ -411,13 +411,15 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 		struct dma_tx_state *txstate)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+	enum dma_status ret;
 
-	if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
-		return DMA_SUCCESS;
+	ret = dma_cookie_status(c, cookie, txstate);
+	if (ret == DMA_SUCCESS)
+		return ret;
 
 	ioat3_cleanup(ioat);
 
-	return ioat_tx_status(c, cookie, txstate);
+	return dma_cookie_status(c, cookie, txstate);
 }
 
 static struct dma_async_tx_descriptor *
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 04be90b645b8..4499f88789bc 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -36,6 +36,8 @@
 
 #include <mach/adma.h>
 
+#include "dmaengine.h"
+
 #define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
 #define to_iop_adma_device(dev) \
 	container_of(dev, struct iop_adma_device, common)
@@ -317,7 +319,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 	}
 
 	if (cookie > 0) {
-		iop_chan->completed_cookie = cookie;
+		iop_chan->common.completed_cookie = cookie;
 		pr_debug("\tcompleted cookie %d\n", cookie);
 	}
 }
@@ -438,18 +440,6 @@ retry:
 	return NULL;
 }
 
-static dma_cookie_t
-iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
-	struct iop_adma_desc_slot *desc)
-{
-	dma_cookie_t cookie = iop_chan->common.cookie;
-	cookie++;
-	if (cookie < 0)
-		cookie = 1;
-	iop_chan->common.cookie = desc->async_tx.cookie = cookie;
-	return cookie;
-}
-
 static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
 {
 	dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
@@ -477,7 +467,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
 	slots_per_op = grp_start->slots_per_op;
 
 	spin_lock_bh(&iop_chan->lock);
-	cookie = iop_desc_assign_cookie(iop_chan, sw_desc);
+	cookie = dma_cookie_assign(tx);
 
 	old_chain_tail = list_entry(iop_chan->chain.prev,
 		struct iop_adma_desc_slot, chain_node);
@@ -904,24 +894,15 @@ static enum dma_status iop_adma_status(struct dma_chan *chan,
 					struct dma_tx_state *txstate)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
-	enum dma_status ret;
-
-	last_used = chan->cookie;
-	last_complete = iop_chan->completed_cookie;
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	int ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_SUCCESS)
 		return ret;
 
 	iop_adma_slot_cleanup(iop_chan);
 
-	last_used = chan->cookie;
-	last_complete = iop_chan->completed_cookie;
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-
-	return dma_async_is_complete(cookie, last_complete, last_used);
+	return dma_cookie_status(chan, cookie, txstate);
 }
 
 static irqreturn_t iop_adma_eot_handler(int irq, void *data)
927static irqreturn_t iop_adma_eot_handler(int irq, void *data) 908static irqreturn_t iop_adma_eot_handler(int irq, void *data)
@@ -1565,6 +1546,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
1565 INIT_LIST_HEAD(&iop_chan->chain); 1546 INIT_LIST_HEAD(&iop_chan->chain);
1566 INIT_LIST_HEAD(&iop_chan->all_slots); 1547 INIT_LIST_HEAD(&iop_chan->all_slots);
1567 iop_chan->common.device = dma_dev; 1548 iop_chan->common.device = dma_dev;
1549 dma_cookie_init(&iop_chan->common);
1568 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); 1550 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1569 1551
1570 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { 1552 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
@@ -1642,16 +1624,12 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1642 iop_desc_set_dest_addr(grp_start, iop_chan, 0); 1624 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1643 iop_desc_set_memcpy_src_addr(grp_start, 0); 1625 iop_desc_set_memcpy_src_addr(grp_start, 0);
1644 1626
1645 cookie = iop_chan->common.cookie; 1627 cookie = dma_cookie_assign(&sw_desc->async_tx);
1646 cookie++;
1647 if (cookie <= 1)
1648 cookie = 2;
1649 1628
1650 /* initialize the completed cookie to be less than 1629 /* initialize the completed cookie to be less than
1651 * the most recently used cookie 1630 * the most recently used cookie
1652 */ 1631 */
1653 iop_chan->completed_cookie = cookie - 1; 1632 iop_chan->common.completed_cookie = cookie - 1;
1654 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1655 1633
1656 /* channel should not be busy */ 1634 /* channel should not be busy */
1657 BUG_ON(iop_chan_is_busy(iop_chan)); 1635 BUG_ON(iop_chan_is_busy(iop_chan));
@@ -1699,16 +1677,12 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1699 iop_desc_set_xor_src_addr(grp_start, 0, 0); 1677 iop_desc_set_xor_src_addr(grp_start, 0, 0);
1700 iop_desc_set_xor_src_addr(grp_start, 1, 0); 1678 iop_desc_set_xor_src_addr(grp_start, 1, 0);
1701 1679
1702 cookie = iop_chan->common.cookie; 1680 cookie = dma_cookie_assign(&sw_desc->async_tx);
1703 cookie++;
1704 if (cookie <= 1)
1705 cookie = 2;
1706 1681
1707 /* initialize the completed cookie to be less than 1682 /* initialize the completed cookie to be less than
1708 * the most recently used cookie 1683 * the most recently used cookie
1709 */ 1684 */
1710 iop_chan->completed_cookie = cookie - 1; 1685 iop_chan->common.completed_cookie = cookie - 1;
1711 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1712 1686
1713 /* channel should not be busy */ 1687 /* channel should not be busy */
1714 BUG_ON(iop_chan_is_busy(iop_chan)); 1688 BUG_ON(iop_chan_is_busy(iop_chan));
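
iop_desc_assign_cookie(), deleted above, is one of about a dozen near-identical per-driver copies this series removes: increment the channel cookie, wrap non-positive values back to 1, store the result in both the channel and the descriptor. A sketch of the shared replacement, under the same contract as its callers (every tx_submit here holds the channel lock around it):

/*
 * Sketch of dma_cookie_assign(); the "if (cookie < 0) cookie = 1"
 * of the removed copies becomes a clamp at DMA_MIN_COOKIE, which
 * is 1.  Caller is expected to hold a lock to prevent concurrent
 * submission on the same channel.
 */
static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = tx->chan;
	dma_cookie_t cookie;

	cookie = chan->cookie + 1;
	if (cookie < DMA_MIN_COOKIE)
		cookie = DMA_MIN_COOKIE;
	tx->cookie = chan->cookie = cookie;

	return cookie;
}
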
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 6212b16e8cf2..1880274b0850 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -25,6 +25,7 @@
25 25
26#include <mach/ipu.h> 26#include <mach/ipu.h>
27 27
28#include "../dmaengine.h"
28#include "ipu_intern.h" 29#include "ipu_intern.h"
29 30
30#define FS_VF_IN_VALID 0x00000002 31#define FS_VF_IN_VALID 0x00000002
@@ -866,14 +867,7 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
866 867
867 dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]); 868 dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
868 869
869 cookie = ichan->dma_chan.cookie; 870 cookie = dma_cookie_assign(tx);
870
871 if (++cookie < 0)
872 cookie = 1;
873
874 /* from dmaengine.h: "last cookie value returned to client" */
875 ichan->dma_chan.cookie = cookie;
876 tx->cookie = cookie;
877 871
878 /* ipu->lock can be taken under ichan->lock, but not v.v. */ 872 /* ipu->lock can be taken under ichan->lock, but not v.v. */
879 spin_lock_irqsave(&ichan->lock, flags); 873 spin_lock_irqsave(&ichan->lock, flags);
@@ -1295,7 +1289,7 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1295 /* Flip the active buffer - even if update above failed */ 1289 /* Flip the active buffer - even if update above failed */
1296 ichan->active_buffer = !ichan->active_buffer; 1290 ichan->active_buffer = !ichan->active_buffer;
1297 if (done) 1291 if (done)
1298 ichan->completed = desc->txd.cookie; 1292 dma_cookie_complete(&desc->txd);
1299 1293
1300 callback = desc->txd.callback; 1294 callback = desc->txd.callback;
1301 callback_param = desc->txd.callback_param; 1295 callback_param = desc->txd.callback_param;
@@ -1510,8 +1504,7 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan)
1510 BUG_ON(chan->client_count > 1); 1504 BUG_ON(chan->client_count > 1);
1511 WARN_ON(ichan->status != IPU_CHANNEL_FREE); 1505 WARN_ON(ichan->status != IPU_CHANNEL_FREE);
1512 1506
1513 chan->cookie = 1; 1507 dma_cookie_init(chan);
1514 ichan->completed = -ENXIO;
1515 1508
1516 ret = ipu_irq_map(chan->chan_id); 1509 ret = ipu_irq_map(chan->chan_id);
1517 if (ret < 0) 1510 if (ret < 0)
@@ -1600,9 +1593,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
1600static enum dma_status idmac_tx_status(struct dma_chan *chan, 1593static enum dma_status idmac_tx_status(struct dma_chan *chan,
1601 dma_cookie_t cookie, struct dma_tx_state *txstate) 1594 dma_cookie_t cookie, struct dma_tx_state *txstate)
1602{ 1595{
1603 struct idmac_channel *ichan = to_idmac_chan(chan); 1596 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
1604
1605 dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0);
1606 if (cookie != chan->cookie) 1597 if (cookie != chan->cookie)
1607 return DMA_ERROR; 1598 return DMA_ERROR;
1608 return DMA_SUCCESS; 1599 return DMA_SUCCESS;
@@ -1638,11 +1629,10 @@ static int __init ipu_idmac_init(struct ipu *ipu)
1638 1629
1639 ichan->status = IPU_CHANNEL_FREE; 1630 ichan->status = IPU_CHANNEL_FREE;
1640 ichan->sec_chan_en = false; 1631 ichan->sec_chan_en = false;
1641 ichan->completed = -ENXIO;
1642 snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i); 1632 snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i);
1643 1633
1644 dma_chan->device = &idmac->dma; 1634 dma_chan->device = &idmac->dma;
1645 dma_chan->cookie = 1; 1635 dma_cookie_init(dma_chan);
1646 dma_chan->chan_id = i; 1636 dma_chan->chan_id = i;
1647 list_add_tail(&dma_chan->device_node, &dma->channels); 1637 list_add_tail(&dma_chan->device_node, &dma->channels);
1648 } 1638 }
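
idmac previously mirrored completion in a private ichan->completed field, initialized to -ENXIO so nothing looked complete before the first interrupt; both the field and the sentinel go away now that the channel itself records completion. A sketch of the helper, consistent with the call sites above:

/*
 * Sketch of dma_cookie_complete(): publish the descriptor's cookie
 * as the channel's last completed one, then zero it so completing
 * the same descriptor twice trips the BUG_ON.  Caller is expected
 * to hold a lock.
 */
static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
{
	BUG_ON(tx->cookie < DMA_MIN_COOKIE);
	tx->chan->completed_cookie = tx->cookie;
	tx->cookie = 0;
}
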
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4d6d4cf66949..2ab0a3d0eed5 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -44,6 +44,8 @@
44 44
45#include <linux/random.h> 45#include <linux/random.h>
46 46
47#include "dmaengine.h"
48
47/* Number of DMA Transfer descriptors allocated per channel */ 49/* Number of DMA Transfer descriptors allocated per channel */
48#define MPC_DMA_DESCRIPTORS 64 50#define MPC_DMA_DESCRIPTORS 64
49 51
@@ -188,7 +190,6 @@ struct mpc_dma_chan {
188 struct list_head completed; 190 struct list_head completed;
189 struct mpc_dma_tcd *tcd; 191 struct mpc_dma_tcd *tcd;
190 dma_addr_t tcd_paddr; 192 dma_addr_t tcd_paddr;
191 dma_cookie_t completed_cookie;
192 193
193 /* Lock for this structure */ 194 /* Lock for this structure */
194 spinlock_t lock; 195 spinlock_t lock;
@@ -365,7 +366,7 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma)
365 /* Free descriptors */ 366 /* Free descriptors */
366 spin_lock_irqsave(&mchan->lock, flags); 367 spin_lock_irqsave(&mchan->lock, flags);
367 list_splice_tail_init(&list, &mchan->free); 368 list_splice_tail_init(&list, &mchan->free);
368 mchan->completed_cookie = last_cookie; 369 mchan->chan.completed_cookie = last_cookie;
369 spin_unlock_irqrestore(&mchan->lock, flags); 370 spin_unlock_irqrestore(&mchan->lock, flags);
370 } 371 }
371} 372}
@@ -438,13 +439,7 @@ static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
438 mpc_dma_execute(mchan); 439 mpc_dma_execute(mchan);
439 440
440 /* Update cookie */ 441 /* Update cookie */
441 cookie = mchan->chan.cookie + 1; 442 cookie = dma_cookie_assign(txd);
442 if (cookie <= 0)
443 cookie = 1;
444
445 mchan->chan.cookie = cookie;
446 mdesc->desc.cookie = cookie;
447
448 spin_unlock_irqrestore(&mchan->lock, flags); 443 spin_unlock_irqrestore(&mchan->lock, flags);
449 444
450 return cookie; 445 return cookie;
@@ -562,17 +557,14 @@ mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
562 struct dma_tx_state *txstate) 557 struct dma_tx_state *txstate)
563{ 558{
564 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); 559 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
560 enum dma_status ret;
565 unsigned long flags; 561 unsigned long flags;
566 dma_cookie_t last_used;
567 dma_cookie_t last_complete;
568 562
569 spin_lock_irqsave(&mchan->lock, flags); 563 spin_lock_irqsave(&mchan->lock, flags);
570 last_used = mchan->chan.cookie; 564 ret = dma_cookie_status(chan, cookie, txstate);
571 last_complete = mchan->completed_cookie;
572 spin_unlock_irqrestore(&mchan->lock, flags); 565 spin_unlock_irqrestore(&mchan->lock, flags);
573 566
574 dma_set_tx_state(txstate, last_complete, last_used, 0); 567 return ret;
575 return dma_async_is_complete(cookie, last_complete, last_used);
576} 568}
577 569
578/* Prepare descriptor for memory to memory copy */ 570/* Prepare descriptor for memory to memory copy */
@@ -741,8 +733,7 @@ static int __devinit mpc_dma_probe(struct platform_device *op)
741 mchan = &mdma->channels[i]; 733 mchan = &mdma->channels[i];
742 734
743 mchan->chan.device = dma; 735 mchan->chan.device = dma;
744 mchan->chan.cookie = 1; 736 dma_cookie_init(&mchan->chan);
745 mchan->completed_cookie = mchan->chan.cookie;
746 737
747 INIT_LIST_HEAD(&mchan->free); 738 INIT_LIST_HEAD(&mchan->free);
748 INIT_LIST_HEAD(&mchan->prepared); 739 INIT_LIST_HEAD(&mchan->prepared);
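
Note that mpc512x keeps its channel spinlock around the status query even though dma_cookie_status() itself takes no locks; the lock now only preserves the driver's own ordering against its list handling. A driver with no such requirement reduces to a single line, as in this hypothetical minimal example (pl330, further down, ends up exactly here):

/* Hypothetical minimal tx_status for a driver with no private
 * state to consult on the status path. */
static enum dma_status foo_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
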
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index e779b434af45..fa5d55fea46c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -26,6 +26,8 @@
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/memory.h> 27#include <linux/memory.h>
28#include <plat/mv_xor.h> 28#include <plat/mv_xor.h>
29
30#include "dmaengine.h"
29#include "mv_xor.h" 31#include "mv_xor.h"
30 32
31static void mv_xor_issue_pending(struct dma_chan *chan); 33static void mv_xor_issue_pending(struct dma_chan *chan);
@@ -435,7 +437,7 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
435 } 437 }
436 438
437 if (cookie > 0) 439 if (cookie > 0)
438 mv_chan->completed_cookie = cookie; 440 mv_chan->common.completed_cookie = cookie;
439} 441}
440 442
441static void 443static void
@@ -534,18 +536,6 @@ retry:
534 return NULL; 536 return NULL;
535} 537}
536 538
537static dma_cookie_t
538mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
539 struct mv_xor_desc_slot *desc)
540{
541 dma_cookie_t cookie = mv_chan->common.cookie;
542
543 if (++cookie < 0)
544 cookie = 1;
545 mv_chan->common.cookie = desc->async_tx.cookie = cookie;
546 return cookie;
547}
548
549/************************ DMA engine API functions ****************************/ 539/************************ DMA engine API functions ****************************/
550static dma_cookie_t 540static dma_cookie_t
551mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) 541mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -563,7 +553,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
563 grp_start = sw_desc->group_head; 553 grp_start = sw_desc->group_head;
564 554
565 spin_lock_bh(&mv_chan->lock); 555 spin_lock_bh(&mv_chan->lock);
566 cookie = mv_desc_assign_cookie(mv_chan, sw_desc); 556 cookie = dma_cookie_assign(tx);
567 557
568 if (list_empty(&mv_chan->chain)) 558 if (list_empty(&mv_chan->chain))
569 list_splice_init(&sw_desc->tx_list, &mv_chan->chain); 559 list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
@@ -820,27 +810,16 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
820 struct dma_tx_state *txstate) 810 struct dma_tx_state *txstate)
821{ 811{
822 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 812 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
823 dma_cookie_t last_used;
824 dma_cookie_t last_complete;
825 enum dma_status ret; 813 enum dma_status ret;
826 814
827 last_used = chan->cookie; 815 ret = dma_cookie_status(chan, cookie, txstate);
828 last_complete = mv_chan->completed_cookie;
829 mv_chan->is_complete_cookie = cookie;
830 dma_set_tx_state(txstate, last_complete, last_used, 0);
831
832 ret = dma_async_is_complete(cookie, last_complete, last_used);
833 if (ret == DMA_SUCCESS) { 816 if (ret == DMA_SUCCESS) {
834 mv_xor_clean_completed_slots(mv_chan); 817 mv_xor_clean_completed_slots(mv_chan);
835 return ret; 818 return ret;
836 } 819 }
837 mv_xor_slot_cleanup(mv_chan); 820 mv_xor_slot_cleanup(mv_chan);
838 821
839 last_used = chan->cookie; 822 return dma_cookie_status(chan, cookie, txstate);
840 last_complete = mv_chan->completed_cookie;
841
842 dma_set_tx_state(txstate, last_complete, last_used, 0);
843 return dma_async_is_complete(cookie, last_complete, last_used);
844} 823}
845 824
846static void mv_dump_xor_regs(struct mv_xor_chan *chan) 825static void mv_dump_xor_regs(struct mv_xor_chan *chan)
@@ -1214,6 +1193,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
1214 INIT_LIST_HEAD(&mv_chan->completed_slots); 1193 INIT_LIST_HEAD(&mv_chan->completed_slots);
1215 INIT_LIST_HEAD(&mv_chan->all_slots); 1194 INIT_LIST_HEAD(&mv_chan->all_slots);
1216 mv_chan->common.device = dma_dev; 1195 mv_chan->common.device = dma_dev;
1196 dma_cookie_init(&mv_chan->common);
1217 1197
1218 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); 1198 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
1219 1199
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 977b592e976b..654876b7ba1d 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -78,7 +78,6 @@ struct mv_xor_device {
78/** 78/**
79 * struct mv_xor_chan - internal representation of a XOR channel 79 * struct mv_xor_chan - internal representation of a XOR channel
80 * @pending: allows batching of hardware operations 80 * @pending: allows batching of hardware operations
81 * @completed_cookie: identifier for the most recently completed operation
82 * @lock: serializes enqueue/dequeue operations to the descriptors pool 81 * @lock: serializes enqueue/dequeue operations to the descriptors pool
83 * @mmr_base: memory mapped register base 82 * @mmr_base: memory mapped register base
84 * @idx: the index of the xor channel 83 * @idx: the index of the xor channel
@@ -93,7 +92,6 @@ struct mv_xor_device {
93 */ 92 */
94struct mv_xor_chan { 93struct mv_xor_chan {
95 int pending; 94 int pending;
96 dma_cookie_t completed_cookie;
97 spinlock_t lock; /* protects the descriptor slot pool */ 95 spinlock_t lock; /* protects the descriptor slot pool */
98 void __iomem *mmr_base; 96 void __iomem *mmr_base;
99 unsigned int idx; 97 unsigned int idx;
@@ -109,7 +107,6 @@ struct mv_xor_chan {
109#ifdef USE_TIMER 107#ifdef USE_TIMER
110 unsigned long cleanup_time; 108 unsigned long cleanup_time;
111 u32 current_on_last_cleanup; 109 u32 current_on_last_cleanup;
112 dma_cookie_t is_complete_cookie;
113#endif 110#endif
114}; 111};
115 112
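
The two fields dropped from mv_xor_chan show what the series is really about: every driver carried a private mirror of the last completed cookie, each with its own locking story. Both cookies now sit side by side in the core channel object. An abridged sketch of the relevant part of struct dma_chan after this series (unrelated members elided; see include/linux/dmaengine.h):

/* Abridged: only the cookie-related members of struct dma_chan. */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;		/* last cookie returned to client */
	dma_cookie_t completed_cookie;	/* last completed cookie */
	/* ... */
};
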
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index b06cd4ca626f..a2267f9ab568 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -28,6 +28,8 @@
28#include <mach/dma.h> 28#include <mach/dma.h>
29#include <mach/common.h> 29#include <mach/common.h>
30 30
31#include "dmaengine.h"
32
31/* 33/*
32 * NOTE: The term "PIO" throughout the mxs-dma implementation means 34 * NOTE: The term "PIO" throughout the mxs-dma implementation means
33 * PIO mode of mxs apbh-dma and apbx-dma. With this working mode, 35 * PIO mode of mxs apbh-dma and apbx-dma. With this working mode,
@@ -111,7 +113,6 @@ struct mxs_dma_chan {
111 struct mxs_dma_ccw *ccw; 113 struct mxs_dma_ccw *ccw;
112 dma_addr_t ccw_phys; 114 dma_addr_t ccw_phys;
113 int desc_count; 115 int desc_count;
114 dma_cookie_t last_completed;
115 enum dma_status status; 116 enum dma_status status;
116 unsigned int flags; 117 unsigned int flags;
117#define MXS_DMA_SG_LOOP (1 << 0) 118#define MXS_DMA_SG_LOOP (1 << 0)
@@ -193,19 +194,6 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
193 mxs_chan->status = DMA_IN_PROGRESS; 194 mxs_chan->status = DMA_IN_PROGRESS;
194} 195}
195 196
196static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan)
197{
198 dma_cookie_t cookie = mxs_chan->chan.cookie;
199
200 if (++cookie < 0)
201 cookie = 1;
202
203 mxs_chan->chan.cookie = cookie;
204 mxs_chan->desc.cookie = cookie;
205
206 return cookie;
207}
208
209static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) 197static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
210{ 198{
211 return container_of(chan, struct mxs_dma_chan, chan); 199 return container_of(chan, struct mxs_dma_chan, chan);
@@ -217,7 +205,7 @@ static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
217 205
218 mxs_dma_enable_chan(mxs_chan); 206 mxs_dma_enable_chan(mxs_chan);
219 207
220 return mxs_dma_assign_cookie(mxs_chan); 208 return dma_cookie_assign(tx);
221} 209}
222 210
223static void mxs_dma_tasklet(unsigned long data) 211static void mxs_dma_tasklet(unsigned long data)
@@ -274,7 +262,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
274 stat1 &= ~(1 << channel); 262 stat1 &= ~(1 << channel);
275 263
276 if (mxs_chan->status == DMA_SUCCESS) 264 if (mxs_chan->status == DMA_SUCCESS)
277 mxs_chan->last_completed = mxs_chan->desc.cookie; 265 dma_cookie_complete(&mxs_chan->desc);
278 266
279 /* schedule tasklet on this channel */ 267 /* schedule tasklet on this channel */
280 tasklet_schedule(&mxs_chan->tasklet); 268 tasklet_schedule(&mxs_chan->tasklet);
@@ -538,7 +526,7 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
538 dma_cookie_t last_used; 526 dma_cookie_t last_used;
539 527
540 last_used = chan->cookie; 528 last_used = chan->cookie;
541 dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0); 529 dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
542 530
543 return mxs_chan->status; 531 return mxs_chan->status;
544} 532}
@@ -630,6 +618,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
630 618
631 mxs_chan->mxs_dma = mxs_dma; 619 mxs_chan->mxs_dma = mxs_dma;
632 mxs_chan->chan.device = &mxs_dma->dma_device; 620 mxs_chan->chan.device = &mxs_dma->dma_device;
621 dma_cookie_init(&mxs_chan->chan);
633 622
634 tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet, 623 tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
635 (unsigned long) mxs_chan); 624 (unsigned long) mxs_chan);
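
mxs-dma is only half converted: completion bookkeeping moves to dma_cookie_complete(), but tx_status keeps returning the driver-tracked mxs_chan->status, since a cyclic transfer never completes in cookie terms. After this hunk the function is effectively:

/* The effective shape of mxs_dma_tx_status() after conversion:
 * state comes from the generic cookies, the return value stays
 * driver-owned to cover the cyclic case. */
static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);

	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
	return mxs_chan->status;
}
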
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 823f58179f9d..c93bb0459972 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -25,6 +25,8 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/pch_dma.h> 26#include <linux/pch_dma.h>
27 27
28#include "dmaengine.h"
29
28#define DRV_NAME "pch-dma" 30#define DRV_NAME "pch-dma"
29 31
30#define DMA_CTL0_DISABLE 0x0 32#define DMA_CTL0_DISABLE 0x0
@@ -105,7 +107,6 @@ struct pch_dma_chan {
105 107
106 spinlock_t lock; 108 spinlock_t lock;
107 109
108 dma_cookie_t completed_cookie;
109 struct list_head active_list; 110 struct list_head active_list;
110 struct list_head queue; 111 struct list_head queue;
111 struct list_head free_list; 112 struct list_head free_list;
@@ -416,20 +417,6 @@ static void pdc_advance_work(struct pch_dma_chan *pd_chan)
416 } 417 }
417} 418}
418 419
419static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
420 struct pch_dma_desc *desc)
421{
422 dma_cookie_t cookie = pd_chan->chan.cookie;
423
424 if (++cookie < 0)
425 cookie = 1;
426
427 pd_chan->chan.cookie = cookie;
428 desc->txd.cookie = cookie;
429
430 return cookie;
431}
432
433static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) 420static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
434{ 421{
435 struct pch_dma_desc *desc = to_pd_desc(txd); 422 struct pch_dma_desc *desc = to_pd_desc(txd);
@@ -437,7 +424,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
437 dma_cookie_t cookie; 424 dma_cookie_t cookie;
438 425
439 spin_lock(&pd_chan->lock); 426 spin_lock(&pd_chan->lock);
440 cookie = pdc_assign_cookie(pd_chan, desc); 427 cookie = dma_cookie_assign(txd);
441 428
442 if (list_empty(&pd_chan->active_list)) { 429 if (list_empty(&pd_chan->active_list)) {
443 list_add_tail(&desc->desc_node, &pd_chan->active_list); 430 list_add_tail(&desc->desc_node, &pd_chan->active_list);
@@ -544,7 +531,7 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
544 spin_lock_irq(&pd_chan->lock); 531 spin_lock_irq(&pd_chan->lock);
545 list_splice(&tmp_list, &pd_chan->free_list); 532 list_splice(&tmp_list, &pd_chan->free_list);
546 pd_chan->descs_allocated = i; 533 pd_chan->descs_allocated = i;
547 pd_chan->completed_cookie = chan->cookie = 1; 534 dma_cookie_init(chan);
548 spin_unlock_irq(&pd_chan->lock); 535 spin_unlock_irq(&pd_chan->lock);
549 536
550 pdc_enable_irq(chan, 1); 537 pdc_enable_irq(chan, 1);
@@ -578,19 +565,12 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
578 struct dma_tx_state *txstate) 565 struct dma_tx_state *txstate)
579{ 566{
580 struct pch_dma_chan *pd_chan = to_pd_chan(chan); 567 struct pch_dma_chan *pd_chan = to_pd_chan(chan);
581 dma_cookie_t last_used; 568 enum dma_status ret;
582 dma_cookie_t last_completed;
583 int ret;
584 569
585 spin_lock_irq(&pd_chan->lock); 570 spin_lock_irq(&pd_chan->lock);
586 last_completed = pd_chan->completed_cookie; 571 ret = dma_cookie_status(chan, cookie, txstate);
587 last_used = chan->cookie;
588 spin_unlock_irq(&pd_chan->lock); 572 spin_unlock_irq(&pd_chan->lock);
589 573
590 ret = dma_async_is_complete(cookie, last_completed, last_used);
591
592 dma_set_tx_state(txstate, last_completed, last_used, 0);
593
594 return ret; 574 return ret;
595} 575}
596 576
@@ -932,7 +912,7 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
932 struct pch_dma_chan *pd_chan = &pd->channels[i]; 912 struct pch_dma_chan *pd_chan = &pd->channels[i];
933 913
934 pd_chan->chan.device = &pd->dma; 914 pd_chan->chan.device = &pd->dma;
935 pd_chan->chan.cookie = 1; 915 dma_cookie_init(&pd_chan->chan);
936 916
937 pd_chan->membase = &regs->desc[i]; 917 pd_chan->membase = &regs->desc[i];
938 918
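
pch_dma resets its cookies at probe and again on every alloc_chan_resources(), and both sites collapse into the same initializer. A sketch, matching the "completed_cookie = chan->cookie = 1" idiom it replaces:

/* Sketch of dma_cookie_init(): both cookies start at
 * DMA_MIN_COOKIE (1) -- nothing issued, nothing completed. */
static inline void dma_cookie_init(struct dma_chan *chan)
{
	chan->cookie = DMA_MIN_COOKIE;
	chan->completed_cookie = DMA_MIN_COOKIE;
}
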
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 7253d17f05f8..e863d7fc465a 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -28,6 +28,7 @@
28#include <linux/scatterlist.h> 28#include <linux/scatterlist.h>
29#include <linux/of.h> 29#include <linux/of.h>
30 30
31#include "dmaengine.h"
31#define PL330_MAX_CHAN 8 32#define PL330_MAX_CHAN 8
32#define PL330_MAX_IRQS 32 33#define PL330_MAX_IRQS 32
33#define PL330_MAX_PERI 32 34#define PL330_MAX_PERI 32
@@ -285,6 +286,7 @@ static unsigned cmd_line;
285#endif 286#endif
286 287
287/* The number of default descriptors */ 288/* The number of default descriptors */
289
288#define NR_DEFAULT_DESC 16 290#define NR_DEFAULT_DESC 16
289 291
290/* Populated by the PL330 core driver for DMA API driver's info */ 292/* Populated by the PL330 core driver for DMA API driver's info */
@@ -545,9 +547,6 @@ struct dma_pl330_chan {
545 /* DMA-Engine Channel */ 547 /* DMA-Engine Channel */
546 struct dma_chan chan; 548 struct dma_chan chan;
547 549
548 /* Last completed cookie */
549 dma_cookie_t completed;
550
551 /* List of to be xfered descriptors */ 550 /* List of to be xfered descriptors */
552 struct list_head work_list; 551 struct list_head work_list;
553 552
@@ -2320,7 +2319,7 @@ static void pl330_tasklet(unsigned long data)
2320 /* Pick up ripe tomatoes */ 2319 /* Pick up ripe tomatoes */
2321 list_for_each_entry_safe(desc, _dt, &pch->work_list, node) 2320 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
2322 if (desc->status == DONE) { 2321 if (desc->status == DONE) {
2323 pch->completed = desc->txd.cookie; 2322 dma_cookie_complete(&desc->txd);
2324 list_move_tail(&desc->node, &list); 2323 list_move_tail(&desc->node, &list);
2325 } 2324 }
2326 2325
@@ -2391,7 +2390,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
2391 2390
2392 spin_lock_irqsave(&pch->lock, flags); 2391 spin_lock_irqsave(&pch->lock, flags);
2393 2392
2394 pch->completed = chan->cookie = 1; 2393 dma_cookie_init(chan);
2395 pch->cyclic = false; 2394 pch->cyclic = false;
2396 2395
2397 pch->pl330_chid = pl330_request_channel(&pdmac->pif); 2396 pch->pl330_chid = pl330_request_channel(&pdmac->pif);
@@ -2426,7 +2425,6 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
2426 /* Mark all desc done */ 2425 /* Mark all desc done */
2427 list_for_each_entry_safe(desc, _dt, &pch->work_list , node) { 2426 list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
2428 desc->status = DONE; 2427 desc->status = DONE;
2429 pch->completed = desc->txd.cookie;
2430 list_move_tail(&desc->node, &list); 2428 list_move_tail(&desc->node, &list);
2431 } 2429 }
2432 2430
@@ -2482,18 +2480,7 @@ static enum dma_status
2482pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 2480pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2483 struct dma_tx_state *txstate) 2481 struct dma_tx_state *txstate)
2484{ 2482{
2485 struct dma_pl330_chan *pch = to_pchan(chan); 2483 return dma_cookie_status(chan, cookie, txstate);
2486 dma_cookie_t last_done, last_used;
2487 int ret;
2488
2489 last_done = pch->completed;
2490 last_used = chan->cookie;
2491
2492 ret = dma_async_is_complete(cookie, last_done, last_used);
2493
2494 dma_set_tx_state(txstate, last_done, last_used, 0);
2495
2496 return ret;
2497} 2484}
2498 2485
2499static void pl330_issue_pending(struct dma_chan *chan) 2486static void pl330_issue_pending(struct dma_chan *chan)
@@ -2516,26 +2503,16 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2516 spin_lock_irqsave(&pch->lock, flags); 2503 spin_lock_irqsave(&pch->lock, flags);
2517 2504
2518 /* Assign cookies to all nodes */ 2505 /* Assign cookies to all nodes */
2519 cookie = tx->chan->cookie;
2520
2521 while (!list_empty(&last->node)) { 2506 while (!list_empty(&last->node)) {
2522 desc = list_entry(last->node.next, struct dma_pl330_desc, node); 2507 desc = list_entry(last->node.next, struct dma_pl330_desc, node);
2523 2508
2524 if (++cookie < 0) 2509 dma_cookie_assign(&desc->txd);
2525 cookie = 1;
2526 desc->txd.cookie = cookie;
2527 2510
2528 list_move_tail(&desc->node, &pch->work_list); 2511 list_move_tail(&desc->node, &pch->work_list);
2529 } 2512 }
2530 2513
2531 if (++cookie < 0) 2514 cookie = dma_cookie_assign(&last->txd);
2532 cookie = 1;
2533 last->txd.cookie = cookie;
2534
2535 list_add_tail(&last->node, &pch->work_list); 2515 list_add_tail(&last->node, &pch->work_list);
2536
2537 tx->chan->cookie = cookie;
2538
2539 spin_unlock_irqrestore(&pch->lock, flags); 2516 spin_unlock_irqrestore(&pch->lock, flags);
2540 2517
2541 return cookie; 2518 return cookie;
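
pl330 submits a whole chain of nodes per transfer, so its tx_submit() now calls dma_cookie_assign() once per node and returns the cookie of the last one. Because the helper updates chan->cookie itself, the old tail fix-up (tx->chan->cookie = cookie) disappears. The same pattern in miniature, as a hypothetical illustration:

/* Hypothetical miniature of the chained submit above: give each
 * node its own cookie, hand the client the last.  Assumes the
 * caller holds the channel lock, as pl330_tx_submit() does. */
static dma_cookie_t submit_chain(struct dma_async_tx_descriptor **txs,
				 unsigned int n)
{
	dma_cookie_t cookie = -EINVAL;
	unsigned int i;

	for (i = 0; i < n; i++)
		cookie = dma_cookie_assign(txs[i]);

	return cookie;
}
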
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index fc457a7e8832..ced98826684a 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -46,6 +46,7 @@
46#include <asm/dcr.h> 46#include <asm/dcr.h>
47#include <asm/dcr-regs.h> 47#include <asm/dcr-regs.h>
48#include "adma.h" 48#include "adma.h"
49#include "../dmaengine.h"
49 50
50enum ppc_adma_init_code { 51enum ppc_adma_init_code {
51 PPC_ADMA_INIT_OK = 0, 52 PPC_ADMA_INIT_OK = 0,
@@ -1930,7 +1931,7 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
1930 if (end_of_chain && slot_cnt) { 1931 if (end_of_chain && slot_cnt) {
1931 /* Should wait for ZeroSum completion */ 1932 /* Should wait for ZeroSum completion */
1932 if (cookie > 0) 1933 if (cookie > 0)
1933 chan->completed_cookie = cookie; 1934 chan->common.completed_cookie = cookie;
1934 return; 1935 return;
1935 } 1936 }
1936 1937
@@ -1960,7 +1961,7 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
1960 BUG_ON(!seen_current); 1961 BUG_ON(!seen_current);
1961 1962
1962 if (cookie > 0) { 1963 if (cookie > 0) {
1963 chan->completed_cookie = cookie; 1964 chan->common.completed_cookie = cookie;
1964 pr_debug("\tcompleted cookie %d\n", cookie); 1965 pr_debug("\tcompleted cookie %d\n", cookie);
1965 } 1966 }
1966 1967
@@ -2150,22 +2151,6 @@ static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan)
2150} 2151}
2151 2152
2152/** 2153/**
2153 * ppc440spe_desc_assign_cookie - assign a cookie
2154 */
2155static dma_cookie_t ppc440spe_desc_assign_cookie(
2156 struct ppc440spe_adma_chan *chan,
2157 struct ppc440spe_adma_desc_slot *desc)
2158{
2159 dma_cookie_t cookie = chan->common.cookie;
2160
2161 cookie++;
2162 if (cookie < 0)
2163 cookie = 1;
2164 chan->common.cookie = desc->async_tx.cookie = cookie;
2165 return cookie;
2166}
2167
2168/**
2169 * ppc440spe_rxor_set_region_data - 2154 * ppc440spe_rxor_set_region_data -
2170 */ 2155 */
2171static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc, 2156static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc,
@@ -2235,8 +2220,7 @@ static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx)
2235 slots_per_op = group_start->slots_per_op; 2220 slots_per_op = group_start->slots_per_op;
2236 2221
2237 spin_lock_bh(&chan->lock); 2222 spin_lock_bh(&chan->lock);
2238 2223 cookie = dma_cookie_assign(tx);
2239 cookie = ppc440spe_desc_assign_cookie(chan, sw_desc);
2240 2224
2241 if (unlikely(list_empty(&chan->chain))) { 2225 if (unlikely(list_empty(&chan->chain))) {
2242 /* first peer */ 2226 /* first peer */
@@ -3944,28 +3928,16 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
3944 dma_cookie_t cookie, struct dma_tx_state *txstate) 3928 dma_cookie_t cookie, struct dma_tx_state *txstate)
3945{ 3929{
3946 struct ppc440spe_adma_chan *ppc440spe_chan; 3930 struct ppc440spe_adma_chan *ppc440spe_chan;
3947 dma_cookie_t last_used;
3948 dma_cookie_t last_complete;
3949 enum dma_status ret; 3931 enum dma_status ret;
3950 3932
3951 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 3933 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3952 last_used = chan->cookie; 3934 ret = dma_cookie_status(chan, cookie, txstate);
3953 last_complete = ppc440spe_chan->completed_cookie;
3954
3955 dma_set_tx_state(txstate, last_complete, last_used, 0);
3956
3957 ret = dma_async_is_complete(cookie, last_complete, last_used);
3958 if (ret == DMA_SUCCESS) 3935 if (ret == DMA_SUCCESS)
3959 return ret; 3936 return ret;
3960 3937
3961 ppc440spe_adma_slot_cleanup(ppc440spe_chan); 3938 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3962 3939
3963 last_used = chan->cookie; 3940 return dma_cookie_status(chan, cookie, txstate);
3964 last_complete = ppc440spe_chan->completed_cookie;
3965
3966 dma_set_tx_state(txstate, last_complete, last_used, 0);
3967
3968 return dma_async_is_complete(cookie, last_complete, last_used);
3969} 3941}
3970 3942
3971/** 3943/**
@@ -4050,16 +4022,12 @@ static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan)
4050 async_tx_ack(&sw_desc->async_tx); 4022 async_tx_ack(&sw_desc->async_tx);
4051 ppc440spe_desc_init_null_xor(group_start); 4023 ppc440spe_desc_init_null_xor(group_start);
4052 4024
4053 cookie = chan->common.cookie; 4025 cookie = dma_cookie_assign(&sw_desc->async_tx);
4054 cookie++;
4055 if (cookie <= 1)
4056 cookie = 2;
4057 4026
4058 /* initialize the completed cookie to be less than 4027 /* initialize the completed cookie to be less than
4059 * the most recently used cookie 4028 * the most recently used cookie
4060 */ 4029 */
4061 chan->completed_cookie = cookie - 1; 4030 chan->common.completed_cookie = cookie - 1;
4062 chan->common.cookie = sw_desc->async_tx.cookie = cookie;
4063 4031
4064 /* channel should not be busy */ 4032 /* channel should not be busy */
4065 BUG_ON(ppc440spe_chan_is_busy(chan)); 4033 BUG_ON(ppc440spe_chan_is_busy(chan));
@@ -4529,6 +4497,7 @@ static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev)
4529 INIT_LIST_HEAD(&chan->all_slots); 4497 INIT_LIST_HEAD(&chan->all_slots);
4530 chan->device = adev; 4498 chan->device = adev;
4531 chan->common.device = &adev->common; 4499 chan->common.device = &adev->common;
4500 dma_cookie_init(&chan->common);
4532 list_add_tail(&chan->common.device_node, &adev->common.channels); 4501 list_add_tail(&chan->common.device_node, &adev->common.channels);
4533 tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet, 4502 tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet,
4534 (unsigned long)chan); 4503 (unsigned long)chan);
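
The null-transfer starters (these hunks, and the iop-adma pair earlier) need the dummy descriptor to look in-flight until its interrupt completes it; the old code arranged that by hand-picking cookie 2 and completed cookie 1. The conversion keeps the same invariant, completed_cookie == cookie - 1, in two lines. As a sketch against the driver's own types, with a hypothetical helper name:

/* Sketch: start a null descriptor so it reads as pending until
 * the IRQ-driven cleanup completes it. */
static void ppc440spe_start_null_desc(struct ppc440spe_adma_chan *chan,
				      struct ppc440spe_adma_desc_slot *sw_desc)
{
	dma_cookie_t cookie = dma_cookie_assign(&sw_desc->async_tx);

	/* completion trails the dummy transfer by exactly one */
	chan->common.completed_cookie = cookie - 1;
}
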
diff --git a/drivers/dma/ppc4xx/adma.h b/drivers/dma/ppc4xx/adma.h
index 8ada5a812e3b..26b7a5ed9ac7 100644
--- a/drivers/dma/ppc4xx/adma.h
+++ b/drivers/dma/ppc4xx/adma.h
@@ -81,7 +81,6 @@ struct ppc440spe_adma_device {
81 * @common: common dmaengine channel object members 81 * @common: common dmaengine channel object members
82 * @all_slots: complete domain of slots usable by the channel 82 * @all_slots: complete domain of slots usable by the channel
83 * @pending: allows batching of hardware operations 83 * @pending: allows batching of hardware operations
84 * @completed_cookie: identifier for the most recently completed operation
85 * @slots_allocated: records the actual size of the descriptor slot pool 84 * @slots_allocated: records the actual size of the descriptor slot pool
86 * @hw_chain_inited: h/w descriptor chain initialization flag 85 * @hw_chain_inited: h/w descriptor chain initialization flag
87 * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs 86 * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs
@@ -99,7 +98,6 @@ struct ppc440spe_adma_chan {
99 struct list_head all_slots; 98 struct list_head all_slots;
100 struct ppc440spe_adma_desc_slot *last_used; 99 struct ppc440spe_adma_desc_slot *last_used;
101 int pending; 100 int pending;
102 dma_cookie_t completed_cookie;
103 int slots_allocated; 101 int slots_allocated;
104 int hw_chain_inited; 102 int hw_chain_inited;
105 struct tasklet_struct irq_tasklet; 103 struct tasklet_struct irq_tasklet;
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 812fd76e9c18..5c4088603dd4 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -30,6 +30,8 @@
30#include <linux/kdebug.h> 30#include <linux/kdebug.h>
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32#include <linux/rculist.h> 32#include <linux/rculist.h>
33
34#include "dmaengine.h"
33#include "shdma.h" 35#include "shdma.h"
34 36
35/* DMA descriptor control */ 37/* DMA descriptor control */
@@ -296,13 +298,7 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
296 else 298 else
297 power_up = false; 299 power_up = false;
298 300
299 cookie = sh_chan->common.cookie; 301 cookie = dma_cookie_assign(tx);
300 cookie++;
301 if (cookie < 0)
302 cookie = 1;
303
304 sh_chan->common.cookie = cookie;
305 tx->cookie = cookie;
306 302
307 /* Mark all chunks of this descriptor as submitted, move to the queue */ 303 /* Mark all chunks of this descriptor as submitted, move to the queue */
308 list_for_each_entry_safe(chunk, c, desc->node.prev, node) { 304 list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
@@ -764,12 +760,12 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
764 cookie = tx->cookie; 760 cookie = tx->cookie;
765 761
766 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { 762 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
767 if (sh_chan->completed_cookie != desc->cookie - 1) 763 if (sh_chan->common.completed_cookie != desc->cookie - 1)
768 dev_dbg(sh_chan->dev, 764 dev_dbg(sh_chan->dev,
769 "Completing cookie %d, expected %d\n", 765 "Completing cookie %d, expected %d\n",
770 desc->cookie, 766 desc->cookie,
771 sh_chan->completed_cookie + 1); 767 sh_chan->common.completed_cookie + 1);
772 sh_chan->completed_cookie = desc->cookie; 768 sh_chan->common.completed_cookie = desc->cookie;
773 } 769 }
774 770
775 /* Call callback on the last chunk */ 771 /* Call callback on the last chunk */
@@ -823,7 +819,7 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
823 * Terminating and the loop completed normally: forgive 819 * Terminating and the loop completed normally: forgive
824 * uncompleted cookies 820 * uncompleted cookies
825 */ 821 */
826 sh_chan->completed_cookie = sh_chan->common.cookie; 822 sh_chan->common.completed_cookie = sh_chan->common.cookie;
827 823
828 spin_unlock_irqrestore(&sh_chan->desc_lock, flags); 824 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
829 825
@@ -883,23 +879,14 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
883 struct dma_tx_state *txstate) 879 struct dma_tx_state *txstate)
884{ 880{
885 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 881 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
886 dma_cookie_t last_used;
887 dma_cookie_t last_complete;
888 enum dma_status status; 882 enum dma_status status;
889 unsigned long flags; 883 unsigned long flags;
890 884
891 sh_dmae_chan_ld_cleanup(sh_chan, false); 885 sh_dmae_chan_ld_cleanup(sh_chan, false);
892 886
893 /* First read completed cookie to avoid a skew */
894 last_complete = sh_chan->completed_cookie;
895 rmb();
896 last_used = chan->cookie;
897 BUG_ON(last_complete < 0);
898 dma_set_tx_state(txstate, last_complete, last_used, 0);
899
900 spin_lock_irqsave(&sh_chan->desc_lock, flags); 887 spin_lock_irqsave(&sh_chan->desc_lock, flags);
901 888
902 status = dma_async_is_complete(cookie, last_complete, last_used); 889 status = dma_cookie_status(chan, cookie, txstate);
903 890
904 /* 891 /*
905 * If we don't find cookie on the queue, it has been aborted and we have 892 * If we don't find cookie on the queue, it has been aborted and we have
@@ -1102,6 +1089,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
1102 1089
1103 /* reference struct dma_device */ 1090 /* reference struct dma_device */
1104 new_sh_chan->common.device = &shdev->common; 1091 new_sh_chan->common.device = &shdev->common;
1092 dma_cookie_init(&new_sh_chan->common);
1105 1093
1106 new_sh_chan->dev = shdev->common.dev; 1094 new_sh_chan->dev = shdev->common.dev;
1107 new_sh_chan->id = id; 1095 new_sh_chan->id = id;
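
shdma's terminate path survives with just a rename: on shutdown it forgives all outstanding work by fast-forwarding the channel's completed cookie to the last one issued, so later status queries see the aborted descriptors as done rather than forever pending. The idiom, as an illustrative fragment with a hypothetical name:

/* Hypothetical helper naming the shdma terminate idiom above;
 * afterwards dma_cookie_status() returns DMA_SUCCESS for every
 * cookie issued so far.  Channel lock held by the caller. */
static void forgive_outstanding_cookies(struct dma_chan *chan)
{
	chan->completed_cookie = chan->cookie;
}
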
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 2b55a276dc5b..0b1d2c105f02 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -30,7 +30,6 @@ enum dmae_pm_state {
30}; 30};
31 31
32struct sh_dmae_chan { 32struct sh_dmae_chan {
33 dma_cookie_t completed_cookie; /* The maximum cookie completed */
34 spinlock_t desc_lock; /* Descriptor operation lock */ 33 spinlock_t desc_lock; /* Descriptor operation lock */
35 struct list_head ld_queue; /* Link descriptors queue */ 34 struct list_head ld_queue; /* Link descriptors queue */
36 struct list_head ld_free; /* Link descriptors free */ 35 struct list_head ld_free; /* Link descriptors free */
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 2333810d1688..45ba352fb871 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -18,6 +18,8 @@
18#include <linux/of_platform.h> 18#include <linux/of_platform.h>
19#include <linux/sirfsoc_dma.h> 19#include <linux/sirfsoc_dma.h>
20 20
21#include "dmaengine.h"
22
21#define SIRFSOC_DMA_DESCRIPTORS 16 23#define SIRFSOC_DMA_DESCRIPTORS 16
22#define SIRFSOC_DMA_CHANNELS 16 24#define SIRFSOC_DMA_CHANNELS 16
23 25
@@ -59,7 +61,6 @@ struct sirfsoc_dma_chan {
59 struct list_head queued; 61 struct list_head queued;
60 struct list_head active; 62 struct list_head active;
61 struct list_head completed; 63 struct list_head completed;
62 dma_cookie_t completed_cookie;
63 unsigned long happened_cyclic; 64 unsigned long happened_cyclic;
64 unsigned long completed_cyclic; 65 unsigned long completed_cyclic;
65 66
@@ -208,7 +209,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
208 /* Free descriptors */ 209 /* Free descriptors */
209 spin_lock_irqsave(&schan->lock, flags); 210 spin_lock_irqsave(&schan->lock, flags);
210 list_splice_tail_init(&list, &schan->free); 211 list_splice_tail_init(&list, &schan->free);
211 schan->completed_cookie = last_cookie; 212 schan->chan.completed_cookie = last_cookie;
212 spin_unlock_irqrestore(&schan->lock, flags); 213 spin_unlock_irqrestore(&schan->lock, flags);
213 } else { 214 } else {
214 /* for cyclic channel, desc is always in active list */ 215 /* for cyclic channel, desc is always in active list */
@@ -258,13 +259,7 @@ static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
258 /* Move descriptor to queue */ 259 /* Move descriptor to queue */
259 list_move_tail(&sdesc->node, &schan->queued); 260 list_move_tail(&sdesc->node, &schan->queued);
260 261
261 /* Update cookie */ 262 cookie = dma_cookie_assign(txd);
262 cookie = schan->chan.cookie + 1;
263 if (cookie <= 0)
264 cookie = 1;
265
266 schan->chan.cookie = cookie;
267 sdesc->desc.cookie = cookie;
268 263
269 spin_unlock_irqrestore(&schan->lock, flags); 264 spin_unlock_irqrestore(&schan->lock, flags);
270 265
@@ -414,16 +409,13 @@ sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
414{ 409{
415 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); 410 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
416 unsigned long flags; 411 unsigned long flags;
417 dma_cookie_t last_used; 412 enum dma_status ret;
418 dma_cookie_t last_complete;
419 413
420 spin_lock_irqsave(&schan->lock, flags); 414 spin_lock_irqsave(&schan->lock, flags);
421 last_used = schan->chan.cookie; 415 ret = dma_cookie_status(chan, cookie, txstate);
422 last_complete = schan->completed_cookie;
423 spin_unlock_irqrestore(&schan->lock, flags); 416 spin_unlock_irqrestore(&schan->lock, flags);
424 417
425 dma_set_tx_state(txstate, last_complete, last_used, 0); 418 return ret;
426 return dma_async_is_complete(cookie, last_complete, last_used);
427} 419}
428 420
429static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved( 421static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
@@ -635,8 +627,7 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op)
635 schan = &sdma->channels[i]; 627 schan = &sdma->channels[i];
636 628
637 schan->chan.device = dma; 629 schan->chan.device = dma;
638 schan->chan.cookie = 1; 630 dma_cookie_init(&schan->chan);
639 schan->completed_cookie = schan->chan.cookie;
640 631
641 INIT_LIST_HEAD(&schan->free); 632 INIT_LIST_HEAD(&schan->free);
642 INIT_LIST_HEAD(&schan->prepared); 633 INIT_LIST_HEAD(&schan->prepared);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index cc5ecbc067a3..1ea6d02d08ab 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -21,6 +21,7 @@
21 21
22#include <plat/ste_dma40.h> 22#include <plat/ste_dma40.h>
23 23
24#include "dmaengine.h"
24#include "ste_dma40_ll.h" 25#include "ste_dma40_ll.h"
25 26
26#define D40_NAME "dma40" 27#define D40_NAME "dma40"
@@ -220,8 +221,6 @@ struct d40_base;
220 * 221 *
221 * @lock: A spinlock to protect this struct. 222 * @lock: A spinlock to protect this struct.
222 * @log_num: The logical number, if any of this channel. 223 * @log_num: The logical number, if any of this channel.
223 * @completed: Starts with 1, after first interrupt it is set to dma engine's
224 * current cookie.
225 * @pending_tx: The number of pending transfers. Used between interrupt handler 224 * @pending_tx: The number of pending transfers. Used between interrupt handler
226 * and tasklet. 225 * and tasklet.
227 * @busy: Set to true when transfer is ongoing on this channel. 226 * @busy: Set to true when transfer is ongoing on this channel.
@@ -250,8 +249,6 @@ struct d40_base;
250struct d40_chan { 249struct d40_chan {
251 spinlock_t lock; 250 spinlock_t lock;
252 int log_num; 251 int log_num;
253 /* ID of the most recent completed transfer */
254 int completed;
255 int pending_tx; 252 int pending_tx;
256 bool busy; 253 bool busy;
257 struct d40_phy_res *phy_chan; 254 struct d40_phy_res *phy_chan;
@@ -1223,21 +1220,14 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1223 chan); 1220 chan);
1224 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); 1221 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1225 unsigned long flags; 1222 unsigned long flags;
1223 dma_cookie_t cookie;
1226 1224
1227 spin_lock_irqsave(&d40c->lock, flags); 1225 spin_lock_irqsave(&d40c->lock, flags);
1228 1226 cookie = dma_cookie_assign(tx);
1229 d40c->chan.cookie++;
1230
1231 if (d40c->chan.cookie < 0)
1232 d40c->chan.cookie = 1;
1233
1234 d40d->txd.cookie = d40c->chan.cookie;
1235
1236 d40_desc_queue(d40c, d40d); 1227 d40_desc_queue(d40c, d40d);
1237
1238 spin_unlock_irqrestore(&d40c->lock, flags); 1228 spin_unlock_irqrestore(&d40c->lock, flags);
1239 1229
1240 return tx->cookie; 1230 return cookie;
1241} 1231}
1242 1232
1243static int d40_start(struct d40_chan *d40c) 1233static int d40_start(struct d40_chan *d40c)
@@ -1357,7 +1347,7 @@ static void dma_tasklet(unsigned long data)
1357 goto err; 1347 goto err;
1358 1348
1359 if (!d40d->cyclic) 1349 if (!d40d->cyclic)
1360 d40c->completed = d40d->txd.cookie; 1350 dma_cookie_complete(&d40d->txd);
1361 1351
1362 /* 1352 /*
1363 * If terminating a channel pending_tx is set to zero. 1353 * If terminating a channel pending_tx is set to zero.
@@ -2182,7 +2172,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
2182 bool is_free_phy; 2172 bool is_free_phy;
2183 spin_lock_irqsave(&d40c->lock, flags); 2173 spin_lock_irqsave(&d40c->lock, flags);
2184 2174
2185 d40c->completed = chan->cookie = 1; 2175 dma_cookie_init(chan);
2186 2176
2187 /* If no dma configuration is set use default configuration (memcpy) */ 2177 /* If no dma configuration is set use default configuration (memcpy) */
2188 if (!d40c->configured) { 2178 if (!d40c->configured) {
@@ -2342,25 +2332,19 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
2342 struct dma_tx_state *txstate) 2332 struct dma_tx_state *txstate)
2343{ 2333{
2344 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2334 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2345 dma_cookie_t last_used; 2335 enum dma_status ret;
2346 dma_cookie_t last_complete;
2347 int ret;
2348 2336
2349 if (d40c->phy_chan == NULL) { 2337 if (d40c->phy_chan == NULL) {
2350 chan_err(d40c, "Cannot read status of unallocated channel\n"); 2338 chan_err(d40c, "Cannot read status of unallocated channel\n");
2351 return -EINVAL; 2339 return -EINVAL;
2352 } 2340 }
2353 2341
2354 last_complete = d40c->completed; 2342 ret = dma_cookie_status(chan, cookie, txstate);
2355 last_used = chan->cookie; 2343 if (ret != DMA_SUCCESS)
2344 dma_set_residue(txstate, stedma40_residue(chan));
2356 2345
2357 if (d40_is_paused(d40c)) 2346 if (d40_is_paused(d40c))
2358 ret = DMA_PAUSED; 2347 ret = DMA_PAUSED;
2359 else
2360 ret = dma_async_is_complete(cookie, last_complete, last_used);
2361
2362 dma_set_tx_state(txstate, last_complete, last_used,
2363 stedma40_residue(chan));
2364 2348
2365 return ret; 2349 return ret;
2366} 2350}
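
ste_dma40 reports a residue, so its conversion also picks up the new dma_set_residue() accessor: dma_cookie_status() fills in the cookie state, and the residue is patched in afterwards, skipped when the transfer already completed and the residue is by definition zero. A sketch of the accessor:

/* Sketch of dma_set_residue(): update only the residue, and only
 * when the caller actually supplied a state to fill. */
static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
{
	if (state)
		state->residue = residue;
}
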
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index a6f9c1684a0f..d408c2206023 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -31,6 +31,8 @@
31 31
32#include <linux/timb_dma.h> 32#include <linux/timb_dma.h>
33 33
34#include "dmaengine.h"
35
34#define DRIVER_NAME "timb-dma" 36#define DRIVER_NAME "timb-dma"
35 37
36/* Global DMA registers */ 38/* Global DMA registers */
@@ -84,7 +86,6 @@ struct timb_dma_chan {
84 especially the lists and descriptors, 86 especially the lists and descriptors,
85 from races between the tasklet and calls 87 from races between the tasklet and calls
86 from above */ 88 from above */
87 dma_cookie_t last_completed_cookie;
88 bool ongoing; 89 bool ongoing;
89 struct list_head active_list; 90 struct list_head active_list;
90 struct list_head queue; 91 struct list_head queue;
@@ -284,7 +285,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
284 else 285 else
285 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); 286 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
286*/ 287*/
287 td_chan->last_completed_cookie = txd->cookie; 288 dma_cookie_complete(txd);
288 td_chan->ongoing = false; 289 td_chan->ongoing = false;
289 290
290 callback = txd->callback; 291 callback = txd->callback;
@@ -349,12 +350,7 @@ static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
349 dma_cookie_t cookie; 350 dma_cookie_t cookie;
350 351
351 spin_lock_bh(&td_chan->lock); 352 spin_lock_bh(&td_chan->lock);
352 353 cookie = dma_cookie_assign(txd);
353 cookie = txd->chan->cookie;
354 if (++cookie < 0)
355 cookie = 1;
356 txd->chan->cookie = cookie;
357 txd->cookie = cookie;
358 354
359 if (list_empty(&td_chan->active_list)) { 355 if (list_empty(&td_chan->active_list)) {
360 dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__, 356 dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
@@ -481,8 +477,7 @@ static int td_alloc_chan_resources(struct dma_chan *chan)
481 } 477 }
482 478
483 spin_lock_bh(&td_chan->lock); 479 spin_lock_bh(&td_chan->lock);
484 td_chan->last_completed_cookie = 1; 480 dma_cookie_init(chan);
485 chan->cookie = 1;
486 spin_unlock_bh(&td_chan->lock); 481 spin_unlock_bh(&td_chan->lock);
487 482
488 return 0; 483 return 0;
@@ -515,24 +510,13 @@ static void td_free_chan_resources(struct dma_chan *chan)
515static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 510static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
516 struct dma_tx_state *txstate) 511 struct dma_tx_state *txstate)
517{ 512{
518 struct timb_dma_chan *td_chan = 513 enum dma_status ret;
519 container_of(chan, struct timb_dma_chan, chan);
520 dma_cookie_t last_used;
521 dma_cookie_t last_complete;
522 int ret;
523 514
524 dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); 515 dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
525 516
526 last_complete = td_chan->last_completed_cookie; 517 ret = dma_cookie_status(chan, cookie, txstate);
527 last_used = chan->cookie;
528
529 ret = dma_async_is_complete(cookie, last_complete, last_used);
530
531 dma_set_tx_state(txstate, last_complete, last_used, 0);
532 518
533 dev_dbg(chan2dev(chan), 519 dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);
534 "%s: exit, ret: %d, last_complete: %d, last_used: %d\n",
535 __func__, ret, last_complete, last_used);
536 520
537 return ret; 521 return ret;
538} 522}
@@ -766,7 +750,7 @@ static int __devinit td_probe(struct platform_device *pdev)
766 } 750 }
767 751
768 td_chan->chan.device = &td->dma; 752 td_chan->chan.device = &td->dma;
769 td_chan->chan.cookie = 1; 753 dma_cookie_init(&td_chan->chan);
770 spin_lock_init(&td_chan->lock); 754 spin_lock_init(&td_chan->lock);
771 INIT_LIST_HEAD(&td_chan->active_list); 755 INIT_LIST_HEAD(&td_chan->active_list);
772 INIT_LIST_HEAD(&td_chan->queue); 756 INIT_LIST_HEAD(&td_chan->queue);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 6122c364cf11..40440f946385 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -15,6 +15,8 @@
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/scatterlist.h> 17#include <linux/scatterlist.h>
18
19#include "dmaengine.h"
18#include "txx9dmac.h" 20#include "txx9dmac.h"
19 21
20static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan) 22static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
@@ -279,21 +281,6 @@ static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
279 } 281 }
280} 282}
281 283
282/* Called with dc->lock held and bh disabled */
283static dma_cookie_t
284txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc)
285{
286 dma_cookie_t cookie = dc->chan.cookie;
287
288 if (++cookie < 0)
289 cookie = 1;
290
291 dc->chan.cookie = cookie;
292 desc->txd.cookie = cookie;
293
294 return cookie;
295}
296
297/*----------------------------------------------------------------------*/ 284/*----------------------------------------------------------------------*/
298 285
299static void txx9dmac_dump_regs(struct txx9dmac_chan *dc) 286static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
@@ -424,7 +411,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
424 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", 411 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
425 txd->cookie, desc); 412 txd->cookie, desc);
426 413
427 dc->completed = txd->cookie; 414 dma_cookie_complete(txd);
428 callback = txd->callback; 415 callback = txd->callback;
429 param = txd->callback_param; 416 param = txd->callback_param;
430 417
@@ -738,7 +725,7 @@ static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
738 dma_cookie_t cookie; 725 dma_cookie_t cookie;
739 726
740 spin_lock_bh(&dc->lock); 727 spin_lock_bh(&dc->lock);
741 cookie = txx9dmac_assign_cookie(dc, desc); 728 cookie = dma_cookie_assign(tx);
742 729
743 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n", 730 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
744 desc->txd.cookie, desc); 731 desc->txd.cookie, desc);
@@ -972,27 +959,17 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
972 struct dma_tx_state *txstate) 959 struct dma_tx_state *txstate)
973{ 960{
974 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 961 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
975 dma_cookie_t last_used; 962 enum dma_status ret;
976 dma_cookie_t last_complete;
977 int ret;
978 963
979 last_complete = dc->completed; 964 ret = dma_cookie_status(chan, cookie, txstate);
980 last_used = chan->cookie;
981
982 ret = dma_async_is_complete(cookie, last_complete, last_used);
983 if (ret != DMA_SUCCESS) { 965 if (ret != DMA_SUCCESS) {
984 spin_lock_bh(&dc->lock); 966 spin_lock_bh(&dc->lock);
985 txx9dmac_scan_descriptors(dc); 967 txx9dmac_scan_descriptors(dc);
986 spin_unlock_bh(&dc->lock); 968 spin_unlock_bh(&dc->lock);
987 969
988 last_complete = dc->completed; 970 ret = dma_cookie_status(chan, cookie, txstate);
989 last_used = chan->cookie;
990
991 ret = dma_async_is_complete(cookie, last_complete, last_used);
992 } 971 }
993 972
994 dma_set_tx_state(txstate, last_complete, last_used, 0);
995
996 return ret; 973 return ret;
997} 974}
998 975
@@ -1057,7 +1034,7 @@ static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
1057 return -EIO; 1034 return -EIO;
1058 } 1035 }
1059 1036
1060 dc->completed = chan->cookie = 1; 1037 dma_cookie_init(chan);
1061 1038
1062 dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE; 1039 dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
1063 txx9dmac_chan_set_SMPCHN(dc); 1040 txx9dmac_chan_set_SMPCHN(dc);
@@ -1186,7 +1163,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
1186 dc->ddev->chan[ch] = dc; 1163 dc->ddev->chan[ch] = dc;
1187 dc->chan.device = &dc->dma; 1164 dc->chan.device = &dc->dma;
1188 list_add_tail(&dc->chan.device_node, &dc->chan.device->channels); 1165 list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
1189 dc->chan.cookie = dc->completed = 1; 1166 dma_cookie_init(&dc->chan);
1190 1167
1191 if (is_dmac64(dc)) 1168 if (is_dmac64(dc))
1192 dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch]; 1169 dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h
index 365d42366b9f..f5a760598882 100644
--- a/drivers/dma/txx9dmac.h
+++ b/drivers/dma/txx9dmac.h
@@ -172,7 +172,6 @@ struct txx9dmac_chan {
172 spinlock_t lock; 172 spinlock_t lock;
173 173
174 /* these other elements are all protected by lock */ 174 /* these other elements are all protected by lock */
175 dma_cookie_t completed;
176 struct list_head active_list; 175 struct list_head active_list;
177 struct list_head queue; 176 struct list_head queue;
178 struct list_head free_list; 177 struct list_head free_list;