-rw-r--r--  arch/avr32/mach-at32ap/at32ap700x.c          |   27
-rw-r--r--  crypto/async_tx/async_memcpy.c               |   12
-rw-r--r--  crypto/async_tx/async_memset.c               |   12
-rw-r--r--  crypto/async_tx/async_tx.c                   |   33
-rw-r--r--  crypto/async_tx/async_xor.c                  |  262
-rw-r--r--  drivers/dca/dca-core.c                       |  131
-rw-r--r--  drivers/dca/dca-sysfs.c                      |    3
-rw-r--r--  drivers/dma/Kconfig                          |   37
-rw-r--r--  drivers/dma/Makefile                         |    3
-rw-r--r--  drivers/dma/dmaengine.c                      |   35
-rw-r--r--  drivers/dma/dmatest.c                        |  444
-rw-r--r--  drivers/dma/dw_dmac.c                        | 1122
-rw-r--r--  drivers/dma/dw_dmac_regs.h                   |  225
-rw-r--r--  drivers/dma/fsldma.c                         |   38
-rw-r--r--  drivers/dma/ioat.c                           |   15
-rw-r--r--  drivers/dma/ioat_dca.c                       |  244
-rw-r--r--  drivers/dma/ioat_dma.c                       |  402
-rw-r--r--  drivers/dma/ioatdma.h                        |   28
-rw-r--r--  drivers/dma/ioatdma_hw.h                     |    1
-rw-r--r--  drivers/dma/ioatdma_registers.h              |   20
-rw-r--r--  drivers/dma/iop-adma.c                       |   53
-rw-r--r--  drivers/dma/mv_xor.c                         | 1375
-rw-r--r--  drivers/dma/mv_xor.h                         |  183
-rw-r--r--  include/asm-arm/arch-iop13xx/adma.h          |   18
-rw-r--r--  include/asm-arm/hardware/iop3xx-adma.h       |    4
-rw-r--r--  include/asm-arm/plat-orion/mv_xor.h          |   28
-rw-r--r--  include/asm-avr32/arch-at32ap/at32ap700x.h   |   16
-rw-r--r--  include/linux/async_tx.h                     |   11
-rw-r--r--  include/linux/dca.h                          |    7
-rw-r--r--  include/linux/dmaengine.h                    |   69
-rw-r--r--  include/linux/dw_dmac.h                      |   62
-rw-r--r--  include/linux/pci_ids.h                      |    8
-rw-r--r--  net/core/user_dma.c                          |    1
33 files changed, 4590 insertions, 339 deletions
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 021d51217184..604f44f5dd16 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -7,6 +7,7 @@
7 */ 7 */
8#include <linux/clk.h> 8#include <linux/clk.h>
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <linux/dw_dmac.h>
10#include <linux/fb.h> 11#include <linux/fb.h>
11#include <linux/init.h> 12#include <linux/init.h>
12#include <linux/platform_device.h> 13#include <linux/platform_device.h>
@@ -594,6 +595,17 @@ static void __init genclk_init_parent(struct clk *clk)
594 clk->parent = parent; 595 clk->parent = parent;
595} 596}
596 597
598static struct dw_dma_platform_data dw_dmac0_data = {
599 .nr_channels = 3,
600};
601
602static struct resource dw_dmac0_resource[] = {
603 PBMEM(0xff200000),
604 IRQ(2),
605};
606DEFINE_DEV_DATA(dw_dmac, 0);
607DEV_CLK(hclk, dw_dmac0, hsb, 10);
608
597/* -------------------------------------------------------------------- 609/* --------------------------------------------------------------------
598 * System peripherals 610 * System peripherals
599 * -------------------------------------------------------------------- */ 611 * -------------------------------------------------------------------- */
@@ -708,17 +720,6 @@ static struct clk pico_clk = {
708 .users = 1, 720 .users = 1,
709}; 721};
710 722
711static struct resource dmaca0_resource[] = {
712 {
713 .start = 0xff200000,
714 .end = 0xff20ffff,
715 .flags = IORESOURCE_MEM,
716 },
717 IRQ(2),
718};
719DEFINE_DEV(dmaca, 0);
720DEV_CLK(hclk, dmaca0, hsb, 10);
721
722/* -------------------------------------------------------------------- 723/* --------------------------------------------------------------------
723 * HMATRIX 724 * HMATRIX
724 * -------------------------------------------------------------------- */ 725 * -------------------------------------------------------------------- */
@@ -831,7 +832,7 @@ void __init at32_add_system_devices(void)
831 platform_device_register(&at32_eic0_device); 832 platform_device_register(&at32_eic0_device);
832 platform_device_register(&smc0_device); 833 platform_device_register(&smc0_device);
833 platform_device_register(&pdc_device); 834 platform_device_register(&pdc_device);
834 platform_device_register(&dmaca0_device); 835 platform_device_register(&dw_dmac0_device);
835 836
836 platform_device_register(&at32_tcb0_device); 837 platform_device_register(&at32_tcb0_device);
837 platform_device_register(&at32_tcb1_device); 838 platform_device_register(&at32_tcb1_device);
@@ -2032,7 +2033,7 @@ struct clk *at32_clock_list[] = {
2032 &smc0_mck, 2033 &smc0_mck,
2033 &pdc_hclk, 2034 &pdc_hclk,
2034 &pdc_pclk, 2035 &pdc_pclk,
2035 &dmaca0_hclk, 2036 &dw_dmac0_hclk,
2036 &pico_clk, 2037 &pico_clk,
2037 &pio0_mck, 2038 &pio0_mck,
2038 &pio1_mck, 2039 &pio1_mck,
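The hunks above replace the old "dmaca" platform device with a dw_dmac device carrying a struct dw_dma_platform_data that tells the driver how many channels this controller instance has. Below is a minimal sketch of how a probe routine might consume that platform data; only struct dw_dma_platform_data and its nr_channels field come from this patch, the function itself is illustrative.

    #include <linux/dw_dmac.h>
    #include <linux/platform_device.h>

    /* Illustrative probe-side consumer of the platform data registered above. */
    static int __init example_dw_probe(struct platform_device *pdev)
    {
            struct dw_dma_platform_data *pdata = pdev->dev.platform_data;

            if (!pdata || !pdata->nr_channels)
                    return -EINVAL;

            dev_info(&pdev->dev, "controller has %u channels\n",
                     pdata->nr_channels);
            return 0;
    }
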
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index a5eda80e8427..ddccfb01c416 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -73,15 +73,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
73 pr_debug("%s: (sync) len: %zu\n", __func__, len); 73 pr_debug("%s: (sync) len: %zu\n", __func__, len);
74 74
75 /* wait for any prerequisite operations */ 75 /* wait for any prerequisite operations */
76 if (depend_tx) { 76 async_tx_quiesce(&depend_tx);
77 /* if ack is already set then we cannot be sure
78 * we are referring to the correct operation
79 */
80 BUG_ON(async_tx_test_ack(depend_tx));
81 if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
82 panic("%s: DMA_ERROR waiting for depend_tx\n",
83 __func__);
84 }
85 77
86 dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset; 78 dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
87 src_buf = kmap_atomic(src, KM_USER1) + src_offset; 79 src_buf = kmap_atomic(src, KM_USER1) + src_offset;
@@ -91,7 +83,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
91 kunmap_atomic(dest_buf, KM_USER0); 83 kunmap_atomic(dest_buf, KM_USER0);
92 kunmap_atomic(src_buf, KM_USER1); 84 kunmap_atomic(src_buf, KM_USER1);
93 85
94 async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); 86 async_tx_sync_epilog(cb_fn, cb_param);
95 } 87 }
96 88
97 return tx; 89 return tx;
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index f5ff3906b035..5b5eb99bb244 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -72,19 +72,11 @@ async_memset(struct page *dest, int val, unsigned int offset,
72 dest_buf = (void *) (((char *) page_address(dest)) + offset); 72 dest_buf = (void *) (((char *) page_address(dest)) + offset);
73 73
74 /* wait for any prerequisite operations */ 74 /* wait for any prerequisite operations */
75 if (depend_tx) { 75 async_tx_quiesce(&depend_tx);
76 /* if ack is already set then we cannot be sure
77 * we are referring to the correct operation
78 */
79 BUG_ON(depend_tx->ack);
80 if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
81 panic("%s: DMA_ERROR waiting for depend_tx\n",
82 __func__);
83 }
84 76
85 memset(dest_buf, val, len); 77 memset(dest_buf, val, len);
86 78
87 async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); 79 async_tx_sync_epilog(cb_fn, cb_param);
88 } 80 }
89 81
90 return tx; 82 return tx;
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 095c798d3170..85eaf7b1c531 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -295,7 +295,7 @@ dma_channel_add_remove(struct dma_client *client,
295 case DMA_RESOURCE_REMOVED: 295 case DMA_RESOURCE_REMOVED:
296 found = 0; 296 found = 0;
297 spin_lock_irqsave(&async_tx_lock, flags); 297 spin_lock_irqsave(&async_tx_lock, flags);
298 list_for_each_entry_rcu(ref, &async_tx_master_list, node) 298 list_for_each_entry(ref, &async_tx_master_list, node)
299 if (ref->chan == chan) { 299 if (ref->chan == chan) {
300 /* permit backing devices to go away */ 300 /* permit backing devices to go away */
301 dma_chan_put(ref->chan); 301 dma_chan_put(ref->chan);
@@ -608,23 +608,34 @@ async_trigger_callback(enum async_tx_flags flags,
608 pr_debug("%s: (sync)\n", __func__); 608 pr_debug("%s: (sync)\n", __func__);
609 609
610 /* wait for any prerequisite operations */ 610 /* wait for any prerequisite operations */
611 if (depend_tx) { 611 async_tx_quiesce(&depend_tx);
612 /* if ack is already set then we cannot be sure
613 * we are referring to the correct operation
614 */
615 BUG_ON(async_tx_test_ack(depend_tx));
616 if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
617 panic("%s: DMA_ERROR waiting for depend_tx\n",
618 __func__);
619 }
620 612
621 async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); 613 async_tx_sync_epilog(cb_fn, cb_param);
622 } 614 }
623 615
624 return tx; 616 return tx;
625} 617}
626EXPORT_SYMBOL_GPL(async_trigger_callback); 618EXPORT_SYMBOL_GPL(async_trigger_callback);
627 619
620/**
621 * async_tx_quiesce - ensure tx is complete and freeable upon return
622 * @tx - transaction to quiesce
623 */
624void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
625{
626 if (*tx) {
627 /* if ack is already set then we cannot be sure
628 * we are referring to the correct operation
629 */
630 BUG_ON(async_tx_test_ack(*tx));
631 if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
632 panic("DMA_ERROR waiting for transaction\n");
633 async_tx_ack(*tx);
634 *tx = NULL;
635 }
636}
637EXPORT_SYMBOL_GPL(async_tx_quiesce);
638
628module_init(async_tx_init); 639module_init(async_tx_init);
629module_exit(async_tx_exit); 640module_exit(async_tx_exit);
630 641
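The new async_tx_quiesce() helper factors out the "wait for a dependent descriptor, then ack and forget it" sequence that the sync fallback paths in async_memcpy, async_memset and async_trigger_callback used to open-code. A minimal sketch of the resulting pattern, using a hypothetical synchronous fallback; only async_tx_quiesce() and async_tx_sync_epilog() are taken from this patch:

    #include <linux/async_tx.h>

    static void example_sync_fallback(struct dma_async_tx_descriptor *depend_tx,
                                      dma_async_tx_callback cb_fn, void *cb_param)
    {
            /* wait for (and ack) any prerequisite operation */
            async_tx_quiesce(&depend_tx);

            /* ... perform the operation on the CPU ... */

            /* invoke the completion callback, if any */
            async_tx_sync_epilog(cb_fn, cb_param);
    }
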
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 3a0dddca5a10..65974c6d3d7a 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -35,74 +35,121 @@
35 * when CONFIG_DMA_ENGINE=n 35 * when CONFIG_DMA_ENGINE=n
36 */ 36 */
37static __always_inline struct dma_async_tx_descriptor * 37static __always_inline struct dma_async_tx_descriptor *
38do_async_xor(struct dma_device *device, 38do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
39 struct dma_chan *chan, struct page *dest, struct page **src_list, 39 unsigned int offset, int src_cnt, size_t len,
40 unsigned int offset, unsigned int src_cnt, size_t len, 40 enum async_tx_flags flags,
41 enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, 41 struct dma_async_tx_descriptor *depend_tx,
42 dma_async_tx_callback cb_fn, void *cb_param) 42 dma_async_tx_callback cb_fn, void *cb_param)
43{ 43{
44 dma_addr_t dma_dest; 44 struct dma_device *dma = chan->device;
45 dma_addr_t *dma_src = (dma_addr_t *) src_list; 45 dma_addr_t *dma_src = (dma_addr_t *) src_list;
46 struct dma_async_tx_descriptor *tx; 46 struct dma_async_tx_descriptor *tx = NULL;
47 int src_off = 0;
47 int i; 48 int i;
48 unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; 49 dma_async_tx_callback _cb_fn;
49 50 void *_cb_param;
50 pr_debug("%s: len: %zu\n", __func__, len); 51 enum async_tx_flags async_flags;
51 52 enum dma_ctrl_flags dma_flags;
52 dma_dest = dma_map_page(device->dev, dest, offset, len, 53 int xor_src_cnt;
53 DMA_FROM_DEVICE); 54 dma_addr_t dma_dest;
54 55
56 dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE);
55 for (i = 0; i < src_cnt; i++) 57 for (i = 0; i < src_cnt; i++)
56 dma_src[i] = dma_map_page(device->dev, src_list[i], offset, 58 dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
57 len, DMA_TO_DEVICE); 59 len, DMA_TO_DEVICE);
58 60
59 /* Since we have clobbered the src_list we are committed 61 while (src_cnt) {
60 * to doing this asynchronously. Drivers force forward progress 62 async_flags = flags;
61 * in case they can not provide a descriptor 63 dma_flags = 0;
62 */ 64 xor_src_cnt = min(src_cnt, dma->max_xor);
63 tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len, 65 /* if we are submitting additional xors, leave the chain open,
64 dma_prep_flags); 66 * clear the callback parameters, and leave the destination
65 if (!tx) { 67 * buffer mapped
66 if (depend_tx) 68 */
67 dma_wait_for_async_tx(depend_tx); 69 if (src_cnt > xor_src_cnt) {
68 70 async_flags &= ~ASYNC_TX_ACK;
69 while (!tx) 71 dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
70 tx = device->device_prep_dma_xor(chan, dma_dest, 72 _cb_fn = NULL;
71 dma_src, src_cnt, len, 73 _cb_param = NULL;
72 dma_prep_flags); 74 } else {
73 } 75 _cb_fn = cb_fn;
76 _cb_param = cb_param;
77 }
78 if (_cb_fn)
79 dma_flags |= DMA_PREP_INTERRUPT;
74 80
75 async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); 81 /* Since we have clobbered the src_list we are committed
82 * to doing this asynchronously. Drivers force forward progress
83 * in case they can not provide a descriptor
84 */
85 tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
86 xor_src_cnt, len, dma_flags);
87
88 if (unlikely(!tx))
89 async_tx_quiesce(&depend_tx);
90
 91 /* spin wait for the preceding transactions to complete */
92 while (unlikely(!tx)) {
93 dma_async_issue_pending(chan);
94 tx = dma->device_prep_dma_xor(chan, dma_dest,
95 &dma_src[src_off],
96 xor_src_cnt, len,
97 dma_flags);
98 }
99
100 async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
101 _cb_param);
102
103 depend_tx = tx;
104 flags |= ASYNC_TX_DEP_ACK;
105
106 if (src_cnt > xor_src_cnt) {
107 /* drop completed sources */
108 src_cnt -= xor_src_cnt;
109 src_off += xor_src_cnt;
110
 111 /* use the intermediate result as a source */
112 dma_src[--src_off] = dma_dest;
113 src_cnt++;
114 } else
115 break;
116 }
76 117
77 return tx; 118 return tx;
78} 119}
79 120
80static void 121static void
81do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, 122do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
82 unsigned int src_cnt, size_t len, enum async_tx_flags flags, 123 int src_cnt, size_t len, enum async_tx_flags flags,
83 struct dma_async_tx_descriptor *depend_tx, 124 dma_async_tx_callback cb_fn, void *cb_param)
84 dma_async_tx_callback cb_fn, void *cb_param)
85{ 125{
86 void *_dest;
87 int i; 126 int i;
88 127 int xor_src_cnt;
89 pr_debug("%s: len: %zu\n", __func__, len); 128 int src_off = 0;
129 void *dest_buf;
130 void **srcs = (void **) src_list;
90 131
91 /* reuse the 'src_list' array to convert to buffer pointers */ 132 /* reuse the 'src_list' array to convert to buffer pointers */
92 for (i = 0; i < src_cnt; i++) 133 for (i = 0; i < src_cnt; i++)
93 src_list[i] = (struct page *) 134 srcs[i] = page_address(src_list[i]) + offset;
94 (page_address(src_list[i]) + offset);
95 135
96 /* set destination address */ 136 /* set destination address */
97 _dest = page_address(dest) + offset; 137 dest_buf = page_address(dest) + offset;
98 138
99 if (flags & ASYNC_TX_XOR_ZERO_DST) 139 if (flags & ASYNC_TX_XOR_ZERO_DST)
100 memset(_dest, 0, len); 140 memset(dest_buf, 0, len);
101 141
102 xor_blocks(src_cnt, len, _dest, 142 while (src_cnt > 0) {
103 (void **) src_list); 143 /* process up to 'MAX_XOR_BLOCKS' sources */
144 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
145 xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);
104 146
105 async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); 147 /* drop completed sources */
148 src_cnt -= xor_src_cnt;
149 src_off += xor_src_cnt;
150 }
151
152 async_tx_sync_epilog(cb_fn, cb_param);
106} 153}
107 154
108/** 155/**
@@ -132,106 +179,34 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
132 struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR, 179 struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
133 &dest, 1, src_list, 180 &dest, 1, src_list,
134 src_cnt, len); 181 src_cnt, len);
135 struct dma_device *device = chan ? chan->device : NULL;
136 struct dma_async_tx_descriptor *tx = NULL;
137 dma_async_tx_callback _cb_fn;
138 void *_cb_param;
139 unsigned long local_flags;
140 int xor_src_cnt;
141 int i = 0, src_off = 0;
142
143 BUG_ON(src_cnt <= 1); 182 BUG_ON(src_cnt <= 1);
144 183
145 while (src_cnt) { 184 if (chan) {
146 local_flags = flags; 185 /* run the xor asynchronously */
147 if (device) { /* run the xor asynchronously */ 186 pr_debug("%s (async): len: %zu\n", __func__, len);
148 xor_src_cnt = min(src_cnt, device->max_xor);
149 /* if we are submitting additional xors
150 * only set the callback on the last transaction
151 */
152 if (src_cnt > xor_src_cnt) {
153 local_flags &= ~ASYNC_TX_ACK;
154 _cb_fn = NULL;
155 _cb_param = NULL;
156 } else {
157 _cb_fn = cb_fn;
158 _cb_param = cb_param;
159 }
160
161 tx = do_async_xor(device, chan, dest,
162 &src_list[src_off], offset,
163 xor_src_cnt, len, local_flags,
164 depend_tx, _cb_fn, _cb_param);
165 } else { /* run the xor synchronously */
166 /* in the sync case the dest is an implied source
167 * (assumes the dest is at the src_off index)
168 */
169 if (flags & ASYNC_TX_XOR_DROP_DST) {
170 src_cnt--;
171 src_off++;
172 }
173
174 /* process up to 'MAX_XOR_BLOCKS' sources */
175 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
176
177 /* if we are submitting additional xors
178 * only set the callback on the last transaction
179 */
180 if (src_cnt > xor_src_cnt) {
181 local_flags &= ~ASYNC_TX_ACK;
182 _cb_fn = NULL;
183 _cb_param = NULL;
184 } else {
185 _cb_fn = cb_fn;
186 _cb_param = cb_param;
187 }
188
189 /* wait for any prerequisite operations */
190 if (depend_tx) {
191 /* if ack is already set then we cannot be sure
192 * we are referring to the correct operation
193 */
194 BUG_ON(async_tx_test_ack(depend_tx));
195 if (dma_wait_for_async_tx(depend_tx) ==
196 DMA_ERROR)
197 panic("%s: DMA_ERROR waiting for "
198 "depend_tx\n",
199 __func__);
200 }
201
202 do_sync_xor(dest, &src_list[src_off], offset,
203 xor_src_cnt, len, local_flags, depend_tx,
204 _cb_fn, _cb_param);
205 }
206 187
207 /* the previous tx is hidden from the client, 188 return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
208 * so ack it 189 flags, depend_tx, cb_fn, cb_param);
209 */ 190 } else {
210 if (i && depend_tx) 191 /* run the xor synchronously */
211 async_tx_ack(depend_tx); 192 pr_debug("%s (sync): len: %zu\n", __func__, len);
212 193
213 depend_tx = tx; 194 /* in the sync case the dest is an implied source
195 * (assumes the dest is the first source)
196 */
197 if (flags & ASYNC_TX_XOR_DROP_DST) {
198 src_cnt--;
199 src_list++;
200 }
214 201
215 if (src_cnt > xor_src_cnt) { 202 /* wait for any prerequisite operations */
216 /* drop completed sources */ 203 async_tx_quiesce(&depend_tx);
217 src_cnt -= xor_src_cnt;
218 src_off += xor_src_cnt;
219 204
220 /* unconditionally preserve the destination */ 205 do_sync_xor(dest, src_list, offset, src_cnt, len,
221 flags &= ~ASYNC_TX_XOR_ZERO_DST; 206 flags, cb_fn, cb_param);
222 207
223 /* use the intermediate result a source, but remember 208 return NULL;
224 * it's dropped, because it's implied, in the sync case
225 */
226 src_list[--src_off] = dest;
227 src_cnt++;
228 flags |= ASYNC_TX_XOR_DROP_DST;
229 } else
230 src_cnt = 0;
231 i++;
232 } 209 }
233
234 return tx;
235} 210}
236EXPORT_SYMBOL_GPL(async_xor); 211EXPORT_SYMBOL_GPL(async_xor);
237 212
@@ -285,11 +260,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
285 tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt, 260 tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
286 len, result, 261 len, result,
287 dma_prep_flags); 262 dma_prep_flags);
288 if (!tx) { 263 if (unlikely(!tx)) {
289 if (depend_tx) 264 async_tx_quiesce(&depend_tx);
290 dma_wait_for_async_tx(depend_tx);
291 265
292 while (!tx) 266 while (!tx)
267 dma_async_issue_pending(chan);
293 tx = device->device_prep_dma_zero_sum(chan, 268 tx = device->device_prep_dma_zero_sum(chan,
294 dma_src, src_cnt, len, result, 269 dma_src, src_cnt, len, result,
295 dma_prep_flags); 270 dma_prep_flags);
@@ -307,18 +282,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
307 tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags, 282 tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
308 depend_tx, NULL, NULL); 283 depend_tx, NULL, NULL);
309 284
310 if (tx) { 285 async_tx_quiesce(&tx);
311 if (dma_wait_for_async_tx(tx) == DMA_ERROR)
312 panic("%s: DMA_ERROR waiting for tx\n",
313 __func__);
314 async_tx_ack(tx);
315 }
316 286
317 *result = page_is_zero(dest, offset, len) ? 0 : 1; 287 *result = page_is_zero(dest, offset, len) ? 0 : 1;
318 288
319 tx = NULL; 289 async_tx_sync_epilog(cb_fn, cb_param);
320
321 async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
322 } 290 }
323 291
324 return tx; 292 return tx;
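With the rework above, do_async_xor() transparently breaks a source list larger than the channel's max_xor into a chain of operations, feeding the intermediate destination back in as a source, so callers no longer need to care about the hardware limit. A caller-side sketch follows; the callback and wrapper function are illustrative, while the async_xor() signature is the one used in this file:

    #include <linux/async_tx.h>

    static void example_xor_done(void *arg)
    {
            pr_debug("xor complete\n");
    }

    /* XOR src_cnt pages into dest; chunking past max_xor happens internally. */
    static struct dma_async_tx_descriptor *
    example_xor(struct page *dest, struct page **srcs, int src_cnt, size_t len)
    {
            return async_xor(dest, srcs, 0, src_cnt, len,
                             ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
                             NULL, example_xor_done, NULL);
    }
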
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index bf5b92f86df7..ec249d2db633 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -28,13 +28,29 @@
28#include <linux/device.h> 28#include <linux/device.h>
29#include <linux/dca.h> 29#include <linux/dca.h>
30 30
31MODULE_LICENSE("GPL"); 31#define DCA_VERSION "1.4"
32 32
33/* For now we're assuming a single, global, DCA provider for the system. */ 33MODULE_VERSION(DCA_VERSION);
34MODULE_LICENSE("GPL");
35MODULE_AUTHOR("Intel Corporation");
34 36
35static DEFINE_SPINLOCK(dca_lock); 37static DEFINE_SPINLOCK(dca_lock);
36 38
37static struct dca_provider *global_dca = NULL; 39static LIST_HEAD(dca_providers);
40
41static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
42{
43 struct dca_provider *dca, *ret = NULL;
44
45 list_for_each_entry(dca, &dca_providers, node) {
46 if ((!dev) || (dca->ops->dev_managed(dca, dev))) {
47 ret = dca;
48 break;
49 }
50 }
51
52 return ret;
53}
38 54
39/** 55/**
40 * dca_add_requester - add a dca client to the list 56 * dca_add_requester - add a dca client to the list
@@ -42,25 +58,39 @@ static struct dca_provider *global_dca = NULL;
42 */ 58 */
43int dca_add_requester(struct device *dev) 59int dca_add_requester(struct device *dev)
44{ 60{
45 int err, slot; 61 struct dca_provider *dca;
62 int err, slot = -ENODEV;
46 63
47 if (!global_dca) 64 if (!dev)
48 return -ENODEV; 65 return -EFAULT;
49 66
50 spin_lock(&dca_lock); 67 spin_lock(&dca_lock);
51 slot = global_dca->ops->add_requester(global_dca, dev); 68
52 spin_unlock(&dca_lock); 69 /* check if the requester has not been added already */
53 if (slot < 0) 70 dca = dca_find_provider_by_dev(dev);
71 if (dca) {
72 spin_unlock(&dca_lock);
73 return -EEXIST;
74 }
75
76 list_for_each_entry(dca, &dca_providers, node) {
77 slot = dca->ops->add_requester(dca, dev);
78 if (slot >= 0)
79 break;
80 }
81 if (slot < 0) {
82 spin_unlock(&dca_lock);
54 return slot; 83 return slot;
84 }
55 85
56 err = dca_sysfs_add_req(global_dca, dev, slot); 86 err = dca_sysfs_add_req(dca, dev, slot);
57 if (err) { 87 if (err) {
58 spin_lock(&dca_lock); 88 dca->ops->remove_requester(dca, dev);
59 global_dca->ops->remove_requester(global_dca, dev);
60 spin_unlock(&dca_lock); 89 spin_unlock(&dca_lock);
61 return err; 90 return err;
62 } 91 }
63 92
93 spin_unlock(&dca_lock);
64 return 0; 94 return 0;
65} 95}
66EXPORT_SYMBOL_GPL(dca_add_requester); 96EXPORT_SYMBOL_GPL(dca_add_requester);
@@ -71,30 +101,78 @@ EXPORT_SYMBOL_GPL(dca_add_requester);
71 */ 101 */
72int dca_remove_requester(struct device *dev) 102int dca_remove_requester(struct device *dev)
73{ 103{
104 struct dca_provider *dca;
74 int slot; 105 int slot;
75 if (!global_dca) 106
76 return -ENODEV; 107 if (!dev)
108 return -EFAULT;
77 109
78 spin_lock(&dca_lock); 110 spin_lock(&dca_lock);
79 slot = global_dca->ops->remove_requester(global_dca, dev); 111 dca = dca_find_provider_by_dev(dev);
80 spin_unlock(&dca_lock); 112 if (!dca) {
81 if (slot < 0) 113 spin_unlock(&dca_lock);
114 return -ENODEV;
115 }
116 slot = dca->ops->remove_requester(dca, dev);
117 if (slot < 0) {
118 spin_unlock(&dca_lock);
82 return slot; 119 return slot;
120 }
83 121
84 dca_sysfs_remove_req(global_dca, slot); 122 dca_sysfs_remove_req(dca, slot);
123
124 spin_unlock(&dca_lock);
85 return 0; 125 return 0;
86} 126}
87EXPORT_SYMBOL_GPL(dca_remove_requester); 127EXPORT_SYMBOL_GPL(dca_remove_requester);
88 128
89/** 129/**
90 * dca_get_tag - return the dca tag for the given cpu 130 * dca_common_get_tag - return the dca tag (serves both new and old api)
131 * @dev - the device that wants dca service
91 * @cpu - the cpuid as returned by get_cpu() 132 * @cpu - the cpuid as returned by get_cpu()
92 */ 133 */
93u8 dca_get_tag(int cpu) 134u8 dca_common_get_tag(struct device *dev, int cpu)
94{ 135{
95 if (!global_dca) 136 struct dca_provider *dca;
137 u8 tag;
138
139 spin_lock(&dca_lock);
140
141 dca = dca_find_provider_by_dev(dev);
142 if (!dca) {
143 spin_unlock(&dca_lock);
96 return -ENODEV; 144 return -ENODEV;
97 return global_dca->ops->get_tag(global_dca, cpu); 145 }
146 tag = dca->ops->get_tag(dca, dev, cpu);
147
148 spin_unlock(&dca_lock);
149 return tag;
150}
151
152/**
153 * dca3_get_tag - return the dca tag to the requester device
154 * for the given cpu (new api)
155 * @dev - the device that wants dca service
156 * @cpu - the cpuid as returned by get_cpu()
157 */
158u8 dca3_get_tag(struct device *dev, int cpu)
159{
160 if (!dev)
161 return -EFAULT;
162
163 return dca_common_get_tag(dev, cpu);
164}
165EXPORT_SYMBOL_GPL(dca3_get_tag);
166
167/**
168 * dca_get_tag - return the dca tag for the given cpu (old api)
169 * @cpu - the cpuid as returned by get_cpu()
170 */
171u8 dca_get_tag(int cpu)
172{
173 struct device *dev = NULL;
174
175 return dca_common_get_tag(dev, cpu);
98} 176}
99EXPORT_SYMBOL_GPL(dca_get_tag); 177EXPORT_SYMBOL_GPL(dca_get_tag);
100 178
@@ -140,12 +218,10 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
140{ 218{
141 int err; 219 int err;
142 220
143 if (global_dca)
144 return -EEXIST;
145 err = dca_sysfs_add_provider(dca, dev); 221 err = dca_sysfs_add_provider(dca, dev);
146 if (err) 222 if (err)
147 return err; 223 return err;
148 global_dca = dca; 224 list_add(&dca->node, &dca_providers);
149 blocking_notifier_call_chain(&dca_provider_chain, 225 blocking_notifier_call_chain(&dca_provider_chain,
150 DCA_PROVIDER_ADD, NULL); 226 DCA_PROVIDER_ADD, NULL);
151 return 0; 227 return 0;
@@ -158,11 +234,9 @@ EXPORT_SYMBOL_GPL(register_dca_provider);
158 */ 234 */
159void unregister_dca_provider(struct dca_provider *dca) 235void unregister_dca_provider(struct dca_provider *dca)
160{ 236{
161 if (!global_dca)
162 return;
163 blocking_notifier_call_chain(&dca_provider_chain, 237 blocking_notifier_call_chain(&dca_provider_chain,
164 DCA_PROVIDER_REMOVE, NULL); 238 DCA_PROVIDER_REMOVE, NULL);
165 global_dca = NULL; 239 list_del(&dca->node);
166 dca_sysfs_remove_provider(dca); 240 dca_sysfs_remove_provider(dca);
167} 241}
168EXPORT_SYMBOL_GPL(unregister_dca_provider); 242EXPORT_SYMBOL_GPL(unregister_dca_provider);
@@ -187,6 +261,7 @@ EXPORT_SYMBOL_GPL(dca_unregister_notify);
187 261
188static int __init dca_init(void) 262static int __init dca_init(void)
189{ 263{
264 printk(KERN_ERR "dca service started, version %s\n", DCA_VERSION);
190 return dca_sysfs_init(); 265 return dca_sysfs_init();
191} 266}
192 267
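dca-core now keeps a list of providers and resolves the provider per requester device, so a driver asks for its tag with the new dca3_get_tag(dev, cpu) rather than the global dca_get_tag(cpu). A sketch of the requester-side calls; the register programming is illustrative and only the dca_* calls come from this file:

    #include <linux/dca.h>
    #include <linux/smp.h>

    static void example_enable_dca(struct device *dev)
    {
            int cpu;
            u8 tag;

            if (dca_add_requester(dev))
                    return;                 /* no provider manages this device */

            cpu = get_cpu();
            tag = dca3_get_tag(dev, cpu);   /* per-device tag lookup (new API) */
            put_cpu();

            /* ... write 'tag' into the device's DCA control register ... */
    }

    static void example_disable_dca(struct device *dev)
    {
            dca_remove_requester(dev);
    }
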
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
index 9a70377bfb34..7af4b403bd2d 100644
--- a/drivers/dca/dca-sysfs.c
+++ b/drivers/dca/dca-sysfs.c
@@ -13,10 +13,11 @@ static spinlock_t dca_idr_lock;
13int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot) 13int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot)
14{ 14{
15 struct device *cd; 15 struct device *cd;
16 static int req_count;
16 17
17 cd = device_create_drvdata(dca_class, dca->cd, 18 cd = device_create_drvdata(dca_class, dca->cd,
18 MKDEV(0, slot + 1), NULL, 19 MKDEV(0, slot + 1), NULL,
19 "requester%d", slot); 20 "requester%d", req_count++);
20 if (IS_ERR(cd)) 21 if (IS_ERR(cd))
21 return PTR_ERR(cd); 22 return PTR_ERR(cd);
22 return 0; 23 return 0;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6239c3df30ac..cd303901eb5b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -4,13 +4,14 @@
4 4
5menuconfig DMADEVICES 5menuconfig DMADEVICES
6 bool "DMA Engine support" 6 bool "DMA Engine support"
7 depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC 7 depends on !HIGHMEM64G && HAS_DMA
8 depends on !HIGHMEM64G
9 help 8 help
10 DMA engines can do asynchronous data transfers without 9 DMA engines can do asynchronous data transfers without
11 involving the host CPU. Currently, this framework can be 10 involving the host CPU. Currently, this framework can be
12 used to offload memory copies in the network stack and 11 used to offload memory copies in the network stack and
13 RAID operations in the MD driver. 12 RAID operations in the MD driver. This menu only presents
13 DMA Device drivers supported by the configured arch, it may
14 be empty in some cases.
14 15
15if DMADEVICES 16if DMADEVICES
16 17
@@ -37,6 +38,15 @@ config INTEL_IOP_ADMA
37 help 38 help
38 Enable support for the Intel(R) IOP Series RAID engines. 39 Enable support for the Intel(R) IOP Series RAID engines.
39 40
41config DW_DMAC
42 tristate "Synopsys DesignWare AHB DMA support"
43 depends on AVR32
44 select DMA_ENGINE
45 default y if CPU_AT32AP7000
46 help
47 Support the Synopsys DesignWare AHB DMA controller. This
48 can be integrated in chips such as the Atmel AT32ap7000.
49
40config FSL_DMA 50config FSL_DMA
41 bool "Freescale MPC85xx/MPC83xx DMA support" 51 bool "Freescale MPC85xx/MPC83xx DMA support"
42 depends on PPC 52 depends on PPC
@@ -46,6 +56,14 @@ config FSL_DMA
46 MPC8560/40, MPC8555, MPC8548 and MPC8641 processors. 56 MPC8560/40, MPC8555, MPC8548 and MPC8641 processors.
47 The MPC8349, MPC8360 is also supported. 57 The MPC8349, MPC8360 is also supported.
48 58
59config MV_XOR
60 bool "Marvell XOR engine support"
61 depends on PLAT_ORION
62 select ASYNC_CORE
63 select DMA_ENGINE
64 ---help---
65 Enable support for the Marvell XOR engine.
66
49config DMA_ENGINE 67config DMA_ENGINE
50 bool 68 bool
51 69
@@ -55,10 +73,19 @@ comment "DMA Clients"
55config NET_DMA 73config NET_DMA
56 bool "Network: TCP receive copy offload" 74 bool "Network: TCP receive copy offload"
57 depends on DMA_ENGINE && NET 75 depends on DMA_ENGINE && NET
76 default (INTEL_IOATDMA || FSL_DMA)
58 help 77 help
59 This enables the use of DMA engines in the network stack to 78 This enables the use of DMA engines in the network stack to
60 offload receive copy-to-user operations, freeing CPU cycles. 79 offload receive copy-to-user operations, freeing CPU cycles.
61 Since this is the main user of the DMA engine, it should be enabled; 80
62 say Y here. 81 Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
82 say N.
83
84config DMATEST
85 tristate "DMA Test client"
86 depends on DMA_ENGINE
87 help
88 Simple DMA test client. Say N unless you're debugging a
89 DMA Device driver.
63 90
64endif 91endif
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c8036d945902..14f59527d4f6 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,6 +1,9 @@
1obj-$(CONFIG_DMA_ENGINE) += dmaengine.o 1obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
2obj-$(CONFIG_NET_DMA) += iovlock.o 2obj-$(CONFIG_NET_DMA) += iovlock.o
3obj-$(CONFIG_DMATEST) += dmatest.o
3obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o 4obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
4ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o 5ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
5obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o 6obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
6obj-$(CONFIG_FSL_DMA) += fsldma.o 7obj-$(CONFIG_FSL_DMA) += fsldma.o
8obj-$(CONFIG_MV_XOR) += mv_xor.o
9obj-$(CONFIG_DW_DMAC) += dw_dmac.o
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 97b329e76798..dc003a3a787d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -169,12 +169,18 @@ static void dma_client_chan_alloc(struct dma_client *client)
169 enum dma_state_client ack; 169 enum dma_state_client ack;
170 170
171 /* Find a channel */ 171 /* Find a channel */
172 list_for_each_entry(device, &dma_device_list, global_node) 172 list_for_each_entry(device, &dma_device_list, global_node) {
173 /* Does the client require a specific DMA controller? */
174 if (client->slave && client->slave->dma_dev
175 && client->slave->dma_dev != device->dev)
176 continue;
177
173 list_for_each_entry(chan, &device->channels, device_node) { 178 list_for_each_entry(chan, &device->channels, device_node) {
174 if (!dma_chan_satisfies_mask(chan, client->cap_mask)) 179 if (!dma_chan_satisfies_mask(chan, client->cap_mask))
175 continue; 180 continue;
176 181
177 desc = chan->device->device_alloc_chan_resources(chan); 182 desc = chan->device->device_alloc_chan_resources(
183 chan, client);
178 if (desc >= 0) { 184 if (desc >= 0) {
179 ack = client->event_callback(client, 185 ack = client->event_callback(client,
180 chan, 186 chan,
@@ -183,12 +189,14 @@ static void dma_client_chan_alloc(struct dma_client *client)
183 /* we are done once this client rejects 189 /* we are done once this client rejects
184 * an available resource 190 * an available resource
185 */ 191 */
186 if (ack == DMA_ACK) 192 if (ack == DMA_ACK) {
187 dma_chan_get(chan); 193 dma_chan_get(chan);
188 else if (ack == DMA_NAK) 194 chan->client_count++;
195 } else if (ack == DMA_NAK)
189 return; 196 return;
190 } 197 }
191 } 198 }
199 }
192} 200}
193 201
194enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) 202enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -272,8 +280,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
272 /* client was holding resources for this channel so 280 /* client was holding resources for this channel so
273 * free it 281 * free it
274 */ 282 */
275 if (ack == DMA_ACK) 283 if (ack == DMA_ACK) {
276 dma_chan_put(chan); 284 dma_chan_put(chan);
285 chan->client_count--;
286 }
277 } 287 }
278 288
279 mutex_unlock(&dma_list_mutex); 289 mutex_unlock(&dma_list_mutex);
@@ -285,6 +295,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
285 */ 295 */
286void dma_async_client_register(struct dma_client *client) 296void dma_async_client_register(struct dma_client *client)
287{ 297{
298 /* validate client data */
299 BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
300 !client->slave);
301
288 mutex_lock(&dma_list_mutex); 302 mutex_lock(&dma_list_mutex);
289 list_add_tail(&client->global_node, &dma_client_list); 303 list_add_tail(&client->global_node, &dma_client_list);
290 mutex_unlock(&dma_list_mutex); 304 mutex_unlock(&dma_list_mutex);
@@ -313,8 +327,10 @@ void dma_async_client_unregister(struct dma_client *client)
313 ack = client->event_callback(client, chan, 327 ack = client->event_callback(client, chan,
314 DMA_RESOURCE_REMOVED); 328 DMA_RESOURCE_REMOVED);
315 329
316 if (ack == DMA_ACK) 330 if (ack == DMA_ACK) {
317 dma_chan_put(chan); 331 dma_chan_put(chan);
332 chan->client_count--;
333 }
318 } 334 }
319 335
320 list_del(&client->global_node); 336 list_del(&client->global_node);
@@ -359,6 +375,10 @@ int dma_async_device_register(struct dma_device *device)
359 !device->device_prep_dma_memset); 375 !device->device_prep_dma_memset);
360 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && 376 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
361 !device->device_prep_dma_interrupt); 377 !device->device_prep_dma_interrupt);
378 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
379 !device->device_prep_slave_sg);
380 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
381 !device->device_terminate_all);
362 382
363 BUG_ON(!device->device_alloc_chan_resources); 383 BUG_ON(!device->device_alloc_chan_resources);
364 BUG_ON(!device->device_free_chan_resources); 384 BUG_ON(!device->device_free_chan_resources);
@@ -378,7 +398,7 @@ int dma_async_device_register(struct dma_device *device)
378 398
379 chan->chan_id = chancnt++; 399 chan->chan_id = chancnt++;
380 chan->dev.class = &dma_devclass; 400 chan->dev.class = &dma_devclass;
381 chan->dev.parent = NULL; 401 chan->dev.parent = device->dev;
382 snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d", 402 snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d",
383 device->dev_id, chan->chan_id); 403 device->dev_id, chan->chan_id);
384 404
@@ -394,6 +414,7 @@ int dma_async_device_register(struct dma_device *device)
394 kref_get(&device->refcount); 414 kref_get(&device->refcount);
395 kref_get(&device->refcount); 415 kref_get(&device->refcount);
396 kref_init(&chan->refcount); 416 kref_init(&chan->refcount);
417 chan->client_count = 0;
397 chan->slow_ref = 0; 418 chan->slow_ref = 0;
398 INIT_RCU_HEAD(&chan->rcu); 419 INIT_RCU_HEAD(&chan->rcu);
399 } 420 }
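The dmaengine changes above let a client pin its request to a particular controller via client->slave->dma_dev and require that any client asking for DMA_SLAVE capability supplies a slave descriptor (the new BUG_ON in dma_async_client_register). A sketch of such a client registration; struct dma_slave fields beyond dma_dev are not visible in this diff, so treat the initialization as illustrative:

    #include <linux/dmaengine.h>

    static enum dma_state_client
    example_event(struct dma_client *client, struct dma_chan *chan,
                  enum dma_state state)
    {
            /* accept the first channel we are offered, ignore removals */
            return state == DMA_RESOURCE_AVAILABLE ? DMA_ACK : DMA_DUP;
    }

    static struct dma_slave example_slave;  /* .dma_dev set to the wanted controller */

    static struct dma_client example_client = {
            .event_callback = example_event,
            .slave          = &example_slave,
    };

    static void example_request_slave_channel(void)
    {
            dma_cap_set(DMA_SLAVE, example_client.cap_mask);
            /* the new BUG_ON fires here if .slave is left NULL */
            dma_async_client_register(&example_client);
            dma_async_client_chan_request(&example_client);
    }
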
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
new file mode 100644
index 000000000000..a08d19704743
--- /dev/null
+++ b/drivers/dma/dmatest.c
@@ -0,0 +1,444 @@
1/*
2 * DMA Engine test module
3 *
4 * Copyright (C) 2007 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/delay.h>
11#include <linux/dmaengine.h>
12#include <linux/init.h>
13#include <linux/kthread.h>
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/random.h>
17#include <linux/wait.h>
18
19static unsigned int test_buf_size = 16384;
20module_param(test_buf_size, uint, S_IRUGO);
21MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
22
23static char test_channel[BUS_ID_SIZE];
24module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
25MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
26
27static char test_device[BUS_ID_SIZE];
28module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
29MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
30
31static unsigned int threads_per_chan = 1;
32module_param(threads_per_chan, uint, S_IRUGO);
33MODULE_PARM_DESC(threads_per_chan,
34 "Number of threads to start per channel (default: 1)");
35
36static unsigned int max_channels;
37module_param(max_channels, uint, S_IRUGO);
 38MODULE_PARM_DESC(max_channels,
39 "Maximum number of channels to use (default: all)");
40
41/*
 42 * Initialization patterns. All bytes in the source buffer have bit 7
 43 * set, all bytes in the destination buffer have bit 7 cleared.
44 *
45 * Bit 6 is set for all bytes which are to be copied by the DMA
46 * engine. Bit 5 is set for all bytes which are to be overwritten by
47 * the DMA engine.
48 *
49 * The remaining bits are the inverse of a counter which increments by
50 * one for each byte address.
51 */
52#define PATTERN_SRC 0x80
53#define PATTERN_DST 0x00
54#define PATTERN_COPY 0x40
55#define PATTERN_OVERWRITE 0x20
56#define PATTERN_COUNT_MASK 0x1f
57
58struct dmatest_thread {
59 struct list_head node;
60 struct task_struct *task;
61 struct dma_chan *chan;
62 u8 *srcbuf;
63 u8 *dstbuf;
64};
65
66struct dmatest_chan {
67 struct list_head node;
68 struct dma_chan *chan;
69 struct list_head threads;
70};
71
72/*
73 * These are protected by dma_list_mutex since they're only used by
74 * the DMA client event callback
75 */
76static LIST_HEAD(dmatest_channels);
77static unsigned int nr_channels;
78
79static bool dmatest_match_channel(struct dma_chan *chan)
80{
81 if (test_channel[0] == '\0')
82 return true;
83 return strcmp(chan->dev.bus_id, test_channel) == 0;
84}
85
86static bool dmatest_match_device(struct dma_device *device)
87{
88 if (test_device[0] == '\0')
89 return true;
90 return strcmp(device->dev->bus_id, test_device) == 0;
91}
92
93static unsigned long dmatest_random(void)
94{
95 unsigned long buf;
96
97 get_random_bytes(&buf, sizeof(buf));
98 return buf;
99}
100
101static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len)
102{
103 unsigned int i;
104
105 for (i = 0; i < start; i++)
106 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
107 for ( ; i < start + len; i++)
108 buf[i] = PATTERN_SRC | PATTERN_COPY
 109 | (~i & PATTERN_COUNT_MASK);
110 for ( ; i < test_buf_size; i++)
111 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
112}
113
114static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len)
115{
116 unsigned int i;
117
118 for (i = 0; i < start; i++)
119 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
120 for ( ; i < start + len; i++)
121 buf[i] = PATTERN_DST | PATTERN_OVERWRITE
122 | (~i & PATTERN_COUNT_MASK);
123 for ( ; i < test_buf_size; i++)
124 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
125}
126
127static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
128 unsigned int counter, bool is_srcbuf)
129{
130 u8 diff = actual ^ pattern;
131 u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
132 const char *thread_name = current->comm;
133
134 if (is_srcbuf)
135 pr_warning("%s: srcbuf[0x%x] overwritten!"
136 " Expected %02x, got %02x\n",
137 thread_name, index, expected, actual);
138 else if ((pattern & PATTERN_COPY)
139 && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
140 pr_warning("%s: dstbuf[0x%x] not copied!"
141 " Expected %02x, got %02x\n",
142 thread_name, index, expected, actual);
143 else if (diff & PATTERN_SRC)
144 pr_warning("%s: dstbuf[0x%x] was copied!"
145 " Expected %02x, got %02x\n",
146 thread_name, index, expected, actual);
147 else
148 pr_warning("%s: dstbuf[0x%x] mismatch!"
149 " Expected %02x, got %02x\n",
150 thread_name, index, expected, actual);
151}
152
153static unsigned int dmatest_verify(u8 *buf, unsigned int start,
154 unsigned int end, unsigned int counter, u8 pattern,
155 bool is_srcbuf)
156{
157 unsigned int i;
158 unsigned int error_count = 0;
159 u8 actual;
160
161 for (i = start; i < end; i++) {
162 actual = buf[i];
163 if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) {
164 if (error_count < 32)
165 dmatest_mismatch(actual, pattern, i, counter,
166 is_srcbuf);
167 error_count++;
168 }
169 counter++;
170 }
171
172 if (error_count > 32)
173 pr_warning("%s: %u errors suppressed\n",
174 current->comm, error_count - 32);
175
176 return error_count;
177}
178
179/*
180 * This function repeatedly tests DMA transfers of various lengths and
181 * offsets until it is told to exit by kthread_stop(). There may be
182 * multiple threads running this function in parallel for a single
183 * channel, and there may be multiple channels being tested in
184 * parallel.
185 *
186 * Before each test, the source and destination buffer is initialized
187 * with a known pattern. This pattern is different depending on
188 * whether it's in an area which is supposed to be copied or
189 * overwritten, and different in the source and destination buffers.
190 * So if the DMA engine doesn't copy exactly what we tell it to copy,
191 * we'll notice.
192 */
193static int dmatest_func(void *data)
194{
195 struct dmatest_thread *thread = data;
196 struct dma_chan *chan;
197 const char *thread_name;
198 unsigned int src_off, dst_off, len;
199 unsigned int error_count;
200 unsigned int failed_tests = 0;
201 unsigned int total_tests = 0;
202 dma_cookie_t cookie;
203 enum dma_status status;
204 int ret;
205
206 thread_name = current->comm;
207
208 ret = -ENOMEM;
209 thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL);
210 if (!thread->srcbuf)
211 goto err_srcbuf;
212 thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL);
213 if (!thread->dstbuf)
214 goto err_dstbuf;
215
216 smp_rmb();
217 chan = thread->chan;
218 dma_chan_get(chan);
219
220 while (!kthread_should_stop()) {
221 total_tests++;
222
223 len = dmatest_random() % test_buf_size + 1;
224 src_off = dmatest_random() % (test_buf_size - len + 1);
225 dst_off = dmatest_random() % (test_buf_size - len + 1);
226
227 dmatest_init_srcbuf(thread->srcbuf, src_off, len);
228 dmatest_init_dstbuf(thread->dstbuf, dst_off, len);
229
230 cookie = dma_async_memcpy_buf_to_buf(chan,
231 thread->dstbuf + dst_off,
232 thread->srcbuf + src_off,
233 len);
234 if (dma_submit_error(cookie)) {
235 pr_warning("%s: #%u: submit error %d with src_off=0x%x "
236 "dst_off=0x%x len=0x%x\n",
237 thread_name, total_tests - 1, cookie,
238 src_off, dst_off, len);
239 msleep(100);
240 failed_tests++;
241 continue;
242 }
243 dma_async_memcpy_issue_pending(chan);
244
245 do {
246 msleep(1);
247 status = dma_async_memcpy_complete(
248 chan, cookie, NULL, NULL);
249 } while (status == DMA_IN_PROGRESS);
250
251 if (status == DMA_ERROR) {
252 pr_warning("%s: #%u: error during copy\n",
253 thread_name, total_tests - 1);
254 failed_tests++;
255 continue;
256 }
257
258 error_count = 0;
259
260 pr_debug("%s: verifying source buffer...\n", thread_name);
261 error_count += dmatest_verify(thread->srcbuf, 0, src_off,
262 0, PATTERN_SRC, true);
263 error_count += dmatest_verify(thread->srcbuf, src_off,
264 src_off + len, src_off,
265 PATTERN_SRC | PATTERN_COPY, true);
266 error_count += dmatest_verify(thread->srcbuf, src_off + len,
267 test_buf_size, src_off + len,
268 PATTERN_SRC, true);
269
270 pr_debug("%s: verifying dest buffer...\n",
271 thread->task->comm);
272 error_count += dmatest_verify(thread->dstbuf, 0, dst_off,
273 0, PATTERN_DST, false);
274 error_count += dmatest_verify(thread->dstbuf, dst_off,
275 dst_off + len, src_off,
276 PATTERN_SRC | PATTERN_COPY, false);
277 error_count += dmatest_verify(thread->dstbuf, dst_off + len,
278 test_buf_size, dst_off + len,
279 PATTERN_DST, false);
280
281 if (error_count) {
282 pr_warning("%s: #%u: %u errors with "
283 "src_off=0x%x dst_off=0x%x len=0x%x\n",
284 thread_name, total_tests - 1, error_count,
285 src_off, dst_off, len);
286 failed_tests++;
287 } else {
288 pr_debug("%s: #%u: No errors with "
289 "src_off=0x%x dst_off=0x%x len=0x%x\n",
290 thread_name, total_tests - 1,
291 src_off, dst_off, len);
292 }
293 }
294
295 ret = 0;
296 dma_chan_put(chan);
297 kfree(thread->dstbuf);
298err_dstbuf:
299 kfree(thread->srcbuf);
300err_srcbuf:
301 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
302 thread_name, total_tests, failed_tests, ret);
303 return ret;
304}
305
306static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
307{
308 struct dmatest_thread *thread;
309 struct dmatest_thread *_thread;
310 int ret;
311
312 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
313 ret = kthread_stop(thread->task);
314 pr_debug("dmatest: thread %s exited with status %d\n",
315 thread->task->comm, ret);
316 list_del(&thread->node);
317 kfree(thread);
318 }
319 kfree(dtc);
320}
321
322static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
323{
324 struct dmatest_chan *dtc;
325 struct dmatest_thread *thread;
326 unsigned int i;
327
328 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_ATOMIC);
329 if (!dtc) {
330 pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id);
331 return DMA_NAK;
332 }
333
334 dtc->chan = chan;
335 INIT_LIST_HEAD(&dtc->threads);
336
337 for (i = 0; i < threads_per_chan; i++) {
338 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
339 if (!thread) {
340 pr_warning("dmatest: No memory for %s-test%u\n",
341 chan->dev.bus_id, i);
342 break;
343 }
344 thread->chan = dtc->chan;
345 smp_wmb();
346 thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
347 chan->dev.bus_id, i);
348 if (IS_ERR(thread->task)) {
349 pr_warning("dmatest: Failed to run thread %s-test%u\n",
350 chan->dev.bus_id, i);
351 kfree(thread);
352 break;
353 }
354
355 /* srcbuf and dstbuf are allocated by the thread itself */
356
357 list_add_tail(&thread->node, &dtc->threads);
358 }
359
360 pr_info("dmatest: Started %u threads using %s\n", i, chan->dev.bus_id);
361
362 list_add_tail(&dtc->node, &dmatest_channels);
363 nr_channels++;
364
365 return DMA_ACK;
366}
367
368static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
369{
370 struct dmatest_chan *dtc, *_dtc;
371
372 list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
373 if (dtc->chan == chan) {
374 list_del(&dtc->node);
375 dmatest_cleanup_channel(dtc);
376 pr_debug("dmatest: lost channel %s\n",
377 chan->dev.bus_id);
378 return DMA_ACK;
379 }
380 }
381
382 return DMA_DUP;
383}
384
385/*
386 * Start testing threads as new channels are assigned to us, and kill
387 * them when the channels go away.
388 *
389 * When we unregister the client, all channels are removed so this
390 * will also take care of cleaning things up when the module is
391 * unloaded.
392 */
393static enum dma_state_client
394dmatest_event(struct dma_client *client, struct dma_chan *chan,
395 enum dma_state state)
396{
397 enum dma_state_client ack = DMA_NAK;
398
399 switch (state) {
400 case DMA_RESOURCE_AVAILABLE:
401 if (!dmatest_match_channel(chan)
402 || !dmatest_match_device(chan->device))
403 ack = DMA_DUP;
404 else if (max_channels && nr_channels >= max_channels)
405 ack = DMA_NAK;
406 else
407 ack = dmatest_add_channel(chan);
408 break;
409
410 case DMA_RESOURCE_REMOVED:
411 ack = dmatest_remove_channel(chan);
412 break;
413
414 default:
415 pr_info("dmatest: Unhandled event %u (%s)\n",
416 state, chan->dev.bus_id);
417 break;
418 }
419
420 return ack;
421}
422
423static struct dma_client dmatest_client = {
424 .event_callback = dmatest_event,
425};
426
427static int __init dmatest_init(void)
428{
429 dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask);
430 dma_async_client_register(&dmatest_client);
431 dma_async_client_chan_request(&dmatest_client);
432
433 return 0;
434}
435module_init(dmatest_init);
436
437static void __exit dmatest_exit(void)
438{
439 dma_async_client_unregister(&dmatest_client);
440}
441module_exit(dmatest_exit);
442
443MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
444MODULE_LICENSE("GPL v2");
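dmatest encodes, in every byte of its buffers, which buffer it belongs to (bit 7), whether the byte lies in the region to be copied or overwritten (bits 6 and 5), and a 5-bit inverted counter; this is exactly what dmatest_verify() checks after each transfer. A small sketch of the expected source-buffer byte for a copy window starting at src_off, derived from dmatest_init_srcbuf() above (the helper itself is illustrative):

    #include <linux/types.h>

    /* Pattern macros repeated from dmatest.c above. */
    #define PATTERN_SRC          0x80
    #define PATTERN_COPY         0x40
    #define PATTERN_COUNT_MASK   0x1f

    static inline u8 expected_src_byte(unsigned int i, unsigned int src_off,
                                       unsigned int len)
    {
            u8 pattern = PATTERN_SRC;

            if (i >= src_off && i < src_off + len)
                    pattern |= PATTERN_COPY;        /* inside the copied window */

            return pattern | (~i & PATTERN_COUNT_MASK);
    }

The test is steered by the channel=, device=, threads_per_chan= and max_channels= module parameters declared at the top of the file.
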
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
new file mode 100644
index 000000000000..94df91771243
--- /dev/null
+++ b/drivers/dma/dw_dmac.c
@@ -0,0 +1,1122 @@
1/*
2 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
3 * AVR32 systems.)
4 *
5 * Copyright (C) 2007-2008 Atmel Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/clk.h>
12#include <linux/delay.h>
13#include <linux/dmaengine.h>
14#include <linux/dma-mapping.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/mm.h>
19#include <linux/module.h>
20#include <linux/platform_device.h>
21#include <linux/slab.h>
22
23#include "dw_dmac_regs.h"
24
25/*
26 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
27 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
28 * of which use ARM any more). See the "Databook" from Synopsys for
29 * information beyond what licensees probably provide.
30 *
31 * The driver has currently been tested only with the Atmel AT32AP7000,
32 * which does not support descriptor writeback.
33 */
34
35/* NOTE: DMS+SMS is system-specific. We should get this information
36 * from the platform code somehow.
37 */
38#define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \
39 | DWC_CTLL_SRC_MSIZE(0) \
40 | DWC_CTLL_DMS(0) \
41 | DWC_CTLL_SMS(1) \
42 | DWC_CTLL_LLP_D_EN \
43 | DWC_CTLL_LLP_S_EN)
44
45/*
46 * This is configuration-dependent and usually a funny size like 4095.
47 * Let's round it down to the nearest power of two.
48 *
49 * Note that this is a transfer count, i.e. if we transfer 32-bit
50 * words, we can do 8192 bytes per descriptor.
51 *
52 * This parameter is also system-specific.
53 */
54#define DWC_MAX_COUNT 2048U
55
56/*
57 * Number of descriptors to allocate for each channel. This should be
58 * made configurable somehow; preferably, the clients (at least the
59 * ones using slave transfers) should be able to give us a hint.
60 */
61#define NR_DESCS_PER_CHANNEL 64
62
63/*----------------------------------------------------------------------*/
64
65/*
66 * Because we're not relying on writeback from the controller (it may not
67 * even be configured into the core!) we don't need to use dma_pool. These
68 * descriptors -- and associated data -- are cacheable. We do need to make
69 * sure their dcache entries are written back before handing them off to
70 * the controller, though.
71 */
72
73static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
74{
75 return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
76}
77
78static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
79{
80 return list_entry(dwc->queue.next, struct dw_desc, desc_node);
81}
82
83static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
84{
85 struct dw_desc *desc, *_desc;
86 struct dw_desc *ret = NULL;
87 unsigned int i = 0;
88
89 spin_lock_bh(&dwc->lock);
90 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
91 if (async_tx_test_ack(&desc->txd)) {
92 list_del(&desc->desc_node);
93 ret = desc;
94 break;
95 }
96 dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc);
97 i++;
98 }
99 spin_unlock_bh(&dwc->lock);
100
101 dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i);
102
103 return ret;
104}
105
106static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
107{
108 struct dw_desc *child;
109
110 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
111 dma_sync_single_for_cpu(dwc->chan.dev.parent,
112 child->txd.phys, sizeof(child->lli),
113 DMA_TO_DEVICE);
114 dma_sync_single_for_cpu(dwc->chan.dev.parent,
115 desc->txd.phys, sizeof(desc->lli),
116 DMA_TO_DEVICE);
117}
118
119/*
120 * Move a descriptor, including any children, to the free list.
121 * `desc' must not be on any lists.
122 */
123static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
124{
125 if (desc) {
126 struct dw_desc *child;
127
128 dwc_sync_desc_for_cpu(dwc, desc);
129
130 spin_lock_bh(&dwc->lock);
131 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
132 dev_vdbg(&dwc->chan.dev,
133 "moving child desc %p to freelist\n",
134 child);
135 list_splice_init(&desc->txd.tx_list, &dwc->free_list);
136 dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc);
137 list_add(&desc->desc_node, &dwc->free_list);
138 spin_unlock_bh(&dwc->lock);
139 }
140}
141
142/* Called with dwc->lock held and bh disabled */
143static dma_cookie_t
144dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
145{
146 dma_cookie_t cookie = dwc->chan.cookie;
147
148 if (++cookie < 0)
149 cookie = 1;
150
151 dwc->chan.cookie = cookie;
152 desc->txd.cookie = cookie;
153
154 return cookie;
155}
156
157/*----------------------------------------------------------------------*/
158
159/* Called with dwc->lock held and bh disabled */
160static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
161{
162 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
163
164 /* ASSERT: channel is idle */
165 if (dma_readl(dw, CH_EN) & dwc->mask) {
166 dev_err(&dwc->chan.dev,
167 "BUG: Attempted to start non-idle channel\n");
168 dev_err(&dwc->chan.dev,
169 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
170 channel_readl(dwc, SAR),
171 channel_readl(dwc, DAR),
172 channel_readl(dwc, LLP),
173 channel_readl(dwc, CTL_HI),
174 channel_readl(dwc, CTL_LO));
175
176 /* The tasklet will hopefully advance the queue... */
177 return;
178 }
179
180 channel_writel(dwc, LLP, first->txd.phys);
181 channel_writel(dwc, CTL_LO,
182 DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
183 channel_writel(dwc, CTL_HI, 0);
184 channel_set_bit(dw, CH_EN, dwc->mask);
185}
186
187/*----------------------------------------------------------------------*/
188
189static void
190dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
191{
192 dma_async_tx_callback callback;
193 void *param;
194 struct dma_async_tx_descriptor *txd = &desc->txd;
195
196 dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie);
197
198 dwc->completed = txd->cookie;
199 callback = txd->callback;
200 param = txd->callback_param;
201
202 dwc_sync_desc_for_cpu(dwc, desc);
203 list_splice_init(&txd->tx_list, &dwc->free_list);
204 list_move(&desc->desc_node, &dwc->free_list);
205
206 /*
207 * We use dma_unmap_page() regardless of how the buffers were
208 * mapped before they were submitted...
209 */
210 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
211 dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len,
212 DMA_FROM_DEVICE);
213 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
214 dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len,
215 DMA_TO_DEVICE);
216
217 /*
218 * The API requires that no submissions are done from a
219 * callback, so we don't need to drop the lock here
220 */
221 if (callback)
222 callback(param);
223}
224
225static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
226{
227 struct dw_desc *desc, *_desc;
228 LIST_HEAD(list);
229
230 if (dma_readl(dw, CH_EN) & dwc->mask) {
231 dev_err(&dwc->chan.dev,
232 "BUG: XFER bit set, but channel not idle!\n");
233
234 /* Try to continue after resetting the channel... */
235 channel_clear_bit(dw, CH_EN, dwc->mask);
236 while (dma_readl(dw, CH_EN) & dwc->mask)
237 cpu_relax();
238 }
239
240 /*
241 * Submit queued descriptors ASAP, i.e. before we go through
242 * the completed ones.
243 */
244 if (!list_empty(&dwc->queue))
245 dwc_dostart(dwc, dwc_first_queued(dwc));
246 list_splice_init(&dwc->active_list, &list);
247 list_splice_init(&dwc->queue, &dwc->active_list);
248
249 list_for_each_entry_safe(desc, _desc, &list, desc_node)
250 dwc_descriptor_complete(dwc, desc);
251}
252
253static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
254{
255 dma_addr_t llp;
256 struct dw_desc *desc, *_desc;
257 struct dw_desc *child;
258 u32 status_xfer;
259
260 /*
261 * Clear block interrupt flag before scanning so that we don't
262 * miss any, and read LLP before RAW_XFER to ensure it is
263 * valid if we decide to scan the list.
264 */
265 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
266 llp = channel_readl(dwc, LLP);
267 status_xfer = dma_readl(dw, RAW.XFER);
268
269 if (status_xfer & dwc->mask) {
270 /* Everything we've submitted is done */
271 dma_writel(dw, CLEAR.XFER, dwc->mask);
272 dwc_complete_all(dw, dwc);
273 return;
274 }
275
276 dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp);
277
278 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
279 if (desc->lli.llp == llp)
280 /* This one is currently in progress */
281 return;
282
283 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
284 if (child->lli.llp == llp)
285 /* Currently in progress */
286 return;
287
288 /*
289 * No descriptors so far seem to be in progress, i.e.
290 * this one must be done.
291 */
292 dwc_descriptor_complete(dwc, desc);
293 }
294
295 dev_err(&dwc->chan.dev,
296 "BUG: All descriptors done, but channel not idle!\n");
297
298 /* Try to continue after resetting the channel... */
299 channel_clear_bit(dw, CH_EN, dwc->mask);
300 while (dma_readl(dw, CH_EN) & dwc->mask)
301 cpu_relax();
302
303 if (!list_empty(&dwc->queue)) {
304 dwc_dostart(dwc, dwc_first_queued(dwc));
305 list_splice_init(&dwc->queue, &dwc->active_list);
306 }
307}
308
309static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
310{
311 dev_printk(KERN_CRIT, &dwc->chan.dev,
312 " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
313 lli->sar, lli->dar, lli->llp,
314 lli->ctlhi, lli->ctllo);
315}
316
317static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
318{
319 struct dw_desc *bad_desc;
320 struct dw_desc *child;
321
322 dwc_scan_descriptors(dw, dwc);
323
324 /*
325 * The descriptor currently at the head of the active list is
326 * borked. Since we don't have any way to report errors, we'll
327 * just have to scream loudly and try to carry on.
328 */
329 bad_desc = dwc_first_active(dwc);
330 list_del_init(&bad_desc->desc_node);
331 list_splice_init(&dwc->queue, dwc->active_list.prev);
332
333 /* Clear the error flag and try to restart the controller */
334 dma_writel(dw, CLEAR.ERROR, dwc->mask);
335 if (!list_empty(&dwc->active_list))
336 dwc_dostart(dwc, dwc_first_active(dwc));
337
338 /*
339	 * KERN_CRIT may seem harsh, but since this only happens
340 * when someone submits a bad physical address in a
341 * descriptor, we should consider ourselves lucky that the
342 * controller flagged an error instead of scribbling over
343 * random memory locations.
344 */
345 dev_printk(KERN_CRIT, &dwc->chan.dev,
346 "Bad descriptor submitted for DMA!\n");
347 dev_printk(KERN_CRIT, &dwc->chan.dev,
348 " cookie: %d\n", bad_desc->txd.cookie);
349 dwc_dump_lli(dwc, &bad_desc->lli);
350 list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
351 dwc_dump_lli(dwc, &child->lli);
352
353 /* Pretend the descriptor completed successfully */
354 dwc_descriptor_complete(dwc, bad_desc);
355}
356
357static void dw_dma_tasklet(unsigned long data)
358{
359 struct dw_dma *dw = (struct dw_dma *)data;
360 struct dw_dma_chan *dwc;
361 u32 status_block;
362 u32 status_xfer;
363 u32 status_err;
364 int i;
365
366 status_block = dma_readl(dw, RAW.BLOCK);
367	status_xfer = dma_readl(dw, RAW.XFER);
368 status_err = dma_readl(dw, RAW.ERROR);
369
370 dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
371 status_block, status_err);
372
373 for (i = 0; i < dw->dma.chancnt; i++) {
374 dwc = &dw->chan[i];
375 spin_lock(&dwc->lock);
376 if (status_err & (1 << i))
377 dwc_handle_error(dw, dwc);
378 else if ((status_block | status_xfer) & (1 << i))
379 dwc_scan_descriptors(dw, dwc);
380 spin_unlock(&dwc->lock);
381 }
382
383 /*
384 * Re-enable interrupts. Block Complete interrupts are only
385 * enabled if the INT_EN bit in the descriptor is set. This
386 * will trigger a scan before the whole list is done.
387 */
388 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
389 channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
390 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
391}
392
393static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
394{
395 struct dw_dma *dw = dev_id;
396 u32 status;
397
398 dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
399 dma_readl(dw, STATUS_INT));
400
401 /*
402 * Just disable the interrupts. We'll turn them back on in the
403 * softirq handler.
404 */
405 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
406 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
407 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
408
409 status = dma_readl(dw, STATUS_INT);
410 if (status) {
411 dev_err(dw->dma.dev,
412 "BUG: Unexpected interrupts pending: 0x%x\n",
413 status);
414
415 /* Try to recover */
416 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
417 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
418 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
419 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
420 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
421 }
422
423 tasklet_schedule(&dw->tasklet);
424
425 return IRQ_HANDLED;
426}
427
428/*----------------------------------------------------------------------*/
429
430static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
431{
432 struct dw_desc *desc = txd_to_dw_desc(tx);
433 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
434 dma_cookie_t cookie;
435
436 spin_lock_bh(&dwc->lock);
437 cookie = dwc_assign_cookie(dwc, desc);
438
439 /*
440 * REVISIT: We should attempt to chain as many descriptors as
441 * possible, perhaps even appending to those already submitted
442 * for DMA. But this is hard to do in a race-free manner.
443 */
444 if (list_empty(&dwc->active_list)) {
445 dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n",
446 desc->txd.cookie);
447 dwc_dostart(dwc, desc);
448 list_add_tail(&desc->desc_node, &dwc->active_list);
449 } else {
450 dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n",
451 desc->txd.cookie);
452
453 list_add_tail(&desc->desc_node, &dwc->queue);
454 }
455
456 spin_unlock_bh(&dwc->lock);
457
458 return cookie;
459}
460
461static struct dma_async_tx_descriptor *
462dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
463 size_t len, unsigned long flags)
464{
465 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
466 struct dw_desc *desc;
467 struct dw_desc *first;
468 struct dw_desc *prev;
469 size_t xfer_count;
470 size_t offset;
471 unsigned int src_width;
472 unsigned int dst_width;
473 u32 ctllo;
474
475 dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
476 dest, src, len, flags);
477
478 if (unlikely(!len)) {
479 dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n");
480 return NULL;
481 }
482
483 /*
484 * We can be a lot more clever here, but this should take care
485 * of the most common optimization.
486 */
487 if (!((src | dest | len) & 3))
488 src_width = dst_width = 2;
489 else if (!((src | dest | len) & 1))
490 src_width = dst_width = 1;
491 else
492 src_width = dst_width = 0;
493
494 ctllo = DWC_DEFAULT_CTLLO
495 | DWC_CTLL_DST_WIDTH(dst_width)
496 | DWC_CTLL_SRC_WIDTH(src_width)
497 | DWC_CTLL_DST_INC
498 | DWC_CTLL_SRC_INC
499 | DWC_CTLL_FC_M2M;
500 prev = first = NULL;
501
502 for (offset = 0; offset < len; offset += xfer_count << src_width) {
503 xfer_count = min_t(size_t, (len - offset) >> src_width,
504 DWC_MAX_COUNT);
505
506 desc = dwc_desc_get(dwc);
507 if (!desc)
508 goto err_desc_get;
509
510 desc->lli.sar = src + offset;
511 desc->lli.dar = dest + offset;
512 desc->lli.ctllo = ctllo;
513 desc->lli.ctlhi = xfer_count;
514
515 if (!first) {
516 first = desc;
517 } else {
518 prev->lli.llp = desc->txd.phys;
519 dma_sync_single_for_device(chan->dev.parent,
520 prev->txd.phys, sizeof(prev->lli),
521 DMA_TO_DEVICE);
522 list_add_tail(&desc->desc_node,
523 &first->txd.tx_list);
524 }
525 prev = desc;
526 }
527
528
529 if (flags & DMA_PREP_INTERRUPT)
530 /* Trigger interrupt after last block */
531 prev->lli.ctllo |= DWC_CTLL_INT_EN;
532
533 prev->lli.llp = 0;
534 dma_sync_single_for_device(chan->dev.parent,
535 prev->txd.phys, sizeof(prev->lli),
536 DMA_TO_DEVICE);
537
538 first->txd.flags = flags;
539 first->len = len;
540
541 return &first->txd;
542
543err_desc_get:
544 dwc_desc_put(dwc, first);
545 return NULL;
546}
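For reference, a dmaengine client reaches this routine only through the generic operations registered in dw_probe() below. The fragment here is an illustrative sketch, not part of the driver: it assumes the caller already holds a channel and has DMA-mapped dma_src/dma_dest, and my_callback/my_ctx are placeholder names.

	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
			DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;		/* channel ran out of descriptors */

	tx->callback = my_callback;	/* runs from dwc_descriptor_complete() */
	tx->callback_param = my_ctx;
	cookie = tx->tx_submit(tx);	/* ends up in dwc_tx_submit() */
	dev->device_issue_pending(chan);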
547
548static struct dma_async_tx_descriptor *
549dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
550 unsigned int sg_len, enum dma_data_direction direction,
551 unsigned long flags)
552{
553 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
554 struct dw_dma_slave *dws = dwc->dws;
555 struct dw_desc *prev;
556 struct dw_desc *first;
557 u32 ctllo;
558 dma_addr_t reg;
559 unsigned int reg_width;
560 unsigned int mem_width;
561 unsigned int i;
562 struct scatterlist *sg;
563 size_t total_len = 0;
564
565 dev_vdbg(&chan->dev, "prep_dma_slave\n");
566
567 if (unlikely(!dws || !sg_len))
568 return NULL;
569
570 reg_width = dws->slave.reg_width;
571 prev = first = NULL;
572
573 sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction);
574
575 switch (direction) {
576 case DMA_TO_DEVICE:
577 ctllo = (DWC_DEFAULT_CTLLO
578 | DWC_CTLL_DST_WIDTH(reg_width)
579 | DWC_CTLL_DST_FIX
580 | DWC_CTLL_SRC_INC
581 | DWC_CTLL_FC_M2P);
582 reg = dws->slave.tx_reg;
583 for_each_sg(sgl, sg, sg_len, i) {
584 struct dw_desc *desc;
585 u32 len;
586 u32 mem;
587
588 desc = dwc_desc_get(dwc);
589 if (!desc) {
590 dev_err(&chan->dev,
591 "not enough descriptors available\n");
592 goto err_desc_get;
593 }
594
595 mem = sg_phys(sg);
596 len = sg_dma_len(sg);
597 mem_width = 2;
598 if (unlikely(mem & 3 || len & 3))
599 mem_width = 0;
600
601 desc->lli.sar = mem;
602 desc->lli.dar = reg;
603 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
604 desc->lli.ctlhi = len >> mem_width;
605
606 if (!first) {
607 first = desc;
608 } else {
609 prev->lli.llp = desc->txd.phys;
610 dma_sync_single_for_device(chan->dev.parent,
611 prev->txd.phys,
612 sizeof(prev->lli),
613 DMA_TO_DEVICE);
614 list_add_tail(&desc->desc_node,
615 &first->txd.tx_list);
616 }
617 prev = desc;
618 total_len += len;
619 }
620 break;
621 case DMA_FROM_DEVICE:
622 ctllo = (DWC_DEFAULT_CTLLO
623 | DWC_CTLL_SRC_WIDTH(reg_width)
624 | DWC_CTLL_DST_INC
625 | DWC_CTLL_SRC_FIX
626 | DWC_CTLL_FC_P2M);
627
628 reg = dws->slave.rx_reg;
629 for_each_sg(sgl, sg, sg_len, i) {
630 struct dw_desc *desc;
631 u32 len;
632 u32 mem;
633
634 desc = dwc_desc_get(dwc);
635 if (!desc) {
636 dev_err(&chan->dev,
637 "not enough descriptors available\n");
638 goto err_desc_get;
639 }
640
641 mem = sg_phys(sg);
642 len = sg_dma_len(sg);
643 mem_width = 2;
644 if (unlikely(mem & 3 || len & 3))
645 mem_width = 0;
646
647 desc->lli.sar = reg;
648 desc->lli.dar = mem;
649 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
650 desc->lli.ctlhi = len >> reg_width;
651
652 if (!first) {
653 first = desc;
654 } else {
655 prev->lli.llp = desc->txd.phys;
656 dma_sync_single_for_device(chan->dev.parent,
657 prev->txd.phys,
658 sizeof(prev->lli),
659 DMA_TO_DEVICE);
660 list_add_tail(&desc->desc_node,
661 &first->txd.tx_list);
662 }
663 prev = desc;
664 total_len += len;
665 }
666 break;
667 default:
668 return NULL;
669 }
670
671 if (flags & DMA_PREP_INTERRUPT)
672 /* Trigger interrupt after last block */
673 prev->lli.ctllo |= DWC_CTLL_INT_EN;
674
675 prev->lli.llp = 0;
676 dma_sync_single_for_device(chan->dev.parent,
677 prev->txd.phys, sizeof(prev->lli),
678 DMA_TO_DEVICE);
679
680 first->len = total_len;
681
682 return &first->txd;
683
684err_desc_get:
685 dwc_desc_put(dwc, first);
686 return NULL;
687}
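Slave transfers rely on per-peripheral data handed over by the platform code. Only the fields actually dereferenced in this file (slave.tx_reg, slave.rx_reg, slave.reg_width, slave.dma_dev, cfg_hi, cfg_lo) are sketched below, and every value is a made-up placeholder rather than real board data.

	struct dw_dma_slave example_dws = {
		.slave = {
			.tx_reg		= 0xffe00c1c,	/* peripheral TX FIFO address (made up) */
			.rx_reg		= 0xffe00c18,	/* peripheral RX FIFO address (made up) */
			.reg_width	= 2,		/* used as a shift above: 0=byte, 1=halfword, 2=word */
		},
		/* .cfg_hi/.cfg_lo select the hardware handshake interface (bit
		 * layout in <linux/dw_dmac.h>, not shown here); slave.dma_dev
		 * must point at the dw_dmac controller's struct device, as
		 * enforced by the BUG_ON in dwc_alloc_chan_resources(). */
	};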
688
689static void dwc_terminate_all(struct dma_chan *chan)
690{
691 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
692 struct dw_dma *dw = to_dw_dma(chan->device);
693 struct dw_desc *desc, *_desc;
694 LIST_HEAD(list);
695
696 /*
697 * This is only called when something went wrong elsewhere, so
698 * we don't really care about the data. Just disable the
699 * channel. We still have to poll the channel enable bit due
700 * to AHB/HSB limitations.
701 */
702 spin_lock_bh(&dwc->lock);
703
704 channel_clear_bit(dw, CH_EN, dwc->mask);
705
706 while (dma_readl(dw, CH_EN) & dwc->mask)
707 cpu_relax();
708
709 /* active_list entries will end up before queued entries */
710 list_splice_init(&dwc->queue, &list);
711 list_splice_init(&dwc->active_list, &list);
712
713 spin_unlock_bh(&dwc->lock);
714
715 /* Flush all pending and queued descriptors */
716 list_for_each_entry_safe(desc, _desc, &list, desc_node)
717 dwc_descriptor_complete(dwc, desc);
718}
719
720static enum dma_status
721dwc_is_tx_complete(struct dma_chan *chan,
722 dma_cookie_t cookie,
723 dma_cookie_t *done, dma_cookie_t *used)
724{
725 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
726 dma_cookie_t last_used;
727 dma_cookie_t last_complete;
728 int ret;
729
730 last_complete = dwc->completed;
731 last_used = chan->cookie;
732
733 ret = dma_async_is_complete(cookie, last_complete, last_used);
734 if (ret != DMA_SUCCESS) {
735 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
736
737 last_complete = dwc->completed;
738 last_used = chan->cookie;
739
740 ret = dma_async_is_complete(cookie, last_complete, last_used);
741 }
742
743 if (done)
744 *done = last_complete;
745 if (used)
746 *used = last_used;
747
748 return ret;
749}
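Clients normally reach this hook through dma_async_is_tx_complete() rather than calling it directly. A minimal busy-waiting check, assuming cookie came from a submit like the memcpy sketch earlier:

	enum dma_status status;

	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	} while (status == DMA_IN_PROGRESS);

	if (status != DMA_SUCCESS)
		pr_err("DMA transfer did not complete successfully\n");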
750
751static void dwc_issue_pending(struct dma_chan *chan)
752{
753 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
754
755 spin_lock_bh(&dwc->lock);
756 if (!list_empty(&dwc->queue))
757 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
758 spin_unlock_bh(&dwc->lock);
759}
760
761static int dwc_alloc_chan_resources(struct dma_chan *chan,
762 struct dma_client *client)
763{
764 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
765 struct dw_dma *dw = to_dw_dma(chan->device);
766 struct dw_desc *desc;
767 struct dma_slave *slave;
768 struct dw_dma_slave *dws;
769 int i;
770 u32 cfghi;
771 u32 cfglo;
772
773 dev_vdbg(&chan->dev, "alloc_chan_resources\n");
774
775 /* Channels doing slave DMA can only handle one client. */
776 if (dwc->dws || client->slave) {
777 if (chan->client_count)
778 return -EBUSY;
779 }
780
781 /* ASSERT: channel is idle */
782 if (dma_readl(dw, CH_EN) & dwc->mask) {
783 dev_dbg(&chan->dev, "DMA channel not idle?\n");
784 return -EIO;
785 }
786
787 dwc->completed = chan->cookie = 1;
788
789 cfghi = DWC_CFGH_FIFO_MODE;
790 cfglo = 0;
791
792 slave = client->slave;
793 if (slave) {
794 /*
795 * We need controller-specific data to set up slave
796 * transfers.
797 */
798 BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev);
799
800 dws = container_of(slave, struct dw_dma_slave, slave);
801
802 dwc->dws = dws;
803 cfghi = dws->cfg_hi;
804 cfglo = dws->cfg_lo;
805 } else {
806 dwc->dws = NULL;
807 }
808
809 channel_writel(dwc, CFG_LO, cfglo);
810 channel_writel(dwc, CFG_HI, cfghi);
811
812 /*
813 * NOTE: some controllers may have additional features that we
814 * need to initialize here, like "scatter-gather" (which
815 * doesn't mean what you think it means), and status writeback.
816 */
817
818 spin_lock_bh(&dwc->lock);
819 i = dwc->descs_allocated;
820 while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
821 spin_unlock_bh(&dwc->lock);
822
823 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
824 if (!desc) {
825 dev_info(&chan->dev,
826 "only allocated %d descriptors\n", i);
827 spin_lock_bh(&dwc->lock);
828 break;
829 }
830
831 dma_async_tx_descriptor_init(&desc->txd, chan);
832 desc->txd.tx_submit = dwc_tx_submit;
833 desc->txd.flags = DMA_CTRL_ACK;
834 INIT_LIST_HEAD(&desc->txd.tx_list);
835 desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli,
836 sizeof(desc->lli), DMA_TO_DEVICE);
837 dwc_desc_put(dwc, desc);
838
839 spin_lock_bh(&dwc->lock);
840 i = ++dwc->descs_allocated;
841 }
842
843 /* Enable interrupts */
844 channel_set_bit(dw, MASK.XFER, dwc->mask);
845 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
846 channel_set_bit(dw, MASK.ERROR, dwc->mask);
847
848 spin_unlock_bh(&dwc->lock);
849
850 dev_dbg(&chan->dev,
851 "alloc_chan_resources allocated %d descriptors\n", i);
852
853 return i;
854}
855
856static void dwc_free_chan_resources(struct dma_chan *chan)
857{
858 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
859 struct dw_dma *dw = to_dw_dma(chan->device);
860 struct dw_desc *desc, *_desc;
861 LIST_HEAD(list);
862
863 dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n",
864 dwc->descs_allocated);
865
866 /* ASSERT: channel is idle */
867 BUG_ON(!list_empty(&dwc->active_list));
868 BUG_ON(!list_empty(&dwc->queue));
869 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
870
871 spin_lock_bh(&dwc->lock);
872 list_splice_init(&dwc->free_list, &list);
873 dwc->descs_allocated = 0;
874 dwc->dws = NULL;
875
876 /* Disable interrupts */
877 channel_clear_bit(dw, MASK.XFER, dwc->mask);
878 channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
879 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
880
881 spin_unlock_bh(&dwc->lock);
882
883 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
884 dev_vdbg(&chan->dev, " freeing descriptor %p\n", desc);
885 dma_unmap_single(chan->dev.parent, desc->txd.phys,
886 sizeof(desc->lli), DMA_TO_DEVICE);
887 kfree(desc);
888 }
889
890 dev_vdbg(&chan->dev, "free_chan_resources done\n");
891}
892
893/*----------------------------------------------------------------------*/
894
895static void dw_dma_off(struct dw_dma *dw)
896{
897 dma_writel(dw, CFG, 0);
898
899 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
900 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
901 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
902 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
903 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
904
905 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
906 cpu_relax();
907}
908
909static int __init dw_probe(struct platform_device *pdev)
910{
911 struct dw_dma_platform_data *pdata;
912 struct resource *io;
913 struct dw_dma *dw;
914 size_t size;
915 int irq;
916 int err;
917 int i;
918
919 pdata = pdev->dev.platform_data;
920 if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
921 return -EINVAL;
922
923 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
924 if (!io)
925 return -EINVAL;
926
927 irq = platform_get_irq(pdev, 0);
928 if (irq < 0)
929 return irq;
930
931 size = sizeof(struct dw_dma);
932 size += pdata->nr_channels * sizeof(struct dw_dma_chan);
933 dw = kzalloc(size, GFP_KERNEL);
934 if (!dw)
935 return -ENOMEM;
936
937 if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
938 err = -EBUSY;
939 goto err_kfree;
940 }
941
942 memset(dw, 0, sizeof *dw);
943
944 dw->regs = ioremap(io->start, DW_REGLEN);
945 if (!dw->regs) {
946 err = -ENOMEM;
947 goto err_release_r;
948 }
949
950 dw->clk = clk_get(&pdev->dev, "hclk");
951 if (IS_ERR(dw->clk)) {
952 err = PTR_ERR(dw->clk);
953 goto err_clk;
954 }
955 clk_enable(dw->clk);
956
957 /* force dma off, just in case */
958 dw_dma_off(dw);
959
960 err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
961 if (err)
962 goto err_irq;
963
964 platform_set_drvdata(pdev, dw);
965
966 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
967
968 dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
969
970 INIT_LIST_HEAD(&dw->dma.channels);
971 for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
972 struct dw_dma_chan *dwc = &dw->chan[i];
973
974 dwc->chan.device = &dw->dma;
975 dwc->chan.cookie = dwc->completed = 1;
976 dwc->chan.chan_id = i;
977 list_add_tail(&dwc->chan.device_node, &dw->dma.channels);
978
979 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
980 spin_lock_init(&dwc->lock);
981 dwc->mask = 1 << i;
982
983 INIT_LIST_HEAD(&dwc->active_list);
984 INIT_LIST_HEAD(&dwc->queue);
985 INIT_LIST_HEAD(&dwc->free_list);
986
987 channel_clear_bit(dw, CH_EN, dwc->mask);
988 }
989
990 /* Clear/disable all interrupts on all channels. */
991 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
992 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
993 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
994 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
995 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
996
997 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
998 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
999 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1000 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1001 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1002
1003 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1004 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1005 dw->dma.dev = &pdev->dev;
1006 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1007 dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1008
1009 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1010
1011 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1012 dw->dma.device_terminate_all = dwc_terminate_all;
1013
1014 dw->dma.device_is_tx_complete = dwc_is_tx_complete;
1015 dw->dma.device_issue_pending = dwc_issue_pending;
1016
1017 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1018
1019 printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
1020 pdev->dev.bus_id, dw->dma.chancnt);
1021
1022 dma_async_device_register(&dw->dma);
1023
1024 return 0;
1025
1026err_irq:
1027 clk_disable(dw->clk);
1028 clk_put(dw->clk);
1029err_clk:
1030 iounmap(dw->regs);
1031 dw->regs = NULL;
1032err_release_r:
1033	release_mem_region(io->start, DW_REGLEN);
1034err_kfree:
1035 kfree(dw);
1036 return err;
1037}
1038
1039static int __exit dw_remove(struct platform_device *pdev)
1040{
1041 struct dw_dma *dw = platform_get_drvdata(pdev);
1042 struct dw_dma_chan *dwc, *_dwc;
1043 struct resource *io;
1044
1045 dw_dma_off(dw);
1046 dma_async_device_unregister(&dw->dma);
1047
1048 free_irq(platform_get_irq(pdev, 0), dw);
1049 tasklet_kill(&dw->tasklet);
1050
1051 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1052 chan.device_node) {
1053 list_del(&dwc->chan.device_node);
1054 channel_clear_bit(dw, CH_EN, dwc->mask);
1055 }
1056
1057 clk_disable(dw->clk);
1058 clk_put(dw->clk);
1059
1060 iounmap(dw->regs);
1061 dw->regs = NULL;
1062
1063 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1064 release_mem_region(io->start, DW_REGLEN);
1065
1066 kfree(dw);
1067
1068 return 0;
1069}
1070
1071static void dw_shutdown(struct platform_device *pdev)
1072{
1073 struct dw_dma *dw = platform_get_drvdata(pdev);
1074
1075 dw_dma_off(platform_get_drvdata(pdev));
1076 clk_disable(dw->clk);
1077}
1078
1079static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg)
1080{
1081 struct dw_dma *dw = platform_get_drvdata(pdev);
1082
1083 dw_dma_off(platform_get_drvdata(pdev));
1084 clk_disable(dw->clk);
1085 return 0;
1086}
1087
1088static int dw_resume_early(struct platform_device *pdev)
1089{
1090 struct dw_dma *dw = platform_get_drvdata(pdev);
1091
1092 clk_enable(dw->clk);
1093 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1094 return 0;
1095
1096}
1097
1098static struct platform_driver dw_driver = {
1099 .remove = __exit_p(dw_remove),
1100 .shutdown = dw_shutdown,
1101 .suspend_late = dw_suspend_late,
1102 .resume_early = dw_resume_early,
1103 .driver = {
1104 .name = "dw_dmac",
1105 },
1106};
1107
1108static int __init dw_init(void)
1109{
1110 return platform_driver_probe(&dw_driver, dw_probe);
1111}
1112module_init(dw_init);
1113
1114static void __exit dw_exit(void)
1115{
1116 platform_driver_unregister(&dw_driver);
1117}
1118module_exit(dw_exit);
1119
1120MODULE_LICENSE("GPL v2");
1121MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
1122MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
new file mode 100644
index 000000000000..00fdd187bb0c
--- /dev/null
+++ b/drivers/dma/dw_dmac_regs.h
@@ -0,0 +1,225 @@
1/*
2 * Driver for the Synopsys DesignWare AHB DMA Controller
3 *
4 * Copyright (C) 2005-2007 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/dw_dmac.h>
12
13#define DW_DMA_MAX_NR_CHANNELS 8
14
15/*
16 * Redefine this macro to handle differences between 32- and 64-bit
17 * addressing, big vs. little endian, etc.
18 */
19#define DW_REG(name) u32 name; u32 __pad_##name
20
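To read the macro concretely: DW_REG(SAR), for example, expands to a 32-bit register plus explicit padding, so each entry fills the 8-byte slot the hardware register map reserves per register:

	u32 SAR;	/* the register itself (only the low 32 bits are used here) */
	u32 __pad_SAR;	/* padding for the upper half of the 64-bit register slot */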
21/* Hardware register definitions. */
22struct dw_dma_chan_regs {
23 DW_REG(SAR); /* Source Address Register */
24 DW_REG(DAR); /* Destination Address Register */
25 DW_REG(LLP); /* Linked List Pointer */
26 u32 CTL_LO; /* Control Register Low */
27 u32 CTL_HI; /* Control Register High */
28 DW_REG(SSTAT);
29 DW_REG(DSTAT);
30 DW_REG(SSTATAR);
31 DW_REG(DSTATAR);
32 u32 CFG_LO; /* Configuration Register Low */
33 u32 CFG_HI; /* Configuration Register High */
34 DW_REG(SGR);
35 DW_REG(DSR);
36};
37
38struct dw_dma_irq_regs {
39 DW_REG(XFER);
40 DW_REG(BLOCK);
41 DW_REG(SRC_TRAN);
42 DW_REG(DST_TRAN);
43 DW_REG(ERROR);
44};
45
46struct dw_dma_regs {
47 /* per-channel registers */
48 struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS];
49
50 /* irq handling */
51 struct dw_dma_irq_regs RAW; /* r */
52 struct dw_dma_irq_regs STATUS; /* r (raw & mask) */
53 struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */
54 struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */
55
56 DW_REG(STATUS_INT); /* r */
57
58 /* software handshaking */
59 DW_REG(REQ_SRC);
60 DW_REG(REQ_DST);
61 DW_REG(SGL_REQ_SRC);
62 DW_REG(SGL_REQ_DST);
63 DW_REG(LAST_SRC);
64 DW_REG(LAST_DST);
65
66 /* miscellaneous */
67 DW_REG(CFG);
68 DW_REG(CH_EN);
69 DW_REG(ID);
70 DW_REG(TEST);
71
72	/* optional encoded params, 0x3c8..0x3f7 */
73};
74
75/* Bitfields in CTL_LO */
76#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
77#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
78#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4)
79#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */
80#define DWC_CTLL_DST_DEC (1<<7)
81#define DWC_CTLL_DST_FIX (2<<7)
82#define DWC_CTLL_SRC_INC	(0<<9)		/* SAR update/not */
83#define DWC_CTLL_SRC_DEC (1<<9)
84#define DWC_CTLL_SRC_FIX (2<<9)
85#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */
86#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
87#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
88#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
89#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
90#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
91#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
92#define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */
93/* plus 4 transfer types for peripheral-as-flow-controller */
94#define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */
95#define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */
96#define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */
97#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */
98
99/* Bitfields in CTL_HI */
100#define DWC_CTLH_DONE 0x00001000
101#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
102
103/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
104#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
105#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */
106#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
107#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */
108#define DWC_CFGL_MAX_BURST(x) ((x) << 20)
109#define DWC_CFGL_RELOAD_SAR (1 << 30)
110#define DWC_CFGL_RELOAD_DAR (1 << 31)
111
112/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
113#define DWC_CFGH_DS_UPD_EN (1 << 5)
114#define DWC_CFGH_SS_UPD_EN (1 << 6)
115
116/* Bitfields in SGR */
117#define DWC_SGR_SGI(x) ((x) << 0)
118#define DWC_SGR_SGC(x) ((x) << 20)
119
120/* Bitfields in DSR */
121#define DWC_DSR_DSI(x) ((x) << 0)
122#define DWC_DSR_DSC(x) ((x) << 20)
123
124/* Bitfields in CFG */
125#define DW_CFG_DMA_EN (1 << 0)
126
127#define DW_REGLEN 0x400
128
129struct dw_dma_chan {
130 struct dma_chan chan;
131 void __iomem *ch_regs;
132 u8 mask;
133
134 spinlock_t lock;
135
136 /* these other elements are all protected by lock */
137 dma_cookie_t completed;
138 struct list_head active_list;
139 struct list_head queue;
140 struct list_head free_list;
141
142 struct dw_dma_slave *dws;
143
144 unsigned int descs_allocated;
145};
146
147static inline struct dw_dma_chan_regs __iomem *
148__dwc_regs(struct dw_dma_chan *dwc)
149{
150 return dwc->ch_regs;
151}
152
153#define channel_readl(dwc, name) \
154 __raw_readl(&(__dwc_regs(dwc)->name))
155#define channel_writel(dwc, name, val) \
156 __raw_writel((val), &(__dwc_regs(dwc)->name))
157
158static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
159{
160 return container_of(chan, struct dw_dma_chan, chan);
161}
162
163
164struct dw_dma {
165 struct dma_device dma;
166 void __iomem *regs;
167 struct tasklet_struct tasklet;
168 struct clk *clk;
169
170 u8 all_chan_mask;
171
172 struct dw_dma_chan chan[0];
173};
174
175static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
176{
177 return dw->regs;
178}
179
180#define dma_readl(dw, name) \
181 __raw_readl(&(__dw_regs(dw)->name))
182#define dma_writel(dw, name, val) \
183 __raw_writel((val), &(__dw_regs(dw)->name))
184
185#define channel_set_bit(dw, reg, mask) \
186 dma_writel(dw, reg, ((mask) << 8) | (mask))
187#define channel_clear_bit(dw, reg, mask) \
188 dma_writel(dw, reg, ((mask) << 8) | 0)
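The channel-enable and interrupt-mask registers pair each control bit with a write-enable bit one byte up, which is exactly what the shifted mask encodes: only channels whose write-enable bit is set are affected by a write. For channel 2, for instance:

	channel_set_bit(dw, CH_EN, 1 << 2);	/* writes 0x0404: enable channel 2, leave others untouched */
	channel_clear_bit(dw, CH_EN, 1 << 2);	/* writes 0x0400: disable channel 2, leave others untouched */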
189
190static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
191{
192 return container_of(ddev, struct dw_dma, dma);
193}
194
195/* LLI == Linked List Item; a.k.a. DMA block descriptor */
196struct dw_lli {
197 /* values that are not changed by hardware */
198 dma_addr_t sar;
199 dma_addr_t dar;
200 dma_addr_t llp; /* chain to next lli */
201 u32 ctllo;
202 /* values that may get written back: */
203 u32 ctlhi;
204 /* sstat and dstat can snapshot peripheral register state.
205 * silicon config may discard either or both...
206 */
207 u32 sstat;
208 u32 dstat;
209};
210
211struct dw_desc {
212 /* FIRST values the hardware uses */
213 struct dw_lli lli;
214
215 /* THEN values for driver housekeeping */
216 struct list_head desc_node;
217 struct dma_async_tx_descriptor txd;
218 size_t len;
219};
220
221static inline struct dw_desc *
222txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
223{
224 return container_of(txd, struct dw_desc, txd);
225}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 054eabffc185..c0059ca58340 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -366,7 +366,8 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
366 * 366 *
367 * Return - The number of descriptors allocated. 367 * Return - The number of descriptors allocated.
368 */ 368 */
369static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) 369static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
370 struct dma_client *client)
370{ 371{
371 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); 372 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
372 LIST_HEAD(tmp_list); 373 LIST_HEAD(tmp_list);
@@ -809,8 +810,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
809 if (!src) { 810 if (!src) {
810 dev_err(fsl_chan->dev, 811 dev_err(fsl_chan->dev,
811 "selftest: Cannot alloc memory for test!\n"); 812 "selftest: Cannot alloc memory for test!\n");
812 err = -ENOMEM; 813 return -ENOMEM;
813 goto out;
814 } 814 }
815 815
816 dest = src + test_size; 816 dest = src + test_size;
@@ -820,7 +820,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
820 820
821 chan = &fsl_chan->common; 821 chan = &fsl_chan->common;
822 822
823 if (fsl_dma_alloc_chan_resources(chan) < 1) { 823 if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
824 dev_err(fsl_chan->dev, 824 dev_err(fsl_chan->dev,
825 "selftest: Cannot alloc resources for DMA\n"); 825 "selftest: Cannot alloc resources for DMA\n");
826 err = -ENODEV; 826 err = -ENODEV;
@@ -842,13 +842,13 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
842 if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { 842 if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
843 dev_err(fsl_chan->dev, "selftest: Time out!\n"); 843 dev_err(fsl_chan->dev, "selftest: Time out!\n");
844 err = -ENODEV; 844 err = -ENODEV;
845 goto out; 845 goto free_resources;
846 } 846 }
847 847
848 /* Test free and re-alloc channel resources */ 848 /* Test free and re-alloc channel resources */
849 fsl_dma_free_chan_resources(chan); 849 fsl_dma_free_chan_resources(chan);
850 850
851 if (fsl_dma_alloc_chan_resources(chan) < 1) { 851 if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
852 dev_err(fsl_chan->dev, 852 dev_err(fsl_chan->dev,
853 "selftest: Cannot alloc resources for DMA\n"); 853 "selftest: Cannot alloc resources for DMA\n");
854 err = -ENODEV; 854 err = -ENODEV;
@@ -927,8 +927,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
927 if (!new_fsl_chan) { 927 if (!new_fsl_chan) {
928 dev_err(&dev->dev, "No free memory for allocating " 928 dev_err(&dev->dev, "No free memory for allocating "
929 "dma channels!\n"); 929 "dma channels!\n");
930 err = -ENOMEM; 930 return -ENOMEM;
931 goto err;
932 } 931 }
933 932
934 /* get dma channel register base */ 933 /* get dma channel register base */
@@ -936,7 +935,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
936 if (err) { 935 if (err) {
937 dev_err(&dev->dev, "Can't get %s property 'reg'\n", 936 dev_err(&dev->dev, "Can't get %s property 'reg'\n",
938 dev->node->full_name); 937 dev->node->full_name);
939 goto err; 938 goto err_no_reg;
940 } 939 }
941 940
942 new_fsl_chan->feature = *(u32 *)match->data; 941 new_fsl_chan->feature = *(u32 *)match->data;
@@ -958,7 +957,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
958 dev_err(&dev->dev, "There is no %d channel!\n", 957 dev_err(&dev->dev, "There is no %d channel!\n",
959 new_fsl_chan->id); 958 new_fsl_chan->id);
960 err = -EINVAL; 959 err = -EINVAL;
961 goto err; 960 goto err_no_chan;
962 } 961 }
963 fdev->chan[new_fsl_chan->id] = new_fsl_chan; 962 fdev->chan[new_fsl_chan->id] = new_fsl_chan;
964 tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, 963 tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
@@ -997,23 +996,26 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
997 if (err) { 996 if (err) {
998 dev_err(&dev->dev, "DMA channel %s request_irq error " 997 dev_err(&dev->dev, "DMA channel %s request_irq error "
999 "with return %d\n", dev->node->full_name, err); 998 "with return %d\n", dev->node->full_name, err);
1000 goto err; 999 goto err_no_irq;
1001 } 1000 }
1002 } 1001 }
1003 1002
1004 err = fsl_dma_self_test(new_fsl_chan); 1003 err = fsl_dma_self_test(new_fsl_chan);
1005 if (err) 1004 if (err)
1006 goto err; 1005 goto err_self_test;
1007 1006
1008 dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, 1007 dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
1009 match->compatible, new_fsl_chan->irq); 1008 match->compatible, new_fsl_chan->irq);
1010 1009
1011 return 0; 1010 return 0;
1012err: 1011
1013 dma_halt(new_fsl_chan); 1012err_self_test:
1014 iounmap(new_fsl_chan->reg_base);
1015 free_irq(new_fsl_chan->irq, new_fsl_chan); 1013 free_irq(new_fsl_chan->irq, new_fsl_chan);
1014err_no_irq:
1016 list_del(&new_fsl_chan->common.device_node); 1015 list_del(&new_fsl_chan->common.device_node);
1016err_no_chan:
1017 iounmap(new_fsl_chan->reg_base);
1018err_no_reg:
1017 kfree(new_fsl_chan); 1019 kfree(new_fsl_chan);
1018 return err; 1020 return err;
1019} 1021}
@@ -1054,8 +1056,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
1054 fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); 1056 fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
1055 if (!fdev) { 1057 if (!fdev) {
1056 dev_err(&dev->dev, "No enough memory for 'priv'\n"); 1058 dev_err(&dev->dev, "No enough memory for 'priv'\n");
1057 err = -ENOMEM; 1059 return -ENOMEM;
1058 goto err;
1059 } 1060 }
1060 fdev->dev = &dev->dev; 1061 fdev->dev = &dev->dev;
1061 INIT_LIST_HEAD(&fdev->common.channels); 1062 INIT_LIST_HEAD(&fdev->common.channels);
@@ -1065,7 +1066,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
1065 if (err) { 1066 if (err) {
1066 dev_err(&dev->dev, "Can't get %s property 'reg'\n", 1067 dev_err(&dev->dev, "Can't get %s property 'reg'\n",
1067 dev->node->full_name); 1068 dev->node->full_name);
1068 goto err; 1069 goto err_no_reg;
1069 } 1070 }
1070 1071
1071 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " 1072 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
@@ -1103,6 +1104,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
1103 1104
1104err: 1105err:
1105 iounmap(fdev->reg_base); 1106 iounmap(fdev->reg_base);
1107err_no_reg:
1106 kfree(fdev); 1108 kfree(fdev);
1107 return err; 1109 return err;
1108} 1110}
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index 16e0fd8facfb..9b16a3af9a0a 100644
--- a/drivers/dma/ioat.c
+++ b/drivers/dma/ioat.c
@@ -47,6 +47,16 @@ static struct pci_device_id ioat_pci_tbl[] = {
47 47
48 /* I/OAT v2 platforms */ 48 /* I/OAT v2 platforms */
49 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) }, 49 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
50
51 /* I/OAT v3 platforms */
52 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
53 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
54 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
55 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
56 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
57 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
58 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
59 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
50 { 0, } 60 { 0, }
51}; 61};
52 62
@@ -83,6 +93,11 @@ static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase)
83 if (device->dma && ioat_dca_enabled) 93 if (device->dma && ioat_dca_enabled)
84 device->dca = ioat2_dca_init(pdev, iobase); 94 device->dca = ioat2_dca_init(pdev, iobase);
85 break; 95 break;
96 case IOAT_VER_3_0:
97 device->dma = ioat_dma_probe(pdev, iobase);
98 if (device->dma && ioat_dca_enabled)
99 device->dca = ioat3_dca_init(pdev, iobase);
100 break;
86 default: 101 default:
87 err = -ENODEV; 102 err = -ENODEV;
88 break; 103 break;
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c
index 9e922760b7ff..6cf622da0286 100644
--- a/drivers/dma/ioat_dca.c
+++ b/drivers/dma/ioat_dca.c
@@ -37,12 +37,18 @@
37#include "ioatdma_registers.h" 37#include "ioatdma_registers.h"
38 38
39/* 39/*
40 * Bit 16 of a tag map entry is the "valid" bit, if it is set then bits 0:15 40 * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
41 * contain the bit number of the APIC ID to map into the DCA tag. If the valid 41 * contain the bit number of the APIC ID to map into the DCA tag. If the valid
42 * bit is not set, then the value must be 0 or 1 and defines the bit in the tag. 42 * bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
43 */ 43 */
44#define DCA_TAG_MAP_VALID 0x80 44#define DCA_TAG_MAP_VALID 0x80
45 45
46#define DCA3_TAG_MAP_BIT_TO_INV 0x80
47#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
48#define DCA3_TAG_MAP_LITERAL_VAL 0x1
49
50#define DCA_TAG_MAP_MASK 0xDF
51
46/* 52/*
47 * "Legacy" DCA systems do not implement the DCA register set in the 53 * "Legacy" DCA systems do not implement the DCA register set in the
48 * I/OAT device. Software needs direct support for their tag mappings. 54 * I/OAT device. Software needs direct support for their tag mappings.
@@ -95,6 +101,7 @@ struct ioat_dca_slot {
95}; 101};
96 102
97#define IOAT_DCA_MAX_REQ 6 103#define IOAT_DCA_MAX_REQ 6
104#define IOAT3_DCA_MAX_REQ 2
98 105
99struct ioat_dca_priv { 106struct ioat_dca_priv {
100 void __iomem *iobase; 107 void __iomem *iobase;
@@ -171,7 +178,9 @@ static int ioat_dca_remove_requester(struct dca_provider *dca,
171 return -ENODEV; 178 return -ENODEV;
172} 179}
173 180
174static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu) 181static u8 ioat_dca_get_tag(struct dca_provider *dca,
182 struct device *dev,
183 int cpu)
175{ 184{
176 struct ioat_dca_priv *ioatdca = dca_priv(dca); 185 struct ioat_dca_priv *ioatdca = dca_priv(dca);
177 int i, apic_id, bit, value; 186 int i, apic_id, bit, value;
@@ -193,10 +202,26 @@ static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu)
193 return tag; 202 return tag;
194} 203}
195 204
205static int ioat_dca_dev_managed(struct dca_provider *dca,
206 struct device *dev)
207{
208 struct ioat_dca_priv *ioatdca = dca_priv(dca);
209 struct pci_dev *pdev;
210 int i;
211
212 pdev = to_pci_dev(dev);
213 for (i = 0; i < ioatdca->max_requesters; i++) {
214 if (ioatdca->req_slots[i].pdev == pdev)
215 return 1;
216 }
217 return 0;
218}
219
196static struct dca_ops ioat_dca_ops = { 220static struct dca_ops ioat_dca_ops = {
197 .add_requester = ioat_dca_add_requester, 221 .add_requester = ioat_dca_add_requester,
198 .remove_requester = ioat_dca_remove_requester, 222 .remove_requester = ioat_dca_remove_requester,
199 .get_tag = ioat_dca_get_tag, 223 .get_tag = ioat_dca_get_tag,
224 .dev_managed = ioat_dca_dev_managed,
200}; 225};
201 226
202 227
@@ -207,6 +232,8 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
207 u8 *tag_map = NULL; 232 u8 *tag_map = NULL;
208 int i; 233 int i;
209 int err; 234 int err;
235 u8 version;
236 u8 max_requesters;
210 237
211 if (!system_has_dca_enabled(pdev)) 238 if (!system_has_dca_enabled(pdev))
212 return NULL; 239 return NULL;
@@ -237,15 +264,20 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
237 if (tag_map == NULL) 264 if (tag_map == NULL)
238 return NULL; 265 return NULL;
239 266
267 version = readb(iobase + IOAT_VER_OFFSET);
268 if (version == IOAT_VER_3_0)
269 max_requesters = IOAT3_DCA_MAX_REQ;
270 else
271 max_requesters = IOAT_DCA_MAX_REQ;
272
240 dca = alloc_dca_provider(&ioat_dca_ops, 273 dca = alloc_dca_provider(&ioat_dca_ops,
241 sizeof(*ioatdca) + 274 sizeof(*ioatdca) +
242 (sizeof(struct ioat_dca_slot) * IOAT_DCA_MAX_REQ)); 275 (sizeof(struct ioat_dca_slot) * max_requesters));
243 if (!dca) 276 if (!dca)
244 return NULL; 277 return NULL;
245 278
246 ioatdca = dca_priv(dca); 279 ioatdca = dca_priv(dca);
247 ioatdca->max_requesters = IOAT_DCA_MAX_REQ; 280 ioatdca->max_requesters = max_requesters;
248
249 ioatdca->dca_base = iobase + 0x54; 281 ioatdca->dca_base = iobase + 0x54;
250 282
251 /* copy over the APIC ID to DCA tag mapping */ 283 /* copy over the APIC ID to DCA tag mapping */
@@ -323,11 +355,13 @@ static int ioat2_dca_remove_requester(struct dca_provider *dca,
323 return -ENODEV; 355 return -ENODEV;
324} 356}
325 357
326static u8 ioat2_dca_get_tag(struct dca_provider *dca, int cpu) 358static u8 ioat2_dca_get_tag(struct dca_provider *dca,
359 struct device *dev,
360 int cpu)
327{ 361{
328 u8 tag; 362 u8 tag;
329 363
330 tag = ioat_dca_get_tag(dca, cpu); 364 tag = ioat_dca_get_tag(dca, dev, cpu);
331 tag = (~tag) & 0x1F; 365 tag = (~tag) & 0x1F;
332 return tag; 366 return tag;
333} 367}
@@ -336,6 +370,7 @@ static struct dca_ops ioat2_dca_ops = {
336 .add_requester = ioat2_dca_add_requester, 370 .add_requester = ioat2_dca_add_requester,
337 .remove_requester = ioat2_dca_remove_requester, 371 .remove_requester = ioat2_dca_remove_requester,
338 .get_tag = ioat2_dca_get_tag, 372 .get_tag = ioat2_dca_get_tag,
373 .dev_managed = ioat_dca_dev_managed,
339}; 374};
340 375
341static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) 376static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
@@ -425,3 +460,198 @@ struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
425 460
426 return dca; 461 return dca;
427} 462}
463
464static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
465{
466 struct ioat_dca_priv *ioatdca = dca_priv(dca);
467 struct pci_dev *pdev;
468 int i;
469 u16 id;
470 u16 global_req_table;
471
472 /* This implementation only supports PCI-Express */
473 if (dev->bus != &pci_bus_type)
474 return -ENODEV;
475 pdev = to_pci_dev(dev);
476 id = dcaid_from_pcidev(pdev);
477
478 if (ioatdca->requester_count == ioatdca->max_requesters)
479 return -ENODEV;
480
481 for (i = 0; i < ioatdca->max_requesters; i++) {
482 if (ioatdca->req_slots[i].pdev == NULL) {
483 /* found an empty slot */
484 ioatdca->requester_count++;
485 ioatdca->req_slots[i].pdev = pdev;
486 ioatdca->req_slots[i].rid = id;
487 global_req_table =
488 readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
489 writel(id | IOAT_DCA_GREQID_VALID,
490 ioatdca->iobase + global_req_table + (i * 4));
491 return i;
492 }
493 }
494 /* Error, ioatdma->requester_count is out of whack */
495 return -EFAULT;
496}
497
498static int ioat3_dca_remove_requester(struct dca_provider *dca,
499 struct device *dev)
500{
501 struct ioat_dca_priv *ioatdca = dca_priv(dca);
502 struct pci_dev *pdev;
503 int i;
504 u16 global_req_table;
505
506 /* This implementation only supports PCI-Express */
507 if (dev->bus != &pci_bus_type)
508 return -ENODEV;
509 pdev = to_pci_dev(dev);
510
511 for (i = 0; i < ioatdca->max_requesters; i++) {
512 if (ioatdca->req_slots[i].pdev == pdev) {
513 global_req_table =
514 readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
515 writel(0, ioatdca->iobase + global_req_table + (i * 4));
516 ioatdca->req_slots[i].pdev = NULL;
517 ioatdca->req_slots[i].rid = 0;
518 ioatdca->requester_count--;
519 return i;
520 }
521 }
522 return -ENODEV;
523}
524
525static u8 ioat3_dca_get_tag(struct dca_provider *dca,
526 struct device *dev,
527 int cpu)
528{
529 u8 tag;
530
531 struct ioat_dca_priv *ioatdca = dca_priv(dca);
532 int i, apic_id, bit, value;
533 u8 entry;
534
535 tag = 0;
536 apic_id = cpu_physical_id(cpu);
537
538 for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
539 entry = ioatdca->tag_map[i];
540 if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
541 bit = entry &
542 ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
543 value = (apic_id & (1 << bit)) ? 1 : 0;
544 } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
545 bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
546 value = (apic_id & (1 << bit)) ? 0 : 1;
547 } else {
548 value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
549 }
550 tag |= (value << i);
551 }
552
553 return tag;
554}
555
556static struct dca_ops ioat3_dca_ops = {
557 .add_requester = ioat3_dca_add_requester,
558 .remove_requester = ioat3_dca_remove_requester,
559 .get_tag = ioat3_dca_get_tag,
560 .dev_managed = ioat_dca_dev_managed,
561};
562
563static int ioat3_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
564{
565 int slots = 0;
566 u32 req;
567 u16 global_req_table;
568
569 global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
570 if (global_req_table == 0)
571 return 0;
572
573 do {
574 req = readl(iobase + global_req_table + (slots * sizeof(u32)));
575 slots++;
576 } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
577
578 return slots;
579}
580
581struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
582{
583 struct dca_provider *dca;
584 struct ioat_dca_priv *ioatdca;
585 int slots;
586 int i;
587 int err;
588 u16 dca_offset;
589 u16 csi_fsb_control;
590 u16 pcie_control;
591 u8 bit;
592
593 union {
594 u64 full;
595 struct {
596 u32 low;
597 u32 high;
598 };
599 } tag_map;
600
601 if (!system_has_dca_enabled(pdev))
602 return NULL;
603
604 dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
605 if (dca_offset == 0)
606 return NULL;
607
608 slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
609 if (slots == 0)
610 return NULL;
611
612 dca = alloc_dca_provider(&ioat3_dca_ops,
613 sizeof(*ioatdca)
614 + (sizeof(struct ioat_dca_slot) * slots));
615 if (!dca)
616 return NULL;
617
618 ioatdca = dca_priv(dca);
619 ioatdca->iobase = iobase;
620 ioatdca->dca_base = iobase + dca_offset;
621 ioatdca->max_requesters = slots;
622
623 /* some bios might not know to turn these on */
624 csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
625 if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
626 csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
627 writew(csi_fsb_control,
628 ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
629 }
630 pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
631 if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
632 pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
633 writew(pcie_control,
634 ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
635 }
636
637
638 /* TODO version, compatibility and configuration checks */
639
640 /* copy out the APIC to DCA tag map */
641 tag_map.low =
642 readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
643 tag_map.high =
644 readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
645 for (i = 0; i < 8; i++) {
646 bit = tag_map.full >> (8 * i);
647 ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
648 }
649
650 err = register_dca_provider(dca, &pdev->dev);
651 if (err) {
652 free_dca_provider(dca);
653 return NULL;
654 }
655
656 return dca;
657}
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 318e8a22d814..a52156e56886 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -32,6 +32,7 @@
32#include <linux/dmaengine.h> 32#include <linux/dmaengine.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35#include <linux/workqueue.h>
35#include "ioatdma.h" 36#include "ioatdma.h"
36#include "ioatdma_registers.h" 37#include "ioatdma_registers.h"
37#include "ioatdma_hw.h" 38#include "ioatdma_hw.h"
@@ -41,11 +42,23 @@
41#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) 42#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
42#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) 43#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
43 44
45#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
44static int ioat_pending_level = 4; 46static int ioat_pending_level = 4;
45module_param(ioat_pending_level, int, 0644); 47module_param(ioat_pending_level, int, 0644);
46MODULE_PARM_DESC(ioat_pending_level, 48MODULE_PARM_DESC(ioat_pending_level,
47 "high-water mark for pushing ioat descriptors (default: 4)"); 49 "high-water mark for pushing ioat descriptors (default: 4)");
48 50
51#define RESET_DELAY msecs_to_jiffies(100)
52#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000))
53static void ioat_dma_chan_reset_part2(struct work_struct *work);
54static void ioat_dma_chan_watchdog(struct work_struct *work);
55
56/*
57 * workaround for IOAT ver.3.0 null descriptor issue
58 * (channel returns error when size is 0)
59 */
60#define NULL_DESC_BUFFER_SIZE 1
61
49/* internal functions */ 62/* internal functions */
50static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); 63static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
51static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); 64static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
@@ -122,6 +135,38 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
122 int i; 135 int i;
123 struct ioat_dma_chan *ioat_chan; 136 struct ioat_dma_chan *ioat_chan;
124 137
138 /*
139 * IOAT ver.3 workarounds
140 */
141 if (device->version == IOAT_VER_3_0) {
142 u32 chan_err_mask;
143 u16 dev_id;
144 u32 dmauncerrsts;
145
146 /*
147 * Write CHANERRMSK_INT with 3E07h to mask out the errors
148 * that can cause stability issues for IOAT ver.3
149 */
150 chan_err_mask = 0x3E07;
151 pci_write_config_dword(device->pdev,
152 IOAT_PCI_CHANERRMASK_INT_OFFSET,
153 chan_err_mask);
154
155 /*
156 * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
157 * (workaround for spurious config parity error after restart)
158 */
159 pci_read_config_word(device->pdev,
160 IOAT_PCI_DEVICE_ID_OFFSET,
161 &dev_id);
162 if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
163 dmauncerrsts = 0x10;
164 pci_write_config_dword(device->pdev,
165 IOAT_PCI_DMAUNCERRSTS_OFFSET,
166 dmauncerrsts);
167 }
168 }
169
125 device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); 170 device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
126 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); 171 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
127 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); 172 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
@@ -137,6 +182,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
137 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1)); 182 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
138 ioat_chan->xfercap = xfercap; 183 ioat_chan->xfercap = xfercap;
139 ioat_chan->desccount = 0; 184 ioat_chan->desccount = 0;
185 INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
140 if (ioat_chan->device->version != IOAT_VER_1_2) { 186 if (ioat_chan->device->version != IOAT_VER_1_2) {
141 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE 187 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
142 | IOAT_DMA_DCA_ANY_CPU, 188 | IOAT_DMA_DCA_ANY_CPU,
@@ -175,7 +221,7 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
175{ 221{
176 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 222 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
177 223
178 if (ioat_chan->pending != 0) { 224 if (ioat_chan->pending > 0) {
179 spin_lock_bh(&ioat_chan->desc_lock); 225 spin_lock_bh(&ioat_chan->desc_lock);
180 __ioat1_dma_memcpy_issue_pending(ioat_chan); 226 __ioat1_dma_memcpy_issue_pending(ioat_chan);
181 spin_unlock_bh(&ioat_chan->desc_lock); 227 spin_unlock_bh(&ioat_chan->desc_lock);
@@ -194,13 +240,228 @@ static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
194{ 240{
195 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 241 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
196 242
197 if (ioat_chan->pending != 0) { 243 if (ioat_chan->pending > 0) {
198 spin_lock_bh(&ioat_chan->desc_lock); 244 spin_lock_bh(&ioat_chan->desc_lock);
199 __ioat2_dma_memcpy_issue_pending(ioat_chan); 245 __ioat2_dma_memcpy_issue_pending(ioat_chan);
200 spin_unlock_bh(&ioat_chan->desc_lock); 246 spin_unlock_bh(&ioat_chan->desc_lock);
201 } 247 }
202} 248}
203 249
250
251/**
252 * ioat_dma_chan_reset_part2 - reinit the channel after a reset
253 */
254static void ioat_dma_chan_reset_part2(struct work_struct *work)
255{
256 struct ioat_dma_chan *ioat_chan =
257 container_of(work, struct ioat_dma_chan, work.work);
258 struct ioat_desc_sw *desc;
259
260 spin_lock_bh(&ioat_chan->cleanup_lock);
261 spin_lock_bh(&ioat_chan->desc_lock);
262
263 ioat_chan->completion_virt->low = 0;
264 ioat_chan->completion_virt->high = 0;
265 ioat_chan->pending = 0;
266
267 /*
268 * count the descriptors waiting, and be sure to do it
269 * right for both the CB1 line and the CB2 ring
270 */
271 ioat_chan->dmacount = 0;
272 if (ioat_chan->used_desc.prev) {
273 desc = to_ioat_desc(ioat_chan->used_desc.prev);
274 do {
275 ioat_chan->dmacount++;
276 desc = to_ioat_desc(desc->node.next);
277 } while (&desc->node != ioat_chan->used_desc.next);
278 }
279
280 /*
281 * write the new starting descriptor address
282 * this puts channel engine into ARMED state
283 */
284 desc = to_ioat_desc(ioat_chan->used_desc.prev);
285 switch (ioat_chan->device->version) {
286 case IOAT_VER_1_2:
287 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
288 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
289 writel(((u64) desc->async_tx.phys) >> 32,
290 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
291
292 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
293 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
294 break;
295 case IOAT_VER_2_0:
296 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
297 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
298 writel(((u64) desc->async_tx.phys) >> 32,
299 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
300
301 /* tell the engine to go with what's left to be done */
302 writew(ioat_chan->dmacount,
303 ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
304
305 break;
306 }
307 dev_err(&ioat_chan->device->pdev->dev,
308 "chan%d reset - %d descs waiting, %d total desc\n",
309 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
310
311 spin_unlock_bh(&ioat_chan->desc_lock);
312 spin_unlock_bh(&ioat_chan->cleanup_lock);
313}
314
315/**
316 * ioat_dma_reset_channel - restart a channel
317 * @ioat_chan: IOAT DMA channel handle
318 */
319static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
320{
321 u32 chansts, chanerr;
322
323 if (!ioat_chan->used_desc.prev)
324 return;
325
326 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
327 chansts = (ioat_chan->completion_virt->low
328 & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
329 if (chanerr) {
330 dev_err(&ioat_chan->device->pdev->dev,
331 "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
332 chan_num(ioat_chan), chansts, chanerr);
333 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
334 }
335
336 /*
337 * whack it upside the head with a reset
338 * and wait for things to settle out.
339 * force the pending count to a really big negative
340 * to make sure no one forces an issue_pending
341 * while we're waiting.
342 */
343
344 spin_lock_bh(&ioat_chan->desc_lock);
345 ioat_chan->pending = INT_MIN;
346 writeb(IOAT_CHANCMD_RESET,
347 ioat_chan->reg_base
348 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
349 spin_unlock_bh(&ioat_chan->desc_lock);
350
351 /* schedule the 2nd half instead of sleeping a long time */
352 schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
353}
354
355/**
356 * ioat_dma_chan_watchdog - watch for stuck channels
357 */
358static void ioat_dma_chan_watchdog(struct work_struct *work)
359{
360 struct ioatdma_device *device =
361 container_of(work, struct ioatdma_device, work.work);
362 struct ioat_dma_chan *ioat_chan;
363 int i;
364
365 union {
366 u64 full;
367 struct {
368 u32 low;
369 u32 high;
370 };
371 } completion_hw;
372 unsigned long compl_desc_addr_hw;
373
374 for (i = 0; i < device->common.chancnt; i++) {
375 ioat_chan = ioat_lookup_chan_by_index(device, i);
376
377 if (ioat_chan->device->version == IOAT_VER_1_2
378 /* have we started processing anything yet */
379 && ioat_chan->last_completion
380 /* have we completed any since last watchdog cycle? */
381 && (ioat_chan->last_completion ==
382 ioat_chan->watchdog_completion)
383 /* has TCP stuck on one cookie since last watchdog? */
384 && (ioat_chan->watchdog_tcp_cookie ==
385 ioat_chan->watchdog_last_tcp_cookie)
386 && (ioat_chan->watchdog_tcp_cookie !=
387 ioat_chan->completed_cookie)
388 /* is there something in the chain to be processed? */
389 /* CB1 chain always has at least the last one processed */
390 && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
391 && ioat_chan->pending == 0) {
392
393 /*
394 * check CHANSTS register for completed
395 * descriptor address.
 396 * if it is different from the completion writeback,
 397 * is not zero,
 398 * and has changed since the last watchdog,
 399 * we can assume that the channel
 400 * is still working correctly
 401 * and the problem is in the completion writeback:
 402 * update the completion writeback
 403 * with the actual CHANSTS value;
 404 * else
 405 * try resetting the channel
406 */
407
408 completion_hw.low = readl(ioat_chan->reg_base +
409 IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
410 completion_hw.high = readl(ioat_chan->reg_base +
411 IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
412#if (BITS_PER_LONG == 64)
413 compl_desc_addr_hw =
414 completion_hw.full
415 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
416#else
417 compl_desc_addr_hw =
418 completion_hw.low & IOAT_LOW_COMPLETION_MASK;
419#endif
420
421 if ((compl_desc_addr_hw != 0)
422 && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
423 && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
424 ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
425 ioat_chan->completion_virt->low = completion_hw.low;
426 ioat_chan->completion_virt->high = completion_hw.high;
427 } else {
428 ioat_dma_reset_channel(ioat_chan);
429 ioat_chan->watchdog_completion = 0;
430 ioat_chan->last_compl_desc_addr_hw = 0;
431 }
432
433 /*
 434 * for version 2.0, if there are descriptors yet to be processed
 435 * and the last completed descriptor hasn't changed since the last watchdog:
 436 * if they haven't hit the pending level,
 437 * issue the pending to push them through;
 438 * else
 439 * try resetting the channel
440 */
441 } else if (ioat_chan->device->version == IOAT_VER_2_0
442 && ioat_chan->used_desc.prev
443 && ioat_chan->last_completion
444 && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
445
446 if (ioat_chan->pending < ioat_pending_level)
447 ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
448 else {
449 ioat_dma_reset_channel(ioat_chan);
450 ioat_chan->watchdog_completion = 0;
451 }
452 } else {
453 ioat_chan->last_compl_desc_addr_hw = 0;
454 ioat_chan->watchdog_completion
455 = ioat_chan->last_completion;
456 }
457
458 ioat_chan->watchdog_last_tcp_cookie =
459 ioat_chan->watchdog_tcp_cookie;
460 }
461
462 schedule_delayed_work(&device->work, WATCHDOG_DELAY);
463}
464
204static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) 465static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
205{ 466{
206 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); 467 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
@@ -250,6 +511,13 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
250 prev = new; 511 prev = new;
251 } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); 512 } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
252 513
514 if (!new) {
515 dev_err(&ioat_chan->device->pdev->dev,
516 "tx submit failed\n");
517 spin_unlock_bh(&ioat_chan->desc_lock);
518 return -ENOMEM;
519 }
520
253 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 521 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
254 if (new->async_tx.callback) { 522 if (new->async_tx.callback) {
255 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; 523 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
@@ -335,7 +603,14 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
335 desc_count++; 603 desc_count++;
336 } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); 604 } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
337 605
338 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 606 if (!new) {
607 dev_err(&ioat_chan->device->pdev->dev,
608 "tx submit failed\n");
609 spin_unlock_bh(&ioat_chan->desc_lock);
610 return -ENOMEM;
611 }
612
613 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
339 if (new->async_tx.callback) { 614 if (new->async_tx.callback) {
340 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; 615 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
341 if (first != new) { 616 if (first != new) {
@@ -406,6 +681,7 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
406 desc_sw->async_tx.tx_submit = ioat1_tx_submit; 681 desc_sw->async_tx.tx_submit = ioat1_tx_submit;
407 break; 682 break;
408 case IOAT_VER_2_0: 683 case IOAT_VER_2_0:
684 case IOAT_VER_3_0:
409 desc_sw->async_tx.tx_submit = ioat2_tx_submit; 685 desc_sw->async_tx.tx_submit = ioat2_tx_submit;
410 break; 686 break;
411 } 687 }
@@ -452,7 +728,8 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
452 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors 728 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
453 * @chan: the channel to be filled out 729 * @chan: the channel to be filled out
454 */ 730 */
455static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) 731static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
732 struct dma_client *client)
456{ 733{
457 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 734 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
458 struct ioat_desc_sw *desc; 735 struct ioat_desc_sw *desc;
@@ -555,6 +832,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
555 } 832 }
556 break; 833 break;
557 case IOAT_VER_2_0: 834 case IOAT_VER_2_0:
835 case IOAT_VER_3_0:
558 list_for_each_entry_safe(desc, _desc, 836 list_for_each_entry_safe(desc, _desc,
559 ioat_chan->free_desc.next, node) { 837 ioat_chan->free_desc.next, node) {
560 list_del(&desc->node); 838 list_del(&desc->node);
@@ -585,6 +863,10 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
585 ioat_chan->last_completion = ioat_chan->completion_addr = 0; 863 ioat_chan->last_completion = ioat_chan->completion_addr = 0;
586 ioat_chan->pending = 0; 864 ioat_chan->pending = 0;
587 ioat_chan->dmacount = 0; 865 ioat_chan->dmacount = 0;
866 ioat_chan->watchdog_completion = 0;
867 ioat_chan->last_compl_desc_addr_hw = 0;
868 ioat_chan->watchdog_tcp_cookie =
869 ioat_chan->watchdog_last_tcp_cookie = 0;
588} 870}
589 871
590/** 872/**
@@ -640,7 +922,8 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
640 922
641 /* set up the noop descriptor */ 923 /* set up the noop descriptor */
642 noop_desc = to_ioat_desc(ioat_chan->used_desc.next); 924 noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
643 noop_desc->hw->size = 0; 925 /* set size to non-zero value (channel returns error when size is 0) */
926 noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
644 noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; 927 noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
645 noop_desc->hw->src_addr = 0; 928 noop_desc->hw->src_addr = 0;
646 noop_desc->hw->dst_addr = 0; 929 noop_desc->hw->dst_addr = 0;
@@ -690,6 +973,7 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
690 return ioat1_dma_get_next_descriptor(ioat_chan); 973 return ioat1_dma_get_next_descriptor(ioat_chan);
691 break; 974 break;
692 case IOAT_VER_2_0: 975 case IOAT_VER_2_0:
976 case IOAT_VER_3_0:
693 return ioat2_dma_get_next_descriptor(ioat_chan); 977 return ioat2_dma_get_next_descriptor(ioat_chan);
694 break; 978 break;
695 } 979 }
@@ -716,8 +1000,12 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
716 new->src = dma_src; 1000 new->src = dma_src;
717 new->async_tx.flags = flags; 1001 new->async_tx.flags = flags;
718 return &new->async_tx; 1002 return &new->async_tx;
719 } else 1003 } else {
1004 dev_err(&ioat_chan->device->pdev->dev,
1005 "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
1006 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
720 return NULL; 1007 return NULL;
1008 }
721} 1009}
722 1010
723static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( 1011static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
@@ -744,8 +1032,13 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
744 new->src = dma_src; 1032 new->src = dma_src;
745 new->async_tx.flags = flags; 1033 new->async_tx.flags = flags;
746 return &new->async_tx; 1034 return &new->async_tx;
747 } else 1035 } else {
1036 spin_unlock_bh(&ioat_chan->desc_lock);
1037 dev_err(&ioat_chan->device->pdev->dev,
1038 "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
1039 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
748 return NULL; 1040 return NULL;
1041 }
749} 1042}
750 1043
751static void ioat_dma_cleanup_tasklet(unsigned long data) 1044static void ioat_dma_cleanup_tasklet(unsigned long data)
@@ -756,6 +1049,27 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
756 chan->reg_base + IOAT_CHANCTRL_OFFSET); 1049 chan->reg_base + IOAT_CHANCTRL_OFFSET);
757} 1050}
758 1051
1052static void
1053ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
1054{
1055 /*
1056 * yes we are unmapping both _page and _single
1057 * alloc'd regions with unmap_page. Is this
1058 * *really* that bad?
1059 */
1060 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
1061 pci_unmap_page(ioat_chan->device->pdev,
1062 pci_unmap_addr(desc, dst),
1063 pci_unmap_len(desc, len),
1064 PCI_DMA_FROMDEVICE);
1065
1066 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
1067 pci_unmap_page(ioat_chan->device->pdev,
1068 pci_unmap_addr(desc, src),
1069 pci_unmap_len(desc, len),
1070 PCI_DMA_TODEVICE);
1071}
1072
759/** 1073/**
 760 * ioat_dma_memcpy_cleanup - clean up finished descriptors 1074
761 * @chan: ioat channel to be cleaned up 1075 * @chan: ioat channel to be cleaned up
@@ -799,11 +1113,27 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
799 1113
800 if (phys_complete == ioat_chan->last_completion) { 1114 if (phys_complete == ioat_chan->last_completion) {
801 spin_unlock_bh(&ioat_chan->cleanup_lock); 1115 spin_unlock_bh(&ioat_chan->cleanup_lock);
1116 /*
1117 * perhaps we're stuck so hard that the watchdog can't go off?
1118 * try to catch it after 2 seconds
1119 */
1120 if (ioat_chan->device->version != IOAT_VER_3_0) {
1121 if (time_after(jiffies,
1122 ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
1123 ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
1124 ioat_chan->last_completion_time = jiffies;
1125 }
1126 }
802 return; 1127 return;
803 } 1128 }
1129 ioat_chan->last_completion_time = jiffies;
804 1130
805 cookie = 0; 1131 cookie = 0;
806 spin_lock_bh(&ioat_chan->desc_lock); 1132 if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
1133 spin_unlock_bh(&ioat_chan->cleanup_lock);
1134 return;
1135 }
1136
807 switch (ioat_chan->device->version) { 1137 switch (ioat_chan->device->version) {
808 case IOAT_VER_1_2: 1138 case IOAT_VER_1_2:
809 list_for_each_entry_safe(desc, _desc, 1139 list_for_each_entry_safe(desc, _desc,
@@ -816,21 +1146,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
816 */ 1146 */
817 if (desc->async_tx.cookie) { 1147 if (desc->async_tx.cookie) {
818 cookie = desc->async_tx.cookie; 1148 cookie = desc->async_tx.cookie;
819 1149 ioat_dma_unmap(ioat_chan, desc);
820 /*
821 * yes we are unmapping both _page and _single
822 * alloc'd regions with unmap_page. Is this
823 * *really* that bad?
824 */
825 pci_unmap_page(ioat_chan->device->pdev,
826 pci_unmap_addr(desc, dst),
827 pci_unmap_len(desc, len),
828 PCI_DMA_FROMDEVICE);
829 pci_unmap_page(ioat_chan->device->pdev,
830 pci_unmap_addr(desc, src),
831 pci_unmap_len(desc, len),
832 PCI_DMA_TODEVICE);
833
834 if (desc->async_tx.callback) { 1150 if (desc->async_tx.callback) {
835 desc->async_tx.callback(desc->async_tx.callback_param); 1151 desc->async_tx.callback(desc->async_tx.callback_param);
836 desc->async_tx.callback = NULL; 1152 desc->async_tx.callback = NULL;
@@ -862,6 +1178,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
862 } 1178 }
863 break; 1179 break;
864 case IOAT_VER_2_0: 1180 case IOAT_VER_2_0:
1181 case IOAT_VER_3_0:
 865 /* has some other thread already cleaned up? */ 1182
866 if (ioat_chan->used_desc.prev == NULL) 1183 if (ioat_chan->used_desc.prev == NULL)
867 break; 1184 break;
@@ -889,16 +1206,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
889 if (desc->async_tx.cookie) { 1206 if (desc->async_tx.cookie) {
890 cookie = desc->async_tx.cookie; 1207 cookie = desc->async_tx.cookie;
891 desc->async_tx.cookie = 0; 1208 desc->async_tx.cookie = 0;
892 1209 ioat_dma_unmap(ioat_chan, desc);
893 pci_unmap_page(ioat_chan->device->pdev,
894 pci_unmap_addr(desc, dst),
895 pci_unmap_len(desc, len),
896 PCI_DMA_FROMDEVICE);
897 pci_unmap_page(ioat_chan->device->pdev,
898 pci_unmap_addr(desc, src),
899 pci_unmap_len(desc, len),
900 PCI_DMA_TODEVICE);
901
902 if (desc->async_tx.callback) { 1210 if (desc->async_tx.callback) {
903 desc->async_tx.callback(desc->async_tx.callback_param); 1211 desc->async_tx.callback(desc->async_tx.callback_param);
904 desc->async_tx.callback = NULL; 1212 desc->async_tx.callback = NULL;
@@ -943,6 +1251,7 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
943 1251
944 last_used = chan->cookie; 1252 last_used = chan->cookie;
945 last_complete = ioat_chan->completed_cookie; 1253 last_complete = ioat_chan->completed_cookie;
1254 ioat_chan->watchdog_tcp_cookie = cookie;
946 1255
947 if (done) 1256 if (done)
948 *done = last_complete; 1257 *done = last_complete;
@@ -973,10 +1282,19 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
973 spin_lock_bh(&ioat_chan->desc_lock); 1282 spin_lock_bh(&ioat_chan->desc_lock);
974 1283
975 desc = ioat_dma_get_next_descriptor(ioat_chan); 1284 desc = ioat_dma_get_next_descriptor(ioat_chan);
1285
1286 if (!desc) {
1287 dev_err(&ioat_chan->device->pdev->dev,
1288 "Unable to start null desc - get next desc failed\n");
1289 spin_unlock_bh(&ioat_chan->desc_lock);
1290 return;
1291 }
1292
976 desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL 1293 desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
977 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN 1294 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
978 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 1295 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
979 desc->hw->size = 0; 1296 /* set size to non-zero value (channel returns error when size is 0) */
1297 desc->hw->size = NULL_DESC_BUFFER_SIZE;
980 desc->hw->src_addr = 0; 1298 desc->hw->src_addr = 0;
981 desc->hw->dst_addr = 0; 1299 desc->hw->dst_addr = 0;
982 async_tx_ack(&desc->async_tx); 1300 async_tx_ack(&desc->async_tx);
@@ -994,6 +1312,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
994 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); 1312 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
995 break; 1313 break;
996 case IOAT_VER_2_0: 1314 case IOAT_VER_2_0:
1315 case IOAT_VER_3_0:
997 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, 1316 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
998 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); 1317 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
999 writel(((u64) desc->async_tx.phys) >> 32, 1318 writel(((u64) desc->async_tx.phys) >> 32,
@@ -1049,7 +1368,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
1049 dma_chan = container_of(device->common.channels.next, 1368 dma_chan = container_of(device->common.channels.next,
1050 struct dma_chan, 1369 struct dma_chan,
1051 device_node); 1370 device_node);
1052 if (device->common.device_alloc_chan_resources(dma_chan) < 1) { 1371 if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
1053 dev_err(&device->pdev->dev, 1372 dev_err(&device->pdev->dev,
1054 "selftest cannot allocate chan resource\n"); 1373 "selftest cannot allocate chan resource\n");
1055 err = -ENODEV; 1374 err = -ENODEV;
@@ -1312,6 +1631,7 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
1312 ioat1_dma_memcpy_issue_pending; 1631 ioat1_dma_memcpy_issue_pending;
1313 break; 1632 break;
1314 case IOAT_VER_2_0: 1633 case IOAT_VER_2_0:
1634 case IOAT_VER_3_0:
1315 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy; 1635 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
1316 device->common.device_issue_pending = 1636 device->common.device_issue_pending =
1317 ioat2_dma_memcpy_issue_pending; 1637 ioat2_dma_memcpy_issue_pending;
@@ -1331,8 +1651,16 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
1331 if (err) 1651 if (err)
1332 goto err_self_test; 1652 goto err_self_test;
1333 1653
1654 ioat_set_tcp_copy_break(device);
1655
1334 dma_async_device_register(&device->common); 1656 dma_async_device_register(&device->common);
1335 1657
1658 if (device->version != IOAT_VER_3_0) {
1659 INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
1660 schedule_delayed_work(&device->work,
1661 WATCHDOG_DELAY);
1662 }
1663
1336 return device; 1664 return device;
1337 1665
1338err_self_test: 1666err_self_test:
@@ -1365,6 +1693,10 @@ void ioat_dma_remove(struct ioatdma_device *device)
1365 pci_release_regions(device->pdev); 1693 pci_release_regions(device->pdev);
1366 pci_disable_device(device->pdev); 1694 pci_disable_device(device->pdev);
1367 1695
1696 if (device->version != IOAT_VER_3_0) {
1697 cancel_delayed_work(&device->work);
1698 }
1699
1368 list_for_each_entry_safe(chan, _chan, 1700 list_for_each_entry_safe(chan, _chan,
1369 &device->common.channels, device_node) { 1701 &device->common.channels, device_node) {
1370 ioat_chan = to_ioat_chan(chan); 1702 ioat_chan = to_ioat_chan(chan);
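The ioat_dma.c hunks above add a watchdog that distinguishes a broken completion writeback from a genuinely wedged channel. The snippet below is a minimal, self-contained model of that decision for IOAT v1.2 (CB1) channels only; the struct, enum and function names are invented for illustration and do not exist in the driver.

#include <stdbool.h>

enum watchdog_action { WD_NONE, WD_FIX_WRITEBACK, WD_RESET_CHANNEL };

struct chan_snapshot {
	unsigned long last_completion;     /* completion writeback, this pass */
	unsigned long watchdog_completion; /* writeback seen on the last pass */
	int tcp_cookie;                    /* cookie TCP last polled for */
	int last_tcp_cookie;               /* cookie polled on the last pass */
	int completed_cookie;
	bool chain_has_work;               /* used_desc.prev != used_desc.next */
	int pending;
	unsigned long chansts_addr;        /* completed addr read from CHANSTS */
	unsigned long last_chansts_addr;   /* CHANSTS addr seen on the last pass */
};

static enum watchdog_action cb1_watchdog_check(const struct chan_snapshot *c)
{
	/* only suspect a channel that has started work, has made no progress
	 * since the last pass, and has a TCP consumer stuck on one cookie */
	if (!c->last_completion ||
	    c->last_completion != c->watchdog_completion ||
	    c->tcp_cookie != c->last_tcp_cookie ||
	    c->tcp_cookie == c->completed_cookie ||
	    !c->chain_has_work || c->pending != 0)
		return WD_NONE;

	/* CHANSTS is still advancing: only the writeback is broken, repair it */
	if (c->chansts_addr &&
	    c->chansts_addr != c->watchdog_completion &&
	    c->chansts_addr != c->last_chansts_addr)
		return WD_FIX_WRITEBACK;

	return WD_RESET_CHANNEL;
}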
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
index f2c7fedbf009..a3306d0e1372 100644
--- a/drivers/dma/ioatdma.h
+++ b/drivers/dma/ioatdma.h
@@ -27,8 +27,9 @@
27#include <linux/dmapool.h> 27#include <linux/dmapool.h>
28#include <linux/cache.h> 28#include <linux/cache.h>
29#include <linux/pci_ids.h> 29#include <linux/pci_ids.h>
30#include <net/tcp.h>
30 31
31#define IOAT_DMA_VERSION "2.04" 32#define IOAT_DMA_VERSION "3.30"
32 33
33enum ioat_interrupt { 34enum ioat_interrupt {
34 none = 0, 35 none = 0,
@@ -40,6 +41,7 @@ enum ioat_interrupt {
40 41
41#define IOAT_LOW_COMPLETION_MASK 0xffffffc0 42#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
42#define IOAT_DMA_DCA_ANY_CPU ~0 43#define IOAT_DMA_DCA_ANY_CPU ~0
44#define IOAT_WATCHDOG_PERIOD (2 * HZ)
43 45
44 46
45/** 47/**
@@ -62,6 +64,7 @@ struct ioatdma_device {
62 struct dma_device common; 64 struct dma_device common;
63 u8 version; 65 u8 version;
64 enum ioat_interrupt irq_mode; 66 enum ioat_interrupt irq_mode;
67 struct delayed_work work;
65 struct msix_entry msix_entries[4]; 68 struct msix_entry msix_entries[4];
66 struct ioat_dma_chan *idx[4]; 69 struct ioat_dma_chan *idx[4];
67}; 70};
@@ -75,6 +78,7 @@ struct ioat_dma_chan {
75 78
76 dma_cookie_t completed_cookie; 79 dma_cookie_t completed_cookie;
77 unsigned long last_completion; 80 unsigned long last_completion;
81 unsigned long last_completion_time;
78 82
79 size_t xfercap; /* XFERCAP register value expanded out */ 83 size_t xfercap; /* XFERCAP register value expanded out */
80 84
@@ -82,6 +86,10 @@ struct ioat_dma_chan {
82 spinlock_t desc_lock; 86 spinlock_t desc_lock;
83 struct list_head free_desc; 87 struct list_head free_desc;
84 struct list_head used_desc; 88 struct list_head used_desc;
89 unsigned long watchdog_completion;
90 int watchdog_tcp_cookie;
91 u32 watchdog_last_tcp_cookie;
92 struct delayed_work work;
85 93
86 int pending; 94 int pending;
87 int dmacount; 95 int dmacount;
@@ -98,6 +106,7 @@ struct ioat_dma_chan {
98 u32 high; 106 u32 high;
99 }; 107 };
100 } *completion_virt; 108 } *completion_virt;
109 unsigned long last_compl_desc_addr_hw;
101 struct tasklet_struct cleanup_task; 110 struct tasklet_struct cleanup_task;
102}; 111};
103 112
@@ -121,17 +130,34 @@ struct ioat_desc_sw {
121 struct dma_async_tx_descriptor async_tx; 130 struct dma_async_tx_descriptor async_tx;
122}; 131};
123 132
133static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
134{
135 #ifdef CONFIG_NET_DMA
136 switch (dev->version) {
137 case IOAT_VER_1_2:
138 case IOAT_VER_3_0:
139 sysctl_tcp_dma_copybreak = 4096;
140 break;
141 case IOAT_VER_2_0:
142 sysctl_tcp_dma_copybreak = 2048;
143 break;
144 }
145 #endif
146}
147
124#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE) 148#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE)
125struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, 149struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
126 void __iomem *iobase); 150 void __iomem *iobase);
127void ioat_dma_remove(struct ioatdma_device *device); 151void ioat_dma_remove(struct ioatdma_device *device);
128struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); 152struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
129struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); 153struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
154struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
130#else 155#else
131#define ioat_dma_probe(pdev, iobase) NULL 156#define ioat_dma_probe(pdev, iobase) NULL
132#define ioat_dma_remove(device) do { } while (0) 157#define ioat_dma_remove(device) do { } while (0)
133#define ioat_dca_init(pdev, iobase) NULL 158#define ioat_dca_init(pdev, iobase) NULL
134#define ioat2_dca_init(pdev, iobase) NULL 159#define ioat2_dca_init(pdev, iobase) NULL
160#define ioat3_dca_init(pdev, iobase) NULL
135#endif 161#endif
136 162
137#endif /* IOATDMA_H */ 163#endif /* IOATDMA_H */
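The new ioat_set_tcp_copy_break() helper above tunes the NET_DMA copybreak sysctl per hardware version (4096 bytes for v1.2 and v3.0, 2048 for v2.0). Roughly speaking, the TCP receive path consults this threshold and only offloads copies larger than it to the DMA engine; the sketch below illustrates just that gating and is not the actual tcp_recvmsg() code.

#include <stdbool.h>
#include <stddef.h>

/* stand-in for the real sysctl; ioat_set_tcp_copy_break() sets it to
 * 2048 or 4096 depending on the detected device version */
static int sysctl_tcp_dma_copybreak = 4096;

/* illustrative consumer-side check: small copies stay on the CPU,
 * larger ones are worth handing to the DMA engine */
static bool copy_worth_offloading(size_t copy_len)
{
	return copy_len > (size_t)sysctl_tcp_dma_copybreak;
}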
diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h
index dd470fa91d86..f1ae2c776f74 100644
--- a/drivers/dma/ioatdma_hw.h
+++ b/drivers/dma/ioatdma_hw.h
@@ -35,6 +35,7 @@
35#define IOAT_PCI_SID 0x8086 35#define IOAT_PCI_SID 0x8086
36#define IOAT_VER_1_2 0x12 /* Version 1.2 */ 36#define IOAT_VER_1_2 0x12 /* Version 1.2 */
37#define IOAT_VER_2_0 0x20 /* Version 2.0 */ 37#define IOAT_VER_2_0 0x20 /* Version 2.0 */
38#define IOAT_VER_3_0 0x30 /* Version 3.0 */
38 39
39struct ioat_dma_descriptor { 40struct ioat_dma_descriptor {
40 uint32_t size; 41 uint32_t size;
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h
index 9832d7ebd931..827cb503cac6 100644
--- a/drivers/dma/ioatdma_registers.h
+++ b/drivers/dma/ioatdma_registers.h
@@ -25,6 +25,10 @@
25#define IOAT_PCI_DMACTRL_DMA_EN 0x00000001 25#define IOAT_PCI_DMACTRL_DMA_EN 0x00000001
26#define IOAT_PCI_DMACTRL_MSI_EN 0x00000002 26#define IOAT_PCI_DMACTRL_MSI_EN 0x00000002
27 27
28#define IOAT_PCI_DEVICE_ID_OFFSET 0x02
29#define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148
30#define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
31
28/* MMIO Device Registers */ 32/* MMIO Device Registers */
29#define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */ 33#define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */
30 34
@@ -149,7 +153,23 @@
149#define IOAT_DCA_GREQID_VALID 0x20000000 153#define IOAT_DCA_GREQID_VALID 0x20000000
150#define IOAT_DCA_GREQID_LASTID 0x80000000 154#define IOAT_DCA_GREQID_LASTID 0x80000000
151 155
156#define IOAT3_CSI_CAPABILITY_OFFSET 0x08
157#define IOAT3_CSI_CAPABILITY_PREFETCH 0x1
158
159#define IOAT3_PCI_CAPABILITY_OFFSET 0x0A
160#define IOAT3_PCI_CAPABILITY_MEMWR 0x1
161
162#define IOAT3_CSI_CONTROL_OFFSET 0x0C
163#define IOAT3_CSI_CONTROL_PREFETCH 0x1
164
165#define IOAT3_PCI_CONTROL_OFFSET 0x0E
166#define IOAT3_PCI_CONTROL_MEMWR 0x1
167
168#define IOAT3_APICID_TAG_MAP_OFFSET 0x10
169#define IOAT3_APICID_TAG_MAP_OFFSET_LOW 0x10
170#define IOAT3_APICID_TAG_MAP_OFFSET_HIGH 0x14
152 171
172#define IOAT3_DCA_GREQID_OFFSET 0x02
153 173
154#define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */ 174#define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */
155#define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */ 175#define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 0ec0f431e6a1..85bfeba4d85e 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -82,17 +82,24 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
82 struct device *dev = 82 struct device *dev =
83 &iop_chan->device->pdev->dev; 83 &iop_chan->device->pdev->dev;
84 u32 len = unmap->unmap_len; 84 u32 len = unmap->unmap_len;
85 u32 src_cnt = unmap->unmap_src_cnt; 85 enum dma_ctrl_flags flags = desc->async_tx.flags;
86 dma_addr_t addr = iop_desc_get_dest_addr(unmap, 86 u32 src_cnt;
87 iop_chan); 87 dma_addr_t addr;
88 88
89 dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); 89 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
90 while (src_cnt--) { 90 addr = iop_desc_get_dest_addr(unmap, iop_chan);
91 addr = iop_desc_get_src_addr(unmap, 91 dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
92 iop_chan, 92 }
93 src_cnt); 93
94 dma_unmap_page(dev, addr, len, 94 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
95 DMA_TO_DEVICE); 95 src_cnt = unmap->unmap_src_cnt;
96 while (src_cnt--) {
97 addr = iop_desc_get_src_addr(unmap,
98 iop_chan,
99 src_cnt);
100 dma_unmap_page(dev, addr, len,
101 DMA_TO_DEVICE);
102 }
96 } 103 }
97 desc->group_head = NULL; 104 desc->group_head = NULL;
98 } 105 }
@@ -366,8 +373,8 @@ retry:
366 if (!retry++) 373 if (!retry++)
367 goto retry; 374 goto retry;
368 375
369 /* try to free some slots if the allocation fails */ 376 /* perform direct reclaim if the allocation fails */
370 tasklet_schedule(&iop_chan->irq_tasklet); 377 __iop_adma_slot_cleanup(iop_chan);
371 378
372 return NULL; 379 return NULL;
373} 380}
@@ -443,8 +450,18 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
443static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan); 450static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
444static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan); 451static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
445 452
446/* returns the number of allocated descriptors */ 453/**
447static int iop_adma_alloc_chan_resources(struct dma_chan *chan) 454 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
455 * @chan - allocate descriptor resources for this channel
456 * @client - current client requesting the channel be ready for requests
457 *
458 * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To
459 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
 460 * greater than 2x the number of slots needed to satisfy a device->max_xor
461 * request.
 462 */
463static int iop_adma_alloc_chan_resources(struct dma_chan *chan,
464 struct dma_client *client)
448{ 465{
449 char *hw_desc; 466 char *hw_desc;
450 int idx; 467 int idx;
@@ -838,7 +855,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
838 dma_chan = container_of(device->common.channels.next, 855 dma_chan = container_of(device->common.channels.next,
839 struct dma_chan, 856 struct dma_chan,
840 device_node); 857 device_node);
841 if (iop_adma_alloc_chan_resources(dma_chan) < 1) { 858 if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
842 err = -ENODEV; 859 err = -ENODEV;
843 goto out; 860 goto out;
844 } 861 }
@@ -936,7 +953,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
936 dma_chan = container_of(device->common.channels.next, 953 dma_chan = container_of(device->common.channels.next,
937 struct dma_chan, 954 struct dma_chan,
938 device_node); 955 device_node);
939 if (iop_adma_alloc_chan_resources(dma_chan) < 1) { 956 if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
940 err = -ENODEV; 957 err = -ENODEV;
941 goto out; 958 goto out;
942 } 959 }
@@ -1387,6 +1404,8 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1387 spin_unlock_bh(&iop_chan->lock); 1404 spin_unlock_bh(&iop_chan->lock);
1388} 1405}
1389 1406
1407MODULE_ALIAS("platform:iop-adma");
1408
1390static struct platform_driver iop_adma_driver = { 1409static struct platform_driver iop_adma_driver = {
1391 .probe = iop_adma_probe, 1410 .probe = iop_adma_probe,
1392 .remove = iop_adma_remove, 1411 .remove = iop_adma_remove,
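Both the iop-adma cleanup path above and the ioat/mv_xor paths in this series now honor DMA_COMPL_SKIP_SRC_UNMAP and DMA_COMPL_SKIP_DEST_UNMAP. Below is a sketch of the client side, assuming a channel and already-mapped buffers obtained through the usual dmaengine client API; it is not taken from any in-tree user and trims error handling.

#include <linux/dmaengine.h>

/* issue a memcpy while keeping ownership of the DMA mappings: with the
 * SKIP_*_UNMAP flags set, the driver's completion path leaves both the
 * source and destination mapped for further use by the caller */
static dma_cookie_t memcpy_keep_mappings(struct dma_chan *chan,
					 dma_addr_t dst, dma_addr_t src,
					 size_t len)
{
	enum dma_ctrl_flags flags = DMA_COMPL_SKIP_SRC_UNMAP |
				    DMA_COMPL_SKIP_DEST_UNMAP;
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
	if (!tx)
		return -ENOMEM;

	return tx->tx_submit(tx);
}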
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
new file mode 100644
index 000000000000..a4e4494663bf
--- /dev/null
+++ b/drivers/dma/mv_xor.c
@@ -0,0 +1,1375 @@
1/*
2 * offload engine driver for the Marvell XOR engine
3 * Copyright (C) 2007, 2008, Marvell International Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/async_tx.h>
22#include <linux/delay.h>
23#include <linux/dma-mapping.h>
24#include <linux/spinlock.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/memory.h>
28#include <asm/plat-orion/mv_xor.h>
29#include "mv_xor.h"
30
31static void mv_xor_issue_pending(struct dma_chan *chan);
32
33#define to_mv_xor_chan(chan) \
34 container_of(chan, struct mv_xor_chan, common)
35
36#define to_mv_xor_device(dev) \
37 container_of(dev, struct mv_xor_device, common)
38
39#define to_mv_xor_slot(tx) \
40 container_of(tx, struct mv_xor_desc_slot, async_tx)
41
42static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
43{
44 struct mv_xor_desc *hw_desc = desc->hw_desc;
45
46 hw_desc->status = (1 << 31);
47 hw_desc->phy_next_desc = 0;
48 hw_desc->desc_command = (1 << 31);
49}
50
51static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
52{
53 struct mv_xor_desc *hw_desc = desc->hw_desc;
54 return hw_desc->phy_dest_addr;
55}
56
57static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
58 int src_idx)
59{
60 struct mv_xor_desc *hw_desc = desc->hw_desc;
61 return hw_desc->phy_src_addr[src_idx];
62}
63
64
65static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
66 u32 byte_count)
67{
68 struct mv_xor_desc *hw_desc = desc->hw_desc;
69 hw_desc->byte_count = byte_count;
70}
71
72static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
73 u32 next_desc_addr)
74{
75 struct mv_xor_desc *hw_desc = desc->hw_desc;
76 BUG_ON(hw_desc->phy_next_desc);
77 hw_desc->phy_next_desc = next_desc_addr;
78}
79
80static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
81{
82 struct mv_xor_desc *hw_desc = desc->hw_desc;
83 hw_desc->phy_next_desc = 0;
84}
85
86static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
87{
88 desc->value = val;
89}
90
91static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
92 dma_addr_t addr)
93{
94 struct mv_xor_desc *hw_desc = desc->hw_desc;
95 hw_desc->phy_dest_addr = addr;
96}
97
98static int mv_chan_memset_slot_count(size_t len)
99{
100 return 1;
101}
102
103#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
104
105static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
106 int index, dma_addr_t addr)
107{
108 struct mv_xor_desc *hw_desc = desc->hw_desc;
109 hw_desc->phy_src_addr[index] = addr;
110 if (desc->type == DMA_XOR)
111 hw_desc->desc_command |= (1 << index);
112}
113
114static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
115{
116 return __raw_readl(XOR_CURR_DESC(chan));
117}
118
119static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
120 u32 next_desc_addr)
121{
122 __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
123}
124
125static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
126{
127 __raw_writel(desc_addr, XOR_DEST_POINTER(chan));
128}
129
130static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
131{
132 __raw_writel(block_size, XOR_BLOCK_SIZE(chan));
133}
134
135static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
136{
137 __raw_writel(value, XOR_INIT_VALUE_LOW(chan));
138 __raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
139}
140
141static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
142{
143 u32 val = __raw_readl(XOR_INTR_MASK(chan));
144 val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
145 __raw_writel(val, XOR_INTR_MASK(chan));
146}
147
148static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
149{
150 u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
151 intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
152 return intr_cause;
153}
154
155static int mv_is_err_intr(u32 intr_cause)
156{
157 if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
158 return 1;
159
160 return 0;
161}
162
163static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
164{
165 u32 val = (1 << (1 + (chan->idx * 16)));
166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
167 __raw_writel(val, XOR_INTR_CAUSE(chan));
168}
169
170static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
171{
172 u32 val = 0xFFFF0000 >> (chan->idx * 16);
173 __raw_writel(val, XOR_INTR_CAUSE(chan));
174}
175
176static int mv_can_chain(struct mv_xor_desc_slot *desc)
177{
178 struct mv_xor_desc_slot *chain_old_tail = list_entry(
179 desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
180
181 if (chain_old_tail->type != desc->type)
182 return 0;
183 if (desc->type == DMA_MEMSET)
184 return 0;
185
186 return 1;
187}
188
189static void mv_set_mode(struct mv_xor_chan *chan,
190 enum dma_transaction_type type)
191{
192 u32 op_mode;
193 u32 config = __raw_readl(XOR_CONFIG(chan));
194
195 switch (type) {
196 case DMA_XOR:
197 op_mode = XOR_OPERATION_MODE_XOR;
198 break;
199 case DMA_MEMCPY:
200 op_mode = XOR_OPERATION_MODE_MEMCPY;
201 break;
202 case DMA_MEMSET:
203 op_mode = XOR_OPERATION_MODE_MEMSET;
204 break;
205 default:
206 dev_printk(KERN_ERR, chan->device->common.dev,
207 "error: unsupported operation %d.\n",
208 type);
209 BUG();
210 return;
211 }
212
213 config &= ~0x7;
214 config |= op_mode;
215 __raw_writel(config, XOR_CONFIG(chan));
216 chan->current_type = type;
217}
218
219static void mv_chan_activate(struct mv_xor_chan *chan)
220{
221 u32 activation;
222
223 dev_dbg(chan->device->common.dev, " activate chan.\n");
224 activation = __raw_readl(XOR_ACTIVATION(chan));
225 activation |= 0x1;
226 __raw_writel(activation, XOR_ACTIVATION(chan));
227}
228
229static char mv_chan_is_busy(struct mv_xor_chan *chan)
230{
231 u32 state = __raw_readl(XOR_ACTIVATION(chan));
232
233 state = (state >> 4) & 0x3;
234
235 return (state == 1) ? 1 : 0;
236}
237
238static int mv_chan_xor_slot_count(size_t len, int src_cnt)
239{
240 return 1;
241}
242
243/**
244 * mv_xor_free_slots - flags descriptor slots for reuse
245 * @slot: Slot to free
246 * Caller must hold &mv_chan->lock while calling this function
247 */
248static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
249 struct mv_xor_desc_slot *slot)
250{
251 dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
252 __func__, __LINE__, slot);
253
254 slot->slots_per_op = 0;
255
256}
257
258/*
259 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
260 * sw_desc
261 * Caller must hold &mv_chan->lock while calling this function
262 */
263static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
264 struct mv_xor_desc_slot *sw_desc)
265{
266 dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
267 __func__, __LINE__, sw_desc);
268 if (sw_desc->type != mv_chan->current_type)
269 mv_set_mode(mv_chan, sw_desc->type);
270
271 if (sw_desc->type == DMA_MEMSET) {
272 /* for memset requests we need to program the engine, no
273 * descriptors used.
274 */
275 struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
276 mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
277 mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
278 mv_chan_set_value(mv_chan, sw_desc->value);
279 } else {
280 /* set the hardware chain */
281 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
282 }
283 mv_chan->pending += sw_desc->slot_cnt;
284 mv_xor_issue_pending(&mv_chan->common);
285}
286
287static dma_cookie_t
288mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
289 struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
290{
291 BUG_ON(desc->async_tx.cookie < 0);
292
293 if (desc->async_tx.cookie > 0) {
294 cookie = desc->async_tx.cookie;
295
296 /* call the callback (must not sleep or submit new
297 * operations to this channel)
298 */
299 if (desc->async_tx.callback)
300 desc->async_tx.callback(
301 desc->async_tx.callback_param);
302
303 /* unmap dma addresses
304 * (unmap_single vs unmap_page?)
305 */
306 if (desc->group_head && desc->unmap_len) {
307 struct mv_xor_desc_slot *unmap = desc->group_head;
308 struct device *dev =
309 &mv_chan->device->pdev->dev;
310 u32 len = unmap->unmap_len;
311 enum dma_ctrl_flags flags = desc->async_tx.flags;
312 u32 src_cnt;
313 dma_addr_t addr;
314
315 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
316 addr = mv_desc_get_dest_addr(unmap);
317 dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
318 }
319
320 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
321 src_cnt = unmap->unmap_src_cnt;
322 while (src_cnt--) {
323 addr = mv_desc_get_src_addr(unmap,
324 src_cnt);
325 dma_unmap_page(dev, addr, len,
326 DMA_TO_DEVICE);
327 }
328 }
329 desc->group_head = NULL;
330 }
331 }
332
333 /* run dependent operations */
334 async_tx_run_dependencies(&desc->async_tx);
335
336 return cookie;
337}
338
339static int
340mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
341{
342 struct mv_xor_desc_slot *iter, *_iter;
343
344 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
345 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
346 completed_node) {
347
348 if (async_tx_test_ack(&iter->async_tx)) {
349 list_del(&iter->completed_node);
350 mv_xor_free_slots(mv_chan, iter);
351 }
352 }
353 return 0;
354}
355
356static int
357mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
358 struct mv_xor_chan *mv_chan)
359{
360 dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
361 __func__, __LINE__, desc, desc->async_tx.flags);
362 list_del(&desc->chain_node);
363 /* the client is allowed to attach dependent operations
364 * until 'ack' is set
365 */
366 if (!async_tx_test_ack(&desc->async_tx)) {
367 /* move this slot to the completed_slots */
368 list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
369 return 0;
370 }
371
372 mv_xor_free_slots(mv_chan, desc);
373 return 0;
374}
375
376static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
377{
378 struct mv_xor_desc_slot *iter, *_iter;
379 dma_cookie_t cookie = 0;
380 int busy = mv_chan_is_busy(mv_chan);
381 u32 current_desc = mv_chan_get_current_desc(mv_chan);
382 int seen_current = 0;
383
384 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
385 dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
386 mv_xor_clean_completed_slots(mv_chan);
387
388 /* free completed slots from the chain starting with
389 * the oldest descriptor
390 */
391
392 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
393 chain_node) {
394 prefetch(_iter);
395 prefetch(&_iter->async_tx);
396
397 /* do not advance past the current descriptor loaded into the
398 * hardware channel, subsequent descriptors are either in
399 * process or have not been submitted
400 */
401 if (seen_current)
402 break;
403
404 /* stop the search if we reach the current descriptor and the
405 * channel is busy
406 */
407 if (iter->async_tx.phys == current_desc) {
408 seen_current = 1;
409 if (busy)
410 break;
411 }
412
413 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
414
415 if (mv_xor_clean_slot(iter, mv_chan))
416 break;
417 }
418
419 if ((busy == 0) && !list_empty(&mv_chan->chain)) {
420 struct mv_xor_desc_slot *chain_head;
421 chain_head = list_entry(mv_chan->chain.next,
422 struct mv_xor_desc_slot,
423 chain_node);
424
425 mv_xor_start_new_chain(mv_chan, chain_head);
426 }
427
428 if (cookie > 0)
429 mv_chan->completed_cookie = cookie;
430}
431
432static void
433mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
434{
435 spin_lock_bh(&mv_chan->lock);
436 __mv_xor_slot_cleanup(mv_chan);
437 spin_unlock_bh(&mv_chan->lock);
438}
439
440static void mv_xor_tasklet(unsigned long data)
441{
442 struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
443 __mv_xor_slot_cleanup(chan);
444}
445
446static struct mv_xor_desc_slot *
447mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
448 int slots_per_op)
449{
450 struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
451 LIST_HEAD(chain);
452 int slots_found, retry = 0;
453
 454 /* start search from the last allocated descriptor
 455 * if a contiguous allocation cannot be found, start searching
456 * from the beginning of the list
457 */
458retry:
459 slots_found = 0;
460 if (retry == 0)
461 iter = mv_chan->last_used;
462 else
463 iter = list_entry(&mv_chan->all_slots,
464 struct mv_xor_desc_slot,
465 slot_node);
466
467 list_for_each_entry_safe_continue(
468 iter, _iter, &mv_chan->all_slots, slot_node) {
469 prefetch(_iter);
470 prefetch(&_iter->async_tx);
471 if (iter->slots_per_op) {
472 /* give up after finding the first busy slot
473 * on the second pass through the list
474 */
475 if (retry)
476 break;
477
478 slots_found = 0;
479 continue;
480 }
481
482 /* start the allocation if the slot is correctly aligned */
483 if (!slots_found++)
484 alloc_start = iter;
485
486 if (slots_found == num_slots) {
487 struct mv_xor_desc_slot *alloc_tail = NULL;
488 struct mv_xor_desc_slot *last_used = NULL;
489 iter = alloc_start;
490 while (num_slots) {
491 int i;
492
493 /* pre-ack all but the last descriptor */
494 async_tx_ack(&iter->async_tx);
495
496 list_add_tail(&iter->chain_node, &chain);
497 alloc_tail = iter;
498 iter->async_tx.cookie = 0;
499 iter->slot_cnt = num_slots;
500 iter->xor_check_result = NULL;
501 for (i = 0; i < slots_per_op; i++) {
502 iter->slots_per_op = slots_per_op - i;
503 last_used = iter;
504 iter = list_entry(iter->slot_node.next,
505 struct mv_xor_desc_slot,
506 slot_node);
507 }
508 num_slots -= slots_per_op;
509 }
510 alloc_tail->group_head = alloc_start;
511 alloc_tail->async_tx.cookie = -EBUSY;
512 list_splice(&chain, &alloc_tail->async_tx.tx_list);
513 mv_chan->last_used = last_used;
514 mv_desc_clear_next_desc(alloc_start);
515 mv_desc_clear_next_desc(alloc_tail);
516 return alloc_tail;
517 }
518 }
519 if (!retry++)
520 goto retry;
521
522 /* try to free some slots if the allocation fails */
523 tasklet_schedule(&mv_chan->irq_tasklet);
524
525 return NULL;
526}
527
528static dma_cookie_t
529mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
530 struct mv_xor_desc_slot *desc)
531{
532 dma_cookie_t cookie = mv_chan->common.cookie;
533
534 if (++cookie < 0)
535 cookie = 1;
536 mv_chan->common.cookie = desc->async_tx.cookie = cookie;
537 return cookie;
538}
539
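/*
 * Illustrative aside (not part of the driver): mv_desc_assign_cookie()
 * above hands out monotonically increasing cookies and skips non-positive
 * values on wrap, since cookie 0 means "not yet submitted" and negative
 * values are reserved for sentinels such as the -EBUSY assigned by
 * mv_xor_alloc_slots().  The same numbering as a stand-alone helper:
 */
static dma_cookie_t example_next_cookie(dma_cookie_t last)
{
	if (++last < 0)		/* wrapped past the top of the signed range */
		last = 1;	/* 0 and negative cookies stay reserved */
	return last;
}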
540/************************ DMA engine API functions ****************************/
541static dma_cookie_t
542mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
543{
544 struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
545 struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
546 struct mv_xor_desc_slot *grp_start, *old_chain_tail;
547 dma_cookie_t cookie;
548 int new_hw_chain = 1;
549
550 dev_dbg(mv_chan->device->common.dev,
551 "%s sw_desc %p: async_tx %p\n",
552 __func__, sw_desc, &sw_desc->async_tx);
553
554 grp_start = sw_desc->group_head;
555
556 spin_lock_bh(&mv_chan->lock);
557 cookie = mv_desc_assign_cookie(mv_chan, sw_desc);
558
559 if (list_empty(&mv_chan->chain))
560 list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain);
561 else {
562 new_hw_chain = 0;
563
564 old_chain_tail = list_entry(mv_chan->chain.prev,
565 struct mv_xor_desc_slot,
566 chain_node);
567 list_splice_init(&grp_start->async_tx.tx_list,
568 &old_chain_tail->chain_node);
569
570 if (!mv_can_chain(grp_start))
571 goto submit_done;
572
573 dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
574 old_chain_tail->async_tx.phys);
575
576 /* fix up the hardware chain */
577 mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
578
579 /* if the channel is not busy */
580 if (!mv_chan_is_busy(mv_chan)) {
581 u32 current_desc = mv_chan_get_current_desc(mv_chan);
582 /*
 583 * and the current desc is the end of the chain before
584 * the append, then we need to start the channel
585 */
586 if (current_desc == old_chain_tail->async_tx.phys)
587 new_hw_chain = 1;
588 }
589 }
590
591 if (new_hw_chain)
592 mv_xor_start_new_chain(mv_chan, grp_start);
593
594submit_done:
595 spin_unlock_bh(&mv_chan->lock);
596
597 return cookie;
598}
599
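/*
 * Illustrative aside (not part of the driver): the append path in
 * mv_xor_tx_submit() above only restarts the engine when it may have run
 * off the end of the old chain, i.e. the hardware is idle and its current
 * descriptor is still the old tail.  The same test as a stand-alone
 * predicate (names invented; u32 as returned by mv_chan_get_current_desc()):
 */
static int needs_manual_restart(int chan_busy, u32 current_desc,
				u32 old_tail_desc)
{
	/* a busy engine will follow the fixed-up next pointer on its own */
	if (chan_busy)
		return 0;

	/* idle and parked on the old tail: it never saw the appended chain */
	return current_desc == old_tail_desc;
}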
600/* returns the number of allocated descriptors */
601static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
602 struct dma_client *client)
603{
604 char *hw_desc;
605 int idx;
606 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
607 struct mv_xor_desc_slot *slot = NULL;
608 struct mv_xor_platform_data *plat_data =
609 mv_chan->device->pdev->dev.platform_data;
610 int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;
611
612 /* Allocate descriptor slots */
613 idx = mv_chan->slots_allocated;
614 while (idx < num_descs_in_pool) {
615 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
616 if (!slot) {
617 printk(KERN_INFO "MV XOR Channel only initialized"
618 " %d descriptor slots", idx);
619 break;
620 }
621 hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
622 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
623
624 dma_async_tx_descriptor_init(&slot->async_tx, chan);
625 slot->async_tx.tx_submit = mv_xor_tx_submit;
626 INIT_LIST_HEAD(&slot->chain_node);
627 INIT_LIST_HEAD(&slot->slot_node);
628 INIT_LIST_HEAD(&slot->async_tx.tx_list);
629 hw_desc = (char *) mv_chan->device->dma_desc_pool;
630 slot->async_tx.phys =
631 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
632 slot->idx = idx++;
633
634 spin_lock_bh(&mv_chan->lock);
635 mv_chan->slots_allocated = idx;
636 list_add_tail(&slot->slot_node, &mv_chan->all_slots);
637 spin_unlock_bh(&mv_chan->lock);
638 }
639
640 if (mv_chan->slots_allocated && !mv_chan->last_used)
641 mv_chan->last_used = list_entry(mv_chan->all_slots.next,
642 struct mv_xor_desc_slot,
643 slot_node);
644
645 dev_dbg(mv_chan->device->common.dev,
646 "allocated %d descriptor slots last_used: %p\n",
647 mv_chan->slots_allocated, mv_chan->last_used);
648
649 return mv_chan->slots_allocated ? : -ENOMEM;
650}
651
652static struct dma_async_tx_descriptor *
653mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
654 size_t len, unsigned long flags)
655{
656 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
657 struct mv_xor_desc_slot *sw_desc, *grp_start;
658 int slot_cnt;
659
660 dev_dbg(mv_chan->device->common.dev,
661 "%s dest: %x src %x len: %u flags: %ld\n",
662 __func__, dest, src, len, flags);
663 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
664 return NULL;
665
666 BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
667
668 spin_lock_bh(&mv_chan->lock);
669 slot_cnt = mv_chan_memcpy_slot_count(len);
670 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
671 if (sw_desc) {
672 sw_desc->type = DMA_MEMCPY;
673 sw_desc->async_tx.flags = flags;
674 grp_start = sw_desc->group_head;
675 mv_desc_init(grp_start, flags);
676 mv_desc_set_byte_count(grp_start, len);
677 mv_desc_set_dest_addr(sw_desc->group_head, dest);
678 mv_desc_set_src_addr(grp_start, 0, src);
679 sw_desc->unmap_src_cnt = 1;
680 sw_desc->unmap_len = len;
681 }
682 spin_unlock_bh(&mv_chan->lock);
683
684 dev_dbg(mv_chan->device->common.dev,
685 "%s sw_desc %p async_tx %p\n",
686 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
687
688 return sw_desc ? &sw_desc->async_tx : NULL;
689}
690
691static struct dma_async_tx_descriptor *
692mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
693 size_t len, unsigned long flags)
694{
695 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
696 struct mv_xor_desc_slot *sw_desc, *grp_start;
697 int slot_cnt;
698
699 dev_dbg(mv_chan->device->common.dev,
700 "%s dest: %x len: %u flags: %ld\n",
701 __func__, dest, len, flags);
702 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
703 return NULL;
704
705 BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
706
707 spin_lock_bh(&mv_chan->lock);
708 slot_cnt = mv_chan_memset_slot_count(len);
709 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
710 if (sw_desc) {
711 sw_desc->type = DMA_MEMSET;
712 sw_desc->async_tx.flags = flags;
713 grp_start = sw_desc->group_head;
714 mv_desc_init(grp_start, flags);
715 mv_desc_set_byte_count(grp_start, len);
716 mv_desc_set_dest_addr(sw_desc->group_head, dest);
717 mv_desc_set_block_fill_val(grp_start, value);
718 sw_desc->unmap_src_cnt = 1;
719 sw_desc->unmap_len = len;
720 }
721 spin_unlock_bh(&mv_chan->lock);
722 dev_dbg(mv_chan->device->common.dev,
723 "%s sw_desc %p async_tx %p \n",
724 __func__, sw_desc, &sw_desc->async_tx);
725 return sw_desc ? &sw_desc->async_tx : NULL;
726}
727
728static struct dma_async_tx_descriptor *
729mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
730 unsigned int src_cnt, size_t len, unsigned long flags)
731{
732 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
733 struct mv_xor_desc_slot *sw_desc, *grp_start;
734 int slot_cnt;
735
736 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
737 return NULL;
738
739 BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
740
741 dev_dbg(mv_chan->device->common.dev,
742 "%s src_cnt: %d len: dest %x %u flags: %ld\n",
743 __func__, src_cnt, len, dest, flags);
744
745 spin_lock_bh(&mv_chan->lock);
746 slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
747 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
748 if (sw_desc) {
749 sw_desc->type = DMA_XOR;
750 sw_desc->async_tx.flags = flags;
751 grp_start = sw_desc->group_head;
752 mv_desc_init(grp_start, flags);
753 /* the byte count field is the same as in memcpy desc*/
754 mv_desc_set_byte_count(grp_start, len);
755 mv_desc_set_dest_addr(sw_desc->group_head, dest);
756 sw_desc->unmap_src_cnt = src_cnt;
757 sw_desc->unmap_len = len;
758 while (src_cnt--)
759 mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
760 }
761 spin_unlock_bh(&mv_chan->lock);
762 dev_dbg(mv_chan->device->common.dev,
763 "%s sw_desc %p async_tx %p \n",
764 __func__, sw_desc, &sw_desc->async_tx);
765 return sw_desc ? &sw_desc->async_tx : NULL;
766}
767
768static void mv_xor_free_chan_resources(struct dma_chan *chan)
769{
770 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
771 struct mv_xor_desc_slot *iter, *_iter;
772 int in_use_descs = 0;
773
774 mv_xor_slot_cleanup(mv_chan);
775
776 spin_lock_bh(&mv_chan->lock);
777 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
778 chain_node) {
779 in_use_descs++;
780 list_del(&iter->chain_node);
781 }
782 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
783 completed_node) {
784 in_use_descs++;
785 list_del(&iter->completed_node);
786 }
787 list_for_each_entry_safe_reverse(
788 iter, _iter, &mv_chan->all_slots, slot_node) {
789 list_del(&iter->slot_node);
790 kfree(iter);
791 mv_chan->slots_allocated--;
792 }
793 mv_chan->last_used = NULL;
794
795 dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
796 __func__, mv_chan->slots_allocated);
797 spin_unlock_bh(&mv_chan->lock);
798
799 if (in_use_descs)
800 dev_err(mv_chan->device->common.dev,
801 "freeing %d in use descriptors!\n", in_use_descs);
802}
803
804/**
805 * mv_xor_is_complete - poll the status of an XOR transaction
806 * @chan: XOR channel handle
807 * @cookie: XOR transaction identifier
808 */
809static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
810 dma_cookie_t cookie,
811 dma_cookie_t *done,
812 dma_cookie_t *used)
813{
814 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
815 dma_cookie_t last_used;
816 dma_cookie_t last_complete;
817 enum dma_status ret;
818
819 last_used = chan->cookie;
820 last_complete = mv_chan->completed_cookie;
821 mv_chan->is_complete_cookie = cookie;
822 if (done)
823 *done = last_complete;
824 if (used)
825 *used = last_used;
826
827 ret = dma_async_is_complete(cookie, last_complete, last_used);
828 if (ret == DMA_SUCCESS) {
829 mv_xor_clean_completed_slots(mv_chan);
830 return ret;
831 }
832 mv_xor_slot_cleanup(mv_chan);
833
834 last_used = chan->cookie;
835 last_complete = mv_chan->completed_cookie;
836
837 if (done)
838 *done = last_complete;
839 if (used)
840 *used = last_used;
841
842 return dma_async_is_complete(cookie, last_complete, last_used);
843}
844
845static void mv_dump_xor_regs(struct mv_xor_chan *chan)
846{
847 u32 val;
848
849 val = __raw_readl(XOR_CONFIG(chan));
850 dev_printk(KERN_ERR, chan->device->common.dev,
851 "config 0x%08x.\n", val);
852
853 val = __raw_readl(XOR_ACTIVATION(chan));
854 dev_printk(KERN_ERR, chan->device->common.dev,
855 "activation 0x%08x.\n", val);
856
857 val = __raw_readl(XOR_INTR_CAUSE(chan));
858 dev_printk(KERN_ERR, chan->device->common.dev,
859 "intr cause 0x%08x.\n", val);
860
861 val = __raw_readl(XOR_INTR_MASK(chan));
862 dev_printk(KERN_ERR, chan->device->common.dev,
863 "intr mask 0x%08x.\n", val);
864
865 val = __raw_readl(XOR_ERROR_CAUSE(chan));
866 dev_printk(KERN_ERR, chan->device->common.dev,
867 "error cause 0x%08x.\n", val);
868
869 val = __raw_readl(XOR_ERROR_ADDR(chan));
870 dev_printk(KERN_ERR, chan->device->common.dev,
871 "error addr 0x%08x.\n", val);
872}
873
874static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
875 u32 intr_cause)
876{
877 if (intr_cause & (1 << 4)) {
878 dev_dbg(chan->device->common.dev,
879 "ignore this error\n");
880 return;
881 }
882
883 dev_printk(KERN_ERR, chan->device->common.dev,
884 "error on chan %d. intr cause 0x%08x.\n",
885 chan->idx, intr_cause);
886
887 mv_dump_xor_regs(chan);
888 BUG();
889}
890
891static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
892{
893 struct mv_xor_chan *chan = data;
894 u32 intr_cause = mv_chan_get_intr_cause(chan);
895
896 dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
897
898 if (mv_is_err_intr(intr_cause))
899 mv_xor_err_interrupt_handler(chan, intr_cause);
900
901 tasklet_schedule(&chan->irq_tasklet);
902
903 mv_xor_device_clear_eoc_cause(chan);
904
905 return IRQ_HANDLED;
906}
907
908static void mv_xor_issue_pending(struct dma_chan *chan)
909{
910 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
911
912 if (mv_chan->pending >= MV_XOR_THRESHOLD) {
913 mv_chan->pending = 0;
914 mv_chan_activate(mv_chan);
915 }
916}
917
918/*
919 * Perform a transaction to verify the HW works.
920 */
921#define MV_XOR_TEST_SIZE 2000
922
923static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
924{
925 int i;
926 void *src, *dest;
927 dma_addr_t src_dma, dest_dma;
928 struct dma_chan *dma_chan;
929 dma_cookie_t cookie;
930 struct dma_async_tx_descriptor *tx;
931 int err = 0;
932 struct mv_xor_chan *mv_chan;
933
934 src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
935 if (!src)
936 return -ENOMEM;
937
938 dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
939 if (!dest) {
940 kfree(src);
941 return -ENOMEM;
942 }
943
944 /* Fill in src buffer */
945 for (i = 0; i < MV_XOR_TEST_SIZE; i++)
946 ((u8 *) src)[i] = (u8)i;
947
948 /* Start copy, using first DMA channel */
949 dma_chan = container_of(device->common.channels.next,
950 struct dma_chan,
951 device_node);
952 if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
953 err = -ENODEV;
954 goto out;
955 }
956
957 dest_dma = dma_map_single(dma_chan->device->dev, dest,
958 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
959
960 src_dma = dma_map_single(dma_chan->device->dev, src,
961 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
962
963 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
964 MV_XOR_TEST_SIZE, 0);
965 cookie = mv_xor_tx_submit(tx);
966 mv_xor_issue_pending(dma_chan);
967 async_tx_ack(tx);
968 msleep(1);
969
970 if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
971 DMA_SUCCESS) {
972 dev_printk(KERN_ERR, dma_chan->device->dev,
973 "Self-test copy timed out, disabling\n");
974 err = -ENODEV;
975 goto free_resources;
976 }
977
978 mv_chan = to_mv_xor_chan(dma_chan);
979 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
980 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
981 if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
982 dev_printk(KERN_ERR, dma_chan->device->dev,
983 "Self-test copy failed compare, disabling\n");
984 err = -ENODEV;
985 goto free_resources;
986 }
987
988free_resources:
989 mv_xor_free_chan_resources(dma_chan);
990out:
991 kfree(src);
992 kfree(dest);
993 return err;
994}
995
996#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
997static int __devinit
998mv_xor_xor_self_test(struct mv_xor_device *device)
999{
1000 int i, src_idx;
1001 struct page *dest;
1002 struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
1003 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
1004 dma_addr_t dest_dma;
1005 struct dma_async_tx_descriptor *tx;
1006 struct dma_chan *dma_chan;
1007 dma_cookie_t cookie;
1008 u8 cmp_byte = 0;
1009 u32 cmp_word;
1010 int err = 0;
1011 struct mv_xor_chan *mv_chan;
1012
1013 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
1014 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
 1015		if (!xor_srcs[src_idx]) {
 1016			while (src_idx--)
 1017				__free_page(xor_srcs[src_idx]);
 1018			return -ENOMEM;
 1019		}
1020 }
1021
1022 dest = alloc_page(GFP_KERNEL);
 1023	if (!dest) {
 1024		while (src_idx--)
 1025			__free_page(xor_srcs[src_idx]);
 1026		return -ENOMEM;
 1027	}
1028
1029 /* Fill in src buffers */
1030 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
1031 u8 *ptr = page_address(xor_srcs[src_idx]);
1032 for (i = 0; i < PAGE_SIZE; i++)
1033 ptr[i] = (1 << src_idx);
1034 }
1035
1036 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
1037 cmp_byte ^= (u8) (1 << src_idx);
1038
1039 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
1040 (cmp_byte << 8) | cmp_byte;
1041
1042 memset(page_address(dest), 0, PAGE_SIZE);
1043
1044 dma_chan = container_of(device->common.channels.next,
1045 struct dma_chan,
1046 device_node);
1047 if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
1048 err = -ENODEV;
1049 goto out;
1050 }
1051
1052 /* test xor */
1053 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
1054 DMA_FROM_DEVICE);
1055
1056 for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
1057 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
1058 0, PAGE_SIZE, DMA_TO_DEVICE);
1059
1060 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1061 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
1062
1063 cookie = mv_xor_tx_submit(tx);
1064 mv_xor_issue_pending(dma_chan);
1065 async_tx_ack(tx);
1066 msleep(8);
1067
1068 if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
1069 DMA_SUCCESS) {
1070 dev_printk(KERN_ERR, dma_chan->device->dev,
1071 "Self-test xor timed out, disabling\n");
1072 err = -ENODEV;
1073 goto free_resources;
1074 }
1075
1076 mv_chan = to_mv_xor_chan(dma_chan);
1077 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
1078 PAGE_SIZE, DMA_FROM_DEVICE);
1079 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1080 u32 *ptr = page_address(dest);
1081 if (ptr[i] != cmp_word) {
1082 dev_printk(KERN_ERR, dma_chan->device->dev,
1083 "Self-test xor failed compare, disabling."
1084 " index %d, data %x, expected %x\n", i,
1085 ptr[i], cmp_word);
1086 err = -ENODEV;
1087 goto free_resources;
1088 }
1089 }
1090
1091free_resources:
1092 mv_xor_free_chan_resources(dma_chan);
1093out:
1094 src_idx = MV_XOR_NUM_SRC_TEST;
1095 while (src_idx--)
1096 __free_page(xor_srcs[src_idx]);
1097 __free_page(dest);
1098 return err;
1099}
1100
1101static int __devexit mv_xor_remove(struct platform_device *dev)
1102{
1103 struct mv_xor_device *device = platform_get_drvdata(dev);
1104 struct dma_chan *chan, *_chan;
1105 struct mv_xor_chan *mv_chan;
1106 struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
1107
1108 dma_async_device_unregister(&device->common);
1109
1110 dma_free_coherent(&dev->dev, plat_data->pool_size,
1111 device->dma_desc_pool_virt, device->dma_desc_pool);
1112
1113 list_for_each_entry_safe(chan, _chan, &device->common.channels,
1114 device_node) {
1115 mv_chan = to_mv_xor_chan(chan);
1116 list_del(&chan->device_node);
1117 }
1118
1119 return 0;
1120}
1121
1122static int __devinit mv_xor_probe(struct platform_device *pdev)
1123{
1124 int ret = 0;
1125 int irq;
1126 struct mv_xor_device *adev;
1127 struct mv_xor_chan *mv_chan;
1128 struct dma_device *dma_dev;
1129 struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
1130
1131
1132 adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
1133 if (!adev)
1134 return -ENOMEM;
1135
1136 dma_dev = &adev->common;
1137
1138 /* allocate coherent memory for hardware descriptors
1139 * note: writecombine gives slightly better performance, but
1140 * requires that we explicitly flush the writes
1141 */
1142 adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1143 plat_data->pool_size,
1144 &adev->dma_desc_pool,
1145 GFP_KERNEL);
1146 if (!adev->dma_desc_pool_virt)
1147 return -ENOMEM;
1148
1149 adev->id = plat_data->hw_id;
1150
 1151	/* discover transaction capabilities from the platform data */
1152 dma_dev->cap_mask = plat_data->cap_mask;
1153 adev->pdev = pdev;
1154 platform_set_drvdata(pdev, adev);
1155
1156 adev->shared = platform_get_drvdata(plat_data->shared);
1157
1158 INIT_LIST_HEAD(&dma_dev->channels);
1159
1160 /* set base routines */
1161 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1162 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1163 dma_dev->device_is_tx_complete = mv_xor_is_complete;
1164 dma_dev->device_issue_pending = mv_xor_issue_pending;
1165 dma_dev->dev = &pdev->dev;
1166
1167 /* set prep routines based on capability */
1168 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1169 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1170 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1171 dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
1172 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 1173		dma_dev->max_xor = 8;
1174 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1175 }
1176
1177 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1178 if (!mv_chan) {
1179 ret = -ENOMEM;
1180 goto err_free_dma;
1181 }
1182 mv_chan->device = adev;
1183 mv_chan->idx = plat_data->hw_id;
1184 mv_chan->mmr_base = adev->shared->xor_base;
1185
1186 if (!mv_chan->mmr_base) {
1187 ret = -ENOMEM;
1188 goto err_free_dma;
1189 }
1190 tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1191 mv_chan);
1192
1193 /* clear errors before enabling interrupts */
1194 mv_xor_device_clear_err_status(mv_chan);
1195
1196 irq = platform_get_irq(pdev, 0);
1197 if (irq < 0) {
1198 ret = irq;
1199 goto err_free_dma;
1200 }
1201 ret = devm_request_irq(&pdev->dev, irq,
1202 mv_xor_interrupt_handler,
1203 0, dev_name(&pdev->dev), mv_chan);
1204 if (ret)
1205 goto err_free_dma;
1206
1207 mv_chan_unmask_interrupts(mv_chan);
1208
1209 mv_set_mode(mv_chan, DMA_MEMCPY);
1210
1211 spin_lock_init(&mv_chan->lock);
1212 INIT_LIST_HEAD(&mv_chan->chain);
1213 INIT_LIST_HEAD(&mv_chan->completed_slots);
1214 INIT_LIST_HEAD(&mv_chan->all_slots);
1215 INIT_RCU_HEAD(&mv_chan->common.rcu);
1216 mv_chan->common.device = dma_dev;
1217
1218 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
1219
1220 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1221 ret = mv_xor_memcpy_self_test(adev);
1222 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1223 if (ret)
1224 goto err_free_dma;
1225 }
1226
1227 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1228 ret = mv_xor_xor_self_test(adev);
1229 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1230 if (ret)
1231 goto err_free_dma;
1232 }
1233
1234 dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
1235 "( %s%s%s%s)\n",
1236 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1237 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
1238 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1239 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1240
1241 dma_async_device_register(dma_dev);
1242 goto out;
1243
1244 err_free_dma:
1245 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1246 adev->dma_desc_pool_virt, adev->dma_desc_pool);
1247 out:
1248 return ret;
1249}
1250
1251static void
1252mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
1253 struct mbus_dram_target_info *dram)
1254{
1255 void __iomem *base = msp->xor_base;
1256 u32 win_enable = 0;
1257 int i;
1258
1259 for (i = 0; i < 8; i++) {
1260 writel(0, base + WINDOW_BASE(i));
1261 writel(0, base + WINDOW_SIZE(i));
1262 if (i < 4)
1263 writel(0, base + WINDOW_REMAP_HIGH(i));
1264 }
1265
1266 for (i = 0; i < dram->num_cs; i++) {
1267 struct mbus_dram_window *cs = dram->cs + i;
1268
1269 writel((cs->base & 0xffff0000) |
1270 (cs->mbus_attr << 8) |
1271 dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1272 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1273
1274 win_enable |= (1 << i);
1275 win_enable |= 3 << (16 + (2 * i));
1276 }
1277
1278 writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1279 writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1280}
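Worked example with hypothetical values: a single 256 MiB chip-select at base 0x00000000 with mbus_attr 0x0e and mbus_dram_target_id 0x0 makes the loop above program:

	writel(0x00000e00, base + WINDOW_BASE(0));	/* base | (attr << 8) | target id */
	writel(0x0fff0000, base + WINDOW_SIZE(0));	/* (0x10000000 - 1) & 0xffff0000 */
	/* win_enable = (1 << 0) | (3 << 16) = 0x00030001, then written to
	 * both WINDOW_BAR_ENABLE registers
	 */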
1281
1282static struct platform_driver mv_xor_driver = {
1283 .probe = mv_xor_probe,
1284 .remove = mv_xor_remove,
1285 .driver = {
1286 .owner = THIS_MODULE,
1287 .name = MV_XOR_NAME,
1288 },
1289};
1290
1291static int mv_xor_shared_probe(struct platform_device *pdev)
1292{
1293 struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
1294 struct mv_xor_shared_private *msp;
1295 struct resource *res;
1296
1297 dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
1298
1299 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
1300 if (!msp)
1301 return -ENOMEM;
1302
1303 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1304 if (!res)
1305 return -ENODEV;
1306
1307 msp->xor_base = devm_ioremap(&pdev->dev, res->start,
1308 res->end - res->start + 1);
1309 if (!msp->xor_base)
1310 return -EBUSY;
1311
1312 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1313 if (!res)
1314 return -ENODEV;
1315
1316 msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1317 res->end - res->start + 1);
1318 if (!msp->xor_high_base)
1319 return -EBUSY;
1320
1321 platform_set_drvdata(pdev, msp);
1322
1323 /*
1324 * (Re-)program MBUS remapping windows if we are asked to.
1325 */
1326 if (msd != NULL && msd->dram != NULL)
1327 mv_xor_conf_mbus_windows(msp, msd->dram);
1328
1329 return 0;
1330}
1331
1332static int mv_xor_shared_remove(struct platform_device *pdev)
1333{
1334 return 0;
1335}
1336
1337static struct platform_driver mv_xor_shared_driver = {
1338 .probe = mv_xor_shared_probe,
1339 .remove = mv_xor_shared_remove,
1340 .driver = {
1341 .owner = THIS_MODULE,
1342 .name = MV_XOR_SHARED_NAME,
1343 },
1344};
1345
1346
1347static int __init mv_xor_init(void)
1348{
1349 int rc;
1350
1351 rc = platform_driver_register(&mv_xor_shared_driver);
1352 if (!rc) {
1353 rc = platform_driver_register(&mv_xor_driver);
1354 if (rc)
1355 platform_driver_unregister(&mv_xor_shared_driver);
1356 }
1357 return rc;
1358}
1359module_init(mv_xor_init);
1360
1361/* it's currently unsafe to unload this module */
1362#if 0
1363static void __exit mv_xor_exit(void)
1364{
1365 platform_driver_unregister(&mv_xor_driver);
1366 platform_driver_unregister(&mv_xor_shared_driver);
1367 return;
1368}
1369
1370module_exit(mv_xor_exit);
1371#endif
1372
1373MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1374MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1375MODULE_LICENSE("GPL");
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
new file mode 100644
index 000000000000..06cafe1ef521
--- /dev/null
+++ b/drivers/dma/mv_xor.h
@@ -0,0 +1,183 @@
1/*
2 * Copyright (C) 2007, 2008, Marvell International Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software Foundation,
15 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18#ifndef MV_XOR_H
19#define MV_XOR_H
20
21#include <linux/types.h>
22#include <linux/io.h>
23#include <linux/dmaengine.h>
24#include <linux/interrupt.h>
25
26#define USE_TIMER
27#define MV_XOR_SLOT_SIZE 64
28#define MV_XOR_THRESHOLD 1
29
30#define XOR_OPERATION_MODE_XOR 0
31#define XOR_OPERATION_MODE_MEMCPY 2
32#define XOR_OPERATION_MODE_MEMSET 4
33
34#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
35#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))
36#define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4))
37#define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4))
38#define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4))
39#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0)
40#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4)
41
42#define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4))
43#define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4))
44#define XOR_INTR_CAUSE(chan) (chan->mmr_base + 0x30)
45#define XOR_INTR_MASK(chan) (chan->mmr_base + 0x40)
46#define XOR_ERROR_CAUSE(chan) (chan->mmr_base + 0x50)
47#define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60)
48#define XOR_INTR_MASK_VALUE 0x3F5
49
50#define WINDOW_BASE(w) (0x250 + ((w) << 2))
51#define WINDOW_SIZE(w) (0x270 + ((w) << 2))
52#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2))
53#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2))
54
55struct mv_xor_shared_private {
56 void __iomem *xor_base;
57 void __iomem *xor_high_base;
58};
59
60
61/**
62 * struct mv_xor_device - internal representation of a XOR device
63 * @pdev: Platform device
64 * @id: HW XOR Device selector
65 * @dma_desc_pool: base of DMA descriptor region (DMA address)
66 * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
67 * @common: embedded struct dma_device
68 */
69struct mv_xor_device {
70 struct platform_device *pdev;
71 int id;
72 dma_addr_t dma_desc_pool;
73 void *dma_desc_pool_virt;
74 struct dma_device common;
75 struct mv_xor_shared_private *shared;
76};
77
78/**
79 * struct mv_xor_chan - internal representation of a XOR channel
80 * @pending: allows batching of hardware operations
81 * @completed_cookie: identifier for the most recently completed operation
82 * @lock: serializes enqueue/dequeue operations to the descriptors pool
83 * @mmr_base: memory mapped register base
84 * @idx: the index of the xor channel
85 * @chain: device chain view of the descriptors
 86 * @completed_slots: slots completed by HW that still need to be acked
87 * @device: parent device
88 * @common: common dmaengine channel object members
89 * @last_used: place holder for allocation to continue from where it left off
90 * @all_slots: complete domain of slots usable by the channel
91 * @slots_allocated: records the actual size of the descriptor slot pool
92 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
93 */
94struct mv_xor_chan {
95 int pending;
96 dma_cookie_t completed_cookie;
97 spinlock_t lock; /* protects the descriptor slot pool */
98 void __iomem *mmr_base;
99 unsigned int idx;
100 enum dma_transaction_type current_type;
101 struct list_head chain;
102 struct list_head completed_slots;
103 struct mv_xor_device *device;
104 struct dma_chan common;
105 struct mv_xor_desc_slot *last_used;
106 struct list_head all_slots;
107 int slots_allocated;
108 struct tasklet_struct irq_tasklet;
109#ifdef USE_TIMER
110 unsigned long cleanup_time;
111 u32 current_on_last_cleanup;
112 dma_cookie_t is_complete_cookie;
113#endif
114};
115
116/**
117 * struct mv_xor_desc_slot - software descriptor
118 * @slot_node: node on the mv_xor_chan.all_slots list
119 * @chain_node: node on the mv_xor_chan.chain list
120 * @completed_node: node on the mv_xor_chan.completed_slots list
121 * @hw_desc: virtual address of the hardware descriptor chain
122 * @phys: hardware address of the hardware descriptor chain
123 * @group_head: first operation in a transaction
 124 * @slot_cnt: total slots used in a transaction (group of operations)
125 * @slots_per_op: number of slots per operation
126 * @idx: pool index
127 * @unmap_src_cnt: number of xor sources
128 * @unmap_len: transaction bytecount
129 * @async_tx: support for the async_tx api
130 * @group_list: list of slots that make up a multi-descriptor transaction
131 * for example transfer lengths larger than the supported hw max
132 * @xor_check_result: result of zero sum
 133 * @crc32_result: result of the CRC-32 calculation
134 */
135struct mv_xor_desc_slot {
136 struct list_head slot_node;
137 struct list_head chain_node;
138 struct list_head completed_node;
139 enum dma_transaction_type type;
140 void *hw_desc;
141 struct mv_xor_desc_slot *group_head;
142 u16 slot_cnt;
143 u16 slots_per_op;
144 u16 idx;
145 u16 unmap_src_cnt;
146 u32 value;
147 size_t unmap_len;
148 struct dma_async_tx_descriptor async_tx;
149 union {
150 u32 *xor_check_result;
151 u32 *crc32_result;
152 };
153#ifdef USE_TIMER
154 unsigned long arrival_time;
155 struct timer_list timeout;
156#endif
157};
158
 159/* This structure describes the 64-byte hardware XOR descriptor */
160struct mv_xor_desc {
161 u32 status; /* descriptor execution status */
162 u32 crc32_result; /* result of CRC-32 calculation */
163 u32 desc_command; /* type of operation to be carried out */
164 u32 phy_next_desc; /* next descriptor address pointer */
165 u32 byte_count; /* size of src/dst blocks in bytes */
166 u32 phy_dest_addr; /* destination block address */
167 u32 phy_src_addr[8]; /* source block addresses */
168 u32 reserved0;
169 u32 reserved1;
170};
171
172#define to_mv_sw_desc(addr_hw_desc) \
173 container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)
174
175#define mv_hw_desc_slot_idx(hw_desc, idx) \
176 ((void *)(((unsigned long)hw_desc) + ((idx) << 5)))
177
178#define MV_XOR_MIN_BYTE_COUNT (128)
179#define XOR_MAX_BYTE_COUNT ((16 * 1024 * 1024) - 1)
180#define MV_XOR_MAX_BYTE_COUNT XOR_MAX_BYTE_COUNT
181
182
183#endif
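The hardware descriptor above is sixteen 32-bit words, i.e. exactly MV_XOR_SLOT_SIZE (64) bytes. Purely as an illustration, a compile-time check placed in any function of the driver could assert this:

	BUILD_BUG_ON(sizeof(struct mv_xor_desc) != MV_XOR_SLOT_SIZE);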
diff --git a/include/asm-arm/arch-iop13xx/adma.h b/include/asm-arm/arch-iop13xx/adma.h
index 90d14ee564f5..ef4f5da2029f 100644
--- a/include/asm-arm/arch-iop13xx/adma.h
+++ b/include/asm-arm/arch-iop13xx/adma.h
@@ -198,17 +198,13 @@ iop_chan_memset_slot_count(size_t len, int *slots_per_op)
198static inline int 198static inline int
199iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op) 199iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op)
200{ 200{
201 int num_slots; 201 static const char slot_count_table[] = { 1, 2, 2, 2,
202 /* slots_to_find = 1 for basic descriptor + 1 per 4 sources above 1 202 2, 3, 3, 3,
203 * (1 source => 8 bytes) (1 slot => 32 bytes) 203 3, 4, 4, 4,
204 */ 204 4, 5, 5, 5,
205 num_slots = 1 + (((src_cnt - 1) << 3) >> 5); 205 };
206 if (((src_cnt - 1) << 3) & 0x1f) 206 *slots_per_op = slot_count_table[src_cnt - 1];
207 num_slots++; 207 return *slots_per_op;
208
209 *slots_per_op = num_slots;
210
211 return num_slots;
212} 208}
213 209
214#define ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024) 210#define ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
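The new lookup table encodes the arithmetic it replaces: one slot for the descriptor plus one per four sources (rounded up), since each source pointer takes 8 bytes and a slot holds 32. A quick equivalence check, not part of the patch and with a made-up function name:

	static int old_iop13xx_xor_slot_count(int src_cnt)
	{
		/* 1 for the basic descriptor + 1 per 4 sources above 1,
		 * rounded up (1 source => 8 bytes, 1 slot => 32 bytes)
		 */
		int num_slots = 1 + (((src_cnt - 1) << 3) >> 5);

		if (((src_cnt - 1) << 3) & 0x1f)
			num_slots++;
		return num_slots;
	}

	/* old_iop13xx_xor_slot_count(n) for n = 1..16 yields
	 * 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5 -- the new table.
	 */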
diff --git a/include/asm-arm/hardware/iop3xx-adma.h b/include/asm-arm/hardware/iop3xx-adma.h
index a32b86ac62aa..af64676650a2 100644
--- a/include/asm-arm/hardware/iop3xx-adma.h
+++ b/include/asm-arm/hardware/iop3xx-adma.h
@@ -260,7 +260,7 @@ static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
260static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt, 260static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
261 int *slots_per_op) 261 int *slots_per_op)
262{ 262{
263 static const int slot_count_table[] = { 0, 263 static const char slot_count_table[] = {
264 1, 1, 1, 1, /* 01 - 04 */ 264 1, 1, 1, 1, /* 01 - 04 */
265 2, 2, 2, 2, /* 05 - 08 */ 265 2, 2, 2, 2, /* 05 - 08 */
266 4, 4, 4, 4, /* 09 - 12 */ 266 4, 4, 4, 4, /* 09 - 12 */
@@ -270,7 +270,7 @@ static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
270 8, 8, 8, 8, /* 25 - 28 */ 270 8, 8, 8, 8, /* 25 - 28 */
271 8, 8, 8, 8, /* 29 - 32 */ 271 8, 8, 8, 8, /* 29 - 32 */
272 }; 272 };
273 *slots_per_op = slot_count_table[src_cnt]; 273 *slots_per_op = slot_count_table[src_cnt - 1];
274 return *slots_per_op; 274 return *slots_per_op;
275} 275}
276 276
diff --git a/include/asm-arm/plat-orion/mv_xor.h b/include/asm-arm/plat-orion/mv_xor.h
new file mode 100644
index 000000000000..c349e8ff5cc0
--- /dev/null
+++ b/include/asm-arm/plat-orion/mv_xor.h
@@ -0,0 +1,28 @@
1/*
2 * Marvell XOR platform device data definition file.
3 */
4
5#ifndef __ASM_PLAT_ORION_MV_XOR_H
6#define __ASM_PLAT_ORION_MV_XOR_H
7
8#include <linux/dmaengine.h>
9#include <linux/mbus.h>
10
11#define MV_XOR_SHARED_NAME "mv_xor_shared"
12#define MV_XOR_NAME "mv_xor"
13
14struct mbus_dram_target_info;
15
16struct mv_xor_platform_shared_data {
17 struct mbus_dram_target_info *dram;
18};
19
20struct mv_xor_platform_data {
21 struct platform_device *shared;
22 int hw_id;
23 dma_cap_mask_t cap_mask;
24 size_t pool_size;
25};
26
27
28#endif
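A hedged sketch of how board support code might use these definitions to describe one XOR engine with a memcpy/XOR-capable channel. All names are hypothetical, the MEM/IRQ resources consumed by the probe routines are omitted, and real platform code also points .dram at the board's mbus_dram_target_info:

	static struct mv_xor_platform_shared_data example_xor_shared_data;

	static struct platform_device example_xor_shared = {
		.name	= MV_XOR_SHARED_NAME,
		.id	= 0,
		.dev	= {
			.platform_data	= &example_xor_shared_data,
		},
	};

	static struct mv_xor_platform_data example_xor0_data = {
		.shared		= &example_xor_shared,
		.hw_id		= 0,
		.pool_size	= PAGE_SIZE,
	};

	static struct platform_device example_xor0 = {
		.name	= MV_XOR_NAME,
		.id	= 0,
		.dev	= {
			.platform_data	= &example_xor0_data,
		},
	};

	static void __init example_xor_init(void)
	{
		dma_cap_set(DMA_MEMCPY, example_xor0_data.cap_mask);
		dma_cap_set(DMA_XOR, example_xor0_data.cap_mask);
		platform_device_register(&example_xor_shared);
		platform_device_register(&example_xor0);
	}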
diff --git a/include/asm-avr32/arch-at32ap/at32ap700x.h b/include/asm-avr32/arch-at32ap/at32ap700x.h
index 31e48b0e7324..d18a3053be0d 100644
--- a/include/asm-avr32/arch-at32ap/at32ap700x.h
+++ b/include/asm-avr32/arch-at32ap/at32ap700x.h
@@ -30,4 +30,20 @@
30#define GPIO_PIN_PD(N) (GPIO_PIOD_BASE + (N)) 30#define GPIO_PIN_PD(N) (GPIO_PIOD_BASE + (N))
31#define GPIO_PIN_PE(N) (GPIO_PIOE_BASE + (N)) 31#define GPIO_PIN_PE(N) (GPIO_PIOE_BASE + (N))
32 32
33
34/*
35 * DMAC peripheral hardware handshaking interfaces, used with dw_dmac
36 */
37#define DMAC_MCI_RX 0
38#define DMAC_MCI_TX 1
39#define DMAC_DAC_TX 2
40#define DMAC_AC97_A_RX 3
41#define DMAC_AC97_A_TX 4
42#define DMAC_AC97_B_RX 5
43#define DMAC_AC97_B_TX 6
44#define DMAC_DMAREQ_0 7
45#define DMAC_DMAREQ_1 8
46#define DMAC_DMAREQ_2 9
47#define DMAC_DMAREQ_3 10
48
33#endif /* __ASM_ARCH_AT32AP700X_H__ */ 49#endif /* __ASM_ARCH_AT32AP700X_H__ */
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index eb640f0acfac..0f50d4cc4360 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -101,21 +101,14 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
101 101
102/** 102/**
103 * async_tx_sync_epilog - actions to take if an operation is run synchronously 103 * async_tx_sync_epilog - actions to take if an operation is run synchronously
104 * @flags: async_tx flags
105 * @depend_tx: transaction depends on depend_tx
106 * @cb_fn: function to call when the transaction completes 104 * @cb_fn: function to call when the transaction completes
107 * @cb_fn_param: parameter to pass to the callback routine 105 * @cb_fn_param: parameter to pass to the callback routine
108 */ 106 */
109static inline void 107static inline void
110async_tx_sync_epilog(unsigned long flags, 108async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param)
111 struct dma_async_tx_descriptor *depend_tx,
112 dma_async_tx_callback cb_fn, void *cb_fn_param)
113{ 109{
114 if (cb_fn) 110 if (cb_fn)
115 cb_fn(cb_fn_param); 111 cb_fn(cb_fn_param);
116
117 if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
118 async_tx_ack(depend_tx);
119} 112}
120 113
121void 114void
@@ -152,4 +145,6 @@ struct dma_async_tx_descriptor *
152async_trigger_callback(enum async_tx_flags flags, 145async_trigger_callback(enum async_tx_flags flags,
153 struct dma_async_tx_descriptor *depend_tx, 146 struct dma_async_tx_descriptor *depend_tx,
154 dma_async_tx_callback cb_fn, void *cb_fn_param); 147 dma_async_tx_callback cb_fn, void *cb_fn_param);
148
149void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
155#endif /* _ASYNC_TX_H_ */ 150#endif /* _ASYNC_TX_H_ */
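async_tx_quiesce() is the helper the synchronous fallback paths now use to wait for (and ack) an outstanding dependency before the CPU touches the buffers directly. A minimal usage sketch, with the buffers and callback hypothetical:

	async_tx_quiesce(&depend_tx);			/* wait for and ack the dependency */
	memcpy(dest_buf, src_buf, len);			/* synchronous CPU fallback */
	async_tx_sync_epilog(cb_fn, cb_fn_param);	/* run the completion callback */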
diff --git a/include/linux/dca.h b/include/linux/dca.h
index af61cd1f37e9..b00a753eda53 100644
--- a/include/linux/dca.h
+++ b/include/linux/dca.h
@@ -10,6 +10,7 @@ void dca_unregister_notify(struct notifier_block *nb);
10#define DCA_PROVIDER_REMOVE 0x0002 10#define DCA_PROVIDER_REMOVE 0x0002
11 11
12struct dca_provider { 12struct dca_provider {
13 struct list_head node;
13 struct dca_ops *ops; 14 struct dca_ops *ops;
14 struct device *cd; 15 struct device *cd;
15 int id; 16 int id;
@@ -18,7 +19,9 @@ struct dca_provider {
18struct dca_ops { 19struct dca_ops {
19 int (*add_requester) (struct dca_provider *, struct device *); 20 int (*add_requester) (struct dca_provider *, struct device *);
20 int (*remove_requester) (struct dca_provider *, struct device *); 21 int (*remove_requester) (struct dca_provider *, struct device *);
21 u8 (*get_tag) (struct dca_provider *, int cpu); 22 u8 (*get_tag) (struct dca_provider *, struct device *,
23 int cpu);
24 int (*dev_managed) (struct dca_provider *, struct device *);
22}; 25};
23 26
24struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); 27struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size);
@@ -32,9 +35,11 @@ static inline void *dca_priv(struct dca_provider *dca)
32} 35}
33 36
34/* Requester API */ 37/* Requester API */
38#define DCA_GET_TAG_TWO_ARGS
35int dca_add_requester(struct device *dev); 39int dca_add_requester(struct device *dev);
36int dca_remove_requester(struct device *dev); 40int dca_remove_requester(struct device *dev);
37u8 dca_get_tag(int cpu); 41u8 dca_get_tag(int cpu);
42u8 dca3_get_tag(struct device *dev, int cpu);
38 43
39/* internal stuff */ 44/* internal stuff */
40int __init dca_sysfs_init(void); 45int __init dca_sysfs_init(void);
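Because get_tag now takes the requesting device, DCA_GET_TAG_TWO_ARGS lets requester drivers pick the matching helper at compile time. A hedged sketch (pdev and cpu are hypothetical):

	u8 tag;

	#ifdef DCA_GET_TAG_TWO_ARGS
		tag = dca3_get_tag(&pdev->dev, cpu);
	#else
		tag = dca_get_tag(cpu);
	#endif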
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index d08a5c5eb928..adb0b084eb5a 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -89,10 +89,23 @@ enum dma_transaction_type {
89 DMA_MEMSET, 89 DMA_MEMSET,
90 DMA_MEMCPY_CRC32C, 90 DMA_MEMCPY_CRC32C,
91 DMA_INTERRUPT, 91 DMA_INTERRUPT,
92 DMA_SLAVE,
92}; 93};
93 94
94/* last transaction type for creation of the capabilities mask */ 95/* last transaction type for creation of the capabilities mask */
95#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1) 96#define DMA_TX_TYPE_END (DMA_SLAVE + 1)
97
98/**
99 * enum dma_slave_width - DMA slave register access width.
100 * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
101 * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
102 * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
103 */
104enum dma_slave_width {
105 DMA_SLAVE_WIDTH_8BIT,
106 DMA_SLAVE_WIDTH_16BIT,
107 DMA_SLAVE_WIDTH_32BIT,
108};
96 109
97/** 110/**
98 * enum dma_ctrl_flags - DMA flags to augment operation preparation, 111 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
@@ -102,10 +115,14 @@ enum dma_transaction_type {
102 * @DMA_CTRL_ACK - the descriptor cannot be reused until the client 115 * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
 103 * acknowledges receipt, i.e. has had a chance to establish any 116
104 * dependency chains 117 * dependency chains
118 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
119 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
105 */ 120 */
106enum dma_ctrl_flags { 121enum dma_ctrl_flags {
107 DMA_PREP_INTERRUPT = (1 << 0), 122 DMA_PREP_INTERRUPT = (1 << 0),
108 DMA_CTRL_ACK = (1 << 1), 123 DMA_CTRL_ACK = (1 << 1),
124 DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
125 DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
109}; 126};
110 127
111/** 128/**
@@ -115,6 +132,32 @@ enum dma_ctrl_flags {
115typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; 132typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
116 133
117/** 134/**
135 * struct dma_slave - Information about a DMA slave
136 * @dev: device acting as DMA slave
 137 * @dma_dev: required DMA master device. If non-NULL, the client cannot be
 138 * bound to any master other than this one.
139 * @tx_reg: physical address of data register used for
140 * memory-to-peripheral transfers
141 * @rx_reg: physical address of data register used for
142 * peripheral-to-memory transfers
143 * @reg_width: peripheral register width
144 *
 145 * If dma_dev is non-NULL, the client cannot be bound to any DMA
 146 * master other than the one corresponding to this device. The DMA master
 147 * driver may use this to determine if there is controller-specific
 148 * data wrapped around this struct. Drivers or platform code that set
 149 * the dma_dev field must therefore make sure to use an appropriate
150 * controller-specific dma slave structure wrapping this struct.
151 */
152struct dma_slave {
153 struct device *dev;
154 struct device *dma_dev;
155 dma_addr_t tx_reg;
156 dma_addr_t rx_reg;
157 enum dma_slave_width reg_width;
158};
159
160/**
118 * struct dma_chan_percpu - the per-CPU part of struct dma_chan 161 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
119 * @refcount: local_t used for open-coded "bigref" counting 162 * @refcount: local_t used for open-coded "bigref" counting
120 * @memcpy_count: transaction counter 163 * @memcpy_count: transaction counter
@@ -139,6 +182,7 @@ struct dma_chan_percpu {
139 * @rcu: the DMA channel's RCU head 182 * @rcu: the DMA channel's RCU head
140 * @device_node: used to add this to the device chan list 183 * @device_node: used to add this to the device chan list
141 * @local: per-cpu pointer to a struct dma_chan_percpu 184 * @local: per-cpu pointer to a struct dma_chan_percpu
 185 * @client_count: how many clients are using this channel
142 */ 186 */
143struct dma_chan { 187struct dma_chan {
144 struct dma_device *device; 188 struct dma_device *device;
@@ -154,6 +198,7 @@ struct dma_chan {
154 198
155 struct list_head device_node; 199 struct list_head device_node;
156 struct dma_chan_percpu *local; 200 struct dma_chan_percpu *local;
201 int client_count;
157}; 202};
158 203
159#define to_dma_chan(p) container_of(p, struct dma_chan, dev) 204#define to_dma_chan(p) container_of(p, struct dma_chan, dev)
@@ -202,11 +247,14 @@ typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
202 * @event_callback: func ptr to call when something happens 247 * @event_callback: func ptr to call when something happens
203 * @cap_mask: only return channels that satisfy the requested capabilities 248 * @cap_mask: only return channels that satisfy the requested capabilities
204 * a value of zero corresponds to any capability 249 * a value of zero corresponds to any capability
250 * @slave: data for preparing slave transfer. Must be non-NULL iff the
251 * DMA_SLAVE capability is requested.
205 * @global_node: list_head for global dma_client_list 252 * @global_node: list_head for global dma_client_list
206 */ 253 */
207struct dma_client { 254struct dma_client {
208 dma_event_callback event_callback; 255 dma_event_callback event_callback;
209 dma_cap_mask_t cap_mask; 256 dma_cap_mask_t cap_mask;
257 struct dma_slave *slave;
210 struct list_head global_node; 258 struct list_head global_node;
211}; 259};
212 260
@@ -263,6 +311,8 @@ struct dma_async_tx_descriptor {
263 * @device_prep_dma_zero_sum: prepares a zero_sum operation 311 * @device_prep_dma_zero_sum: prepares a zero_sum operation
264 * @device_prep_dma_memset: prepares a memset operation 312 * @device_prep_dma_memset: prepares a memset operation
265 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation 313 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
314 * @device_prep_slave_sg: prepares a slave dma operation
315 * @device_terminate_all: terminate all pending operations
266 * @device_issue_pending: push pending transactions to hardware 316 * @device_issue_pending: push pending transactions to hardware
267 */ 317 */
268struct dma_device { 318struct dma_device {
@@ -279,7 +329,8 @@ struct dma_device {
279 int dev_id; 329 int dev_id;
280 struct device *dev; 330 struct device *dev;
281 331
282 int (*device_alloc_chan_resources)(struct dma_chan *chan); 332 int (*device_alloc_chan_resources)(struct dma_chan *chan,
333 struct dma_client *client);
283 void (*device_free_chan_resources)(struct dma_chan *chan); 334 void (*device_free_chan_resources)(struct dma_chan *chan);
284 335
285 struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( 336 struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
@@ -297,6 +348,12 @@ struct dma_device {
297 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( 348 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
298 struct dma_chan *chan, unsigned long flags); 349 struct dma_chan *chan, unsigned long flags);
299 350
351 struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
352 struct dma_chan *chan, struct scatterlist *sgl,
353 unsigned int sg_len, enum dma_data_direction direction,
354 unsigned long flags);
355 void (*device_terminate_all)(struct dma_chan *chan);
356
300 enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, 357 enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
301 dma_cookie_t cookie, dma_cookie_t *last, 358 dma_cookie_t cookie, dma_cookie_t *last,
302 dma_cookie_t *used); 359 dma_cookie_t *used);
@@ -318,16 +375,14 @@ dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
318void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, 375void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
319 struct dma_chan *chan); 376 struct dma_chan *chan);
320 377
321static inline void 378static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
322async_tx_ack(struct dma_async_tx_descriptor *tx)
323{ 379{
324 tx->flags |= DMA_CTRL_ACK; 380 tx->flags |= DMA_CTRL_ACK;
325} 381}
326 382
327static inline int 383static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
328async_tx_test_ack(struct dma_async_tx_descriptor *tx)
329{ 384{
330 return tx->flags & DMA_CTRL_ACK; 385 return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
331} 386}
332 387
333#define first_dma_cap(mask) __first_dma_cap(&(mask)) 388#define first_dma_cap(mask) __first_dma_cap(&(mask))
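A hedged sketch of a DMA_SLAVE client built only from the interfaces added above. The register address, callback body, scatterlist and the chan/desc/cookie variables are hypothetical fragments, and .dev/.dma_dev in the dma_slave would normally be filled in at probe time:

	static enum dma_state_client my_dma_event(struct dma_client *client,
						  struct dma_chan *chan,
						  enum dma_state state)
	{
		return DMA_ACK;				/* accept any offered channel */
	}

	static struct dma_slave my_slave = {
		.tx_reg		= 0xfff02034,		/* made-up peripheral FIFO */
		.reg_width	= DMA_SLAVE_WIDTH_32BIT,
	};

	static struct dma_client my_client = {
		.event_callback	= my_dma_event,
		.slave		= &my_slave,
	};

	/* registration, e.g. from a probe routine: */
	dma_cap_set(DMA_SLAVE, my_client.cap_mask);
	dma_async_client_register(&my_client);

	/* once a channel has been handed over via the event callback: */
	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_TO_DEVICE,
						  DMA_PREP_INTERRUPT);
	cookie = desc->tx_submit(desc);
	chan->device->device_issue_pending(chan);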
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
new file mode 100644
index 000000000000..04d217b442bf
--- /dev/null
+++ b/include/linux/dw_dmac.h
@@ -0,0 +1,62 @@
1/*
2 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 3 * AVR32 systems).
4 *
5 * Copyright (C) 2007 Atmel Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef DW_DMAC_H
12#define DW_DMAC_H
13
14#include <linux/dmaengine.h>
15
16/**
17 * struct dw_dma_platform_data - Controller configuration parameters
18 * @nr_channels: Number of channels supported by hardware (max 8)
19 */
20struct dw_dma_platform_data {
21 unsigned int nr_channels;
22};
23
24/**
25 * struct dw_dma_slave - Controller-specific information about a slave
26 * @slave: Generic information about the slave
27 * @ctl_lo: Platform-specific initializer for the CTL_LO register
28 * @cfg_hi: Platform-specific initializer for the CFG_HI register
29 * @cfg_lo: Platform-specific initializer for the CFG_LO register
30 */
31struct dw_dma_slave {
32 struct dma_slave slave;
33 u32 cfg_hi;
34 u32 cfg_lo;
35};
36
37/* Platform-configurable bits in CFG_HI */
38#define DWC_CFGH_FCMODE (1 << 0)
39#define DWC_CFGH_FIFO_MODE (1 << 1)
40#define DWC_CFGH_PROTCTL(x) ((x) << 2)
41#define DWC_CFGH_SRC_PER(x) ((x) << 7)
42#define DWC_CFGH_DST_PER(x) ((x) << 11)
43
44/* Platform-configurable bits in CFG_LO */
45#define DWC_CFGL_PRIO(x) ((x) << 5) /* priority */
46#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */
47#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12)
48#define DWC_CFGL_LOCK_CH_XACT (2 << 12)
49#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */
50#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14)
51#define DWC_CFGL_LOCK_BUS_XACT (2 << 14)
52#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */
53#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */
54#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
55#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
56
57static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave)
58{
59 return container_of(slave, struct dw_dma_slave, slave);
60}
61
62#endif /* DW_DMAC_H */
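A hedged sketch (addresses made up) of how AVR32 platform code might describe an MMC controller slave to dw_dmac, combining this header with the DMAC_* handshake IDs added to at32ap700x.h earlier in this patch; .slave.dev and .slave.dma_dev would be set when the slave is wired to the dw_dmac platform device:

	static struct dw_dma_slave example_mci_dma_slave = {
		.slave = {
			.tx_reg		= 0xfff02434,	/* made-up MCI transmit FIFO */
			.rx_reg		= 0xfff02430,	/* made-up MCI receive FIFO */
			.reg_width	= DMA_SLAVE_WIDTH_32BIT,
		},
		.cfg_hi	= DWC_CFGH_SRC_PER(DMAC_MCI_RX) |
			  DWC_CFGH_DST_PER(DMAC_MCI_TX),
		.cfg_lo	= 0,
	};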
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d8507eb394cf..119ae7b8f028 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2371,6 +2371,14 @@
2371#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916 2371#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916
2372#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918 2372#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918
2373#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 2373#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
2374#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429
2375#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a
2376#define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b
2377#define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c
2378#define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430
2379#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431
2380#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432
2381#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433
2374#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 2382#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
2375#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 2383#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
2376#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580 2384#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index c77aff9c6eb3..8c6b706963ff 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -34,6 +34,7 @@
34#define NET_DMA_DEFAULT_COPYBREAK 4096 34#define NET_DMA_DEFAULT_COPYBREAK 4096
35 35
36int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK; 36int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK;
37EXPORT_SYMBOL(sysctl_tcp_dma_copybreak);
37 38
38/** 39/**
39 * dma_skb_copy_datagram_iovec - Copy a datagram to an iovec. 40 * dma_skb_copy_datagram_iovec - Copy a datagram to an iovec.