author		Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 15:03:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-23 15:03:18 -0400
commit		5554b35933245e95710d709175e14c02cbc956a4
tree		2eeb2f05a7061da3c9a3bc9ea69a344b990c6b49 /crypto/async_tx
parent		0f6e38a6381446eff5175b77d1094834a633a90f
parent		7f1b358a236ee9c19657a619ac6f2dcabcaa0924
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (24 commits)
  I/OAT: I/OAT version 3.0 support
  I/OAT: tcp_dma_copybreak default value dependent on I/OAT version
  I/OAT: Add watchdog/reset functionality to ioatdma
  iop_adma: cleanup iop_chan_xor_slot_count
  iop_adma: document how to calculate the minimum descriptor pool size
  iop_adma: directly reclaim descriptors on allocation failure
  async_tx: make async_tx_test_ack a boolean routine
  async_tx: remove depend_tx from async_tx_sync_epilog
  async_tx: export async_tx_quiesce
  async_tx: fix handling of the "out of descriptor" condition in async_xor
  async_tx: ensure the xor destination buffer remains dma-mapped
  async_tx: list_for_each_entry_rcu() cleanup
  dmaengine: Driver for the Synopsys DesignWare DMA controller
  dmaengine: Add slave DMA interface
  dmaengine: add DMA_COMPL_SKIP_{SRC,DEST}_UNMAP flags to control dma unmap
  dmaengine: Add dma_client parameter to device_alloc_chan_resources
  dmatest: Simple DMA memcpy test client
  dmaengine: DMA engine driver for Marvell XOR engine
  iop-adma: fix platform driver hotplug/coldplug
  dmaengine: track the number of clients using a channel
  ...

Fixed up conflict in drivers/dca/dca-sysfs.c manually
Diffstat (limited to 'crypto/async_tx')
-rw-r--r--	crypto/async_tx/async_memcpy.c	 12
-rw-r--r--	crypto/async_tx/async_memset.c	 12
-rw-r--r--	crypto/async_tx/async_tx.c	 33
-rw-r--r--	crypto/async_tx/async_xor.c	262
4 files changed, 141 insertions, 178 deletions
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index a5eda80e8427..ddccfb01c416 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -73,15 +73,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(async_tx_test_ack(depend_tx));
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
 
 		dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
 		src_buf = kmap_atomic(src, KM_USER1) + src_offset;
@@ -91,7 +83,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 		kunmap_atomic(dest_buf, KM_USER0);
 		kunmap_atomic(src_buf, KM_USER1);
 
-		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+		async_tx_sync_epilog(cb_fn, cb_param);
 	}
 
 	return tx;
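
With these two hunks async_memcpy()'s synchronous fallback delegates dependency handling to the new async_tx_quiesce() helper (added in the async_tx.c hunk below) and calls the slimmed-down async_tx_sync_epilog(). For orientation, a hedged sketch of a caller against this 2.6.26/27-era API; copy_after(), copy_done() and the completion plumbing are invented for the example, not part of this patch:

#include <linux/async_tx.h>
#include <linux/completion.h>

/* invented example: chain a page copy behind prev_tx and wait for it;
 * falls back to the synchronous kmap/memcpy path patched above when
 * no channel with DMA_MEMCPY capability is registered */
static void copy_done(void *param)
{
	complete(param);
}

static void copy_after(struct page *dest, struct page *src, size_t len,
		       struct dma_async_tx_descriptor *prev_tx)
{
	DECLARE_COMPLETION_ONSTACK(done);

	async_memcpy(dest, src, 0, 0, len,
		     ASYNC_TX_ACK | ASYNC_TX_DEP_ACK,
		     prev_tx, copy_done, &done);
	wait_for_completion(&done);
}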
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index f5ff3906b035..5b5eb99bb244 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -72,19 +72,11 @@ async_memset(struct page *dest, int val, unsigned int offset,
 		dest_buf = (void *) (((char *) page_address(dest)) + offset);
 
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(depend_tx->ack);
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
 
 		memset(dest_buf, val, len);
 
-		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+		async_tx_sync_epilog(cb_fn, cb_param);
 	}
 
 	return tx;
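
async_memset() gets the same treatment; note the old code here still read depend_tx->ack directly rather than going through async_tx_test_ack(), an inconsistency the shared helper removes. A hedged sketch of how a caller might seed a parity page ahead of an XOR chain; zero_parity() is invented for the example:

#include <linux/async_tx.h>

/* invented example: zero a parity page and return the descriptor so a
 * later async_xor() can depend on it (NULL if done synchronously);
 * flags stay clear so the descriptor remains un-acked until the
 * dependent operation claims it with ASYNC_TX_DEP_ACK */
static struct dma_async_tx_descriptor *
zero_parity(struct page *parity, size_t len)
{
	return async_memset(parity, 0, 0, len, 0, NULL, NULL, NULL);
}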
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 095c798d3170..85eaf7b1c531 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -295,7 +295,7 @@ dma_channel_add_remove(struct dma_client *client,
 	case DMA_RESOURCE_REMOVED:
 		found = 0;
 		spin_lock_irqsave(&async_tx_lock, flags);
-		list_for_each_entry_rcu(ref, &async_tx_master_list, node)
+		list_for_each_entry(ref, &async_tx_master_list, node)
 			if (ref->chan == chan) {
 				/* permit backing devices to go away */
 				dma_chan_put(ref->chan);
@@ -608,23 +608,34 @@ async_trigger_callback(enum async_tx_flags flags,
 		pr_debug("%s: (sync)\n", __func__);
 
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(async_tx_test_ack(depend_tx));
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
 
-		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+		async_tx_sync_epilog(cb_fn, cb_param);
 	}
 
 	return tx;
 }
 EXPORT_SYMBOL_GPL(async_trigger_callback);
 
+/**
+ * async_tx_quiesce - ensure tx is complete and freeable upon return
+ * @tx - transaction to quiesce
+ */
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
+{
+	if (*tx) {
+		/* if ack is already set then we cannot be sure
+		 * we are referring to the correct operation
+		 */
+		BUG_ON(async_tx_test_ack(*tx));
+		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
+			panic("DMA_ERROR waiting for transaction\n");
+		async_tx_ack(*tx);
+		*tx = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(async_tx_quiesce);
+
 module_init(async_tx_init);
 module_exit(async_tx_exit);
 
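
async_tx_quiesce() centralizes the wait/BUG/panic/ack idiom that each synchronous fallback previously open-coded. It takes a pointer-to-pointer so it can ack the descriptor and NULL the caller's reference in one step. A sketch of the resulting call-site shape, mirroring the conversions above; sync_fallback() is illustrative only:

#include <linux/async_tx.h>

/* illustrative shape of a synchronous fallback after this merge */
static void sync_fallback(struct dma_async_tx_descriptor *depend_tx,
			  dma_async_tx_callback cb_fn, void *cb_param)
{
	/* blocks until depend_tx completes, acks it and NULLs the
	 * local reference; BUGs if it was already acked and panics
	 * on DMA_ERROR, exactly as the open-coded versions did */
	async_tx_quiesce(&depend_tx);

	/* ... perform the operation on the CPU ... */

	async_tx_sync_epilog(cb_fn, cb_param);
}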
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 3a0dddca5a10..65974c6d3d7a 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -35,74 +35,121 @@
  * when CONFIG_DMA_ENGINE=n
  */
 static __always_inline struct dma_async_tx_descriptor *
-do_async_xor(struct dma_device *device,
-	struct dma_chan *chan, struct page *dest, struct page **src_list,
-	unsigned int offset, unsigned int src_cnt, size_t len,
-	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_param)
+do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
+	     unsigned int offset, int src_cnt, size_t len,
+	     enum async_tx_flags flags,
+	     struct dma_async_tx_descriptor *depend_tx,
+	     dma_async_tx_callback cb_fn, void *cb_param)
 {
-	dma_addr_t dma_dest;
+	struct dma_device *dma = chan->device;
 	dma_addr_t *dma_src = (dma_addr_t *) src_list;
-	struct dma_async_tx_descriptor *tx;
+	struct dma_async_tx_descriptor *tx = NULL;
+	int src_off = 0;
 	int i;
-	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
-
-	pr_debug("%s: len: %zu\n", __func__, len);
-
-	dma_dest = dma_map_page(device->dev, dest, offset, len,
-				DMA_FROM_DEVICE);
+	dma_async_tx_callback _cb_fn;
+	void *_cb_param;
+	enum async_tx_flags async_flags;
+	enum dma_ctrl_flags dma_flags;
+	int xor_src_cnt;
+	dma_addr_t dma_dest;
 
+	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE);
 	for (i = 0; i < src_cnt; i++)
-		dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
+		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
 					  len, DMA_TO_DEVICE);
 
-	/* Since we have clobbered the src_list we are committed
-	 * to doing this asynchronously.  Drivers force forward progress
-	 * in case they can not provide a descriptor
-	 */
-	tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
-					 dma_prep_flags);
-	if (!tx) {
-		if (depend_tx)
-			dma_wait_for_async_tx(depend_tx);
-
-		while (!tx)
-			tx = device->device_prep_dma_xor(chan, dma_dest,
-							 dma_src, src_cnt, len,
-							 dma_prep_flags);
-	}
+	while (src_cnt) {
+		async_flags = flags;
+		dma_flags = 0;
+		xor_src_cnt = min(src_cnt, dma->max_xor);
+		/* if we are submitting additional xors, leave the chain open,
+		 * clear the callback parameters, and leave the destination
+		 * buffer mapped
+		 */
+		if (src_cnt > xor_src_cnt) {
+			async_flags &= ~ASYNC_TX_ACK;
+			dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
+			_cb_fn = NULL;
+			_cb_param = NULL;
+		} else {
+			_cb_fn = cb_fn;
+			_cb_param = cb_param;
+		}
+		if (_cb_fn)
+			dma_flags |= DMA_PREP_INTERRUPT;
 
-	async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+		/* Since we have clobbered the src_list we are committed
+		 * to doing this asynchronously.  Drivers force forward
+		 * progress in case they can not provide a descriptor
+		 */
+		tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
+					      xor_src_cnt, len, dma_flags);
+
+		if (unlikely(!tx))
+			async_tx_quiesce(&depend_tx);
+
+		/* spin wait for the preceding transactions to complete */
+		while (unlikely(!tx)) {
+			dma_async_issue_pending(chan);
+			tx = dma->device_prep_dma_xor(chan, dma_dest,
+						      &dma_src[src_off],
+						      xor_src_cnt, len,
+						      dma_flags);
+		}
+
+		async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
+				_cb_param);
+
+		depend_tx = tx;
+		flags |= ASYNC_TX_DEP_ACK;
+
+		if (src_cnt > xor_src_cnt) {
+			/* drop completed sources */
+			src_cnt -= xor_src_cnt;
+			src_off += xor_src_cnt;
+
+			/* use the intermediate result as a source */
+			dma_src[--src_off] = dma_dest;
+			src_cnt++;
+		} else
+			break;
+	}
 
 	return tx;
 }
 
 static void
 do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
-	unsigned int src_cnt, size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_param)
+	    int src_cnt, size_t len, enum async_tx_flags flags,
+	    dma_async_tx_callback cb_fn, void *cb_param)
 {
-	void *_dest;
 	int i;
-
-	pr_debug("%s: len: %zu\n", __func__, len);
+	int xor_src_cnt;
+	int src_off = 0;
+	void *dest_buf;
+	void **srcs = (void **) src_list;
 
 	/* reuse the 'src_list' array to convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
-		src_list[i] = (struct page *)
-			(page_address(src_list[i]) + offset);
+		srcs[i] = page_address(src_list[i]) + offset;
 
 	/* set destination address */
-	_dest = page_address(dest) + offset;
+	dest_buf = page_address(dest) + offset;
 
 	if (flags & ASYNC_TX_XOR_ZERO_DST)
-		memset(_dest, 0, len);
+		memset(dest_buf, 0, len);
 
-	xor_blocks(src_cnt, len, _dest,
-		(void **) src_list);
+	while (src_cnt > 0) {
+		/* process up to 'MAX_XOR_BLOCKS' sources */
+		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
+		xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);
 
-	async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+		/* drop completed sources */
+		src_cnt -= xor_src_cnt;
+		src_off += xor_src_cnt;
+	}
+
+	async_tx_sync_epilog(cb_fn, cb_param);
 }
 
 /**
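
The rewritten do_async_xor() now splits a wide XOR into engine-sized descriptors: the first descriptor consumes up to max_xor sources, and each later one re-reads the destination as a source, so it retires only max_xor - 1 new sources while DMA_COMPL_SKIP_DEST_UNMAP keeps the destination mapped across the chain. A hedged user-space model of that arithmetic (not kernel code; xor_descriptor_count() is invented for the example):

#include <stdio.h>

/* model of do_async_xor()'s chunking: the first descriptor takes
 * max_xor sources; every later one re-reads dest, so it consumes
 * only max_xor - 1 new sources per descriptor */
static int xor_descriptor_count(int src_cnt, int max_xor)
{
	int descs = 0;

	while (src_cnt) {
		int xor_src_cnt = src_cnt < max_xor ? src_cnt : max_xor;

		descs++;
		if (src_cnt > xor_src_cnt)
			src_cnt -= xor_src_cnt - 1;	/* dest re-added */
		else
			break;
	}
	return descs;
}

int main(void)
{
	/* 8 sources on an engine with max_xor = 4 takes three
	 * descriptors: xor(src[0..3]) -> dest,
	 * xor(dest, src[4..6]) -> dest, xor(dest, src[7]) -> dest */
	printf("%d\n", xor_descriptor_count(8, 4));	/* prints 3 */
	return 0;
}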
@@ -132,106 +179,34 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
 						      &dest, 1, src_list,
 						      src_cnt, len);
-	struct dma_device *device = chan ? chan->device : NULL;
-	struct dma_async_tx_descriptor *tx = NULL;
-	dma_async_tx_callback _cb_fn;
-	void *_cb_param;
-	unsigned long local_flags;
-	int xor_src_cnt;
-	int i = 0, src_off = 0;
-
 	BUG_ON(src_cnt <= 1);
 
-	while (src_cnt) {
-		local_flags = flags;
-		if (device) { /* run the xor asynchronously */
-			xor_src_cnt = min(src_cnt, device->max_xor);
-			/* if we are submitting additional xors
-			 * only set the callback on the last transaction
-			 */
-			if (src_cnt > xor_src_cnt) {
-				local_flags &= ~ASYNC_TX_ACK;
-				_cb_fn = NULL;
-				_cb_param = NULL;
-			} else {
-				_cb_fn = cb_fn;
-				_cb_param = cb_param;
-			}
-
-			tx = do_async_xor(device, chan, dest,
-					  &src_list[src_off], offset,
-					  xor_src_cnt, len, local_flags,
-					  depend_tx, _cb_fn, _cb_param);
-		} else { /* run the xor synchronously */
-			/* in the sync case the dest is an implied source
-			 * (assumes the dest is at the src_off index)
-			 */
-			if (flags & ASYNC_TX_XOR_DROP_DST) {
-				src_cnt--;
-				src_off++;
-			}
-
-			/* process up to 'MAX_XOR_BLOCKS' sources */
-			xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
-
-			/* if we are submitting additional xors
-			 * only set the callback on the last transaction
-			 */
-			if (src_cnt > xor_src_cnt) {
-				local_flags &= ~ASYNC_TX_ACK;
-				_cb_fn = NULL;
-				_cb_param = NULL;
-			} else {
-				_cb_fn = cb_fn;
-				_cb_param = cb_param;
-			}
-
-			/* wait for any prerequisite operations */
-			if (depend_tx) {
-				/* if ack is already set then we cannot be sure
-				 * we are referring to the correct operation
-				 */
-				BUG_ON(async_tx_test_ack(depend_tx));
-				if (dma_wait_for_async_tx(depend_tx) ==
-					DMA_ERROR)
-					panic("%s: DMA_ERROR waiting for "
-						"depend_tx\n",
-						__func__);
-			}
-
-			do_sync_xor(dest, &src_list[src_off], offset,
-				xor_src_cnt, len, local_flags, depend_tx,
-				_cb_fn, _cb_param);
-		}
-
-		/* the previous tx is hidden from the client,
-		 * so ack it
-		 */
-		if (i && depend_tx)
-			async_tx_ack(depend_tx);
-
-		depend_tx = tx;
-
-		if (src_cnt > xor_src_cnt) {
-			/* drop completed sources */
-			src_cnt -= xor_src_cnt;
-			src_off += xor_src_cnt;
-
-			/* unconditionally preserve the destination */
-			flags &= ~ASYNC_TX_XOR_ZERO_DST;
-
-			/* use the intermediate result as a source, but
-			 * remember it's dropped, because it's implied,
-			 * in the sync case
-			 */
-			src_list[--src_off] = dest;
-			src_cnt++;
-			flags |= ASYNC_TX_XOR_DROP_DST;
-		} else
-			src_cnt = 0;
-		i++;
+	if (chan) {
+		/* run the xor asynchronously */
+		pr_debug("%s (async): len: %zu\n", __func__, len);
+
+		return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
+				    flags, depend_tx, cb_fn, cb_param);
+	} else {
+		/* run the xor synchronously */
+		pr_debug("%s (sync): len: %zu\n", __func__, len);
+
+		/* in the sync case the dest is an implied source
+		 * (assumes the dest is the first source)
+		 */
+		if (flags & ASYNC_TX_XOR_DROP_DST) {
+			src_cnt--;
+			src_list++;
+		}
+
+		/* wait for any prerequisite operations */
+		async_tx_quiesce(&depend_tx);
+
+		do_sync_xor(dest, src_list, offset, src_cnt, len,
+			    flags, cb_fn, cb_param);
+
+		return NULL;
 	}
-
-	return tx;
 }
 EXPORT_SYMBOL_GPL(async_xor);
 
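
async_xor() itself reduces to channel selection: hand everything to do_async_xor(), or quiesce the dependency and run do_sync_xor(). A hedged sketch of a typical caller computing stripe parity; compute_parity() and STRIPE_PAGES are invented, though the flag usage follows the drivers/md/raid5.c pattern:

#include <linux/async_tx.h>

#define STRIPE_PAGES 4	/* invented stripe width */

/* invented example: accumulate STRIPE_PAGES data pages into parity,
 * zeroing parity first rather than treating it as an extra source */
static struct dma_async_tx_descriptor *
compute_parity(struct page *parity, struct page **data, size_t len,
	       dma_async_tx_callback done, void *ctx)
{
	return async_xor(parity, data, 0, STRIPE_PAGES, len,
			 ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
			 NULL, done, ctx);
}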
@@ -285,11 +260,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
 						      len, result,
 						      dma_prep_flags);
-		if (!tx) {
-			if (depend_tx)
-				dma_wait_for_async_tx(depend_tx);
+		if (unlikely(!tx)) {
+			async_tx_quiesce(&depend_tx);
 
-			while (!tx)
+			while (!tx) {
+				dma_async_issue_pending(chan);
 				tx = device->device_prep_dma_zero_sum(chan,
 					dma_src, src_cnt, len, result,
 					dma_prep_flags);
+			}
@@ -307,18 +282,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 		tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
 			       depend_tx, NULL, NULL);
 
-		if (tx) {
-			if (dma_wait_for_async_tx(tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for tx\n",
-					__func__);
-			async_tx_ack(tx);
-		}
+		async_tx_quiesce(&tx);
 
 		*result = page_is_zero(dest, offset, len) ? 0 : 1;
 
-		tx = NULL;
-
-		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+		async_tx_sync_epilog(cb_fn, cb_param);
 	}
 
 	return tx;
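
In async_xor_zero_sum() the same helper replaces both the descriptor-allocation retry (now backed by dma_async_issue_pending()) and the explicit wait-and-ack on the intermediate XOR in the synchronous path. A hedged usage sketch; check_parity() is invented, and note that dest must also be the first source so the sync fallback can drop it via ASYNC_TX_XOR_DROP_DST:

#include <linux/async_tx.h>

/* invented example: verify that blocks[0..cnt-1] XOR to zero;
 * blocks[0] doubles as dest, matching the sync fallback's
 * implied-source convention, and *result is set non-zero on a
 * parity mismatch once the check completes */
static struct dma_async_tx_descriptor *
check_parity(struct page **blocks, int cnt, size_t len, u32 *result,
	     dma_async_tx_callback done, void *ctx)
{
	return async_xor_zero_sum(blocks[0], blocks, 0, cnt, len, result,
				  ASYNC_TX_ACK, NULL, done, ctx);
}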