author    Dan Williams <dan.j.williams@intel.com>  2008-02-02 21:49:57 -0500
committer Dan Williams <dan.j.williams@intel.com>  2008-02-06 12:12:17 -0500
commit    0036731c88fdb5bf4f04a796a30b5e445fc57f54 (patch)
tree      66982e4a9fdb92fedadca35c0ccaa0b9a75e9d2e /crypto/async_tx
parent    d909b347591a23c5a2c324fbccd4c9c966f31c67 (diff)
async_tx: kill tx_set_src and tx_set_dest methods
The tx_set_src and tx_set_dest methods were originally implemented to allow an array of addresses to be passed down from async_xor to the dmaengine driver while minimizing stack overhead. Removing these methods allows drivers to have all transaction parameters available at 'prep' time, saves two function pointers in struct dma_async_tx_descriptor, and reduces the number of indirect branches.

A consequence of moving this data to the 'prep' routine is that multi-source routines like async_xor need temporary storage to convert an array of linear addresses into an array of dma addresses. In order to keep the same stack footprint as the previous implementation, the input array is reused as storage for the dma addresses. This requires that sizeof(dma_addr_t) be less than or equal to sizeof(void *); as a consequence, CONFIG_DMADEVICES now depends on !CONFIG_HIGHMEM64G. It also requires that drivers be able to make descriptor resources available when the 'prep' routine is polled.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Shannon Nelson <shannon.nelson@intel.com>
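The core of the change is a calling-convention switch: instead of asking the driver for a descriptor and then patching addresses into it via tx_set_dest/tx_set_src, the caller now maps the pages first and hands the dma addresses to the 'prep' routine in a single call. A compressed before/after sketch, using the async_memcpy() variables from the hunks below (the synchronous fallback and error handling are elided):

        /* Before: get the descriptor first, patch addresses in afterwards */
        tx = device->device_prep_dma_memcpy(chan, len, cb_fn ? 1 : 0);
        tx->tx_set_dest(dma_map_page(device->dev, dest, dest_offset, len,
                                     DMA_FROM_DEVICE), tx, 0);
        tx->tx_set_src(dma_map_page(device->dev, src, src_offset, len,
                                    DMA_TO_DEVICE), tx, 0);

        /* After: map first, then pass all transaction parameters to 'prep' */
        dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
                                DMA_FROM_DEVICE);
        dma_src = dma_map_page(device->dev, src, src_offset, len,
                               DMA_TO_DEVICE);
        tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                            cb_fn != NULL);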
Diffstat (limited to 'crypto/async_tx')
-rw-r--r--  crypto/async_tx/async_memcpy.c  | 27
-rw-r--r--  crypto/async_tx/async_memset.c  | 20
-rw-r--r--  crypto/async_tx/async_xor.c     | 94
3 files changed, 83 insertions(+), 58 deletions(-)
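The src_list reuse described in the commit message is what keeps the stack footprint flat: the array of struct page pointers handed in by the caller is overwritten in place with the dma addresses produced by dma_map_page(). A minimal sketch of the idea, assuming a hypothetical helper name map_srcs_in_place() (the real code sits inline in do_async_xor() and async_xor_zero_sum() in the diff below); the compile-time check mirrors the BUILD_BUG_ON added to async_xor_init():

        #include <linux/kernel.h>
        #include <linux/dmaengine.h>
        #include <linux/dma-mapping.h>

        /* Hypothetical helper for illustration: overlay a dma_addr_t array on
         * the caller's page-pointer array and fill it in place.  Valid only
         * while sizeof(dma_addr_t) <= sizeof(struct page *), i.e. not with
         * HIGHMEM64G.
         */
        static void map_srcs_in_place(struct dma_device *device,
                                      struct page **src_list,
                                      unsigned int offset, size_t len,
                                      int src_cnt)
        {
                dma_addr_t *dma_src = (dma_addr_t *) src_list;
                int i;

                BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));

                /* each page pointer is read before its slot is overwritten */
                for (i = 0; i < src_cnt; i++)
                        dma_src[i] = dma_map_page(device->dev, src_list[i],
                                                  offset, len, DMA_TO_DEVICE);
        }

Because the driver copies the addresses while building the descriptor, the clobbered src_list is never needed again; this is why do_async_xor() is committed to the asynchronous path once the conversion loop has run.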
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index e8c8956ef1dd..faca0bc52068 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -48,26 +48,25 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 {
         struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY);
         struct dma_device *device = chan ? chan->device : NULL;
-        int int_en = cb_fn ? 1 : 0;
-        struct dma_async_tx_descriptor *tx = device ?
-                device->device_prep_dma_memcpy(chan, len,
-                int_en) : NULL;
+        struct dma_async_tx_descriptor *tx = NULL;
 
-        if (tx) { /* run the memcpy asynchronously */
-                dma_addr_t addr;
+        if (device) {
+                dma_addr_t dma_dest, dma_src;
 
-                pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+                dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
+                                        DMA_FROM_DEVICE);
 
-                addr = dma_map_page(device->dev, dest, dest_offset, len,
-                                    DMA_FROM_DEVICE);
-                tx->tx_set_dest(addr, tx, 0);
+                dma_src = dma_map_page(device->dev, src, src_offset, len,
+                                       DMA_TO_DEVICE);
 
-                addr = dma_map_page(device->dev, src, src_offset, len,
-                                    DMA_TO_DEVICE);
-                tx->tx_set_src(addr, tx, 0);
+                tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
+                                                    len, cb_fn != NULL);
+        }
 
+        if (tx) {
+                pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
                 async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
         } else { /* run the memcpy synchronously */
                 void *dest_buf, *src_buf;
                 pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
 
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 760972803958..0c94851cfd37 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -48,20 +48,20 @@ async_memset(struct page *dest, int val, unsigned int offset,
 {
         struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET);
         struct dma_device *device = chan ? chan->device : NULL;
-        int int_en = cb_fn ? 1 : 0;
-        struct dma_async_tx_descriptor *tx = device ?
-                device->device_prep_dma_memset(chan, val, len,
-                        int_en) : NULL;
+        struct dma_async_tx_descriptor *tx = NULL;
 
-        if (tx) { /* run the memset asynchronously */
-                dma_addr_t dma_addr;
+        if (device) {
+                dma_addr_t dma_dest;
 
-                pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
-
-                dma_addr = dma_map_page(device->dev, dest, offset, len,
+                dma_dest = dma_map_page(device->dev, dest, offset, len,
                                         DMA_FROM_DEVICE);
-                tx->tx_set_dest(dma_addr, tx, 0);
 
+                tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
+                                                    cb_fn != NULL);
+        }
+
+        if (tx) {
+                pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
                 async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
         } else { /* run the memset synchronously */
                 void *dest_buf;
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index cb41e6bbbc4d..12cba1a4205b 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -34,29 +34,46 @@
  * This routine is marked __always_inline so it can be compiled away
  * when CONFIG_DMA_ENGINE=n
  */
-static __always_inline void
-do_async_xor(struct dma_async_tx_descriptor *tx, struct dma_device *device,
+static __always_inline struct dma_async_tx_descriptor *
+do_async_xor(struct dma_device *device,
         struct dma_chan *chan, struct page *dest, struct page **src_list,
         unsigned int offset, unsigned int src_cnt, size_t len,
         enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
         dma_async_tx_callback cb_fn, void *cb_param)
 {
-        dma_addr_t dma_addr;
+        dma_addr_t dma_dest;
+        dma_addr_t *dma_src = (dma_addr_t *) src_list;
+        struct dma_async_tx_descriptor *tx;
         int i;
 
         pr_debug("%s: len: %zu\n", __FUNCTION__, len);
 
-        dma_addr = dma_map_page(device->dev, dest, offset, len,
+        dma_dest = dma_map_page(device->dev, dest, offset, len,
                                 DMA_FROM_DEVICE);
-        tx->tx_set_dest(dma_addr, tx, 0);
 
-        for (i = 0; i < src_cnt; i++) {
-                dma_addr = dma_map_page(device->dev, src_list[i],
-                        offset, len, DMA_TO_DEVICE);
-                tx->tx_set_src(dma_addr, tx, i);
+        for (i = 0; i < src_cnt; i++)
+                dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
+                                          len, DMA_TO_DEVICE);
+
+        /* Since we have clobbered the src_list we are committed
+         * to doing this asynchronously.  Drivers force forward progress
+         * in case they can not provide a descriptor
+         */
+        tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
+                                         cb_fn != NULL);
+        if (!tx) {
+                if (depend_tx)
+                        dma_wait_for_async_tx(depend_tx);
+
+                while (!tx)
+                        tx = device->device_prep_dma_xor(chan, dma_dest,
+                                                         dma_src, src_cnt, len,
+                                                         cb_fn != NULL);
         }
 
         async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+
+        return tx;
 }
 
 static void
@@ -118,7 +135,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
         void *_cb_param;
         unsigned long local_flags;
         int xor_src_cnt;
-        int i = 0, src_off = 0, int_en;
+        int i = 0, src_off = 0;
 
         BUG_ON(src_cnt <= 1);
 
@@ -138,20 +155,11 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
                         _cb_param = cb_param;
                 }
 
-                int_en = _cb_fn ? 1 : 0;
-
-                tx = device->device_prep_dma_xor(
-                        chan, xor_src_cnt, len, int_en);
-
-                if (tx) {
-                        do_async_xor(tx, device, chan, dest,
-                                &src_list[src_off], offset, xor_src_cnt, len,
-                                local_flags, depend_tx, _cb_fn,
-                                _cb_param);
-                } else /* fall through */
-                        goto xor_sync;
+                tx = do_async_xor(device, chan, dest,
+                                  &src_list[src_off], offset,
+                                  xor_src_cnt, len, local_flags,
+                                  depend_tx, _cb_fn, _cb_param);
         } else { /* run the xor synchronously */
-xor_sync:
                 /* in the sync case the dest is an implied source
                  * (assumes the dest is at the src_off index)
                  */
@@ -254,23 +262,31 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 {
         struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM);
         struct dma_device *device = chan ? chan->device : NULL;
-        int int_en = cb_fn ? 1 : 0;
-        struct dma_async_tx_descriptor *tx = device ?
-                device->device_prep_dma_zero_sum(chan, src_cnt, len, result,
-                        int_en) : NULL;
-        int i;
+        struct dma_async_tx_descriptor *tx = NULL;
 
         BUG_ON(src_cnt <= 1);
 
-        if (tx) {
-                dma_addr_t dma_addr;
+        if (device) {
+                dma_addr_t *dma_src = (dma_addr_t *) src_list;
+                int i;
 
                 pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 
-                for (i = 0; i < src_cnt; i++) {
-                        dma_addr = dma_map_page(device->dev, src_list[i],
-                                offset, len, DMA_TO_DEVICE);
-                        tx->tx_set_src(dma_addr, tx, i);
+                for (i = 0; i < src_cnt; i++)
+                        dma_src[i] = dma_map_page(device->dev, src_list[i],
+                                                  offset, len, DMA_TO_DEVICE);
+
+                tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
+                                                      len, result,
+                                                      cb_fn != NULL);
+                if (!tx) {
+                        if (depend_tx)
+                                dma_wait_for_async_tx(depend_tx);
+
+                        while (!tx)
+                                tx = device->device_prep_dma_zero_sum(chan,
+                                        dma_src, src_cnt, len, result,
+                                        cb_fn != NULL);
                 }
 
                 async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
@@ -305,6 +321,16 @@ EXPORT_SYMBOL_GPL(async_xor_zero_sum);
 
 static int __init async_xor_init(void)
 {
+        #ifdef CONFIG_DMA_ENGINE
+        /* To conserve stack space the input src_list (array of page pointers)
+         * is reused to hold the array of dma addresses passed to the driver.
+         * This conversion is only possible when dma_addr_t is less than the
+         * size of a pointer.  HIGHMEM64G is known to violate this
+         * assumption.
+         */
+        BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
+        #endif
+
         return 0;
 }
 