Diffstat (limited to 'crypto/async_tx')
-rw-r--r--  crypto/async_tx/async_memcpy.c   6
-rw-r--r--  crypto/async_tx/async_memset.c   6
-rw-r--r--  crypto/async_tx/async_tx.c       6
-rw-r--r--  crypto/async_tx/async_xor.c     12
4 files changed, 15 insertions(+), 15 deletions(-)
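
The change is mechanical: __FUNCTION__ is a gcc-specific predefined identifier, while __func__ is the equivalent spelling standardized in C99, so each pr_debug() and panic() format argument switches to the portable form. A minimal user-space sketch of the two spellings (illustrative only, not part of the patch):

#include <stdio.h>

/* C99 specifies __func__ as if every function body began with:
 *     static const char __func__[] = "function-name";
 * __FUNCTION__ is the older gcc extension carrying the same value.
 */
static void demo(void)
{
	printf("%s: standard C99 spelling\n", __func__);
#ifdef __GNUC__
	printf("%s: gcc-specific spelling\n", __FUNCTION__);
#endif
}

int main(void)
{
	demo();		/* prints "demo: ..." for each spelling under gcc */
	return 0;
}
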
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 0f6282207b32..84caa4efc0d4 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -66,11 +66,11 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	}
 
 	if (tx) {
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (async) len: %zu\n", __func__, len);
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else {
 		void *dest_buf, *src_buf;
-		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		/* wait for any prerequisite operations */
 		if (depend_tx) {
@@ -80,7 +80,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 			BUG_ON(depend_tx->ack);
 			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__FUNCTION__);
+					__func__);
 		}
 
 		dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 09c0e83664bc..f5ff3906b035 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -63,11 +63,11 @@ async_memset(struct page *dest, int val, unsigned int offset,
 	}
 
 	if (tx) {
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (async) len: %zu\n", __func__, len);
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else { /* run the memset synchronously */
 		void *dest_buf;
-		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		dest_buf = (void *) (((char *) page_address(dest)) + offset);
 
@@ -79,7 +79,7 @@ async_memset(struct page *dest, int val, unsigned int offset,
 			BUG_ON(depend_tx->ack);
 			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__FUNCTION__);
+					__func__);
 		}
 
 		memset(dest_buf, val, len);
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 562882189de5..2be3bae89930 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -472,11 +472,11 @@ async_trigger_callback(enum async_tx_flags flags,
 		tx = NULL;
 
 	if (tx) {
-		pr_debug("%s: (async)\n", __FUNCTION__);
+		pr_debug("%s: (async)\n", __func__);
 
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else {
-		pr_debug("%s: (sync)\n", __FUNCTION__);
+		pr_debug("%s: (sync)\n", __func__);
 
 		/* wait for any prerequisite operations */
 		if (depend_tx) {
@@ -486,7 +486,7 @@ async_trigger_callback(enum async_tx_flags flags,
 			BUG_ON(depend_tx->ack);
 			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__FUNCTION__);
+					__func__);
 		}
 
 		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 2259a4ff15cb..7a9db353f198 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -47,7 +47,7 @@ do_async_xor(struct dma_device *device,
 	int i;
 	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
-	pr_debug("%s: len: %zu\n", __FUNCTION__, len);
+	pr_debug("%s: len: %zu\n", __func__, len);
 
 	dma_dest = dma_map_page(device->dev, dest, offset, len,
 				DMA_FROM_DEVICE);
@@ -86,7 +86,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	void *_dest;
 	int i;
 
-	pr_debug("%s: len: %zu\n", __FUNCTION__, len);
+	pr_debug("%s: len: %zu\n", __func__, len);
 
 	/* reuse the 'src_list' array to convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
@@ -196,7 +196,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 					DMA_ERROR)
 					panic("%s: DMA_ERROR waiting for "
 						"depend_tx\n",
-						__FUNCTION__);
+						__func__);
 			}
 
 			do_sync_xor(dest, &src_list[src_off], offset,
@@ -276,7 +276,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 		int i;
 
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (async) len: %zu\n", __func__, len);
 
 		for (i = 0; i < src_cnt; i++)
 			dma_src[i] = dma_map_page(device->dev, src_list[i],
@@ -299,7 +299,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 	} else {
 		unsigned long xor_flags = flags;
 
-		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		xor_flags |= ASYNC_TX_XOR_DROP_DST;
 		xor_flags &= ~ASYNC_TX_ACK;
@@ -310,7 +310,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 		if (tx) {
 			if (dma_wait_for_async_tx(tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for tx\n",
-					__FUNCTION__);
+					__func__);
 			async_tx_ack(tx);
 		}
 