author    Dan Williams <dan.j.williams@intel.com>  2009-04-08 17:28:37 -0400
committer Dan Williams <dan.j.williams@intel.com>  2009-04-08 17:28:37 -0400
commit    099f53cb50e45ef617a9f1d63ceec799e489418b (patch)
tree      fd57f259f58bcf615fe2b17734ed0cbec612782d
parent    fd74ea65883c7e6903e9b652795f72b723a2be69 (diff)
async_tx: rename zero_sum to val
'zero_sum' does not properly describe the operation of generating parity
and checking that it validates against an existing buffer.  Change the
name of the operation to 'val' (for 'validate').  This is in anticipation
of the p+q case where it is a requirement to identify the target parity
buffers separately from the source buffers, because the target parity
buffers will not have corresponding pq coefficients.

Reviewed-by: Andre Noll <maan@systemlinux.org>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
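As a caller-side illustration of the renamed interface (a minimal sketch;
the variables here are hypothetical, and the real call site is in
drivers/md/raid5.c below):

	/*
	 * Sketch only: xor 'src_cnt' source pages and check that they sum
	 * to zero.  By the convention used in ops_run_check() below,
	 * 'result' is left zero when parity validates and set nonzero
	 * otherwise.
	 */
	struct dma_async_tx_descriptor *tx;
	u32 result;

	tx = async_xor_val(dest, src_list, 0, src_cnt, PAGE_SIZE,
			   &result, 0, NULL, NULL, NULL);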
-rw-r--r--  Documentation/crypto/async-tx-api.txt | 14
-rw-r--r--  arch/arm/mach-iop13xx/setup.c         |  8
-rw-r--r--  arch/arm/plat-iop/adma.c              |  2
-rw-r--r--  crypto/async_tx/async_xor.c           | 16
-rw-r--r--  drivers/dma/dmaengine.c               |  4
-rw-r--r--  drivers/dma/iop-adma.c                | 38
-rw-r--r--  drivers/md/raid5.c                    |  2
-rw-r--r--  include/linux/async_tx.h              |  2
-rw-r--r--  include/linux/dmaengine.h             |  8
9 files changed, 47 insertions(+), 47 deletions(-)
diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
index 9f59fcbf5d82..4af12180d191 100644
--- a/Documentation/crypto/async-tx-api.txt
+++ b/Documentation/crypto/async-tx-api.txt
@@ -61,13 +61,13 @@ async_<operation>(<op specific parameters>,
 	    void *callback_parameter);
 
 3.2 Supported operations:
-memcpy       - memory copy between a source and a destination buffer
-memset       - fill a destination buffer with a byte value
-xor          - xor a series of source buffers and write the result to a
-               destination buffer
-xor_zero_sum - xor a series of source buffers and set a flag if the
-               result is zero. The implementation attempts to prevent
-               writes to memory
+memcpy  - memory copy between a source and a destination buffer
+memset  - fill a destination buffer with a byte value
+xor     - xor a series of source buffers and write the result to a
+	  destination buffer
+xor_val - xor a series of source buffers and set a flag if the
+	  result is zero. The implementation attempts to prevent
+	  writes to memory
 
 3.3 Descriptor management:
 The return value is non-NULL and points to a 'descriptor' when the operation
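To pin down the xor_val contract documented above, a software sketch of the
equivalent synchronous check (illustrative only, not the in-kernel fallback;
u8 as in <linux/types.h>):

	/*
	 * Illustrative reference: xor all sources byte-by-byte and report
	 * whether the combined result is zero, without writing a
	 * destination buffer.
	 */
	static void xor_val_reference(u8 **src, int src_cnt, size_t len,
				      u32 *result)
	{
		size_t i;
		int j;

		*result = 0;			/* 0 == parity validates */
		for (i = 0; i < len; i++) {
			u8 acc = 0;

			for (j = 0; j < src_cnt; j++)
				acc ^= src[j][i];
			if (acc)
				*result = 1;	/* nonzero xor detected */
		}
	}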
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index cfd4d2e6dacd..9800228b71d3 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -478,7 +478,7 @@ void __init iop13xx_platform_init(void)
 		dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 		dma_cap_set(DMA_XOR, plat_data->cap_mask);
 		dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
-		dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
+		dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
 		dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
 		dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
 		dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
@@ -490,7 +490,7 @@ void __init iop13xx_platform_init(void)
 		dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 		dma_cap_set(DMA_XOR, plat_data->cap_mask);
 		dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
-		dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
+		dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
 		dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
 		dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
 		dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
@@ -502,13 +502,13 @@ void __init iop13xx_platform_init(void)
 		dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 		dma_cap_set(DMA_XOR, plat_data->cap_mask);
 		dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
-		dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
+		dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
 		dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
 		dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
 		dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
 		dma_cap_set(DMA_PQ_XOR, plat_data->cap_mask);
 		dma_cap_set(DMA_PQ_UPDATE, plat_data->cap_mask);
-		dma_cap_set(DMA_PQ_ZERO_SUM, plat_data->cap_mask);
+		dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask);
 		break;
 	}
 }
diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c
index f72420821619..c0400440e21c 100644
--- a/arch/arm/plat-iop/adma.c
+++ b/arch/arm/plat-iop/adma.c
@@ -198,7 +198,7 @@ static int __init iop3xx_adma_cap_init(void)
 	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
 	#else
 	dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
-	dma_cap_set(DMA_ZERO_SUM, iop3xx_aau_data.cap_mask);
+	dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask);
 	dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
 	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
 	#endif
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 95fe2c8d6c51..e0580b0ea533 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -222,7 +222,7 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 }
 
 /**
- * async_xor_zero_sum - attempt a xor parity check with a dma engine.
+ * async_xor_val - attempt a xor parity check with a dma engine.
  * @dest: destination page used if the xor is performed synchronously
  * @src_list: array of source pages.  The dest page must be listed as a source
  *	at index zero.  The contents of this array may be overwritten.
@@ -236,13 +236,13 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
  * @cb_param: parameter to pass to the callback routine
  */
 struct dma_async_tx_descriptor *
-async_xor_zero_sum(struct page *dest, struct page **src_list,
+async_xor_val(struct page *dest, struct page **src_list,
 	unsigned int offset, int src_cnt, size_t len,
 	u32 *result, enum async_tx_flags flags,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM,
+	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR_VAL,
 						      &dest, 1, src_list,
 						      src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
@@ -261,15 +261,15 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 			dma_src[i] = dma_map_page(device->dev, src_list[i],
 						  offset, len, DMA_TO_DEVICE);
 
-		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
-						      len, result,
-						      dma_prep_flags);
+		tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt,
+						     len, result,
+						     dma_prep_flags);
 		if (unlikely(!tx)) {
 			async_tx_quiesce(&depend_tx);
 
 			while (!tx) {
 				dma_async_issue_pending(chan);
-				tx = device->device_prep_dma_zero_sum(chan,
+				tx = device->device_prep_dma_xor_val(chan,
 					dma_src, src_cnt, len, result,
 					dma_prep_flags);
 			}
@@ -296,7 +296,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 
 	return tx;
 }
-EXPORT_SYMBOL_GPL(async_xor_zero_sum);
+EXPORT_SYMBOL_GPL(async_xor_val);
 
 static int __init async_xor_init(void)
 {
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 92438e9dacc3..6781e8f3c064 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -644,8 +644,8 @@ int dma_async_device_register(struct dma_device *device)
 	       !device->device_prep_dma_memcpy);
 	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
 	       !device->device_prep_dma_xor);
-	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
-	       !device->device_prep_dma_zero_sum);
+	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
+	       !device->device_prep_dma_xor_val);
 	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
 	       !device->device_prep_dma_memset);
 	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 2f052265122f..6ff79a672699 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -660,9 +660,9 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 }
 
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
-			   unsigned int src_cnt, size_t len, u32 *result,
-			   unsigned long flags)
+iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
+			  unsigned int src_cnt, size_t len, u32 *result,
+			  unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -906,7 +906,7 @@ out:
 
 #define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
 static int __devinit
-iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
+iop_adma_xor_val_self_test(struct iop_adma_device *device)
 {
 	int i, src_idx;
 	struct page *dest;
@@ -1002,7 +1002,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 				   PAGE_SIZE, DMA_TO_DEVICE);
 
 	/* skip zero sum if the capability is not present */
-	if (!dma_has_cap(DMA_ZERO_SUM, dma_chan->device->cap_mask))
+	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
 		goto free_resources;
 
 	/* zero sum the sources with the destintation page */
@@ -1016,10 +1016,10 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
 					   zero_sum_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
-	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
-					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
-					&zero_sum_result,
-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
+				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+				       &zero_sum_result,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1072,10 +1072,10 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
 					   zero_sum_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
-	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
-					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
-					&zero_sum_result,
-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
+				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+				       &zero_sum_result,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1192,9 +1192,9 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
 		dma_dev->max_xor = iop_adma_get_max_xor();
 		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
 	}
-	if (dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask))
-		dma_dev->device_prep_dma_zero_sum =
-			iop_adma_prep_dma_zero_sum;
+	if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
+		dma_dev->device_prep_dma_xor_val =
+			iop_adma_prep_dma_xor_val;
 	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
 		dma_dev->device_prep_dma_interrupt =
 			iop_adma_prep_dma_interrupt;
@@ -1249,7 +1249,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
 
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
 	    dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
-		ret = iop_adma_xor_zero_sum_self_test(adev);
+		ret = iop_adma_xor_val_self_test(adev);
 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
 		if (ret)
 			goto err_free_iop_chan;
@@ -1259,10 +1259,10 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
1259 "( %s%s%s%s%s%s%s%s%s%s)\n", 1259 "( %s%s%s%s%s%s%s%s%s%s)\n",
1260 dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "", 1260 dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "",
1261 dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "", 1261 dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "",
1262 dma_has_cap(DMA_PQ_ZERO_SUM, dma_dev->cap_mask) ? "pq_zero_sum " : "", 1262 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
1263 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", 1263 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1264 dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "", 1264 dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "",
1265 dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask) ? "xor_zero_sum " : "", 1265 dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
1266 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", 1266 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
1267 dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "", 1267 dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "",
1268 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", 1268 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3bbc6d647044..f8d2d35ed298 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -854,7 +854,7 @@ static void ops_run_check(struct stripe_head *sh)
 			xor_srcs[count++] = dev->page;
 	}
 
-	tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
+	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
 		&sh->ops.zero_sum_result, 0, NULL, NULL, NULL);
 
 	atomic_inc(&sh->count);
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 5fc2ef8d97fa..513150d8c25b 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -117,7 +117,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	dma_async_tx_callback cb_fn, void *cb_fn_param);
 
 struct dma_async_tx_descriptor *
-async_xor_zero_sum(struct page *dest, struct page **src_list,
+async_xor_val(struct page *dest, struct page **src_list,
 	unsigned int offset, int src_cnt, size_t len,
 	u32 *result, enum async_tx_flags flags,
 	struct dma_async_tx_descriptor *depend_tx,
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 2e2aa3df170c..6768727d00d7 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -55,8 +55,8 @@ enum dma_transaction_type {
 	DMA_PQ_XOR,
 	DMA_DUAL_XOR,
 	DMA_PQ_UPDATE,
-	DMA_ZERO_SUM,
-	DMA_PQ_ZERO_SUM,
+	DMA_XOR_VAL,
+	DMA_PQ_VAL,
 	DMA_MEMSET,
 	DMA_MEMCPY_CRC32C,
 	DMA_INTERRUPT,
@@ -214,7 +214,7 @@ struct dma_async_tx_descriptor {
  * @device_free_chan_resources: release DMA channel's resources
  * @device_prep_dma_memcpy: prepares a memcpy operation
  * @device_prep_dma_xor: prepares a xor operation
- * @device_prep_dma_zero_sum: prepares a zero_sum operation
+ * @device_prep_dma_xor_val: prepares a xor validation operation
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
@@ -243,7 +243,7 @@ struct dma_device {
 	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
 		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		unsigned int src_cnt, size_t len, unsigned long flags);
-	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
+	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
 		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
 		size_t len, u32 *result, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
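For driver authors, the renamed hook slots in alongside the existing xor
callback. A sketch of the registration pattern (the my_* names are
hypothetical placeholders; this mirrors the iop-adma changes above):

	/*
	 * Sketch only: advertise DMA_XOR_VAL and supply the matching prep
	 * routine.  dma_async_device_register() BUG_ON()s if the capability
	 * is set without device_prep_dma_xor_val (see dmaengine.c above).
	 */
	static struct dma_async_tx_descriptor *my_prep_dma_xor_val(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, u32 *result, unsigned long flags);

	static void my_adma_capabilities_init(struct dma_device *dma_dev)
	{
		dma_cap_set(DMA_XOR_VAL, dma_dev->cap_mask);
		dma_dev->device_prep_dma_xor_val = my_prep_dma_xor_val;
	}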