author     Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:42:29 -0400
committer  Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:42:29 -0400
commit     f9dd2134374c8de6b911e2b8652c6c9622eaa658 (patch)
tree       c1b8f8d622941606b9e7247ab31d811ba4295011 /crypto
parent     4b652f0db3be891c7b76b109c3b55003b920fc96 (diff)
parent     07a3b417dc3d00802bd7b4874c3e811f0b015a7d (diff)
Merge branch 'md-raid6-accel' into ioat3.2
Conflicts:
include/linux/dmaengine.h
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/async_tx/Kconfig             |   9
-rw-r--r--  crypto/async_tx/Makefile            |   3
-rw-r--r--  crypto/async_tx/async_memcpy.c      |  39
-rw-r--r--  crypto/async_tx/async_memset.c      |  38
-rw-r--r--  crypto/async_tx/async_pq.c          | 388
-rw-r--r--  crypto/async_tx/async_raid6_recov.c | 448
-rw-r--r--  crypto/async_tx/async_tx.c          |  83
-rw-r--r--  crypto/async_tx/async_xor.c         | 199
-rw-r--r--  crypto/async_tx/raid6test.c         | 241
9 files changed, 1247 insertions(+), 201 deletions(-)
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index d8fb39145986..e5aeb2b79e6f 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -14,3 +14,12 @@ config ASYNC_MEMSET
14 | tristate | 14 | tristate |
15 | select ASYNC_CORE | 15 | select ASYNC_CORE |
16 | 16 | ||
17 | config ASYNC_PQ | ||
18 | tristate | ||
19 | select ASYNC_CORE | ||
20 | |||
21 | config ASYNC_RAID6_RECOV | ||
22 | tristate | ||
23 | select ASYNC_CORE | ||
24 | select ASYNC_PQ | ||
25 | |||
diff --git a/crypto/async_tx/Makefile b/crypto/async_tx/Makefile
index 27baa7d52fbc..d1e0e6f72bc1 100644
--- a/crypto/async_tx/Makefile
+++ b/crypto/async_tx/Makefile
@@ -2,3 +2,6 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx.o
2 | obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o | 2 | obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o |
3 | obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o | 3 | obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o |
4 | obj-$(CONFIG_ASYNC_XOR) += async_xor.o | 4 | obj-$(CONFIG_ASYNC_XOR) += async_xor.o |
5 | obj-$(CONFIG_ASYNC_PQ) += async_pq.o | ||
6 | obj-$(CONFIG_ASYNC_RAID6_RECOV) += async_raid6_recov.o | ||
7 | obj-$(CONFIG_ASYNC_RAID6_TEST) += raid6test.o | ||
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index ddccfb01c416..98e15bd0dcb5 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -33,28 +33,28 @@
33 | * async_memcpy - attempt to copy memory with a dma engine. | 33 | * async_memcpy - attempt to copy memory with a dma engine. |
34 | * @dest: destination page | 34 | * @dest: destination page |
35 | * @src: src page | 35 | * @src: src page |
36 | * @offset: offset in pages to start transaction | 36 | * @dest_offset: offset into 'dest' to start transaction |
37 | * @src_offset: offset into 'src' to start transaction | ||
37 | * @len: length in bytes | 38 | * @len: length in bytes |
38 | * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK, | 39 | * @submit: submission / completion modifiers |
39 | * @depend_tx: memcpy depends on the result of this transaction | 40 | * |
40 | * @cb_fn: function to call when the memcpy completes | 41 | * honored flags: ASYNC_TX_ACK |
41 | * @cb_param: parameter to pass to the callback routine | ||
42 | */ | 42 | */ |
43 | struct dma_async_tx_descriptor * | 43 | struct dma_async_tx_descriptor * |
44 | async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | 44 | async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, |
45 | unsigned int src_offset, size_t len, enum async_tx_flags flags, | 45 | unsigned int src_offset, size_t len, |
46 | struct dma_async_tx_descriptor *depend_tx, | 46 | struct async_submit_ctl *submit) |
47 | dma_async_tx_callback cb_fn, void *cb_param) | ||
48 | { | 47 | { |
49 | struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY, | 48 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY, |
50 | &dest, 1, &src, 1, len); | 49 | &dest, 1, &src, 1, len); |
51 | struct dma_device *device = chan ? chan->device : NULL; | 50 | struct dma_device *device = chan ? chan->device : NULL; |
52 | struct dma_async_tx_descriptor *tx = NULL; | 51 | struct dma_async_tx_descriptor *tx = NULL; |
53 | 52 | ||
54 | if (device) { | 53 | if (device) { |
55 | dma_addr_t dma_dest, dma_src; | 54 | dma_addr_t dma_dest, dma_src; |
56 | unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; | 55 | unsigned long dma_prep_flags; |
57 | 56 | ||
57 | dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; | ||
58 | dma_dest = dma_map_page(device->dev, dest, dest_offset, len, | 58 | dma_dest = dma_map_page(device->dev, dest, dest_offset, len, |
59 | DMA_FROM_DEVICE); | 59 | DMA_FROM_DEVICE); |
60 | 60 | ||
@@ -67,13 +67,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | |||
67 | 67 | ||
68 | if (tx) { | 68 | if (tx) { |
69 | pr_debug("%s: (async) len: %zu\n", __func__, len); | 69 | pr_debug("%s: (async) len: %zu\n", __func__, len); |
70 | async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); | 70 | async_tx_submit(chan, tx, submit); |
71 | } else { | 71 | } else { |
72 | void *dest_buf, *src_buf; | 72 | void *dest_buf, *src_buf; |
73 | pr_debug("%s: (sync) len: %zu\n", __func__, len); | 73 | pr_debug("%s: (sync) len: %zu\n", __func__, len); |
74 | 74 | ||
75 | /* wait for any prerequisite operations */ | 75 | /* wait for any prerequisite operations */ |
76 | async_tx_quiesce(&depend_tx); | 76 | async_tx_quiesce(&submit->depend_tx); |
77 | 77 | ||
78 | dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset; | 78 | dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset; |
79 | src_buf = kmap_atomic(src, KM_USER1) + src_offset; | 79 | src_buf = kmap_atomic(src, KM_USER1) + src_offset; |
@@ -83,26 +83,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | |||
83 | kunmap_atomic(dest_buf, KM_USER0); | 83 | kunmap_atomic(dest_buf, KM_USER0); |
84 | kunmap_atomic(src_buf, KM_USER1); | 84 | kunmap_atomic(src_buf, KM_USER1); |
85 | 85 | ||
86 | async_tx_sync_epilog(cb_fn, cb_param); | 86 | async_tx_sync_epilog(submit); |
87 | } | 87 | } |
88 | 88 | ||
89 | return tx; | 89 | return tx; |
90 | } | 90 | } |
91 | EXPORT_SYMBOL_GPL(async_memcpy); | 91 | EXPORT_SYMBOL_GPL(async_memcpy); |
92 | 92 | ||
93 | static int __init async_memcpy_init(void) | ||
94 | { | ||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | static void __exit async_memcpy_exit(void) | ||
99 | { | ||
100 | do { } while (0); | ||
101 | } | ||
102 | |||
103 | module_init(async_memcpy_init); | ||
104 | module_exit(async_memcpy_exit); | ||
105 | |||
106 | MODULE_AUTHOR("Intel Corporation"); | 93 | MODULE_AUTHOR("Intel Corporation"); |
107 | MODULE_DESCRIPTION("asynchronous memcpy api"); | 94 | MODULE_DESCRIPTION("asynchronous memcpy api"); |
108 | MODULE_LICENSE("GPL"); | 95 | MODULE_LICENSE("GPL"); |
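[Editor's note: the hunks above replace async_memcpy()'s flags/depend_tx/callback argument list with a single struct async_submit_ctl. A minimal caller sketch under the new interface follows, assuming <linux/async_tx.h>; the helper name, 'copy_done' callback and 'ctx' cookie are illustrative, not part of this commit.]

/* minimal sketch: one page copy with the new submission context */
static struct dma_async_tx_descriptor *
copy_one_page(struct page *dest, struct page *src,
	      dma_async_tx_callback copy_done, void *ctx)
{
	struct async_submit_ctl submit;

	/* no dependency, ack the descriptor on completion, no scribble needed */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, copy_done, ctx, NULL);
	return async_memcpy(dest, src, 0, 0, PAGE_SIZE, &submit);
}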
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 5b5eb99bb244..b896a6e5f673 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -35,26 +35,23 @@
35 | * @val: fill value | 35 | * @val: fill value |
36 | * @offset: offset in pages to start transaction | 36 | * @offset: offset in pages to start transaction |
37 | * @len: length in bytes | 37 | * @len: length in bytes |
38 | * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK | 38 | * |
39 | * @depend_tx: memset depends on the result of this transaction | 39 | * honored flags: ASYNC_TX_ACK |
40 | * @cb_fn: function to call when the memcpy completes | ||
41 | * @cb_param: parameter to pass to the callback routine | ||
42 | */ | 40 | */ |
43 | struct dma_async_tx_descriptor * | 41 | struct dma_async_tx_descriptor * |
44 | async_memset(struct page *dest, int val, unsigned int offset, | 42 | async_memset(struct page *dest, int val, unsigned int offset, size_t len, |
45 | size_t len, enum async_tx_flags flags, | 43 | struct async_submit_ctl *submit) |
46 | struct dma_async_tx_descriptor *depend_tx, | ||
47 | dma_async_tx_callback cb_fn, void *cb_param) | ||
48 | { | 44 | { |
49 | struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET, | 45 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET, |
50 | &dest, 1, NULL, 0, len); | 46 | &dest, 1, NULL, 0, len); |
51 | struct dma_device *device = chan ? chan->device : NULL; | 47 | struct dma_device *device = chan ? chan->device : NULL; |
52 | struct dma_async_tx_descriptor *tx = NULL; | 48 | struct dma_async_tx_descriptor *tx = NULL; |
53 | 49 | ||
54 | if (device) { | 50 | if (device) { |
55 | dma_addr_t dma_dest; | 51 | dma_addr_t dma_dest; |
56 | unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; | 52 | unsigned long dma_prep_flags; |
57 | 53 | ||
54 | dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; | ||
58 | dma_dest = dma_map_page(device->dev, dest, offset, len, | 55 | dma_dest = dma_map_page(device->dev, dest, offset, len, |
59 | DMA_FROM_DEVICE); | 56 | DMA_FROM_DEVICE); |
60 | 57 | ||
@@ -64,38 +61,25 @@ async_memset(struct page *dest, int val, unsigned int offset, | |||
64 | 61 | ||
65 | if (tx) { | 62 | if (tx) { |
66 | pr_debug("%s: (async) len: %zu\n", __func__, len); | 63 | pr_debug("%s: (async) len: %zu\n", __func__, len); |
67 | async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); | 64 | async_tx_submit(chan, tx, submit); |
68 | } else { /* run the memset synchronously */ | 65 | } else { /* run the memset synchronously */ |
69 | void *dest_buf; | 66 | void *dest_buf; |
70 | pr_debug("%s: (sync) len: %zu\n", __func__, len); | 67 | pr_debug("%s: (sync) len: %zu\n", __func__, len); |
71 | 68 | ||
72 | dest_buf = (void *) (((char *) page_address(dest)) + offset); | 69 | dest_buf = page_address(dest) + offset; |
73 | 70 | ||
74 | /* wait for any prerequisite operations */ | 71 | /* wait for any prerequisite operations */ |
75 | async_tx_quiesce(&depend_tx); | 72 | async_tx_quiesce(&submit->depend_tx); |
76 | 73 | ||
77 | memset(dest_buf, val, len); | 74 | memset(dest_buf, val, len); |
78 | 75 | ||
79 | async_tx_sync_epilog(cb_fn, cb_param); | 76 | async_tx_sync_epilog(submit); |
80 | } | 77 | } |
81 | 78 | ||
82 | return tx; | 79 | return tx; |
83 | } | 80 | } |
84 | EXPORT_SYMBOL_GPL(async_memset); | 81 | EXPORT_SYMBOL_GPL(async_memset); |
85 | 82 | ||
86 | static int __init async_memset_init(void) | ||
87 | { | ||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static void __exit async_memset_exit(void) | ||
92 | { | ||
93 | do { } while (0); | ||
94 | } | ||
95 | |||
96 | module_init(async_memset_init); | ||
97 | module_exit(async_memset_exit); | ||
98 | |||
99 | MODULE_AUTHOR("Intel Corporation"); | 83 | MODULE_AUTHOR("Intel Corporation"); |
100 | MODULE_DESCRIPTION("asynchronous memset api"); | 84 | MODULE_DESCRIPTION("asynchronous memset api"); |
101 | MODULE_LICENSE("GPL"); | 85 | MODULE_LICENSE("GPL"); |
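[Editor's note: async_memset() takes the same submission context, and ordering between operations is now expressed by passing the previous descriptor as the depend_tx argument of init_async_submit(). A hedged sketch of chaining a fill behind a copy; all identifiers below are illustrative.]

/* minimal sketch: the memset only starts once the copy has completed */
static struct dma_async_tx_descriptor *
copy_then_clear(struct page *dest, struct page *src, struct page *scratch,
		size_t len, dma_async_tx_callback all_done, void *ctx)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	init_async_submit(&submit, 0, NULL, NULL, NULL, NULL);
	tx = async_memcpy(dest, src, 0, 0, len, &submit);

	/* 'tx' becomes the dependency of the fill */
	init_async_submit(&submit, ASYNC_TX_ACK, tx, all_done, ctx, NULL);
	return async_memset(scratch, 0, 0, len, &submit);
}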
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
new file mode 100644
index 000000000000..108b21efb499
--- /dev/null
+++ b/crypto/async_tx/async_pq.c
@@ -0,0 +1,388 @@
1 | /* | ||
2 | * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com> | ||
3 | * Copyright(c) 2009 Intel Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License as published by the Free | ||
7 | * Software Foundation; either version 2 of the License, or (at your option) | ||
8 | * any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
17 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | * | ||
19 | * The full GNU General Public License is included in this distribution in the | ||
20 | * file called COPYING. | ||
21 | */ | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/dma-mapping.h> | ||
25 | #include <linux/raid/pq.h> | ||
26 | #include <linux/async_tx.h> | ||
27 | |||
28 | /** | ||
29 | * scribble - space to hold throwaway P buffer for synchronous gen_syndrome | ||
30 | */ | ||
31 | static struct page *scribble; | ||
32 | |||
33 | static bool is_raid6_zero_block(struct page *p) | ||
34 | { | ||
35 | return p == (void *) raid6_empty_zero_page; | ||
36 | } | ||
37 | |||
38 | /* the struct page *blocks[] parameter passed to async_gen_syndrome() | ||
39 | * and async_syndrome_val() contains the 'P' destination address at | ||
40 | * blocks[disks-2] and the 'Q' destination address at blocks[disks-1] | ||
41 | * | ||
42 | * note: these are macros as they are used as lvalues | ||
43 | */ | ||
44 | #define P(b, d) (b[d-2]) | ||
45 | #define Q(b, d) (b[d-1]) | ||
46 | |||
47 | /** | ||
48 | * do_async_gen_syndrome - asynchronously calculate P and/or Q | ||
49 | */ | ||
50 | static __async_inline struct dma_async_tx_descriptor * | ||
51 | do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, | ||
52 | const unsigned char *scfs, unsigned int offset, int disks, | ||
53 | size_t len, dma_addr_t *dma_src, | ||
54 | struct async_submit_ctl *submit) | ||
55 | { | ||
56 | struct dma_async_tx_descriptor *tx = NULL; | ||
57 | struct dma_device *dma = chan->device; | ||
58 | enum dma_ctrl_flags dma_flags = 0; | ||
59 | enum async_tx_flags flags_orig = submit->flags; | ||
60 | dma_async_tx_callback cb_fn_orig = submit->cb_fn; | ||
61 | dma_async_tx_callback cb_param_orig = submit->cb_param; | ||
62 | int src_cnt = disks - 2; | ||
63 | unsigned char coefs[src_cnt]; | ||
64 | unsigned short pq_src_cnt; | ||
65 | dma_addr_t dma_dest[2]; | ||
66 | int src_off = 0; | ||
67 | int idx; | ||
68 | int i; | ||
69 | |||
70 | /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */ | ||
71 | if (P(blocks, disks)) | ||
72 | dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset, | ||
73 | len, DMA_BIDIRECTIONAL); | ||
74 | else | ||
75 | dma_flags |= DMA_PREP_PQ_DISABLE_P; | ||
76 | if (Q(blocks, disks)) | ||
77 | dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset, | ||
78 | len, DMA_BIDIRECTIONAL); | ||
79 | else | ||
80 | dma_flags |= DMA_PREP_PQ_DISABLE_Q; | ||
81 | |||
82 | /* convert source addresses being careful to collapse 'empty' | ||
83 | * sources and update the coefficients accordingly | ||
84 | */ | ||
85 | for (i = 0, idx = 0; i < src_cnt; i++) { | ||
86 | if (is_raid6_zero_block(blocks[i])) | ||
87 | continue; | ||
88 | dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len, | ||
89 | DMA_TO_DEVICE); | ||
90 | coefs[idx] = scfs[i]; | ||
91 | idx++; | ||
92 | } | ||
93 | src_cnt = idx; | ||
94 | |||
95 | while (src_cnt > 0) { | ||
96 | submit->flags = flags_orig; | ||
97 | pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags)); | ||
98 | /* if we are submitting additional pqs, leave the chain open, | ||
99 | * clear the callback parameters, and leave the destination | ||
100 | * buffers mapped | ||
101 | */ | ||
102 | if (src_cnt > pq_src_cnt) { | ||
103 | submit->flags &= ~ASYNC_TX_ACK; | ||
104 | dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP; | ||
105 | submit->cb_fn = NULL; | ||
106 | submit->cb_param = NULL; | ||
107 | } else { | ||
108 | dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP; | ||
109 | submit->cb_fn = cb_fn_orig; | ||
110 | submit->cb_param = cb_param_orig; | ||
111 | if (cb_fn_orig) | ||
112 | dma_flags |= DMA_PREP_INTERRUPT; | ||
113 | } | ||
114 | |||
115 | /* Since we have clobbered the src_list we are committed | ||
116 | * to doing this asynchronously. Drivers force forward | ||
117 | * progress in case they can not provide a descriptor | ||
118 | */ | ||
119 | for (;;) { | ||
120 | tx = dma->device_prep_dma_pq(chan, dma_dest, | ||
121 | &dma_src[src_off], | ||
122 | pq_src_cnt, | ||
123 | &coefs[src_off], len, | ||
124 | dma_flags); | ||
125 | if (likely(tx)) | ||
126 | break; | ||
127 | async_tx_quiesce(&submit->depend_tx); | ||
128 | dma_async_issue_pending(chan); | ||
129 | } | ||
130 | |||
131 | async_tx_submit(chan, tx, submit); | ||
132 | submit->depend_tx = tx; | ||
133 | |||
134 | /* drop completed sources */ | ||
135 | src_cnt -= pq_src_cnt; | ||
136 | src_off += pq_src_cnt; | ||
137 | |||
138 | dma_flags |= DMA_PREP_CONTINUE; | ||
139 | } | ||
140 | |||
141 | return tx; | ||
142 | } | ||
143 | |||
144 | /** | ||
145 | * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome | ||
146 | */ | ||
147 | static void | ||
148 | do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, | ||
149 | size_t len, struct async_submit_ctl *submit) | ||
150 | { | ||
151 | void **srcs; | ||
152 | int i; | ||
153 | |||
154 | if (submit->scribble) | ||
155 | srcs = submit->scribble; | ||
156 | else | ||
157 | srcs = (void **) blocks; | ||
158 | |||
159 | for (i = 0; i < disks; i++) { | ||
160 | if (is_raid6_zero_block(blocks[i])) { | ||
161 | BUG_ON(i > disks - 3); /* P or Q can't be zero */ | ||
162 | srcs[i] = blocks[i]; | ||
163 | } else | ||
164 | srcs[i] = page_address(blocks[i]) + offset; | ||
165 | } | ||
166 | raid6_call.gen_syndrome(disks, len, srcs); | ||
167 | async_tx_sync_epilog(submit); | ||
168 | } | ||
169 | |||
170 | /** | ||
171 | * async_gen_syndrome - asynchronously calculate a raid6 syndrome | ||
172 | * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 | ||
173 | * @offset: common offset into each block (src and dest) to start transaction | ||
174 | * @disks: number of blocks (including missing P or Q, see below) | ||
175 | * @len: length of operation in bytes | ||
176 | * @submit: submission/completion modifiers | ||
177 | * | ||
178 | * General note: This routine assumes a field of GF(2^8) with a | ||
179 | * primitive polynomial of 0x11d and a generator of {02}. | ||
180 | * | ||
181 | * 'disks' note: callers can optionally omit either P or Q (but not | ||
182 | * both) from the calculation by setting blocks[disks-2] or | ||
183 | * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <= | ||
184 | * PAGE_SIZE as a temporary buffer of this size is used in the | ||
185 | * synchronous path. 'disks' always accounts for both destination | ||
186 | * buffers. | ||
187 | * | ||
188 | * 'blocks' note: if submit->scribble is NULL then the contents of | ||
189 | * 'blocks' may be overridden | ||
190 | */ | ||
191 | struct dma_async_tx_descriptor * | ||
192 | async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, | ||
193 | size_t len, struct async_submit_ctl *submit) | ||
194 | { | ||
195 | int src_cnt = disks - 2; | ||
196 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, | ||
197 | &P(blocks, disks), 2, | ||
198 | blocks, src_cnt, len); | ||
199 | struct dma_device *device = chan ? chan->device : NULL; | ||
200 | dma_addr_t *dma_src = NULL; | ||
201 | |||
202 | BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); | ||
203 | |||
204 | if (submit->scribble) | ||
205 | dma_src = submit->scribble; | ||
206 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) | ||
207 | dma_src = (dma_addr_t *) blocks; | ||
208 | |||
209 | if (dma_src && device && | ||
210 | (src_cnt <= dma_maxpq(device, 0) || | ||
211 | dma_maxpq(device, DMA_PREP_CONTINUE) > 0)) { | ||
212 | /* run the p+q asynchronously */ | ||
213 | pr_debug("%s: (async) disks: %d len: %zu\n", | ||
214 | __func__, disks, len); | ||
215 | return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset, | ||
216 | disks, len, dma_src, submit); | ||
217 | } | ||
218 | |||
219 | /* run the pq synchronously */ | ||
220 | pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); | ||
221 | |||
222 | /* wait for any prerequisite operations */ | ||
223 | async_tx_quiesce(&submit->depend_tx); | ||
224 | |||
225 | if (!P(blocks, disks)) { | ||
226 | P(blocks, disks) = scribble; | ||
227 | BUG_ON(len + offset > PAGE_SIZE); | ||
228 | } | ||
229 | if (!Q(blocks, disks)) { | ||
230 | Q(blocks, disks) = scribble; | ||
231 | BUG_ON(len + offset > PAGE_SIZE); | ||
232 | } | ||
233 | do_sync_gen_syndrome(blocks, offset, disks, len, submit); | ||
234 | |||
235 | return NULL; | ||
236 | } | ||
237 | EXPORT_SYMBOL_GPL(async_gen_syndrome); | ||
238 | |||
239 | /** | ||
240 | * async_syndrome_val - asynchronously validate a raid6 syndrome | ||
241 | * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 | ||
242 | * @offset: common offset into each block (src and dest) to start transaction | ||
243 | * @disks: number of blocks (including missing P or Q, see below) | ||
244 | * @len: length of operation in bytes | ||
245 | * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set | ||
246 | * @spare: temporary result buffer for the synchronous case | ||
247 | * @submit: submission / completion modifiers | ||
248 | * | ||
249 | * The same notes from async_gen_syndrome apply to the 'blocks', | ||
250 | * and 'disks' parameters of this routine. The synchronous path | ||
251 | * requires a temporary result buffer and submit->scribble to be | ||
252 | * specified. | ||
253 | */ | ||
254 | struct dma_async_tx_descriptor * | ||
255 | async_syndrome_val(struct page **blocks, unsigned int offset, int disks, | ||
256 | size_t len, enum sum_check_flags *pqres, struct page *spare, | ||
257 | struct async_submit_ctl *submit) | ||
258 | { | ||
259 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL, | ||
260 | NULL, 0, blocks, disks, | ||
261 | len); | ||
262 | struct dma_device *device = chan ? chan->device : NULL; | ||
263 | struct dma_async_tx_descriptor *tx; | ||
264 | enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; | ||
265 | dma_addr_t *dma_src = NULL; | ||
266 | |||
267 | BUG_ON(disks < 4); | ||
268 | |||
269 | if (submit->scribble) | ||
270 | dma_src = submit->scribble; | ||
271 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) | ||
272 | dma_src = (dma_addr_t *) blocks; | ||
273 | |||
274 | if (dma_src && device && disks <= dma_maxpq(device, 0)) { | ||
275 | struct device *dev = device->dev; | ||
276 | dma_addr_t *pq = &dma_src[disks-2]; | ||
277 | int i; | ||
278 | |||
279 | pr_debug("%s: (async) disks: %d len: %zu\n", | ||
280 | __func__, disks, len); | ||
281 | if (!P(blocks, disks)) | ||
282 | dma_flags |= DMA_PREP_PQ_DISABLE_P; | ||
283 | if (!Q(blocks, disks)) | ||
284 | dma_flags |= DMA_PREP_PQ_DISABLE_Q; | ||
285 | for (i = 0; i < disks; i++) | ||
286 | if (likely(blocks[i])) { | ||
287 | BUG_ON(is_raid6_zero_block(blocks[i])); | ||
288 | dma_src[i] = dma_map_page(dev, blocks[i], | ||
289 | offset, len, | ||
290 | DMA_TO_DEVICE); | ||
291 | } | ||
292 | |||
293 | for (;;) { | ||
294 | tx = device->device_prep_dma_pq_val(chan, pq, dma_src, | ||
295 | disks - 2, | ||
296 | raid6_gfexp, | ||
297 | len, pqres, | ||
298 | dma_flags); | ||
299 | if (likely(tx)) | ||
300 | break; | ||
301 | async_tx_quiesce(&submit->depend_tx); | ||
302 | dma_async_issue_pending(chan); | ||
303 | } | ||
304 | async_tx_submit(chan, tx, submit); | ||
305 | |||
306 | return tx; | ||
307 | } else { | ||
308 | struct page *p_src = P(blocks, disks); | ||
309 | struct page *q_src = Q(blocks, disks); | ||
310 | enum async_tx_flags flags_orig = submit->flags; | ||
311 | dma_async_tx_callback cb_fn_orig = submit->cb_fn; | ||
312 | void *scribble = submit->scribble; | ||
313 | void *cb_param_orig = submit->cb_param; | ||
314 | void *p, *q, *s; | ||
315 | |||
316 | pr_debug("%s: (sync) disks: %d len: %zu\n", | ||
317 | __func__, disks, len); | ||
318 | |||
319 | /* caller must provide a temporary result buffer and | ||
320 | * allow the input parameters to be preserved | ||
321 | */ | ||
322 | BUG_ON(!spare || !scribble); | ||
323 | |||
324 | /* wait for any prerequisite operations */ | ||
325 | async_tx_quiesce(&submit->depend_tx); | ||
326 | |||
327 | /* recompute p and/or q into the temporary buffer and then | ||
328 | * check to see the result matches the current value | ||
329 | */ | ||
330 | tx = NULL; | ||
331 | *pqres = 0; | ||
332 | if (p_src) { | ||
333 | init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL, | ||
334 | NULL, NULL, scribble); | ||
335 | tx = async_xor(spare, blocks, offset, disks-2, len, submit); | ||
336 | async_tx_quiesce(&tx); | ||
337 | p = page_address(p_src) + offset; | ||
338 | s = page_address(spare) + offset; | ||
339 | *pqres |= !!memcmp(p, s, len) << SUM_CHECK_P; | ||
340 | } | ||
341 | |||
342 | if (q_src) { | ||
343 | P(blocks, disks) = NULL; | ||
344 | Q(blocks, disks) = spare; | ||
345 | init_async_submit(submit, 0, NULL, NULL, NULL, scribble); | ||
346 | tx = async_gen_syndrome(blocks, offset, disks, len, submit); | ||
347 | async_tx_quiesce(&tx); | ||
348 | q = page_address(q_src) + offset; | ||
349 | s = page_address(spare) + offset; | ||
350 | *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q; | ||
351 | } | ||
352 | |||
353 | /* restore P, Q and submit */ | ||
354 | P(blocks, disks) = p_src; | ||
355 | Q(blocks, disks) = q_src; | ||
356 | |||
357 | submit->cb_fn = cb_fn_orig; | ||
358 | submit->cb_param = cb_param_orig; | ||
359 | submit->flags = flags_orig; | ||
360 | async_tx_sync_epilog(submit); | ||
361 | |||
362 | return NULL; | ||
363 | } | ||
364 | } | ||
365 | EXPORT_SYMBOL_GPL(async_syndrome_val); | ||
366 | |||
367 | static int __init async_pq_init(void) | ||
368 | { | ||
369 | scribble = alloc_page(GFP_KERNEL); | ||
370 | |||
371 | if (scribble) | ||
372 | return 0; | ||
373 | |||
374 | pr_err("%s: failed to allocate required spare page\n", __func__); | ||
375 | |||
376 | return -ENOMEM; | ||
377 | } | ||
378 | |||
379 | static void __exit async_pq_exit(void) | ||
380 | { | ||
381 | put_page(scribble); | ||
382 | } | ||
383 | |||
384 | module_init(async_pq_init); | ||
385 | module_exit(async_pq_exit); | ||
386 | |||
387 | MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation"); | ||
388 | MODULE_LICENSE("GPL"); | ||
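[Editor's note: a hedged sketch of driving async_gen_syndrome() for a small stripe. Per the documentation above, blocks[] carries the data sources followed by the P and Q destinations, and a caller-supplied scribble area lets the asynchronous path run without clobbering the source list. The page arrays, 'done' callback and 'scribble' allocation are illustrative assumptions.]

/* minimal sketch for a 6 member stripe (4 data + P + Q, so disks == 6) */
static struct dma_async_tx_descriptor *
compute_pq(struct page **data, struct page *p, struct page *q,
	   void *scribble, dma_async_tx_callback done, void *ctx)
{
	struct page *blocks[6];
	struct async_submit_ctl submit;
	int i;

	for (i = 0; i < 4; i++)
		blocks[i] = data[i];
	blocks[4] = p;		/* P destination at blocks[disks-2] */
	blocks[5] = q;		/* Q destination at blocks[disks-1] */

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, done, ctx, scribble);
	return async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
}

Setting blocks[4] or blocks[5] to NULL would drop the corresponding destination from the calculation, subject to the PAGE_SIZE limit documented above.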
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
new file mode 100644
index 000000000000..0c14d48c9896
--- /dev/null
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -0,0 +1,448 @@
1 | /* | ||
2 | * Asynchronous RAID-6 recovery calculations ASYNC_TX API. | ||
3 | * Copyright(c) 2009 Intel Corporation | ||
4 | * | ||
5 | * based on raid6recov.c: | ||
6 | * Copyright 2002 H. Peter Anvin | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along with | ||
19 | * this program; if not, write to the Free Software Foundation, Inc., 51 | ||
20 | * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
21 | * | ||
22 | */ | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/dma-mapping.h> | ||
26 | #include <linux/raid/pq.h> | ||
27 | #include <linux/async_tx.h> | ||
28 | |||
29 | static struct dma_async_tx_descriptor * | ||
30 | async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, | ||
31 | size_t len, struct async_submit_ctl *submit) | ||
32 | { | ||
33 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, | ||
34 | &dest, 1, srcs, 2, len); | ||
35 | struct dma_device *dma = chan ? chan->device : NULL; | ||
36 | const u8 *amul, *bmul; | ||
37 | u8 ax, bx; | ||
38 | u8 *a, *b, *c; | ||
39 | |||
40 | if (dma) { | ||
41 | dma_addr_t dma_dest[2]; | ||
42 | dma_addr_t dma_src[2]; | ||
43 | struct device *dev = dma->dev; | ||
44 | struct dma_async_tx_descriptor *tx; | ||
45 | enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; | ||
46 | |||
47 | dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); | ||
48 | dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); | ||
49 | dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); | ||
50 | tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef, | ||
51 | len, dma_flags); | ||
52 | if (tx) { | ||
53 | async_tx_submit(chan, tx, submit); | ||
54 | return tx; | ||
55 | } | ||
56 | } | ||
57 | |||
58 | /* run the operation synchronously */ | ||
59 | async_tx_quiesce(&submit->depend_tx); | ||
60 | amul = raid6_gfmul[coef[0]]; | ||
61 | bmul = raid6_gfmul[coef[1]]; | ||
62 | a = page_address(srcs[0]); | ||
63 | b = page_address(srcs[1]); | ||
64 | c = page_address(dest); | ||
65 | |||
66 | while (len--) { | ||
67 | ax = amul[*a++]; | ||
68 | bx = bmul[*b++]; | ||
69 | *c++ = ax ^ bx; | ||
70 | } | ||
71 | |||
72 | return NULL; | ||
73 | } | ||
74 | |||
75 | static struct dma_async_tx_descriptor * | ||
76 | async_mult(struct page *dest, struct page *src, u8 coef, size_t len, | ||
77 | struct async_submit_ctl *submit) | ||
78 | { | ||
79 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, | ||
80 | &dest, 1, &src, 1, len); | ||
81 | struct dma_device *dma = chan ? chan->device : NULL; | ||
82 | const u8 *qmul; /* Q multiplier table */ | ||
83 | u8 *d, *s; | ||
84 | |||
85 | if (dma) { | ||
86 | dma_addr_t dma_dest[2]; | ||
87 | dma_addr_t dma_src[1]; | ||
88 | struct device *dev = dma->dev; | ||
89 | struct dma_async_tx_descriptor *tx; | ||
90 | enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; | ||
91 | |||
92 | dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); | ||
93 | dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); | ||
94 | tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef, | ||
95 | len, dma_flags); | ||
96 | if (tx) { | ||
97 | async_tx_submit(chan, tx, submit); | ||
98 | return tx; | ||
99 | } | ||
100 | } | ||
101 | |||
102 | /* no channel available, or failed to allocate a descriptor, so | ||
103 | * perform the operation synchronously | ||
104 | */ | ||
105 | async_tx_quiesce(&submit->depend_tx); | ||
106 | qmul = raid6_gfmul[coef]; | ||
107 | d = page_address(dest); | ||
108 | s = page_address(src); | ||
109 | |||
110 | while (len--) | ||
111 | *d++ = qmul[*s++]; | ||
112 | |||
113 | return NULL; | ||
114 | } | ||
115 | |||
116 | static struct dma_async_tx_descriptor * | ||
117 | __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks, | ||
118 | struct async_submit_ctl *submit) | ||
119 | { | ||
120 | struct dma_async_tx_descriptor *tx = NULL; | ||
121 | struct page *p, *q, *a, *b; | ||
122 | struct page *srcs[2]; | ||
123 | unsigned char coef[2]; | ||
124 | enum async_tx_flags flags = submit->flags; | ||
125 | dma_async_tx_callback cb_fn = submit->cb_fn; | ||
126 | void *cb_param = submit->cb_param; | ||
127 | void *scribble = submit->scribble; | ||
128 | |||
129 | p = blocks[4-2]; | ||
130 | q = blocks[4-1]; | ||
131 | |||
132 | a = blocks[faila]; | ||
133 | b = blocks[failb]; | ||
134 | |||
135 | /* in the 4 disk case P + Pxy == P and Q + Qxy == Q */ | ||
136 | /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ | ||
137 | srcs[0] = p; | ||
138 | srcs[1] = q; | ||
139 | coef[0] = raid6_gfexi[failb-faila]; | ||
140 | coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; | ||
141 | init_async_submit(submit, 0, tx, NULL, NULL, scribble); | ||
142 | tx = async_sum_product(b, srcs, coef, bytes, submit); | ||
143 | |||
144 | /* Dy = P+Pxy+Dx */ | ||
145 | srcs[0] = p; | ||
146 | srcs[1] = b; | ||
147 | init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn, | ||
148 | cb_param, scribble); | ||
149 | tx = async_xor(a, srcs, 0, 2, bytes, submit); | ||
150 | |||
151 | return tx; | ||
152 | |||
153 | } | ||
154 | |||
155 | static struct dma_async_tx_descriptor * | ||
156 | __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks, | ||
157 | struct async_submit_ctl *submit) | ||
158 | { | ||
159 | struct dma_async_tx_descriptor *tx = NULL; | ||
160 | struct page *p, *q, *g, *dp, *dq; | ||
161 | struct page *srcs[2]; | ||
162 | unsigned char coef[2]; | ||
163 | enum async_tx_flags flags = submit->flags; | ||
164 | dma_async_tx_callback cb_fn = submit->cb_fn; | ||
165 | void *cb_param = submit->cb_param; | ||
166 | void *scribble = submit->scribble; | ||
167 | int uninitialized_var(good); | ||
168 | int i; | ||
169 | |||
170 | for (i = 0; i < 3; i++) { | ||
171 | if (i == faila || i == failb) | ||
172 | continue; | ||
173 | else { | ||
174 | good = i; | ||
175 | break; | ||
176 | } | ||
177 | } | ||
178 | BUG_ON(i >= 3); | ||
179 | |||
180 | p = blocks[5-2]; | ||
181 | q = blocks[5-1]; | ||
182 | g = blocks[good]; | ||
183 | |||
184 | /* Compute syndrome with zero for the missing data pages | ||
185 | * Use the dead data pages as temporary storage for delta p and | ||
186 | * delta q | ||
187 | */ | ||
188 | dp = blocks[faila]; | ||
189 | dq = blocks[failb]; | ||
190 | |||
191 | init_async_submit(submit, 0, tx, NULL, NULL, scribble); | ||
192 | tx = async_memcpy(dp, g, 0, 0, bytes, submit); | ||
193 | init_async_submit(submit, 0, tx, NULL, NULL, scribble); | ||
194 | tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); | ||
195 | |||
196 | /* compute P + Pxy */ | ||
197 | srcs[0] = dp; | ||
198 | srcs[1] = p; | ||
199 | init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, | ||
200 | scribble); | ||
201 | tx = async_xor(dp, srcs, 0, 2, bytes, submit); | ||
202 | |||
203 | /* compute Q + Qxy */ | ||
204 | srcs[0] = dq; | ||
205 | srcs[1] = q; | ||
206 | init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, | ||
207 | scribble); | ||
208 | tx = async_xor(dq, srcs, 0, 2, bytes, submit); | ||
209 | |||
210 | /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ | ||
211 | srcs[0] = dp; | ||
212 | srcs[1] = dq; | ||
213 | coef[0] = raid6_gfexi[failb-faila]; | ||
214 | coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; | ||
215 | init_async_submit(submit, 0, tx, NULL, NULL, scribble); | ||
216 | tx = async_sum_product(dq, srcs, coef, bytes, submit); | ||
217 | |||
218 | /* Dy = P+Pxy+Dx */ | ||
219 | srcs[0] = dp; | ||
220 | srcs[1] = dq; | ||
221 | init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, | ||
222 | cb_param, scribble); | ||
223 | tx = async_xor(dp, srcs, 0, 2, bytes, submit); | ||
224 | |||
225 | return tx; | ||
226 | } | ||
227 | |||
228 | static struct dma_async_tx_descriptor * | ||
229 | __2data_recov_n(int disks, size_t bytes, int faila, int failb, | ||
230 | struct page **blocks, struct async_submit_ctl *submit) | ||
231 | { | ||
232 | struct dma_async_tx_descriptor *tx = NULL; | ||
233 | struct page *p, *q, *dp, *dq; | ||
234 | struct page *srcs[2]; | ||
235 | unsigned char coef[2]; | ||
236 | enum async_tx_flags flags = submit->flags; | ||
237 | dma_async_tx_callback cb_fn = submit->cb_fn; | ||
238 | void *cb_param = submit->cb_param; | ||
239 | void *scribble = submit->scribble; | ||
240 | |||
241 | p = blocks[disks-2]; | ||
242 | q = blocks[disks-1]; | ||
243 | |||
244 | /* Compute syndrome with zero for the missing data pages | ||
245 | * Use the dead data pages as temporary storage for | ||
246 | * delta p and delta q | ||
247 | */ | ||
248 | dp = blocks[faila]; | ||
249 | blocks[faila] = (void *)raid6_empty_zero_page; | ||
250 | blocks[disks-2] = dp; | ||
251 | dq = blocks[failb]; | ||
252 | blocks[failb] = (void *)raid6_empty_zero_page; | ||
253 | blocks[disks-1] = dq; | ||
254 | |||
255 | init_async_submit(submit, 0, tx, NULL, NULL, scribble); | ||
256 | tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); | ||
257 | |||
258 | /* Restore pointer table */ | ||
259 | blocks[faila] = dp; | ||
260 | blocks[failb] = dq; | ||
261 | blocks[disks-2] = p; | ||
262 | blocks[disks-1] = q; | ||
263 | |||
264 | /* compute P + Pxy */ | ||
265 | srcs[0] = dp; | ||
266 | srcs[1] = p; | ||
267 | init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, | ||
268 | scribble); | ||
269 | tx = async_xor(dp, srcs, 0, 2, bytes, submit); | ||
270 | |||
271 | /* compute Q + Qxy */ | ||
272 | srcs[0] = dq; | ||
273 | srcs[1] = q; | ||
274 | init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, | ||
275 | scribble); | ||
276 | tx = async_xor(dq, srcs, 0, 2, bytes, submit); | ||
277 | |||
278 | /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ | ||
279 | srcs[0] = dp; | ||
280 | srcs[1] = dq; | ||
281 | coef[0] = raid6_gfexi[failb-faila]; | ||
282 | coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; | ||
283 | init_async_submit(submit, 0, tx, NULL, NULL, scribble); | ||
284 | tx = async_sum_product(dq, srcs, coef, bytes, submit); | ||
285 | |||
286 | /* Dy = P+Pxy+Dx */ | ||
287 | srcs[0] = dp; | ||
288 | srcs[1] = dq; | ||
289 | init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, | ||
290 | cb_param, scribble); | ||
291 | tx = async_xor(dp, srcs, 0, 2, bytes, submit); | ||
292 | |||
293 | return tx; | ||
294 | } | ||
295 | |||
296 | /** | ||
297 | * async_raid6_2data_recov - asynchronously calculate two missing data blocks | ||
298 | * @disks: number of disks in the RAID-6 array | ||
299 | * @bytes: block size | ||
300 | * @faila: first failed drive index | ||
301 | * @failb: second failed drive index | ||
302 | * @blocks: array of source pointers where the last two entries are p and q | ||
303 | * @submit: submission/completion modifiers | ||
304 | */ | ||
305 | struct dma_async_tx_descriptor * | ||
306 | async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, | ||
307 | struct page **blocks, struct async_submit_ctl *submit) | ||
308 | { | ||
309 | BUG_ON(faila == failb); | ||
310 | if (failb < faila) | ||
311 | swap(faila, failb); | ||
312 | |||
313 | pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); | ||
314 | |||
315 | /* we need to preserve the contents of 'blocks' for the async | ||
316 | * case, so punt to synchronous if a scribble buffer is not available | ||
317 | */ | ||
318 | if (!submit->scribble) { | ||
319 | void **ptrs = (void **) blocks; | ||
320 | int i; | ||
321 | |||
322 | async_tx_quiesce(&submit->depend_tx); | ||
323 | for (i = 0; i < disks; i++) | ||
324 | ptrs[i] = page_address(blocks[i]); | ||
325 | |||
326 | raid6_2data_recov(disks, bytes, faila, failb, ptrs); | ||
327 | |||
328 | async_tx_sync_epilog(submit); | ||
329 | |||
330 | return NULL; | ||
331 | } | ||
332 | |||
333 | switch (disks) { | ||
334 | case 4: | ||
335 | /* dma devices do not uniformly understand a zero source pq | ||
336 | * operation (in contrast to the synchronous case), so | ||
337 | * explicitly handle the 4 disk special case | ||
338 | */ | ||
339 | return __2data_recov_4(bytes, faila, failb, blocks, submit); | ||
340 | case 5: | ||
341 | /* dma devices do not uniformly understand a single | ||
342 | * source pq operation (in contrast to the synchronous | ||
343 | * case), so explicitly handle the 5 disk special case | ||
344 | */ | ||
345 | return __2data_recov_5(bytes, faila, failb, blocks, submit); | ||
346 | default: | ||
347 | return __2data_recov_n(disks, bytes, faila, failb, blocks, submit); | ||
348 | } | ||
349 | } | ||
350 | EXPORT_SYMBOL_GPL(async_raid6_2data_recov); | ||
351 | |||
352 | /** | ||
353 | * async_raid6_datap_recov - asynchronously calculate a data and the 'p' block | ||
354 | * @disks: number of disks in the RAID-6 array | ||
355 | * @bytes: block size | ||
356 | * @faila: failed drive index | ||
357 | * @blocks: array of source pointers where the last two entries are p and q | ||
358 | * @submit: submission/completion modifiers | ||
359 | */ | ||
360 | struct dma_async_tx_descriptor * | ||
361 | async_raid6_datap_recov(int disks, size_t bytes, int faila, | ||
362 | struct page **blocks, struct async_submit_ctl *submit) | ||
363 | { | ||
364 | struct dma_async_tx_descriptor *tx = NULL; | ||
365 | struct page *p, *q, *dq; | ||
366 | u8 coef; | ||
367 | enum async_tx_flags flags = submit->flags; | ||
368 | dma_async_tx_callback cb_fn = submit->cb_fn; | ||
369 | void *cb_param = submit->cb_param; | ||
370 | void *scribble = submit->scribble; | ||
371 | struct page *srcs[2]; | ||
372 | |||
373 | pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); | ||
374 | |||
375 | /* we need to preserve the contents of 'blocks' for the async | ||
376 | * case, so punt to synchronous if a scribble buffer is not available | ||
377 | */ | ||
378 | if (!scribble) { | ||
379 | void **ptrs = (void **) blocks; | ||
380 | int i; | ||
381 | |||
382 | async_tx_quiesce(&submit->depend_tx); | ||
383 | for (i = 0; i < disks; i++) | ||
384 | ptrs[i] = page_address(blocks[i]); | ||
385 | |||
386 | raid6_datap_recov(disks, bytes, faila, ptrs); | ||
387 | |||
388 | async_tx_sync_epilog(submit); | ||
389 | |||
390 | return NULL; | ||
391 | } | ||
392 | |||
393 | p = blocks[disks-2]; | ||
394 | q = blocks[disks-1]; | ||
395 | |||
396 | /* Compute syndrome with zero for the missing data page | ||
397 | * Use the dead data page as temporary storage for delta q | ||
398 | */ | ||
399 | dq = blocks[faila]; | ||
400 | blocks[faila] = (void *)raid6_empty_zero_page; | ||
401 | blocks[disks-1] = dq; | ||
402 | |||
403 | /* in the 4 disk case we only need to perform a single source | ||
404 | * multiplication | ||
405 | */ | ||
406 | if (disks == 4) { | ||
407 | int good = faila == 0 ? 1 : 0; | ||
408 | struct page *g = blocks[good]; | ||
409 | |||
410 | init_async_submit(submit, 0, tx, NULL, NULL, scribble); | ||
411 | tx = async_memcpy(p, g, 0, 0, bytes, submit); | ||
412 | |||
413 | init_async_submit(submit, 0, tx, NULL, NULL, scribble); | ||
414 | tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); | ||
415 | } else { | ||
416 | init_async_submit(submit, 0, tx, NULL, NULL, scribble); | ||
417 | tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); | ||
418 | } | ||
419 | |||
420 | /* Restore pointer table */ | ||
421 | blocks[faila] = dq; | ||
422 | blocks[disks-1] = q; | ||
423 | |||
424 | /* calculate g^{-faila} */ | ||
425 | coef = raid6_gfinv[raid6_gfexp[faila]]; | ||
426 | |||
427 | srcs[0] = dq; | ||
428 | srcs[1] = q; | ||
429 | init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, | ||
430 | scribble); | ||
431 | tx = async_xor(dq, srcs, 0, 2, bytes, submit); | ||
432 | |||
433 | init_async_submit(submit, 0, tx, NULL, NULL, scribble); | ||
434 | tx = async_mult(dq, dq, coef, bytes, submit); | ||
435 | |||
436 | srcs[0] = p; | ||
437 | srcs[1] = dq; | ||
438 | init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, | ||
439 | cb_param, scribble); | ||
440 | tx = async_xor(p, srcs, 0, 2, bytes, submit); | ||
441 | |||
442 | return tx; | ||
443 | } | ||
444 | EXPORT_SYMBOL_GPL(async_raid6_datap_recov); | ||
445 | |||
446 | MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>"); | ||
447 | MODULE_DESCRIPTION("asynchronous RAID-6 recovery api"); | ||
448 | MODULE_LICENSE("GPL"); | ||
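[Editor's note: a hedged sketch of invoking the recovery helpers added above. blocks[] is laid out as for async_gen_syndrome(), and a scribble buffer is supplied so the asynchronous path can preserve the caller's pointer table (without it the helpers punt to the synchronous raid6 routines). The helper name, callback and cookie are illustrative.]

/* minimal sketch: rebuild two lost data members given intact P and Q */
static struct dma_async_tx_descriptor *
rebuild_two(struct page **blocks, int disks, int faila, int failb,
	    void *scribble, dma_async_tx_callback done, void *ctx)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, done, ctx, scribble);
	return async_raid6_2data_recov(disks, PAGE_SIZE, faila, failb,
				       blocks, &submit);
}

async_raid6_datap_recov() is called the same way when a data block and P are lost, passing only the single failed data index.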
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 06eb6cc09fef..60615fedcf5e 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -42,16 +42,21 @@ static void __exit async_tx_exit(void)
42 | async_dmaengine_put(); | 42 | async_dmaengine_put(); |
43 | } | 43 | } |
44 | 44 | ||
45 | module_init(async_tx_init); | ||
46 | module_exit(async_tx_exit); | ||
47 | |||
45 | /** | 48 | /** |
46 | * __async_tx_find_channel - find a channel to carry out the operation or let | 49 | * __async_tx_find_channel - find a channel to carry out the operation or let |
47 | * the transaction execute synchronously | 50 | * the transaction execute synchronously |
48 | * @depend_tx: transaction dependency | 51 | * @submit: transaction dependency and submission modifiers |
49 | * @tx_type: transaction type | 52 | * @tx_type: transaction type |
50 | */ | 53 | */ |
51 | struct dma_chan * | 54 | struct dma_chan * |
52 | __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, | 55 | __async_tx_find_channel(struct async_submit_ctl *submit, |
53 | enum dma_transaction_type tx_type) | 56 | enum dma_transaction_type tx_type) |
54 | { | 57 | { |
58 | struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; | ||
59 | |||
55 | /* see if we can keep the chain on one channel */ | 60 | /* see if we can keep the chain on one channel */ |
56 | if (depend_tx && | 61 | if (depend_tx && |
57 | dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) | 62 | dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) |
@@ -59,17 +64,6 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, | |||
59 | return async_dma_find_channel(tx_type); | 64 | return async_dma_find_channel(tx_type); |
60 | } | 65 | } |
61 | EXPORT_SYMBOL_GPL(__async_tx_find_channel); | 66 | EXPORT_SYMBOL_GPL(__async_tx_find_channel); |
62 | #else | ||
63 | static int __init async_tx_init(void) | ||
64 | { | ||
65 | printk(KERN_INFO "async_tx: api initialized (sync-only)\n"); | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static void __exit async_tx_exit(void) | ||
70 | { | ||
71 | do { } while (0); | ||
72 | } | ||
73 | #endif | 67 | #endif |
74 | 68 | ||
75 | 69 | ||
@@ -83,8 +77,8 @@ static void | |||
83 | async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | 77 | async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, |
84 | struct dma_async_tx_descriptor *tx) | 78 | struct dma_async_tx_descriptor *tx) |
85 | { | 79 | { |
86 | struct dma_chan *chan; | 80 | struct dma_chan *chan = depend_tx->chan; |
87 | struct dma_device *device; | 81 | struct dma_device *device = chan->device; |
88 | struct dma_async_tx_descriptor *intr_tx = (void *) ~0; | 82 | struct dma_async_tx_descriptor *intr_tx = (void *) ~0; |
89 | 83 | ||
90 | /* first check to see if we can still append to depend_tx */ | 84 | /* first check to see if we can still append to depend_tx */ |
@@ -96,11 +90,11 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | |||
96 | } | 90 | } |
97 | spin_unlock_bh(&depend_tx->lock); | 91 | spin_unlock_bh(&depend_tx->lock); |
98 | 92 | ||
99 | if (!intr_tx) | 93 | /* attached dependency, flush the parent channel */ |
94 | if (!intr_tx) { | ||
95 | device->device_issue_pending(chan); | ||
100 | return; | 96 | return; |
101 | 97 | } | |
102 | chan = depend_tx->chan; | ||
103 | device = chan->device; | ||
104 | 98 | ||
105 | /* see if we can schedule an interrupt | 99 | /* see if we can schedule an interrupt |
106 | * otherwise poll for completion | 100 | * otherwise poll for completion |
@@ -134,6 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | |||
134 | intr_tx->tx_submit(intr_tx); | 128 | intr_tx->tx_submit(intr_tx); |
135 | async_tx_ack(intr_tx); | 129 | async_tx_ack(intr_tx); |
136 | } | 130 | } |
131 | device->device_issue_pending(chan); | ||
137 | } else { | 132 | } else { |
138 | if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) | 133 | if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) |
139 | panic("%s: DMA_ERROR waiting for depend_tx\n", | 134 | panic("%s: DMA_ERROR waiting for depend_tx\n", |
@@ -144,13 +139,14 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | |||
144 | 139 | ||
145 | 140 | ||
146 | /** | 141 | /** |
147 | * submit_disposition - while holding depend_tx->lock we must avoid submitting | 142 | * submit_disposition - flags for routing an incoming operation |
148 | * new operations to prevent a circular locking dependency with | ||
149 | * drivers that already hold a channel lock when calling | ||
150 | * async_tx_run_dependencies. | ||
151 | * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock | 143 | * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock |
152 | * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch | 144 | * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch |
153 | * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly | 145 | * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly |
146 | * | ||
147 | * while holding depend_tx->lock we must avoid submitting new operations | ||
148 | * to prevent a circular locking dependency with drivers that already | ||
149 | * hold a channel lock when calling async_tx_run_dependencies. | ||
154 | */ | 150 | */ |
155 | enum submit_disposition { | 151 | enum submit_disposition { |
156 | ASYNC_TX_SUBMITTED, | 152 | ASYNC_TX_SUBMITTED, |
@@ -160,11 +156,12 @@ enum submit_disposition { | |||
160 | 156 | ||
161 | void | 157 | void |
162 | async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, | 158 | async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, |
163 | enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, | 159 | struct async_submit_ctl *submit) |
164 | dma_async_tx_callback cb_fn, void *cb_param) | ||
165 | { | 160 | { |
166 | tx->callback = cb_fn; | 161 | struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; |
167 | tx->callback_param = cb_param; | 162 | |
163 | tx->callback = submit->cb_fn; | ||
164 | tx->callback_param = submit->cb_param; | ||
168 | 165 | ||
169 | if (depend_tx) { | 166 | if (depend_tx) { |
170 | enum submit_disposition s; | 167 | enum submit_disposition s; |
@@ -220,30 +217,29 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, | |||
220 | tx->tx_submit(tx); | 217 | tx->tx_submit(tx); |
221 | } | 218 | } |
222 | 219 | ||
223 | if (flags & ASYNC_TX_ACK) | 220 | if (submit->flags & ASYNC_TX_ACK) |
224 | async_tx_ack(tx); | 221 | async_tx_ack(tx); |
225 | 222 | ||
226 | if (depend_tx && (flags & ASYNC_TX_DEP_ACK)) | 223 | if (depend_tx) |
227 | async_tx_ack(depend_tx); | 224 | async_tx_ack(depend_tx); |
228 | } | 225 | } |
229 | EXPORT_SYMBOL_GPL(async_tx_submit); | 226 | EXPORT_SYMBOL_GPL(async_tx_submit); |
230 | 227 | ||
231 | /** | 228 | /** |
232 | * async_trigger_callback - schedules the callback function to be run after | 229 | * async_trigger_callback - schedules the callback function to be run |
233 | * any dependent operations have been completed. | 230 | * @submit: submission and completion parameters |
234 | * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK | 231 | * |
235 | * @depend_tx: 'callback' requires the completion of this transaction | 232 | * honored flags: ASYNC_TX_ACK |
236 | * @cb_fn: function to call after depend_tx completes | 233 | * |
237 | * @cb_param: parameter to pass to the callback routine | 234 | * The callback is run after any dependent operations have completed. |
238 | */ | 235 | */ |
239 | struct dma_async_tx_descriptor * | 236 | struct dma_async_tx_descriptor * |
240 | async_trigger_callback(enum async_tx_flags flags, | 237 | async_trigger_callback(struct async_submit_ctl *submit) |
241 | struct dma_async_tx_descriptor *depend_tx, | ||
242 | dma_async_tx_callback cb_fn, void *cb_param) | ||
243 | { | 238 | { |
244 | struct dma_chan *chan; | 239 | struct dma_chan *chan; |
245 | struct dma_device *device; | 240 | struct dma_device *device; |
246 | struct dma_async_tx_descriptor *tx; | 241 | struct dma_async_tx_descriptor *tx; |
242 | struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; | ||
247 | 243 | ||
248 | if (depend_tx) { | 244 | if (depend_tx) { |
249 | chan = depend_tx->chan; | 245 | chan = depend_tx->chan; |
@@ -262,14 +258,14 @@ async_trigger_callback(enum async_tx_flags flags, | |||
262 | if (tx) { | 258 | if (tx) { |
263 | pr_debug("%s: (async)\n", __func__); | 259 | pr_debug("%s: (async)\n", __func__); |
264 | 260 | ||
265 | async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); | 261 | async_tx_submit(chan, tx, submit); |
266 | } else { | 262 | } else { |
267 | pr_debug("%s: (sync)\n", __func__); | 263 | pr_debug("%s: (sync)\n", __func__); |
268 | 264 | ||
269 | /* wait for any prerequisite operations */ | 265 | /* wait for any prerequisite operations */ |
270 | async_tx_quiesce(&depend_tx); | 266 | async_tx_quiesce(&submit->depend_tx); |
271 | 267 | ||
272 | async_tx_sync_epilog(cb_fn, cb_param); | 268 | async_tx_sync_epilog(submit); |
273 | } | 269 | } |
274 | 270 | ||
275 | return tx; | 271 | return tx; |
@@ -295,9 +291,6 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx) | |||
295 | } | 291 | } |
296 | EXPORT_SYMBOL_GPL(async_tx_quiesce); | 292 | EXPORT_SYMBOL_GPL(async_tx_quiesce); |
297 | 293 | ||
298 | module_init(async_tx_init); | ||
299 | module_exit(async_tx_exit); | ||
300 | |||
301 | MODULE_AUTHOR("Intel Corporation"); | 294 | MODULE_AUTHOR("Intel Corporation"); |
302 | MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API"); | 295 | MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API"); |
303 | MODULE_LICENSE("GPL"); | 296 | MODULE_LICENSE("GPL"); |
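[Editor's note: with the context-based API a completion notification is just another submission: async_trigger_callback() now takes the same struct async_submit_ctl and runs the callback once the dependency chain drains. A minimal sketch; the callback name and cookie are illustrative.]

/* minimal sketch: notify once 'tx' and everything it depends on completes */
static void notify_when_done(struct dma_async_tx_descriptor *tx,
			     dma_async_tx_callback done, void *ctx)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, tx, done, ctx, NULL);
	async_trigger_callback(&submit);
}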
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 95fe2c8d6c51..56b5f98da463 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -33,19 +33,16 @@
33 | /* do_async_xor - dma map the pages and perform the xor with an engine */ | 33 | /* do_async_xor - dma map the pages and perform the xor with an engine */ |
34 | static __async_inline struct dma_async_tx_descriptor * | 34 | static __async_inline struct dma_async_tx_descriptor * |
35 | do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | 35 | do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, |
36 | unsigned int offset, int src_cnt, size_t len, | 36 | unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src, |
37 | enum async_tx_flags flags, | 37 | struct async_submit_ctl *submit) |
38 | struct dma_async_tx_descriptor *depend_tx, | ||
39 | dma_async_tx_callback cb_fn, void *cb_param) | ||
40 | { | 38 | { |
41 | struct dma_device *dma = chan->device; | 39 | struct dma_device *dma = chan->device; |
42 | dma_addr_t *dma_src = (dma_addr_t *) src_list; | ||
43 | struct dma_async_tx_descriptor *tx = NULL; | 40 | struct dma_async_tx_descriptor *tx = NULL; |
44 | int src_off = 0; | 41 | int src_off = 0; |
45 | int i; | 42 | int i; |
46 | dma_async_tx_callback _cb_fn; | 43 | dma_async_tx_callback cb_fn_orig = submit->cb_fn; |
47 | void *_cb_param; | 44 | void *cb_param_orig = submit->cb_param; |
48 | enum async_tx_flags async_flags; | 45 | enum async_tx_flags flags_orig = submit->flags; |
49 | enum dma_ctrl_flags dma_flags; | 46 | enum dma_ctrl_flags dma_flags; |
50 | int xor_src_cnt; | 47 | int xor_src_cnt; |
51 | dma_addr_t dma_dest; | 48 | dma_addr_t dma_dest; |
@@ -63,23 +60,23 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | |||
63 | } | 60 | } |
64 | 61 | ||
65 | while (src_cnt) { | 62 | while (src_cnt) { |
66 | async_flags = flags; | 63 | submit->flags = flags_orig; |
67 | dma_flags = 0; | 64 | dma_flags = 0; |
68 | xor_src_cnt = min(src_cnt, dma->max_xor); | 65 | xor_src_cnt = min(src_cnt, (int)dma->max_xor); |
69 | /* if we are submitting additional xors, leave the chain open, | 66 | /* if we are submitting additional xors, leave the chain open, |
70 | * clear the callback parameters, and leave the destination | 67 | * clear the callback parameters, and leave the destination |
71 | * buffer mapped | 68 | * buffer mapped |
72 | */ | 69 | */ |
73 | if (src_cnt > xor_src_cnt) { | 70 | if (src_cnt > xor_src_cnt) { |
74 | async_flags &= ~ASYNC_TX_ACK; | 71 | submit->flags &= ~ASYNC_TX_ACK; |
75 | dma_flags = DMA_COMPL_SKIP_DEST_UNMAP; | 72 | dma_flags = DMA_COMPL_SKIP_DEST_UNMAP; |
76 | _cb_fn = NULL; | 73 | submit->cb_fn = NULL; |
77 | _cb_param = NULL; | 74 | submit->cb_param = NULL; |
78 | } else { | 75 | } else { |
79 | _cb_fn = cb_fn; | 76 | submit->cb_fn = cb_fn_orig; |
80 | _cb_param = cb_param; | 77 | submit->cb_param = cb_param_orig; |
81 | } | 78 | } |
82 | if (_cb_fn) | 79 | if (submit->cb_fn) |
83 | dma_flags |= DMA_PREP_INTERRUPT; | 80 | dma_flags |= DMA_PREP_INTERRUPT; |
84 | 81 | ||
85 | /* Since we have clobbered the src_list we are committed | 82 | /* Since we have clobbered the src_list we are committed |
@@ -90,7 +87,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | |||
90 | xor_src_cnt, len, dma_flags); | 87 | xor_src_cnt, len, dma_flags); |
91 | 88 | ||
92 | if (unlikely(!tx)) | 89 | if (unlikely(!tx)) |
93 | async_tx_quiesce(&depend_tx); | 90 | async_tx_quiesce(&submit->depend_tx); |
94 | 91 | ||
95 | /* spin wait for the preceding transactions to complete */ | 92 |
96 | while (unlikely(!tx)) { | 93 | while (unlikely(!tx)) { |
@@ -101,11 +98,8 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | |||
101 | dma_flags); | 98 | dma_flags); |
102 | } | 99 | } |
103 | 100 | ||
104 | async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn, | 101 | async_tx_submit(chan, tx, submit); |
105 | _cb_param); | 102 | submit->depend_tx = tx; |
106 | |||
107 | depend_tx = tx; | ||
108 | flags |= ASYNC_TX_DEP_ACK; | ||
109 | 103 | ||
110 | if (src_cnt > xor_src_cnt) { | 104 | if (src_cnt > xor_src_cnt) { |
111 | /* drop completed sources */ | 105 | /* drop completed sources */ |
@@ -124,23 +118,27 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | |||
124 | 118 | ||
125 | static void | 119 | static void |
126 | do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, | 120 | do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, |
127 | int src_cnt, size_t len, enum async_tx_flags flags, | 121 | int src_cnt, size_t len, struct async_submit_ctl *submit) |
128 | dma_async_tx_callback cb_fn, void *cb_param) | ||
129 | { | 122 | { |
130 | int i; | 123 | int i; |
131 | int xor_src_cnt; | 124 | int xor_src_cnt; |
132 | int src_off = 0; | 125 | int src_off = 0; |
133 | void *dest_buf; | 126 | void *dest_buf; |
134 | void **srcs = (void **) src_list; | 127 | void **srcs; |
128 | |||
129 | if (submit->scribble) | ||
130 | srcs = submit->scribble; | ||
131 | else | ||
132 | srcs = (void **) src_list; | ||
135 | 133 | ||
136 | /* reuse the 'src_list' array to convert to buffer pointers */ | 134 | /* convert to buffer pointers */ |
137 | for (i = 0; i < src_cnt; i++) | 135 | for (i = 0; i < src_cnt; i++) |
138 | srcs[i] = page_address(src_list[i]) + offset; | 136 | srcs[i] = page_address(src_list[i]) + offset; |
139 | 137 | ||
140 | /* set destination address */ | 138 | /* set destination address */ |
141 | dest_buf = page_address(dest) + offset; | 139 | dest_buf = page_address(dest) + offset; |
142 | 140 | ||
143 | if (flags & ASYNC_TX_XOR_ZERO_DST) | 141 | if (submit->flags & ASYNC_TX_XOR_ZERO_DST) |
144 | memset(dest_buf, 0, len); | 142 | memset(dest_buf, 0, len); |
145 | 143 | ||
146 | while (src_cnt > 0) { | 144 | while (src_cnt > 0) { |
@@ -153,61 +151,70 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, | |||
153 | src_off += xor_src_cnt; | 151 | src_off += xor_src_cnt; |
154 | } | 152 | } |
155 | 153 | ||
156 | async_tx_sync_epilog(cb_fn, cb_param); | 154 | async_tx_sync_epilog(submit); |
157 | } | 155 | } |
158 | 156 | ||
159 | /** | 157 | /** |
160 | * async_xor - attempt to xor a set of blocks with a dma engine. | 158 | * async_xor - attempt to xor a set of blocks with a dma engine. |
161 | * xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST | ||
162 | * flag must be set to not include dest data in the calculation. The | ||
163 | * assumption with dma eninges is that they only use the destination | ||
164 | * buffer as a source when it is explicity specified in the source list. | ||
165 | * @dest: destination page | 159 | * @dest: destination page |
166 | * @src_list: array of source pages (if the dest is also a source it must be | 160 | * @src_list: array of source pages |
167 | * at index zero). The contents of this array may be overwritten. | 161 | * @offset: common src/dst offset to start transaction |
168 | * @offset: offset in pages to start transaction | ||
169 | * @src_cnt: number of source pages | 162 | * @src_cnt: number of source pages |
170 | * @len: length in bytes | 163 | * @len: length in bytes |
171 | * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST, | 164 | * @submit: submission / completion modifiers |
172 | * ASYNC_TX_ACK, ASYNC_TX_DEP_ACK | 165 | * |
173 | * @depend_tx: xor depends on the result of this transaction. | 166 | * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST |
174 | * @cb_fn: function to call when the xor completes | 167 | * |
175 | * @cb_param: parameter to pass to the callback routine | 168 | * xor_blocks always uses the dest as a source so the |
169 | * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in | ||
170 | * the calculation. The assumption with dma engines is that they only | ||
171 | * use the destination buffer as a source when it is explicitly specified | ||
172 | * in the source list. | ||
173 | * | ||
174 | * src_list note: if the dest is also a source it must be at index zero. | ||
175 | * The contents of this array will be overwritten if a scribble region | ||
176 | * is not specified. | ||
176 | */ | 177 | */ |
177 | struct dma_async_tx_descriptor * | 178 | struct dma_async_tx_descriptor * |
178 | async_xor(struct page *dest, struct page **src_list, unsigned int offset, | 179 | async_xor(struct page *dest, struct page **src_list, unsigned int offset, |
179 | int src_cnt, size_t len, enum async_tx_flags flags, | 180 | int src_cnt, size_t len, struct async_submit_ctl *submit) |
180 | struct dma_async_tx_descriptor *depend_tx, | ||
181 | dma_async_tx_callback cb_fn, void *cb_param) | ||
182 | { | 181 | { |
183 | struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR, | 182 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR, |
184 | &dest, 1, src_list, | 183 | &dest, 1, src_list, |
185 | src_cnt, len); | 184 | src_cnt, len); |
185 | dma_addr_t *dma_src = NULL; | ||
186 | |||
186 | BUG_ON(src_cnt <= 1); | 187 | BUG_ON(src_cnt <= 1); |
187 | 188 | ||
188 | if (chan) { | 189 | if (submit->scribble) |
190 | dma_src = submit->scribble; | ||
191 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) | ||
192 | dma_src = (dma_addr_t *) src_list; | ||
193 | |||
194 | if (dma_src && chan) { | ||
189 | /* run the xor asynchronously */ | 195 | /* run the xor asynchronously */ |
190 | pr_debug("%s (async): len: %zu\n", __func__, len); | 196 | pr_debug("%s (async): len: %zu\n", __func__, len); |
191 | 197 | ||
192 | return do_async_xor(chan, dest, src_list, offset, src_cnt, len, | 198 | return do_async_xor(chan, dest, src_list, offset, src_cnt, len, |
193 | flags, depend_tx, cb_fn, cb_param); | 199 | dma_src, submit); |
194 | } else { | 200 | } else { |
195 | /* run the xor synchronously */ | 201 | /* run the xor synchronously */ |
196 | pr_debug("%s (sync): len: %zu\n", __func__, len); | 202 | pr_debug("%s (sync): len: %zu\n", __func__, len); |
203 | WARN_ONCE(chan, "%s: no space for dma address conversion\n", | ||
204 | __func__); | ||
197 | 205 | ||
198 | /* in the sync case the dest is an implied source | 206 | /* in the sync case the dest is an implied source |
199 | * (assumes the dest is the first source) | 207 | * (assumes the dest is the first source) |
200 | */ | 208 | */ |
201 | if (flags & ASYNC_TX_XOR_DROP_DST) { | 209 | if (submit->flags & ASYNC_TX_XOR_DROP_DST) { |
202 | src_cnt--; | 210 | src_cnt--; |
203 | src_list++; | 211 | src_list++; |
204 | } | 212 | } |
205 | 213 | ||
206 | /* wait for any prerequisite operations */ | 214 | /* wait for any prerequisite operations */ |
207 | async_tx_quiesce(&depend_tx); | 215 | async_tx_quiesce(&submit->depend_tx); |
208 | 216 | ||
209 | do_sync_xor(dest, src_list, offset, src_cnt, len, | 217 | do_sync_xor(dest, src_list, offset, src_cnt, len, submit); |
210 | flags, cb_fn, cb_param); | ||
211 | 218 | ||
212 | return NULL; | 219 | return NULL; |
213 | } | 220 | } |
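As a usage reference, a minimal caller of the updated async_xor() interface might look like the sketch below, modeled on the raid6test.c code added later in this patch. The addr_conv scribble array gives the API room for dma address conversion so that the source page list is not clobbered; buffer allocation and completion handling are omitted, so treat it as a sketch rather than the canonical call sequence.

```c
#include <linux/async_tx.h>

/* Sketch: xor src_cnt source pages into dest, overwriting dest first
 * (ASYNC_TX_XOR_ZERO_DST), using a stack scribble region. */
static struct dma_async_tx_descriptor *
xor_example(struct page *dest, struct page **srcs, int src_cnt, size_t len)
{
	struct async_submit_ctl submit;
	addr_conv_t addr_conv[src_cnt];	/* scribble: one slot per source */
	struct dma_async_tx_descriptor *tx;

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, NULL, NULL,
			  addr_conv);
	tx = async_xor(dest, srcs, 0, src_cnt, len, &submit);
	async_tx_issue_pending(tx);	/* kick any channel holding the op */

	return tx;
}
```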
@@ -222,104 +229,90 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len) | |||
222 | } | 229 | } |
223 | 230 | ||
224 | /** | 231 | /** |
225 | * async_xor_zero_sum - attempt a xor parity check with a dma engine. | 232 | * async_xor_val - attempt a xor parity check with a dma engine. |
226 | * @dest: destination page used if the xor is performed synchronously | 233 | * @dest: destination page used if the xor is performed synchronously |
227 | * @src_list: array of source pages. The dest page must be listed as a source | 234 | * @src_list: array of source pages |
228 | * at index zero. The contents of this array may be overwritten. | ||
229 | * @offset: offset in pages to start transaction | 235 | * @offset: offset in pages to start transaction |
230 | * @src_cnt: number of source pages | 236 | * @src_cnt: number of source pages |
231 | * @len: length in bytes | 237 | * @len: length in bytes |
232 | * @result: 0 if sum == 0 else non-zero | 238 | * @result: 0 if sum == 0 else non-zero |
233 | * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK | 239 | * @submit: submission / completion modifiers |
234 | * @depend_tx: xor depends on the result of this transaction. | 240 | * |
235 | * @cb_fn: function to call when the xor completes | 241 | * honored flags: ASYNC_TX_ACK |
236 | * @cb_param: parameter to pass to the callback routine | 242 | * |
243 | * src_list note: if the dest is also a source it must be at index zero. | ||
244 | * The contents of this array will be overwritten if a scribble region | ||
245 | * is not specified. | ||
237 | */ | 246 | */ |
238 | struct dma_async_tx_descriptor * | 247 | struct dma_async_tx_descriptor * |
239 | async_xor_zero_sum(struct page *dest, struct page **src_list, | 248 | async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, |
240 | unsigned int offset, int src_cnt, size_t len, | 249 | int src_cnt, size_t len, enum sum_check_flags *result, |
241 | u32 *result, enum async_tx_flags flags, | 250 | struct async_submit_ctl *submit) |
242 | struct dma_async_tx_descriptor *depend_tx, | ||
243 | dma_async_tx_callback cb_fn, void *cb_param) | ||
244 | { | 251 | { |
245 | struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM, | 252 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL, |
246 | &dest, 1, src_list, | 253 | &dest, 1, src_list, |
247 | src_cnt, len); | 254 | src_cnt, len); |
248 | struct dma_device *device = chan ? chan->device : NULL; | 255 | struct dma_device *device = chan ? chan->device : NULL; |
249 | struct dma_async_tx_descriptor *tx = NULL; | 256 | struct dma_async_tx_descriptor *tx = NULL; |
257 | dma_addr_t *dma_src = NULL; | ||
250 | 258 | ||
251 | BUG_ON(src_cnt <= 1); | 259 | BUG_ON(src_cnt <= 1); |
252 | 260 | ||
253 | if (device && src_cnt <= device->max_xor) { | 261 | if (submit->scribble) |
254 | dma_addr_t *dma_src = (dma_addr_t *) src_list; | 262 | dma_src = submit->scribble; |
255 | unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; | 263 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) |
264 | dma_src = (dma_addr_t *) src_list; | ||
265 | |||
266 | if (dma_src && device && src_cnt <= device->max_xor) { | ||
267 | unsigned long dma_prep_flags; | ||
256 | int i; | 268 | int i; |
257 | 269 | ||
258 | pr_debug("%s: (async) len: %zu\n", __func__, len); | 270 | pr_debug("%s: (async) len: %zu\n", __func__, len); |
259 | 271 | ||
272 | dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; | ||
260 | for (i = 0; i < src_cnt; i++) | 273 | for (i = 0; i < src_cnt; i++) |
261 | dma_src[i] = dma_map_page(device->dev, src_list[i], | 274 | dma_src[i] = dma_map_page(device->dev, src_list[i], |
262 | offset, len, DMA_TO_DEVICE); | 275 | offset, len, DMA_TO_DEVICE); |
263 | 276 | ||
264 | tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt, | 277 | tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt, |
265 | len, result, | 278 | len, result, |
266 | dma_prep_flags); | 279 | dma_prep_flags); |
267 | if (unlikely(!tx)) { | 280 | if (unlikely(!tx)) { |
268 | async_tx_quiesce(&depend_tx); | 281 | async_tx_quiesce(&submit->depend_tx); |
269 | 282 | ||
270 | while (!tx) { | 283 | while (!tx) { |
271 | dma_async_issue_pending(chan); | 284 | dma_async_issue_pending(chan); |
272 | tx = device->device_prep_dma_zero_sum(chan, | 285 | tx = device->device_prep_dma_xor_val(chan, |
273 | dma_src, src_cnt, len, result, | 286 | dma_src, src_cnt, len, result, |
274 | dma_prep_flags); | 287 | dma_prep_flags); |
275 | } | 288 | } |
276 | } | 289 | } |
277 | 290 | ||
278 | async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); | 291 | async_tx_submit(chan, tx, submit); |
279 | } else { | 292 | } else { |
280 | unsigned long xor_flags = flags; | 293 | enum async_tx_flags flags_orig = submit->flags; |
281 | 294 | ||
282 | pr_debug("%s: (sync) len: %zu\n", __func__, len); | 295 | pr_debug("%s: (sync) len: %zu\n", __func__, len); |
296 | WARN_ONCE(device && src_cnt <= device->max_xor, | ||
297 | "%s: no space for dma address conversion\n", | ||
298 | __func__); | ||
283 | 299 | ||
284 | xor_flags |= ASYNC_TX_XOR_DROP_DST; | 300 | submit->flags |= ASYNC_TX_XOR_DROP_DST; |
285 | xor_flags &= ~ASYNC_TX_ACK; | 301 | submit->flags &= ~ASYNC_TX_ACK; |
286 | 302 | ||
287 | tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags, | 303 | tx = async_xor(dest, src_list, offset, src_cnt, len, submit); |
288 | depend_tx, NULL, NULL); | ||
289 | 304 | ||
290 | async_tx_quiesce(&tx); | 305 | async_tx_quiesce(&tx); |
291 | 306 | ||
292 | *result = page_is_zero(dest, offset, len) ? 0 : 1; | 307 | *result = !page_is_zero(dest, offset, len) << SUM_CHECK_P; |
293 | 308 | ||
294 | async_tx_sync_epilog(cb_fn, cb_param); | 309 | async_tx_sync_epilog(submit); |
310 | submit->flags = flags_orig; | ||
295 | } | 311 | } |
296 | 312 | ||
297 | return tx; | 313 | return tx; |
298 | } | 314 | } |
299 | EXPORT_SYMBOL_GPL(async_xor_zero_sum); | 315 | EXPORT_SYMBOL_GPL(async_xor_val); |
300 | |||
301 | static int __init async_xor_init(void) | ||
302 | { | ||
303 | #ifdef CONFIG_DMA_ENGINE | ||
304 | /* To conserve stack space the input src_list (array of page pointers) | ||
305 | * is reused to hold the array of dma addresses passed to the driver. | ||
306 | * This conversion is only possible when dma_addr_t is less than the | ||
307 | * the size of a pointer. HIGHMEM64G is known to violate this | ||
308 | * assumption. | ||
309 | */ | ||
310 | BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *)); | ||
311 | #endif | ||
312 | |||
313 | return 0; | ||
314 | } | ||
315 | |||
316 | static void __exit async_xor_exit(void) | ||
317 | { | ||
318 | do { } while (0); | ||
319 | } | ||
320 | |||
321 | module_init(async_xor_init); | ||
322 | module_exit(async_xor_exit); | ||
323 | 316 | ||
324 | MODULE_AUTHOR("Intel Corporation"); | 317 | MODULE_AUTHOR("Intel Corporation"); |
325 | MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api"); | 318 | MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api"); |
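The validation entry point renamed above can be driven the same way; here is a hedged sketch of a parity check over a set of pages, using the completion-callback pattern that raid6test.c (next file) uses for async_gen_syndrome(). The helper names are invented for the example, and following the src_list rule documented above the parity page is passed both as dest and as srcs[0].

```c
#include <linux/async_tx.h>
#include <linux/completion.h>

static void xor_check_done(void *param)
{
	complete(param);
}

/* Sketch: returns non-zero if the pages in srcs[] do not xor to zero.
 * srcs[0] is the parity page and doubles as dest for the synchronous
 * fallback path. */
static int xor_check_example(struct page **srcs, int src_cnt, size_t len)
{
	struct async_submit_ctl submit;
	addr_conv_t addr_conv[src_cnt];
	enum sum_check_flags result = ~0;
	struct dma_async_tx_descriptor *tx;
	struct completion cmp;

	init_completion(&cmp);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, xor_check_done, &cmp,
			  addr_conv);
	tx = async_xor_val(srcs[0], srcs, 0, src_cnt, len, &result, &submit);
	async_tx_issue_pending(tx);
	wait_for_completion(&cmp);

	return result & (1 << SUM_CHECK_P);	/* set => parity mismatch */
}
```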
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c new file mode 100644 index 000000000000..98c83ca96c83 --- /dev/null +++ b/crypto/async_tx/raid6test.c | |||
@@ -0,0 +1,241 @@ | |||
1 | /* | ||
2 | * asynchronous raid6 recovery self test | ||
3 | * Copyright (c) 2009, Intel Corporation. | ||
4 | * | ||
5 | * based on drivers/md/raid6test/test.c: | ||
6 | * Copyright 2002-2007 H. Peter Anvin | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | * | ||
21 | */ | ||
22 | #include <linux/async_tx.h> | ||
23 | #include <linux/random.h> | ||
24 | |||
25 | #undef pr | ||
26 | #define pr(fmt, args...) pr_info("raid6test: " fmt, ##args) | ||
27 | |||
28 | #define NDISKS 16 /* Including P and Q */ | ||
29 | |||
30 | static struct page *dataptrs[NDISKS]; | ||
31 | static struct page *data[NDISKS+3]; | ||
32 | static struct page *spare; | ||
33 | static struct page *recovi; | ||
34 | static struct page *recovj; | ||
35 | |||
36 | static void callback(void *param) | ||
37 | { | ||
38 | struct completion *cmp = param; | ||
39 | |||
40 | complete(cmp); | ||
41 | } | ||
42 | |||
43 | static void makedata(int disks) | ||
44 | { | ||
45 | int i, j; | ||
46 | |||
47 | for (i = 0; i < disks; i++) { | ||
48 | for (j = 0; j < PAGE_SIZE; j += sizeof(u32)) { | ||
49 | u32 *p = page_address(data[i]) + j; | ||
50 | |||
51 | *p = random32(); | ||
52 | } | ||
53 | |||
54 | dataptrs[i] = data[i]; | ||
55 | } | ||
56 | } | ||
57 | |||
58 | static char disk_type(int d, int disks) | ||
59 | { | ||
60 | if (d == disks - 2) | ||
61 | return 'P'; | ||
62 | else if (d == disks - 1) | ||
63 | return 'Q'; | ||
64 | else | ||
65 | return 'D'; | ||
66 | } | ||
67 | |||
68 | /* Recover two failed blocks. */ | ||
69 | static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, struct page **ptrs) | ||
70 | { | ||
71 | struct async_submit_ctl submit; | ||
72 | addr_conv_t addr_conv[disks]; | ||
73 | struct completion cmp; | ||
74 | struct dma_async_tx_descriptor *tx = NULL; | ||
75 | enum sum_check_flags result = ~0; | ||
76 | |||
77 | if (faila > failb) | ||
78 | swap(faila, failb); | ||
79 | |||
80 | if (failb == disks-1) { | ||
81 | if (faila == disks-2) { | ||
82 | /* P+Q failure. Just rebuild the syndrome. */ | ||
83 | init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); | ||
84 | tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); | ||
85 | } else { | ||
86 | struct page *blocks[disks]; | ||
87 | struct page *dest; | ||
88 | int count = 0; | ||
89 | int i; | ||
90 | |||
91 | /* data+Q failure. Reconstruct data from P, | ||
92 | * then rebuild syndrome | ||
93 | */ | ||
94 | for (i = disks; i-- ; ) { | ||
95 | if (i == faila || i == failb) | ||
96 | continue; | ||
97 | blocks[count++] = ptrs[i]; | ||
98 | } | ||
99 | dest = ptrs[faila]; | ||
100 | init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, | ||
101 | NULL, NULL, addr_conv); | ||
102 | tx = async_xor(dest, blocks, 0, count, bytes, &submit); | ||
103 | |||
104 | init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv); | ||
105 | tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); | ||
106 | } | ||
107 | } else { | ||
108 | if (failb == disks-2) { | ||
109 | /* data+P failure. */ | ||
110 | init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); | ||
111 | tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit); | ||
112 | } else { | ||
113 | /* data+data failure. */ | ||
114 | init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); | ||
115 | tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit); | ||
116 | } | ||
117 | } | ||
118 | init_completion(&cmp); | ||
119 | init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv); | ||
120 | tx = async_syndrome_val(ptrs, 0, disks, bytes, &result, spare, &submit); | ||
121 | async_tx_issue_pending(tx); | ||
122 | |||
123 | if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) | ||
124 | pr("%s: timeout! (faila: %d failb: %d disks: %d)\n", | ||
125 | __func__, faila, failb, disks); | ||
126 | |||
127 | if (result != 0) | ||
128 | pr("%s: validation failure! faila: %d failb: %d sum_check_flags: %x\n", | ||
129 | __func__, faila, failb, result); | ||
130 | } | ||
131 | |||
132 | static int test_disks(int i, int j, int disks) | ||
133 | { | ||
134 | int erra, errb; | ||
135 | |||
136 | memset(page_address(recovi), 0xf0, PAGE_SIZE); | ||
137 | memset(page_address(recovj), 0xba, PAGE_SIZE); | ||
138 | |||
139 | dataptrs[i] = recovi; | ||
140 | dataptrs[j] = recovj; | ||
141 | |||
142 | raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs); | ||
143 | |||
144 | erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE); | ||
145 | errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE); | ||
146 | |||
147 | pr("%s(%d, %d): faila=%3d(%c) failb=%3d(%c) %s\n", | ||
148 | __func__, i, j, i, disk_type(i, disks), j, disk_type(j, disks), | ||
149 | (!erra && !errb) ? "OK" : !erra ? "ERRB" : !errb ? "ERRA" : "ERRAB"); | ||
150 | |||
151 | dataptrs[i] = data[i]; | ||
152 | dataptrs[j] = data[j]; | ||
153 | |||
154 | return erra || errb; | ||
155 | } | ||
156 | |||
157 | static int test(int disks, int *tests) | ||
158 | { | ||
159 | addr_conv_t addr_conv[disks]; | ||
160 | struct dma_async_tx_descriptor *tx; | ||
161 | struct async_submit_ctl submit; | ||
162 | struct completion cmp; | ||
163 | int err = 0; | ||
164 | int i, j; | ||
165 | |||
166 | recovi = data[disks]; | ||
167 | recovj = data[disks+1]; | ||
168 | spare = data[disks+2]; | ||
169 | |||
170 | makedata(disks); | ||
171 | |||
172 | /* Nuke syndromes */ | ||
173 | memset(page_address(data[disks-2]), 0xee, PAGE_SIZE); | ||
174 | memset(page_address(data[disks-1]), 0xee, PAGE_SIZE); | ||
175 | |||
176 | /* Generate assumed good syndrome */ | ||
177 | init_completion(&cmp); | ||
178 | init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv); | ||
179 | tx = async_gen_syndrome(dataptrs, 0, disks, PAGE_SIZE, &submit); | ||
180 | async_tx_issue_pending(tx); | ||
181 | |||
182 | if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) { | ||
183 | pr("error: initial gen_syndrome(%d) timed out\n", disks); | ||
184 | return 1; | ||
185 | } | ||
186 | |||
187 | pr("testing the %d-disk case...\n", disks); | ||
188 | for (i = 0; i < disks-1; i++) | ||
189 | for (j = i+1; j < disks; j++) { | ||
190 | (*tests)++; | ||
191 | err += test_disks(i, j, disks); | ||
192 | } | ||
193 | |||
194 | return err; | ||
195 | } | ||
196 | |||
197 | |||
198 | static int raid6_test(void) | ||
199 | { | ||
200 | int err = 0; | ||
201 | int tests = 0; | ||
202 | int i; | ||
203 | |||
204 | for (i = 0; i < NDISKS+3; i++) { | ||
205 | data[i] = alloc_page(GFP_KERNEL); | ||
206 | if (!data[i]) { | ||
207 | while (i--) | ||
208 | put_page(data[i]); | ||
209 | return -ENOMEM; | ||
210 | } | ||
211 | } | ||
212 | |||
213 | /* the 4-disk and 5-disk cases are special for the recovery code */ | ||
214 | if (NDISKS > 4) | ||
215 | err += test(4, &tests); | ||
216 | if (NDISKS > 5) | ||
217 | err += test(5, &tests); | ||
218 | err += test(NDISKS, &tests); | ||
219 | |||
220 | pr("\n"); | ||
221 | pr("complete (%d tests, %d failure%s)\n", | ||
222 | tests, err, err == 1 ? "" : "s"); | ||
223 | |||
224 | for (i = 0; i < NDISKS+3; i++) | ||
225 | put_page(data[i]); | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | static void raid6_test_exit(void) | ||
231 | { | ||
232 | } | ||
233 | |||
234 | /* when compiled-in wait for drivers to load first (assumes dma drivers | ||
235 | * are also compliled-in) | ||
236 | */ | ||
237 | late_initcall(raid6_test); | ||
238 | module_exit(raid6_test_exit); | ||
239 | MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>"); | ||
240 | MODULE_DESCRIPTION("asynchronous RAID-6 recovery self tests"); | ||
241 | MODULE_LICENSE("GPL"); | ||
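One closing note on interpreting the validation result: the test above only reports result != 0, but a caller of async_syndrome_val() can tell the P and Q halves of the check apart. The sketch below reuses the pr() macro from raid6test.c; it assumes a SUM_CHECK_Q bit alongside the SUM_CHECK_P bit seen in async_xor.c, so double-check the flag names in the dmaengine headers before relying on it.

```c
/* Sketch (assumed flag layout): report which half of a P/Q validation failed. */
static void report_pq_check(enum sum_check_flags result, int faila, int failb)
{
	if (result & (1 << SUM_CHECK_P))
		pr("P parity mismatch (faila: %d failb: %d)\n", faila, failb);
	if (result & (1 << SUM_CHECK_Q))
		pr("Q syndrome mismatch (faila: %d failb: %d)\n", faila, failb);
}
```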