Diffstat (limited to 'crypto')
-rw-r--r-- | crypto/Kconfig | 17
-rw-r--r-- | crypto/Makefile | 4
-rw-r--r-- | crypto/ablkcipher.c | 277
-rw-r--r-- | crypto/algapi.c | 2
-rw-r--r-- | crypto/algboss.c | 4
-rw-r--r-- | crypto/async_tx/Kconfig | 14
-rw-r--r-- | crypto/async_tx/async_tx.c | 46
-rw-r--r-- | crypto/authenc.c | 10
-rw-r--r-- | crypto/ctr.c | 2
-rw-r--r-- | crypto/internal.h | 2
-rw-r--r-- | crypto/pcrypt.c | 252
-rw-r--r-- | crypto/scatterwalk.c | 2
-rw-r--r-- | crypto/shash.c | 2
-rw-r--r-- | crypto/tcrypt.c | 343
-rw-r--r-- | crypto/tcrypt.h | 29
-rw-r--r-- | crypto/testmgr.c | 80
-rw-r--r-- | crypto/testmgr.h | 64
-rw-r--r-- | crypto/twofish_generic.c (renamed from crypto/twofish.c) | 1
-rw-r--r-- | crypto/vmac.c | 75
-rw-r--r-- | crypto/xts.c | 2
20 files changed, 1056 insertions, 172 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 403857ad06d4..1cd497d7a15a 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -28,7 +28,7 @@ config CRYPTO_FIPS | |||
28 | This option enables the fips boot option which is | 28 | This option enables the fips boot option which is |
29 | required if you want the system to operate in a FIPS 200 | 29 | required if you want the system to operate in a FIPS 200 |
30 | certification. You should say no unless you know what | 30 | certification. You should say no unless you know what |
31 | this is. Note that CRYPTO_ANSI_CPRNG is requred if this | 31 | this is. Note that CRYPTO_ANSI_CPRNG is required if this |
32 | option is selected | 32 | option is selected |
33 | 33 | ||
34 | config CRYPTO_ALGAPI | 34 | config CRYPTO_ALGAPI |
@@ -80,6 +80,11 @@ config CRYPTO_RNG2 | |||
80 | 80 | ||
81 | config CRYPTO_PCOMP | 81 | config CRYPTO_PCOMP |
82 | tristate | 82 | tristate |
83 | select CRYPTO_PCOMP2 | ||
84 | select CRYPTO_ALGAPI | ||
85 | |||
86 | config CRYPTO_PCOMP2 | ||
87 | tristate | ||
83 | select CRYPTO_ALGAPI2 | 88 | select CRYPTO_ALGAPI2 |
84 | 89 | ||
85 | config CRYPTO_MANAGER | 90 | config CRYPTO_MANAGER |
@@ -94,7 +99,15 @@ config CRYPTO_MANAGER2 | |||
94 | select CRYPTO_AEAD2 | 99 | select CRYPTO_AEAD2 |
95 | select CRYPTO_HASH2 | 100 | select CRYPTO_HASH2 |
96 | select CRYPTO_BLKCIPHER2 | 101 | select CRYPTO_BLKCIPHER2 |
97 | select CRYPTO_PCOMP | 102 | select CRYPTO_PCOMP2 |
103 | |||
104 | config CRYPTO_MANAGER_TESTS | ||
105 | bool "Run algolithms' self-tests" | ||
106 | default y | ||
107 | depends on CRYPTO_MANAGER2 | ||
108 | help | ||
109 | Run cryptomanager's tests for the new crypto algorithms being | ||
110 | registered. | ||
98 | 111 | ||
99 | config CRYPTO_GF128MUL | 112 | config CRYPTO_GF128MUL |
100 | tristate "GF(2^128) multiplication functions (EXPERIMENTAL)" | 113 | tristate "GF(2^128) multiplication functions (EXPERIMENTAL)" |
diff --git a/crypto/Makefile b/crypto/Makefile
index d7e6441df7fe..423b7de61f93 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -26,7 +26,7 @@ crypto_hash-objs += ahash.o | |||
26 | crypto_hash-objs += shash.o | 26 | crypto_hash-objs += shash.o |
27 | obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o | 27 | obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o |
28 | 28 | ||
29 | obj-$(CONFIG_CRYPTO_PCOMP) += pcompress.o | 29 | obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o |
30 | 30 | ||
31 | cryptomgr-objs := algboss.o testmgr.o | 31 | cryptomgr-objs := algboss.o testmgr.o |
32 | 32 | ||
@@ -61,7 +61,7 @@ obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o | |||
61 | obj-$(CONFIG_CRYPTO_DES) += des_generic.o | 61 | obj-$(CONFIG_CRYPTO_DES) += des_generic.o |
62 | obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o | 62 | obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o |
63 | obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o | 63 | obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o |
64 | obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o | 64 | obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o |
65 | obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o | 65 | obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o |
66 | obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o | 66 | obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o |
67 | obj-$(CONFIG_CRYPTO_AES) += aes_generic.o | 67 | obj-$(CONFIG_CRYPTO_AES) += aes_generic.o |
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index fe980dae1727..a854df2a5a4b 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -24,10 +24,287 @@ | |||
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/seq_file.h> | 25 | #include <linux/seq_file.h> |
26 | 26 | ||
27 | #include <crypto/scatterwalk.h> | ||
28 | |||
27 | #include "internal.h" | 29 | #include "internal.h" |
28 | 30 | ||
29 | static const char *skcipher_default_geniv __read_mostly; | 31 | static const char *skcipher_default_geniv __read_mostly; |
30 | 32 | ||
33 | struct ablkcipher_buffer { | ||
34 | struct list_head entry; | ||
35 | struct scatter_walk dst; | ||
36 | unsigned int len; | ||
37 | void *data; | ||
38 | }; | ||
39 | |||
40 | enum { | ||
41 | ABLKCIPHER_WALK_SLOW = 1 << 0, | ||
42 | }; | ||
43 | |||
44 | static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p) | ||
45 | { | ||
46 | scatterwalk_copychunks(p->data, &p->dst, p->len, 1); | ||
47 | } | ||
48 | |||
49 | void __ablkcipher_walk_complete(struct ablkcipher_walk *walk) | ||
50 | { | ||
51 | struct ablkcipher_buffer *p, *tmp; | ||
52 | |||
53 | list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { | ||
54 | ablkcipher_buffer_write(p); | ||
55 | list_del(&p->entry); | ||
56 | kfree(p); | ||
57 | } | ||
58 | } | ||
59 | EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete); | ||
60 | |||
61 | static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk, | ||
62 | struct ablkcipher_buffer *p) | ||
63 | { | ||
64 | p->dst = walk->out; | ||
65 | list_add_tail(&p->entry, &walk->buffers); | ||
66 | } | ||
67 | |||
68 | /* Get a spot of the specified length that does not straddle a page. | ||
69 | * The caller needs to ensure that there is enough space for this operation. | ||
70 | */ | ||
71 | static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len) | ||
72 | { | ||
73 | u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); | ||
74 | return max(start, end_page); | ||
75 | } | ||
76 | |||
77 | static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk, | ||
78 | unsigned int bsize) | ||
79 | { | ||
80 | unsigned int n = bsize; | ||
81 | |||
82 | for (;;) { | ||
83 | unsigned int len_this_page = scatterwalk_pagelen(&walk->out); | ||
84 | |||
85 | if (len_this_page > n) | ||
86 | len_this_page = n; | ||
87 | scatterwalk_advance(&walk->out, n); | ||
88 | if (n == len_this_page) | ||
89 | break; | ||
90 | n -= len_this_page; | ||
91 | scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg)); | ||
92 | } | ||
93 | |||
94 | return bsize; | ||
95 | } | ||
96 | |||
97 | static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk, | ||
98 | unsigned int n) | ||
99 | { | ||
100 | scatterwalk_advance(&walk->in, n); | ||
101 | scatterwalk_advance(&walk->out, n); | ||
102 | |||
103 | return n; | ||
104 | } | ||
105 | |||
106 | static int ablkcipher_walk_next(struct ablkcipher_request *req, | ||
107 | struct ablkcipher_walk *walk); | ||
108 | |||
109 | int ablkcipher_walk_done(struct ablkcipher_request *req, | ||
110 | struct ablkcipher_walk *walk, int err) | ||
111 | { | ||
112 | struct crypto_tfm *tfm = req->base.tfm; | ||
113 | unsigned int nbytes = 0; | ||
114 | |||
115 | if (likely(err >= 0)) { | ||
116 | unsigned int n = walk->nbytes - err; | ||
117 | |||
118 | if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) | ||
119 | n = ablkcipher_done_fast(walk, n); | ||
120 | else if (WARN_ON(err)) { | ||
121 | err = -EINVAL; | ||
122 | goto err; | ||
123 | } else | ||
124 | n = ablkcipher_done_slow(walk, n); | ||
125 | |||
126 | nbytes = walk->total - n; | ||
127 | err = 0; | ||
128 | } | ||
129 | |||
130 | scatterwalk_done(&walk->in, 0, nbytes); | ||
131 | scatterwalk_done(&walk->out, 1, nbytes); | ||
132 | |||
133 | err: | ||
134 | walk->total = nbytes; | ||
135 | walk->nbytes = nbytes; | ||
136 | |||
137 | if (nbytes) { | ||
138 | crypto_yield(req->base.flags); | ||
139 | return ablkcipher_walk_next(req, walk); | ||
140 | } | ||
141 | |||
142 | if (walk->iv != req->info) | ||
143 | memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize); | ||
144 | if (walk->iv_buffer) | ||
145 | kfree(walk->iv_buffer); | ||
146 | |||
147 | return err; | ||
148 | } | ||
149 | EXPORT_SYMBOL_GPL(ablkcipher_walk_done); | ||
150 | |||
151 | static inline int ablkcipher_next_slow(struct ablkcipher_request *req, | ||
152 | struct ablkcipher_walk *walk, | ||
153 | unsigned int bsize, | ||
154 | unsigned int alignmask, | ||
155 | void **src_p, void **dst_p) | ||
156 | { | ||
157 | unsigned aligned_bsize = ALIGN(bsize, alignmask + 1); | ||
158 | struct ablkcipher_buffer *p; | ||
159 | void *src, *dst, *base; | ||
160 | unsigned int n; | ||
161 | |||
162 | n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1); | ||
163 | n += (aligned_bsize * 3 - (alignmask + 1) + | ||
164 | (alignmask & ~(crypto_tfm_ctx_alignment() - 1))); | ||
165 | |||
166 | p = kmalloc(n, GFP_ATOMIC); | ||
167 | if (!p) | ||
168 | return ablkcipher_walk_done(req, walk, -ENOMEM); | ||
169 | |||
170 | base = p + 1; | ||
171 | |||
172 | dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1); | ||
173 | src = dst = ablkcipher_get_spot(dst, bsize); | ||
174 | |||
175 | p->len = bsize; | ||
176 | p->data = dst; | ||
177 | |||
178 | scatterwalk_copychunks(src, &walk->in, bsize, 0); | ||
179 | |||
180 | ablkcipher_queue_write(walk, p); | ||
181 | |||
182 | walk->nbytes = bsize; | ||
183 | walk->flags |= ABLKCIPHER_WALK_SLOW; | ||
184 | |||
185 | *src_p = src; | ||
186 | *dst_p = dst; | ||
187 | |||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk, | ||
192 | struct crypto_tfm *tfm, | ||
193 | unsigned int alignmask) | ||
194 | { | ||
195 | unsigned bs = walk->blocksize; | ||
196 | unsigned int ivsize = tfm->crt_ablkcipher.ivsize; | ||
197 | unsigned aligned_bs = ALIGN(bs, alignmask + 1); | ||
198 | unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) - | ||
199 | (alignmask + 1); | ||
200 | u8 *iv; | ||
201 | |||
202 | size += alignmask & ~(crypto_tfm_ctx_alignment() - 1); | ||
203 | walk->iv_buffer = kmalloc(size, GFP_ATOMIC); | ||
204 | if (!walk->iv_buffer) | ||
205 | return -ENOMEM; | ||
206 | |||
207 | iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1); | ||
208 | iv = ablkcipher_get_spot(iv, bs) + aligned_bs; | ||
209 | iv = ablkcipher_get_spot(iv, bs) + aligned_bs; | ||
210 | iv = ablkcipher_get_spot(iv, ivsize); | ||
211 | |||
212 | walk->iv = memcpy(iv, walk->iv, ivsize); | ||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | static inline int ablkcipher_next_fast(struct ablkcipher_request *req, | ||
217 | struct ablkcipher_walk *walk) | ||
218 | { | ||
219 | walk->src.page = scatterwalk_page(&walk->in); | ||
220 | walk->src.offset = offset_in_page(walk->in.offset); | ||
221 | walk->dst.page = scatterwalk_page(&walk->out); | ||
222 | walk->dst.offset = offset_in_page(walk->out.offset); | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | static int ablkcipher_walk_next(struct ablkcipher_request *req, | ||
228 | struct ablkcipher_walk *walk) | ||
229 | { | ||
230 | struct crypto_tfm *tfm = req->base.tfm; | ||
231 | unsigned int alignmask, bsize, n; | ||
232 | void *src, *dst; | ||
233 | int err; | ||
234 | |||
235 | alignmask = crypto_tfm_alg_alignmask(tfm); | ||
236 | n = walk->total; | ||
237 | if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) { | ||
238 | req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; | ||
239 | return ablkcipher_walk_done(req, walk, -EINVAL); | ||
240 | } | ||
241 | |||
242 | walk->flags &= ~ABLKCIPHER_WALK_SLOW; | ||
243 | src = dst = NULL; | ||
244 | |||
245 | bsize = min(walk->blocksize, n); | ||
246 | n = scatterwalk_clamp(&walk->in, n); | ||
247 | n = scatterwalk_clamp(&walk->out, n); | ||
248 | |||
249 | if (n < bsize || | ||
250 | !scatterwalk_aligned(&walk->in, alignmask) || | ||
251 | !scatterwalk_aligned(&walk->out, alignmask)) { | ||
252 | err = ablkcipher_next_slow(req, walk, bsize, alignmask, | ||
253 | &src, &dst); | ||
254 | goto set_phys_lowmem; | ||
255 | } | ||
256 | |||
257 | walk->nbytes = n; | ||
258 | |||
259 | return ablkcipher_next_fast(req, walk); | ||
260 | |||
261 | set_phys_lowmem: | ||
262 | if (err >= 0) { | ||
263 | walk->src.page = virt_to_page(src); | ||
264 | walk->dst.page = virt_to_page(dst); | ||
265 | walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1)); | ||
266 | walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1)); | ||
267 | } | ||
268 | |||
269 | return err; | ||
270 | } | ||
271 | |||
272 | static int ablkcipher_walk_first(struct ablkcipher_request *req, | ||
273 | struct ablkcipher_walk *walk) | ||
274 | { | ||
275 | struct crypto_tfm *tfm = req->base.tfm; | ||
276 | unsigned int alignmask; | ||
277 | |||
278 | alignmask = crypto_tfm_alg_alignmask(tfm); | ||
279 | if (WARN_ON_ONCE(in_irq())) | ||
280 | return -EDEADLK; | ||
281 | |||
282 | walk->nbytes = walk->total; | ||
283 | if (unlikely(!walk->total)) | ||
284 | return 0; | ||
285 | |||
286 | walk->iv_buffer = NULL; | ||
287 | walk->iv = req->info; | ||
288 | if (unlikely(((unsigned long)walk->iv & alignmask))) { | ||
289 | int err = ablkcipher_copy_iv(walk, tfm, alignmask); | ||
290 | if (err) | ||
291 | return err; | ||
292 | } | ||
293 | |||
294 | scatterwalk_start(&walk->in, walk->in.sg); | ||
295 | scatterwalk_start(&walk->out, walk->out.sg); | ||
296 | |||
297 | return ablkcipher_walk_next(req, walk); | ||
298 | } | ||
299 | |||
300 | int ablkcipher_walk_phys(struct ablkcipher_request *req, | ||
301 | struct ablkcipher_walk *walk) | ||
302 | { | ||
303 | walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm); | ||
304 | return ablkcipher_walk_first(req, walk); | ||
305 | } | ||
306 | EXPORT_SYMBOL_GPL(ablkcipher_walk_phys); | ||
307 | |||
31 | static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, | 308 | static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, |
32 | unsigned int keylen) | 309 | unsigned int keylen) |
33 | { | 310 | { |
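
The slow path above copies data through bounce buffers, and ablkcipher_get_spot() is what keeps a block from straddling a page boundary inside them. A minimal userspace sketch of that pointer arithmetic, assuming a 4096-byte page and using aligned_alloc() so the demo buffer is page-aligned; the names mirror the kernel's, but this is not kernel code:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* mirror of ablkcipher_get_spot(): if [start, start + len) crosses a
 * page boundary, move the spot to the page holding the last byte */
static uint8_t *get_spot(uint8_t *start, unsigned int len)
{
    uint8_t *end_page = (uint8_t *)(((uintptr_t)(start + len - 1)) & PAGE_MASK);

    return end_page > start ? end_page : start;
}

int main(void)
{
    uint8_t *buf = aligned_alloc(PAGE_SIZE, 3 * PAGE_SIZE);
    uint8_t *near_edge, *spot;

    if (!buf)
        return 1;

    near_edge = buf + PAGE_SIZE - 8;   /* only 8 bytes left in the first page */
    spot = get_spot(near_edge, 16);    /* a 16-byte block would straddle */

    printf("spot moved forward by %td bytes\n", spot - near_edge); /* prints 8 */

    free(buf);
    return 0;
}
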
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 76fae27ed01c..c3cf1a69a47a 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -544,7 +544,7 @@ int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, | |||
544 | { | 544 | { |
545 | int err = -EINVAL; | 545 | int err = -EINVAL; |
546 | 546 | ||
547 | if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset) | 547 | if ((alg->cra_flags ^ frontend->type) & frontend->maskset) |
548 | goto out; | 548 | goto out; |
549 | 549 | ||
550 | spawn->frontend = frontend; | 550 | spawn->frontend = frontend; |
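
The one-line fix above drops a NULL check on frontend that its callers never needed. The XOR-and-mask test itself is worth unpacking: the bits selected by maskset must agree between the algorithm's flags and the frontend's type. A small standalone C illustration (the mask and flag values are made up):

#include <stdio.h>
#include <stdint.h>

/* the check from crypto_init_spawn2(): XOR leaves a 1 exactly where
 * the two values differ; masking keeps only the bits the frontend
 * cares about. A result of 0 means "match". */
static int type_matches(uint32_t cra_flags, uint32_t type, uint32_t maskset)
{
    return !((cra_flags ^ type) & maskset);
}

int main(void)
{
    uint32_t TYPE_MASK = 0x0000000f;     /* hypothetical type field */

    printf("%d\n", type_matches(0x04, 0x4, TYPE_MASK)); /* 1: same type      */
    printf("%d\n", type_matches(0x04, 0x5, TYPE_MASK)); /* 0: different type */
    printf("%d\n", type_matches(0x14, 0x4, TYPE_MASK)); /* 1: bit 4 ignored  */
    return 0;
}
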
diff --git a/crypto/algboss.c b/crypto/algboss.c
index c3c196b5823a..40bd391f34d9 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -206,6 +206,7 @@ err: | |||
206 | return NOTIFY_OK; | 206 | return NOTIFY_OK; |
207 | } | 207 | } |
208 | 208 | ||
209 | #ifdef CONFIG_CRYPTO_MANAGER_TESTS | ||
209 | static int cryptomgr_test(void *data) | 210 | static int cryptomgr_test(void *data) |
210 | { | 211 | { |
211 | struct crypto_test_param *param = data; | 212 | struct crypto_test_param *param = data; |
@@ -266,6 +267,7 @@ err_put_module: | |||
266 | err: | 267 | err: |
267 | return NOTIFY_OK; | 268 | return NOTIFY_OK; |
268 | } | 269 | } |
270 | #endif /* CONFIG_CRYPTO_MANAGER_TESTS */ | ||
269 | 271 | ||
270 | static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, | 272 | static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, |
271 | void *data) | 273 | void *data) |
@@ -273,8 +275,10 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, | |||
273 | switch (msg) { | 275 | switch (msg) { |
274 | case CRYPTO_MSG_ALG_REQUEST: | 276 | case CRYPTO_MSG_ALG_REQUEST: |
275 | return cryptomgr_schedule_probe(data); | 277 | return cryptomgr_schedule_probe(data); |
278 | #ifdef CONFIG_CRYPTO_MANAGER_TESTS | ||
276 | case CRYPTO_MSG_ALG_REGISTER: | 279 | case CRYPTO_MSG_ALG_REGISTER: |
277 | return cryptomgr_schedule_test(data); | 280 | return cryptomgr_schedule_test(data); |
281 | #endif | ||
278 | } | 282 | } |
279 | 283 | ||
280 | return NOTIFY_DONE; | 284 | return NOTIFY_DONE; |
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index e28e276ac611..5de2ed13b35d 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -22,6 +22,20 @@ config ASYNC_RAID6_RECOV | |||
22 | tristate | 22 | tristate |
23 | select ASYNC_CORE | 23 | select ASYNC_CORE |
24 | select ASYNC_PQ | 24 | select ASYNC_PQ |
25 | select ASYNC_XOR | ||
26 | |||
27 | config ASYNC_RAID6_TEST | ||
28 | tristate "Self test for hardware accelerated raid6 recovery" | ||
29 | depends on ASYNC_RAID6_RECOV | ||
30 | select ASYNC_MEMCPY | ||
31 | ---help--- | ||
32 | This is a one-shot self test that permutes through the | ||
33 | recovery of all the possible two-disk failure scenarios for an ||
34 | N-disk array. Recovery is performed with the asynchronous | ||
35 | raid6 recovery routines, and will optionally use an offload | ||
36 | engine if one is available. | ||
37 | |||
38 | If unsure, say N. | ||
25 | 39 | ||
26 | config ASYNC_TX_DISABLE_PQ_VAL_DMA | 40 | config ASYNC_TX_DISABLE_PQ_VAL_DMA |
27 | bool | 41 | bool |
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index f9cdf04fe7c0..7f2c00a45205 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -81,18 +81,13 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | |||
81 | struct dma_device *device = chan->device; | 81 | struct dma_device *device = chan->device; |
82 | struct dma_async_tx_descriptor *intr_tx = (void *) ~0; | 82 | struct dma_async_tx_descriptor *intr_tx = (void *) ~0; |
83 | 83 | ||
84 | #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH | ||
85 | BUG(); | ||
86 | #endif | ||
87 | |||
88 | /* first check to see if we can still append to depend_tx */ | 84 | /* first check to see if we can still append to depend_tx */ |
89 | spin_lock_bh(&depend_tx->lock); | 85 | txd_lock(depend_tx); |
90 | if (depend_tx->parent && depend_tx->chan == tx->chan) { | 86 | if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) { |
91 | tx->parent = depend_tx; | 87 | txd_chain(depend_tx, tx); |
92 | depend_tx->next = tx; | ||
93 | intr_tx = NULL; | 88 | intr_tx = NULL; |
94 | } | 89 | } |
95 | spin_unlock_bh(&depend_tx->lock); | 90 | txd_unlock(depend_tx); |
96 | 91 | ||
97 | /* attached dependency, flush the parent channel */ | 92 | /* attached dependency, flush the parent channel */ |
98 | if (!intr_tx) { | 93 | if (!intr_tx) { |
@@ -111,24 +106,22 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | |||
111 | if (intr_tx) { | 106 | if (intr_tx) { |
112 | intr_tx->callback = NULL; | 107 | intr_tx->callback = NULL; |
113 | intr_tx->callback_param = NULL; | 108 | intr_tx->callback_param = NULL; |
114 | tx->parent = intr_tx; | 109 | /* safe to chain outside the lock since we know we are |
115 | /* safe to set ->next outside the lock since we know we are | ||
116 | * not submitted yet | 110 | * not submitted yet |
117 | */ | 111 | */ |
118 | intr_tx->next = tx; | 112 | txd_chain(intr_tx, tx); |
119 | 113 | ||
120 | /* check if we need to append */ | 114 | /* check if we need to append */ |
121 | spin_lock_bh(&depend_tx->lock); | 115 | txd_lock(depend_tx); |
122 | if (depend_tx->parent) { | 116 | if (txd_parent(depend_tx)) { |
123 | intr_tx->parent = depend_tx; | 117 | txd_chain(depend_tx, intr_tx); |
124 | depend_tx->next = intr_tx; | ||
125 | async_tx_ack(intr_tx); | 118 | async_tx_ack(intr_tx); |
126 | intr_tx = NULL; | 119 | intr_tx = NULL; |
127 | } | 120 | } |
128 | spin_unlock_bh(&depend_tx->lock); | 121 | txd_unlock(depend_tx); |
129 | 122 | ||
130 | if (intr_tx) { | 123 | if (intr_tx) { |
131 | intr_tx->parent = NULL; | 124 | txd_clear_parent(intr_tx); |
132 | intr_tx->tx_submit(intr_tx); | 125 | intr_tx->tx_submit(intr_tx); |
133 | async_tx_ack(intr_tx); | 126 | async_tx_ack(intr_tx); |
134 | } | 127 | } |
@@ -176,21 +169,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, | |||
176 | * 2/ dependencies are 1:1 i.e. two transactions can | 169 | * 2/ dependencies are 1:1 i.e. two transactions can |
177 | * not depend on the same parent | 170 | * not depend on the same parent |
178 | */ | 171 | */ |
179 | BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next || | 172 | BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) || |
180 | tx->parent); | 173 | txd_parent(tx)); |
181 | 174 | ||
182 | /* the lock prevents async_tx_run_dependencies from missing | 175 | /* the lock prevents async_tx_run_dependencies from missing |
183 | * the setting of ->next when ->parent != NULL | 176 | * the setting of ->next when ->parent != NULL |
184 | */ | 177 | */ |
185 | spin_lock_bh(&depend_tx->lock); | 178 | txd_lock(depend_tx); |
186 | if (depend_tx->parent) { | 179 | if (txd_parent(depend_tx)) { |
187 | /* we have a parent so we can not submit directly | 180 | /* we have a parent so we can not submit directly |
188 | * if we are staying on the same channel: append | 181 | * if we are staying on the same channel: append |
189 | * else: channel switch | 182 | * else: channel switch |
190 | */ | 183 | */ |
191 | if (depend_tx->chan == chan) { | 184 | if (depend_tx->chan == chan) { |
192 | tx->parent = depend_tx; | 185 | txd_chain(depend_tx, tx); |
193 | depend_tx->next = tx; | ||
194 | s = ASYNC_TX_SUBMITTED; | 186 | s = ASYNC_TX_SUBMITTED; |
195 | } else | 187 | } else |
196 | s = ASYNC_TX_CHANNEL_SWITCH; | 188 | s = ASYNC_TX_CHANNEL_SWITCH; |
@@ -203,7 +195,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, | |||
203 | else | 195 | else |
204 | s = ASYNC_TX_CHANNEL_SWITCH; | 196 | s = ASYNC_TX_CHANNEL_SWITCH; |
205 | } | 197 | } |
206 | spin_unlock_bh(&depend_tx->lock); | 198 | txd_unlock(depend_tx); |
207 | 199 | ||
208 | switch (s) { | 200 | switch (s) { |
209 | case ASYNC_TX_SUBMITTED: | 201 | case ASYNC_TX_SUBMITTED: |
@@ -212,12 +204,12 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, | |||
212 | async_tx_channel_switch(depend_tx, tx); | 204 | async_tx_channel_switch(depend_tx, tx); |
213 | break; | 205 | break; |
214 | case ASYNC_TX_DIRECT_SUBMIT: | 206 | case ASYNC_TX_DIRECT_SUBMIT: |
215 | tx->parent = NULL; | 207 | txd_clear_parent(tx); |
216 | tx->tx_submit(tx); | 208 | tx->tx_submit(tx); |
217 | break; | 209 | break; |
218 | } | 210 | } |
219 | } else { | 211 | } else { |
220 | tx->parent = NULL; | 212 | txd_clear_parent(tx); |
221 | tx->tx_submit(tx); | 213 | tx->tx_submit(tx); |
222 | } | 214 | } |
223 | 215 | ||
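
This refactor routes every touch of ->parent, ->next and ->lock through txd_*() helpers, so a CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH build can compile the fields and locking away entirely. A userspace sketch of the accessor pattern, with a pthread mutex standing in for the kernel lock; the helper names follow the kernel's, the rest is illustrative:

#include <pthread.h>
#include <stddef.h>

struct tx_desc {
    struct tx_desc *parent;
    struct tx_desc *next;
    pthread_mutex_t lock;
};

static void txd_lock(struct tx_desc *txd)   { pthread_mutex_lock(&txd->lock); }
static void txd_unlock(struct tx_desc *txd) { pthread_mutex_unlock(&txd->lock); }
static struct tx_desc *txd_parent(struct tx_desc *txd) { return txd->parent; }
static struct tx_desc *txd_next(struct tx_desc *txd)   { return txd->next; }
static void txd_clear_parent(struct tx_desc *txd)      { txd->parent = NULL; }

/* chain `child` after `parent`, setting both directions at once --
 * this replaces the open-coded ->parent/->next assignments above */
static void txd_chain(struct tx_desc *parent, struct tx_desc *child)
{
    child->parent = parent;
    parent->next = child;
}

int main(void)
{
    struct tx_desc a = { .lock = PTHREAD_MUTEX_INITIALIZER };
    struct tx_desc b = { .lock = PTHREAD_MUTEX_INITIALIZER };

    txd_lock(&a);
    if (!txd_parent(&a))       /* a is not yet chained to anything */
        txd_chain(&a, &b);     /* b now depends on a */
    txd_unlock(&a);

    return txd_next(&a) == &b && txd_parent(&b) == &a ? 0 : 1;
}
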
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 05eb32e0d949..a5a22cfcd07b 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -181,6 +181,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, | |||
181 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | 181 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); |
182 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | 182 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); |
183 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | 183 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); |
184 | unsigned int cryptlen = req->cryptlen; | ||
184 | 185 | ||
185 | if (err) | 186 | if (err) |
186 | goto out; | 187 | goto out; |
@@ -196,6 +197,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, | |||
196 | goto out; | 197 | goto out; |
197 | 198 | ||
198 | authsize = crypto_aead_authsize(authenc); | 199 | authsize = crypto_aead_authsize(authenc); |
200 | cryptlen -= authsize; | ||
199 | ihash = ahreq->result + authsize; | 201 | ihash = ahreq->result + authsize; |
200 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | 202 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, |
201 | authsize, 0); | 203 | authsize, 0); |
@@ -209,7 +211,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, | |||
209 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | 211 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), |
210 | req->base.complete, req->base.data); | 212 | req->base.complete, req->base.data); |
211 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | 213 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, |
212 | req->cryptlen, req->iv); | 214 | cryptlen, req->iv); |
213 | 215 | ||
214 | err = crypto_ablkcipher_decrypt(abreq); | 216 | err = crypto_ablkcipher_decrypt(abreq); |
215 | 217 | ||
@@ -228,11 +230,13 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq, | |||
228 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | 230 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); |
229 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | 231 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); |
230 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | 232 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); |
233 | unsigned int cryptlen = req->cryptlen; | ||
231 | 234 | ||
232 | if (err) | 235 | if (err) |
233 | goto out; | 236 | goto out; |
234 | 237 | ||
235 | authsize = crypto_aead_authsize(authenc); | 238 | authsize = crypto_aead_authsize(authenc); |
239 | cryptlen -= authsize; | ||
236 | ihash = ahreq->result + authsize; | 240 | ihash = ahreq->result + authsize; |
237 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | 241 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, |
238 | authsize, 0); | 242 | authsize, 0); |
@@ -246,7 +250,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq, | |||
246 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | 250 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), |
247 | req->base.complete, req->base.data); | 251 | req->base.complete, req->base.data); |
248 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | 252 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, |
249 | req->cryptlen, req->iv); | 253 | cryptlen, req->iv); |
250 | 254 | ||
251 | err = crypto_ablkcipher_decrypt(abreq); | 255 | err = crypto_ablkcipher_decrypt(abreq); |
252 | 256 | ||
@@ -612,7 +616,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
612 | auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, | 616 | auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, |
613 | CRYPTO_ALG_TYPE_AHASH_MASK); | 617 | CRYPTO_ALG_TYPE_AHASH_MASK); |
614 | if (IS_ERR(auth)) | 618 | if (IS_ERR(auth)) |
615 | return ERR_PTR(PTR_ERR(auth)); | 619 | return ERR_CAST(auth); |
616 | 620 | ||
617 | auth_base = &auth->base; | 621 | auth_base = &auth->base; |
618 | 622 | ||
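
The authenc change is a length fix: on the decrypt side, req->cryptlen covers the ciphertext plus the appended authentication tag, and the async completion paths were feeding the full length to the cipher. A trivial sketch of the corrected arithmetic (the values are illustrative):

#include <stdio.h>

int main(void)
{
    unsigned int req_cryptlen = 1040;  /* ciphertext + ICV, as passed in */
    unsigned int authsize     = 16;    /* e.g. a truncated HMAC tag      */

    /* only this many bytes go through the block cipher */
    unsigned int cipher_len = req_cryptlen - authsize;

    printf("decrypt %u bytes, verify %u-byte tag\n", cipher_len, authsize);
    return 0;
}
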
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 6c3bfabb9d1d..4ca7222cfeb6 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -185,7 +185,7 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) | |||
185 | alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, | 185 | alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, |
186 | CRYPTO_ALG_TYPE_MASK); | 186 | CRYPTO_ALG_TYPE_MASK); |
187 | if (IS_ERR(alg)) | 187 | if (IS_ERR(alg)) |
188 | return ERR_PTR(PTR_ERR(alg)); | 188 | return ERR_CAST(alg); |
189 | 189 | ||
190 | /* Block size must be >= 4 bytes. */ | 190 | /* Block size must be >= 4 bytes. */ |
191 | err = -EINVAL; | 191 | err = -EINVAL; |
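
ERR_CAST(alg) replaces the ERR_PTR(PTR_ERR(alg)) round trip when an error pointer merely changes type on its way up. A userspace sketch of the underlying convention — small errno values encoded into the top of the address space — using simplified stand-ins for the kernel macros:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)    { return (void *)error; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)
{
    /* errors live in the top MAX_ERRNO addresses */
    return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}
static void *ERR_CAST(const void *p) { return (void *)p; }

struct crypto_alg      { int dummy; };
struct crypto_instance { int dummy; };

static struct crypto_alg *lookup_alg(int fail)
{
    static struct crypto_alg alg;
    return fail ? ERR_PTR(-ENOENT) : &alg;
}

static struct crypto_instance *alloc_inst(int fail)
{
    static struct crypto_instance inst;
    struct crypto_alg *alg = lookup_alg(fail);

    if (IS_ERR(alg))
        return ERR_CAST(alg);   /* instead of ERR_PTR(PTR_ERR(alg)) */

    return &inst;
}

int main(void)
{
    struct crypto_instance *inst = alloc_inst(1);

    if (IS_ERR(inst))
        printf("error: %ld\n", PTR_ERR(inst));  /* prints -2 (ENOENT) */
    return 0;
}
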
diff --git a/crypto/internal.h b/crypto/internal.h
index 2d226362e594..d4384b08ab29 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -6,7 +6,7 @@ | |||
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms of the GNU General Public License as published by the Free | 8 | * under the terms of the GNU General Public License as published by the Free |
9 | * Software Foundation; either version 2 of the License, or (at your option) | 9 | * Software Foundation; either version 2 of the License, or (at your option) |
10 | * any later version. | 10 | * any later version. |
11 | * | 11 | * |
12 | */ | 12 | */ |
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 80201241b698..de3078215fe6 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -24,12 +24,40 @@ | |||
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/notifier.h> | ||
28 | #include <linux/kobject.h> | ||
29 | #include <linux/cpu.h> | ||
27 | #include <crypto/pcrypt.h> | 30 | #include <crypto/pcrypt.h> |
28 | 31 | ||
29 | static struct padata_instance *pcrypt_enc_padata; | 32 | struct padata_pcrypt { |
30 | static struct padata_instance *pcrypt_dec_padata; | 33 | struct padata_instance *pinst; |
31 | static struct workqueue_struct *encwq; | 34 | struct workqueue_struct *wq; |
32 | static struct workqueue_struct *decwq; | 35 | |
36 | /* | ||
37 | * Cpumask for callback CPUs. It should be | ||
38 | * equal to serial cpumask of corresponding padata instance, | ||
39 | * so it is updated when padata notifies us about serial | ||
40 | * cpumask change. | ||
41 | * | ||
42 | * cb_cpumask is protected by RCU. This fact prevents us from | ||
43 | * using cpumask_var_t directly because the actual type of | ||
44 | * cpumask_var_t depends on kernel configuration (particularly on ||
45 | * CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration | ||
46 | * cpumask_var_t may be either a pointer to the struct cpumask | ||
47 | * or a variable allocated on the stack. Thus we can not safely use | ||
48 | * cpumask_var_t with RCU operations such as rcu_assign_pointer or | ||
49 | * rcu_dereference. So cpumask_var_t is wrapped with struct | ||
51 | * pcrypt_cpumask which makes it possible to use it with RCU. ||
51 | */ | ||
52 | struct pcrypt_cpumask { | ||
53 | cpumask_var_t mask; | ||
54 | } *cb_cpumask; | ||
55 | struct notifier_block nblock; | ||
56 | }; | ||
57 | |||
58 | static struct padata_pcrypt pencrypt; | ||
59 | static struct padata_pcrypt pdecrypt; | ||
60 | static struct kset *pcrypt_kset; | ||
33 | 61 | ||
34 | struct pcrypt_instance_ctx { | 62 | struct pcrypt_instance_ctx { |
35 | struct crypto_spawn spawn; | 63 | struct crypto_spawn spawn; |
@@ -42,25 +70,32 @@ struct pcrypt_aead_ctx { | |||
42 | }; | 70 | }; |
43 | 71 | ||
44 | static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu, | 72 | static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu, |
45 | struct padata_instance *pinst) | 73 | struct padata_pcrypt *pcrypt) |
46 | { | 74 | { |
47 | unsigned int cpu_index, cpu, i; | 75 | unsigned int cpu_index, cpu, i; |
76 | struct pcrypt_cpumask *cpumask; | ||
48 | 77 | ||
49 | cpu = *cb_cpu; | 78 | cpu = *cb_cpu; |
50 | 79 | ||
51 | if (cpumask_test_cpu(cpu, cpu_active_mask)) | 80 | rcu_read_lock_bh(); |
81 | cpumask = rcu_dereference(pcrypt->cb_cpumask); | ||
82 | if (cpumask_test_cpu(cpu, cpumask->mask)) | ||
83 | goto out; | ||
84 | |||
85 | if (!cpumask_weight(cpumask->mask)) | ||
52 | goto out; | 86 | goto out; |
53 | 87 | ||
54 | cpu_index = cpu % cpumask_weight(cpu_active_mask); | 88 | cpu_index = cpu % cpumask_weight(cpumask->mask); |
55 | 89 | ||
56 | cpu = cpumask_first(cpu_active_mask); | 90 | cpu = cpumask_first(cpumask->mask); |
57 | for (i = 0; i < cpu_index; i++) | 91 | for (i = 0; i < cpu_index; i++) |
58 | cpu = cpumask_next(cpu, cpu_active_mask); | 92 | cpu = cpumask_next(cpu, cpumask->mask); |
59 | 93 | ||
60 | *cb_cpu = cpu; | 94 | *cb_cpu = cpu; |
61 | 95 | ||
62 | out: | 96 | out: |
63 | return padata_do_parallel(pinst, padata, cpu); | 97 | rcu_read_unlock_bh(); |
98 | return padata_do_parallel(pcrypt->pinst, padata, cpu); | ||
64 | } | 99 | } |
65 | 100 | ||
66 | static int pcrypt_aead_setkey(struct crypto_aead *parent, | 101 | static int pcrypt_aead_setkey(struct crypto_aead *parent, |
@@ -142,11 +177,9 @@ static int pcrypt_aead_encrypt(struct aead_request *req) | |||
142 | req->cryptlen, req->iv); | 177 | req->cryptlen, req->iv); |
143 | aead_request_set_assoc(creq, req->assoc, req->assoclen); | 178 | aead_request_set_assoc(creq, req->assoc, req->assoclen); |
144 | 179 | ||
145 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata); | 180 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); |
146 | if (err) | 181 | if (!err) |
147 | return err; | 182 | return -EINPROGRESS; |
148 | else | ||
149 | err = crypto_aead_encrypt(creq); | ||
150 | 183 | ||
151 | return err; | 184 | return err; |
152 | } | 185 | } |
@@ -186,11 +219,9 @@ static int pcrypt_aead_decrypt(struct aead_request *req) | |||
186 | req->cryptlen, req->iv); | 219 | req->cryptlen, req->iv); |
187 | aead_request_set_assoc(creq, req->assoc, req->assoclen); | 220 | aead_request_set_assoc(creq, req->assoc, req->assoclen); |
188 | 221 | ||
189 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_dec_padata); | 222 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); |
190 | if (err) | 223 | if (!err) |
191 | return err; | 224 | return -EINPROGRESS; |
192 | else | ||
193 | err = crypto_aead_decrypt(creq); | ||
194 | 225 | ||
195 | return err; | 226 | return err; |
196 | } | 227 | } |
@@ -232,11 +263,9 @@ static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req) | |||
232 | aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen); | 263 | aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen); |
233 | aead_givcrypt_set_giv(creq, req->giv, req->seq); | 264 | aead_givcrypt_set_giv(creq, req->giv, req->seq); |
234 | 265 | ||
235 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata); | 266 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); |
236 | if (err) | 267 | if (!err) |
237 | return err; | 268 | return -EINPROGRESS; |
238 | else | ||
239 | err = crypto_aead_givencrypt(creq); | ||
240 | 269 | ||
241 | return err; | 270 | return err; |
242 | } | 271 | } |
@@ -315,16 +344,13 @@ out_free_inst: | |||
315 | goto out; | 344 | goto out; |
316 | } | 345 | } |
317 | 346 | ||
318 | static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb) | 347 | static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb, |
348 | u32 type, u32 mask) | ||
319 | { | 349 | { |
320 | struct crypto_instance *inst; | 350 | struct crypto_instance *inst; |
321 | struct crypto_alg *alg; | 351 | struct crypto_alg *alg; |
322 | struct crypto_attr_type *algt; | ||
323 | 352 | ||
324 | algt = crypto_get_attr_type(tb); | 353 | alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK)); |
325 | |||
326 | alg = crypto_get_attr_alg(tb, algt->type, | ||
327 | (algt->mask & CRYPTO_ALG_TYPE_MASK)); | ||
328 | if (IS_ERR(alg)) | 354 | if (IS_ERR(alg)) |
329 | return ERR_CAST(alg); | 355 | return ERR_CAST(alg); |
330 | 356 | ||
@@ -365,7 +391,7 @@ static struct crypto_instance *pcrypt_alloc(struct rtattr **tb) | |||
365 | 391 | ||
366 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | 392 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { |
367 | case CRYPTO_ALG_TYPE_AEAD: | 393 | case CRYPTO_ALG_TYPE_AEAD: |
368 | return pcrypt_alloc_aead(tb); | 394 | return pcrypt_alloc_aead(tb, algt->type, algt->mask); |
369 | } | 395 | } |
370 | 396 | ||
371 | return ERR_PTR(-EINVAL); | 397 | return ERR_PTR(-EINVAL); |
@@ -379,6 +405,115 @@ static void pcrypt_free(struct crypto_instance *inst) | |||
379 | kfree(inst); | 405 | kfree(inst); |
380 | } | 406 | } |
381 | 407 | ||
408 | static int pcrypt_cpumask_change_notify(struct notifier_block *self, | ||
409 | unsigned long val, void *data) | ||
410 | { | ||
411 | struct padata_pcrypt *pcrypt; | ||
412 | struct pcrypt_cpumask *new_mask, *old_mask; | ||
413 | struct padata_cpumask *cpumask = (struct padata_cpumask *)data; | ||
414 | |||
415 | if (!(val & PADATA_CPU_SERIAL)) | ||
416 | return 0; | ||
417 | |||
418 | pcrypt = container_of(self, struct padata_pcrypt, nblock); | ||
419 | new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL); | ||
420 | if (!new_mask) | ||
421 | return -ENOMEM; | ||
422 | if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) { | ||
423 | kfree(new_mask); | ||
424 | return -ENOMEM; | ||
425 | } | ||
426 | |||
427 | old_mask = pcrypt->cb_cpumask; | ||
428 | |||
429 | cpumask_copy(new_mask->mask, cpumask->cbcpu); | ||
430 | rcu_assign_pointer(pcrypt->cb_cpumask, new_mask); | ||
431 | synchronize_rcu_bh(); | ||
432 | |||
433 | free_cpumask_var(old_mask->mask); | ||
434 | kfree(old_mask); | ||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name) | ||
439 | { | ||
440 | int ret; | ||
441 | |||
442 | pinst->kobj.kset = pcrypt_kset; | ||
443 | ret = kobject_add(&pinst->kobj, NULL, name); | ||
444 | if (!ret) | ||
445 | kobject_uevent(&pinst->kobj, KOBJ_ADD); | ||
446 | |||
447 | return ret; | ||
448 | } | ||
449 | |||
450 | static int pcrypt_init_padata(struct padata_pcrypt *pcrypt, | ||
451 | const char *name) | ||
452 | { | ||
453 | int ret = -ENOMEM; | ||
454 | struct pcrypt_cpumask *mask; | ||
455 | |||
456 | get_online_cpus(); | ||
457 | |||
458 | pcrypt->wq = create_workqueue(name); | ||
459 | if (!pcrypt->wq) | ||
460 | goto err; | ||
461 | |||
462 | pcrypt->pinst = padata_alloc_possible(pcrypt->wq); | ||
463 | if (!pcrypt->pinst) | ||
464 | goto err_destroy_workqueue; | ||
465 | |||
466 | mask = kmalloc(sizeof(*mask), GFP_KERNEL); | ||
467 | if (!mask) | ||
468 | goto err_free_padata; | ||
469 | if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) { | ||
470 | kfree(mask); | ||
471 | goto err_free_padata; | ||
472 | } | ||
473 | |||
474 | cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask); | ||
475 | rcu_assign_pointer(pcrypt->cb_cpumask, mask); | ||
476 | |||
477 | pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify; | ||
478 | ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); | ||
479 | if (ret) | ||
480 | goto err_free_cpumask; | ||
481 | |||
482 | ret = pcrypt_sysfs_add(pcrypt->pinst, name); | ||
483 | if (ret) | ||
484 | goto err_unregister_notifier; | ||
485 | |||
486 | put_online_cpus(); | ||
487 | |||
488 | return ret; | ||
489 | |||
490 | err_unregister_notifier: | ||
491 | padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); | ||
492 | err_free_cpumask: | ||
493 | free_cpumask_var(mask->mask); | ||
494 | kfree(mask); | ||
495 | err_free_padata: | ||
496 | padata_free(pcrypt->pinst); | ||
497 | err_destroy_workqueue: | ||
498 | destroy_workqueue(pcrypt->wq); | ||
499 | err: | ||
500 | put_online_cpus(); | ||
501 | |||
502 | return ret; | ||
503 | } | ||
504 | |||
505 | static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) | ||
506 | { | ||
507 | kobject_put(&pcrypt->pinst->kobj); | ||
508 | free_cpumask_var(pcrypt->cb_cpumask->mask); | ||
509 | kfree(pcrypt->cb_cpumask); | ||
510 | |||
511 | padata_stop(pcrypt->pinst); | ||
512 | padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); | ||
513 | destroy_workqueue(pcrypt->wq); | ||
514 | padata_free(pcrypt->pinst); | ||
515 | } | ||
516 | |||
382 | static struct crypto_template pcrypt_tmpl = { | 517 | static struct crypto_template pcrypt_tmpl = { |
383 | .name = "pcrypt", | 518 | .name = "pcrypt", |
384 | .alloc = pcrypt_alloc, | 519 | .alloc = pcrypt_alloc, |
@@ -388,52 +523,39 @@ static struct crypto_template pcrypt_tmpl = { | |||
388 | 523 | ||
389 | static int __init pcrypt_init(void) | 524 | static int __init pcrypt_init(void) |
390 | { | 525 | { |
391 | encwq = create_workqueue("pencrypt"); | 526 | int err = -ENOMEM; |
392 | if (!encwq) | ||
393 | goto err; | ||
394 | |||
395 | decwq = create_workqueue("pdecrypt"); | ||
396 | if (!decwq) | ||
397 | goto err_destroy_encwq; | ||
398 | 527 | ||
528 | pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj); | ||
529 | if (!pcrypt_kset) | ||
530 | goto err; | ||
399 | 531 | ||
400 | pcrypt_enc_padata = padata_alloc(cpu_possible_mask, encwq); | 532 | err = pcrypt_init_padata(&pencrypt, "pencrypt"); |
401 | if (!pcrypt_enc_padata) | 533 | if (err) |
402 | goto err_destroy_decwq; | 534 | goto err_unreg_kset; |
403 | 535 | ||
404 | pcrypt_dec_padata = padata_alloc(cpu_possible_mask, decwq); | 536 | err = pcrypt_init_padata(&pdecrypt, "pdecrypt"); |
405 | if (!pcrypt_dec_padata) | 537 | if (err) |
406 | goto err_free_padata; | 538 | goto err_deinit_pencrypt; |
407 | 539 | ||
408 | padata_start(pcrypt_enc_padata); | 540 | padata_start(pencrypt.pinst); |
409 | padata_start(pcrypt_dec_padata); | 541 | padata_start(pdecrypt.pinst); |
410 | 542 | ||
411 | return crypto_register_template(&pcrypt_tmpl); | 543 | return crypto_register_template(&pcrypt_tmpl); |
412 | 544 | ||
413 | err_free_padata: | 545 | err_deinit_pencrypt: |
414 | padata_free(pcrypt_enc_padata); | 546 | pcrypt_fini_padata(&pencrypt); |
415 | 547 | err_unreg_kset: | |
416 | err_destroy_decwq: | 548 | kset_unregister(pcrypt_kset); |
417 | destroy_workqueue(decwq); | ||
418 | |||
419 | err_destroy_encwq: | ||
420 | destroy_workqueue(encwq); | ||
421 | |||
422 | err: | 549 | err: |
423 | return -ENOMEM; | 550 | return err; |
424 | } | 551 | } |
425 | 552 | ||
426 | static void __exit pcrypt_exit(void) | 553 | static void __exit pcrypt_exit(void) |
427 | { | 554 | { |
428 | padata_stop(pcrypt_enc_padata); | 555 | pcrypt_fini_padata(&pencrypt); |
429 | padata_stop(pcrypt_dec_padata); | 556 | pcrypt_fini_padata(&pdecrypt); |
430 | |||
431 | destroy_workqueue(encwq); | ||
432 | destroy_workqueue(decwq); | ||
433 | |||
434 | padata_free(pcrypt_enc_padata); | ||
435 | padata_free(pcrypt_dec_padata); | ||
436 | 557 | ||
558 | kset_unregister(pcrypt_kset); | ||
437 | crypto_unregister_template(&pcrypt_tmpl); | 559 | crypto_unregister_template(&pcrypt_tmpl); |
438 | } | 560 | } |
439 | 561 | ||
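
The heart of the reworked pcrypt_do_parallel() is folding the caller's preferred CPU onto whatever callback cpumask padata currently publishes: keep the CPU if it is already in the mask, otherwise take cpu % weight(mask) and walk to that set bit. A userspace sketch with a 32-bit word standing in for cpumask_var_t; note the kernel version reads the mask under RCU and handles an empty mask by falling through with the original CPU, whereas this demo just returns -1:

#include <stdio.h>
#include <stdint.h>

static unsigned int mask_weight(uint32_t mask)
{
    return (unsigned int)__builtin_popcount(mask);
}

static int pick_cb_cpu(unsigned int cpu, uint32_t cb_mask)
{
    unsigned int i, cpu_index;

    if (cb_mask & (1u << cpu))      /* already an allowed callback CPU */
        return (int)cpu;

    if (!mask_weight(cb_mask))      /* empty mask: caller must decide */
        return -1;

    cpu_index = cpu % mask_weight(cb_mask);

    /* walk to the cpu_index-th set bit of the mask */
    for (i = 0; i < 32; i++) {
        if (!(cb_mask & (1u << i)))
            continue;
        if (cpu_index-- == 0)
            return (int)i;
    }
    return -1;
}

int main(void)
{
    uint32_t cb_mask = 0x0000002c;  /* callback CPUs 2, 3 and 5 */

    printf("%d\n", pick_cb_cpu(3, cb_mask));  /* 3: already allowed   */
    printf("%d\n", pick_cb_cpu(7, cb_mask));  /* 7 %% 3 == 1 -> CPU 3 */
    return 0;
}
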
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 3de89a424401..41e529af0773 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -68,7 +68,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out, | |||
68 | 68 | ||
69 | void scatterwalk_done(struct scatter_walk *walk, int out, int more) | 69 | void scatterwalk_done(struct scatter_walk *walk, int out, int more) |
70 | { | 70 | { |
71 | if (!offset_in_page(walk->offset) || !more) | 71 | if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more) |
72 | scatterwalk_pagedone(walk, out, more); | 72 | scatterwalk_pagedone(walk, out, more); |
73 | } | 73 | } |
74 | EXPORT_SYMBOL_GPL(scatterwalk_done); | 74 | EXPORT_SYMBOL_GPL(scatterwalk_done); |
diff --git a/crypto/shash.c b/crypto/shash.c
index 91f7b9d83881..22fd9433141f 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -37,7 +37,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, | |||
37 | u8 *buffer, *alignbuffer; | 37 | u8 *buffer, *alignbuffer; |
38 | int err; | 38 | int err; |
39 | 39 | ||
40 | absize = keylen + (alignmask & ~(CRYPTO_MINALIGN - 1)); | 40 | absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); |
41 | buffer = kmalloc(absize, GFP_KERNEL); | 41 | buffer = kmalloc(absize, GFP_KERNEL); |
42 | if (!buffer) | 42 | if (!buffer) |
43 | return -ENOMEM; | 43 | return -ENOMEM; |
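
The shash fix changes how much alignment slack the setkey bounce buffer reserves: the slack is the algorithm's alignmask minus whatever alignment the allocator already guarantees, and that guarantee is crypto_tfm_ctx_alignment(), not the compile-time CRYPTO_MINALIGN. A userspace sketch of the sizing and round-up, with malloc(3) standing in for kmalloc() and an assumed 8-byte allocator guarantee:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
    unsigned int keylen = 20;          /* e.g. an HMAC-SHA1 key       */
    uintptr_t alignmask = 15;          /* algorithm wants 16 bytes    */
    uintptr_t minalign  = 8;           /* allocator's own guarantee   */

    /* slack: alignment the allocator does not already provide */
    size_t absize = keylen + (alignmask & ~(minalign - 1));

    unsigned char *buffer = malloc(absize);
    unsigned char *alignbuffer;

    if (!buffer)
        return 1;

    /* round the pointer up to the next (alignmask + 1) boundary */
    alignbuffer = (unsigned char *)(((uintptr_t)buffer + alignmask) & ~alignmask);

    memcpy(alignbuffer, "0123456789abcdef0123", keylen);
    printf("slack used: %td of %zu bytes\n",
           alignbuffer - buffer, absize - keylen);

    free(buffer);
    return 0;
}
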
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index a35159947a26..3ca68f9fc14d 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -394,6 +394,17 @@ out: | |||
394 | return 0; | 394 | return 0; |
395 | } | 395 | } |
396 | 396 | ||
397 | static void test_hash_sg_init(struct scatterlist *sg) | ||
398 | { | ||
399 | int i; | ||
400 | |||
401 | sg_init_table(sg, TVMEMSIZE); | ||
402 | for (i = 0; i < TVMEMSIZE; i++) { | ||
403 | sg_set_buf(sg + i, tvmem[i], PAGE_SIZE); | ||
404 | memset(tvmem[i], 0xff, PAGE_SIZE); | ||
405 | } | ||
406 | } | ||
407 | |||
397 | static void test_hash_speed(const char *algo, unsigned int sec, | 408 | static void test_hash_speed(const char *algo, unsigned int sec, |
398 | struct hash_speed *speed) | 409 | struct hash_speed *speed) |
399 | { | 410 | { |
@@ -423,12 +434,7 @@ static void test_hash_speed(const char *algo, unsigned int sec, | |||
423 | goto out; | 434 | goto out; |
424 | } | 435 | } |
425 | 436 | ||
426 | sg_init_table(sg, TVMEMSIZE); | 437 | test_hash_sg_init(sg); |
427 | for (i = 0; i < TVMEMSIZE; i++) { | ||
428 | sg_set_buf(sg + i, tvmem[i], PAGE_SIZE); | ||
429 | memset(tvmem[i], 0xff, PAGE_SIZE); | ||
430 | } | ||
431 | |||
432 | for (i = 0; speed[i].blen != 0; i++) { | 438 | for (i = 0; speed[i].blen != 0; i++) { |
433 | if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { | 439 | if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { |
434 | printk(KERN_ERR | 440 | printk(KERN_ERR |
@@ -437,6 +443,9 @@ static void test_hash_speed(const char *algo, unsigned int sec, | |||
437 | goto out; | 443 | goto out; |
438 | } | 444 | } |
439 | 445 | ||
446 | if (speed[i].klen) | ||
447 | crypto_hash_setkey(tfm, tvmem[0], speed[i].klen); | ||
448 | |||
440 | printk(KERN_INFO "test%3u " | 449 | printk(KERN_INFO "test%3u " |
441 | "(%5u byte blocks,%5u bytes per update,%4u updates): ", | 450 | "(%5u byte blocks,%5u bytes per update,%4u updates): ", |
442 | i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); | 451 | i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); |
@@ -458,6 +467,250 @@ out: | |||
458 | crypto_free_hash(tfm); | 467 | crypto_free_hash(tfm); |
459 | } | 468 | } |
460 | 469 | ||
470 | struct tcrypt_result { | ||
471 | struct completion completion; | ||
472 | int err; | ||
473 | }; | ||
474 | |||
475 | static void tcrypt_complete(struct crypto_async_request *req, int err) | ||
476 | { | ||
477 | struct tcrypt_result *res = req->data; | ||
478 | |||
479 | if (err == -EINPROGRESS) | ||
480 | return; | ||
481 | |||
482 | res->err = err; | ||
483 | complete(&res->completion); | ||
484 | } | ||
485 | |||
486 | static inline int do_one_ahash_op(struct ahash_request *req, int ret) | ||
487 | { | ||
488 | if (ret == -EINPROGRESS || ret == -EBUSY) { | ||
489 | struct tcrypt_result *tr = req->base.data; | ||
490 | |||
491 | ret = wait_for_completion_interruptible(&tr->completion); | ||
492 | if (!ret) | ||
493 | ret = tr->err; | ||
494 | INIT_COMPLETION(tr->completion); | ||
495 | } | ||
496 | return ret; | ||
497 | } | ||
498 | |||
499 | static int test_ahash_jiffies_digest(struct ahash_request *req, int blen, | ||
500 | char *out, int sec) | ||
501 | { | ||
502 | unsigned long start, end; | ||
503 | int bcount; | ||
504 | int ret; | ||
505 | |||
506 | for (start = jiffies, end = start + sec * HZ, bcount = 0; | ||
507 | time_before(jiffies, end); bcount++) { | ||
508 | ret = do_one_ahash_op(req, crypto_ahash_digest(req)); | ||
509 | if (ret) | ||
510 | return ret; | ||
511 | } | ||
512 | |||
513 | printk("%6u opers/sec, %9lu bytes/sec\n", | ||
514 | bcount / sec, ((long)bcount * blen) / sec); | ||
515 | |||
516 | return 0; | ||
517 | } | ||
518 | |||
519 | static int test_ahash_jiffies(struct ahash_request *req, int blen, | ||
520 | int plen, char *out, int sec) | ||
521 | { | ||
522 | unsigned long start, end; | ||
523 | int bcount, pcount; | ||
524 | int ret; | ||
525 | |||
526 | if (plen == blen) | ||
527 | return test_ahash_jiffies_digest(req, blen, out, sec); | ||
528 | |||
529 | for (start = jiffies, end = start + sec * HZ, bcount = 0; | ||
530 | time_before(jiffies, end); bcount++) { | ||
531 | ret = crypto_ahash_init(req); | ||
532 | if (ret) | ||
533 | return ret; | ||
534 | for (pcount = 0; pcount < blen; pcount += plen) { | ||
535 | ret = do_one_ahash_op(req, crypto_ahash_update(req)); | ||
536 | if (ret) | ||
537 | return ret; | ||
538 | } | ||
539 | /* we assume there is enough space in 'out' for the result */ | ||
540 | ret = do_one_ahash_op(req, crypto_ahash_final(req)); | ||
541 | if (ret) | ||
542 | return ret; | ||
543 | } | ||
544 | |||
545 | pr_cont("%6u opers/sec, %9lu bytes/sec\n", | ||
546 | bcount / sec, ((long)bcount * blen) / sec); | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | static int test_ahash_cycles_digest(struct ahash_request *req, int blen, | ||
552 | char *out) | ||
553 | { | ||
554 | unsigned long cycles = 0; | ||
555 | int ret, i; | ||
556 | |||
557 | /* Warm-up run. */ | ||
558 | for (i = 0; i < 4; i++) { | ||
559 | ret = do_one_ahash_op(req, crypto_ahash_digest(req)); | ||
560 | if (ret) | ||
561 | goto out; | ||
562 | } | ||
563 | |||
564 | /* The real thing. */ | ||
565 | for (i = 0; i < 8; i++) { | ||
566 | cycles_t start, end; | ||
567 | |||
568 | start = get_cycles(); | ||
569 | |||
570 | ret = do_one_ahash_op(req, crypto_ahash_digest(req)); | ||
571 | if (ret) | ||
572 | goto out; | ||
573 | |||
574 | end = get_cycles(); | ||
575 | |||
576 | cycles += end - start; | ||
577 | } | ||
578 | |||
579 | out: | ||
580 | if (ret) | ||
581 | return ret; | ||
582 | |||
583 | pr_cont("%6lu cycles/operation, %4lu cycles/byte\n", | ||
584 | cycles / 8, cycles / (8 * blen)); | ||
585 | |||
586 | return 0; | ||
587 | } | ||
588 | |||
589 | static int test_ahash_cycles(struct ahash_request *req, int blen, | ||
590 | int plen, char *out) | ||
591 | { | ||
592 | unsigned long cycles = 0; | ||
593 | int i, pcount, ret; | ||
594 | |||
595 | if (plen == blen) | ||
596 | return test_ahash_cycles_digest(req, blen, out); | ||
597 | |||
598 | /* Warm-up run. */ | ||
599 | for (i = 0; i < 4; i++) { | ||
600 | ret = crypto_ahash_init(req); | ||
601 | if (ret) | ||
602 | goto out; | ||
603 | for (pcount = 0; pcount < blen; pcount += plen) { | ||
604 | ret = do_one_ahash_op(req, crypto_ahash_update(req)); | ||
605 | if (ret) | ||
606 | goto out; | ||
607 | } | ||
608 | ret = do_one_ahash_op(req, crypto_ahash_final(req)); | ||
609 | if (ret) | ||
610 | goto out; | ||
611 | } | ||
612 | |||
613 | /* The real thing. */ | ||
614 | for (i = 0; i < 8; i++) { | ||
615 | cycles_t start, end; | ||
616 | |||
617 | start = get_cycles(); | ||
618 | |||
619 | ret = crypto_ahash_init(req); | ||
620 | if (ret) | ||
621 | goto out; | ||
622 | for (pcount = 0; pcount < blen; pcount += plen) { | ||
623 | ret = do_one_ahash_op(req, crypto_ahash_update(req)); | ||
624 | if (ret) | ||
625 | goto out; | ||
626 | } | ||
627 | ret = do_one_ahash_op(req, crypto_ahash_final(req)); | ||
628 | if (ret) | ||
629 | goto out; | ||
630 | |||
631 | end = get_cycles(); | ||
632 | |||
633 | cycles += end - start; | ||
634 | } | ||
635 | |||
636 | out: | ||
637 | if (ret) | ||
638 | return ret; | ||
639 | |||
640 | pr_cont("%6lu cycles/operation, %4lu cycles/byte\n", | ||
641 | cycles / 8, cycles / (8 * blen)); | ||
642 | |||
643 | return 0; | ||
644 | } | ||
645 | |||
646 | static void test_ahash_speed(const char *algo, unsigned int sec, | ||
647 | struct hash_speed *speed) | ||
648 | { | ||
649 | struct scatterlist sg[TVMEMSIZE]; | ||
650 | struct tcrypt_result tresult; | ||
651 | struct ahash_request *req; | ||
652 | struct crypto_ahash *tfm; | ||
653 | static char output[1024]; | ||
654 | int i, ret; | ||
655 | |||
656 | printk(KERN_INFO "\ntesting speed of async %s\n", algo); | ||
657 | |||
658 | tfm = crypto_alloc_ahash(algo, 0, 0); | ||
659 | if (IS_ERR(tfm)) { | ||
660 | pr_err("failed to load transform for %s: %ld\n", | ||
661 | algo, PTR_ERR(tfm)); | ||
662 | return; | ||
663 | } | ||
664 | |||
665 | if (crypto_ahash_digestsize(tfm) > sizeof(output)) { | ||
666 | pr_err("digestsize(%u) > outputbuffer(%zu)\n", | ||
667 | crypto_ahash_digestsize(tfm), sizeof(output)); | ||
668 | goto out; | ||
669 | } | ||
670 | |||
671 | test_hash_sg_init(sg); | ||
672 | req = ahash_request_alloc(tfm, GFP_KERNEL); | ||
673 | if (!req) { | ||
674 | pr_err("ahash request allocation failure\n"); | ||
675 | goto out; | ||
676 | } | ||
677 | |||
678 | init_completion(&tresult.completion); | ||
679 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
680 | tcrypt_complete, &tresult); | ||
681 | |||
682 | for (i = 0; speed[i].blen != 0; i++) { | ||
683 | if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { | ||
684 | pr_err("template (%u) too big for tvmem (%lu)\n", | ||
685 | speed[i].blen, TVMEMSIZE * PAGE_SIZE); | ||
686 | break; | ||
687 | } | ||
688 | |||
689 | pr_info("test%3u " | ||
690 | "(%5u byte blocks,%5u bytes per update,%4u updates): ", | ||
691 | i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); | ||
692 | |||
693 | ahash_request_set_crypt(req, sg, output, speed[i].plen); | ||
694 | |||
695 | if (sec) | ||
696 | ret = test_ahash_jiffies(req, speed[i].blen, | ||
697 | speed[i].plen, output, sec); | ||
698 | else | ||
699 | ret = test_ahash_cycles(req, speed[i].blen, | ||
700 | speed[i].plen, output); | ||
701 | |||
702 | if (ret) { | ||
703 | pr_err("hashing failed ret=%d\n", ret); | ||
704 | break; | ||
705 | } | ||
706 | } | ||
707 | |||
708 | ahash_request_free(req); | ||
709 | |||
710 | out: | ||
711 | crypto_free_ahash(tfm); | ||
712 | } | ||
713 | |||
461 | static void test_available(void) | 714 | static void test_available(void) |
462 | { | 715 | { |
463 | char **name = check; | 716 | char **name = check; |
@@ -881,9 +1134,87 @@ static int do_test(int m) | |||
881 | test_hash_speed("rmd320", sec, generic_hash_speed_template); | 1134 | test_hash_speed("rmd320", sec, generic_hash_speed_template); |
882 | if (mode > 300 && mode < 400) break; | 1135 | if (mode > 300 && mode < 400) break; |
883 | 1136 | ||
1137 | case 318: | ||
1138 | test_hash_speed("ghash-generic", sec, hash_speed_template_16); | ||
1139 | if (mode > 300 && mode < 400) break; | ||
1140 | |||
884 | case 399: | 1141 | case 399: |
885 | break; | 1142 | break; |
886 | 1143 | ||
1144 | case 400: | ||
1145 | /* fall through */ | ||
1146 | |||
1147 | case 401: | ||
1148 | test_ahash_speed("md4", sec, generic_hash_speed_template); | ||
1149 | if (mode > 400 && mode < 500) break; | ||
1150 | |||
1151 | case 402: | ||
1152 | test_ahash_speed("md5", sec, generic_hash_speed_template); | ||
1153 | if (mode > 400 && mode < 500) break; | ||
1154 | |||
1155 | case 403: | ||
1156 | test_ahash_speed("sha1", sec, generic_hash_speed_template); | ||
1157 | if (mode > 400 && mode < 500) break; | ||
1158 | |||
1159 | case 404: | ||
1160 | test_ahash_speed("sha256", sec, generic_hash_speed_template); | ||
1161 | if (mode > 400 && mode < 500) break; | ||
1162 | |||
1163 | case 405: | ||
1164 | test_ahash_speed("sha384", sec, generic_hash_speed_template); | ||
1165 | if (mode > 400 && mode < 500) break; | ||
1166 | |||
1167 | case 406: | ||
1168 | test_ahash_speed("sha512", sec, generic_hash_speed_template); | ||
1169 | if (mode > 400 && mode < 500) break; | ||
1170 | |||
1171 | case 407: | ||
1172 | test_ahash_speed("wp256", sec, generic_hash_speed_template); | ||
1173 | if (mode > 400 && mode < 500) break; | ||
1174 | |||
1175 | case 408: | ||
1176 | test_ahash_speed("wp384", sec, generic_hash_speed_template); | ||
1177 | if (mode > 400 && mode < 500) break; | ||
1178 | |||
1179 | case 409: | ||
1180 | test_ahash_speed("wp512", sec, generic_hash_speed_template); | ||
1181 | if (mode > 400 && mode < 500) break; | ||
1182 | |||
1183 | case 410: | ||
1184 | test_ahash_speed("tgr128", sec, generic_hash_speed_template); | ||
1185 | if (mode > 400 && mode < 500) break; | ||
1186 | |||
1187 | case 411: | ||
1188 | test_ahash_speed("tgr160", sec, generic_hash_speed_template); | ||
1189 | if (mode > 400 && mode < 500) break; | ||
1190 | |||
1191 | case 412: | ||
1192 | test_ahash_speed("tgr192", sec, generic_hash_speed_template); | ||
1193 | if (mode > 400 && mode < 500) break; | ||
1194 | |||
1195 | case 413: | ||
1196 | test_ahash_speed("sha224", sec, generic_hash_speed_template); | ||
1197 | if (mode > 400 && mode < 500) break; | ||
1198 | |||
1199 | case 414: | ||
1200 | test_ahash_speed("rmd128", sec, generic_hash_speed_template); | ||
1201 | if (mode > 400 && mode < 500) break; | ||
1202 | |||
1203 | case 415: | ||
1204 | test_ahash_speed("rmd160", sec, generic_hash_speed_template); | ||
1205 | if (mode > 400 && mode < 500) break; | ||
1206 | |||
1207 | case 416: | ||
1208 | test_ahash_speed("rmd256", sec, generic_hash_speed_template); | ||
1209 | if (mode > 400 && mode < 500) break; | ||
1210 | |||
1211 | case 417: | ||
1212 | test_ahash_speed("rmd320", sec, generic_hash_speed_template); | ||
1213 | if (mode > 400 && mode < 500) break; | ||
1214 | |||
1215 | case 499: | ||
1216 | break; | ||
1217 | |||
887 | case 1000: | 1218 | case 1000: |
888 | test_available(); | 1219 | test_available(); |
889 | break; | 1220 | break; |
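
The new 400-series cases reuse tcrypt's fallthrough dispatch: mode 400 falls through every ahash speed test in the series, while a specific mode (say 403) runs only its own case before the range check breaks out. A minimal runnable userspace sketch of the idiom (run_test() is a hypothetical stand-in for test_ahash_speed()):

	#include <stdio.h>

	static void run_test(const char *alg)
	{
		printf("testing %s\n", alg);
	}

	static void do_test(int mode)
	{
		switch (mode) {
		case 400:
			/* fall through: mode 400 runs the whole series */
		case 401:
			run_test("md4");
			if (mode > 400 && mode < 500)
				break;	/* a specific mode stops after its own test */
		case 402:
			run_test("md5");
			if (mode > 400 && mode < 500)
				break;
		case 499:
			break;
		}
	}

	int main(void)
	{
		do_test(400);	/* runs md4 and md5 */
		do_test(402);	/* runs md5 only */
		return 0;
	}
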
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 966bbfaf95b1..10cb925132c9 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h | |||
@@ -25,6 +25,7 @@ struct cipher_speed_template { | |||
25 | struct hash_speed { | 25 | struct hash_speed { |
26 | unsigned int blen; /* buffer length */ | 26 | unsigned int blen; /* buffer length */ |
27 | unsigned int plen; /* per-update length */ | 27 | unsigned int plen; /* per-update length */ |
28 | unsigned int klen; /* key length */ | ||
28 | }; | 29 | }; |
29 | 30 | ||
30 | /* | 31 | /* |
@@ -83,4 +84,32 @@ static struct hash_speed generic_hash_speed_template[] = { | |||
83 | { .blen = 0, .plen = 0, } | 84 | { .blen = 0, .plen = 0, } |
84 | }; | 85 | }; |
85 | 86 | ||
87 | static struct hash_speed hash_speed_template_16[] = { | ||
88 | { .blen = 16, .plen = 16, .klen = 16, }, | ||
89 | { .blen = 64, .plen = 16, .klen = 16, }, | ||
90 | { .blen = 64, .plen = 64, .klen = 16, }, | ||
91 | { .blen = 256, .plen = 16, .klen = 16, }, | ||
92 | { .blen = 256, .plen = 64, .klen = 16, }, | ||
93 | { .blen = 256, .plen = 256, .klen = 16, }, | ||
94 | { .blen = 1024, .plen = 16, .klen = 16, }, | ||
95 | { .blen = 1024, .plen = 256, .klen = 16, }, | ||
96 | { .blen = 1024, .plen = 1024, .klen = 16, }, | ||
97 | { .blen = 2048, .plen = 16, .klen = 16, }, | ||
98 | { .blen = 2048, .plen = 256, .klen = 16, }, | ||
99 | { .blen = 2048, .plen = 1024, .klen = 16, }, | ||
100 | { .blen = 2048, .plen = 2048, .klen = 16, }, | ||
101 | { .blen = 4096, .plen = 16, .klen = 16, }, | ||
102 | { .blen = 4096, .plen = 256, .klen = 16, }, | ||
103 | { .blen = 4096, .plen = 1024, .klen = 16, }, | ||
104 | { .blen = 4096, .plen = 4096, .klen = 16, }, | ||
105 | { .blen = 8192, .plen = 16, .klen = 16, }, | ||
106 | { .blen = 8192, .plen = 256, .klen = 16, }, | ||
107 | { .blen = 8192, .plen = 1024, .klen = 16, }, | ||
108 | { .blen = 8192, .plen = 4096, .klen = 16, }, | ||
109 | { .blen = 8192, .plen = 8192, .klen = 16, }, | ||
110 | |||
111 | /* End marker */ | ||
112 | { .blen = 0, .plen = 0, .klen = 0, } | ||
113 | }; | ||
114 | |||
86 | #endif /* _CRYPTO_TCRYPT_H */ | 115 | #endif /* _CRYPTO_TCRYPT_H */ |
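
The new klen field exists because ghash (mode 318 above) is a keyed hash: hash_speed_template_16 supplies a 16-byte key alongside each buffer/update pair, and the all-zero entry remains the end marker the speed loop stops on. A minimal sketch of how such a template is walked (the printf loop is a hypothetical stand-in; the real consumer is test_(a)hash_speed() in tcrypt.c):

	#include <stdio.h>

	struct hash_speed {
		unsigned int blen;	/* buffer length */
		unsigned int plen;	/* per-update length */
		unsigned int klen;	/* key length */
	};

	static const struct hash_speed template_16[] = {
		{ .blen = 16, .plen = 16, .klen = 16, },
		{ .blen = 64, .plen = 16, .klen = 16, },
		{ .blen = 0,  .plen = 0,  .klen = 0,  },	/* end marker */
	};

	int main(void)
	{
		for (int i = 0; template_16[i].blen; i++)
			printf("hash %u bytes in %u-byte updates, %u-byte key\n",
			       template_16[i].blen, template_16[i].plen,
			       template_16[i].klen);
		return 0;
	}
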
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index c494d7610be1..abd980c729eb 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -22,6 +22,17 @@ | |||
22 | #include <crypto/rng.h> | 22 | #include <crypto/rng.h> |
23 | 23 | ||
24 | #include "internal.h" | 24 | #include "internal.h" |
25 | |||
26 | #ifndef CONFIG_CRYPTO_MANAGER_TESTS | ||
27 | |||
28 | /* a perfect nop */ | ||
29 | int alg_test(const char *driver, const char *alg, u32 type, u32 mask) | ||
30 | { | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | #else | ||
35 | |||
25 | #include "testmgr.h" | 36 | #include "testmgr.h" |
26 | 37 | ||
27 | /* | 38 | /* |
@@ -153,8 +164,21 @@ static void testmgr_free_buf(char *buf[XBUFSIZE]) | |||
153 | free_page((unsigned long)buf[i]); | 164 | free_page((unsigned long)buf[i]); |
154 | } | 165 | } |
155 | 166 | ||
167 | static int do_one_async_hash_op(struct ahash_request *req, | ||
168 | struct tcrypt_result *tr, | ||
169 | int ret) | ||
170 | { | ||
171 | if (ret == -EINPROGRESS || ret == -EBUSY) { | ||
172 | ret = wait_for_completion_interruptible(&tr->completion); | ||
173 | if (!ret) | ||
174 | ret = tr->err; | ||
175 | INIT_COMPLETION(tr->completion); | ||
176 | } | ||
177 | return ret; | ||
178 | } | ||
179 | |||
156 | static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | 180 | static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, |
157 | unsigned int tcount) | 181 | unsigned int tcount, bool use_digest) |
158 | { | 182 | { |
159 | const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); | 183 | const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); |
160 | unsigned int i, j, k, temp; | 184 | unsigned int i, j, k, temp; |
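
do_one_async_hash_op() factors out the async completion wait: -EINPROGRESS and -EBUSY mean the driver accepted the request and the real status will arrive through the request's completion callback. For context, the callback side (testmgr's tcrypt_complete(), which is not part of this hunk; sketched here from memory, so treat the details as approximate) stores the status in tr->err and signals tr->completion:

	static void tcrypt_complete(struct crypto_async_request *req, int err)
	{
		struct tcrypt_result *res = req->data;

		if (err == -EINPROGRESS)
			return;	/* request moved off the backlog; keep waiting */

		res->err = err;
		complete(&res->completion);
	}
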
@@ -206,23 +230,36 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
206 | } | 230 | } |
207 | 231 | ||
208 | ahash_request_set_crypt(req, sg, result, template[i].psize); | 232 | ahash_request_set_crypt(req, sg, result, template[i].psize); |
209 | ret = crypto_ahash_digest(req); | 233 | if (use_digest) { |
210 | switch (ret) { | 234 | ret = do_one_async_hash_op(req, &tresult, |
211 | case 0: | 235 | crypto_ahash_digest(req)); |
212 | break; | 236 | if (ret) { |
213 | case -EINPROGRESS: | 237 | pr_err("alg: hash: digest failed on test %d " |
214 | case -EBUSY: | 238 | "for %s: ret=%d\n", j, algo, -ret); |
215 | ret = wait_for_completion_interruptible( | 239 | goto out; |
216 | &tresult.completion); | 240 | } |
217 | if (!ret && !(ret = tresult.err)) { | 241 | } else { |
218 | INIT_COMPLETION(tresult.completion); | 242 | ret = do_one_async_hash_op(req, &tresult, |
219 | break; | 243 | crypto_ahash_init(req)); |
244 | if (ret) { | ||
245 | pr_err("alt: hash: init failed on test %d " | ||
246 | "for %s: ret=%d\n", j, algo, -ret); | ||
247 | goto out; | ||
248 | } | ||
249 | ret = do_one_async_hash_op(req, &tresult, | ||
250 | crypto_ahash_update(req)); | ||
251 | if (ret) { | ||
252 | pr_err("alt: hash: update failed on test %d " | ||
253 | "for %s: ret=%d\n", j, algo, -ret); | ||
254 | goto out; | ||
255 | } | ||
256 | ret = do_one_async_hash_op(req, &tresult, | ||
257 | crypto_ahash_final(req)); | ||
258 | if (ret) { | ||
259 | pr_err("alt: hash: final failed on test %d " | ||
260 | "for %s: ret=%d\n", j, algo, -ret); | ||
261 | goto out; | ||
220 | } | 262 | } |
221 | /* fall through */ | ||
222 | default: | ||
223 | printk(KERN_ERR "alg: hash: digest failed on test %d " | ||
224 | "for %s: ret=%d\n", j, algo, -ret); | ||
225 | goto out; | ||
226 | } | 263 | } |
227 | 264 | ||
228 | if (memcmp(result, template[i].digest, | 265 | if (memcmp(result, template[i].digest, |
@@ -1402,7 +1439,11 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, | |||
1402 | return PTR_ERR(tfm); | 1439 | return PTR_ERR(tfm); |
1403 | } | 1440 | } |
1404 | 1441 | ||
1405 | err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count); | 1442 | err = test_hash(tfm, desc->suite.hash.vecs, |
1443 | desc->suite.hash.count, true); | ||
1444 | if (!err) | ||
1445 | err = test_hash(tfm, desc->suite.hash.vecs, | ||
1446 | desc->suite.hash.count, false); | ||
1406 | 1447 | ||
1407 | crypto_free_ahash(tfm); | 1448 | crypto_free_ahash(tfm); |
1408 | return err; | 1449 | return err; |
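
alg_test_hash() now runs every vector twice: once through the one-shot digest and once through the equivalent init/update/final chain, so drivers that implement the two paths separately both get covered. A hedged kernel-style sketch of the two call sequences being compared (check_both_paths() is hypothetical; in testmgr each call's async return is first funneled through do_one_async_hash_op()):

	#include <crypto/hash.h>

	static int check_both_paths(struct ahash_request *req)
	{
		int ret;

		ret = crypto_ahash_digest(req);		/* use_digest == true  */
		if (ret)
			return ret;

		ret = crypto_ahash_init(req);		/* use_digest == false */
		if (!ret)
			ret = crypto_ahash_update(req);
		if (!ret)
			ret = crypto_ahash_final(req);
		return ret;
	}

Both paths must yield the same digest for the same data, which is exactly what the memcmp() against template[i].digest verifies.
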
@@ -2500,4 +2541,7 @@ notest: | |||
2500 | non_fips_alg: | 2541 | non_fips_alg: |
2501 | return -EINVAL; | 2542 | return -EINVAL; |
2502 | } | 2543 | } |
2544 | |||
2545 | #endif /* CONFIG_CRYPTO_MANAGER_TESTS */ | ||
2546 | |||
2503 | EXPORT_SYMBOL_GPL(alg_test); | 2547 | EXPORT_SYMBOL_GPL(alg_test); |
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index fb765173d41c..74e35377fd30 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
@@ -1669,17 +1669,73 @@ static struct hash_testvec aes_xcbc128_tv_template[] = { | |||
1669 | } | 1669 | } |
1670 | }; | 1670 | }; |
1671 | 1671 | ||
1672 | #define VMAC_AES_TEST_VECTORS 1 | 1672 | #define VMAC_AES_TEST_VECTORS 8 |
1673 | static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01', | 1673 | static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01', |
1674 | '\x02', '\x03', '\x02', '\x02', | 1674 | '\x02', '\x03', '\x02', '\x02', |
1675 | '\x02', '\x04', '\x01', '\x07', | 1675 | '\x02', '\x04', '\x01', '\x07', |
1676 | '\x04', '\x01', '\x04', '\x03',}; | 1676 | '\x04', '\x01', '\x04', '\x03',}; |
1677 | static char vmac_string2[128] = {'a', 'b', 'c',}; | ||
1678 | static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c', | ||
1679 | 'a', 'b', 'c', 'a', 'b', 'c', | ||
1680 | 'a', 'b', 'c', 'a', 'b', 'c', | ||
1681 | 'a', 'b', 'c', 'a', 'b', 'c', | ||
1682 | 'a', 'b', 'c', 'a', 'b', 'c', | ||
1683 | 'a', 'b', 'c', 'a', 'b', 'c', | ||
1684 | 'a', 'b', 'c', 'a', 'b', 'c', | ||
1685 | 'a', 'b', 'c', 'a', 'b', 'c', | ||
1686 | }; | ||
1687 | |||
1677 | static struct hash_testvec aes_vmac128_tv_template[] = { | 1688 | static struct hash_testvec aes_vmac128_tv_template[] = { |
1678 | { | 1689 | { |
1690 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
1691 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
1692 | .plaintext = NULL, | ||
1693 | .digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54", | ||
1694 | .psize = 0, | ||
1695 | .ksize = 16, | ||
1696 | }, { | ||
1697 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
1698 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
1699 | .plaintext = vmac_string1, | ||
1700 | .digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1", | ||
1701 | .psize = 128, | ||
1702 | .ksize = 16, | ||
1703 | }, { | ||
1704 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
1705 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
1706 | .plaintext = vmac_string2, | ||
1707 | .digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d", | ||
1708 | .psize = 128, | ||
1709 | .ksize = 16, | ||
1710 | }, { | ||
1679 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | 1711 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" |
1680 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | 1712 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", |
1681 | .plaintext = vmac_string, | 1713 | .plaintext = vmac_string3, |
1682 | .digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7", | 1714 | .digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19", |
1715 | .psize = 128, | ||
1716 | .ksize = 16, | ||
1717 | }, { | ||
1718 | .key = "abcdefghijklmnop", | ||
1719 | .plaintext = NULL, | ||
1720 | .digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84", | ||
1721 | .psize = 0, | ||
1722 | .ksize = 16, | ||
1723 | }, { | ||
1724 | .key = "abcdefghijklmnop", | ||
1725 | .plaintext = vmac_string1, | ||
1726 | .digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2", | ||
1727 | .psize = 128, | ||
1728 | .ksize = 16, | ||
1729 | }, { | ||
1730 | .key = "abcdefghijklmnop", | ||
1731 | .plaintext = vmac_string2, | ||
1732 | .digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf", | ||
1733 | .psize = 128, | ||
1734 | .ksize = 16, | ||
1735 | }, { | ||
1736 | .key = "abcdefghijklmnop", | ||
1737 | .plaintext = vmac_string3, | ||
1738 | .digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4", | ||
1683 | .psize = 128, | 1739 | .psize = 128, |
1684 | .ksize = 16, | 1740 | .ksize = 16, |
1685 | }, | 1741 | }, |
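
Note that the vmac_string buffers are declared as full 128-byte arrays with only the leading bytes initialized; C zero-fills the rest, so vmac_string2 is "abc" followed by 125 NUL bytes, and psize stays 128 for every non-empty vector. The fields each entry fills are a subset of testmgr's hash_testvec, roughly of this shape (a rough sketch with approximate field types; the real struct has further members, e.g. for scatterlist splitting):

	struct hash_testvec {
		char *key;		/* NULL for unkeyed hashes */
		char *plaintext;
		char *digest;		/* expected output */
		unsigned char psize;	/* plaintext length */
		unsigned char ksize;	/* key length */
	};
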
diff --git a/crypto/twofish.c b/crypto/twofish_generic.c index dfcda231f87a..1f07b843e07c 100644 --- a/crypto/twofish.c +++ b/crypto/twofish_generic.c | |||
@@ -212,3 +212,4 @@ module_exit(twofish_mod_fini); | |||
212 | 212 | ||
213 | MODULE_LICENSE("GPL"); | 213 | MODULE_LICENSE("GPL"); |
214 | MODULE_DESCRIPTION ("Twofish Cipher Algorithm"); | 214 | MODULE_DESCRIPTION ("Twofish Cipher Algorithm"); |
215 | MODULE_ALIAS("twofish"); | ||
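
The rename from crypto/twofish.c to crypto/twofish_generic.c changes the module's on-disk name, so the new MODULE_ALIAS("twofish") is what keeps "modprobe twofish" and in-kernel request_module("twofish") resolving to twofish_generic.ko. Skeleton of the pattern (registration bodies elided; the real init/exit call crypto_register_alg()/crypto_unregister_alg()):

	#include <linux/module.h>

	static int __init twofish_mod_init(void)
	{
		return 0;	/* real code: crypto_register_alg(&alg) */
	}

	static void __exit twofish_mod_fini(void)
	{
		/* real code: crypto_unregister_alg(&alg) */
	}

	module_init(twofish_mod_init);
	module_exit(twofish_mod_fini);

	MODULE_LICENSE("GPL");
	MODULE_ALIAS("twofish");	/* old module name stays loadable */
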
diff --git a/crypto/vmac.c b/crypto/vmac.c index 0a9468e575de..0999274a27ac 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c | |||
@@ -43,6 +43,8 @@ const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */ | |||
43 | const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ | 43 | const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ |
44 | const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ | 44 | const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ |
45 | 45 | ||
46 | #define pe64_to_cpup le64_to_cpup /* Prefer little endian */ | ||
47 | |||
46 | #ifdef __LITTLE_ENDIAN | 48 | #ifdef __LITTLE_ENDIAN |
47 | #define INDEX_HIGH 1 | 49 | #define INDEX_HIGH 1 |
48 | #define INDEX_LOW 0 | 50 | #define INDEX_LOW 0 |
@@ -110,8 +112,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ | |||
110 | int i; u64 th, tl; \ | 112 | int i; u64 th, tl; \ |
111 | rh = rl = 0; \ | 113 | rh = rl = 0; \ |
112 | for (i = 0; i < nw; i += 2) { \ | 114 | for (i = 0; i < nw; i += 2) { \ |
113 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ | 115 | MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ |
114 | le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | 116 | pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ |
115 | ADD128(rh, rl, th, tl); \ | 117 | ADD128(rh, rl, th, tl); \ |
116 | } \ | 118 | } \ |
117 | } while (0) | 119 | } while (0) |
@@ -121,11 +123,11 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ | |||
121 | int i; u64 th, tl; \ | 123 | int i; u64 th, tl; \ |
122 | rh1 = rl1 = rh = rl = 0; \ | 124 | rh1 = rl1 = rh = rl = 0; \ |
123 | for (i = 0; i < nw; i += 2) { \ | 125 | for (i = 0; i < nw; i += 2) { \ |
124 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ | 126 | MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ |
125 | le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | 127 | pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ |
126 | ADD128(rh, rl, th, tl); \ | 128 | ADD128(rh, rl, th, tl); \ |
127 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \ | 129 | MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \ |
128 | le64_to_cpup((mp)+i+1)+(kp)[i+3]); \ | 130 | pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \ |
129 | ADD128(rh1, rl1, th, tl); \ | 131 | ADD128(rh1, rl1, th, tl); \ |
130 | } \ | 132 | } \ |
131 | } while (0) | 133 | } while (0) |
@@ -136,17 +138,17 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ | |||
136 | int i; u64 th, tl; \ | 138 | int i; u64 th, tl; \ |
137 | rh = rl = 0; \ | 139 | rh = rl = 0; \ |
138 | for (i = 0; i < nw; i += 8) { \ | 140 | for (i = 0; i < nw; i += 8) { \ |
139 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ | 141 | MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ |
140 | le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | 142 | pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ |
141 | ADD128(rh, rl, th, tl); \ | 143 | ADD128(rh, rl, th, tl); \ |
142 | MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \ | 144 | MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \ |
143 | le64_to_cpup((mp)+i+3)+(kp)[i+3]); \ | 145 | pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \ |
144 | ADD128(rh, rl, th, tl); \ | 146 | ADD128(rh, rl, th, tl); \ |
145 | MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \ | 147 | MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \ |
146 | le64_to_cpup((mp)+i+5)+(kp)[i+5]); \ | 148 | pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \ |
147 | ADD128(rh, rl, th, tl); \ | 149 | ADD128(rh, rl, th, tl); \ |
148 | MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \ | 150 | MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \ |
149 | le64_to_cpup((mp)+i+7)+(kp)[i+7]); \ | 151 | pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \ |
150 | ADD128(rh, rl, th, tl); \ | 152 | ADD128(rh, rl, th, tl); \ |
151 | } \ | 153 | } \ |
152 | } while (0) | 154 | } while (0) |
@@ -156,29 +158,29 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ | |||
156 | int i; u64 th, tl; \ | 158 | int i; u64 th, tl; \ |
157 | rh1 = rl1 = rh = rl = 0; \ | 159 | rh1 = rl1 = rh = rl = 0; \ |
158 | for (i = 0; i < nw; i += 8) { \ | 160 | for (i = 0; i < nw; i += 8) { \ |
159 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ | 161 | MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ |
160 | le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | 162 | pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ |
161 | ADD128(rh, rl, th, tl); \ | 163 | ADD128(rh, rl, th, tl); \ |
162 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \ | 164 | MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \ |
163 | le64_to_cpup((mp)+i+1)+(kp)[i+3]); \ | 165 | pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \ |
164 | ADD128(rh1, rl1, th, tl); \ | 166 | ADD128(rh1, rl1, th, tl); \ |
165 | MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \ | 167 | MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \ |
166 | le64_to_cpup((mp)+i+3)+(kp)[i+3]); \ | 168 | pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \ |
167 | ADD128(rh, rl, th, tl); \ | 169 | ADD128(rh, rl, th, tl); \ |
168 | MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \ | 170 | MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \ |
169 | le64_to_cpup((mp)+i+3)+(kp)[i+5]); \ | 171 | pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \ |
170 | ADD128(rh1, rl1, th, tl); \ | 172 | ADD128(rh1, rl1, th, tl); \ |
171 | MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \ | 173 | MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \ |
172 | le64_to_cpup((mp)+i+5)+(kp)[i+5]); \ | 174 | pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \ |
173 | ADD128(rh, rl, th, tl); \ | 175 | ADD128(rh, rl, th, tl); \ |
174 | MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \ | 176 | MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \ |
175 | le64_to_cpup((mp)+i+5)+(kp)[i+7]); \ | 177 | pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \ |
176 | ADD128(rh1, rl1, th, tl); \ | 178 | ADD128(rh1, rl1, th, tl); \ |
177 | MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \ | 179 | MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \ |
178 | le64_to_cpup((mp)+i+7)+(kp)[i+7]); \ | 180 | pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \ |
179 | ADD128(rh, rl, th, tl); \ | 181 | ADD128(rh, rl, th, tl); \ |
180 | MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \ | 182 | MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \ |
181 | le64_to_cpup((mp)+i+7)+(kp)[i+9]); \ | 183 | pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \ |
182 | ADD128(rh1, rl1, th, tl); \ | 184 | ADD128(rh1, rl1, th, tl); \ |
183 | } \ | 185 | } \ |
184 | } while (0) | 186 | } while (0) |
@@ -216,8 +218,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ | |||
216 | int i; \ | 218 | int i; \ |
217 | rh = rl = t = 0; \ | 219 | rh = rl = t = 0; \ |
218 | for (i = 0; i < nw; i += 2) { \ | 220 | for (i = 0; i < nw; i += 2) { \ |
219 | t1 = le64_to_cpup(mp+i) + kp[i]; \ | 221 | t1 = pe64_to_cpup(mp+i) + kp[i]; \ |
220 | t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \ | 222 | t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \ |
221 | m2 = MUL32(t1 >> 32, t2); \ | 223 | m2 = MUL32(t1 >> 32, t2); \ |
222 | m1 = MUL32(t1, t2 >> 32); \ | 224 | m1 = MUL32(t1, t2 >> 32); \ |
223 | ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \ | 225 | ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \ |
@@ -322,8 +324,7 @@ static void vhash_abort(struct vmac_ctx *ctx) | |||
322 | ctx->first_block_processed = 0; | 324 | ctx->first_block_processed = 0; |
323 | } | 325 | } |
324 | 326 | ||
325 | static u64 l3hash(u64 p1, u64 p2, | 327 | static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len) |
326 | u64 k1, u64 k2, u64 len) | ||
327 | { | 328 | { |
328 | u64 rh, rl, t, z = 0; | 329 | u64 rh, rl, t, z = 0; |
329 | 330 | ||
@@ -474,7 +475,7 @@ static u64 vmac(unsigned char m[], unsigned int mbytes, | |||
474 | } | 475 | } |
475 | p = be64_to_cpup(out_p + i); | 476 | p = be64_to_cpup(out_p + i); |
476 | h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx); | 477 | h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx); |
477 | return p + h; | 478 | return le64_to_cpu(p + h); |
478 | } | 479 | } |
479 | 480 | ||
480 | static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx) | 481 | static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx) |
@@ -549,10 +550,6 @@ static int vmac_setkey(struct crypto_shash *parent, | |||
549 | 550 | ||
550 | static int vmac_init(struct shash_desc *pdesc) | 551 | static int vmac_init(struct shash_desc *pdesc) |
551 | { | 552 | { |
552 | struct crypto_shash *parent = pdesc->tfm; | ||
553 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); | ||
554 | |||
555 | memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); | ||
556 | return 0; | 553 | return 0; |
557 | } | 554 | } |
558 | 555 | ||
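
The pe64_to_cpup ("prefer little endian") alias makes the intent explicit: VMAC's NH message words are defined as little-endian regardless of host byte order, and the matching le64_to_cpu() on the final tag keeps the output byte order consistent too. The vmac_init() change drops the per-call context wipe, plausibly because the key-derived state set up in vmac_setkey() lives in the same struct, so zeroing it on every init would destroy it. A runnable userspace illustration of the endianness property the alias names (le64_to_cpup_demo() is a stand-in for the kernel's le64_to_cpup()):

	#include <stdint.h>
	#include <stdio.h>

	/* Build the value from bytes explicitly, so the result is the same
	 * on any host -- the property pe64_to_cpup is naming. */
	static uint64_t le64_to_cpup_demo(const unsigned char *p)
	{
		uint64_t v = 0;

		for (int i = 7; i >= 0; i--)
			v = (v << 8) | p[i];
		return v;
	}

	int main(void)
	{
		const unsigned char msg[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

		/* prints 0x0807060504030201 on big- and little-endian hosts alike */
		printf("0x%016llx\n", (unsigned long long)le64_to_cpup_demo(msg));
		return 0;
	}
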
diff --git a/crypto/xts.c b/crypto/xts.c index d87b0f3102c3..555ecaab1e54 100644 --- a/crypto/xts.c +++ b/crypto/xts.c | |||
@@ -224,7 +224,7 @@ static struct crypto_instance *alloc(struct rtattr **tb) | |||
224 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 224 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, |
225 | CRYPTO_ALG_TYPE_MASK); | 225 | CRYPTO_ALG_TYPE_MASK); |
226 | if (IS_ERR(alg)) | 226 | if (IS_ERR(alg)) |
227 | return ERR_PTR(PTR_ERR(alg)); | 227 | return ERR_CAST(alg); |
228 | 228 | ||
229 | inst = crypto_alloc_instance("xts", alg); | 229 | inst = crypto_alloc_instance("xts", alg); |
230 | if (IS_ERR(inst)) | 230 | if (IS_ERR(inst)) |
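
ERR_CAST(alg) and ERR_PTR(PTR_ERR(alg)) produce the same error pointer; the former simply states that an error value is being carried from one pointer type to another. For reference, the shapes of the three helpers from <linux/err.h> (paraphrased; the kernel versions carry __must_check/__force annotations):

	static inline void *ERR_PTR(long error)
	{
		return (void *)error;
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;
	}

	static inline void *ERR_CAST(const void *ptr)
	{
		/* cast away const while preserving the encoded errno */
		return (void *)ptr;
	}

So the xts change is purely cosmetic: same value, clearer intent, one fewer round trip through long.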