author     Linus Torvalds <torvalds@linux-foundation.org>   2014-01-23 21:11:00 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-01-23 21:11:00 -0500
commit     13c789a6b219aa23f917466c7e630566106b14c2 (patch)
tree       ad9e096ded01f433306bcd40af3a3f8dc1ddea6f /crypto
parent     6dd9158ae8577372aa433e6b0eae3c3d4caa5439 (diff)
parent     79ba451d66ca8402c8d052ceb50e359ddc5e1161 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"Here is the crypto update for 3.14:
- Improved crypto_memneq helper
- Use crypto_memneq in arch-specific crypto code
- Replaced orphaned DCP driver with Freescale MXS DCP driver
- Added AVX/AVX2 version of AESNI-GCM encode and decode
- Added AMD Cryptographic Coprocessor (CCP) driver
- Misc fixes"
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (41 commits)
crypto: aesni - fix build on x86 (32bit)
crypto: mxs - Fix sparse non static symbol warning
crypto: ccp - CCP device enabled/disabled changes
crypto: ccp - Cleanup hash invocation calls
crypto: ccp - Change data length declarations to u64
crypto: ccp - Check for caller result area before using it
crypto: ccp - Cleanup scatterlist usage
crypto: ccp - Apply appropriate gfp_t type to memory allocations
crypto: drivers - Sort drivers/crypto/Makefile
ARM: mxs: dts: Enable DCP for MXS
crypto: mxs - Add Freescale MXS DCP driver
crypto: mxs - Remove the old DCP driver
crypto: ahash - Fully restore ahash request before completing
crypto: aesni - fix build on x86 (32bit)
crypto: talitos - Remove redundant dev_set_drvdata
crypto: ccp - Remove redundant dev_set_drvdata
crypto: crypto4xx - Remove redundant dev_set_drvdata
crypto: caam - simplify and harden key parsing
crypto: omap-sham - Fix Polling mode for larger blocks
crypto: tcrypt - Added speed tests for AEAD crypto algorithms in tcrypt test suite
...
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Makefile  |   5
-rw-r--r--  crypto/ahash.c   |   5
-rw-r--r--  crypto/memneq.c  |  80
-rw-r--r--  crypto/pcrypt.c  |   2
-rw-r--r--  crypto/tcrypt.c  | 270
-rw-r--r--  crypto/tcrypt.h  |  10
6 files changed, 340 insertions, 32 deletions
diff --git a/crypto/Makefile b/crypto/Makefile
index 989c510da8cc..b29402a7b9b5 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -2,11 +2,6 @@
 # Cryptographic API
 #
 
-# memneq MUST be built with -Os or -O0 to prevent early-return optimizations
-# that will defeat memneq's actual purpose to prevent timing attacks.
-CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3
-CFLAGS_memneq.o := -Os
-
 obj-$(CONFIG_CRYPTO) += crypto.o
 crypto-y := api.o cipher.o compress.o memneq.o
 
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 793a27f2493e..a92dc382f781 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -213,7 +213,10 @@ static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
 
         ahash_op_unaligned_finish(areq, err);
 
-        complete(data, err);
+        areq->base.complete = complete;
+        areq->base.data = data;
+
+        complete(&areq->base, err);
 }
 
 static int ahash_op_unaligned(struct ahash_request *req,
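
The ahash.c hunk above addresses callback bookkeeping: the unaligned-operation path temporarily points the request's completion at its own handler, and before invoking the original completion it must put the caller's complete/data pointers back so the caller sees its request exactly as it submitted it. A rough sketch of that save-and-restore pattern, using hypothetical names rather than the kernel's internal helpers:

    /* Hypothetical private state a wrapper keeps while it owns the request. */
    struct wrapper_ctx {
            crypto_completion_t saved_complete;
            void *saved_data;
    };

    static void wrapper_done(struct crypto_async_request *base, int err)
    {
            struct ahash_request *areq = base->data;
            struct wrapper_ctx *ctx = ahash_request_ctx(areq);  /* illustrative */

            /* Restore what the original submitter installed ... */
            areq->base.complete = ctx->saved_complete;
            areq->base.data = ctx->saved_data;

            /* ... and only then hand the request back to it. */
            areq->base.complete(&areq->base, err);
    }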
diff --git a/crypto/memneq.c b/crypto/memneq.c
index cd0162221c14..afed1bd16aee 100644
--- a/crypto/memneq.c
+++ b/crypto/memneq.c
@@ -72,6 +72,7 @@ __crypto_memneq_generic(const void *a, const void *b, size_t size)
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
         while (size >= sizeof(unsigned long)) {
                 neq |= *(unsigned long *)a ^ *(unsigned long *)b;
+                OPTIMIZER_HIDE_VAR(neq);
                 a += sizeof(unsigned long);
                 b += sizeof(unsigned long);
                 size -= sizeof(unsigned long);
@@ -79,6 +80,7 @@ __crypto_memneq_generic(const void *a, const void *b, size_t size)
 #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
         while (size > 0) {
                 neq |= *(unsigned char *)a ^ *(unsigned char *)b;
+                OPTIMIZER_HIDE_VAR(neq);
                 a += 1;
                 b += 1;
                 size -= 1;
@@ -89,33 +91,61 @@ __crypto_memneq_generic(const void *a, const void *b, size_t size)
 /* Loop-free fast-path for frequently used 16-byte size */
 static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
 {
+        unsigned long neq = 0;
+
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-        if (sizeof(unsigned long) == 8)
-                return ((*(unsigned long *)(a)   ^ *(unsigned long *)(b))
-                      | (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8)));
-        else if (sizeof(unsigned int) == 4)
-                return ((*(unsigned int *)(a)    ^ *(unsigned int *)(b))
-                      | (*(unsigned int *)(a+4)  ^ *(unsigned int *)(b+4))
-                      | (*(unsigned int *)(a+8)  ^ *(unsigned int *)(b+8))
-                      | (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12)));
-        else
+        if (sizeof(unsigned long) == 8) {
+                neq |= *(unsigned long *)(a)   ^ *(unsigned long *)(b);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned long *)(a+8) ^ *(unsigned long *)(b+8);
+                OPTIMIZER_HIDE_VAR(neq);
+        } else if (sizeof(unsigned int) == 4) {
+                neq |= *(unsigned int *)(a)    ^ *(unsigned int *)(b);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned int *)(a+4)  ^ *(unsigned int *)(b+4);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned int *)(a+8)  ^ *(unsigned int *)(b+8);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned int *)(a+12) ^ *(unsigned int *)(b+12);
+                OPTIMIZER_HIDE_VAR(neq);
+        } else
 #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
-        return ((*(unsigned char *)(a)    ^ *(unsigned char *)(b))
-              | (*(unsigned char *)(a+1)  ^ *(unsigned char *)(b+1))
-              | (*(unsigned char *)(a+2)  ^ *(unsigned char *)(b+2))
-              | (*(unsigned char *)(a+3)  ^ *(unsigned char *)(b+3))
-              | (*(unsigned char *)(a+4)  ^ *(unsigned char *)(b+4))
-              | (*(unsigned char *)(a+5)  ^ *(unsigned char *)(b+5))
-              | (*(unsigned char *)(a+6)  ^ *(unsigned char *)(b+6))
-              | (*(unsigned char *)(a+7)  ^ *(unsigned char *)(b+7))
-              | (*(unsigned char *)(a+8)  ^ *(unsigned char *)(b+8))
-              | (*(unsigned char *)(a+9)  ^ *(unsigned char *)(b+9))
-              | (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10))
-              | (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11))
-              | (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12))
-              | (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13))
-              | (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14))
-              | (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15)));
+        {
+                neq |= *(unsigned char *)(a)    ^ *(unsigned char *)(b);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned char *)(a+1)  ^ *(unsigned char *)(b+1);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned char *)(a+2)  ^ *(unsigned char *)(b+2);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned char *)(a+3)  ^ *(unsigned char *)(b+3);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned char *)(a+4)  ^ *(unsigned char *)(b+4);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned char *)(a+5)  ^ *(unsigned char *)(b+5);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned char *)(a+6)  ^ *(unsigned char *)(b+6);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned char *)(a+7)  ^ *(unsigned char *)(b+7);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned char *)(a+8)  ^ *(unsigned char *)(b+8);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned char *)(a+9)  ^ *(unsigned char *)(b+9);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14);
+                OPTIMIZER_HIDE_VAR(neq);
+                neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15);
+                OPTIMIZER_HIDE_VAR(neq);
+        }
+
+        return neq;
 }
 
 /* Compare two areas of memory without leaking timing information,
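
Together with the Makefile change above, these hunks replace the old compile-flag workaround: instead of forcing -Os on memneq.o to keep the compiler from collapsing the XOR chain into an early-returning comparison, every accumulation into neq is now followed by OPTIMIZER_HIDE_VAR(), which makes the value opaque to the optimizer. A standalone sketch of the idea (the macro definition here is an illustration, not copied from the kernel headers):

    /* An empty asm that claims to rewrite the variable; the compiler can no
     * longer prove neq is already non-zero, so it cannot insert a
     * data-dependent early exit that would leak timing. */
    #define HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))

    static unsigned long toy_memneq(const unsigned char *a,
                                    const unsigned char *b, unsigned long n)
    {
            unsigned long neq = 0;

            while (n--) {
                    neq |= *a++ ^ *b++;
                    HIDE_VAR(neq);          /* no branch on secret data */
            }
            return neq;                     /* 0 iff the buffers are equal */
    }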
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index f8c920cafe63..309d345ead95 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -78,7 +78,7 @@ static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
         cpu = *cb_cpu;
 
         rcu_read_lock_bh();
-        cpumask = rcu_dereference(pcrypt->cb_cpumask);
+        cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
         if (cpumask_test_cpu(cpu, cpumask->mask))
                 goto out;
 
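
The pcrypt.c one-liner is an annotation fix: the pointer is read under rcu_read_lock_bh(), so it should be fetched with the matching rcu_dereference_bh() to keep RCU lockdep from flagging the access. The pairing rule in isolation, with a hypothetical RCU-protected structure rather than pcrypt's actual types:

    #include <linux/rcupdate.h>
    #include <linux/cpumask.h>

    struct cb_mask {
            cpumask_var_t mask;
    };

    static bool cpu_is_allowed(struct cb_mask __rcu **pmask, int cpu)
    {
            struct cb_mask *m;
            bool ok;

            rcu_read_lock_bh();
            m = rcu_dereference_bh(*pmask);  /* _bh matches rcu_read_lock_bh() */
            ok = cpumask_test_cpu(cpu, m->mask);
            rcu_read_unlock_bh();

            return ok;
    }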
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 001f07cdb828..0d9003ae8c61 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -137,7 +137,272 @@ out:
         return ret;
 }
 
+static int test_aead_jiffies(struct aead_request *req, int enc,
+                             int blen, int sec)
+{
+        unsigned long start, end;
+        int bcount;
+        int ret;
+
+        for (start = jiffies, end = start + sec * HZ, bcount = 0;
+             time_before(jiffies, end); bcount++) {
+                if (enc)
+                        ret = crypto_aead_encrypt(req);
+                else
+                        ret = crypto_aead_decrypt(req);
+
+                if (ret)
+                        return ret;
+        }
+
+        printk("%d operations in %d seconds (%ld bytes)\n",
+               bcount, sec, (long)bcount * blen);
+        return 0;
+}
+
+static int test_aead_cycles(struct aead_request *req, int enc, int blen)
+{
+        unsigned long cycles = 0;
+        int ret = 0;
+        int i;
+
+        local_irq_disable();
+
+        /* Warm-up run. */
+        for (i = 0; i < 4; i++) {
+                if (enc)
+                        ret = crypto_aead_encrypt(req);
+                else
+                        ret = crypto_aead_decrypt(req);
+
+                if (ret)
+                        goto out;
+        }
+
+        /* The real thing. */
+        for (i = 0; i < 8; i++) {
+                cycles_t start, end;
+
+                start = get_cycles();
+                if (enc)
+                        ret = crypto_aead_encrypt(req);
+                else
+                        ret = crypto_aead_decrypt(req);
+                end = get_cycles();
+
+                if (ret)
+                        goto out;
+
+                cycles += end - start;
+        }
+
+out:
+        local_irq_enable();
+
+        if (ret == 0)
+                printk("1 operation in %lu cycles (%d bytes)\n",
+                       (cycles + 4) / 8, blen);
+
+        return ret;
+}
+
 static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
+static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };
+
+#define XBUFSIZE 8
+#define MAX_IVLEN 32
+
+static int testmgr_alloc_buf(char *buf[XBUFSIZE])
+{
+        int i;
+
+        for (i = 0; i < XBUFSIZE; i++) {
+                buf[i] = (void *)__get_free_page(GFP_KERNEL);
+                if (!buf[i])
+                        goto err_free_buf;
+        }
+
+        return 0;
+
+err_free_buf:
+        while (i-- > 0)
+                free_page((unsigned long)buf[i]);
+
+        return -ENOMEM;
+}
+
+static void testmgr_free_buf(char *buf[XBUFSIZE])
+{
+        int i;
+
+        for (i = 0; i < XBUFSIZE; i++)
+                free_page((unsigned long)buf[i]);
+}
+
+static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
+                        unsigned int buflen)
+{
+        int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
+        int k, rem;
+
+        np = (np > XBUFSIZE) ? XBUFSIZE : np;
+        rem = buflen % PAGE_SIZE;
+        if (np > XBUFSIZE) {
+                rem = PAGE_SIZE;
+                np = XBUFSIZE;
+        }
+        sg_init_table(sg, np);
+        for (k = 0; k < np; ++k) {
+                if (k == (np-1))
+                        sg_set_buf(&sg[k], xbuf[k], rem);
+                else
+                        sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE);
+        }
+}
+
+static void test_aead_speed(const char *algo, int enc, unsigned int sec,
+                            struct aead_speed_template *template,
+                            unsigned int tcount, u8 authsize,
+                            unsigned int aad_size, u8 *keysize)
+{
+        unsigned int i, j;
+        struct crypto_aead *tfm;
+        int ret = -ENOMEM;
+        const char *key;
+        struct aead_request *req;
+        struct scatterlist *sg;
+        struct scatterlist *asg;
+        struct scatterlist *sgout;
+        const char *e;
+        void *assoc;
+        char iv[MAX_IVLEN];
+        char *xbuf[XBUFSIZE];
+        char *xoutbuf[XBUFSIZE];
+        char *axbuf[XBUFSIZE];
+        unsigned int *b_size;
+        unsigned int iv_len;
+
+        if (enc == ENCRYPT)
+                e = "encryption";
+        else
+                e = "decryption";
+
+        if (testmgr_alloc_buf(xbuf))
+                goto out_noxbuf;
+        if (testmgr_alloc_buf(axbuf))
+                goto out_noaxbuf;
+        if (testmgr_alloc_buf(xoutbuf))
+                goto out_nooutbuf;
+
+        sg = kmalloc(sizeof(*sg) * 8 * 3, GFP_KERNEL);
+        if (!sg)
+                goto out_nosg;
+        asg = &sg[8];
+        sgout = &asg[8];
+
+
+        printk(KERN_INFO "\ntesting speed of %s %s\n", algo, e);
+
+        tfm = crypto_alloc_aead(algo, 0, 0);
+
+        if (IS_ERR(tfm)) {
+                pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo,
+                       PTR_ERR(tfm));
+                return;
+        }
+
+        req = aead_request_alloc(tfm, GFP_KERNEL);
+        if (!req) {
+                pr_err("alg: aead: Failed to allocate request for %s\n",
+                       algo);
+                goto out;
+        }
+
+        i = 0;
+        do {
+                b_size = aead_sizes;
+                do {
+                        assoc = axbuf[0];
+
+                        if (aad_size < PAGE_SIZE)
+                                memset(assoc, 0xff, aad_size);
+                        else {
+                                pr_err("associate data length (%u) too big\n",
+                                       aad_size);
+                                goto out_nosg;
+                        }
+                        sg_init_one(&asg[0], assoc, aad_size);
+
+                        if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
+                                pr_err("template (%u) too big for tvmem (%lu)\n",
+                                       *keysize + *b_size,
+                                       TVMEMSIZE * PAGE_SIZE);
+                                goto out;
+                        }
+
+                        key = tvmem[0];
+                        for (j = 0; j < tcount; j++) {
+                                if (template[j].klen == *keysize) {
+                                        key = template[j].key;
+                                        break;
+                                }
+                        }
+                        ret = crypto_aead_setkey(tfm, key, *keysize);
+                        ret = crypto_aead_setauthsize(tfm, authsize);
+
+                        iv_len = crypto_aead_ivsize(tfm);
+                        if (iv_len)
+                                memset(&iv, 0xff, iv_len);
+
+                        crypto_aead_clear_flags(tfm, ~0);
+                        printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
+                               i, *keysize * 8, *b_size);
+
+
+                        memset(tvmem[0], 0xff, PAGE_SIZE);
+
+                        if (ret) {
+                                pr_err("setkey() failed flags=%x\n",
+                                       crypto_aead_get_flags(tfm));
+                                goto out;
+                        }
+
+                        sg_init_aead(&sg[0], xbuf,
+                                     *b_size + (enc ? authsize : 0));
+
+                        sg_init_aead(&sgout[0], xoutbuf,
+                                     *b_size + (enc ? authsize : 0));
+
+                        aead_request_set_crypt(req, sg, sgout, *b_size, iv);
+                        aead_request_set_assoc(req, asg, aad_size);
+
+                        if (sec)
+                                ret = test_aead_jiffies(req, enc, *b_size, sec);
+                        else
+                                ret = test_aead_cycles(req, enc, *b_size);
+
+                        if (ret) {
+                                pr_err("%s() failed return code=%d\n", e, ret);
+                                break;
+                        }
+                        b_size++;
+                        i++;
+                } while (*b_size);
+                keysize++;
+        } while (*keysize);
+
+out:
+        crypto_free_aead(tfm);
+        kfree(sg);
+out_nosg:
+        testmgr_free_buf(xoutbuf);
+out_nooutbuf:
+        testmgr_free_buf(axbuf);
+out_noaxbuf:
+        testmgr_free_buf(xbuf);
+out_noxbuf:
+        return;
+}
 
 static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
                               struct cipher_speed_template *template,
@@ -1427,6 +1692,11 @@ static int do_test(int m)
                            speed_template_32_64);
                 break;
 
+        case 211:
+                test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
+                                NULL, 0, 16, 8, aead_speed_template_20);
+                break;
+
         case 300:
                 /* fall through */
 
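
With the case 211 hook in place, the new AEAD benchmark can be run like the existing cipher speed tests: load the tcrypt module with the matching mode, and optionally a per-test duration in seconds (this assumes tcrypt's usual mode/sec module parameters):

    modprobe tcrypt mode=211 sec=1

With sec left at 0, test_aead_cycles() times 8 runs after a short warm-up and reports a cycle count instead of counting operations over a fixed number of seconds.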
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index ecdeeb1a7b05..6c7e21a09f78 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -22,6 +22,11 @@ struct cipher_speed_template {
         unsigned int klen;
 };
 
+struct aead_speed_template {
+        const char *key;
+        unsigned int klen;
+};
+
 struct hash_speed {
         unsigned int blen;      /* buffer length */
         unsigned int plen;      /* per-update length */
@@ -58,6 +63,11 @@ static u8 speed_template_32_48_64[] = {32, 48, 64, 0};
 static u8 speed_template_32_64[] = {32, 64, 0};
 
 /*
+ * AEAD speed tests
+ */
+static u8 aead_speed_template_20[] = {20, 0};
+
+/*
  * Digest speed tests
  */
 static struct hash_speed generic_hash_speed_template[] = {
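
The speed templates added here follow the file's existing convention: a zero-terminated array of key sizes that test_aead_speed() walks until it reads 0. The single 20-byte entry matches rfc4106(gcm(aes)), whose setkey material is the 16-byte AES-128 key followed by the 4-byte nonce salt. A hypothetical template covering all three AES key sizes (not part of this merge) would look like:

    static u8 aead_speed_template_20_28_36[] = {20, 28, 36, 0};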