path: root/crypto/testmgr.c
author	Eric Biggers <ebiggers@google.com>	2019-02-01 02:51:43 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2019-02-08 02:30:09 -0500
commit	3f47a03df6e81174558f4604828851cb600e1db6 (patch)
tree	6a7ffff34088f7ffd7a0d903147d857cd85eca35 /crypto/testmgr.c
parent	12455e320e19e9cc7ad97f4ab89c280fe297387c (diff)
crypto: testmgr - add testvec_config struct and helper functions
Crypto algorithms must produce the same output for the same input
regardless of data layout, i.e. how the src and dst scatterlists are
divided into chunks and how each chunk is aligned.  Request flags such
as CRYPTO_TFM_REQ_MAY_SLEEP must not affect the result either.

However, testing of this currently has many gaps.  For example,
individual algorithms are responsible for providing their own chunked
test vectors.  But many don't bother to do this or test only one or two
cases, providing poor test coverage.  Also, other things such as
misaligned IVs and CRYPTO_TFM_REQ_MAY_SLEEP are never tested at all.

Test code is also duplicated between the chunked and non-chunked cases,
making it difficult to make other improvements.

To improve the situation, this patch series basically moves the chunk
descriptions into the testmgr itself so that they are shared by all
algorithms.  However, it's done in an extensible way via a new struct
'testvec_config', which describes not just the scaled chunk lengths but
also all other aspects of the crypto operation besides the data itself
such as the buffer alignments, the request flags, whether the operation
is in-place or not, the IV alignment, and for hash algorithms when to
do each update() and when to use finup() vs. final() vs. digest().

Then, this patch series makes skcipher, aead, and hash algorithms be
tested against a list of default testvec_configs, replacing the current
test code.  This improves overall test coverage, without reducing test
performance too much.  Note that the test vectors themselves are not
changed, except for removing the chunk lists.

This series also adds randomized fuzz tests, enabled by a new kconfig
option intended for developer use only, where skcipher, aead, and hash
algorithms are tested against many randomly generated testvec_configs.
This provides much more comprehensive test coverage.

These improved tests have already exposed many bugs.

To start it off, this initial patch adds the testvec_config struct and
various helper functions that will be used by the skcipher, aead, and
hash test code that will be converted to use the new testvec_config
framework.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
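To make the mechanism concrete, here is a sketch of what a config built from these structs could look like. The config itself, its name, and its values are invented for illustration and are not part of this patch (the actual default configs are added by later patches in the series):

```c
/*
 * Hypothetical example (not from this patch): a config that splits the
 * data into two scatterlist entries covering 35% and 65% of the total
 * length (out of TEST_SG_TOTAL == 10000), starts the second entry at a
 * misaligned buffer offset, misaligns the IV by 1 byte beyond the
 * algorithm's alignmask, and allows the operation to sleep.
 */
static const struct testvec_config example_cfg = {
	.name = "35/65 split, misaligned IV",
	.src_divs = {
		{ .proportion_of_total = 3500 },
		{ .proportion_of_total = 6500, .offset = 3 },
	},
	.iv_offset = 1,
	.iv_offset_relative_to_alignmask = true,
	.req_flags = CRYPTO_TFM_REQ_MAY_SLEEP,
};
```

For a 100-byte test vector, build_test_sglist() below would round these proportions to chunks of 35 and 65 bytes ((100 * 3500 + 5000) / 10000 = 35, with any rounding remainder folded into the last chunk), and valid_testvec_config() would accept the config since the proportions sum to exactly TEST_SG_TOTAL.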
Diffstat (limited to 'crypto/testmgr.c')
-rw-r--r--	crypto/testmgr.c	452
1 file changed, 437 insertions(+), 15 deletions(-)
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 01a517e3f06b..0fc9421ddaba 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -5,6 +5,7 @@
  * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
  * Copyright (c) 2007 Nokia Siemens Networks
  * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2019 Google LLC
  *
  * Updated RFC4106 AES-GCM testing.
  * Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
@@ -26,6 +27,7 @@
 #include <linux/err.h>
 #include <linux/fips.h>
 #include <linux/module.h>
+#include <linux/once.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/string.h>
@@ -146,12 +148,12 @@ static void hexdump(unsigned char *buf, unsigned int len)
 		     buf, len, false);
 }
 
-static int testmgr_alloc_buf(char *buf[XBUFSIZE])
+static int __testmgr_alloc_buf(char *buf[XBUFSIZE], int order)
 {
 	int i;
 
 	for (i = 0; i < XBUFSIZE; i++) {
-		buf[i] = (void *)__get_free_page(GFP_KERNEL);
+		buf[i] = (char *)__get_free_pages(GFP_KERNEL, order);
 		if (!buf[i])
 			goto err_free_buf;
 	}
@@ -160,17 +162,435 @@ static int testmgr_alloc_buf(char *buf[XBUFSIZE])
 
 err_free_buf:
 	while (i-- > 0)
-		free_page((unsigned long)buf[i]);
+		free_pages((unsigned long)buf[i], order);
 
 	return -ENOMEM;
 }
 
-static void testmgr_free_buf(char *buf[XBUFSIZE])
+static int testmgr_alloc_buf(char *buf[XBUFSIZE])
+{
+	return __testmgr_alloc_buf(buf, 0);
+}
+
+static void __testmgr_free_buf(char *buf[XBUFSIZE], int order)
 {
 	int i;
 
 	for (i = 0; i < XBUFSIZE; i++)
-		free_page((unsigned long)buf[i]);
+		free_pages((unsigned long)buf[i], order);
+}
+
+static void testmgr_free_buf(char *buf[XBUFSIZE])
+{
+	__testmgr_free_buf(buf, 0);
+}
+
+#define TESTMGR_POISON_BYTE	0xfe
+#define TESTMGR_POISON_LEN	16
+
+static inline void testmgr_poison(void *addr, size_t len)
+{
+	memset(addr, TESTMGR_POISON_BYTE, len);
+}
+
+/* Is the memory region still fully poisoned? */
+static inline bool testmgr_is_poison(const void *addr, size_t len)
+{
+	return memchr_inv(addr, TESTMGR_POISON_BYTE, len) == NULL;
+}
+
+/* flush type for hash algorithms */
+enum flush_type {
+	/* merge with update of previous buffer(s) */
+	FLUSH_TYPE_NONE = 0,
+
+	/* update with previous buffer(s) before doing this one */
+	FLUSH_TYPE_FLUSH,
+
+	/* likewise, but also export and re-import the intermediate state */
+	FLUSH_TYPE_REIMPORT,
+};
+
+/* finalization function for hash algorithms */
+enum finalization_type {
+	FINALIZATION_TYPE_FINAL,	/* use final() */
+	FINALIZATION_TYPE_FINUP,	/* use finup() */
+	FINALIZATION_TYPE_DIGEST,	/* use digest() */
+};
+
+#define TEST_SG_TOTAL	10000
+
+/**
+ * struct test_sg_division - description of a scatterlist entry
+ *
+ * This struct describes one entry of a scatterlist being constructed to check
+ * a crypto test vector.
+ *
+ * @proportion_of_total: length of this chunk relative to the total length,
+ *			 given as a proportion out of TEST_SG_TOTAL so that it
+ *			 scales to fit any test vector
+ * @offset: byte offset into a 2-page buffer at which this chunk will start
+ * @offset_relative_to_alignmask: if true, add the algorithm's alignmask to the
+ *				  @offset
+ * @flush_type: for hashes, whether an update() should be done now vs.
+ *		continuing to accumulate data
+ */
+struct test_sg_division {
+	unsigned int proportion_of_total;
+	unsigned int offset;
+	bool offset_relative_to_alignmask;
+	enum flush_type flush_type;
+};
+
+/**
+ * struct testvec_config - configuration for testing a crypto test vector
+ *
+ * This struct describes the data layout and other parameters with which each
+ * crypto test vector can be tested.
+ *
+ * @name: name of this config, logged for debugging purposes if a test fails
+ * @inplace: operate on the data in-place, if applicable for the algorithm type?
+ * @req_flags: extra request_flags, e.g. CRYPTO_TFM_REQ_MAY_SLEEP
+ * @src_divs: description of how to arrange the source scatterlist
+ * @dst_divs: description of how to arrange the dst scatterlist, if applicable
+ *	      for the algorithm type.  Defaults to @src_divs if unset.
+ * @iv_offset: misalignment of the IV in the range [0..MAX_ALGAPI_ALIGNMASK+1],
+ *	       where 0 is aligned to a 2*(MAX_ALGAPI_ALIGNMASK+1) byte boundary
+ * @iv_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
+ *				     the @iv_offset
+ * @finalization_type: what finalization function to use for hashes
+ */
+struct testvec_config {
+	const char *name;
+	bool inplace;
+	u32 req_flags;
+	struct test_sg_division src_divs[XBUFSIZE];
+	struct test_sg_division dst_divs[XBUFSIZE];
+	unsigned int iv_offset;
+	bool iv_offset_relative_to_alignmask;
+	enum finalization_type finalization_type;
+};
+
+#define TESTVEC_CONFIG_NAMELEN	192
+
+static unsigned int count_test_sg_divisions(const struct test_sg_division *divs)
+{
+	unsigned int remaining = TEST_SG_TOTAL;
+	unsigned int ndivs = 0;
+
+	do {
+		remaining -= divs[ndivs++].proportion_of_total;
+	} while (remaining);
+
+	return ndivs;
+}
+
+static bool valid_sg_divisions(const struct test_sg_division *divs,
+			       unsigned int count, bool *any_flushes_ret)
+{
+	unsigned int total = 0;
+	unsigned int i;
+
+	for (i = 0; i < count && total != TEST_SG_TOTAL; i++) {
+		if (divs[i].proportion_of_total <= 0 ||
+		    divs[i].proportion_of_total > TEST_SG_TOTAL - total)
+			return false;
+		total += divs[i].proportion_of_total;
+		if (divs[i].flush_type != FLUSH_TYPE_NONE)
+			*any_flushes_ret = true;
+	}
+	return total == TEST_SG_TOTAL &&
+		memchr_inv(&divs[i], 0, (count - i) * sizeof(divs[0])) == NULL;
+}
+
+/*
+ * Check whether the given testvec_config is valid.  This isn't strictly needed
+ * since every testvec_config should be valid, but check anyway so that people
+ * don't unknowingly add broken configs that don't do what they wanted.
+ */
+static bool valid_testvec_config(const struct testvec_config *cfg)
+{
+	bool any_flushes = false;
+
+	if (cfg->name == NULL)
+		return false;
+
+	if (!valid_sg_divisions(cfg->src_divs, ARRAY_SIZE(cfg->src_divs),
+				&any_flushes))
+		return false;
+
+	if (cfg->dst_divs[0].proportion_of_total) {
+		if (!valid_sg_divisions(cfg->dst_divs,
+					ARRAY_SIZE(cfg->dst_divs),
+					&any_flushes))
+			return false;
+	} else {
+		if (memchr_inv(cfg->dst_divs, 0, sizeof(cfg->dst_divs)))
+			return false;
+		/* defaults to dst_divs=src_divs */
+	}
+
+	if (cfg->iv_offset +
+	    (cfg->iv_offset_relative_to_alignmask ? MAX_ALGAPI_ALIGNMASK : 0) >
+	    MAX_ALGAPI_ALIGNMASK + 1)
+		return false;
+
+	if (any_flushes && cfg->finalization_type == FINALIZATION_TYPE_DIGEST)
+		return false;
+
+	return true;
+}
+
+struct test_sglist {
+	char *bufs[XBUFSIZE];
+	struct scatterlist sgl[XBUFSIZE];
+	struct scatterlist sgl_saved[XBUFSIZE];
+	struct scatterlist *sgl_ptr;
+	unsigned int nents;
+};
+
+static int init_test_sglist(struct test_sglist *tsgl)
+{
+	return __testmgr_alloc_buf(tsgl->bufs, 1 /* two pages per buffer */);
+}
+
+static void destroy_test_sglist(struct test_sglist *tsgl)
+{
+	return __testmgr_free_buf(tsgl->bufs, 1 /* two pages per buffer */);
+}
+
+/**
+ * build_test_sglist() - build a scatterlist for a crypto test
+ *
+ * @tsgl: the scatterlist to build.  @tsgl->bufs[] contains an array of 2-page
+ *	  buffers which the scatterlist @tsgl->sgl[] will be made to point into.
+ * @divs: the layout specification on which the scatterlist will be based
+ * @alignmask: the algorithm's alignmask
+ * @total_len: the total length of the scatterlist to build in bytes
+ * @data: if non-NULL, the buffers will be filled with this data until it ends.
+ *	  Otherwise the buffers will be poisoned.  In both cases, some bytes
+ *	  past the end of each buffer will be poisoned to help detect overruns.
+ * @out_divs: if non-NULL, the test_sg_division to which each scatterlist entry
+ *	      corresponds will be returned here.  This will match @divs except
+ *	      that divisions resolving to a length of 0 are omitted as they are
+ *	      not included in the scatterlist.
+ *
+ * Return: 0 or a -errno value
+ */
+static int build_test_sglist(struct test_sglist *tsgl,
+			     const struct test_sg_division *divs,
+			     const unsigned int alignmask,
+			     const unsigned int total_len,
+			     struct iov_iter *data,
+			     const struct test_sg_division *out_divs[XBUFSIZE])
+{
+	struct {
+		const struct test_sg_division *div;
+		size_t length;
+	} partitions[XBUFSIZE];
+	const unsigned int ndivs = count_test_sg_divisions(divs);
+	unsigned int len_remaining = total_len;
+	unsigned int i;
+
+	BUILD_BUG_ON(ARRAY_SIZE(partitions) != ARRAY_SIZE(tsgl->sgl));
+	if (WARN_ON(ndivs > ARRAY_SIZE(partitions)))
+		return -EINVAL;
+
+	/* Calculate the (div, length) pairs */
+	tsgl->nents = 0;
+	for (i = 0; i < ndivs; i++) {
+		unsigned int len_this_sg =
+			min(len_remaining,
+			    (total_len * divs[i].proportion_of_total +
+			     TEST_SG_TOTAL / 2) / TEST_SG_TOTAL);
+
+		if (len_this_sg != 0) {
+			partitions[tsgl->nents].div = &divs[i];
+			partitions[tsgl->nents].length = len_this_sg;
+			tsgl->nents++;
+			len_remaining -= len_this_sg;
+		}
+	}
+	if (tsgl->nents == 0) {
+		partitions[tsgl->nents].div = &divs[0];
+		partitions[tsgl->nents].length = 0;
+		tsgl->nents++;
+	}
+	partitions[tsgl->nents - 1].length += len_remaining;
+
+	/* Set up the sgl entries and fill the data or poison */
+	sg_init_table(tsgl->sgl, tsgl->nents);
+	for (i = 0; i < tsgl->nents; i++) {
+		unsigned int offset = partitions[i].div->offset;
+		void *addr;
+
+		if (partitions[i].div->offset_relative_to_alignmask)
+			offset += alignmask;
+
+		while (offset + partitions[i].length + TESTMGR_POISON_LEN >
+		       2 * PAGE_SIZE) {
+			if (WARN_ON(offset <= 0))
+				return -EINVAL;
+			offset /= 2;
+		}
+
+		addr = &tsgl->bufs[i][offset];
+		sg_set_buf(&tsgl->sgl[i], addr, partitions[i].length);
+
+		if (out_divs)
+			out_divs[i] = partitions[i].div;
+
+		if (data) {
+			size_t copy_len, copied;
+
+			copy_len = min(partitions[i].length, data->count);
+			copied = copy_from_iter(addr, copy_len, data);
+			if (WARN_ON(copied != copy_len))
+				return -EINVAL;
+			testmgr_poison(addr + copy_len, partitions[i].length +
+				       TESTMGR_POISON_LEN - copy_len);
+		} else {
+			testmgr_poison(addr, partitions[i].length +
+				       TESTMGR_POISON_LEN);
+		}
+	}
+
+	sg_mark_end(&tsgl->sgl[tsgl->nents - 1]);
+	tsgl->sgl_ptr = tsgl->sgl;
+	memcpy(tsgl->sgl_saved, tsgl->sgl, tsgl->nents * sizeof(tsgl->sgl[0]));
+	return 0;
+}
+
+/*
+ * Verify that a scatterlist crypto operation produced the correct output.
+ *
+ * @tsgl: scatterlist containing the actual output
+ * @expected_output: buffer containing the expected output
+ * @len_to_check: length of @expected_output in bytes
+ * @unchecked_prefix_len: number of ignored bytes in @tsgl prior to real result
+ * @check_poison: verify that the poison bytes after each chunk are intact?
+ *
+ * Return: 0 if correct, -EINVAL if incorrect, -EOVERFLOW if buffer overrun.
+ */
+static int verify_correct_output(const struct test_sglist *tsgl,
+				 const char *expected_output,
+				 unsigned int len_to_check,
+				 unsigned int unchecked_prefix_len,
+				 bool check_poison)
+{
+	unsigned int i;
+
+	for (i = 0; i < tsgl->nents; i++) {
+		struct scatterlist *sg = &tsgl->sgl_ptr[i];
+		unsigned int len = sg->length;
+		unsigned int offset = sg->offset;
+		const char *actual_output;
+
+		if (unchecked_prefix_len) {
+			if (unchecked_prefix_len >= len) {
+				unchecked_prefix_len -= len;
+				continue;
+			}
+			offset += unchecked_prefix_len;
+			len -= unchecked_prefix_len;
+			unchecked_prefix_len = 0;
+		}
+		len = min(len, len_to_check);
+		actual_output = page_address(sg_page(sg)) + offset;
+		if (memcmp(expected_output, actual_output, len) != 0)
+			return -EINVAL;
+		if (check_poison &&
+		    !testmgr_is_poison(actual_output + len, TESTMGR_POISON_LEN))
+			return -EOVERFLOW;
+		len_to_check -= len;
+		expected_output += len;
+	}
+	if (WARN_ON(len_to_check != 0))
+		return -EINVAL;
+	return 0;
+}
+
+static bool is_test_sglist_corrupted(const struct test_sglist *tsgl)
+{
+	unsigned int i;
+
+	for (i = 0; i < tsgl->nents; i++) {
+		if (tsgl->sgl[i].page_link != tsgl->sgl_saved[i].page_link)
+			return true;
+		if (tsgl->sgl[i].offset != tsgl->sgl_saved[i].offset)
+			return true;
+		if (tsgl->sgl[i].length != tsgl->sgl_saved[i].length)
+			return true;
+	}
+	return false;
+}
+
+struct cipher_test_sglists {
+	struct test_sglist src;
+	struct test_sglist dst;
+};
+
+static struct cipher_test_sglists *alloc_cipher_test_sglists(void)
+{
+	struct cipher_test_sglists *tsgls;
+
+	tsgls = kmalloc(sizeof(*tsgls), GFP_KERNEL);
+	if (!tsgls)
+		return NULL;
+
+	if (init_test_sglist(&tsgls->src) != 0)
+		goto fail_kfree;
+	if (init_test_sglist(&tsgls->dst) != 0)
+		goto fail_destroy_src;
+
+	return tsgls;
+
+fail_destroy_src:
+	destroy_test_sglist(&tsgls->src);
+fail_kfree:
+	kfree(tsgls);
+	return NULL;
+}
+
+static void free_cipher_test_sglists(struct cipher_test_sglists *tsgls)
+{
+	if (tsgls) {
+		destroy_test_sglist(&tsgls->src);
+		destroy_test_sglist(&tsgls->dst);
+		kfree(tsgls);
+	}
+}
+
+/* Build the src and dst scatterlists for an skcipher or AEAD test */
+static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls,
+				     const struct testvec_config *cfg,
+				     unsigned int alignmask,
+				     unsigned int src_total_len,
+				     unsigned int dst_total_len,
+				     const struct kvec *inputs,
+				     unsigned int nr_inputs)
+{
+	struct iov_iter input;
+	int err;
+
+	iov_iter_kvec(&input, WRITE, inputs, nr_inputs, src_total_len);
+	err = build_test_sglist(&tsgls->src, cfg->src_divs, alignmask,
+				cfg->inplace ?
+					max(dst_total_len, src_total_len) :
+					src_total_len,
+				&input, NULL);
+	if (err)
+		return err;
+
+	if (cfg->inplace) {
+		tsgls->dst.sgl_ptr = tsgls->src.sgl;
+		tsgls->dst.nents = tsgls->src.nents;
+		return 0;
+	}
+	return build_test_sglist(&tsgls->dst,
+				 cfg->dst_divs[0].proportion_of_total ?
+					cfg->dst_divs : cfg->src_divs,
+				 alignmask, dst_total_len, NULL, NULL);
 }
 
 static int ahash_guard_result(char *result, char c, int size)
@@ -3654,18 +4074,10 @@ static const struct alg_test_desc alg_test_descs[] = {
 	}
 };
 
-static bool alg_test_descs_checked;
-
-static void alg_test_descs_check_order(void)
+static void alg_check_test_descs_order(void)
 {
 	int i;
 
-	/* only check once */
-	if (alg_test_descs_checked)
-		return;
-
-	alg_test_descs_checked = true;
-
 	for (i = 1; i < ARRAY_SIZE(alg_test_descs); i++) {
 		int diff = strcmp(alg_test_descs[i - 1].alg,
 				  alg_test_descs[i].alg);
@@ -3683,6 +4095,16 @@ static void alg_test_descs_check_order(void)
 	}
 }
 
+static void alg_check_testvec_configs(void)
+{
+}
+
+static void testmgr_onetime_init(void)
+{
+	alg_check_test_descs_order();
+	alg_check_testvec_configs();
+}
+
 static int alg_find_test(const char *alg)
 {
 	int start = 0;
@@ -3719,7 +4141,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
 		return 0;
 	}
 
-	alg_test_descs_check_order();
+	DO_ONCE(testmgr_onetime_init);
 
 	if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {
 		char nalg[CRYPTO_MAX_ALG_NAME];
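Taken together, the helpers above suggest the shape of the skcipher/AEAD test code that later patches in this series convert. The sketch below is an assumed usage flow, not code from this patch: the check_one_config_sketch() wrapper is invented for illustration, the vec field names follow struct cipher_testvec, and the synchronous-completion and NULL-IV shortcuts are simplifications of what the real test code does:

```c
/* Assumed usage sketch: test one encryption vector under one config. */
static int check_one_config_sketch(struct skcipher_request *req,
				   const struct cipher_testvec *vec,
				   const struct testvec_config *cfg,
				   unsigned int alignmask)
{
	struct cipher_test_sglists *tsgls;
	struct kvec input = {
		.iov_base = (void *)vec->ptext,
		.iov_len = vec->len,
	};
	int err;

	tsgls = alloc_cipher_test_sglists();
	if (!tsgls)
		return -ENOMEM;

	/* Lay out src (and dst, unless cfg->inplace) as cfg describes */
	err = build_cipher_test_sglists(tsgls, cfg, alignmask,
					vec->len, vec->len, &input, 1);
	if (err)
		goto out;

	/* cfg->req_flags become the request's callback flags */
	skcipher_request_set_callback(req, cfg->req_flags, NULL, NULL);
	/* NULL IV for brevity; the real code misaligns it by cfg->iv_offset */
	skcipher_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
				   vec->len, NULL);
	err = crypto_skcipher_encrypt(req);	/* assumes a synchronous tfm */
	if (err)
		goto out;

	/* Compare against the expected ciphertext and the poison guards */
	err = verify_correct_output(&tsgls->dst, vec->ctext, vec->len, 0, true);
out:
	free_cipher_test_sglists(tsgls);
	return err;
}
```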