author     Herbert Xu <herbert@gondor.apana.org.au>   2009-05-29 02:05:42 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>   2009-06-02 00:05:00 -0400
commit     fd57f22a09ae276ca3e9cd11ed99b617d611ba82 (patch)
tree       86f83c467efbe67c2058f00392e62c7f67f27636 /crypto/testmgr.c
parent     f3d8fe40498eea9f45be260bdf6ccada845411f3 (diff)
crypto: testmgr - Check all test vector lengths
As we cannot guarantee the availability of contiguous pages at run-time, all test vectors must either fit within a page or use scatter lists. In some cases vectors were not checked to ensure that they fit inside a page. This patch adds all the missing checks.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
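The added checks follow one pattern at every call site: before a test vector is copied into a single-page buffer, its length is compared against PAGE_SIZE and the test fails with -EINVAL rather than overrunning the page. Below is a minimal sketch of that pattern; the helper name and parameters are illustrative only, as the patch itself does the check inline on template[i].psize/ilen/alen.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Illustrative sketch, not part of the patch: testmgr.c performs this
 * check inline before copying a vector into xbuf[0] (or axbuf[0]).
 * The helper name and parameters are hypothetical.
 */
static int copy_vec_to_page(void *page_buf, const void *vec, unsigned int len)
{
	/* A vector longer than PAGE_SIZE cannot fit in a single page. */
	if (WARN_ON(len > PAGE_SIZE))
		return -EINVAL;

	memcpy(page_buf, vec, len);
	return 0;
}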
Diffstat (limited to 'crypto/testmgr.c')
-rw-r--r--  crypto/testmgr.c | 25 +++++++++++++++++++++++++
1 file changed, 25 insertions(+), 0 deletions(-)
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 376ea88158b9..8fcea70ed267 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -185,6 +185,10 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
 
 		hash_buff = xbuf[0];
 
+		ret = -EINVAL;
+		if (WARN_ON(template[i].psize > PAGE_SIZE))
+			goto out;
+
 		memcpy(hash_buff, template[i].plaintext, template[i].psize);
 		sg_init_one(&sg[0], hash_buff, template[i].psize);
 
@@ -238,7 +242,11 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
 
 		temp = 0;
 		sg_init_table(sg, template[i].np);
+		ret = -EINVAL;
 		for (k = 0; k < template[i].np; k++) {
+			if (WARN_ON(offset_in_page(IDX[k]) +
+				    template[i].tap[k] > PAGE_SIZE))
+				goto out;
 			sg_set_buf(&sg[k],
 				   memcpy(xbuf[IDX[k] >> PAGE_SHIFT] +
 					  offset_in_page(IDX[k]),
@@ -357,6 +365,11 @@ static int test_aead(struct crypto_aead *tfm, int enc,
 		input = xbuf[0];
 		assoc = axbuf[0];
 
+		ret = -EINVAL;
+		if (WARN_ON(template[i].ilen > PAGE_SIZE ||
+			    template[i].alen > PAGE_SIZE))
+			goto out;
+
 		memcpy(input, template[i].input, template[i].ilen);
 		memcpy(assoc, template[i].assoc, template[i].alen);
 		if (template[i].iv)
@@ -516,7 +529,11 @@ static int test_aead(struct crypto_aead *tfm, int enc,
 			}
 
 			sg_init_table(asg, template[i].anp);
+			ret = -EINVAL;
 			for (k = 0, temp = 0; k < template[i].anp; k++) {
+				if (WARN_ON(offset_in_page(IDX[k]) +
+					    template[i].atap[k] > PAGE_SIZE))
+					goto out;
 				sg_set_buf(&asg[k],
 					   memcpy(axbuf[IDX[k] >> PAGE_SHIFT] +
 						  offset_in_page(IDX[k]),
@@ -650,6 +667,10 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
 
 		j++;
 
+		ret = -EINVAL;
+		if (WARN_ON(template[i].ilen > PAGE_SIZE))
+			goto out;
+
 		data = xbuf[0];
 		memcpy(data, template[i].input, template[i].ilen);
 
@@ -741,6 +762,10 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
 		if (!(template[i].np)) {
 			j++;
 
+			ret = -EINVAL;
+			if (WARN_ON(template[i].ilen > PAGE_SIZE))
+				goto out;
+
 			data = xbuf[0];
 			memcpy(data, template[i].input, template[i].ilen);
 
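For the chunked (scatter-gather) cases, the added checks bound each chunk rather than the whole vector: the chunk's offset within its destination page plus the chunk length must not exceed PAGE_SIZE, otherwise the memcpy() into xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]) would run past the end of that page. A minimal sketch of that guard, with hypothetical names standing in for IDX[k] and template[i].tap[k]/atap[k]:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/types.h>

/*
 * Illustrative sketch, not part of the patch: the equivalent check is
 * done inline in testmgr.c.  "idx" stands in for IDX[k] and "chunk_len"
 * for template[i].tap[k] (or atap[k]).
 */
static inline bool chunk_fits_in_page(unsigned long idx, unsigned int chunk_len)
{
	/*
	 * The offset within the destination page plus the chunk length
	 * must stay within a single page.
	 */
	return offset_in_page(idx) + chunk_len <= PAGE_SIZE;
}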