Diffstat (limited to 'drivers/crypto/padlock-aes.c')
-rw-r--r--	drivers/crypto/padlock-aes.c	288 ++++++++++++++++++++------
1 file changed, 222 insertions(+), 66 deletions(-)
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 5158a9db4bc5..d4501dc7e650 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -43,11 +43,11 @@
  * ---------------------------------------------------------------------------
  */
 
+#include <crypto/algapi.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/crypto.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <asm/byteorder.h>
@@ -59,16 +59,31 @@
 #define AES_EXTENDED_KEY_SIZE	64	/* in uint32_t units */
 #define AES_EXTENDED_KEY_SIZE_B	(AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
 
+/* Control word. */
+struct cword {
+	unsigned int __attribute__ ((__packed__))
+		rounds:4,
+		algo:3,
+		keygen:1,
+		interm:1,
+		encdec:1,
+		ksize:2;
+} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
+
+/* Whenever making any changes to the following
+ * structure *make sure* you keep E, d_data
+ * and cword aligned on 16 Bytes boundaries!!! */
 struct aes_ctx {
-	uint32_t e_data[AES_EXTENDED_KEY_SIZE];
-	uint32_t d_data[AES_EXTENDED_KEY_SIZE];
 	struct {
 		struct cword encrypt;
 		struct cword decrypt;
 	} cword;
-	uint32_t *E;
-	uint32_t *D;
+	u32 *D;
 	int key_length;
+	u32 E[AES_EXTENDED_KEY_SIZE]
+		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
+	u32 d_data[AES_EXTENDED_KEY_SIZE]
+		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
 };
 
 /* ====== Key management routines ====== */
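The reworked context layout is the heart of this hunk: E and d_data become arrays embedded in struct aes_ctx, each carrying an explicit 16-byte alignment attribute, while D shrinks to a pointer that aliases either E (when the hardware expands the key itself) or d_data. The warning comment above is load-bearing, since the PadLock unit expects its control words and keys on 16-byte boundaries. As a minimal sketch, not part of this patch, of how that invariant could be pinned down at compile time, assuming nothing beyond the stock BUILD_BUG_ON() and offsetof() macros:

	/* Fail the build if struct aes_ctx ever loses the 16-byte layout
	 * that the cword/E/d_data comment above demands. The base pointer
	 * itself is aligned at runtime by aes_ctx_common() further down.
	 */
	static inline void aes_ctx_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct aes_ctx, cword) & (PADLOCK_ALIGNMENT - 1));
		BUILD_BUG_ON(offsetof(struct aes_ctx, E) & (PADLOCK_ALIGNMENT - 1));
		BUILD_BUG_ON(offsetof(struct aes_ctx, d_data) & (PADLOCK_ALIGNMENT - 1));
	}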
@@ -282,25 +297,37 @@ aes_hw_extkey_available(uint8_t key_len)
 	return 0;
 }
 
-static inline struct aes_ctx *aes_ctx(void *ctx)
+static inline struct aes_ctx *aes_ctx_common(void *ctx)
 {
+	unsigned long addr = (unsigned long)ctx;
 	unsigned long align = PADLOCK_ALIGNMENT;
 
 	if (align <= crypto_tfm_ctx_alignment())
 		align = 1;
-	return (struct aes_ctx *)ALIGN((unsigned long)ctx, align);
+	return (struct aes_ctx *)ALIGN(addr, align);
 }
 
-static int
-aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags)
+static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
 {
-	struct aes_ctx *ctx = aes_ctx(ctx_arg);
+	return aes_ctx_common(crypto_tfm_ctx(tfm));
+}
+
+static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
+{
+	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
+}
+
+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+		       unsigned int key_len)
+{
+	struct aes_ctx *ctx = aes_ctx(tfm);
 	const __le32 *key = (const __le32 *)in_key;
+	u32 *flags = &tfm->crt_flags;
 	uint32_t i, t, u, v, w;
 	uint32_t P[AES_EXTENDED_KEY_SIZE];
 	uint32_t rounds;
 
-	if (key_len != 16 && key_len != 24 && key_len != 32) {
+	if (key_len % 8) {
 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 		return -EINVAL;
 	}
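Two details in this hunk are worth spelling out. First, aes_ctx_common() now routes the rounding through a named local, but the logic is unchanged: if the crypto layer's own context alignment is not already strict enough, the raw pointer is rounded up to the next PADLOCK_ALIGNMENT boundary with ALIGN(), the kernel's round-up macro, (x + a - 1) & ~(a - 1). A standalone illustration of the rounding, with a hypothetical address (userspace, for clarity only):

	#include <stdio.h>
	#include <stdint.h>

	/* same arithmetic as the kernel's ALIGN() for power-of-two a */
	#define ALIGN(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

	int main(void)
	{
		uintptr_t ctx = 0x1008;	/* hypothetical 8-byte-aligned context */

		/* rounds up to the next 16-byte boundary: prints 0x1010 */
		printf("%#lx\n", (unsigned long)ALIGN(ctx, 16));
		return 0;
	}

Second, the relaxed key check is safe because the crypto core rejects keys outside [cia_min_keysize, cia_max_keysize] (16 and 32 here) before setkey runs, so key_len % 8 leaves exactly the legal AES lengths 16, 24 and 32.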
@@ -312,8 +339,7 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t
 	 * itself we must supply the plain key for both encryption
 	 * and decryption.
 	 */
-	ctx->E = ctx->e_data;
-	ctx->D = ctx->e_data;
+	ctx->D = ctx->E;
 
 	E_KEY[0] = le32_to_cpu(key[0]);
 	E_KEY[1] = le32_to_cpu(key[1]);
@@ -414,35 +440,80 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 	return iv;
 }
 
-static void
-aes_encrypt(void *ctx_arg, uint8_t *out, const uint8_t *in)
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-	struct aes_ctx *ctx = aes_ctx(ctx_arg);
+	struct aes_ctx *ctx = aes_ctx(tfm);
 	padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 1);
 }
 
-static void
-aes_decrypt(void *ctx_arg, uint8_t *out, const uint8_t *in)
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-	struct aes_ctx *ctx = aes_ctx(ctx_arg);
+	struct aes_ctx *ctx = aes_ctx(tfm);
 	padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
 }
 
-static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static struct crypto_alg aes_alg = {
+	.cra_name = "aes",
+	.cra_driver_name = "aes-padlock",
+	.cra_priority = PADLOCK_CRA_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct aes_ctx),
+	.cra_alignmask = PADLOCK_ALIGNMENT - 1,
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
+	.cra_u = {
+		.cipher = {
+			.cia_min_keysize = AES_MIN_KEY_SIZE,
+			.cia_max_keysize = AES_MAX_KEY_SIZE,
+			.cia_setkey = aes_set_key,
+			.cia_encrypt = aes_encrypt,
+			.cia_decrypt = aes_decrypt,
+		}
+	}
+};
+
+static int ecb_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
 {
-	struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
-	padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt,
-			   nbytes / AES_BLOCK_SIZE);
-	return nbytes & ~(AES_BLOCK_SIZE - 1);
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+				   ctx->E, &ctx->cword.encrypt,
+				   nbytes / AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
 }
 
-static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int ecb_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
 {
-	struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
-	padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt,
-			   nbytes / AES_BLOCK_SIZE);
-	return nbytes & ~(AES_BLOCK_SIZE - 1);
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+				   ctx->D, &ctx->cword.decrypt,
+				   nbytes / AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
 }
 
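Both ECB paths now follow the stock blkcipher_walk pattern: blkcipher_walk_virt() hands back one virtually-mapped chunk of the scatterlist at a time, the engine consumes the whole blocks in it, and the sub-block remainder goes back through blkcipher_walk_done() for the next round. Because AES_BLOCK_SIZE is 16, a power of two, the block count and the remainder are a divide and a mask; a trivial standalone check of the arithmetic, with a hypothetical chunk size:

	#include <stdio.h>

	#define AES_BLOCK_SIZE 16

	int main(void)
	{
		unsigned int nbytes = 100;	/* hypothetical walk chunk */

		/* 6 whole blocks for the engine, 4 bytes back to the walker */
		printf("blocks=%u remainder=%u\n",
		       nbytes / AES_BLOCK_SIZE,
		       nbytes & (AES_BLOCK_SIZE - 1));
		return 0;
	}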
@@ -449,12 +520,45 @@
-static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
-{
-	struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
-	u8 *iv;
+static struct crypto_alg ecb_aes_alg = {
+	.cra_name = "ecb(aes)",
+	.cra_driver_name = "ecb-aes-padlock",
+	.cra_priority = PADLOCK_COMPOSITE_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct aes_ctx),
+	.cra_alignmask = PADLOCK_ALIGNMENT - 1,
+	.cra_type = &crypto_blkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.setkey = aes_set_key,
+			.encrypt = ecb_aes_encrypt,
+			.decrypt = ecb_aes_decrypt,
+		}
+	}
+};
 
-	iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info,
-				&ctx->cword.encrypt, nbytes / AES_BLOCK_SIZE);
-	memcpy(desc->info, iv, AES_BLOCK_SIZE);
+static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
+					    walk.dst.virt.addr, ctx->E,
+					    walk.iv, &ctx->cword.encrypt,
+					    nbytes / AES_BLOCK_SIZE);
+		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
 
-	return nbytes & ~(AES_BLOCK_SIZE - 1);
+	return err;
 }
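The CBC encrypt loop carries one obligation that ECB does not: padlock_xcrypt_cbc() returns a pointer to the last ciphertext block it produced, and the memcpy() into walk.iv is what chains one walk chunk to the next. As a reference for what the hardware computes per chunk, a plain-C sketch of CBC encryption; encrypt_block() is a hypothetical one-block AES primitive, not a function from this driver:

	#include <string.h>

	/* hypothetical single-block AES primitive */
	void encrypt_block(unsigned char *out, const unsigned char *in,
			   const void *key);

	/* Reference CBC over nblocks 16-byte blocks; iv is updated in place
	 * so a following chunk continues the chain, mirroring walk.iv above.
	 */
	static void cbc_encrypt_ref(unsigned char *out, const unsigned char *in,
				    const void *key, unsigned char *iv,
				    unsigned int nblocks)
	{
		unsigned int i, j;
		unsigned char buf[16];

		for (i = 0; i < nblocks; i++) {
			for (j = 0; j < 16; j++)
				buf[j] = in[16 * i + j] ^ iv[j];  /* P[i] ^ C[i-1] */
			encrypt_block(out + 16 * i, buf, key);    /* C[i] */
			memcpy(iv, out + 16 * i, 16);             /* new chain value */
		}
	}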
@@ -461,47 +565,99 @@
 
-static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
-				    const u8 *in, unsigned int nbytes)
+static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
 {
-	struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm));
-	padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt,
-			   nbytes / AES_BLOCK_SIZE);
-	return nbytes & ~(AES_BLOCK_SIZE - 1);
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
+				   ctx->D, walk.iv, &ctx->cword.decrypt,
+				   nbytes / AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
 }
 
-static struct crypto_alg aes_alg = {
-	.cra_name = "aes",
-	.cra_driver_name = "aes-padlock",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+static struct crypto_alg cbc_aes_alg = {
+	.cra_name = "cbc(aes)",
+	.cra_driver_name = "cbc-aes-padlock",
+	.cra_priority = PADLOCK_COMPOSITE_PRIORITY,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize = AES_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct aes_ctx),
 	.cra_alignmask = PADLOCK_ALIGNMENT - 1,
+	.cra_type = &crypto_blkcipher_type,
 	.cra_module = THIS_MODULE,
-	.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
+	.cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
 	.cra_u = {
-		.cipher = {
-			.cia_min_keysize = AES_MIN_KEY_SIZE,
-			.cia_max_keysize = AES_MAX_KEY_SIZE,
-			.cia_setkey = aes_set_key,
-			.cia_encrypt = aes_encrypt,
-			.cia_decrypt = aes_decrypt,
-			.cia_encrypt_ecb = aes_encrypt_ecb,
-			.cia_decrypt_ecb = aes_decrypt_ecb,
-			.cia_encrypt_cbc = aes_encrypt_cbc,
-			.cia_decrypt_cbc = aes_decrypt_cbc,
+		.blkcipher = {
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			.setkey = aes_set_key,
+			.encrypt = cbc_aes_encrypt,
+			.decrypt = cbc_aes_decrypt,
 		}
 	}
 };
 
-int __init padlock_init_aes(void)
+static int __init padlock_init(void)
 {
-	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+	int ret;
+
+	if (!cpu_has_xcrypt) {
+		printk(KERN_ERR PFX "VIA PadLock not detected.\n");
+		return -ENODEV;
+	}
+
+	if (!cpu_has_xcrypt_enabled) {
+		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+		return -ENODEV;
+	}
 
 	gen_tabs();
-	return crypto_register_alg(&aes_alg);
+	if ((ret = crypto_register_alg(&aes_alg)))
+		goto aes_err;
+
+	if ((ret = crypto_register_alg(&ecb_aes_alg)))
+		goto ecb_aes_err;
+
+	if ((ret = crypto_register_alg(&cbc_aes_alg)))
+		goto cbc_aes_err;
+
+	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+
+out:
+	return ret;
+
+cbc_aes_err:
+	crypto_unregister_alg(&ecb_aes_alg);
+ecb_aes_err:
+	crypto_unregister_alg(&aes_alg);
+aes_err:
+	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
+	goto out;
 }
 
-void __exit padlock_fini_aes(void)
+static void __exit padlock_fini(void)
 {
+	crypto_unregister_alg(&cbc_aes_alg);
+	crypto_unregister_alg(&ecb_aes_alg);
 	crypto_unregister_alg(&aes_alg);
 }
+
+module_init(padlock_init);
+module_exit(padlock_fini);
+
+MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig");
+
+MODULE_ALIAS("aes-padlock");
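Once the module is loaded, nothing calls these functions by name; consumers go through the generic crypto API, and the raised cra_priority values let the PadLock implementations win over the software providers whenever the CPU supports them. A minimal sketch of a kernel-side caller against the 2.6.19-era synchronous blkcipher interface; demo_cbc_aes() and its parameters are hypothetical, and error handling is trimmed to the essentials:

	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int demo_cbc_aes(void *buf, unsigned int len,	/* len % 16 == 0 */
				const u8 *key, const u8 *iv)
	{
		struct crypto_blkcipher *tfm;
		struct blkcipher_desc desc;
		struct scatterlist sg;
		int err;

		/* resolves to cbc-aes-padlock when it is the highest-priority
		 * "cbc(aes)" provider on this machine */
		tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_blkcipher_setkey(tfm, key, 16);
		if (!err) {
			desc.tfm = tfm;
			desc.flags = 0;
			crypto_blkcipher_set_iv(tfm, iv, 16);
			sg_init_one(&sg, buf, len);
			err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
		}

		crypto_free_blkcipher(tfm);
		return err;
	}

Encrypting in place (src == dst) is legal here; the walk code in the driver handles that case the same way as distinct buffers.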