Diffstat (limited to 'drivers/crypto/padlock-aes.c')

-rw-r--r--  drivers/crypto/padlock-aes.c | 258

1 file changed, 206 insertions(+), 52 deletions(-)
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index b643d71298a9..d4501dc7e650 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -43,11 +43,11 @@
  * ---------------------------------------------------------------------------
  */
 
+#include <crypto/algapi.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/crypto.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <asm/byteorder.h>
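
The new <crypto/algapi.h> include pulls in the blkcipher walk machinery that the rewritten ECB/CBC handlers below are built on. The recurring pattern, reduced to a skeleton (a sketch only: process_blocks() is a placeholder, not a function in this driver):

```c
static int blkcipher_walk_skeleton(struct blkcipher_desc *desc,
                                   struct scatterlist *dst,
                                   struct scatterlist *src,
                                   unsigned int nbytes)
{
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        /* The core maps one contiguous chunk at a time into walk.src/walk.dst. */
        while ((nbytes = walk.nbytes)) {
                process_blocks(walk.src.virt.addr, walk.dst.virt.addr,
                               nbytes / AES_BLOCK_SIZE);  /* placeholder */
                nbytes &= AES_BLOCK_SIZE - 1;  /* bytes not consumed this pass */
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}
```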
@@ -59,6 +59,17 @@
 #define AES_EXTENDED_KEY_SIZE   64 /* in uint32_t units */
 #define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
 
+/* Control word. */
+struct cword {
+        unsigned int __attribute__ ((__packed__))
+                rounds:4,
+                algo:3,
+                keygen:1,
+                interm:1,
+                encdec:1,
+                ksize:2;
+} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
+
 /* Whenever making any changes to the following
  * structure *make sure* you keep E, d_data
  * and cword aligned on 16 Bytes boundaries!!! */
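
For context: struct cword mirrors the control-word layout that VIA's REP XCRYPT instructions read from memory, which is why it is packed and 16-byte aligned. A rough sketch of how the fields get filled for AES (fill_cwords() is a hypothetical helper; the real assignments happen inside this driver's aes_set_key()):

```c
#include <linux/string.h>

/* Sketch: populate encrypt/decrypt control words for a 16/24/32-byte
 * AES key. Per VIA's programming guide, rounds is 10/12/14 and ksize
 * is 0/1/2 for 128/192/256-bit keys; the remaining fields stay zero
 * in the simplest configuration. */
static void fill_cwords(struct cword *enc, struct cword *dec,
                        unsigned int key_len)
{
        memset(enc, 0, sizeof(*enc));
        enc->rounds = 10 + (key_len - 16) / 4;  /* 16 -> 10, 24 -> 12, 32 -> 14 */
        enc->ksize = (key_len - 16) / 8;        /* 16 -> 0,  24 -> 1,  32 -> 2  */

        *dec = *enc;
        dec->encdec = 1;                        /* decryption direction */
}
```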
@@ -286,9 +297,9 @@ aes_hw_extkey_available(uint8_t key_len)
         return 0;
 }
 
-static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
+static inline struct aes_ctx *aes_ctx_common(void *ctx)
 {
-        unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
+        unsigned long addr = (unsigned long)ctx;
         unsigned long align = PADLOCK_ALIGNMENT;
 
         if (align <= crypto_tfm_ctx_alignment())
@@ -296,16 +307,27 @@ static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
         return (struct aes_ctx *)ALIGN(addr, align);
 }
 
+static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
+{
+        return aes_ctx_common(crypto_tfm_ctx(tfm));
+}
+
+static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
+{
+        return aes_ctx_common(crypto_blkcipher_ctx(tfm));
+}
+
 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-                       unsigned int key_len, u32 *flags)
+                       unsigned int key_len)
 {
         struct aes_ctx *ctx = aes_ctx(tfm);
         const __le32 *key = (const __le32 *)in_key;
+        u32 *flags = &tfm->crt_flags;
         uint32_t i, t, u, v, w;
         uint32_t P[AES_EXTENDED_KEY_SIZE];
         uint32_t rounds;
 
-        if (key_len != 16 && key_len != 24 && key_len != 32) {
+        if (key_len % 8) {
                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                 return -EINVAL;
         }
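
Two things are easy to miss in the setkey hunk above. The relaxed key_len % 8 test is enough because the crypto core has already rejected any key outside the advertised 16..32-byte window before the setkey callback runs, so only 16, 24 and 32 get this far. And aes_ctx_common() exists because the core only guarantees crypto_tfm_ctx_alignment() for a context, while REP XCRYPT wants its data 16-byte aligned, so the driver rounds the context pointer up by hand. A sketch of that rounding (padlock_align() is a hypothetical name, and it assumes the allocation leaves alignment slack past the struct, as the aligned aes_ctx here does):

```c
#define PADLOCK_ALIGNMENT 16

/* Round a pointer up to the next 16-byte boundary -- the same
 * arithmetic the kernel's ALIGN(addr, PADLOCK_ALIGNMENT) performs. */
static inline void *padlock_align(void *ctx)
{
        unsigned long addr = (unsigned long)ctx;

        addr = (addr + PADLOCK_ALIGNMENT - 1) &
               ~((unsigned long)PADLOCK_ALIGNMENT - 1);
        return (void *)addr;
}
```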
@@ -430,80 +452,212 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
         padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
 }
 
-static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
-                                    const u8 *in, unsigned int nbytes)
+static struct crypto_alg aes_alg = {
+        .cra_name = "aes",
+        .cra_driver_name = "aes-padlock",
+        .cra_priority = PADLOCK_CRA_PRIORITY,
+        .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+        .cra_blocksize = AES_BLOCK_SIZE,
+        .cra_ctxsize = sizeof(struct aes_ctx),
+        .cra_alignmask = PADLOCK_ALIGNMENT - 1,
+        .cra_module = THIS_MODULE,
+        .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
+        .cra_u = {
+                .cipher = {
+                        .cia_min_keysize = AES_MIN_KEY_SIZE,
+                        .cia_max_keysize = AES_MAX_KEY_SIZE,
+                        .cia_setkey = aes_set_key,
+                        .cia_encrypt = aes_encrypt,
+                        .cia_decrypt = aes_decrypt,
+                }
+        }
+};
+
+static int ecb_aes_encrypt(struct blkcipher_desc *desc,
+                           struct scatterlist *dst, struct scatterlist *src,
+                           unsigned int nbytes)
 {
-        struct aes_ctx *ctx = aes_ctx(desc->tfm);
-        padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt,
-                           nbytes / AES_BLOCK_SIZE);
-        return nbytes & ~(AES_BLOCK_SIZE - 1);
+        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+        struct blkcipher_walk walk;
+        int err;
+
+        blkcipher_walk_init(&walk, dst, src, nbytes);
+        err = blkcipher_walk_virt(desc, &walk);
+
+        while ((nbytes = walk.nbytes)) {
+                padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+                                   ctx->E, &ctx->cword.encrypt,
+                                   nbytes / AES_BLOCK_SIZE);
+                nbytes &= AES_BLOCK_SIZE - 1;
+                err = blkcipher_walk_done(desc, &walk, nbytes);
+        }
+
+        return err;
 }
 
-static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
-                                    const u8 *in, unsigned int nbytes)
+static int ecb_aes_decrypt(struct blkcipher_desc *desc,
+                           struct scatterlist *dst, struct scatterlist *src,
+                           unsigned int nbytes)
 {
-        struct aes_ctx *ctx = aes_ctx(desc->tfm);
-        padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt,
-                           nbytes / AES_BLOCK_SIZE);
-        return nbytes & ~(AES_BLOCK_SIZE - 1);
+        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+        struct blkcipher_walk walk;
+        int err;
+
+        blkcipher_walk_init(&walk, dst, src, nbytes);
+        err = blkcipher_walk_virt(desc, &walk);
+
+        while ((nbytes = walk.nbytes)) {
+                padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+                                   ctx->D, &ctx->cword.decrypt,
+                                   nbytes / AES_BLOCK_SIZE);
+                nbytes &= AES_BLOCK_SIZE - 1;
+                err = blkcipher_walk_done(desc, &walk, nbytes);
+        }
+
+        return err;
 }
 
-static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
-                                    const u8 *in, unsigned int nbytes)
-{
-        struct aes_ctx *ctx = aes_ctx(desc->tfm);
-        u8 *iv;
+static struct crypto_alg ecb_aes_alg = {
+        .cra_name = "ecb(aes)",
+        .cra_driver_name = "ecb-aes-padlock",
+        .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
+        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+        .cra_blocksize = AES_BLOCK_SIZE,
+        .cra_ctxsize = sizeof(struct aes_ctx),
+        .cra_alignmask = PADLOCK_ALIGNMENT - 1,
+        .cra_type = &crypto_blkcipher_type,
+        .cra_module = THIS_MODULE,
+        .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
+        .cra_u = {
+                .blkcipher = {
+                        .min_keysize = AES_MIN_KEY_SIZE,
+                        .max_keysize = AES_MAX_KEY_SIZE,
+                        .setkey = aes_set_key,
+                        .encrypt = ecb_aes_encrypt,
+                        .decrypt = ecb_aes_decrypt,
+                }
+        }
+};
 
-        iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info,
-                                &ctx->cword.encrypt, nbytes / AES_BLOCK_SIZE);
-        memcpy(desc->info, iv, AES_BLOCK_SIZE);
+static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+                           struct scatterlist *dst, struct scatterlist *src,
+                           unsigned int nbytes)
+{
+        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+        struct blkcipher_walk walk;
+        int err;
+
+        blkcipher_walk_init(&walk, dst, src, nbytes);
+        err = blkcipher_walk_virt(desc, &walk);
+
+        while ((nbytes = walk.nbytes)) {
+                u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
+                                            walk.dst.virt.addr, ctx->E,
+                                            walk.iv, &ctx->cword.encrypt,
+                                            nbytes / AES_BLOCK_SIZE);
+                memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+                nbytes &= AES_BLOCK_SIZE - 1;
+                err = blkcipher_walk_done(desc, &walk, nbytes);
+        }
 
-        return nbytes & ~(AES_BLOCK_SIZE - 1);
+        return err;
 }
 
-static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
-                                    const u8 *in, unsigned int nbytes)
+static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+                           struct scatterlist *dst, struct scatterlist *src,
+                           unsigned int nbytes)
 {
-        struct aes_ctx *ctx = aes_ctx(desc->tfm);
-        padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt,
-                           nbytes / AES_BLOCK_SIZE);
-        return nbytes & ~(AES_BLOCK_SIZE - 1);
+        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+        struct blkcipher_walk walk;
+        int err;
+
+        blkcipher_walk_init(&walk, dst, src, nbytes);
+        err = blkcipher_walk_virt(desc, &walk);
+
+        while ((nbytes = walk.nbytes)) {
+                padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
+                                   ctx->D, walk.iv, &ctx->cword.decrypt,
+                                   nbytes / AES_BLOCK_SIZE);
+                nbytes &= AES_BLOCK_SIZE - 1;
+                err = blkcipher_walk_done(desc, &walk, nbytes);
+        }
+
+        return err;
 }
 
-static struct crypto_alg aes_alg = {
-        .cra_name = "aes",
-        .cra_driver_name = "aes-padlock",
-        .cra_priority = 300,
-        .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+static struct crypto_alg cbc_aes_alg = {
+        .cra_name = "cbc(aes)",
+        .cra_driver_name = "cbc-aes-padlock",
+        .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
+        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
         .cra_blocksize = AES_BLOCK_SIZE,
         .cra_ctxsize = sizeof(struct aes_ctx),
         .cra_alignmask = PADLOCK_ALIGNMENT - 1,
+        .cra_type = &crypto_blkcipher_type,
         .cra_module = THIS_MODULE,
-        .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
+        .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
         .cra_u = {
-                .cipher = {
-                        .cia_min_keysize = AES_MIN_KEY_SIZE,
-                        .cia_max_keysize = AES_MAX_KEY_SIZE,
-                        .cia_setkey = aes_set_key,
-                        .cia_encrypt = aes_encrypt,
-                        .cia_decrypt = aes_decrypt,
-                        .cia_encrypt_ecb = aes_encrypt_ecb,
-                        .cia_decrypt_ecb = aes_decrypt_ecb,
-                        .cia_encrypt_cbc = aes_encrypt_cbc,
-                        .cia_decrypt_cbc = aes_decrypt_cbc,
+                .blkcipher = {
+                        .min_keysize = AES_MIN_KEY_SIZE,
+                        .max_keysize = AES_MAX_KEY_SIZE,
+                        .ivsize = AES_BLOCK_SIZE,
+                        .setkey = aes_set_key,
+                        .encrypt = cbc_aes_encrypt,
+                        .decrypt = cbc_aes_decrypt,
                 }
         }
 };
 
-int __init padlock_init_aes(void)
+static int __init padlock_init(void)
 {
-        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+        int ret;
+
+        if (!cpu_has_xcrypt) {
+                printk(KERN_ERR PFX "VIA PadLock not detected.\n");
+                return -ENODEV;
+        }
+
+        if (!cpu_has_xcrypt_enabled) {
+                printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+                return -ENODEV;
+        }
 
         gen_tabs();
-        return crypto_register_alg(&aes_alg);
+        if ((ret = crypto_register_alg(&aes_alg)))
+                goto aes_err;
+
+        if ((ret = crypto_register_alg(&ecb_aes_alg)))
+                goto ecb_aes_err;
+
+        if ((ret = crypto_register_alg(&cbc_aes_alg)))
+                goto cbc_aes_err;
+
+        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+
+out:
+        return ret;
+
+cbc_aes_err:
+        crypto_unregister_alg(&ecb_aes_alg);
+ecb_aes_err:
+        crypto_unregister_alg(&aes_alg);
+aes_err:
+        printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
+        goto out;
 }
 
-void __exit padlock_fini_aes(void)
+static void __exit padlock_fini(void)
 {
+        crypto_unregister_alg(&cbc_aes_alg);
+        crypto_unregister_alg(&ecb_aes_alg);
         crypto_unregister_alg(&aes_alg);
 }
+
+module_init(padlock_init);
+module_exit(padlock_fini);
+
+MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig");
+
+MODULE_ALIAS("aes-padlock");
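
With module_init()/module_exit() and MODULE_ALIAS("aes-padlock") in place, the driver now loads as a standalone module. A minimal sketch of how a 2.6.19-era kernel user would reach the "cbc(aes)" implementation registered above (example_cbc_aes() and the hard-coded 16-byte key size are placeholders; error handling is trimmed):

```c
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Encrypt `len` bytes of `buf` in place with CBC-AES-128. The crypto
 * core picks the highest-priority provider for "cbc(aes)", so on a
 * PadLock CPU this resolves to cbc-aes-padlock. */
static int example_cbc_aes(u8 *buf, unsigned int len,
                           const u8 *key, u8 *iv)
{
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
        struct scatterlist sg;
        int err;

        /* Mask out CRYPTO_ALG_ASYNC: we want a synchronous cipher. */
        tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        crypto_blkcipher_setkey(tfm, key, 16);
        crypto_blkcipher_set_iv(tfm, iv, 16);

        sg_init_one(&sg, buf, len);
        desc.tfm = tfm;
        desc.flags = 0;

        err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);

        crypto_free_blkcipher(tfm);
        return err;
}
```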