author		Linus Torvalds <torvalds@linux-foundation.org>	2016-07-26 16:40:17 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-26 16:40:17 -0400
commit		bbce2ad2d711c12d93145a7bbdf086e73f414bcd (patch)
tree		35432a39f68f4c5df44ed38037cbf05adadb923e /include
parent		0f776dc377f6c87f4e4d4a5f63602f33fb93b31e (diff)
parent		0f95e2ffc58f5d32a90eb1051d17aeebc21cf91d (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"Here is the crypto update for 4.8:
API:
- first part of skcipher low-level conversions
- add KPP (Key-agreement Protocol Primitives) interface.
Algorithms:
- fix IPsec/cryptd reordering issues that affect aesni
- RSA no longer does explicit leading zero removal
- add SHA3
- add DH
- add ECDH
- improve DRBG performance by not doing CTR by hand
Drivers:
- add x86 AVX2 multibuffer SHA256/512
- add POWER8 optimised crc32c
- add xts support to vmx
- add DH support to qat
- add RSA support to caam
- add Layerscape support to caam
- add SEC1 AEAD support to talitos
- improve performance by chaining requests in marvell/cesa
- add support for Araneus Alea I USB RNG
- add support for Broadcom BCM5301 RNG
- add support for Amlogic Meson RNG
- add support Broadcom NSP SoC RNG"
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (180 commits)
crypto: vmx - Fix aes_p8_xts_decrypt build failure
crypto: vmx - Ignore generated files
crypto: vmx - Adding support for XTS
crypto: vmx - Adding asm subroutines for XTS
crypto: skcipher - add comment for skcipher_alg->base
crypto: testmgr - Print akcipher algorithm name
crypto: marvell - Fix wrong flag used for GFP in mv_cesa_dma_add_iv_op
crypto: nx - off by one bug in nx_of_update_msc()
crypto: rsa-pkcs1pad - fix rsa-pkcs1pad request struct
crypto: scatterwalk - Inline start/map/done
crypto: scatterwalk - Remove unnecessary BUG in scatterwalk_start
crypto: scatterwalk - Remove unnecessary advance in scatterwalk_pagedone
crypto: scatterwalk - Fix test in scatterwalk_done
crypto: api - Optimise away crypto_yield when hard preemption is on
crypto: scatterwalk - add no-copy support to copychunks
crypto: scatterwalk - Remove scatterwalk_bytes_sglen
crypto: omap - Stop using crypto scatterwalk_bytes_sglen
crypto: skcipher - Remove top-level givcipher interface
crypto: user - Remove crypto_lookup_skcipher call
crypto: cts - Convert to skcipher
...
Diffstat (limited to 'include')
 include/asm-generic/io.h           |  71
 include/asm-generic/iomap.h        |   8
 include/crypto/aead.h              |  12
 include/crypto/algapi.h            |   4
 include/crypto/cryptd.h            |   5
 include/crypto/dh.h                |  29
 include/crypto/drbg.h              |  12
 include/crypto/ecdh.h              |  30
 include/crypto/internal/aead.h     |  21
 include/crypto/internal/geniv.h    |   2
 include/crypto/internal/hash.h     |  12
 include/crypto/internal/kpp.h      |  64
 include/crypto/internal/rsa.h      |  42
 include/crypto/internal/skcipher.h | 122
 include/crypto/kpp.h               | 330
 include/crypto/mcryptd.h           |   8
 include/crypto/null.h              |  12
 include/crypto/scatterwalk.h       |  48
 include/crypto/sha3.h              |  29
 include/crypto/skcipher.h          | 207
 include/linux/crypto.h             |  31
 include/linux/mpi.h                |   3
 include/uapi/linux/cryptouser.h    |   5
23 files changed, 931 insertions(+), 176 deletions(-)
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 002b81f6f2bc..7ef015eb3403 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
| @@ -585,6 +585,16 @@ static inline u32 ioread32(const volatile void __iomem *addr) | |||
| 585 | } | 585 | } |
| 586 | #endif | 586 | #endif |
| 587 | 587 | ||
| 588 | #ifdef CONFIG_64BIT | ||
| 589 | #ifndef ioread64 | ||
| 590 | #define ioread64 ioread64 | ||
| 591 | static inline u64 ioread64(const volatile void __iomem *addr) | ||
| 592 | { | ||
| 593 | return readq(addr); | ||
| 594 | } | ||
| 595 | #endif | ||
| 596 | #endif /* CONFIG_64BIT */ | ||
| 597 | |||
| 588 | #ifndef iowrite8 | 598 | #ifndef iowrite8 |
| 589 | #define iowrite8 iowrite8 | 599 | #define iowrite8 iowrite8 |
| 590 | static inline void iowrite8(u8 value, volatile void __iomem *addr) | 600 | static inline void iowrite8(u8 value, volatile void __iomem *addr) |
| @@ -609,11 +619,21 @@ static inline void iowrite32(u32 value, volatile void __iomem *addr) | |||
| 609 | } | 619 | } |
| 610 | #endif | 620 | #endif |
| 611 | 621 | ||
| 622 | #ifdef CONFIG_64BIT | ||
| 623 | #ifndef iowrite64 | ||
| 624 | #define iowrite64 iowrite64 | ||
| 625 | static inline void iowrite64(u64 value, volatile void __iomem *addr) | ||
| 626 | { | ||
| 627 | writeq(value, addr); | ||
| 628 | } | ||
| 629 | #endif | ||
| 630 | #endif /* CONFIG_64BIT */ | ||
| 631 | |||
| 612 | #ifndef ioread16be | 632 | #ifndef ioread16be |
| 613 | #define ioread16be ioread16be | 633 | #define ioread16be ioread16be |
| 614 | static inline u16 ioread16be(const volatile void __iomem *addr) | 634 | static inline u16 ioread16be(const volatile void __iomem *addr) |
| 615 | { | 635 | { |
| 616 | return __be16_to_cpu(__raw_readw(addr)); | 636 | return swab16(readw(addr)); |
| 617 | } | 637 | } |
| 618 | #endif | 638 | #endif |
| 619 | 639 | ||
| @@ -621,15 +641,25 @@ static inline u16 ioread16be(const volatile void __iomem *addr) | |||
| 621 | #define ioread32be ioread32be | 641 | #define ioread32be ioread32be |
| 622 | static inline u32 ioread32be(const volatile void __iomem *addr) | 642 | static inline u32 ioread32be(const volatile void __iomem *addr) |
| 623 | { | 643 | { |
| 624 | return __be32_to_cpu(__raw_readl(addr)); | 644 | return swab32(readl(addr)); |
| 645 | } | ||
| 646 | #endif | ||
| 647 | |||
| 648 | #ifdef CONFIG_64BIT | ||
| 649 | #ifndef ioread64be | ||
| 650 | #define ioread64be ioread64be | ||
| 651 | static inline u64 ioread64be(const volatile void __iomem *addr) | ||
| 652 | { | ||
| 653 | return swab64(readq(addr)); | ||
| 625 | } | 654 | } |
| 626 | #endif | 655 | #endif |
| 656 | #endif /* CONFIG_64BIT */ | ||
| 627 | 657 | ||
| 628 | #ifndef iowrite16be | 658 | #ifndef iowrite16be |
| 629 | #define iowrite16be iowrite16be | 659 | #define iowrite16be iowrite16be |
| 630 | static inline void iowrite16be(u16 value, void volatile __iomem *addr) | 660 | static inline void iowrite16be(u16 value, void volatile __iomem *addr) |
| 631 | { | 661 | { |
| 632 | __raw_writew(__cpu_to_be16(value), addr); | 662 | writew(swab16(value), addr); |
| 633 | } | 663 | } |
| 634 | #endif | 664 | #endif |
| 635 | 665 | ||
| @@ -637,10 +667,20 @@ static inline void iowrite16be(u16 value, void volatile __iomem *addr) | |||
| 637 | #define iowrite32be iowrite32be | 667 | #define iowrite32be iowrite32be |
| 638 | static inline void iowrite32be(u32 value, volatile void __iomem *addr) | 668 | static inline void iowrite32be(u32 value, volatile void __iomem *addr) |
| 639 | { | 669 | { |
| 640 | __raw_writel(__cpu_to_be32(value), addr); | 670 | writel(swab32(value), addr); |
| 641 | } | 671 | } |
| 642 | #endif | 672 | #endif |
| 643 | 673 | ||
| 674 | #ifdef CONFIG_64BIT | ||
| 675 | #ifndef iowrite64be | ||
| 676 | #define iowrite64be iowrite64be | ||
| 677 | static inline void iowrite64be(u64 value, volatile void __iomem *addr) | ||
| 678 | { | ||
| 679 | writeq(swab64(value), addr); | ||
| 680 | } | ||
| 681 | #endif | ||
| 682 | #endif /* CONFIG_64BIT */ | ||
| 683 | |||
| 644 | #ifndef ioread8_rep | 684 | #ifndef ioread8_rep |
| 645 | #define ioread8_rep ioread8_rep | 685 | #define ioread8_rep ioread8_rep |
| 646 | static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer, | 686 | static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer, |
| @@ -668,6 +708,17 @@ static inline void ioread32_rep(const volatile void __iomem *addr, | |||
| 668 | } | 708 | } |
| 669 | #endif | 709 | #endif |
| 670 | 710 | ||
| 711 | #ifdef CONFIG_64BIT | ||
| 712 | #ifndef ioread64_rep | ||
| 713 | #define ioread64_rep ioread64_rep | ||
| 714 | static inline void ioread64_rep(const volatile void __iomem *addr, | ||
| 715 | void *buffer, unsigned int count) | ||
| 716 | { | ||
| 717 | readsq(addr, buffer, count); | ||
| 718 | } | ||
| 719 | #endif | ||
| 720 | #endif /* CONFIG_64BIT */ | ||
| 721 | |||
| 671 | #ifndef iowrite8_rep | 722 | #ifndef iowrite8_rep |
| 672 | #define iowrite8_rep iowrite8_rep | 723 | #define iowrite8_rep iowrite8_rep |
| 673 | static inline void iowrite8_rep(volatile void __iomem *addr, | 724 | static inline void iowrite8_rep(volatile void __iomem *addr, |
| @@ -697,6 +748,18 @@ static inline void iowrite32_rep(volatile void __iomem *addr, | |||
| 697 | writesl(addr, buffer, count); | 748 | writesl(addr, buffer, count); |
| 698 | } | 749 | } |
| 699 | #endif | 750 | #endif |
| 751 | |||
| 752 | #ifdef CONFIG_64BIT | ||
| 753 | #ifndef iowrite64_rep | ||
| 754 | #define iowrite64_rep iowrite64_rep | ||
| 755 | static inline void iowrite64_rep(volatile void __iomem *addr, | ||
| 756 | const void *buffer, | ||
| 757 | unsigned int count) | ||
| 758 | { | ||
| 759 | writesq(addr, buffer, count); | ||
| 760 | } | ||
| 761 | #endif | ||
| 762 | #endif /* CONFIG_64BIT */ | ||
| 700 | #endif /* CONFIG_GENERIC_IOMAP */ | 763 | #endif /* CONFIG_GENERIC_IOMAP */ |
| 701 | 764 | ||
| 702 | #ifdef __KERNEL__ | 765 | #ifdef __KERNEL__ |
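The hunks above make 64-bit MMIO accessors (ioread64/iowrite64, their big-endian forms and the _rep string variants) available from the generic header on CONFIG_64BIT builds. As a rough illustration only — not part of the patch — a hypothetical driver could then use them like this; the register block pointer and the 0x10 offset are made-up names, and a 64-bit kernel is assumed:

	#include <linux/io.h>

	/* Illustrative only: write and read back a 64-bit big-endian register. */
	static u64 dev_update_ctrl(void __iomem *regs, u64 val)
	{
		iowrite64be(val, regs + 0x10);	/* expands to writeq(swab64(val), ...) per the hunk above */
		return ioread64be(regs + 0x10);	/* expands to swab64(readq(...)) */
	}
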
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index d8f8622fa044..650fede33c25 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
| @@ -30,12 +30,20 @@ extern unsigned int ioread16(void __iomem *); | |||
| 30 | extern unsigned int ioread16be(void __iomem *); | 30 | extern unsigned int ioread16be(void __iomem *); |
| 31 | extern unsigned int ioread32(void __iomem *); | 31 | extern unsigned int ioread32(void __iomem *); |
| 32 | extern unsigned int ioread32be(void __iomem *); | 32 | extern unsigned int ioread32be(void __iomem *); |
| 33 | #ifdef CONFIG_64BIT | ||
| 34 | extern u64 ioread64(void __iomem *); | ||
| 35 | extern u64 ioread64be(void __iomem *); | ||
| 36 | #endif | ||
| 33 | 37 | ||
| 34 | extern void iowrite8(u8, void __iomem *); | 38 | extern void iowrite8(u8, void __iomem *); |
| 35 | extern void iowrite16(u16, void __iomem *); | 39 | extern void iowrite16(u16, void __iomem *); |
| 36 | extern void iowrite16be(u16, void __iomem *); | 40 | extern void iowrite16be(u16, void __iomem *); |
| 37 | extern void iowrite32(u32, void __iomem *); | 41 | extern void iowrite32(u32, void __iomem *); |
| 38 | extern void iowrite32be(u32, void __iomem *); | 42 | extern void iowrite32be(u32, void __iomem *); |
| 43 | #ifdef CONFIG_64BIT | ||
| 44 | extern void iowrite64(u64, void __iomem *); | ||
| 45 | extern void iowrite64be(u64, void __iomem *); | ||
| 46 | #endif | ||
| 39 | 47 | ||
| 40 | /* | 48 | /* |
| 41 | * "string" versions of the above. Note that they | 49 | * "string" versions of the above. Note that they |
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 75174f80a106..12f84327ca36 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
| @@ -112,11 +112,12 @@ struct aead_request { | |||
| 112 | * supplied during the decryption operation. This function is also | 112 | * supplied during the decryption operation. This function is also |
| 113 | * responsible for checking the authentication tag size for | 113 | * responsible for checking the authentication tag size for |
| 114 | * validity. | 114 | * validity. |
| 115 | * @setkey: see struct ablkcipher_alg | 115 | * @setkey: see struct skcipher_alg |
| 116 | * @encrypt: see struct ablkcipher_alg | 116 | * @encrypt: see struct skcipher_alg |
| 117 | * @decrypt: see struct ablkcipher_alg | 117 | * @decrypt: see struct skcipher_alg |
| 118 | * @geniv: see struct ablkcipher_alg | 118 | * @geniv: see struct skcipher_alg |
| 119 | * @ivsize: see struct ablkcipher_alg | 119 | * @ivsize: see struct skcipher_alg |
| 120 | * @chunksize: see struct skcipher_alg | ||
| 120 | * @init: Initialize the cryptographic transformation object. This function | 121 | * @init: Initialize the cryptographic transformation object. This function |
| 121 | * is used to initialize the cryptographic transformation object. | 122 | * is used to initialize the cryptographic transformation object. |
| 122 | * This function is called only once at the instantiation time, right | 123 | * This function is called only once at the instantiation time, right |
| @@ -145,6 +146,7 @@ struct aead_alg { | |||
| 145 | 146 | ||
| 146 | unsigned int ivsize; | 147 | unsigned int ivsize; |
| 147 | unsigned int maxauthsize; | 148 | unsigned int maxauthsize; |
| 149 | unsigned int chunksize; | ||
| 148 | 150 | ||
| 149 | struct crypto_alg base; | 151 | struct crypto_alg base; |
| 150 | }; | 152 | }; |
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index eeafd21afb44..8637cdfe382a 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
| @@ -244,6 +244,8 @@ static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta, | |||
| 244 | } | 244 | } |
| 245 | 245 | ||
| 246 | int crypto_attr_u32(struct rtattr *rta, u32 *num); | 246 | int crypto_attr_u32(struct rtattr *rta, u32 *num); |
| 247 | int crypto_inst_setname(struct crypto_instance *inst, const char *name, | ||
| 248 | struct crypto_alg *alg); | ||
| 247 | void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, | 249 | void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, |
| 248 | unsigned int head); | 250 | unsigned int head); |
| 249 | struct crypto_instance *crypto_alloc_instance(const char *name, | 251 | struct crypto_instance *crypto_alloc_instance(const char *name, |
| @@ -440,8 +442,10 @@ static inline int crypto_memneq(const void *a, const void *b, size_t size) | |||
| 440 | 442 | ||
| 441 | static inline void crypto_yield(u32 flags) | 443 | static inline void crypto_yield(u32 flags) |
| 442 | { | 444 | { |
| 445 | #if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY) | ||
| 443 | if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) | 446 | if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) |
| 444 | cond_resched(); | 447 | cond_resched(); |
| 448 | #endif | ||
| 445 | } | 449 | } |
| 446 | 450 | ||
| 447 | #endif /* _CRYPTO_ALGAPI_H */ | 451 | #endif /* _CRYPTO_ALGAPI_H */ |
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 1547f540c920..bc792d5a9e88 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
| @@ -31,6 +31,7 @@ static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast( | |||
| 31 | struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, | 31 | struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, |
| 32 | u32 type, u32 mask); | 32 | u32 type, u32 mask); |
| 33 | struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm); | 33 | struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm); |
| 34 | bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm); | ||
| 34 | void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); | 35 | void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); |
| 35 | 36 | ||
| 36 | struct cryptd_ahash { | 37 | struct cryptd_ahash { |
| @@ -48,6 +49,8 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, | |||
| 48 | u32 type, u32 mask); | 49 | u32 type, u32 mask); |
| 49 | struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm); | 50 | struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm); |
| 50 | struct shash_desc *cryptd_shash_desc(struct ahash_request *req); | 51 | struct shash_desc *cryptd_shash_desc(struct ahash_request *req); |
| 52 | /* Must be called without moving CPUs. */ | ||
| 53 | bool cryptd_ahash_queued(struct cryptd_ahash *tfm); | ||
| 51 | void cryptd_free_ahash(struct cryptd_ahash *tfm); | 54 | void cryptd_free_ahash(struct cryptd_ahash *tfm); |
| 52 | 55 | ||
| 53 | struct cryptd_aead { | 56 | struct cryptd_aead { |
| @@ -64,6 +67,8 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name, | |||
| 64 | u32 type, u32 mask); | 67 | u32 type, u32 mask); |
| 65 | 68 | ||
| 66 | struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm); | 69 | struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm); |
| 70 | /* Must be called without moving CPUs. */ | ||
| 71 | bool cryptd_aead_queued(struct cryptd_aead *tfm); | ||
| 67 | 72 | ||
| 68 | void cryptd_free_aead(struct cryptd_aead *tfm); | 73 | void cryptd_free_aead(struct cryptd_aead *tfm); |
| 69 | 74 | ||
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
new file mode 100644
index 000000000000..5102a8f282e6
--- /dev/null
+++ b/include/crypto/dh.h
| @@ -0,0 +1,29 @@ | |||
| 1 | /* | ||
| 2 | * Diffie-Hellman secret to be used with kpp API along with helper functions | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016, Intel Corporation | ||
| 5 | * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify it | ||
| 8 | * under the terms of the GNU General Public License as published by the Free | ||
| 9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 10 | * any later version. | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | #ifndef _CRYPTO_DH_ | ||
| 14 | #define _CRYPTO_DH_ | ||
| 15 | |||
| 16 | struct dh { | ||
| 17 | void *key; | ||
| 18 | void *p; | ||
| 19 | void *g; | ||
| 20 | unsigned int key_size; | ||
| 21 | unsigned int p_size; | ||
| 22 | unsigned int g_size; | ||
| 23 | }; | ||
| 24 | |||
| 25 | int crypto_dh_key_len(const struct dh *params); | ||
| 26 | int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params); | ||
| 27 | int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params); | ||
| 28 | |||
| 29 | #endif | ||
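The new header only carries the DH parameter container and its pack/unpack helpers; the buffer they produce is what gets handed to the KPP set_secret operation added later in this series. A minimal sketch of the encode side, assuming the caller already holds raw byte strings for the private key, prime p and generator g (the function and variable names here are illustrative, not from the patch):

	#include <crypto/dh.h>
	#include <linux/slab.h>

	/* Illustrative helper: pack DH parameters for crypto_kpp_set_secret(). */
	static void *pack_dh_secret(const u8 *key, unsigned int key_size,
				    const u8 *p, unsigned int p_size,
				    const u8 *g, unsigned int g_size,
				    unsigned int *out_len)
	{
		struct dh params = {
			.key = (void *)key, .key_size = key_size,
			.p = (void *)p, .p_size = p_size,
			.g = (void *)g, .g_size = g_size,
		};
		int len = crypto_dh_key_len(&params);
		void *buf = kmalloc(len, GFP_KERNEL);

		if (!buf)
			return NULL;
		if (crypto_dh_encode_key(buf, len, &params)) {
			kfree(buf);
			return NULL;
		}
		*out_len = len;
		return buf;
	}
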
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index d961b2b16f55..61580b19f9f6 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
| @@ -43,6 +43,7 @@ | |||
| 43 | #include <linux/random.h> | 43 | #include <linux/random.h> |
| 44 | #include <linux/scatterlist.h> | 44 | #include <linux/scatterlist.h> |
| 45 | #include <crypto/hash.h> | 45 | #include <crypto/hash.h> |
| 46 | #include <crypto/skcipher.h> | ||
| 46 | #include <linux/module.h> | 47 | #include <linux/module.h> |
| 47 | #include <linux/crypto.h> | 48 | #include <linux/crypto.h> |
| 48 | #include <linux/slab.h> | 49 | #include <linux/slab.h> |
| @@ -107,14 +108,25 @@ struct drbg_test_data { | |||
| 107 | struct drbg_state { | 108 | struct drbg_state { |
| 108 | struct mutex drbg_mutex; /* lock around DRBG */ | 109 | struct mutex drbg_mutex; /* lock around DRBG */ |
| 109 | unsigned char *V; /* internal state 10.1.1.1 1a) */ | 110 | unsigned char *V; /* internal state 10.1.1.1 1a) */ |
| 111 | unsigned char *Vbuf; | ||
| 110 | /* hash: static value 10.1.1.1 1b) hmac / ctr: key */ | 112 | /* hash: static value 10.1.1.1 1b) hmac / ctr: key */ |
| 111 | unsigned char *C; | 113 | unsigned char *C; |
| 114 | unsigned char *Cbuf; | ||
| 112 | /* Number of RNG requests since last reseed -- 10.1.1.1 1c) */ | 115 | /* Number of RNG requests since last reseed -- 10.1.1.1 1c) */ |
| 113 | size_t reseed_ctr; | 116 | size_t reseed_ctr; |
| 114 | size_t reseed_threshold; | 117 | size_t reseed_threshold; |
| 115 | /* some memory the DRBG can use for its operation */ | 118 | /* some memory the DRBG can use for its operation */ |
| 116 | unsigned char *scratchpad; | 119 | unsigned char *scratchpad; |
| 120 | unsigned char *scratchpadbuf; | ||
| 117 | void *priv_data; /* Cipher handle */ | 121 | void *priv_data; /* Cipher handle */ |
| 122 | |||
| 123 | struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */ | ||
| 124 | struct skcipher_request *ctr_req; /* CTR mode request handle */ | ||
| 125 | __u8 *ctr_null_value_buf; /* CTR mode unaligned buffer */ | ||
| 126 | __u8 *ctr_null_value; /* CTR mode aligned zero buf */ | ||
| 127 | struct completion ctr_completion; /* CTR mode async handler */ | ||
| 128 | int ctr_async_err; /* CTR mode async error */ | ||
| 129 | |||
| 118 | bool seeded; /* DRBG fully seeded? */ | 130 | bool seeded; /* DRBG fully seeded? */ |
| 119 | bool pr; /* Prediction resistance enabled? */ | 131 | bool pr; /* Prediction resistance enabled? */ |
| 120 | struct work_struct seed_work; /* asynchronous seeding support */ | 132 | struct work_struct seed_work; /* asynchronous seeding support */ |
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
new file mode 100644
index 000000000000..84bad548d194
--- /dev/null
+++ b/include/crypto/ecdh.h
| @@ -0,0 +1,30 @@ | |||
| 1 | /* | ||
| 2 | * ECDH params to be used with kpp API | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016, Intel Corporation | ||
| 5 | * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify it | ||
| 8 | * under the terms of the GNU General Public License as published by the Free | ||
| 9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 10 | * any later version. | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | #ifndef _CRYPTO_ECDH_ | ||
| 14 | #define _CRYPTO_ECDH_ | ||
| 15 | |||
| 16 | /* Curves IDs */ | ||
| 17 | #define ECC_CURVE_NIST_P192 0x0001 | ||
| 18 | #define ECC_CURVE_NIST_P256 0x0002 | ||
| 19 | |||
| 20 | struct ecdh { | ||
| 21 | unsigned short curve_id; | ||
| 22 | char *key; | ||
| 23 | unsigned short key_size; | ||
| 24 | }; | ||
| 25 | |||
| 26 | int crypto_ecdh_key_len(const struct ecdh *params); | ||
| 27 | int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p); | ||
| 28 | int crypto_ecdh_decode_key(const char *buf, unsigned int len, struct ecdh *p); | ||
| 29 | |||
| 30 | #endif | ||
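ECDH follows the same encode-then-set_secret pattern, with the curve chosen by ID instead of passing p and g. A comparable sketch (again with made-up names), assuming a caller-supplied private scalar:

	#include <crypto/ecdh.h>
	#include <linux/slab.h>

	/* Illustrative helper: pack an ECDH private key for the KPP API. */
	static void *pack_ecdh_secret(char *privkey, unsigned short key_size,
				      unsigned int *out_len)
	{
		struct ecdh params = {
			.curve_id = ECC_CURVE_NIST_P256,
			.key = privkey,
			.key_size = key_size,
		};
		int len = crypto_ecdh_key_len(&params);
		void *buf = kmalloc(len, GFP_KERNEL);

		if (buf && crypto_ecdh_encode_key(buf, len, &params)) {
			kfree(buf);
			return NULL;
		}
		if (buf)
			*out_len = len;
		return buf;
	}
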
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index da3864991d4c..6ad8e31d3868 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
| @@ -159,6 +159,27 @@ static inline struct aead_request *aead_get_backlog(struct aead_queue *queue) | |||
| 159 | return req ? container_of(req, struct aead_request, base) : NULL; | 159 | return req ? container_of(req, struct aead_request, base) : NULL; |
| 160 | } | 160 | } |
| 161 | 161 | ||
| 162 | static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg) | ||
| 163 | { | ||
| 164 | return alg->chunksize; | ||
| 165 | } | ||
| 166 | |||
| 167 | /** | ||
| 168 | * crypto_aead_chunksize() - obtain chunk size | ||
| 169 | * @tfm: cipher handle | ||
| 170 | * | ||
| 171 | * The block size is set to one for ciphers such as CCM. However, | ||
| 172 | * you still need to provide incremental updates in multiples of | ||
| 173 | * the underlying block size as the IV does not have sub-block | ||
| 174 | * granularity. This is known in this API as the chunk size. | ||
| 175 | * | ||
| 176 | * Return: chunk size in bytes | ||
| 177 | */ | ||
| 178 | static inline unsigned int crypto_aead_chunksize(struct crypto_aead *tfm) | ||
| 179 | { | ||
| 180 | return crypto_aead_alg_chunksize(crypto_aead_alg(tfm)); | ||
| 181 | } | ||
| 182 | |||
| 162 | int crypto_register_aead(struct aead_alg *alg); | 183 | int crypto_register_aead(struct aead_alg *alg); |
| 163 | void crypto_unregister_aead(struct aead_alg *alg); | 184 | void crypto_unregister_aead(struct aead_alg *alg); |
| 164 | int crypto_register_aeads(struct aead_alg *algs, int count); | 185 | int crypto_register_aeads(struct aead_alg *algs, int count); |
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 59333635e712..2bcfb931bc5b 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
| @@ -20,7 +20,7 @@ | |||
| 20 | struct aead_geniv_ctx { | 20 | struct aead_geniv_ctx { |
| 21 | spinlock_t lock; | 21 | spinlock_t lock; |
| 22 | struct crypto_aead *child; | 22 | struct crypto_aead *child; |
| 23 | struct crypto_blkcipher *null; | 23 | struct crypto_skcipher *sknull; |
| 24 | u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); | 24 | u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); |
| 25 | }; | 25 | }; |
| 26 | 26 | ||
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 49dae16f8929..1d4f365d8f03 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
| @@ -114,14 +114,10 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc); | |||
| 114 | int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); | 114 | int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); |
| 115 | int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); | 115 | int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); |
| 116 | 116 | ||
| 117 | int shash_ahash_mcryptd_update(struct ahash_request *req, | 117 | int ahash_mcryptd_update(struct ahash_request *desc); |
| 118 | struct shash_desc *desc); | 118 | int ahash_mcryptd_final(struct ahash_request *desc); |
| 119 | int shash_ahash_mcryptd_final(struct ahash_request *req, | 119 | int ahash_mcryptd_finup(struct ahash_request *desc); |
| 120 | struct shash_desc *desc); | 120 | int ahash_mcryptd_digest(struct ahash_request *desc); |
| 121 | int shash_ahash_mcryptd_finup(struct ahash_request *req, | ||
| 122 | struct shash_desc *desc); | ||
| 123 | int shash_ahash_mcryptd_digest(struct ahash_request *req, | ||
| 124 | struct shash_desc *desc); | ||
| 125 | 121 | ||
| 126 | int crypto_init_shash_ops_async(struct crypto_tfm *tfm); | 122 | int crypto_init_shash_ops_async(struct crypto_tfm *tfm); |
| 127 | 123 | ||
diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h
new file mode 100644
index 000000000000..ad3acf3649be
--- /dev/null
+++ b/include/crypto/internal/kpp.h
| @@ -0,0 +1,64 @@ | |||
| 1 | /* | ||
| 2 | * Key-agreement Protocol Primitives (KPP) | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016, Intel Corporation | ||
| 5 | * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify it | ||
| 8 | * under the terms of the GNU General Public License as published by the Free | ||
| 9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 10 | * any later version. | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | #ifndef _CRYPTO_KPP_INT_H | ||
| 14 | #define _CRYPTO_KPP_INT_H | ||
| 15 | #include <crypto/kpp.h> | ||
| 16 | #include <crypto/algapi.h> | ||
| 17 | |||
| 18 | /* | ||
| 19 | * Transform internal helpers. | ||
| 20 | */ | ||
| 21 | static inline void *kpp_request_ctx(struct kpp_request *req) | ||
| 22 | { | ||
| 23 | return req->__ctx; | ||
| 24 | } | ||
| 25 | |||
| 26 | static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm) | ||
| 27 | { | ||
| 28 | return tfm->base.__crt_ctx; | ||
| 29 | } | ||
| 30 | |||
| 31 | static inline void kpp_request_complete(struct kpp_request *req, int err) | ||
| 32 | { | ||
| 33 | req->base.complete(&req->base, err); | ||
| 34 | } | ||
| 35 | |||
| 36 | static inline const char *kpp_alg_name(struct crypto_kpp *tfm) | ||
| 37 | { | ||
| 38 | return crypto_kpp_tfm(tfm)->__crt_alg->cra_name; | ||
| 39 | } | ||
| 40 | |||
| 41 | /** | ||
| 42 | * crypto_register_kpp() -- Register key-agreement protocol primitives algorithm | ||
| 43 | * | ||
| 44 | * Function registers an implementation of a key-agreement protocol primitive | ||
| 45 | * algorithm | ||
| 46 | * | ||
| 47 | * @alg: algorithm definition | ||
| 48 | * | ||
| 49 | * Return: zero on success; error code in case of error | ||
| 50 | */ | ||
| 51 | int crypto_register_kpp(struct kpp_alg *alg); | ||
| 52 | |||
| 53 | /** | ||
| 54 | * crypto_unregister_kpp() -- Unregister key-agreement protocol primitive | ||
| 55 | * algorithm | ||
| 56 | * | ||
| 57 | * Function unregisters an implementation of a key-agreement protocol primitive | ||
| 58 | * algorithm | ||
| 59 | * | ||
| 60 | * @alg: algorithm definition | ||
| 61 | */ | ||
| 62 | void crypto_unregister_kpp(struct kpp_alg *alg); | ||
| 63 | |||
| 64 | #endif | ||
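This internal header is what a KPP implementation includes in order to register itself. A bare-bones registration sketch for a hypothetical "dh" provider follows; the my_dh_* callbacks and my_dh_ctx structure are assumed to exist elsewhere in such a driver and are not part of this patch:

	#include <crypto/internal/kpp.h>
	#include <linux/module.h>

	/* Hypothetical provider glue; the my_dh_* handlers are assumed elsewhere. */
	static struct kpp_alg my_dh_alg = {
		.set_secret		= my_dh_set_secret,
		.generate_public_key	= my_dh_generate_public_key,
		.compute_shared_secret	= my_dh_compute_shared_secret,
		.max_size		= my_dh_max_size,
		.base = {
			.cra_name	 = "dh",
			.cra_driver_name = "dh-mydriver",
			.cra_priority	 = 100,
			.cra_module	 = THIS_MODULE,
			.cra_ctxsize	 = sizeof(struct my_dh_ctx),
		},
	};

	static int __init my_dh_mod_init(void)
	{
		return crypto_register_kpp(&my_dh_alg);
	}

	static void __exit my_dh_mod_exit(void)
	{
		crypto_unregister_kpp(&my_dh_alg);
	}

	module_init(my_dh_mod_init);
	module_exit(my_dh_mod_exit);
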
diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h
index c7585bdecbc2..9e8f1590de98 100644
--- a/include/crypto/internal/rsa.h
+++ b/include/crypto/internal/rsa.h
| @@ -12,12 +12,44 @@ | |||
| 12 | */ | 12 | */ |
| 13 | #ifndef _RSA_HELPER_ | 13 | #ifndef _RSA_HELPER_ |
| 14 | #define _RSA_HELPER_ | 14 | #define _RSA_HELPER_ |
| 15 | #include <linux/mpi.h> | 15 | #include <linux/types.h> |
| 16 | 16 | ||
| 17 | /** | ||
| 18 | * rsa_key - RSA key structure | ||
| 19 | * @n : RSA modulus raw byte stream | ||
| 20 | * @e : RSA public exponent raw byte stream | ||
| 21 | * @d : RSA private exponent raw byte stream | ||
| 22 | * @p : RSA prime factor p of n raw byte stream | ||
| 23 | * @q : RSA prime factor q of n raw byte stream | ||
| 24 | * @dp : RSA exponent d mod (p - 1) raw byte stream | ||
| 25 | * @dq : RSA exponent d mod (q - 1) raw byte stream | ||
| 26 | * @qinv : RSA CRT coefficient q^(-1) mod p raw byte stream | ||
| 27 | * @n_sz : length in bytes of RSA modulus n | ||
| 28 | * @e_sz : length in bytes of RSA public exponent | ||
| 29 | * @d_sz : length in bytes of RSA private exponent | ||
| 30 | * @p_sz : length in bytes of p field | ||
| 31 | * @q_sz : length in bytes of q field | ||
| 32 | * @dp_sz : length in bytes of dp field | ||
| 33 | * @dq_sz : length in bytes of dq field | ||
| 34 | * @qinv_sz : length in bytes of qinv field | ||
| 35 | */ | ||
| 17 | struct rsa_key { | 36 | struct rsa_key { |
| 18 | MPI n; | 37 | const u8 *n; |
| 19 | MPI e; | 38 | const u8 *e; |
| 20 | MPI d; | 39 | const u8 *d; |
| 40 | const u8 *p; | ||
| 41 | const u8 *q; | ||
| 42 | const u8 *dp; | ||
| 43 | const u8 *dq; | ||
| 44 | const u8 *qinv; | ||
| 45 | size_t n_sz; | ||
| 46 | size_t e_sz; | ||
| 47 | size_t d_sz; | ||
| 48 | size_t p_sz; | ||
| 49 | size_t q_sz; | ||
| 50 | size_t dp_sz; | ||
| 51 | size_t dq_sz; | ||
| 52 | size_t qinv_sz; | ||
| 21 | }; | 53 | }; |
| 22 | 54 | ||
| 23 | int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, | 55 | int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, |
| @@ -26,7 +58,5 @@ int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, | |||
| 26 | int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, | 58 | int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, |
| 27 | unsigned int key_len); | 59 | unsigned int key_len); |
| 28 | 60 | ||
| 29 | void rsa_free_key(struct rsa_key *rsa_key); | ||
| 30 | |||
| 31 | extern struct crypto_template rsa_pkcs1pad_tmpl; | 61 | extern struct crypto_template rsa_pkcs1pad_tmpl; |
| 32 | #endif | 62 | #endif |
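With the MPI members gone, rsa_parse_pub_key()/rsa_parse_priv_key() leave the parsed fields pointing into the caller's BER-encoded key, which is why rsa_free_key() could be dropped. An illustrative (not authoritative) caller:

	#include <crypto/internal/rsa.h>
	#include <linux/kernel.h>

	/* Illustrative only: the parsed key borrows from "ber", nothing to free. */
	static int inspect_pub_key(const void *ber, unsigned int ber_len)
	{
		struct rsa_key key = {};
		int ret = rsa_parse_pub_key(&key, ber, ber_len);

		if (ret)
			return ret;

		pr_info("RSA modulus %zu bytes, public exponent %zu bytes\n",
			key.n_sz, key.e_sz);
		return 0;
	}
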
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 2cf7a61ece59..a21a95e1a375 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
| @@ -19,12 +19,46 @@ | |||
| 19 | 19 | ||
| 20 | struct rtattr; | 20 | struct rtattr; |
| 21 | 21 | ||
| 22 | struct skcipher_instance { | ||
| 23 | void (*free)(struct skcipher_instance *inst); | ||
| 24 | union { | ||
| 25 | struct { | ||
| 26 | char head[offsetof(struct skcipher_alg, base)]; | ||
| 27 | struct crypto_instance base; | ||
| 28 | } s; | ||
| 29 | struct skcipher_alg alg; | ||
| 30 | }; | ||
| 31 | }; | ||
| 32 | |||
| 22 | struct crypto_skcipher_spawn { | 33 | struct crypto_skcipher_spawn { |
| 23 | struct crypto_spawn base; | 34 | struct crypto_spawn base; |
| 24 | }; | 35 | }; |
| 25 | 36 | ||
| 26 | extern const struct crypto_type crypto_givcipher_type; | 37 | extern const struct crypto_type crypto_givcipher_type; |
| 27 | 38 | ||
| 39 | static inline struct crypto_instance *skcipher_crypto_instance( | ||
| 40 | struct skcipher_instance *inst) | ||
| 41 | { | ||
| 42 | return &inst->s.base; | ||
| 43 | } | ||
| 44 | |||
| 45 | static inline struct skcipher_instance *skcipher_alg_instance( | ||
| 46 | struct crypto_skcipher *skcipher) | ||
| 47 | { | ||
| 48 | return container_of(crypto_skcipher_alg(skcipher), | ||
| 49 | struct skcipher_instance, alg); | ||
| 50 | } | ||
| 51 | |||
| 52 | static inline void *skcipher_instance_ctx(struct skcipher_instance *inst) | ||
| 53 | { | ||
| 54 | return crypto_instance_ctx(skcipher_crypto_instance(inst)); | ||
| 55 | } | ||
| 56 | |||
| 57 | static inline void skcipher_request_complete(struct skcipher_request *req, int err) | ||
| 58 | { | ||
| 59 | req->base.complete(&req->base, err); | ||
| 60 | } | ||
| 61 | |||
| 28 | static inline void crypto_set_skcipher_spawn( | 62 | static inline void crypto_set_skcipher_spawn( |
| 29 | struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst) | 63 | struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst) |
| 30 | { | 64 | { |
| @@ -34,6 +68,12 @@ static inline void crypto_set_skcipher_spawn( | |||
| 34 | int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, | 68 | int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, |
| 35 | u32 type, u32 mask); | 69 | u32 type, u32 mask); |
| 36 | 70 | ||
| 71 | static inline int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn, | ||
| 72 | const char *name, u32 type, u32 mask) | ||
| 73 | { | ||
| 74 | return crypto_grab_skcipher(spawn, name, type, mask); | ||
| 75 | } | ||
| 76 | |||
| 37 | struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask); | 77 | struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask); |
| 38 | 78 | ||
| 39 | static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) | 79 | static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) |
| @@ -41,54 +81,42 @@ static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) | |||
| 41 | crypto_drop_spawn(&spawn->base); | 81 | crypto_drop_spawn(&spawn->base); |
| 42 | } | 82 | } |
| 43 | 83 | ||
| 44 | static inline struct crypto_alg *crypto_skcipher_spawn_alg( | 84 | static inline struct skcipher_alg *crypto_skcipher_spawn_alg( |
| 45 | struct crypto_skcipher_spawn *spawn) | 85 | struct crypto_skcipher_spawn *spawn) |
| 46 | { | 86 | { |
| 47 | return spawn->base.alg; | 87 | return container_of(spawn->base.alg, struct skcipher_alg, base); |
| 48 | } | 88 | } |
| 49 | 89 | ||
| 50 | static inline struct crypto_ablkcipher *crypto_spawn_skcipher( | 90 | static inline struct skcipher_alg *crypto_spawn_skcipher_alg( |
| 51 | struct crypto_skcipher_spawn *spawn) | 91 | struct crypto_skcipher_spawn *spawn) |
| 52 | { | 92 | { |
| 53 | return __crypto_ablkcipher_cast( | 93 | return crypto_skcipher_spawn_alg(spawn); |
| 54 | crypto_spawn_tfm(&spawn->base, crypto_skcipher_type(0), | ||
| 55 | crypto_skcipher_mask(0))); | ||
| 56 | } | 94 | } |
| 57 | 95 | ||
| 58 | int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req); | 96 | static inline struct crypto_skcipher *crypto_spawn_skcipher( |
| 59 | int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req); | 97 | struct crypto_skcipher_spawn *spawn) |
| 60 | const char *crypto_default_geniv(const struct crypto_alg *alg); | ||
| 61 | |||
| 62 | struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl, | ||
| 63 | struct rtattr **tb, u32 type, | ||
| 64 | u32 mask); | ||
| 65 | void skcipher_geniv_free(struct crypto_instance *inst); | ||
| 66 | int skcipher_geniv_init(struct crypto_tfm *tfm); | ||
| 67 | void skcipher_geniv_exit(struct crypto_tfm *tfm); | ||
| 68 | |||
| 69 | static inline struct crypto_ablkcipher *skcipher_geniv_cipher( | ||
| 70 | struct crypto_ablkcipher *geniv) | ||
| 71 | { | 98 | { |
| 72 | return crypto_ablkcipher_crt(geniv)->base; | 99 | return crypto_spawn_tfm2(&spawn->base); |
| 73 | } | 100 | } |
| 74 | 101 | ||
| 75 | static inline int skcipher_enqueue_givcrypt( | 102 | static inline struct crypto_skcipher *crypto_spawn_skcipher2( |
| 76 | struct crypto_queue *queue, struct skcipher_givcrypt_request *request) | 103 | struct crypto_skcipher_spawn *spawn) |
| 77 | { | 104 | { |
| 78 | return ablkcipher_enqueue_request(queue, &request->creq); | 105 | return crypto_spawn_skcipher(spawn); |
| 79 | } | 106 | } |
| 80 | 107 | ||
| 81 | static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt( | 108 | static inline void crypto_skcipher_set_reqsize( |
| 82 | struct crypto_queue *queue) | 109 | struct crypto_skcipher *skcipher, unsigned int reqsize) |
| 83 | { | 110 | { |
| 84 | return skcipher_givcrypt_cast(crypto_dequeue_request(queue)); | 111 | skcipher->reqsize = reqsize; |
| 85 | } | 112 | } |
| 86 | 113 | ||
| 87 | static inline void *skcipher_givcrypt_reqctx( | 114 | int crypto_register_skcipher(struct skcipher_alg *alg); |
| 88 | struct skcipher_givcrypt_request *req) | 115 | void crypto_unregister_skcipher(struct skcipher_alg *alg); |
| 89 | { | 116 | int crypto_register_skciphers(struct skcipher_alg *algs, int count); |
| 90 | return ablkcipher_request_ctx(&req->creq); | 117 | void crypto_unregister_skciphers(struct skcipher_alg *algs, int count); |
| 91 | } | 118 | int skcipher_register_instance(struct crypto_template *tmpl, |
| 119 | struct skcipher_instance *inst); | ||
| 92 | 120 | ||
| 93 | static inline void ablkcipher_request_complete(struct ablkcipher_request *req, | 121 | static inline void ablkcipher_request_complete(struct ablkcipher_request *req, |
| 94 | int err) | 122 | int err) |
| @@ -96,12 +124,6 @@ static inline void ablkcipher_request_complete(struct ablkcipher_request *req, | |||
| 96 | req->base.complete(&req->base, err); | 124 | req->base.complete(&req->base, err); |
| 97 | } | 125 | } |
| 98 | 126 | ||
| 99 | static inline void skcipher_givcrypt_complete( | ||
| 100 | struct skcipher_givcrypt_request *req, int err) | ||
| 101 | { | ||
| 102 | ablkcipher_request_complete(&req->creq, err); | ||
| 103 | } | ||
| 104 | |||
| 105 | static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req) | 127 | static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req) |
| 106 | { | 128 | { |
| 107 | return req->base.flags; | 129 | return req->base.flags; |
| @@ -122,5 +144,31 @@ static inline u32 skcipher_request_flags(struct skcipher_request *req) | |||
| 122 | return req->base.flags; | 144 | return req->base.flags; |
| 123 | } | 145 | } |
| 124 | 146 | ||
| 147 | static inline unsigned int crypto_skcipher_alg_min_keysize( | ||
| 148 | struct skcipher_alg *alg) | ||
| 149 | { | ||
| 150 | if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == | ||
| 151 | CRYPTO_ALG_TYPE_BLKCIPHER) | ||
| 152 | return alg->base.cra_blkcipher.min_keysize; | ||
| 153 | |||
| 154 | if (alg->base.cra_ablkcipher.encrypt) | ||
| 155 | return alg->base.cra_ablkcipher.min_keysize; | ||
| 156 | |||
| 157 | return alg->min_keysize; | ||
| 158 | } | ||
| 159 | |||
| 160 | static inline unsigned int crypto_skcipher_alg_max_keysize( | ||
| 161 | struct skcipher_alg *alg) | ||
| 162 | { | ||
| 163 | if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == | ||
| 164 | CRYPTO_ALG_TYPE_BLKCIPHER) | ||
| 165 | return alg->base.cra_blkcipher.max_keysize; | ||
| 166 | |||
| 167 | if (alg->base.cra_ablkcipher.encrypt) | ||
| 168 | return alg->base.cra_ablkcipher.max_keysize; | ||
| 169 | |||
| 170 | return alg->max_keysize; | ||
| 171 | } | ||
| 172 | |||
| 125 | #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ | 173 | #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ |
| 126 | 174 | ||
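The new instance and spawn helpers give templates a way to wrap a child skcipher without the removed givcipher plumbing. A sketch of a tfm init callback using them, assuming a my_ctx wrapper context and a spawn set up by surrounding (hypothetical) template code:

	#include <crypto/internal/skcipher.h>

	struct my_ctx {				/* hypothetical wrapper context */
		struct crypto_skcipher *child;
	};

	static int my_skcipher_init_tfm(struct crypto_skcipher *tfm)
	{
		struct skcipher_instance *inst = skcipher_alg_instance(tfm);
		struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
		struct my_ctx *ctx = crypto_tfm_ctx(crypto_skcipher_tfm(tfm));
		struct crypto_skcipher *child = crypto_spawn_skcipher(spawn);

		if (IS_ERR(child))
			return PTR_ERR(child);

		ctx->child = child;
		/* reserve room for one child request per wrapper request */
		crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
						 crypto_skcipher_reqsize(child));
		return 0;
	}
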
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
new file mode 100644
index 000000000000..30791f75c180
--- /dev/null
+++ b/include/crypto/kpp.h
| @@ -0,0 +1,330 @@ | |||
| 1 | /* | ||
| 2 | * Key-agreement Protocol Primitives (KPP) | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016, Intel Corporation | ||
| 5 | * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify it | ||
| 8 | * under the terms of the GNU General Public License as published by the Free | ||
| 9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 10 | * any later version. | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | |||
| 14 | #ifndef _CRYPTO_KPP_ | ||
| 15 | #define _CRYPTO_KPP_ | ||
| 16 | #include <linux/crypto.h> | ||
| 17 | |||
| 18 | /** | ||
| 19 | * struct kpp_request | ||
| 20 | * | ||
| 21 | * @base: Common attributes for async crypto requests | ||
| 22 | * @src: Source data | ||
| 23 | * @dst: Destination data | ||
| 24 | * @src_len: Size of the input buffer | ||
| 25 | * @dst_len: Size of the output buffer. It needs to be at least | ||
| 26 | * as big as the expected result depending on the operation | ||
| 27 | * After operation it will be updated with the actual size of the | ||
| 28 | * result. In case of error where the dst sgl size was insufficient, | ||
| 29 | * it will be updated to the size required for the operation. | ||
| 30 | * @__ctx: Start of private context data | ||
| 31 | */ | ||
| 32 | struct kpp_request { | ||
| 33 | struct crypto_async_request base; | ||
| 34 | struct scatterlist *src; | ||
| 35 | struct scatterlist *dst; | ||
| 36 | unsigned int src_len; | ||
| 37 | unsigned int dst_len; | ||
| 38 | void *__ctx[] CRYPTO_MINALIGN_ATTR; | ||
| 39 | }; | ||
| 40 | |||
| 41 | /** | ||
| 42 | * struct crypto_kpp - user-instantiated object which encapsulate | ||
| 43 | * algorithms and core processing logic | ||
| 44 | * | ||
| 45 | * @base: Common crypto API algorithm data structure | ||
| 46 | */ | ||
| 47 | struct crypto_kpp { | ||
| 48 | struct crypto_tfm base; | ||
| 49 | }; | ||
| 50 | |||
| 51 | /** | ||
| 52 | * struct kpp_alg - generic key-agreement protocol primitives | ||
| 53 | * | ||
| 54 | * @set_secret: Function invokes the protocol specific function to | ||
| 55 | * store the secret private key along with parameters. | ||
| 56 | * The implementation knows how to decode the buffer | ||
| 57 | * @generate_public_key: Function generate the public key to be sent to the | ||
| 58 | * counterpart. In case of error, where output is not big | ||
| 59 | * enough req->dst_len will be updated to the size | ||
| 60 | * required | ||
| 61 | * @compute_shared_secret: Function compute the shared secret as defined by | ||
| 62 | * the algorithm. The result is given back to the user. | ||
| 63 | * In case of error, where output is not big enough, | ||
| 64 | * req->dst_len will be updated to the size required | ||
| 65 | * @max_size: Function returns the size of the output buffer | ||
| 66 | * @init: Initialize the object. This is called only once at | ||
| 67 | * instantiation time. In case the cryptographic hardware | ||
| 68 | * needs to be initialized. Software fallback should be | ||
| 69 | * put in place here. | ||
| 70 | * @exit: Undo everything @init did. | ||
| 71 | * | ||
| 72 | * @reqsize: Request context size required by algorithm | ||
| 73 | * implementation | ||
| 74 | * @base Common crypto API algorithm data structure | ||
| 75 | */ | ||
| 76 | struct kpp_alg { | ||
| 77 | int (*set_secret)(struct crypto_kpp *tfm, void *buffer, | ||
| 78 | unsigned int len); | ||
| 79 | int (*generate_public_key)(struct kpp_request *req); | ||
| 80 | int (*compute_shared_secret)(struct kpp_request *req); | ||
| 81 | |||
| 82 | int (*max_size)(struct crypto_kpp *tfm); | ||
| 83 | |||
| 84 | int (*init)(struct crypto_kpp *tfm); | ||
| 85 | void (*exit)(struct crypto_kpp *tfm); | ||
| 86 | |||
| 87 | unsigned int reqsize; | ||
| 88 | struct crypto_alg base; | ||
| 89 | }; | ||
| 90 | |||
| 91 | /** | ||
| 92 | * DOC: Generic Key-agreement Protocol Primitives API | ||
| 93 | * | ||
| 94 | * The KPP API is used with the algorithm type | ||
| 95 | * CRYPTO_ALG_TYPE_KPP (listed as type "kpp" in /proc/crypto) | ||
| 96 | */ | ||
| 97 | |||
| 98 | /** | ||
| 99 | * crypto_alloc_kpp() - allocate KPP tfm handle | ||
| 100 | * @alg_name: is the name of the kpp algorithm (e.g. "dh", "ecdh") | ||
| 101 | * @type: specifies the type of the algorithm | ||
| 102 | * @mask: specifies the mask for the algorithm | ||
| 103 | * | ||
| 104 | * Allocate a handle for kpp algorithm. The returned struct crypto_kpp | ||
| 105 | * is required for any following API invocation | ||
| 106 | * | ||
| 107 | * Return: allocated handle in case of success; IS_ERR() is true in case of | ||
| 108 | * an error, PTR_ERR() returns the error code. | ||
| 109 | */ | ||
| 110 | struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask); | ||
| 111 | |||
| 112 | static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm) | ||
| 113 | { | ||
| 114 | return &tfm->base; | ||
| 115 | } | ||
| 116 | |||
| 117 | static inline struct kpp_alg *__crypto_kpp_alg(struct crypto_alg *alg) | ||
| 118 | { | ||
| 119 | return container_of(alg, struct kpp_alg, base); | ||
| 120 | } | ||
| 121 | |||
| 122 | static inline struct crypto_kpp *__crypto_kpp_tfm(struct crypto_tfm *tfm) | ||
| 123 | { | ||
| 124 | return container_of(tfm, struct crypto_kpp, base); | ||
| 125 | } | ||
| 126 | |||
| 127 | static inline struct kpp_alg *crypto_kpp_alg(struct crypto_kpp *tfm) | ||
| 128 | { | ||
| 129 | return __crypto_kpp_alg(crypto_kpp_tfm(tfm)->__crt_alg); | ||
| 130 | } | ||
| 131 | |||
| 132 | static inline unsigned int crypto_kpp_reqsize(struct crypto_kpp *tfm) | ||
| 133 | { | ||
| 134 | return crypto_kpp_alg(tfm)->reqsize; | ||
| 135 | } | ||
| 136 | |||
| 137 | static inline void kpp_request_set_tfm(struct kpp_request *req, | ||
| 138 | struct crypto_kpp *tfm) | ||
| 139 | { | ||
| 140 | req->base.tfm = crypto_kpp_tfm(tfm); | ||
| 141 | } | ||
| 142 | |||
| 143 | static inline struct crypto_kpp *crypto_kpp_reqtfm(struct kpp_request *req) | ||
| 144 | { | ||
| 145 | return __crypto_kpp_tfm(req->base.tfm); | ||
| 146 | } | ||
| 147 | |||
| 148 | /** | ||
| 149 | * crypto_free_kpp() - free KPP tfm handle | ||
| 150 | * | ||
| 151 | * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() | ||
| 152 | */ | ||
| 153 | static inline void crypto_free_kpp(struct crypto_kpp *tfm) | ||
| 154 | { | ||
| 155 | crypto_destroy_tfm(tfm, crypto_kpp_tfm(tfm)); | ||
| 156 | } | ||
| 157 | |||
| 158 | /** | ||
| 159 | * kpp_request_alloc() - allocates kpp request | ||
| 160 | * | ||
| 161 | * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() | ||
| 162 | * @gfp: allocation flags | ||
| 163 | * | ||
| 164 | * Return: allocated handle in case of success or NULL in case of an error. | ||
| 165 | */ | ||
| 166 | static inline struct kpp_request *kpp_request_alloc(struct crypto_kpp *tfm, | ||
| 167 | gfp_t gfp) | ||
| 168 | { | ||
| 169 | struct kpp_request *req; | ||
| 170 | |||
| 171 | req = kmalloc(sizeof(*req) + crypto_kpp_reqsize(tfm), gfp); | ||
| 172 | if (likely(req)) | ||
| 173 | kpp_request_set_tfm(req, tfm); | ||
| 174 | |||
| 175 | return req; | ||
| 176 | } | ||
| 177 | |||
| 178 | /** | ||
| 179 | * kpp_request_free() - zeroize and free kpp request | ||
| 180 | * | ||
| 181 | * @req: request to free | ||
| 182 | */ | ||
| 183 | static inline void kpp_request_free(struct kpp_request *req) | ||
| 184 | { | ||
| 185 | kzfree(req); | ||
| 186 | } | ||
| 187 | |||
| 188 | /** | ||
| 189 | * kpp_request_set_callback() - Sets an asynchronous callback. | ||
| 190 | * | ||
| 191 | * Callback will be called when an asynchronous operation on a given | ||
| 192 | * request is finished. | ||
| 193 | * | ||
| 194 | * @req: request that the callback will be set for | ||
| 195 | * @flgs: specify for instance if the operation may backlog | ||
| 196 | * @cmpl: callback which will be called | ||
| 197 | * @data: private data used by the caller | ||
| 198 | */ | ||
| 199 | static inline void kpp_request_set_callback(struct kpp_request *req, | ||
| 200 | u32 flgs, | ||
| 201 | crypto_completion_t cmpl, | ||
| 202 | void *data) | ||
| 203 | { | ||
| 204 | req->base.complete = cmpl; | ||
| 205 | req->base.data = data; | ||
| 206 | req->base.flags = flgs; | ||
| 207 | } | ||
| 208 | |||
| 209 | /** | ||
| 210 | * kpp_request_set_input() - Sets input buffer | ||
| 211 | * | ||
| 212 | * Sets parameters required by generate_public_key | ||
| 213 | * | ||
| 214 | * @req: kpp request | ||
| 215 | * @input: ptr to input scatter list | ||
| 216 | * @input_len: size of the input scatter list | ||
| 217 | */ | ||
| 218 | static inline void kpp_request_set_input(struct kpp_request *req, | ||
| 219 | struct scatterlist *input, | ||
| 220 | unsigned int input_len) | ||
| 221 | { | ||
| 222 | req->src = input; | ||
| 223 | req->src_len = input_len; | ||
| 224 | } | ||
| 225 | |||
| 226 | /** | ||
| 227 | * kpp_request_set_output() - Sets output buffer | ||
| 228 | * | ||
| 229 | * Sets parameters required by kpp operation | ||
| 230 | * | ||
| 231 | * @req: kpp request | ||
| 232 | * @output: ptr to output scatter list | ||
| 233 | * @output_len: size of the output scatter list | ||
| 234 | */ | ||
| 235 | static inline void kpp_request_set_output(struct kpp_request *req, | ||
| 236 | struct scatterlist *output, | ||
| 237 | unsigned int output_len) | ||
| 238 | { | ||
| 239 | req->dst = output; | ||
| 240 | req->dst_len = output_len; | ||
| 241 | } | ||
| 242 | |||
| 243 | enum { | ||
| 244 | CRYPTO_KPP_SECRET_TYPE_UNKNOWN, | ||
| 245 | CRYPTO_KPP_SECRET_TYPE_DH, | ||
| 246 | CRYPTO_KPP_SECRET_TYPE_ECDH, | ||
| 247 | }; | ||
| 248 | |||
| 249 | /** | ||
| 250 | * struct kpp_secret - small header for packing secret buffer | ||
| 251 | * | ||
| 252 | * @type: define type of secret. Each kpp type will define its own | ||
| 253 | * @len: specify the len of the secret, include the header, that | ||
| 254 | * follows the struct | ||
| 255 | */ | ||
| 256 | struct kpp_secret { | ||
| 257 | unsigned short type; | ||
| 258 | unsigned short len; | ||
| 259 | }; | ||
| 260 | |||
| 261 | /** | ||
| 262 | * crypto_kpp_set_secret() - Invoke kpp operation | ||
| 263 | * | ||
| 264 | * Function invokes the specific kpp operation for a given alg. | ||
| 265 | * | ||
| 266 | * @tfm: tfm handle | ||
| 267 | * | ||
| 268 | * Return: zero on success; error code in case of error | ||
| 269 | */ | ||
| 270 | static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, void *buffer, | ||
| 271 | unsigned int len) | ||
| 272 | { | ||
| 273 | struct kpp_alg *alg = crypto_kpp_alg(tfm); | ||
| 274 | |||
| 275 | return alg->set_secret(tfm, buffer, len); | ||
| 276 | } | ||
| 277 | |||
| 278 | /** | ||
| 279 | * crypto_kpp_generate_public_key() - Invoke kpp operation | ||
| 280 | * | ||
| 281 | * Function invokes the specific kpp operation for generating the public part | ||
| 282 | * for a given kpp algorithm | ||
| 283 | * | ||
| 284 | * @req: kpp key request | ||
| 285 | * | ||
| 286 | * Return: zero on success; error code in case of error | ||
| 287 | */ | ||
| 288 | static inline int crypto_kpp_generate_public_key(struct kpp_request *req) | ||
| 289 | { | ||
| 290 | struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); | ||
| 291 | struct kpp_alg *alg = crypto_kpp_alg(tfm); | ||
| 292 | |||
| 293 | return alg->generate_public_key(req); | ||
| 294 | } | ||
| 295 | |||
| 296 | /** | ||
| 297 | * crypto_kpp_compute_shared_secret() - Invoke kpp operation | ||
| 298 | * | ||
| 299 | * Function invokes the specific kpp operation for computing the shared secret | ||
| 300 | * for a given kpp algorithm. | ||
| 301 | * | ||
| 302 | * @req: kpp key request | ||
| 303 | * | ||
| 304 | * Return: zero on success; error code in case of error | ||
| 305 | */ | ||
| 306 | static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req) | ||
| 307 | { | ||
| 308 | struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); | ||
| 309 | struct kpp_alg *alg = crypto_kpp_alg(tfm); | ||
| 310 | |||
| 311 | return alg->compute_shared_secret(req); | ||
| 312 | } | ||
| 313 | |||
| 314 | /** | ||
| 315 | * crypto_kpp_maxsize() - Get len for output buffer | ||
| 316 | * | ||
| 317 | * Function returns the output buffer size required | ||
| 318 | * | ||
| 319 | * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() | ||
| 320 | * | ||
| 321 | * Return: minimum len for output buffer or error code if key hasn't been set | ||
| 322 | */ | ||
| 323 | static inline int crypto_kpp_maxsize(struct crypto_kpp *tfm) | ||
| 324 | { | ||
| 325 | struct kpp_alg *alg = crypto_kpp_alg(tfm); | ||
| 326 | |||
| 327 | return alg->max_size(tfm); | ||
| 328 | } | ||
| 329 | |||
| 330 | #endif | ||
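Putting the user-facing pieces together, a caller allocates a tfm, loads the packed secret and drives requests through it. The sketch below shows the generate-public-key leg for "dh"; it is illustrative only, asynchronous completion handling is trimmed, and the secret buffer is assumed to come from crypto_dh_encode_key() while pub_sg is an output scatterlist sized via crypto_kpp_maxsize():

	#include <crypto/kpp.h>
	#include <linux/scatterlist.h>

	/* Illustrative only: derive our DH public value through the KPP API. */
	static int dh_make_public(void *secret, unsigned int secret_len,
				  struct scatterlist *pub_sg, unsigned int pub_len)
	{
		struct crypto_kpp *tfm;
		struct kpp_request *req;
		int ret;

		tfm = crypto_alloc_kpp("dh", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_kpp_set_secret(tfm, secret, secret_len);
		if (ret)
			goto out_tfm;

		req = kpp_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_tfm;
		}

		kpp_request_set_output(req, pub_sg, pub_len);
		ret = crypto_kpp_generate_public_key(req);	/* may complete asynchronously */

		kpp_request_free(req);
	out_tfm:
		crypto_free_kpp(tfm);
		return ret;
	}
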
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
index c23ee1f7ee80..4a53c0d38cd2 100644
--- a/include/crypto/mcryptd.h
+++ b/include/crypto/mcryptd.h
| @@ -39,7 +39,7 @@ struct mcryptd_instance_ctx { | |||
| 39 | }; | 39 | }; |
| 40 | 40 | ||
| 41 | struct mcryptd_hash_ctx { | 41 | struct mcryptd_hash_ctx { |
| 42 | struct crypto_shash *child; | 42 | struct crypto_ahash *child; |
| 43 | struct mcryptd_alg_state *alg_state; | 43 | struct mcryptd_alg_state *alg_state; |
| 44 | }; | 44 | }; |
| 45 | 45 | ||
| @@ -59,13 +59,13 @@ struct mcryptd_hash_request_ctx { | |||
| 59 | struct crypto_hash_walk walk; | 59 | struct crypto_hash_walk walk; |
| 60 | u8 *out; | 60 | u8 *out; |
| 61 | int flag; | 61 | int flag; |
| 62 | struct shash_desc desc; | 62 | struct ahash_request areq; |
| 63 | }; | 63 | }; |
| 64 | 64 | ||
| 65 | struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name, | 65 | struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name, |
| 66 | u32 type, u32 mask); | 66 | u32 type, u32 mask); |
| 67 | struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm); | 67 | struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm); |
| 68 | struct shash_desc *mcryptd_shash_desc(struct ahash_request *req); | 68 | struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req); |
| 69 | void mcryptd_free_ahash(struct mcryptd_ahash *tfm); | 69 | void mcryptd_free_ahash(struct mcryptd_ahash *tfm); |
| 70 | void mcryptd_flusher(struct work_struct *work); | 70 | void mcryptd_flusher(struct work_struct *work); |
| 71 | 71 | ||
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 06dc30d9f56e..3f0c59fb0a61 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
| @@ -8,7 +8,17 @@ | |||
| 8 | #define NULL_DIGEST_SIZE 0 | 8 | #define NULL_DIGEST_SIZE 0 |
| 9 | #define NULL_IV_SIZE 0 | 9 | #define NULL_IV_SIZE 0 |
| 10 | 10 | ||
| 11 | struct crypto_blkcipher *crypto_get_default_null_skcipher(void); | 11 | struct crypto_skcipher *crypto_get_default_null_skcipher(void); |
| 12 | void crypto_put_default_null_skcipher(void); | 12 | void crypto_put_default_null_skcipher(void); |
| 13 | 13 | ||
| 14 | static inline struct crypto_skcipher *crypto_get_default_null_skcipher2(void) | ||
| 15 | { | ||
| 16 | return crypto_get_default_null_skcipher(); | ||
| 17 | } | ||
| 18 | |||
| 19 | static inline void crypto_put_default_null_skcipher2(void) | ||
| 20 | { | ||
| 21 | crypto_put_default_null_skcipher(); | ||
| 22 | } | ||
| 23 | |||
| 14 | #endif | 24 | #endif |
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 35f99b68d037..880e6be9e95e 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
| @@ -16,14 +16,10 @@ | |||
| 16 | #ifndef _CRYPTO_SCATTERWALK_H | 16 | #ifndef _CRYPTO_SCATTERWALK_H |
| 17 | #define _CRYPTO_SCATTERWALK_H | 17 | #define _CRYPTO_SCATTERWALK_H |
| 18 | 18 | ||
| 19 | #include <asm/kmap_types.h> | ||
| 20 | #include <crypto/algapi.h> | 19 | #include <crypto/algapi.h> |
| 21 | #include <linux/hardirq.h> | ||
| 22 | #include <linux/highmem.h> | 20 | #include <linux/highmem.h> |
| 23 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
| 24 | #include <linux/mm.h> | ||
| 25 | #include <linux/scatterlist.h> | 22 | #include <linux/scatterlist.h> |
| 26 | #include <linux/sched.h> | ||
| 27 | 23 | ||
| 28 | static inline void scatterwalk_crypto_chain(struct scatterlist *head, | 24 | static inline void scatterwalk_crypto_chain(struct scatterlist *head, |
| 29 | struct scatterlist *sg, | 25 | struct scatterlist *sg, |
| @@ -83,17 +79,53 @@ static inline void scatterwalk_unmap(void *vaddr) | |||
| 83 | kunmap_atomic(vaddr); | 79 | kunmap_atomic(vaddr); |
| 84 | } | 80 | } |
| 85 | 81 | ||
| 86 | void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg); | 82 | static inline void scatterwalk_start(struct scatter_walk *walk, |
| 83 | struct scatterlist *sg) | ||
| 84 | { | ||
| 85 | walk->sg = sg; | ||
| 86 | walk->offset = sg->offset; | ||
| 87 | } | ||
| 88 | |||
| 89 | static inline void *scatterwalk_map(struct scatter_walk *walk) | ||
| 90 | { | ||
| 91 | return kmap_atomic(scatterwalk_page(walk)) + | ||
| 92 | offset_in_page(walk->offset); | ||
| 93 | } | ||
| 94 | |||
| 95 | static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out, | ||
| 96 | unsigned int more) | ||
| 97 | { | ||
| 98 | if (out) { | ||
| 99 | struct page *page; | ||
| 100 | |||
| 101 | page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); | ||
| 102 | /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as | ||
| 103 | * PageSlab cannot be optimised away per se due to | ||
| 104 | * use of volatile pointer. | ||
| 105 | */ | ||
| 106 | if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page)) | ||
| 107 | flush_dcache_page(page); | ||
| 108 | } | ||
| 109 | |||
| 110 | if (more && walk->offset >= walk->sg->offset + walk->sg->length) | ||
| 111 | scatterwalk_start(walk, sg_next(walk->sg)); | ||
| 112 | } | ||
| 113 | |||
| 114 | static inline void scatterwalk_done(struct scatter_walk *walk, int out, | ||
| 115 | int more) | ||
| 116 | { | ||
| 117 | if (!more || walk->offset >= walk->sg->offset + walk->sg->length || | ||
| 118 | !(walk->offset & (PAGE_SIZE - 1))) | ||
| 119 | scatterwalk_pagedone(walk, out, more); | ||
| 120 | } | ||
| 121 | |||
| 87 | void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, | 122 | void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, |
| 88 | size_t nbytes, int out); | 123 | size_t nbytes, int out); |
| 89 | void *scatterwalk_map(struct scatter_walk *walk); | 124 | void *scatterwalk_map(struct scatter_walk *walk); |
| 90 | void scatterwalk_done(struct scatter_walk *walk, int out, int more); | ||
| 91 | 125 | ||
| 92 | void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, | 126 | void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, |
| 93 | unsigned int start, unsigned int nbytes, int out); | 127 | unsigned int start, unsigned int nbytes, int out); |
| 94 | 128 | ||
| 95 | int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes); | ||
| 96 | |||
| 97 | struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], | 129 | struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], |
| 98 | struct scatterlist *src, | 130 | struct scatterlist *src, |
| 99 | unsigned int len); | 131 | unsigned int len); |
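A sketch of the start/map/unmap/done sequence that the now-inline helpers implement, copying bytes out of a scatterlist by hand; scatterwalk_copychunks() does the same job, and scatterwalk_clamp()/scatterwalk_advance() are existing helpers from the same header that are not shown in this hunk:

```c
#include <crypto/scatterwalk.h>

static void example_sg_copy_out(u8 *dst, struct scatterlist *sg,
				unsigned int nbytes)
{
	struct scatter_walk walk;

	scatterwalk_start(&walk, sg);

	while (nbytes) {
		unsigned int len = scatterwalk_clamp(&walk, nbytes);
		void *vaddr = scatterwalk_map(&walk);

		memcpy(dst, vaddr, len);
		scatterwalk_unmap(vaddr);

		scatterwalk_advance(&walk, len);
		dst += len;
		nbytes -= len;

		/* out=0: we only read; "more" is the remaining byte count. */
		scatterwalk_done(&walk, 0, nbytes);
	}
}
```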
diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h new file mode 100644 index 000000000000..f4c9f68f5ffe --- /dev/null +++ b/include/crypto/sha3.h | |||
| @@ -0,0 +1,29 @@ | |||
| 1 | /* | ||
| 2 | * Common values for SHA-3 algorithms | ||
| 3 | */ | ||
| 4 | #ifndef __CRYPTO_SHA3_H__ | ||
| 5 | #define __CRYPTO_SHA3_H__ | ||
| 6 | |||
| 7 | #define SHA3_224_DIGEST_SIZE (224 / 8) | ||
| 8 | #define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE) | ||
| 9 | |||
| 10 | #define SHA3_256_DIGEST_SIZE (256 / 8) | ||
| 11 | #define SHA3_256_BLOCK_SIZE (200 - 2 * SHA3_256_DIGEST_SIZE) | ||
| 12 | |||
| 13 | #define SHA3_384_DIGEST_SIZE (384 / 8) | ||
| 14 | #define SHA3_384_BLOCK_SIZE (200 - 2 * SHA3_384_DIGEST_SIZE) | ||
| 15 | |||
| 16 | #define SHA3_512_DIGEST_SIZE (512 / 8) | ||
| 17 | #define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE) | ||
| 18 | |||
| 19 | struct sha3_state { | ||
| 20 | u64 st[25]; | ||
| 21 | unsigned int md_len; | ||
| 22 | unsigned int rsiz; | ||
| 23 | unsigned int rsizw; | ||
| 24 | |||
| 25 | unsigned int partial; | ||
| 26 | u8 buf[SHA3_224_BLOCK_SIZE]; | ||
| 27 | }; | ||
| 28 | |||
| 29 | #endif | ||
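A minimal sketch of hashing a buffer with the new SHA-3 code through the regular shash API, assuming CONFIG_CRYPTO_SHA3 is enabled so that "sha3-256" resolves:

```c
#include <crypto/hash.h>
#include <crypto/sha3.h>
#include <linux/err.h>

static int example_sha3_256(const u8 *data, unsigned int len,
			    u8 out[SHA3_256_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha3-256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		err = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return err;
}
```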
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 0f987f50bb52..cc4d98a7892e 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h | |||
| @@ -65,86 +65,80 @@ struct crypto_skcipher { | |||
| 65 | struct crypto_tfm base; | 65 | struct crypto_tfm base; |
| 66 | }; | 66 | }; |
| 67 | 67 | ||
| 68 | #define SKCIPHER_REQUEST_ON_STACK(name, tfm) \ | 68 | /** |
| 69 | char __##name##_desc[sizeof(struct skcipher_request) + \ | 69 | * struct skcipher_alg - symmetric key cipher definition |
| 70 | crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \ | 70 | * @min_keysize: Minimum key size supported by the transformation. This is the |
| 71 | struct skcipher_request *name = (void *)__##name##_desc | 71 | * smallest key length supported by this transformation algorithm. |
| 72 | 72 | * This must be set to one of the pre-defined values as this is | |
| 73 | static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm( | 73 | * not hardware specific. Possible values for this field can be |
| 74 | struct skcipher_givcrypt_request *req) | 74 | * found via git grep "_MIN_KEY_SIZE" include/crypto/ |
| 75 | { | 75 | * @max_keysize: Maximum key size supported by the transformation. This is the |
| 76 | return crypto_ablkcipher_reqtfm(&req->creq); | 76 | * largest key length supported by this transformation algorithm. |
| 77 | } | 77 | * This must be set to one of the pre-defined values as this is |
| 78 | * not hardware specific. Possible values for this field can be | ||
| 79 | * found via git grep "_MAX_KEY_SIZE" include/crypto/ | ||
| 80 | * @setkey: Set key for the transformation. This function is used to either | ||
| 81 | * program a supplied key into the hardware or store the key in the | ||
| 82 | * transformation context for programming it later. Note that this | ||
| 83 | * function does modify the transformation context. This function can | ||
| 84 | * be called multiple times during the existence of the transformation | ||
| 85 | * object, so one must make sure the key is properly reprogrammed into | ||
| 86 | * the hardware. This function is also responsible for checking the key | ||
| 87 | * length for validity. In case a software fallback was put in place in | ||
| 88 | * the @cra_init call, this function might need to use the fallback if | ||
| 89 | * the algorithm doesn't support all of the key sizes. | ||
| 90 | * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt | ||
| 91 | * the supplied scatterlist containing the blocks of data. The crypto | ||
| 92 | * API consumer is responsible for aligning the entries of the | ||
| 93 | * scatterlist properly and making sure the chunks are correctly | ||
| 94 | * sized. In case a software fallback was put in place in the | ||
| 95 | * @cra_init call, this function might need to use the fallback if | ||
| 96 | * the algorithm doesn't support all of the key sizes. In case the | ||
| 97 | * key was stored in transformation context, the key might need to be | ||
| 98 | * re-programmed into the hardware in this function. This function | ||
| 99 | * shall not modify the transformation context, as this function may | ||
| 100 | * be called in parallel with the same transformation object. | ||
| 101 | * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt | ||
| 102 | * and the conditions are exactly the same. | ||
| 103 | * @init: Initialize the cryptographic transformation object. This function | ||
| 104 | * is used to initialize the cryptographic transformation object. | ||
| 105 | * This function is called only once at the instantiation time, right | ||
| 106 | * after the transformation context was allocated. In case the | ||
| 107 | * cryptographic hardware has some special requirements which need to | ||
| 108 | * be handled by software, this function shall check for the precise | ||
| 109 | * requirement of the transformation and put any software fallbacks | ||
| 110 | * in place. | ||
| 111 | * @exit: Deinitialize the cryptographic transformation object. This is a | ||
| 112 | * counterpart to @init, used to remove various changes set in | ||
| 113 | * @init. | ||
| 114 | * @ivsize: IV size applicable for transformation. The consumer must provide an | ||
| 115 | * IV of exactly that size to perform the encrypt or decrypt operation. | ||
| 116 | * @chunksize: Equal to the block size except for stream ciphers such as | ||
| 117 | * CTR where it is set to the underlying block size. | ||
| 118 | * @base: Definition of a generic crypto algorithm. | ||
| 119 | * | ||
| 120 | * All fields except @ivsize are mandatory and must be filled. | ||
| 121 | */ | ||
| 122 | struct skcipher_alg { | ||
| 123 | int (*setkey)(struct crypto_skcipher *tfm, const u8 *key, | ||
| 124 | unsigned int keylen); | ||
| 125 | int (*encrypt)(struct skcipher_request *req); | ||
| 126 | int (*decrypt)(struct skcipher_request *req); | ||
| 127 | int (*init)(struct crypto_skcipher *tfm); | ||
| 128 | void (*exit)(struct crypto_skcipher *tfm); | ||
| 78 | 129 | ||
| 79 | static inline int crypto_skcipher_givencrypt( | 130 | unsigned int min_keysize; |
| 80 | struct skcipher_givcrypt_request *req) | 131 | unsigned int max_keysize; |
| 81 | { | 132 | unsigned int ivsize; |
| 82 | struct ablkcipher_tfm *crt = | 133 | unsigned int chunksize; |
| 83 | crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req)); | ||
| 84 | return crt->givencrypt(req); | ||
| 85 | }; | ||
| 86 | 134 | ||
| 87 | static inline int crypto_skcipher_givdecrypt( | 135 | struct crypto_alg base; |
| 88 | struct skcipher_givcrypt_request *req) | ||
| 89 | { | ||
| 90 | struct ablkcipher_tfm *crt = | ||
| 91 | crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req)); | ||
| 92 | return crt->givdecrypt(req); | ||
| 93 | }; | 136 | }; |
| 94 | 137 | ||
| 95 | static inline void skcipher_givcrypt_set_tfm( | 138 | #define SKCIPHER_REQUEST_ON_STACK(name, tfm) \ |
| 96 | struct skcipher_givcrypt_request *req, struct crypto_ablkcipher *tfm) | 139 | char __##name##_desc[sizeof(struct skcipher_request) + \ |
| 97 | { | 140 | crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \ |
| 98 | req->creq.base.tfm = crypto_ablkcipher_tfm(tfm); | 141 | struct skcipher_request *name = (void *)__##name##_desc |
| 99 | } | ||
| 100 | |||
| 101 | static inline struct skcipher_givcrypt_request *skcipher_givcrypt_cast( | ||
| 102 | struct crypto_async_request *req) | ||
| 103 | { | ||
| 104 | return container_of(ablkcipher_request_cast(req), | ||
| 105 | struct skcipher_givcrypt_request, creq); | ||
| 106 | } | ||
| 107 | |||
| 108 | static inline struct skcipher_givcrypt_request *skcipher_givcrypt_alloc( | ||
| 109 | struct crypto_ablkcipher *tfm, gfp_t gfp) | ||
| 110 | { | ||
| 111 | struct skcipher_givcrypt_request *req; | ||
| 112 | |||
| 113 | req = kmalloc(sizeof(struct skcipher_givcrypt_request) + | ||
| 114 | crypto_ablkcipher_reqsize(tfm), gfp); | ||
| 115 | |||
| 116 | if (likely(req)) | ||
| 117 | skcipher_givcrypt_set_tfm(req, tfm); | ||
| 118 | |||
| 119 | return req; | ||
| 120 | } | ||
| 121 | |||
| 122 | static inline void skcipher_givcrypt_free(struct skcipher_givcrypt_request *req) | ||
| 123 | { | ||
| 124 | kfree(req); | ||
| 125 | } | ||
| 126 | |||
| 127 | static inline void skcipher_givcrypt_set_callback( | ||
| 128 | struct skcipher_givcrypt_request *req, u32 flags, | ||
| 129 | crypto_completion_t compl, void *data) | ||
| 130 | { | ||
| 131 | ablkcipher_request_set_callback(&req->creq, flags, compl, data); | ||
| 132 | } | ||
| 133 | |||
| 134 | static inline void skcipher_givcrypt_set_crypt( | ||
| 135 | struct skcipher_givcrypt_request *req, | ||
| 136 | struct scatterlist *src, struct scatterlist *dst, | ||
| 137 | unsigned int nbytes, void *iv) | ||
| 138 | { | ||
| 139 | ablkcipher_request_set_crypt(&req->creq, src, dst, nbytes, iv); | ||
| 140 | } | ||
| 141 | |||
| 142 | static inline void skcipher_givcrypt_set_giv( | ||
| 143 | struct skcipher_givcrypt_request *req, u8 *giv, u64 seq) | ||
| 144 | { | ||
| 145 | req->giv = giv; | ||
| 146 | req->seq = seq; | ||
| 147 | } | ||
| 148 | 142 | ||
| 149 | /** | 143 | /** |
| 150 | * DOC: Symmetric Key Cipher API | 144 | * DOC: Symmetric Key Cipher API |
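A driver-side sketch of filling the new struct skcipher_alg described above. The "example" names, the placeholder callbacks, and the context layout are illustrative, not code from this patch; a real driver would do the cipher work in the callbacks and register the algorithm with crypto_register_skcipher() from the internal skcipher header:

```c
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

struct example_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	unsigned int keylen;
};

static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct example_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256)
		return -EINVAL;
	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}

static int example_encrypt(struct skcipher_request *req)
{
	return -EOPNOTSUPP;	/* placeholder; real work goes here */
}

static int example_decrypt(struct skcipher_request *req)
{
	return -EOPNOTSUPP;	/* placeholder */
}

static struct skcipher_alg example_alg = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-example",
	.base.cra_priority	= 100,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct example_ctx),
	.base.cra_module	= THIS_MODULE,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.chunksize		= AES_BLOCK_SIZE,
	.setkey			= example_setkey,
	.encrypt		= example_encrypt,
	.decrypt		= example_decrypt,
};
```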
| @@ -231,12 +225,43 @@ static inline int crypto_has_skcipher(const char *alg_name, u32 type, | |||
| 231 | crypto_skcipher_mask(mask)); | 225 | crypto_skcipher_mask(mask)); |
| 232 | } | 226 | } |
| 233 | 227 | ||
| 228 | /** | ||
| 229 | * crypto_has_skcipher2() - Search for the availability of an skcipher. | ||
| 230 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
| 231 | * skcipher | ||
| 232 | * @type: specifies the type of the skcipher | ||
| 233 | * @mask: specifies the mask for the skcipher | ||
| 234 | * | ||
| 235 | * Return: true when the skcipher is known to the kernel crypto API; false | ||
| 236 | * otherwise | ||
| 237 | */ | ||
| 238 | int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask); | ||
| 239 | |||
| 234 | static inline const char *crypto_skcipher_driver_name( | 240 | static inline const char *crypto_skcipher_driver_name( |
| 235 | struct crypto_skcipher *tfm) | 241 | struct crypto_skcipher *tfm) |
| 236 | { | 242 | { |
| 237 | return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); | 243 | return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); |
| 238 | } | 244 | } |
| 239 | 245 | ||
| 246 | static inline struct skcipher_alg *crypto_skcipher_alg( | ||
| 247 | struct crypto_skcipher *tfm) | ||
| 248 | { | ||
| 249 | return container_of(crypto_skcipher_tfm(tfm)->__crt_alg, | ||
| 250 | struct skcipher_alg, base); | ||
| 251 | } | ||
| 252 | |||
| 253 | static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) | ||
| 254 | { | ||
| 255 | if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == | ||
| 256 | CRYPTO_ALG_TYPE_BLKCIPHER) | ||
| 257 | return alg->base.cra_blkcipher.ivsize; | ||
| 258 | |||
| 259 | if (alg->base.cra_ablkcipher.encrypt) | ||
| 260 | return alg->base.cra_ablkcipher.ivsize; | ||
| 261 | |||
| 262 | return alg->ivsize; | ||
| 263 | } | ||
| 264 | |||
| 240 | /** | 265 | /** |
| 241 | * crypto_skcipher_ivsize() - obtain IV size | 266 | * crypto_skcipher_ivsize() - obtain IV size |
| 242 | * @tfm: cipher handle | 267 | * @tfm: cipher handle |
| @@ -251,6 +276,36 @@ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) | |||
| 251 | return tfm->ivsize; | 276 | return tfm->ivsize; |
| 252 | } | 277 | } |
| 253 | 278 | ||
| 279 | static inline unsigned int crypto_skcipher_alg_chunksize( | ||
| 280 | struct skcipher_alg *alg) | ||
| 281 | { | ||
| 282 | if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == | ||
| 283 | CRYPTO_ALG_TYPE_BLKCIPHER) | ||
| 284 | return alg->base.cra_blocksize; | ||
| 285 | |||
| 286 | if (alg->base.cra_ablkcipher.encrypt) | ||
| 287 | return alg->base.cra_blocksize; | ||
| 288 | |||
| 289 | return alg->chunksize; | ||
| 290 | } | ||
| 291 | |||
| 292 | /** | ||
| 293 | * crypto_skcipher_chunksize() - obtain chunk size | ||
| 294 | * @tfm: cipher handle | ||
| 295 | * | ||
| 296 | * The block size is set to one for ciphers such as CTR. However, | ||
| 297 | * you still need to provide incremental updates in multiples of | ||
| 298 | * the underlying block size as the IV does not have sub-block | ||
| 299 | * granularity. This is known in this API as the chunk size. | ||
| 300 | * | ||
| 301 | * Return: chunk size in bytes | ||
| 302 | */ | ||
| 303 | static inline unsigned int crypto_skcipher_chunksize( | ||
| 304 | struct crypto_skcipher *tfm) | ||
| 305 | { | ||
| 306 | return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); | ||
| 307 | } | ||
| 308 | |||
| 254 | /** | 309 | /** |
| 255 | * crypto_skcipher_blocksize() - obtain block size of cipher | 310 | * crypto_skcipher_blocksize() - obtain block size of cipher |
| 256 | * @tfm: cipher handle | 311 | * @tfm: cipher handle |
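A consumer-side sketch of the API touched in this file: one-shot CTR encryption of a scatterlist using SKCIPHER_REQUEST_ON_STACK and the new crypto_skcipher_chunksize(). The "ctr(aes)" name is illustrative, error paths are abbreviated, and a synchronous implementation is assumed (an async driver returning -EINPROGRESS would need a completion):

```c
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_ctr_encrypt(const u8 *key, unsigned int keylen,
			       u8 *iv, struct scatterlist *sg,
			       unsigned int nbytes)
{
	struct crypto_skcipher *tfm;
	int err;

	tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		/*
		 * crypto_skcipher_blocksize() is 1 for CTR, but incremental
		 * updates must be multiples of crypto_skcipher_chunksize().
		 */
		pr_debug("chunksize=%u ivsize=%u\n",
			 crypto_skcipher_chunksize(tfm),
			 crypto_skcipher_ivsize(tfm));

		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, sg, sg, nbytes, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}

	crypto_free_skcipher(tfm);
	return err;
}
```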
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 6e28c895c376..7cee5551625b 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
| @@ -47,16 +47,18 @@ | |||
| 47 | #define CRYPTO_ALG_TYPE_AEAD 0x00000003 | 47 | #define CRYPTO_ALG_TYPE_AEAD 0x00000003 |
| 48 | #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 | 48 | #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 |
| 49 | #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 | 49 | #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 |
| 50 | #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 | ||
| 50 | #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 | 51 | #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 |
| 51 | #define CRYPTO_ALG_TYPE_DIGEST 0x00000008 | 52 | #define CRYPTO_ALG_TYPE_KPP 0x00000008 |
| 52 | #define CRYPTO_ALG_TYPE_HASH 0x00000008 | ||
| 53 | #define CRYPTO_ALG_TYPE_SHASH 0x00000009 | ||
| 54 | #define CRYPTO_ALG_TYPE_AHASH 0x0000000a | ||
| 55 | #define CRYPTO_ALG_TYPE_RNG 0x0000000c | 53 | #define CRYPTO_ALG_TYPE_RNG 0x0000000c |
| 56 | #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d | 54 | #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d |
| 55 | #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e | ||
| 56 | #define CRYPTO_ALG_TYPE_HASH 0x0000000e | ||
| 57 | #define CRYPTO_ALG_TYPE_SHASH 0x0000000e | ||
| 58 | #define CRYPTO_ALG_TYPE_AHASH 0x0000000f | ||
| 57 | 59 | ||
| 58 | #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e | 60 | #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e |
| 59 | #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c | 61 | #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e |
| 60 | #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c | 62 | #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c |
| 61 | 63 | ||
| 62 | #define CRYPTO_ALG_LARVAL 0x00000010 | 64 | #define CRYPTO_ALG_LARVAL 0x00000010 |
| @@ -486,8 +488,6 @@ struct ablkcipher_tfm { | |||
| 486 | unsigned int keylen); | 488 | unsigned int keylen); |
| 487 | int (*encrypt)(struct ablkcipher_request *req); | 489 | int (*encrypt)(struct ablkcipher_request *req); |
| 488 | int (*decrypt)(struct ablkcipher_request *req); | 490 | int (*decrypt)(struct ablkcipher_request *req); |
| 489 | int (*givencrypt)(struct skcipher_givcrypt_request *req); | ||
| 490 | int (*givdecrypt)(struct skcipher_givcrypt_request *req); | ||
| 491 | 491 | ||
| 492 | struct crypto_ablkcipher *base; | 492 | struct crypto_ablkcipher *base; |
| 493 | 493 | ||
| @@ -712,23 +712,6 @@ static inline u32 crypto_skcipher_mask(u32 mask) | |||
| 712 | * state information is unused by the kernel crypto API. | 712 | * state information is unused by the kernel crypto API. |
| 713 | */ | 713 | */ |
| 714 | 714 | ||
| 715 | /** | ||
| 716 | * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle | ||
| 717 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
| 718 | * ablkcipher cipher | ||
| 719 | * @type: specifies the type of the cipher | ||
| 720 | * @mask: specifies the mask for the cipher | ||
| 721 | * | ||
| 722 | * Allocate a cipher handle for an ablkcipher. The returned struct | ||
| 723 | * crypto_ablkcipher is the cipher handle that is required for any subsequent | ||
| 724 | * API invocation for that ablkcipher. | ||
| 725 | * | ||
| 726 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
| 727 | * of an error, PTR_ERR() returns the error code. | ||
| 728 | */ | ||
| 729 | struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, | ||
| 730 | u32 type, u32 mask); | ||
| 731 | |||
| 732 | static inline struct crypto_tfm *crypto_ablkcipher_tfm( | 715 | static inline struct crypto_tfm *crypto_ablkcipher_tfm( |
| 733 | struct crypto_ablkcipher *tfm) | 716 | struct crypto_ablkcipher *tfm) |
| 734 | { | 717 | { |
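One way to read the renumbered constants above, sketched as a helper that is not part of the patch: with CRYPTO_ALG_TYPE_HASH_MASK equal to 0xe, a single masked compare now matches both shash (0xe) and ahash (0xf) algorithms, while the freed value 0x8 becomes CRYPTO_ALG_TYPE_KPP:

```c
#include <linux/crypto.h>

/* Illustrative helper: true for any hash algorithm, shash or ahash. */
static inline bool example_alg_is_hash(const struct crypto_alg *alg)
{
	return (alg->cra_flags & CRYPTO_ALG_TYPE_HASH_MASK) ==
	       CRYPTO_ALG_TYPE_HASH;
}
```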
diff --git a/include/linux/mpi.h b/include/linux/mpi.h index 3a5abe95affd..1cc5ffb769af 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h | |||
| @@ -80,8 +80,7 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); | |||
| 80 | int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, | 80 | int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, |
| 81 | int *sign); | 81 | int *sign); |
| 82 | void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); | 82 | void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); |
| 83 | int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign); | 83 | int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes, |
| 84 | int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned *nbytes, | ||
| 85 | int *sign); | 84 | int *sign); |
| 86 | 85 | ||
| 87 | #define log_mpidump g10_log_mpidump | 86 | #define log_mpidump g10_log_mpidump |
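A sketch of the updated mpi_write_to_sgl() signature in use, where the length is now passed by value rather than through a pointer; the surrounding helper and buffer names are illustrative:

```c
#include <linux/mpi.h>
#include <linux/scatterlist.h>

static int example_mpi_to_sgl(const void *raw, size_t raw_len,
			      u8 *out, unsigned int out_len)
{
	struct scatterlist sg;
	MPI m;
	int sign, err;

	m = mpi_read_raw_data(raw, raw_len);
	if (!m)
		return -ENOMEM;

	sg_init_one(&sg, out, out_len);
	/* nbytes is now a plain value, not "unsigned *nbytes". */
	err = mpi_write_to_sgl(m, &sg, out_len, &sign);

	mpi_free(m);
	return err;
}
```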
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h index 2e67bb64c1da..79b5ded2001a 100644 --- a/include/uapi/linux/cryptouser.h +++ b/include/uapi/linux/cryptouser.h | |||
| @@ -45,6 +45,7 @@ enum crypto_attr_type_t { | |||
| 45 | CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */ | 45 | CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */ |
| 46 | CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */ | 46 | CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */ |
| 47 | CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */ | 47 | CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */ |
| 48 | CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */ | ||
| 48 | __CRYPTOCFGA_MAX | 49 | __CRYPTOCFGA_MAX |
| 49 | 50 | ||
| 50 | #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) | 51 | #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) |
| @@ -107,5 +108,9 @@ struct crypto_report_akcipher { | |||
| 107 | char type[CRYPTO_MAX_NAME]; | 108 | char type[CRYPTO_MAX_NAME]; |
| 108 | }; | 109 | }; |
| 109 | 110 | ||
| 111 | struct crypto_report_kpp { | ||
| 112 | char type[CRYPTO_MAX_NAME]; | ||
| 113 | }; | ||
| 114 | |||
| 110 | #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \ | 115 | #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \ |
| 111 | sizeof(struct crypto_report_blkcipher)) | 116 | sizeof(struct crypto_report_blkcipher)) |
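A sketch of how a KPP implementation's netlink report callback might fill the new attribute, mirroring the existing akcipher reporting; the function name is illustrative and not taken from this diff:

```c
#include <net/netlink.h>
#include <uapi/linux/cryptouser.h>

static int example_crypto_kpp_report(struct sk_buff *skb,
				     struct crypto_alg *alg)
{
	struct crypto_report_kpp rkpp;

	strncpy(rkpp.type, "kpp", sizeof(rkpp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_KPP,
		       sizeof(struct crypto_report_kpp), &rkpp);
}
```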
