diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-02-23 12:54:19 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-02-23 12:54:19 -0500 |
commit | 5bcbe22ca47da04cda3a858cef67f55b550c1d13 (patch) | |
tree | 49bd61e32eb2d652085a49182436322a3e0e9840 /crypto/algapi.c | |
parent | 1db934a5b77a9e37c4742c704fde6af233187a98 (diff) | |
parent | 12cb3a1c4184f891d965d1f39f8cfcc9ef617647 (diff) |
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"API:
- Try to catch hash output overrun in testmgr
- Introduce walksize attribute for batched walking
- Make crypto_xor() and crypto_inc() alignment agnostic
Algorithms:
- Add time-invariant AES algorithm
- Add standalone CBCMAC algorithm
Drivers:
- Add NEON accelerated chacha20 on ARM/ARM64
- Expose AES-CTR as synchronous skcipher on ARM64
- Add scalar AES implementation on ARM64
- Improve scalar AES implementation on ARM
- Improve NEON AES implementation on ARM/ARM64
- Merge CRC32 and PMULL instruction based drivers on ARM64
- Add NEON accelerated CBCMAC/CMAC/XCBC AES on ARM64
- Add IPsec AUTHENC implementation in atmel
- Add Support for Octeon-tx CPT Engine
- Add Broadcom SPU driver
- Add MediaTek driver"
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (142 commits)
crypto: xts - Add ECB dependency
crypto: cavium - switch to pci_alloc_irq_vectors
crypto: cavium - switch to pci_alloc_irq_vectors
crypto: cavium - remove dead MSI-X related define
crypto: brcm - Avoid double free in ahash_finup()
crypto: cavium - fix Kconfig dependencies
crypto: cavium - cpt_bind_vq_to_grp could return an error code
crypto: doc - fix typo
hwrng: omap - update Kconfig help description
crypto: ccm - drop unnecessary minimum 32-bit alignment
crypto: ccm - honour alignmask of subordinate MAC cipher
crypto: caam - fix state buffer DMA (un)mapping
crypto: caam - abstract ahash request double buffering
crypto: caam - fix error path for ctx_dma mapping failure
crypto: caam - fix DMA API leaks for multiple setkey() calls
crypto: caam - don't dma_map key for hash algorithms
crypto: caam - use dma_map_sg() return code
crypto: caam - replace sg_count() with sg_nents_for_len()
crypto: caam - check sg_count() return value
crypto: caam - fix HW S/G in ablkcipher_giv_edesc_alloc()
...
Diffstat (limited to 'crypto/algapi.c')
-rw-r--r-- | crypto/algapi.c | 68 |
1 files changed, 50 insertions, 18 deletions
diff --git a/crypto/algapi.c b/crypto/algapi.c index 1fad2a6b3bbb..6b52e8f0b95f 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
@@ -962,34 +962,66 @@ void crypto_inc(u8 *a, unsigned int size) | |||
962 | __be32 *b = (__be32 *)(a + size); | 962 | __be32 *b = (__be32 *)(a + size); |
963 | u32 c; | 963 | u32 c; |
964 | 964 | ||
965 | for (; size >= 4; size -= 4) { | 965 | if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || |
966 | c = be32_to_cpu(*--b) + 1; | 966 | !((unsigned long)b & (__alignof__(*b) - 1))) |
967 | *b = cpu_to_be32(c); | 967 | for (; size >= 4; size -= 4) { |
968 | if (c) | 968 | c = be32_to_cpu(*--b) + 1; |
969 | return; | 969 | *b = cpu_to_be32(c); |
970 | } | 970 | if (c) |
971 | return; | ||
972 | } | ||
971 | 973 | ||
972 | crypto_inc_byte(a, size); | 974 | crypto_inc_byte(a, size); |
973 | } | 975 | } |
974 | EXPORT_SYMBOL_GPL(crypto_inc); | 976 | EXPORT_SYMBOL_GPL(crypto_inc); |
975 | 977 | ||
976 | static inline void crypto_xor_byte(u8 *a, const u8 *b, unsigned int size) | 978 | void __crypto_xor(u8 *dst, const u8 *src, unsigned int len) |
977 | { | 979 | { |
978 | for (; size; size--) | 980 | int relalign = 0; |
979 | *a++ ^= *b++; | 981 | |
980 | } | 982 | if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { |
983 | int size = sizeof(unsigned long); | ||
984 | int d = ((unsigned long)dst ^ (unsigned long)src) & (size - 1); | ||
985 | |||
986 | relalign = d ? 1 << __ffs(d) : size; | ||
987 | |||
988 | /* | ||
989 | * If we care about alignment, process as many bytes as | ||
990 | * needed to advance dst and src to values whose alignments | ||
991 | * equal their relative alignment. This will allow us to | ||
992 | * process the remainder of the input using optimal strides. | ||
993 | */ | ||
994 | while (((unsigned long)dst & (relalign - 1)) && len > 0) { | ||
995 | *dst++ ^= *src++; | ||
996 | len--; | ||
997 | } | ||
998 | } | ||
981 | 999 | ||
982 | void crypto_xor(u8 *dst, const u8 *src, unsigned int size) | 1000 | while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) { |
983 | { | 1001 | *(u64 *)dst ^= *(u64 *)src; |
984 | u32 *a = (u32 *)dst; | 1002 | dst += 8; |
985 | u32 *b = (u32 *)src; | 1003 | src += 8; |
1004 | len -= 8; | ||
1005 | } | ||
986 | 1006 | ||
987 | for (; size >= 4; size -= 4) | 1007 | while (len >= 4 && !(relalign & 3)) { |
988 | *a++ ^= *b++; | 1008 | *(u32 *)dst ^= *(u32 *)src; |
1009 | dst += 4; | ||
1010 | src += 4; | ||
1011 | len -= 4; | ||
1012 | } | ||
1013 | |||
1014 | while (len >= 2 && !(relalign & 1)) { | ||
1015 | *(u16 *)dst ^= *(u16 *)src; | ||
1016 | dst += 2; | ||
1017 | src += 2; | ||
1018 | len -= 2; | ||
1019 | } | ||
989 | 1020 | ||
990 | crypto_xor_byte((u8 *)a, (u8 *)b, size); | 1021 | while (len--) |
1022 | *dst++ ^= *src++; | ||
991 | } | 1023 | } |
992 | EXPORT_SYMBOL_GPL(crypto_xor); | 1024 | EXPORT_SYMBOL_GPL(__crypto_xor); |
993 | 1025 | ||
994 | unsigned int crypto_alg_extsize(struct crypto_alg *alg) | 1026 | unsigned int crypto_alg_extsize(struct crypto_alg *alg) |
995 | { | 1027 | { |