70 files changed, 1785 insertions(+), 709 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index cd8383e26ac8..3ac697a64280 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4434,6 +4434,7 @@ F: include/linux/hwmon*.h
 HARDWARE RANDOM NUMBER GENERATOR CORE
 M:	Matt Mackall <mpm@selenic.com>
 M:	Herbert Xu <herbert@gondor.apana.org.au>
+L:	linux-crypto@vger.kernel.org
 S:	Odd fixes
 F:	Documentation/hw_random.txt
 F:	drivers/char/hw_random/
diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile
index 42f5f1a4b40a..69a8a8dabc2b 100644
--- a/arch/mips/cavium-octeon/Makefile
+++ b/arch/mips/cavium-octeon/Makefile
@@ -16,6 +16,7 @@ obj-y := cpu.o setup.o octeon-platform.o octeon-irq.o csrc-octeon.o
 obj-y += dma-octeon.o
 obj-y += octeon-memcpy.o
 obj-y += executive/
+obj-y += crypto/
 
 obj-$(CONFIG_MTD) += flash_setup.o
 obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/mips/cavium-octeon/crypto/Makefile b/arch/mips/cavium-octeon/crypto/Makefile
new file mode 100644
index 000000000000..a74f76d85a2f
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/Makefile
@@ -0,0 +1,7 @@
+#
+# OCTEON-specific crypto modules.
+#
+
+obj-y += octeon-crypto.o
+
+obj-$(CONFIG_CRYPTO_MD5_OCTEON) += octeon-md5.o
diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.c b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
new file mode 100644
index 000000000000..7c82ff463b65
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
@@ -0,0 +1,66 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2012 Cavium Networks
+ */
+
+#include <asm/cop2.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+
+#include "octeon-crypto.h"
+
+/**
+ * Enable access to Octeon's COP2 crypto hardware for kernel use. Wrap any
+ * crypto operations in calls to octeon_crypto_enable/disable in order to make
+ * sure the state of COP2 isn't corrupted if userspace is also performing
+ * hardware crypto operations. Allocate the state parameter on the stack.
+ * Preemption must be disabled to prevent context switches.
+ *
+ * @state: Pointer to state structure to store current COP2 state in.
+ *
+ * Returns: Flags to be passed to octeon_crypto_disable()
+ */
+unsigned long octeon_crypto_enable(struct octeon_cop2_state *state)
+{
+	int status;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	status = read_c0_status();
+	write_c0_status(status | ST0_CU2);
+	if (KSTK_STATUS(current) & ST0_CU2) {
+		octeon_cop2_save(&(current->thread.cp2));
+		KSTK_STATUS(current) &= ~ST0_CU2;
+		status &= ~ST0_CU2;
+	} else if (status & ST0_CU2) {
+		octeon_cop2_save(state);
+	}
+	local_irq_restore(flags);
+	return status & ST0_CU2;
+}
+EXPORT_SYMBOL_GPL(octeon_crypto_enable);
+
+/**
+ * Disable access to Octeon's COP2 crypto hardware in the kernel. This must be
+ * called after an octeon_crypto_enable() before any context switch or return to
+ * userspace.
+ *
+ * @state: Pointer to COP2 state to restore
+ * @flags: Return value from octeon_crypto_enable()
+ */
+void octeon_crypto_disable(struct octeon_cop2_state *state,
+			   unsigned long crypto_flags)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (crypto_flags & ST0_CU2)
+		octeon_cop2_restore(state);
+	else
+		write_c0_status(read_c0_status() & ~ST0_CU2);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(octeon_crypto_disable);
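The kernel-doc above prescribes a strict usage pattern: disable preemption, save the COP2 state with octeon_crypto_enable(), issue the COP2 crypto instructions, then restore with octeon_crypto_disable() before any context switch or return to userspace. A minimal caller sketch following that contract (do_cop2_hash() is a placeholder for the actual COP2 operations, not part of this patch):

	struct octeon_cop2_state state;
	unsigned long flags;

	preempt_disable();
	flags = octeon_crypto_enable(&state);	/* save the current COP2 owner's state */

	do_cop2_hash(data, len);		/* placeholder: COP2 crypto instructions */

	octeon_crypto_disable(&state, flags);	/* restore COP2 before any context switch */
	preempt_enable();

octeon_md5_update() and octeon_md5_final() below follow exactly this pattern, additionally disabling bottom halves around the critical section.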
diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.h b/arch/mips/cavium-octeon/crypto/octeon-crypto.h
new file mode 100644
index 000000000000..e2a4aece9c24
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.h
@@ -0,0 +1,75 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012-2013 Cavium Inc., All Rights Reserved.
+ *
+ * MD5 instruction definitions added by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ */
+#ifndef __LINUX_OCTEON_CRYPTO_H
+#define __LINUX_OCTEON_CRYPTO_H
+
+#include <linux/sched.h>
+#include <asm/mipsregs.h>
+
+#define OCTEON_CR_OPCODE_PRIORITY 300
+
+extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state);
+extern void octeon_crypto_disable(struct octeon_cop2_state *state,
+				  unsigned long flags);
+
+/*
+ * Macros needed to implement MD5:
+ */
+
+/*
+ * The index can be 0-1.
+ */
+#define write_octeon_64bit_hash_dword(value, index)	\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x0048+" STR(index)		\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * The index can be 0-1.
+ */
+#define read_octeon_64bit_hash_dword(index)	\
+({						\
+	u64 __value;				\
+						\
+	__asm__ __volatile__ (			\
+	"dmfc2 %[rt],0x0048+" STR(index)	\
+	: [rt] "=d" (__value)			\
+	: );					\
+						\
+	__value;				\
+})
+
+/*
+ * The index can be 0-6.
+ */
+#define write_octeon_64bit_block_dword(value, index)	\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x0040+" STR(index)		\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_md5_start(value)		\
+do {					\
+	__asm__ __volatile__ (		\
+	"dmtc2 %[rt],0x4047"		\
+	:				\
+	: [rt] "d" (value));		\
+} while (0)
+
+#endif /* __LINUX_OCTEON_CRYPTO_H */
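Note that STR(index) stringifies its argument into the dmtc2/dmfc2 immediate at preprocessing time, so the index must be a literal constant; passing a variable would paste its name, not its value, into the assembly. This is why the code below unrolls the seven block-dword writes instead of looping:

	write_octeon_64bit_block_dword(block[0], 0);	/* OK: literal index */
	/* write_octeon_64bit_block_dword(block[i], i) would not assemble:
	 * it expands to "dmtc2 %[rt],0x0040+i".
	 */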
diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c
new file mode 100644
index 000000000000..b909881ba6c1
--- /dev/null
+++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c
@@ -0,0 +1,216 @@
+/*
+ * Cryptographic API.
+ *
+ * MD5 Message Digest Algorithm (RFC1321).
+ *
+ * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ * Based on crypto/md5.c, which is:
+ *
+ * Derived from cryptoapi implementation, originally based on the
+ * public domain implementation written by Colin Plumb in 1993.
+ *
+ * Copyright (c) Cryptoapi developers.
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <crypto/md5.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+#include <linux/cryptohash.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
+
+#include "octeon-crypto.h"
+
+/*
+ * We pass everything as 64-bit. OCTEON can handle misaligned data.
+ */
+
+static void octeon_md5_store_hash(struct md5_state *ctx)
+{
+	u64 *hash = (u64 *)ctx->hash;
+
+	write_octeon_64bit_hash_dword(hash[0], 0);
+	write_octeon_64bit_hash_dword(hash[1], 1);
+}
+
+static void octeon_md5_read_hash(struct md5_state *ctx)
+{
+	u64 *hash = (u64 *)ctx->hash;
+
+	hash[0] = read_octeon_64bit_hash_dword(0);
+	hash[1] = read_octeon_64bit_hash_dword(1);
+}
+
+static void octeon_md5_transform(const void *_block)
+{
+	const u64 *block = _block;
+
+	write_octeon_64bit_block_dword(block[0], 0);
+	write_octeon_64bit_block_dword(block[1], 1);
+	write_octeon_64bit_block_dword(block[2], 2);
+	write_octeon_64bit_block_dword(block[3], 3);
+	write_octeon_64bit_block_dword(block[4], 4);
+	write_octeon_64bit_block_dword(block[5], 5);
+	write_octeon_64bit_block_dword(block[6], 6);
+	octeon_md5_start(block[7]);
+}
+
+static int octeon_md5_init(struct shash_desc *desc)
+{
+	struct md5_state *mctx = shash_desc_ctx(desc);
+
+	mctx->hash[0] = cpu_to_le32(0x67452301);
+	mctx->hash[1] = cpu_to_le32(0xefcdab89);
+	mctx->hash[2] = cpu_to_le32(0x98badcfe);
+	mctx->hash[3] = cpu_to_le32(0x10325476);
+	mctx->byte_count = 0;
+
+	return 0;
+}
+
+static int octeon_md5_update(struct shash_desc *desc, const u8 *data,
+			     unsigned int len)
+{
+	struct md5_state *mctx = shash_desc_ctx(desc);
+	const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
+	struct octeon_cop2_state state;
+	unsigned long flags;
+
+	mctx->byte_count += len;
+
+	if (avail > len) {
+		memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
+		       data, len);
+		return 0;
+	}
+
+	memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data,
+	       avail);
+
+	local_bh_disable();
+	preempt_disable();
+	flags = octeon_crypto_enable(&state);
+	octeon_md5_store_hash(mctx);
+
+	octeon_md5_transform(mctx->block);
+	data += avail;
+	len -= avail;
+
+	while (len >= sizeof(mctx->block)) {
+		octeon_md5_transform(data);
+		data += sizeof(mctx->block);
+		len -= sizeof(mctx->block);
+	}
+
+	octeon_md5_read_hash(mctx);
+	octeon_crypto_disable(&state, flags);
+	preempt_enable();
+	local_bh_enable();
+
+	memcpy(mctx->block, data, len);
+
+	return 0;
+}
+
+static int octeon_md5_final(struct shash_desc *desc, u8 *out)
+{
+	struct md5_state *mctx = shash_desc_ctx(desc);
+	const unsigned int offset = mctx->byte_count & 0x3f;
+	char *p = (char *)mctx->block + offset;
+	int padding = 56 - (offset + 1);
+	struct octeon_cop2_state state;
+	unsigned long flags;
+
+	*p++ = 0x80;
+
+	local_bh_disable();
+	preempt_disable();
+	flags = octeon_crypto_enable(&state);
+	octeon_md5_store_hash(mctx);
+
+	if (padding < 0) {
+		memset(p, 0x00, padding + sizeof(u64));
+		octeon_md5_transform(mctx->block);
+		p = (char *)mctx->block;
+		padding = 56;
+	}
+
+	memset(p, 0, padding);
+	mctx->block[14] = cpu_to_le32(mctx->byte_count << 3);
+	mctx->block[15] = cpu_to_le32(mctx->byte_count >> 29);
+	octeon_md5_transform(mctx->block);
+
+	octeon_md5_read_hash(mctx);
+	octeon_crypto_disable(&state, flags);
+	preempt_enable();
+	local_bh_enable();
+
+	memcpy(out, mctx->hash, sizeof(mctx->hash));
+	memset(mctx, 0, sizeof(*mctx));
+
+	return 0;
+}
+
+static int octeon_md5_export(struct shash_desc *desc, void *out)
+{
+	struct md5_state *ctx = shash_desc_ctx(desc);
+
+	memcpy(out, ctx, sizeof(*ctx));
+	return 0;
+}
+
+static int octeon_md5_import(struct shash_desc *desc, const void *in)
+{
+	struct md5_state *ctx = shash_desc_ctx(desc);
+
+	memcpy(ctx, in, sizeof(*ctx));
+	return 0;
+}
+
+static struct shash_alg alg = {
+	.digestsize	= MD5_DIGEST_SIZE,
+	.init		= octeon_md5_init,
+	.update		= octeon_md5_update,
+	.final		= octeon_md5_final,
+	.export		= octeon_md5_export,
+	.import		= octeon_md5_import,
+	.descsize	= sizeof(struct md5_state),
+	.statesize	= sizeof(struct md5_state),
+	.base		= {
+		.cra_name	= "md5",
+		.cra_driver_name= "octeon-md5",
+		.cra_priority	= OCTEON_CR_OPCODE_PRIORITY,
+		.cra_flags	= CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	= MD5_HMAC_BLOCK_SIZE,
+		.cra_module	= THIS_MODULE,
+	}
+};
+
+static int __init md5_mod_init(void)
+{
+	if (!octeon_has_crypto())
+		return -ENOTSUPP;
+	return crypto_register_shash(&alg);
+}
+
+static void __exit md5_mod_fini(void)
+{
+	crypto_unregister_shash(&alg);
+}
+
+module_init(md5_mod_init);
+module_exit(md5_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MD5 Message Digest Algorithm (OCTEON)");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
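The buffering arithmetic in octeon_md5_update()/octeon_md5_final() above is the standard MD5 scheme: byte_count & 0x3f is the offset into the current 64-byte block and avail = 64 - offset is the room left in it. As a worked example, after hashing 100 bytes the offset is 100 & 0x3f = 36, so final() writes the 0x80 terminator at byte 36, pads with 56 - (36 + 1) = 19 zero bytes, stores the 800-bit message length in block words 14-15 (byte_count << 3 and byte_count >> 29), and runs one last transform. Only when the terminator leaves no room for the 8-byte length field (offset >= 56, making padding negative) is an extra all-padding block processed first.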
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
index e15b049b3bd7..b2104bd9ab3b 100644
--- a/arch/mips/cavium-octeon/executive/octeon-model.c
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -27,6 +27,9 @@
 
 #include <asm/octeon/octeon.h>
 
+enum octeon_feature_bits __octeon_feature_bits __read_mostly;
+EXPORT_SYMBOL_GPL(__octeon_feature_bits);
+
 /**
  * Read a byte of fuse data
  * @byte_addr: address to read
@@ -103,6 +106,9 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
 	else
 		suffix = "NSP";
 
+	if (!fus_dat2.s.nocrypto)
+		__octeon_feature_bits |= OCTEON_HAS_CRYPTO;
+
 	/*
 	 * Assume pass number is encoded using <5:3><2:0>. Exceptions
 	 * will be fixed later.
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
index c4fe81f47f53..8ebd3f579b84 100644
--- a/arch/mips/include/asm/octeon/octeon-feature.h
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -46,8 +46,6 @@ enum octeon_feature {
 	OCTEON_FEATURE_SAAD,
 	/* Does this Octeon support the ZIP offload engine? */
 	OCTEON_FEATURE_ZIP,
-	/* Does this Octeon support crypto acceleration using COP2? */
-	OCTEON_FEATURE_CRYPTO,
 	OCTEON_FEATURE_DORM_CRYPTO,
 	/* Does this Octeon support PCI express? */
 	OCTEON_FEATURE_PCIE,
@@ -86,6 +84,21 @@ enum octeon_feature {
 	OCTEON_MAX_FEATURE
 };
 
+enum octeon_feature_bits {
+	OCTEON_HAS_CRYPTO = 0x0001,	/* Crypto acceleration using COP2 */
+};
+extern enum octeon_feature_bits __octeon_feature_bits;
+
+/**
+ * octeon_has_crypto() - Check if this OCTEON has crypto acceleration support.
+ *
+ * Returns: Non-zero if the feature exists. Zero if the feature does not exist.
+ */
+static inline int octeon_has_crypto(void)
+{
+	return __octeon_feature_bits & OCTEON_HAS_CRYPTO;
+}
+
 /**
  * Determine if the current Octeon supports a specific feature. These
  * checks have been optimized to be fairly quick, but they should still
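__octeon_feature_bits is filled in once during boot from the fuse data (see the octeon-model.c hunk above), so octeon_has_crypto() reduces to a single AND on a cached word and is cheap enough to call from any init path. A hedged sketch of how another driver could gate itself on it, mirroring md5_mod_init() earlier in this patch (octeon_foo_mod_init() and foo_alg are illustrative names only):

static int __init octeon_foo_mod_init(void)
{
	if (!octeon_has_crypto())
		return -ENOTSUPP;	/* COP2 crypto units fused off on this part */
	return crypto_register_shash(&foo_alg);
}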
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index d781f9e66884..6dfefd2d5cdf 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -44,11 +44,6 @@ extern int octeon_get_boot_num_arguments(void);
 extern const char *octeon_get_boot_argument(int arg);
 extern void octeon_hal_setup_reserved32(void);
 extern void octeon_user_io_init(void);
-struct octeon_cop2_state;
-extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state);
-extern void octeon_crypto_disable(struct octeon_cop2_state *state,
-				  unsigned long flags);
-extern asmlinkage void octeon_cop2_restore(struct octeon_cop2_state *task);
 
 extern void octeon_init_cvmcount(void);
 extern void octeon_setup_delays(void);
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index 705408766ab0..2e48eb8813ff 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -497,7 +497,7 @@ module_init(aes_sparc64_mod_init);
 module_exit(aes_sparc64_mod_fini);
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
+MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, sparc64 aes opcode accelerated");
 
 MODULE_ALIAS_CRYPTO("aes");
 
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index 641f55cb61c3..6bf2479a12fb 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -322,6 +322,6 @@ module_exit(camellia_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
 
-MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("camellia");
 
 #include "crop_devid.c"
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index d11500972994..dd6a34fa6e19 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -533,5 +533,6 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
 
 MODULE_ALIAS_CRYPTO("des");
+MODULE_ALIAS_CRYPTO("des3_ede");
 
 #include "crop_devid.c"
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index 64c7ff5f72a9..b688731d7ede 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -183,7 +183,7 @@ module_init(md5_sparc64_mod_init);
 module_exit(md5_sparc64_mod_fini);
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
+MODULE_DESCRIPTION("MD5 Message Digest Algorithm, sparc64 md5 opcode accelerated");
 
 MODULE_ALIAS_CRYPTO("md5");
 
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 477e9d75149b..6bd2c6c95373 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
| @@ -32,12 +32,23 @@ | |||
| 32 | #include <linux/linkage.h> | 32 | #include <linux/linkage.h> |
| 33 | #include <asm/inst.h> | 33 | #include <asm/inst.h> |
| 34 | 34 | ||
| 35 | /* | ||
| 36 | * The following macros are used to move an (un)aligned 16 byte value to/from | ||
| 37 | * an XMM register. This can done for either FP or integer values, for FP use | ||
| 38 | * movaps (move aligned packed single) or integer use movdqa (move double quad | ||
| 39 | * aligned). It doesn't make a performance difference which instruction is used | ||
| 40 | * since Nehalem (original Core i7) was released. However, the movaps is a byte | ||
| 41 | * shorter, so that is the one we'll use for now. (same for unaligned). | ||
| 42 | */ | ||
| 43 | #define MOVADQ movaps | ||
| 44 | #define MOVUDQ movups | ||
| 45 | |||
| 35 | #ifdef __x86_64__ | 46 | #ifdef __x86_64__ |
| 47 | |||
| 36 | .data | 48 | .data |
| 37 | .align 16 | 49 | .align 16 |
| 38 | .Lgf128mul_x_ble_mask: | 50 | .Lgf128mul_x_ble_mask: |
| 39 | .octa 0x00000000000000010000000000000087 | 51 | .octa 0x00000000000000010000000000000087 |
| 40 | |||
| 41 | POLY: .octa 0xC2000000000000000000000000000001 | 52 | POLY: .octa 0xC2000000000000000000000000000001 |
| 42 | TWOONE: .octa 0x00000001000000000000000000000001 | 53 | TWOONE: .octa 0x00000001000000000000000000000001 |
| 43 | 54 | ||
| @@ -89,6 +100,7 @@ enc: .octa 0x2 | |||
| 89 | #define arg8 STACK_OFFSET+16(%r14) | 100 | #define arg8 STACK_OFFSET+16(%r14) |
| 90 | #define arg9 STACK_OFFSET+24(%r14) | 101 | #define arg9 STACK_OFFSET+24(%r14) |
| 91 | #define arg10 STACK_OFFSET+32(%r14) | 102 | #define arg10 STACK_OFFSET+32(%r14) |
| 103 | #define keysize 2*15*16(%arg1) | ||
| 92 | #endif | 104 | #endif |
| 93 | 105 | ||
| 94 | 106 | ||
| @@ -213,10 +225,12 @@ enc: .octa 0x2 | |||
| 213 | 225 | ||
| 214 | .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \ | 226 | .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \ |
| 215 | XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation | 227 | XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation |
| 228 | MOVADQ SHUF_MASK(%rip), %xmm14 | ||
| 216 | mov arg7, %r10 # %r10 = AAD | 229 | mov arg7, %r10 # %r10 = AAD |
| 217 | mov arg8, %r12 # %r12 = aadLen | 230 | mov arg8, %r12 # %r12 = aadLen |
| 218 | mov %r12, %r11 | 231 | mov %r12, %r11 |
| 219 | pxor %xmm\i, %xmm\i | 232 | pxor %xmm\i, %xmm\i |
| 233 | |||
| 220 | _get_AAD_loop\num_initial_blocks\operation: | 234 | _get_AAD_loop\num_initial_blocks\operation: |
| 221 | movd (%r10), \TMP1 | 235 | movd (%r10), \TMP1 |
| 222 | pslldq $12, \TMP1 | 236 | pslldq $12, \TMP1 |
| @@ -225,16 +239,18 @@ _get_AAD_loop\num_initial_blocks\operation: | |||
| 225 | add $4, %r10 | 239 | add $4, %r10 |
| 226 | sub $4, %r12 | 240 | sub $4, %r12 |
| 227 | jne _get_AAD_loop\num_initial_blocks\operation | 241 | jne _get_AAD_loop\num_initial_blocks\operation |
| 242 | |||
| 228 | cmp $16, %r11 | 243 | cmp $16, %r11 |
| 229 | je _get_AAD_loop2_done\num_initial_blocks\operation | 244 | je _get_AAD_loop2_done\num_initial_blocks\operation |
| 245 | |||
| 230 | mov $16, %r12 | 246 | mov $16, %r12 |
| 231 | _get_AAD_loop2\num_initial_blocks\operation: | 247 | _get_AAD_loop2\num_initial_blocks\operation: |
| 232 | psrldq $4, %xmm\i | 248 | psrldq $4, %xmm\i |
| 233 | sub $4, %r12 | 249 | sub $4, %r12 |
| 234 | cmp %r11, %r12 | 250 | cmp %r11, %r12 |
| 235 | jne _get_AAD_loop2\num_initial_blocks\operation | 251 | jne _get_AAD_loop2\num_initial_blocks\operation |
| 252 | |||
| 236 | _get_AAD_loop2_done\num_initial_blocks\operation: | 253 | _get_AAD_loop2_done\num_initial_blocks\operation: |
| 237 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 238 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data | 254 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data |
| 239 | 255 | ||
| 240 | xor %r11, %r11 # initialise the data pointer offset as zero | 256 | xor %r11, %r11 # initialise the data pointer offset as zero |
| @@ -243,59 +259,34 @@ _get_AAD_loop2_done\num_initial_blocks\operation: | |||
| 243 | 259 | ||
| 244 | mov %arg5, %rax # %rax = *Y0 | 260 | mov %arg5, %rax # %rax = *Y0 |
| 245 | movdqu (%rax), \XMM0 # XMM0 = Y0 | 261 | movdqu (%rax), \XMM0 # XMM0 = Y0 |
| 246 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 247 | PSHUFB_XMM %xmm14, \XMM0 | 262 | PSHUFB_XMM %xmm14, \XMM0 |
| 248 | 263 | ||
| 249 | .if (\i == 5) || (\i == 6) || (\i == 7) | 264 | .if (\i == 5) || (\i == 6) || (\i == 7) |
| 265 | MOVADQ ONE(%RIP),\TMP1 | ||
| 266 | MOVADQ (%arg1),\TMP2 | ||
| 250 | .irpc index, \i_seq | 267 | .irpc index, \i_seq |
| 251 | paddd ONE(%rip), \XMM0 # INCR Y0 | 268 | paddd \TMP1, \XMM0 # INCR Y0 |
| 252 | movdqa \XMM0, %xmm\index | 269 | movdqa \XMM0, %xmm\index |
| 253 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 254 | PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap | 270 | PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap |
| 255 | 271 | pxor \TMP2, %xmm\index | |
| 256 | .endr | ||
| 257 | .irpc index, \i_seq | ||
| 258 | pxor 16*0(%arg1), %xmm\index | ||
| 259 | .endr | ||
| 260 | .irpc index, \i_seq | ||
| 261 | movaps 0x10(%rdi), \TMP1 | ||
| 262 | AESENC \TMP1, %xmm\index # Round 1 | ||
| 263 | .endr | ||
| 264 | .irpc index, \i_seq | ||
| 265 | movaps 0x20(%arg1), \TMP1 | ||
| 266 | AESENC \TMP1, %xmm\index # Round 2 | ||
| 267 | .endr | ||
| 268 | .irpc index, \i_seq | ||
| 269 | movaps 0x30(%arg1), \TMP1 | ||
| 270 | AESENC \TMP1, %xmm\index # Round 2 | ||
| 271 | .endr | ||
| 272 | .irpc index, \i_seq | ||
| 273 | movaps 0x40(%arg1), \TMP1 | ||
| 274 | AESENC \TMP1, %xmm\index # Round 2 | ||
| 275 | .endr | ||
| 276 | .irpc index, \i_seq | ||
| 277 | movaps 0x50(%arg1), \TMP1 | ||
| 278 | AESENC \TMP1, %xmm\index # Round 2 | ||
| 279 | .endr | ||
| 280 | .irpc index, \i_seq | ||
| 281 | movaps 0x60(%arg1), \TMP1 | ||
| 282 | AESENC \TMP1, %xmm\index # Round 2 | ||
| 283 | .endr | 272 | .endr |
| 284 | .irpc index, \i_seq | 273 | lea 0x10(%arg1),%r10 |
| 285 | movaps 0x70(%arg1), \TMP1 | 274 | mov keysize,%eax |
| 286 | AESENC \TMP1, %xmm\index # Round 2 | 275 | shr $2,%eax # 128->4, 192->6, 256->8 |
| 287 | .endr | 276 | add $5,%eax # 128->9, 192->11, 256->13 |
| 288 | .irpc index, \i_seq | 277 | |
| 289 | movaps 0x80(%arg1), \TMP1 | 278 | aes_loop_initial_dec\num_initial_blocks: |
| 290 | AESENC \TMP1, %xmm\index # Round 2 | 279 | MOVADQ (%r10),\TMP1 |
| 291 | .endr | 280 | .irpc index, \i_seq |
| 292 | .irpc index, \i_seq | 281 | AESENC \TMP1, %xmm\index |
| 293 | movaps 0x90(%arg1), \TMP1 | ||
| 294 | AESENC \TMP1, %xmm\index # Round 2 | ||
| 295 | .endr | 282 | .endr |
| 283 | add $16,%r10 | ||
| 284 | sub $1,%eax | ||
| 285 | jnz aes_loop_initial_dec\num_initial_blocks | ||
| 286 | |||
| 287 | MOVADQ (%r10), \TMP1 | ||
| 296 | .irpc index, \i_seq | 288 | .irpc index, \i_seq |
| 297 | movaps 0xa0(%arg1), \TMP1 | 289 | AESENCLAST \TMP1, %xmm\index # Last Round |
| 298 | AESENCLAST \TMP1, %xmm\index # Round 10 | ||
| 299 | .endr | 290 | .endr |
| 300 | .irpc index, \i_seq | 291 | .irpc index, \i_seq |
| 301 | movdqu (%arg3 , %r11, 1), \TMP1 | 292 | movdqu (%arg3 , %r11, 1), \TMP1 |
| @@ -305,10 +296,8 @@ _get_AAD_loop2_done\num_initial_blocks\operation: | |||
| 305 | add $16, %r11 | 296 | add $16, %r11 |
| 306 | 297 | ||
| 307 | movdqa \TMP1, %xmm\index | 298 | movdqa \TMP1, %xmm\index |
| 308 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 309 | PSHUFB_XMM %xmm14, %xmm\index | 299 | PSHUFB_XMM %xmm14, %xmm\index |
| 310 | 300 | # prepare plaintext/ciphertext for GHASH computation | |
| 311 | # prepare plaintext/ciphertext for GHASH computation | ||
| 312 | .endr | 301 | .endr |
| 313 | .endif | 302 | .endif |
| 314 | GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 | 303 | GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 |
| @@ -338,30 +327,28 @@ _get_AAD_loop2_done\num_initial_blocks\operation: | |||
| 338 | * Precomputations for HashKey parallel with encryption of first 4 blocks. | 327 | * Precomputations for HashKey parallel with encryption of first 4 blocks. |
| 339 | * Haskey_i_k holds XORed values of the low and high parts of the Haskey_i | 328 | * Haskey_i_k holds XORed values of the low and high parts of the Haskey_i |
| 340 | */ | 329 | */ |
| 341 | paddd ONE(%rip), \XMM0 # INCR Y0 | 330 | MOVADQ ONE(%rip), \TMP1 |
| 342 | movdqa \XMM0, \XMM1 | 331 | paddd \TMP1, \XMM0 # INCR Y0 |
| 343 | movdqa SHUF_MASK(%rip), %xmm14 | 332 | MOVADQ \XMM0, \XMM1 |
| 344 | PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap | 333 | PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap |
| 345 | 334 | ||
| 346 | paddd ONE(%rip), \XMM0 # INCR Y0 | 335 | paddd \TMP1, \XMM0 # INCR Y0 |
| 347 | movdqa \XMM0, \XMM2 | 336 | MOVADQ \XMM0, \XMM2 |
| 348 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 349 | PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap | 337 | PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap |
| 350 | 338 | ||
| 351 | paddd ONE(%rip), \XMM0 # INCR Y0 | 339 | paddd \TMP1, \XMM0 # INCR Y0 |
| 352 | movdqa \XMM0, \XMM3 | 340 | MOVADQ \XMM0, \XMM3 |
| 353 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 354 | PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap | 341 | PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap |
| 355 | 342 | ||
| 356 | paddd ONE(%rip), \XMM0 # INCR Y0 | 343 | paddd \TMP1, \XMM0 # INCR Y0 |
| 357 | movdqa \XMM0, \XMM4 | 344 | MOVADQ \XMM0, \XMM4 |
| 358 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 359 | PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap | 345 | PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap |
| 360 | 346 | ||
| 361 | pxor 16*0(%arg1), \XMM1 | 347 | MOVADQ 0(%arg1),\TMP1 |
| 362 | pxor 16*0(%arg1), \XMM2 | 348 | pxor \TMP1, \XMM1 |
| 363 | pxor 16*0(%arg1), \XMM3 | 349 | pxor \TMP1, \XMM2 |
| 364 | pxor 16*0(%arg1), \XMM4 | 350 | pxor \TMP1, \XMM3 |
| 351 | pxor \TMP1, \XMM4 | ||
| 365 | movdqa \TMP3, \TMP5 | 352 | movdqa \TMP3, \TMP5 |
| 366 | pshufd $78, \TMP3, \TMP1 | 353 | pshufd $78, \TMP3, \TMP1 |
| 367 | pxor \TMP3, \TMP1 | 354 | pxor \TMP3, \TMP1 |
| @@ -399,7 +386,23 @@ _get_AAD_loop2_done\num_initial_blocks\operation: | |||
| 399 | pshufd $78, \TMP5, \TMP1 | 386 | pshufd $78, \TMP5, \TMP1 |
| 400 | pxor \TMP5, \TMP1 | 387 | pxor \TMP5, \TMP1 |
| 401 | movdqa \TMP1, HashKey_4_k(%rsp) | 388 | movdqa \TMP1, HashKey_4_k(%rsp) |
| 402 | movaps 0xa0(%arg1), \TMP2 | 389 | lea 0xa0(%arg1),%r10 |
| 390 | mov keysize,%eax | ||
| 391 | shr $2,%eax # 128->4, 192->6, 256->8 | ||
| 392 | sub $4,%eax # 128->0, 192->2, 256->4 | ||
| 393 | jz aes_loop_pre_dec_done\num_initial_blocks | ||
| 394 | |||
| 395 | aes_loop_pre_dec\num_initial_blocks: | ||
| 396 | MOVADQ (%r10),\TMP2 | ||
| 397 | .irpc index, 1234 | ||
| 398 | AESENC \TMP2, %xmm\index | ||
| 399 | .endr | ||
| 400 | add $16,%r10 | ||
| 401 | sub $1,%eax | ||
| 402 | jnz aes_loop_pre_dec\num_initial_blocks | ||
| 403 | |||
| 404 | aes_loop_pre_dec_done\num_initial_blocks: | ||
| 405 | MOVADQ (%r10), \TMP2 | ||
| 403 | AESENCLAST \TMP2, \XMM1 | 406 | AESENCLAST \TMP2, \XMM1 |
| 404 | AESENCLAST \TMP2, \XMM2 | 407 | AESENCLAST \TMP2, \XMM2 |
| 405 | AESENCLAST \TMP2, \XMM3 | 408 | AESENCLAST \TMP2, \XMM3 |
| @@ -421,15 +424,11 @@ _get_AAD_loop2_done\num_initial_blocks\operation: | |||
| 421 | movdqu \XMM4, 16*3(%arg2 , %r11 , 1) | 424 | movdqu \XMM4, 16*3(%arg2 , %r11 , 1) |
| 422 | movdqa \TMP1, \XMM4 | 425 | movdqa \TMP1, \XMM4 |
| 423 | add $64, %r11 | 426 | add $64, %r11 |
| 424 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 425 | PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap | 427 | PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap |
| 426 | pxor \XMMDst, \XMM1 | 428 | pxor \XMMDst, \XMM1 |
| 427 | # combine GHASHed value with the corresponding ciphertext | 429 | # combine GHASHed value with the corresponding ciphertext |
| 428 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 429 | PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap | 430 | PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap |
| 430 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 431 | PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap | 431 | PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap |
| 432 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 433 | PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap | 432 | PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap |
| 434 | 433 | ||
| 435 | _initial_blocks_done\num_initial_blocks\operation: | 434 | _initial_blocks_done\num_initial_blocks\operation: |
| @@ -451,6 +450,7 @@ _initial_blocks_done\num_initial_blocks\operation: | |||
| 451 | 450 | ||
| 452 | .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \ | 451 | .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \ |
| 453 | XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation | 452 | XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation |
| 453 | MOVADQ SHUF_MASK(%rip), %xmm14 | ||
| 454 | mov arg7, %r10 # %r10 = AAD | 454 | mov arg7, %r10 # %r10 = AAD |
| 455 | mov arg8, %r12 # %r12 = aadLen | 455 | mov arg8, %r12 # %r12 = aadLen |
| 456 | mov %r12, %r11 | 456 | mov %r12, %r11 |
| @@ -472,7 +472,6 @@ _get_AAD_loop2\num_initial_blocks\operation: | |||
| 472 | cmp %r11, %r12 | 472 | cmp %r11, %r12 |
| 473 | jne _get_AAD_loop2\num_initial_blocks\operation | 473 | jne _get_AAD_loop2\num_initial_blocks\operation |
| 474 | _get_AAD_loop2_done\num_initial_blocks\operation: | 474 | _get_AAD_loop2_done\num_initial_blocks\operation: |
| 475 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 476 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data | 475 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data |
| 477 | 476 | ||
| 478 | xor %r11, %r11 # initialise the data pointer offset as zero | 477 | xor %r11, %r11 # initialise the data pointer offset as zero |
| @@ -481,59 +480,35 @@ _get_AAD_loop2_done\num_initial_blocks\operation: | |||
| 481 | 480 | ||
| 482 | mov %arg5, %rax # %rax = *Y0 | 481 | mov %arg5, %rax # %rax = *Y0 |
| 483 | movdqu (%rax), \XMM0 # XMM0 = Y0 | 482 | movdqu (%rax), \XMM0 # XMM0 = Y0 |
| 484 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 485 | PSHUFB_XMM %xmm14, \XMM0 | 483 | PSHUFB_XMM %xmm14, \XMM0 |
| 486 | 484 | ||
| 487 | .if (\i == 5) || (\i == 6) || (\i == 7) | 485 | .if (\i == 5) || (\i == 6) || (\i == 7) |
| 488 | .irpc index, \i_seq | ||
| 489 | paddd ONE(%rip), \XMM0 # INCR Y0 | ||
| 490 | movdqa \XMM0, %xmm\index | ||
| 491 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 492 | PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap | ||
| 493 | 486 | ||
| 494 | .endr | 487 | MOVADQ ONE(%RIP),\TMP1 |
| 495 | .irpc index, \i_seq | 488 | MOVADQ 0(%arg1),\TMP2 |
| 496 | pxor 16*0(%arg1), %xmm\index | ||
| 497 | .endr | ||
| 498 | .irpc index, \i_seq | ||
| 499 | movaps 0x10(%rdi), \TMP1 | ||
| 500 | AESENC \TMP1, %xmm\index # Round 1 | ||
| 501 | .endr | ||
| 502 | .irpc index, \i_seq | ||
| 503 | movaps 0x20(%arg1), \TMP1 | ||
| 504 | AESENC \TMP1, %xmm\index # Round 2 | ||
| 505 | .endr | ||
| 506 | .irpc index, \i_seq | 489 | .irpc index, \i_seq |
| 507 | movaps 0x30(%arg1), \TMP1 | 490 | paddd \TMP1, \XMM0 # INCR Y0 |
| 508 | AESENC \TMP1, %xmm\index # Round 2 | 491 | MOVADQ \XMM0, %xmm\index |
| 492 | PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap | ||
| 493 | pxor \TMP2, %xmm\index | ||
| 509 | .endr | 494 | .endr |
| 510 | .irpc index, \i_seq | 495 | lea 0x10(%arg1),%r10 |
| 511 | movaps 0x40(%arg1), \TMP1 | 496 | mov keysize,%eax |
| 512 | AESENC \TMP1, %xmm\index # Round 2 | 497 | shr $2,%eax # 128->4, 192->6, 256->8 |
| 513 | .endr | 498 | add $5,%eax # 128->9, 192->11, 256->13 |
| 514 | .irpc index, \i_seq | 499 | |
| 515 | movaps 0x50(%arg1), \TMP1 | 500 | aes_loop_initial_enc\num_initial_blocks: |
| 516 | AESENC \TMP1, %xmm\index # Round 2 | 501 | MOVADQ (%r10),\TMP1 |
| 517 | .endr | 502 | .irpc index, \i_seq |
| 518 | .irpc index, \i_seq | 503 | AESENC \TMP1, %xmm\index |
| 519 | movaps 0x60(%arg1), \TMP1 | ||
| 520 | AESENC \TMP1, %xmm\index # Round 2 | ||
| 521 | .endr | ||
| 522 | .irpc index, \i_seq | ||
| 523 | movaps 0x70(%arg1), \TMP1 | ||
| 524 | AESENC \TMP1, %xmm\index # Round 2 | ||
| 525 | .endr | ||
| 526 | .irpc index, \i_seq | ||
| 527 | movaps 0x80(%arg1), \TMP1 | ||
| 528 | AESENC \TMP1, %xmm\index # Round 2 | ||
| 529 | .endr | ||
| 530 | .irpc index, \i_seq | ||
| 531 | movaps 0x90(%arg1), \TMP1 | ||
| 532 | AESENC \TMP1, %xmm\index # Round 2 | ||
| 533 | .endr | 504 | .endr |
| 505 | add $16,%r10 | ||
| 506 | sub $1,%eax | ||
| 507 | jnz aes_loop_initial_enc\num_initial_blocks | ||
| 508 | |||
| 509 | MOVADQ (%r10), \TMP1 | ||
| 534 | .irpc index, \i_seq | 510 | .irpc index, \i_seq |
| 535 | movaps 0xa0(%arg1), \TMP1 | 511 | AESENCLAST \TMP1, %xmm\index # Last Round |
| 536 | AESENCLAST \TMP1, %xmm\index # Round 10 | ||
| 537 | .endr | 512 | .endr |
| 538 | .irpc index, \i_seq | 513 | .irpc index, \i_seq |
| 539 | movdqu (%arg3 , %r11, 1), \TMP1 | 514 | movdqu (%arg3 , %r11, 1), \TMP1 |
| @@ -541,8 +516,6 @@ _get_AAD_loop2_done\num_initial_blocks\operation: | |||
| 541 | movdqu %xmm\index, (%arg2 , %r11, 1) | 516 | movdqu %xmm\index, (%arg2 , %r11, 1) |
| 542 | # write back plaintext/ciphertext for num_initial_blocks | 517 | # write back plaintext/ciphertext for num_initial_blocks |
| 543 | add $16, %r11 | 518 | add $16, %r11 |
| 544 | |||
| 545 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 546 | PSHUFB_XMM %xmm14, %xmm\index | 519 | PSHUFB_XMM %xmm14, %xmm\index |
| 547 | 520 | ||
| 548 | # prepare plaintext/ciphertext for GHASH computation | 521 | # prepare plaintext/ciphertext for GHASH computation |
| @@ -575,30 +548,28 @@ _get_AAD_loop2_done\num_initial_blocks\operation: | |||
| 575 | * Precomputations for HashKey parallel with encryption of first 4 blocks. | 548 | * Precomputations for HashKey parallel with encryption of first 4 blocks. |
| 576 | * Haskey_i_k holds XORed values of the low and high parts of the Haskey_i | 549 | * Haskey_i_k holds XORed values of the low and high parts of the Haskey_i |
| 577 | */ | 550 | */ |
| 578 | paddd ONE(%rip), \XMM0 # INCR Y0 | 551 | MOVADQ ONE(%RIP),\TMP1 |
| 579 | movdqa \XMM0, \XMM1 | 552 | paddd \TMP1, \XMM0 # INCR Y0 |
| 580 | movdqa SHUF_MASK(%rip), %xmm14 | 553 | MOVADQ \XMM0, \XMM1 |
| 581 | PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap | 554 | PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap |
| 582 | 555 | ||
| 583 | paddd ONE(%rip), \XMM0 # INCR Y0 | 556 | paddd \TMP1, \XMM0 # INCR Y0 |
| 584 | movdqa \XMM0, \XMM2 | 557 | MOVADQ \XMM0, \XMM2 |
| 585 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 586 | PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap | 558 | PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap |
| 587 | 559 | ||
| 588 | paddd ONE(%rip), \XMM0 # INCR Y0 | 560 | paddd \TMP1, \XMM0 # INCR Y0 |
| 589 | movdqa \XMM0, \XMM3 | 561 | MOVADQ \XMM0, \XMM3 |
| 590 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 591 | PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap | 562 | PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap |
| 592 | 563 | ||
| 593 | paddd ONE(%rip), \XMM0 # INCR Y0 | 564 | paddd \TMP1, \XMM0 # INCR Y0 |
| 594 | movdqa \XMM0, \XMM4 | 565 | MOVADQ \XMM0, \XMM4 |
| 595 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 596 | PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap | 566 | PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap |
| 597 | 567 | ||
| 598 | pxor 16*0(%arg1), \XMM1 | 568 | MOVADQ 0(%arg1),\TMP1 |
| 599 | pxor 16*0(%arg1), \XMM2 | 569 | pxor \TMP1, \XMM1 |
| 600 | pxor 16*0(%arg1), \XMM3 | 570 | pxor \TMP1, \XMM2 |
| 601 | pxor 16*0(%arg1), \XMM4 | 571 | pxor \TMP1, \XMM3 |
| 572 | pxor \TMP1, \XMM4 | ||
| 602 | movdqa \TMP3, \TMP5 | 573 | movdqa \TMP3, \TMP5 |
| 603 | pshufd $78, \TMP3, \TMP1 | 574 | pshufd $78, \TMP3, \TMP1 |
| 604 | pxor \TMP3, \TMP1 | 575 | pxor \TMP3, \TMP1 |
| @@ -636,7 +607,23 @@ _get_AAD_loop2_done\num_initial_blocks\operation: | |||
| 636 | pshufd $78, \TMP5, \TMP1 | 607 | pshufd $78, \TMP5, \TMP1 |
| 637 | pxor \TMP5, \TMP1 | 608 | pxor \TMP5, \TMP1 |
| 638 | movdqa \TMP1, HashKey_4_k(%rsp) | 609 | movdqa \TMP1, HashKey_4_k(%rsp) |
| 639 | movaps 0xa0(%arg1), \TMP2 | 610 | lea 0xa0(%arg1),%r10 |
| 611 | mov keysize,%eax | ||
| 612 | shr $2,%eax # 128->4, 192->6, 256->8 | ||
| 613 | sub $4,%eax # 128->0, 192->2, 256->4 | ||
| 614 | jz aes_loop_pre_enc_done\num_initial_blocks | ||
| 615 | |||
| 616 | aes_loop_pre_enc\num_initial_blocks: | ||
| 617 | MOVADQ (%r10),\TMP2 | ||
| 618 | .irpc index, 1234 | ||
| 619 | AESENC \TMP2, %xmm\index | ||
| 620 | .endr | ||
| 621 | add $16,%r10 | ||
| 622 | sub $1,%eax | ||
| 623 | jnz aes_loop_pre_enc\num_initial_blocks | ||
| 624 | |||
| 625 | aes_loop_pre_enc_done\num_initial_blocks: | ||
| 626 | MOVADQ (%r10), \TMP2 | ||
| 640 | AESENCLAST \TMP2, \XMM1 | 627 | AESENCLAST \TMP2, \XMM1 |
| 641 | AESENCLAST \TMP2, \XMM2 | 628 | AESENCLAST \TMP2, \XMM2 |
| 642 | AESENCLAST \TMP2, \XMM3 | 629 | AESENCLAST \TMP2, \XMM3 |
| @@ -655,15 +642,11 @@ _get_AAD_loop2_done\num_initial_blocks\operation: | |||
| 655 | movdqu \XMM4, 16*3(%arg2 , %r11 , 1) | 642 | movdqu \XMM4, 16*3(%arg2 , %r11 , 1) |
| 656 | 643 | ||
| 657 | add $64, %r11 | 644 | add $64, %r11 |
| 658 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 659 | PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap | 645 | PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap |
| 660 | pxor \XMMDst, \XMM1 | 646 | pxor \XMMDst, \XMM1 |
| 661 | # combine GHASHed value with the corresponding ciphertext | 647 | # combine GHASHed value with the corresponding ciphertext |
| 662 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 663 | PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap | 648 | PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap |
| 664 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 665 | PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap | 649 | PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap |
| 666 | movdqa SHUF_MASK(%rip), %xmm14 | ||
| 667 | PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap | 650 | PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap |
| 668 | 651 | ||
| 669 | _initial_blocks_done\num_initial_blocks\operation: | 652 | _initial_blocks_done\num_initial_blocks\operation: |
| @@ -794,7 +777,23 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 794 | AESENC \TMP3, \XMM3 | 777 | AESENC \TMP3, \XMM3 |
| 795 | AESENC \TMP3, \XMM4 | 778 | AESENC \TMP3, \XMM4 |
| 796 | PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0 | 779 | PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0 |
| 797 | movaps 0xa0(%arg1), \TMP3 | 780 | lea 0xa0(%arg1),%r10 |
| 781 | mov keysize,%eax | ||
| 782 | shr $2,%eax # 128->4, 192->6, 256->8 | ||
| 783 | sub $4,%eax # 128->0, 192->2, 256->4 | ||
| 784 | jz aes_loop_par_enc_done | ||
| 785 | |||
| 786 | aes_loop_par_enc: | ||
| 787 | MOVADQ (%r10),\TMP3 | ||
| 788 | .irpc index, 1234 | ||
| 789 | AESENC \TMP3, %xmm\index | ||
| 790 | .endr | ||
| 791 | add $16,%r10 | ||
| 792 | sub $1,%eax | ||
| 793 | jnz aes_loop_par_enc | ||
| 794 | |||
| 795 | aes_loop_par_enc_done: | ||
| 796 | MOVADQ (%r10), \TMP3 | ||
| 798 | AESENCLAST \TMP3, \XMM1 # Round 10 | 797 | AESENCLAST \TMP3, \XMM1 # Round 10 |
| 799 | AESENCLAST \TMP3, \XMM2 | 798 | AESENCLAST \TMP3, \XMM2 |
| 800 | AESENCLAST \TMP3, \XMM3 | 799 | AESENCLAST \TMP3, \XMM3 |
| @@ -986,8 +985,24 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 986 | AESENC \TMP3, \XMM3 | 985 | AESENC \TMP3, \XMM3 |
| 987 | AESENC \TMP3, \XMM4 | 986 | AESENC \TMP3, \XMM4 |
| 988 | PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0 | 987 | PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0 |
| 989 | movaps 0xa0(%arg1), \TMP3 | 988 | lea 0xa0(%arg1),%r10 |
| 990 | AESENCLAST \TMP3, \XMM1 # Round 10 | 989 | mov keysize,%eax |
| 990 | shr $2,%eax # 128->4, 192->6, 256->8 | ||
| 991 | sub $4,%eax # 128->0, 192->2, 256->4 | ||
| 992 | jz aes_loop_par_dec_done | ||
| 993 | |||
| 994 | aes_loop_par_dec: | ||
| 995 | MOVADQ (%r10),\TMP3 | ||
| 996 | .irpc index, 1234 | ||
| 997 | AESENC \TMP3, %xmm\index | ||
| 998 | .endr | ||
| 999 | add $16,%r10 | ||
| 1000 | sub $1,%eax | ||
| 1001 | jnz aes_loop_par_dec | ||
| 1002 | |||
| 1003 | aes_loop_par_dec_done: | ||
| 1004 | MOVADQ (%r10), \TMP3 | ||
| 1005 | AESENCLAST \TMP3, \XMM1 # last round | ||
| 991 | AESENCLAST \TMP3, \XMM2 | 1006 | AESENCLAST \TMP3, \XMM2 |
| 992 | AESENCLAST \TMP3, \XMM3 | 1007 | AESENCLAST \TMP3, \XMM3 |
| 993 | AESENCLAST \TMP3, \XMM4 | 1008 | AESENCLAST \TMP3, \XMM4 |
| @@ -1155,33 +1170,29 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst | |||
| 1155 | pxor \TMP6, \XMMDst # reduced result is in XMMDst | 1170 | pxor \TMP6, \XMMDst # reduced result is in XMMDst |
| 1156 | .endm | 1171 | .endm |
| 1157 | 1172 | ||
| 1158 | /* Encryption of a single block done*/ | ||
| 1159 | .macro ENCRYPT_SINGLE_BLOCK XMM0 TMP1 | ||
| 1160 | 1173 | ||
| 1161 | pxor (%arg1), \XMM0 | 1174 | /* Encryption of a single block |
| 1162 | movaps 16(%arg1), \TMP1 | 1175 | * uses eax & r10 |
| 1163 | AESENC \TMP1, \XMM0 | 1176 | */ |
| 1164 | movaps 32(%arg1), \TMP1 | ||
| 1165 | AESENC \TMP1, \XMM0 | ||
| 1166 | movaps 48(%arg1), \TMP1 | ||
| 1167 | AESENC \TMP1, \XMM0 | ||
| 1168 | movaps 64(%arg1), \TMP1 | ||
| 1169 | AESENC \TMP1, \XMM0 | ||
| 1170 | movaps 80(%arg1), \TMP1 | ||
| 1171 | AESENC \TMP1, \XMM0 | ||
| 1172 | movaps 96(%arg1), \TMP1 | ||
| 1173 | AESENC \TMP1, \XMM0 | ||
| 1174 | movaps 112(%arg1), \TMP1 | ||
| 1175 | AESENC \TMP1, \XMM0 | ||
| 1176 | movaps 128(%arg1), \TMP1 | ||
| 1177 | AESENC \TMP1, \XMM0 | ||
| 1178 | movaps 144(%arg1), \TMP1 | ||
| 1179 | AESENC \TMP1, \XMM0 | ||
| 1180 | movaps 160(%arg1), \TMP1 | ||
| 1181 | AESENCLAST \TMP1, \XMM0 | ||
| 1182 | .endm | ||
| 1183 | 1177 | ||
| 1178 | .macro ENCRYPT_SINGLE_BLOCK XMM0 TMP1 | ||
| 1184 | 1179 | ||
| 1180 | pxor (%arg1), \XMM0 | ||
| 1181 | mov keysize,%eax | ||
| 1182 | shr $2,%eax # 128->4, 192->6, 256->8 | ||
| 1183 | add $5,%eax # 128->9, 192->11, 256->13 | ||
| 1184 | lea 16(%arg1), %r10 # get first expanded key address | ||
| 1185 | |||
| 1186 | _esb_loop_\@: | ||
| 1187 | MOVADQ (%r10),\TMP1 | ||
| 1188 | AESENC \TMP1,\XMM0 | ||
| 1189 | add $16,%r10 | ||
| 1190 | sub $1,%eax | ||
| 1191 | jnz _esb_loop_\@ | ||
| 1192 | |||
| 1193 | MOVADQ (%r10),\TMP1 | ||
| 1194 | AESENCLAST \TMP1,\XMM0 | ||
| 1195 | .endm | ||
| 1185 | /***************************************************************************** | 1196 | /***************************************************************************** |
| 1186 | * void aesni_gcm_dec(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. | 1197 | * void aesni_gcm_dec(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. |
| 1187 | * u8 *out, // Plaintext output. Encrypt in-place is allowed. | 1198 | * u8 *out, // Plaintext output. Encrypt in-place is allowed. |
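A recurring idiom in the rewritten macros above is deriving the AES round count from the key length instead of hard-coding the ten rounds of AES-128. The new keysize operand (2*15*16(%arg1)) reads the key_length member that sits after the two 240-byte expanded key schedules in struct crypto_aes_ctx, and the shr $2 / add $5 pair maps it to the number of AESENC rounds; the loops that start at key offset 0xa0 use sub $4 instead, because nine AESENC rounds (key offsets 0x10-0x90) have already been issued inline before them. The equivalent computation as a C sketch (illustrative only; the assembly keeps the count in %eax):

	/* key_length is 16, 24 or 32 bytes */
	aesenc_rounds = (key_length >> 2) + 5;	/* 9, 11 or 13 AESENC rounds...     */
	/* ...followed by one AESENCLAST, i.e. 10/12/14 rounds in total            */
	extra_rounds  = (key_length >> 2) - 4;	/* 0, 2 or 4 rounds beyond AES-128   */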
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index ae855f4f64b7..947c6bf52c33 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
| @@ -43,6 +43,7 @@ | |||
| 43 | #include <asm/crypto/glue_helper.h> | 43 | #include <asm/crypto/glue_helper.h> |
| 44 | #endif | 44 | #endif |
| 45 | 45 | ||
| 46 | |||
| 46 | /* This data is stored at the end of the crypto_tfm struct. | 47 | /* This data is stored at the end of the crypto_tfm struct. |
| 47 | * It's a type of per "session" data storage location. | 48 | * It's a type of per "session" data storage location. |
| 48 | * This needs to be 16 byte aligned. | 49 | * This needs to be 16 byte aligned. |
| @@ -182,7 +183,8 @@ static void aesni_gcm_enc_avx(void *ctx, u8 *out, | |||
| 182 | u8 *hash_subkey, const u8 *aad, unsigned long aad_len, | 183 | u8 *hash_subkey, const u8 *aad, unsigned long aad_len, |
| 183 | u8 *auth_tag, unsigned long auth_tag_len) | 184 | u8 *auth_tag, unsigned long auth_tag_len) |
| 184 | { | 185 | { |
| 185 | if (plaintext_len < AVX_GEN2_OPTSIZE) { | 186 | struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; |
| 187 | if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){ | ||
| 186 | aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad, | 188 | aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad, |
| 187 | aad_len, auth_tag, auth_tag_len); | 189 | aad_len, auth_tag, auth_tag_len); |
| 188 | } else { | 190 | } else { |
| @@ -197,7 +199,8 @@ static void aesni_gcm_dec_avx(void *ctx, u8 *out, | |||
| 197 | u8 *hash_subkey, const u8 *aad, unsigned long aad_len, | 199 | u8 *hash_subkey, const u8 *aad, unsigned long aad_len, |
| 198 | u8 *auth_tag, unsigned long auth_tag_len) | 200 | u8 *auth_tag, unsigned long auth_tag_len) |
| 199 | { | 201 | { |
| 200 | if (ciphertext_len < AVX_GEN2_OPTSIZE) { | 202 | struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; |
| 203 | if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) { | ||
| 201 | aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad, | 204 | aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad, |
| 202 | aad_len, auth_tag, auth_tag_len); | 205 | aad_len, auth_tag, auth_tag_len); |
| 203 | } else { | 206 | } else { |
| @@ -231,7 +234,8 @@ static void aesni_gcm_enc_avx2(void *ctx, u8 *out, | |||
| 231 | u8 *hash_subkey, const u8 *aad, unsigned long aad_len, | 234 | u8 *hash_subkey, const u8 *aad, unsigned long aad_len, |
| 232 | u8 *auth_tag, unsigned long auth_tag_len) | 235 | u8 *auth_tag, unsigned long auth_tag_len) |
| 233 | { | 236 | { |
| 234 | if (plaintext_len < AVX_GEN2_OPTSIZE) { | 237 | struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; |
| 238 | if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) { | ||
| 235 | aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad, | 239 | aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad, |
| 236 | aad_len, auth_tag, auth_tag_len); | 240 | aad_len, auth_tag, auth_tag_len); |
| 237 | } else if (plaintext_len < AVX_GEN4_OPTSIZE) { | 241 | } else if (plaintext_len < AVX_GEN4_OPTSIZE) { |
| @@ -250,7 +254,8 @@ static void aesni_gcm_dec_avx2(void *ctx, u8 *out, | |||
| 250 | u8 *hash_subkey, const u8 *aad, unsigned long aad_len, | 254 | u8 *hash_subkey, const u8 *aad, unsigned long aad_len, |
| 251 | u8 *auth_tag, unsigned long auth_tag_len) | 255 | u8 *auth_tag, unsigned long auth_tag_len) |
| 252 | { | 256 | { |
| 253 | if (ciphertext_len < AVX_GEN2_OPTSIZE) { | 257 | struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; |
| 258 | if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) { | ||
| 254 | aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, | 259 | aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, |
| 255 | aad, aad_len, auth_tag, auth_tag_len); | 260 | aad, aad_len, auth_tag, auth_tag_len); |
| 256 | } else if (ciphertext_len < AVX_GEN4_OPTSIZE) { | 261 | } else if (ciphertext_len < AVX_GEN4_OPTSIZE) { |
| @@ -511,7 +516,7 @@ static int ctr_crypt(struct blkcipher_desc *desc, | |||
| 511 | kernel_fpu_begin(); | 516 | kernel_fpu_begin(); |
| 512 | while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { | 517 | while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { |
| 513 | aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, | 518 | aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, |
| 514 | nbytes & AES_BLOCK_MASK, walk.iv); | 519 | nbytes & AES_BLOCK_MASK, walk.iv); |
| 515 | nbytes &= AES_BLOCK_SIZE - 1; | 520 | nbytes &= AES_BLOCK_SIZE - 1; |
| 516 | err = blkcipher_walk_done(desc, &walk, nbytes); | 521 | err = blkcipher_walk_done(desc, &walk, nbytes); |
| 517 | } | 522 | } |
| @@ -902,7 +907,8 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, | |||
| 902 | } | 907 | } |
| 903 | /*Account for 4 byte nonce at the end.*/ | 908 | /*Account for 4 byte nonce at the end.*/ |
| 904 | key_len -= 4; | 909 | key_len -= 4; |
| 905 | if (key_len != AES_KEYSIZE_128) { | 910 | if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 && |
| 911 | key_len != AES_KEYSIZE_256) { | ||
| 906 | crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 912 | crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
| 907 | return -EINVAL; | 913 | return -EINVAL; |
| 908 | } | 914 | } |
| @@ -1013,6 +1019,7 @@ static int __driver_rfc4106_encrypt(struct aead_request *req) | |||
| 1013 | __be32 counter = cpu_to_be32(1); | 1019 | __be32 counter = cpu_to_be32(1); |
| 1014 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 1020 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
| 1015 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); | 1021 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); |
| 1022 | u32 key_len = ctx->aes_key_expanded.key_length; | ||
| 1016 | void *aes_ctx = &(ctx->aes_key_expanded); | 1023 | void *aes_ctx = &(ctx->aes_key_expanded); |
| 1017 | unsigned long auth_tag_len = crypto_aead_authsize(tfm); | 1024 | unsigned long auth_tag_len = crypto_aead_authsize(tfm); |
| 1018 | u8 iv_tab[16+AESNI_ALIGN]; | 1025 | u8 iv_tab[16+AESNI_ALIGN]; |
| @@ -1027,6 +1034,13 @@ static int __driver_rfc4106_encrypt(struct aead_request *req) | |||
| 1027 | /* to 8 or 12 bytes */ | 1034 | /* to 8 or 12 bytes */ |
| 1028 | if (unlikely(req->assoclen != 8 && req->assoclen != 12)) | 1035 | if (unlikely(req->assoclen != 8 && req->assoclen != 12)) |
| 1029 | return -EINVAL; | 1036 | return -EINVAL; |
| 1037 | if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16)) | ||
| 1038 | return -EINVAL; | ||
| 1039 | if (unlikely(key_len != AES_KEYSIZE_128 && | ||
| 1040 | key_len != AES_KEYSIZE_192 && | ||
| 1041 | key_len != AES_KEYSIZE_256)) | ||
| 1042 | return -EINVAL; | ||
| 1043 | |||
| 1030 | /* IV below built */ | 1044 | /* IV below built */ |
| 1031 | for (i = 0; i < 4; i++) | 1045 | for (i = 0; i < 4; i++) |
| 1032 | *(iv+i) = ctx->nonce[i]; | 1046 | *(iv+i) = ctx->nonce[i]; |
| @@ -1091,6 +1105,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) | |||
| 1091 | int retval = 0; | 1105 | int retval = 0; |
| 1092 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 1106 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
| 1093 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); | 1107 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); |
| 1108 | u32 key_len = ctx->aes_key_expanded.key_length; | ||
| 1094 | void *aes_ctx = &(ctx->aes_key_expanded); | 1109 | void *aes_ctx = &(ctx->aes_key_expanded); |
| 1095 | unsigned long auth_tag_len = crypto_aead_authsize(tfm); | 1110 | unsigned long auth_tag_len = crypto_aead_authsize(tfm); |
| 1096 | u8 iv_and_authTag[32+AESNI_ALIGN]; | 1111 | u8 iv_and_authTag[32+AESNI_ALIGN]; |
| @@ -1104,6 +1119,13 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) | |||
| 1104 | if (unlikely((req->cryptlen < auth_tag_len) || | 1119 | if (unlikely((req->cryptlen < auth_tag_len) || |
| 1105 | (req->assoclen != 8 && req->assoclen != 12))) | 1120 | (req->assoclen != 8 && req->assoclen != 12))) |
| 1106 | return -EINVAL; | 1121 | return -EINVAL; |
| 1122 | if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16)) | ||
| 1123 | return -EINVAL; | ||
| 1124 | if (unlikely(key_len != AES_KEYSIZE_128 && | ||
| 1125 | key_len != AES_KEYSIZE_192 && | ||
| 1126 | key_len != AES_KEYSIZE_256)) | ||
| 1127 | return -EINVAL; | ||
| 1128 | |||
| 1107 | /* Assuming we are supporting rfc4106 64-bit extended */ | 1129 | /* Assuming we are supporting rfc4106 64-bit extended */ |
| 1108 | /* sequence numbers We need to have the AAD length */ | 1130 | /* sequence numbers We need to have the AAD length */ |
| 1109 | /* equal to 8 or 12 bytes */ | 1131 | /* equal to 8 or 12 bytes */ |
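The aesni-intel changes above extend rfc4106(gcm(aes)) from AES-128-only to all three AES key sizes and add explicit tag-length checks before dispatching to the assembly routines. A small stand-alone C sketch of the same validation, using constants that mirror the kernel's AES_KEYSIZE_* values (the helper name is illustrative, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the kernel's AES_KEYSIZE_* constants (bytes). */
enum { AES_KEYSIZE_128 = 16, AES_KEYSIZE_192 = 24, AES_KEYSIZE_256 = 32 };

/* Illustrative helper: the conditions the patch checks before running GCM. */
static bool rfc4106_params_ok(unsigned int key_len, unsigned int auth_tag_len,
			      unsigned int assoclen)
{
	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256)
		return false;
	if (auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16)
		return false;
	return assoclen == 8 || assoclen == 12;
}

int main(void)
{
	printf("%d\n", rfc4106_params_ok(32, 16, 8));	/* 1: AES-256, full tag */
	printf("%d\n", rfc4106_params_ok(20, 16, 8));	/* 0: invalid key size */
	return 0;
}

RFC 4106 fixes the associated data to the ESP SPI plus a 32- or 64-bit sequence number, which is why only 8- and 12-byte AAD lengths are accepted.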
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c index 38a14f818ef1..d6fc59aaaadf 100644 --- a/arch/x86/crypto/des3_ede_glue.c +++ b/arch/x86/crypto/des3_ede_glue.c | |||
| @@ -504,6 +504,4 @@ MODULE_LICENSE("GPL"); | |||
| 504 | MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized"); | 504 | MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized"); |
| 505 | MODULE_ALIAS_CRYPTO("des3_ede"); | 505 | MODULE_ALIAS_CRYPTO("des3_ede"); |
| 506 | MODULE_ALIAS_CRYPTO("des3_ede-asm"); | 506 | MODULE_ALIAS_CRYPTO("des3_ede-asm"); |
| 507 | MODULE_ALIAS_CRYPTO("des"); | ||
| 508 | MODULE_ALIAS_CRYPTO("des-asm"); | ||
| 509 | MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>"); | 507 | MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>"); |
diff --git a/crypto/Kconfig b/crypto/Kconfig index 87bbc9c1e681..50f4da44a304 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
| @@ -427,6 +427,15 @@ config CRYPTO_MD5 | |||
| 427 | help | 427 | help |
| 428 | MD5 message digest algorithm (RFC1321). | 428 | MD5 message digest algorithm (RFC1321). |
| 429 | 429 | ||
| 430 | config CRYPTO_MD5_OCTEON | ||
| 431 | tristate "MD5 digest algorithm (OCTEON)" | ||
| 432 | depends on CPU_CAVIUM_OCTEON | ||
| 433 | select CRYPTO_MD5 | ||
| 434 | select CRYPTO_HASH | ||
| 435 | help | ||
| 436 | MD5 message digest algorithm (RFC1321) implemented | ||
| 437 | using OCTEON crypto instructions, when available. | ||
| 438 | |||
| 430 | config CRYPTO_MD5_SPARC64 | 439 | config CRYPTO_MD5_SPARC64 |
| 431 | tristate "MD5 digest algorithm (SPARC64)" | 440 | tristate "MD5 digest algorithm (SPARC64)" |
| 432 | depends on SPARC64 | 441 | depends on SPARC64 |
| @@ -1505,6 +1514,15 @@ config CRYPTO_USER_API_SKCIPHER | |||
| 1505 | This option enables the user-spaces interface for symmetric | 1514 | This option enables the user-spaces interface for symmetric |
| 1506 | key cipher algorithms. | 1515 | key cipher algorithms. |
| 1507 | 1516 | ||
| 1517 | config CRYPTO_USER_API_RNG | ||
| 1518 | tristate "User-space interface for random number generator algorithms" | ||
| 1519 | depends on NET | ||
| 1520 | select CRYPTO_RNG | ||
| 1521 | select CRYPTO_USER_API | ||
| 1522 | help | ||
| 1523 | This option enables the user-spaces interface for random | ||
| 1524 | number generator algorithms. | ||
| 1525 | |||
| 1508 | config CRYPTO_HASH_INFO | 1526 | config CRYPTO_HASH_INFO |
| 1509 | bool | 1527 | bool |
| 1510 | 1528 | ||
diff --git a/crypto/Makefile b/crypto/Makefile index 1445b9100c05..ba19465f9ad3 100644 --- a/crypto/Makefile +++ b/crypto/Makefile | |||
| @@ -99,6 +99,7 @@ obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o | |||
| 99 | obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o | 99 | obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o |
| 100 | obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o | 100 | obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o |
| 101 | obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o | 101 | obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o |
| 102 | obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o | ||
| 102 | 103 | ||
| 103 | # | 104 | # |
| 104 | # generic algorithms and the async_tx api | 105 | # generic algorithms and the async_tx api |
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 40886c489903..db201bca1581 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c | |||
| @@ -69,6 +69,7 @@ static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk, | |||
| 69 | static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len) | 69 | static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len) |
| 70 | { | 70 | { |
| 71 | u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); | 71 | u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); |
| 72 | |||
| 72 | return max(start, end_page); | 73 | return max(start, end_page); |
| 73 | } | 74 | } |
| 74 | 75 | ||
| @@ -86,7 +87,7 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk, | |||
| 86 | if (n == len_this_page) | 87 | if (n == len_this_page) |
| 87 | break; | 88 | break; |
| 88 | n -= len_this_page; | 89 | n -= len_this_page; |
| 89 | scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg)); | 90 | scatterwalk_start(&walk->out, sg_next(walk->out.sg)); |
| 90 | } | 91 | } |
| 91 | 92 | ||
| 92 | return bsize; | 93 | return bsize; |
| @@ -284,6 +285,7 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req, | |||
| 284 | walk->iv = req->info; | 285 | walk->iv = req->info; |
| 285 | if (unlikely(((unsigned long)walk->iv & alignmask))) { | 286 | if (unlikely(((unsigned long)walk->iv & alignmask))) { |
| 286 | int err = ablkcipher_copy_iv(walk, tfm, alignmask); | 287 | int err = ablkcipher_copy_iv(walk, tfm, alignmask); |
| 288 | |||
| 287 | if (err) | 289 | if (err) |
| 288 | return err; | 290 | return err; |
| 289 | } | 291 | } |
| @@ -589,7 +591,8 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) | |||
| 589 | if (IS_ERR(inst)) | 591 | if (IS_ERR(inst)) |
| 590 | goto put_tmpl; | 592 | goto put_tmpl; |
| 591 | 593 | ||
| 592 | if ((err = crypto_register_instance(tmpl, inst))) { | 594 | err = crypto_register_instance(tmpl, inst); |
| 595 | if (err) { | ||
| 593 | tmpl->free(inst); | 596 | tmpl->free(inst); |
| 594 | goto put_tmpl; | 597 | goto put_tmpl; |
| 595 | } | 598 | } |
diff --git a/crypto/aead.c b/crypto/aead.c index 547491e35c63..222271070b49 100644 --- a/crypto/aead.c +++ b/crypto/aead.c | |||
| @@ -448,7 +448,8 @@ static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask) | |||
| 448 | if (IS_ERR(inst)) | 448 | if (IS_ERR(inst)) |
| 449 | goto put_tmpl; | 449 | goto put_tmpl; |
| 450 | 450 | ||
| 451 | if ((err = crypto_register_instance(tmpl, inst))) { | 451 | err = crypto_register_instance(tmpl, inst); |
| 452 | if (err) { | ||
| 452 | tmpl->free(inst); | 453 | tmpl->free(inst); |
| 453 | goto put_tmpl; | 454 | goto put_tmpl; |
| 454 | } | 455 | } |
diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 3e80d8b8be45..7f8b7edcadca 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c | |||
| @@ -188,7 +188,7 @@ static int alg_setkey(struct sock *sk, char __user *ukey, | |||
| 188 | err = type->setkey(ask->private, key, keylen); | 188 | err = type->setkey(ask->private, key, keylen); |
| 189 | 189 | ||
| 190 | out: | 190 | out: |
| 191 | sock_kfree_s(sk, key, keylen); | 191 | sock_kzfree_s(sk, key, keylen); |
| 192 | 192 | ||
| 193 | return err; | 193 | return err; |
| 194 | } | 194 | } |
| @@ -215,6 +215,13 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, | |||
| 215 | goto unlock; | 215 | goto unlock; |
| 216 | 216 | ||
| 217 | err = alg_setkey(sk, optval, optlen); | 217 | err = alg_setkey(sk, optval, optlen); |
| 218 | break; | ||
| 219 | case ALG_SET_AEAD_AUTHSIZE: | ||
| 220 | if (sock->state == SS_CONNECTED) | ||
| 221 | goto unlock; | ||
| 222 | if (!type->setauthsize) | ||
| 223 | goto unlock; | ||
| 224 | err = type->setauthsize(ask->private, optlen); | ||
| 218 | } | 225 | } |
| 219 | 226 | ||
| 220 | unlock: | 227 | unlock: |
| @@ -387,7 +394,7 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) | |||
| 387 | if (cmsg->cmsg_level != SOL_ALG) | 394 | if (cmsg->cmsg_level != SOL_ALG) |
| 388 | continue; | 395 | continue; |
| 389 | 396 | ||
| 390 | switch(cmsg->cmsg_type) { | 397 | switch (cmsg->cmsg_type) { |
| 391 | case ALG_SET_IV: | 398 | case ALG_SET_IV: |
| 392 | if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv))) | 399 | if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv))) |
| 393 | return -EINVAL; | 400 | return -EINVAL; |
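The af_alg hunk above adds an ALG_SET_AEAD_AUTHSIZE socket option (and switches key freeing to sock_kzfree_s() so key material is zeroed). A hedged user-space sketch of how the new option would be used on the tfm socket before accept(); error handling is trimmed, and the fallback defines are assumptions for older uapi headers:

#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SOL_ALG
#define SOL_ALG 279			/* assumed value; see linux/socket.h */
#endif
#ifndef ALG_SET_AEAD_AUTHSIZE
#define ALG_SET_AEAD_AUTHSIZE 5		/* assumed value; see linux/if_alg.h */
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",
	};
	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));

	/* Tag length travels in optlen; optval is unused for this option. */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);

	/* ALG_SET_KEY and accept() would follow in a real caller. */
	close(tfmfd);
	return 0;
}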
diff --git a/crypto/ahash.c b/crypto/ahash.c index f6a36a52d738..8acb886032ae 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c | |||
| @@ -55,6 +55,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk) | |||
| 55 | 55 | ||
| 56 | if (offset & alignmask) { | 56 | if (offset & alignmask) { |
| 57 | unsigned int unaligned = alignmask + 1 - (offset & alignmask); | 57 | unsigned int unaligned = alignmask + 1 - (offset & alignmask); |
| 58 | |||
| 58 | if (nbytes > unaligned) | 59 | if (nbytes > unaligned) |
| 59 | nbytes = unaligned; | 60 | nbytes = unaligned; |
| 60 | } | 61 | } |
| @@ -120,7 +121,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) | |||
| 120 | if (!walk->total) | 121 | if (!walk->total) |
| 121 | return 0; | 122 | return 0; |
| 122 | 123 | ||
| 123 | walk->sg = scatterwalk_sg_next(walk->sg); | 124 | walk->sg = sg_next(walk->sg); |
| 124 | 125 | ||
| 125 | return hash_walk_new_entry(walk); | 126 | return hash_walk_new_entry(walk); |
| 126 | } | 127 | } |
diff --git a/crypto/algapi.c b/crypto/algapi.c index 71a8143e23b1..83b04e0884b1 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
| @@ -473,6 +473,7 @@ void crypto_unregister_template(struct crypto_template *tmpl) | |||
| 473 | list = &tmpl->instances; | 473 | list = &tmpl->instances; |
| 474 | hlist_for_each_entry(inst, list, list) { | 474 | hlist_for_each_entry(inst, list, list) { |
| 475 | int err = crypto_remove_alg(&inst->alg, &users); | 475 | int err = crypto_remove_alg(&inst->alg, &users); |
| 476 | |||
| 476 | BUG_ON(err); | 477 | BUG_ON(err); |
| 477 | } | 478 | } |
| 478 | 479 | ||
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c new file mode 100644 index 000000000000..67f612cfed97 --- /dev/null +++ b/crypto/algif_rng.c | |||
| @@ -0,0 +1,192 @@ | |||
| 1 | /* | ||
| 2 | * algif_rng: User-space interface for random number generators | ||
| 3 | * | ||
| 4 | * This file provides the user-space API for random number generators. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de> | ||
| 7 | * | ||
| 8 | * Redistribution and use in source and binary forms, with or without | ||
| 9 | * modification, are permitted provided that the following conditions | ||
| 10 | * are met: | ||
| 11 | * 1. Redistributions of source code must retain the above copyright | ||
| 12 | * notice, and the entire permission notice in its entirety, | ||
| 13 | * including the disclaimer of warranties. | ||
| 14 | * 2. Redistributions in binary form must reproduce the above copyright | ||
| 15 | * notice, this list of conditions and the following disclaimer in the | ||
| 16 | * documentation and/or other materials provided with the distribution. | ||
| 17 | * 3. The name of the author may not be used to endorse or promote | ||
| 18 | * products derived from this software without specific prior | ||
| 19 | * written permission. | ||
| 20 | * | ||
| 21 | * ALTERNATIVELY, this product may be distributed under the terms of | ||
| 22 | * the GNU General Public License, in which case the provisions of the GPL2 | ||
| 23 | * are required INSTEAD OF the above restrictions. (This clause is | ||
| 24 | * necessary due to a potential bad interaction between the GPL and | ||
| 25 | * the restrictions contained in a BSD-style copyright.) | ||
| 26 | * | ||
| 27 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | ||
| 28 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | ||
| 29 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF | ||
| 30 | * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE | ||
| 31 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 32 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT | ||
| 33 | * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
| 34 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | ||
| 35 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 36 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | ||
| 37 | * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH | ||
| 38 | * DAMAGE. | ||
| 39 | */ | ||
| 40 | |||
| 41 | #include <linux/module.h> | ||
| 42 | #include <crypto/rng.h> | ||
| 43 | #include <linux/random.h> | ||
| 44 | #include <crypto/if_alg.h> | ||
| 45 | #include <linux/net.h> | ||
| 46 | #include <net/sock.h> | ||
| 47 | |||
| 48 | MODULE_LICENSE("GPL"); | ||
| 49 | MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>"); | ||
| 50 | MODULE_DESCRIPTION("User-space interface for random number generators"); | ||
| 51 | |||
| 52 | struct rng_ctx { | ||
| 53 | #define MAXSIZE 128 | ||
| 54 | unsigned int len; | ||
| 55 | struct crypto_rng *drng; | ||
| 56 | }; | ||
| 57 | |||
| 58 | static int rng_recvmsg(struct kiocb *unused, struct socket *sock, | ||
| 59 | struct msghdr *msg, size_t len, int flags) | ||
| 60 | { | ||
| 61 | struct sock *sk = sock->sk; | ||
| 62 | struct alg_sock *ask = alg_sk(sk); | ||
| 63 | struct rng_ctx *ctx = ask->private; | ||
| 64 | int err = -EFAULT; | ||
| 65 | int genlen = 0; | ||
| 66 | u8 result[MAXSIZE]; | ||
| 67 | |||
| 68 | if (len == 0) | ||
| 69 | return 0; | ||
| 70 | if (len > MAXSIZE) | ||
| 71 | len = MAXSIZE; | ||
| 72 | |||
| 73 | /* | ||
| 74 | * although not strictly needed, this is a precaution against coding | ||
| 75 | * errors | ||
| 76 | */ | ||
| 77 | memset(result, 0, len); | ||
| 78 | |||
| 79 | /* | ||
| 80 | * The enforcement of a proper seeding of an RNG is done within an | ||
| 81 | * RNG implementation. Some RNGs (DRBG, krng) do not need specific | ||
| 82 | * seeding as they automatically seed. The X9.31 DRNG will return | ||
| 83 | * an error if it was not seeded properly. | ||
| 84 | */ | ||
| 85 | genlen = crypto_rng_get_bytes(ctx->drng, result, len); | ||
| 86 | if (genlen < 0) | ||
| 87 | return genlen; | ||
| 88 | |||
| 89 | err = memcpy_to_msg(msg, result, len); | ||
| 90 | memzero_explicit(result, genlen); | ||
| 91 | |||
| 92 | return err ? err : len; | ||
| 93 | } | ||
| 94 | |||
| 95 | static struct proto_ops algif_rng_ops = { | ||
| 96 | .family = PF_ALG, | ||
| 97 | |||
| 98 | .connect = sock_no_connect, | ||
| 99 | .socketpair = sock_no_socketpair, | ||
| 100 | .getname = sock_no_getname, | ||
| 101 | .ioctl = sock_no_ioctl, | ||
| 102 | .listen = sock_no_listen, | ||
| 103 | .shutdown = sock_no_shutdown, | ||
| 104 | .getsockopt = sock_no_getsockopt, | ||
| 105 | .mmap = sock_no_mmap, | ||
| 106 | .bind = sock_no_bind, | ||
| 107 | .accept = sock_no_accept, | ||
| 108 | .setsockopt = sock_no_setsockopt, | ||
| 109 | .poll = sock_no_poll, | ||
| 110 | .sendmsg = sock_no_sendmsg, | ||
| 111 | .sendpage = sock_no_sendpage, | ||
| 112 | |||
| 113 | .release = af_alg_release, | ||
| 114 | .recvmsg = rng_recvmsg, | ||
| 115 | }; | ||
| 116 | |||
| 117 | static void *rng_bind(const char *name, u32 type, u32 mask) | ||
| 118 | { | ||
| 119 | return crypto_alloc_rng(name, type, mask); | ||
| 120 | } | ||
| 121 | |||
| 122 | static void rng_release(void *private) | ||
| 123 | { | ||
| 124 | crypto_free_rng(private); | ||
| 125 | } | ||
| 126 | |||
| 127 | static void rng_sock_destruct(struct sock *sk) | ||
| 128 | { | ||
| 129 | struct alg_sock *ask = alg_sk(sk); | ||
| 130 | struct rng_ctx *ctx = ask->private; | ||
| 131 | |||
| 132 | sock_kfree_s(sk, ctx, ctx->len); | ||
| 133 | af_alg_release_parent(sk); | ||
| 134 | } | ||
| 135 | |||
| 136 | static int rng_accept_parent(void *private, struct sock *sk) | ||
| 137 | { | ||
| 138 | struct rng_ctx *ctx; | ||
| 139 | struct alg_sock *ask = alg_sk(sk); | ||
| 140 | unsigned int len = sizeof(*ctx); | ||
| 141 | |||
| 142 | ctx = sock_kmalloc(sk, len, GFP_KERNEL); | ||
| 143 | if (!ctx) | ||
| 144 | return -ENOMEM; | ||
| 145 | |||
| 146 | ctx->len = len; | ||
| 147 | |||
| 148 | /* | ||
| 149 | * No seeding done at that point -- if multiple accepts are | ||
| 150 | * done on one RNG instance, each resulting FD points to the same | ||
| 151 | * state of the RNG. | ||
| 152 | */ | ||
| 153 | |||
| 154 | ctx->drng = private; | ||
| 155 | ask->private = ctx; | ||
| 156 | sk->sk_destruct = rng_sock_destruct; | ||
| 157 | |||
| 158 | return 0; | ||
| 159 | } | ||
| 160 | |||
| 161 | static int rng_setkey(void *private, const u8 *seed, unsigned int seedlen) | ||
| 162 | { | ||
| 163 | /* | ||
| 164 | * Check whether seedlen is of sufficient size is done in RNG | ||
| 165 | * implementations. | ||
| 166 | */ | ||
| 167 | return crypto_rng_reset(private, (u8 *)seed, seedlen); | ||
| 168 | } | ||
| 169 | |||
| 170 | static const struct af_alg_type algif_type_rng = { | ||
| 171 | .bind = rng_bind, | ||
| 172 | .release = rng_release, | ||
| 173 | .accept = rng_accept_parent, | ||
| 174 | .setkey = rng_setkey, | ||
| 175 | .ops = &algif_rng_ops, | ||
| 176 | .name = "rng", | ||
| 177 | .owner = THIS_MODULE | ||
| 178 | }; | ||
| 179 | |||
| 180 | static int __init rng_init(void) | ||
| 181 | { | ||
| 182 | return af_alg_register_type(&algif_type_rng); | ||
| 183 | } | ||
| 184 | |||
| 185 | static void __exit rng_exit(void) | ||
| 186 | { | ||
| 187 | int err = af_alg_unregister_type(&algif_type_rng); | ||
| 188 | BUG_ON(err); | ||
| 189 | } | ||
| 190 | |||
| 191 | module_init(rng_init); | ||
| 192 | module_exit(rng_exit); | ||
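algif_rng only implements recvmsg(), so consuming it from user space is a plain bind/accept/read sequence, and each read returns at most 128 bytes (MAXSIZE). A minimal sketch, assuming "stdrng" resolves to a seeded DRBG on the running kernel and omitting error handling:

#include <linux/if_alg.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "rng",
		.salg_name   = "stdrng",
	};
	unsigned char buf[64];
	int tfmfd, opfd;
	ssize_t n;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));

	/* Optional seeding: setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, seed, len)
	 * ends up in rng_setkey() -> crypto_rng_reset(). */

	opfd = accept(tfmfd, NULL, 0);
	n = read(opfd, buf, sizeof(buf));	/* at most 128 bytes per read */
	printf("got %zd random bytes\n", n);

	close(opfd);
	close(tfmfd);
	return 0;
}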
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 6fc12c3fc4b9..0c8a1e5ccadf 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c | |||
| @@ -330,6 +330,7 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock, | |||
| 330 | 330 | ||
| 331 | sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); | 331 | sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); |
| 332 | sg = sgl->sg; | 332 | sg = sgl->sg; |
| 333 | sg_unmark_end(sg + sgl->cur); | ||
| 333 | do { | 334 | do { |
| 334 | i = sgl->cur; | 335 | i = sgl->cur; |
| 335 | plen = min_t(int, len, PAGE_SIZE); | 336 | plen = min_t(int, len, PAGE_SIZE); |
| @@ -355,6 +356,9 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock, | |||
| 355 | sgl->cur++; | 356 | sgl->cur++; |
| 356 | } while (len && sgl->cur < MAX_SGL_ENTS); | 357 | } while (len && sgl->cur < MAX_SGL_ENTS); |
| 357 | 358 | ||
| 359 | if (!size) | ||
| 360 | sg_mark_end(sg + sgl->cur - 1); | ||
| 361 | |||
| 358 | ctx->merge = plen & (PAGE_SIZE - 1); | 362 | ctx->merge = plen & (PAGE_SIZE - 1); |
| 359 | } | 363 | } |
| 360 | 364 | ||
| @@ -401,6 +405,10 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page, | |||
| 401 | ctx->merge = 0; | 405 | ctx->merge = 0; |
| 402 | sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); | 406 | sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); |
| 403 | 407 | ||
| 408 | if (sgl->cur) | ||
| 409 | sg_unmark_end(sgl->sg + sgl->cur - 1); | ||
| 410 | |||
| 411 | sg_mark_end(sgl->sg + sgl->cur); | ||
| 404 | get_page(page); | 412 | get_page(page); |
| 405 | sg_set_page(sgl->sg + sgl->cur, page, size, offset); | 413 | sg_set_page(sgl->sg + sgl->cur, page, size, offset); |
| 406 | sgl->cur++; | 414 | sgl->cur++; |
diff --git a/crypto/cts.c b/crypto/cts.c index bd9405820e8a..e467ec0acf9f 100644 --- a/crypto/cts.c +++ b/crypto/cts.c | |||
| @@ -290,6 +290,9 @@ static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb) | |||
| 290 | if (!is_power_of_2(alg->cra_blocksize)) | 290 | if (!is_power_of_2(alg->cra_blocksize)) |
| 291 | goto out_put_alg; | 291 | goto out_put_alg; |
| 292 | 292 | ||
| 293 | if (strncmp(alg->cra_name, "cbc(", 4)) | ||
| 294 | goto out_put_alg; | ||
| 295 | |||
| 293 | inst = crypto_alloc_instance("cts", alg); | 296 | inst = crypto_alloc_instance("cts", alg); |
| 294 | if (IS_ERR(inst)) | 297 | if (IS_ERR(inst)) |
| 295 | goto out_put_alg; | 298 | goto out_put_alg; |
| @@ -307,8 +310,6 @@ static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb) | |||
| 307 | inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize; | 310 | inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize; |
| 308 | inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize; | 311 | inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize; |
| 309 | 312 | ||
| 310 | inst->alg.cra_blkcipher.geniv = "seqiv"; | ||
| 311 | |||
| 312 | inst->alg.cra_ctxsize = sizeof(struct crypto_cts_ctx); | 313 | inst->alg.cra_ctxsize = sizeof(struct crypto_cts_ctx); |
| 313 | 314 | ||
| 314 | inst->alg.cra_init = crypto_cts_init_tfm; | 315 | inst->alg.cra_init = crypto_cts_init_tfm; |
diff --git a/crypto/drbg.c b/crypto/drbg.c index d748a1d0ca24..d8ff16e5c322 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c | |||
| @@ -98,7 +98,6 @@ | |||
| 98 | */ | 98 | */ |
| 99 | 99 | ||
| 100 | #include <crypto/drbg.h> | 100 | #include <crypto/drbg.h> |
| 101 | #include <linux/string.h> | ||
| 102 | 101 | ||
| 103 | /*************************************************************** | 102 | /*************************************************************** |
| 104 | * Backend cipher definitions available to DRBG | 103 | * Backend cipher definitions available to DRBG |
| @@ -223,15 +222,6 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags) | |||
| 223 | * function. Thus, the function implicitly knows the size of the | 222 | * function. Thus, the function implicitly knows the size of the |
| 224 | * buffer. | 223 | * buffer. |
| 225 | * | 224 | * |
| 226 | * The FIPS test can be called in an endless loop until it returns | ||
| 227 | * true. Although the code looks like a potential for a deadlock, it | ||
| 228 | * is not the case, because returning a false cannot mathematically | ||
| 229 | * occur (except once when a reseed took place and the updated state | ||
| 230 | * would is now set up such that the generation of new value returns | ||
| 231 | * an identical one -- this is most unlikely and would happen only once). | ||
| 232 | * Thus, if this function repeatedly returns false and thus would cause | ||
| 233 | * a deadlock, the integrity of the entire kernel is lost. | ||
| 234 | * | ||
| 235 | * @drbg DRBG handle | 225 | * @drbg DRBG handle |
| 236 | * @buf output buffer of random data to be checked | 226 | * @buf output buffer of random data to be checked |
| 237 | * | 227 | * |
| @@ -258,6 +248,8 @@ static bool drbg_fips_continuous_test(struct drbg_state *drbg, | |||
| 258 | return false; | 248 | return false; |
| 259 | } | 249 | } |
| 260 | ret = memcmp(drbg->prev, buf, drbg_blocklen(drbg)); | 250 | ret = memcmp(drbg->prev, buf, drbg_blocklen(drbg)); |
| 251 | if (!ret) | ||
| 252 | panic("DRBG continuous self test failed\n"); | ||
| 261 | memcpy(drbg->prev, buf, drbg_blocklen(drbg)); | 253 | memcpy(drbg->prev, buf, drbg_blocklen(drbg)); |
| 262 | /* the test shall pass when the two compared values are not equal */ | 254 | /* the test shall pass when the two compared values are not equal */ |
| 263 | return ret != 0; | 255 | return ret != 0; |
| @@ -498,9 +490,9 @@ static int drbg_ctr_df(struct drbg_state *drbg, | |||
| 498 | ret = 0; | 490 | ret = 0; |
| 499 | 491 | ||
| 500 | out: | 492 | out: |
| 501 | memzero_explicit(iv, drbg_blocklen(drbg)); | 493 | memset(iv, 0, drbg_blocklen(drbg)); |
| 502 | memzero_explicit(temp, drbg_statelen(drbg)); | 494 | memset(temp, 0, drbg_statelen(drbg)); |
| 503 | memzero_explicit(pad, drbg_blocklen(drbg)); | 495 | memset(pad, 0, drbg_blocklen(drbg)); |
| 504 | return ret; | 496 | return ret; |
| 505 | } | 497 | } |
| 506 | 498 | ||
| @@ -574,9 +566,9 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed, | |||
| 574 | ret = 0; | 566 | ret = 0; |
| 575 | 567 | ||
| 576 | out: | 568 | out: |
| 577 | memzero_explicit(temp, drbg_statelen(drbg) + drbg_blocklen(drbg)); | 569 | memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg)); |
| 578 | if (2 != reseed) | 570 | if (2 != reseed) |
| 579 | memzero_explicit(df_data, drbg_statelen(drbg)); | 571 | memset(df_data, 0, drbg_statelen(drbg)); |
| 580 | return ret; | 572 | return ret; |
| 581 | } | 573 | } |
| 582 | 574 | ||
| @@ -634,7 +626,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg, | |||
| 634 | len = ret; | 626 | len = ret; |
| 635 | 627 | ||
| 636 | out: | 628 | out: |
| 637 | memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg)); | 629 | memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); |
| 638 | return len; | 630 | return len; |
| 639 | } | 631 | } |
| 640 | 632 | ||
| @@ -872,7 +864,7 @@ static int drbg_hash_df(struct drbg_state *drbg, | |||
| 872 | } | 864 | } |
| 873 | 865 | ||
| 874 | out: | 866 | out: |
| 875 | memzero_explicit(tmp, drbg_blocklen(drbg)); | 867 | memset(tmp, 0, drbg_blocklen(drbg)); |
| 876 | return ret; | 868 | return ret; |
| 877 | } | 869 | } |
| 878 | 870 | ||
| @@ -916,7 +908,7 @@ static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed, | |||
| 916 | ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2); | 908 | ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2); |
| 917 | 909 | ||
| 918 | out: | 910 | out: |
| 919 | memzero_explicit(drbg->scratchpad, drbg_statelen(drbg)); | 911 | memset(drbg->scratchpad, 0, drbg_statelen(drbg)); |
| 920 | return ret; | 912 | return ret; |
| 921 | } | 913 | } |
| 922 | 914 | ||
| @@ -951,7 +943,7 @@ static int drbg_hash_process_addtl(struct drbg_state *drbg, | |||
| 951 | drbg->scratchpad, drbg_blocklen(drbg)); | 943 | drbg->scratchpad, drbg_blocklen(drbg)); |
| 952 | 944 | ||
| 953 | out: | 945 | out: |
| 954 | memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg)); | 946 | memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); |
| 955 | return ret; | 947 | return ret; |
| 956 | } | 948 | } |
| 957 | 949 | ||
| @@ -998,7 +990,7 @@ static int drbg_hash_hashgen(struct drbg_state *drbg, | |||
| 998 | } | 990 | } |
| 999 | 991 | ||
| 1000 | out: | 992 | out: |
| 1001 | memzero_explicit(drbg->scratchpad, | 993 | memset(drbg->scratchpad, 0, |
| 1002 | (drbg_statelen(drbg) + drbg_blocklen(drbg))); | 994 | (drbg_statelen(drbg) + drbg_blocklen(drbg))); |
| 1003 | return len; | 995 | return len; |
| 1004 | } | 996 | } |
| @@ -1047,7 +1039,7 @@ static int drbg_hash_generate(struct drbg_state *drbg, | |||
| 1047 | drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8); | 1039 | drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8); |
| 1048 | 1040 | ||
| 1049 | out: | 1041 | out: |
| 1050 | memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg)); | 1042 | memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); |
| 1051 | return len; | 1043 | return len; |
| 1052 | } | 1044 | } |
| 1053 | 1045 | ||
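In drbg.c the continuous self test now fails hard: an identical back-to-back output block triggers panic() instead of reporting failure to the caller, and the temporary buffers are cleared with plain memset(). A user-space model of that test logic (BLOCKLEN is illustrative; the kernel uses drbg_blocklen()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BLOCKLEN 16	/* illustrative; the kernel uses drbg_blocklen(drbg) */

/*
 * Model of the continuous test: every generated block must differ from the
 * previous one; an identical block is fatal (the patched kernel panics).
 */
static void continuous_test(unsigned char *prev, const unsigned char *cur)
{
	if (memcmp(prev, cur, BLOCKLEN) == 0) {
		fprintf(stderr, "DRBG continuous self test failed\n");
		abort();
	}
	memcpy(prev, cur, BLOCKLEN);	/* remember the block for next time */
}

int main(void)
{
	unsigned char prev[BLOCKLEN] = { 0 };
	unsigned char cur[BLOCKLEN]  = { 1 };

	continuous_test(prev, cur);	/* passes: the blocks differ */
	printf("continuous test passed\n");
	return 0;
}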
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 79ca2278c2a3..3bd749c7bb70 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c | |||
| @@ -62,7 +62,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out, | |||
| 62 | walk->offset += PAGE_SIZE - 1; | 62 | walk->offset += PAGE_SIZE - 1; |
| 63 | walk->offset &= PAGE_MASK; | 63 | walk->offset &= PAGE_MASK; |
| 64 | if (walk->offset >= walk->sg->offset + walk->sg->length) | 64 | if (walk->offset >= walk->sg->offset + walk->sg->length) |
| 65 | scatterwalk_start(walk, scatterwalk_sg_next(walk->sg)); | 65 | scatterwalk_start(walk, sg_next(walk->sg)); |
| 66 | } | 66 | } |
| 67 | } | 67 | } |
| 68 | 68 | ||
| @@ -116,7 +116,7 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, | |||
| 116 | break; | 116 | break; |
| 117 | 117 | ||
| 118 | offset += sg->length; | 118 | offset += sg->length; |
| 119 | sg = scatterwalk_sg_next(sg); | 119 | sg = sg_next(sg); |
| 120 | } | 120 | } |
| 121 | 121 | ||
| 122 | scatterwalk_advance(&walk, start - offset); | 122 | scatterwalk_advance(&walk, start - offset); |
| @@ -136,7 +136,7 @@ int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes) | |||
| 136 | do { | 136 | do { |
| 137 | offset += sg->length; | 137 | offset += sg->length; |
| 138 | n++; | 138 | n++; |
| 139 | sg = scatterwalk_sg_next(sg); | 139 | sg = sg_next(sg); |
| 140 | 140 | ||
| 141 | /* num_bytes is too large */ | 141 | /* num_bytes is too large */ |
| 142 | if (unlikely(!sg && (num_bytes < offset))) | 142 | if (unlikely(!sg && (num_bytes < offset))) |
diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 9daa854cc485..b7bb9a2f4a31 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c | |||
| @@ -267,6 +267,12 @@ static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb) | |||
| 267 | if (IS_ERR(inst)) | 267 | if (IS_ERR(inst)) |
| 268 | goto out; | 268 | goto out; |
| 269 | 269 | ||
| 270 | if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64)) { | ||
| 271 | skcipher_geniv_free(inst); | ||
| 272 | inst = ERR_PTR(-EINVAL); | ||
| 273 | goto out; | ||
| 274 | } | ||
| 275 | |||
| 270 | inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first; | 276 | inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first; |
| 271 | 277 | ||
| 272 | inst->alg.cra_init = seqiv_init; | 278 | inst->alg.cra_init = seqiv_init; |
| @@ -287,6 +293,12 @@ static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb) | |||
| 287 | if (IS_ERR(inst)) | 293 | if (IS_ERR(inst)) |
| 288 | goto out; | 294 | goto out; |
| 289 | 295 | ||
| 296 | if (inst->alg.cra_aead.ivsize < sizeof(u64)) { | ||
| 297 | aead_geniv_free(inst); | ||
| 298 | inst = ERR_PTR(-EINVAL); | ||
| 299 | goto out; | ||
| 300 | } | ||
| 301 | |||
| 290 | inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first; | 302 | inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first; |
| 291 | 303 | ||
| 292 | inst->alg.cra_init = seqiv_aead_init; | 304 | inst->alg.cra_init = seqiv_aead_init; |
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 1d864e988ea9..4b9e23fa4204 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
| @@ -250,19 +250,19 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], | |||
| 250 | int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE; | 250 | int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE; |
| 251 | int k, rem; | 251 | int k, rem; |
| 252 | 252 | ||
| 253 | np = (np > XBUFSIZE) ? XBUFSIZE : np; | ||
| 254 | rem = buflen % PAGE_SIZE; | ||
| 255 | if (np > XBUFSIZE) { | 253 | if (np > XBUFSIZE) { |
| 256 | rem = PAGE_SIZE; | 254 | rem = PAGE_SIZE; |
| 257 | np = XBUFSIZE; | 255 | np = XBUFSIZE; |
| 256 | } else { | ||
| 257 | rem = buflen % PAGE_SIZE; | ||
| 258 | } | 258 | } |
| 259 | |||
| 259 | sg_init_table(sg, np); | 260 | sg_init_table(sg, np); |
| 260 | for (k = 0; k < np; ++k) { | 261 | np--; |
| 261 | if (k == (np-1)) | 262 | for (k = 0; k < np; k++) |
| 262 | sg_set_buf(&sg[k], xbuf[k], rem); | 263 | sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE); |
| 263 | else | 264 | |
| 264 | sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE); | 265 | sg_set_buf(&sg[k], xbuf[k], rem); |
| 265 | } | ||
| 266 | } | 266 | } |
| 267 | 267 | ||
| 268 | static void test_aead_speed(const char *algo, int enc, unsigned int secs, | 268 | static void test_aead_speed(const char *algo, int enc, unsigned int secs, |
| @@ -280,16 +280,20 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, | |||
| 280 | struct scatterlist *sgout; | 280 | struct scatterlist *sgout; |
| 281 | const char *e; | 281 | const char *e; |
| 282 | void *assoc; | 282 | void *assoc; |
| 283 | char iv[MAX_IVLEN]; | 283 | char *iv; |
| 284 | char *xbuf[XBUFSIZE]; | 284 | char *xbuf[XBUFSIZE]; |
| 285 | char *xoutbuf[XBUFSIZE]; | 285 | char *xoutbuf[XBUFSIZE]; |
| 286 | char *axbuf[XBUFSIZE]; | 286 | char *axbuf[XBUFSIZE]; |
| 287 | unsigned int *b_size; | 287 | unsigned int *b_size; |
| 288 | unsigned int iv_len; | 288 | unsigned int iv_len; |
| 289 | 289 | ||
| 290 | iv = kzalloc(MAX_IVLEN, GFP_KERNEL); | ||
| 291 | if (!iv) | ||
| 292 | return; | ||
| 293 | |||
| 290 | if (aad_size >= PAGE_SIZE) { | 294 | if (aad_size >= PAGE_SIZE) { |
| 291 | pr_err("associate data length (%u) too big\n", aad_size); | 295 | pr_err("associate data length (%u) too big\n", aad_size); |
| 292 | return; | 296 | goto out_noxbuf; |
| 293 | } | 297 | } |
| 294 | 298 | ||
| 295 | if (enc == ENCRYPT) | 299 | if (enc == ENCRYPT) |
| @@ -355,7 +359,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, | |||
| 355 | 359 | ||
| 356 | iv_len = crypto_aead_ivsize(tfm); | 360 | iv_len = crypto_aead_ivsize(tfm); |
| 357 | if (iv_len) | 361 | if (iv_len) |
| 358 | memset(&iv, 0xff, iv_len); | 362 | memset(iv, 0xff, iv_len); |
| 359 | 363 | ||
| 360 | crypto_aead_clear_flags(tfm, ~0); | 364 | crypto_aead_clear_flags(tfm, ~0); |
| 361 | printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ", | 365 | printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ", |
| @@ -408,6 +412,7 @@ out_nooutbuf: | |||
| 408 | out_noaxbuf: | 412 | out_noaxbuf: |
| 409 | testmgr_free_buf(xbuf); | 413 | testmgr_free_buf(xbuf); |
| 410 | out_noxbuf: | 414 | out_noxbuf: |
| 415 | kfree(iv); | ||
| 411 | return; | 416 | return; |
| 412 | } | 417 | } |
| 413 | 418 | ||
| @@ -764,10 +769,9 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret) | |||
| 764 | if (ret == -EINPROGRESS || ret == -EBUSY) { | 769 | if (ret == -EINPROGRESS || ret == -EBUSY) { |
| 765 | struct tcrypt_result *tr = req->base.data; | 770 | struct tcrypt_result *tr = req->base.data; |
| 766 | 771 | ||
| 767 | ret = wait_for_completion_interruptible(&tr->completion); | 772 | wait_for_completion(&tr->completion); |
| 768 | if (!ret) | ||
| 769 | ret = tr->err; | ||
| 770 | reinit_completion(&tr->completion); | 773 | reinit_completion(&tr->completion); |
| 774 | ret = tr->err; | ||
| 771 | } | 775 | } |
| 772 | return ret; | 776 | return ret; |
| 773 | } | 777 | } |
| @@ -993,10 +997,9 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret) | |||
| 993 | if (ret == -EINPROGRESS || ret == -EBUSY) { | 997 | if (ret == -EINPROGRESS || ret == -EBUSY) { |
| 994 | struct tcrypt_result *tr = req->base.data; | 998 | struct tcrypt_result *tr = req->base.data; |
| 995 | 999 | ||
| 996 | ret = wait_for_completion_interruptible(&tr->completion); | 1000 | wait_for_completion(&tr->completion); |
| 997 | if (!ret) | ||
| 998 | ret = tr->err; | ||
| 999 | reinit_completion(&tr->completion); | 1001 | reinit_completion(&tr->completion); |
| 1002 | ret = tr->err; | ||
| 1000 | } | 1003 | } |
| 1001 | 1004 | ||
| 1002 | return ret; | 1005 | return ret; |
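The reworked sg_init_aead() in tcrypt.c splits the test buffer into at most XBUFSIZE scatterlist entries: full pages for all but the last entry, which carries the remainder (or a full page when the buffer had to be clamped to XBUFSIZE pages). A stand-alone sketch of that arithmetic with illustrative constants:

#include <stdio.h>

#define PAGE_SIZE 4096	/* illustrative */
#define XBUFSIZE  8	/* illustrative; tcrypt defines its own */

/* Print how sg_init_aead() would lay out a buflen-byte buffer. */
static void layout(int buflen)
{
	int np = (buflen + PAGE_SIZE - 1) / PAGE_SIZE;
	int rem, k;

	if (np > XBUFSIZE) {		/* clamped: every entry is a full page */
		rem = PAGE_SIZE;
		np = XBUFSIZE;
	} else {
		rem = buflen % PAGE_SIZE;
	}

	for (k = 0; k < np - 1; k++)
		printf("sg[%d]: %d bytes\n", k, PAGE_SIZE);
	printf("sg[%d]: %d bytes (last entry)\n", np - 1, rem);
}

int main(void)
{
	layout(3 * PAGE_SIZE + 100);	/* 4096, 4096, 4096, 100 */
	layout(20 * PAGE_SIZE);		/* clamped to 8 full pages */
	return 0;
}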
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 037368d34586..f4ed6d4205e7 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
| @@ -181,10 +181,9 @@ static void testmgr_free_buf(char *buf[XBUFSIZE]) | |||
| 181 | static int wait_async_op(struct tcrypt_result *tr, int ret) | 181 | static int wait_async_op(struct tcrypt_result *tr, int ret) |
| 182 | { | 182 | { |
| 183 | if (ret == -EINPROGRESS || ret == -EBUSY) { | 183 | if (ret == -EINPROGRESS || ret == -EBUSY) { |
| 184 | ret = wait_for_completion_interruptible(&tr->completion); | 184 | wait_for_completion(&tr->completion); |
| 185 | if (!ret) | ||
| 186 | ret = tr->err; | ||
| 187 | reinit_completion(&tr->completion); | 185 | reinit_completion(&tr->completion); |
| 186 | ret = tr->err; | ||
| 188 | } | 187 | } |
| 189 | return ret; | 188 | return ret; |
| 190 | } | 189 | } |
| @@ -353,12 +352,11 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
| 353 | break; | 352 | break; |
| 354 | case -EINPROGRESS: | 353 | case -EINPROGRESS: |
| 355 | case -EBUSY: | 354 | case -EBUSY: |
| 356 | ret = wait_for_completion_interruptible( | 355 | wait_for_completion(&tresult.completion); |
| 357 | &tresult.completion); | 356 | reinit_completion(&tresult.completion); |
| 358 | if (!ret && !(ret = tresult.err)) { | 357 | ret = tresult.err; |
| 359 | reinit_completion(&tresult.completion); | 358 | if (!ret) |
| 360 | break; | 359 | break; |
| 361 | } | ||
| 362 | /* fall through */ | 360 | /* fall through */ |
| 363 | default: | 361 | default: |
| 364 | printk(KERN_ERR "alg: hash: digest failed " | 362 | printk(KERN_ERR "alg: hash: digest failed " |
| @@ -431,7 +429,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
| 431 | struct scatterlist *sgout; | 429 | struct scatterlist *sgout; |
| 432 | const char *e, *d; | 430 | const char *e, *d; |
| 433 | struct tcrypt_result result; | 431 | struct tcrypt_result result; |
| 434 | unsigned int authsize; | 432 | unsigned int authsize, iv_len; |
| 435 | void *input; | 433 | void *input; |
| 436 | void *output; | 434 | void *output; |
| 437 | void *assoc; | 435 | void *assoc; |
| @@ -502,10 +500,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
| 502 | 500 | ||
| 503 | memcpy(input, template[i].input, template[i].ilen); | 501 | memcpy(input, template[i].input, template[i].ilen); |
| 504 | memcpy(assoc, template[i].assoc, template[i].alen); | 502 | memcpy(assoc, template[i].assoc, template[i].alen); |
| 503 | iv_len = crypto_aead_ivsize(tfm); | ||
| 505 | if (template[i].iv) | 504 | if (template[i].iv) |
| 506 | memcpy(iv, template[i].iv, MAX_IVLEN); | 505 | memcpy(iv, template[i].iv, iv_len); |
| 507 | else | 506 | else |
| 508 | memset(iv, 0, MAX_IVLEN); | 507 | memset(iv, 0, iv_len); |
| 509 | 508 | ||
| 510 | crypto_aead_clear_flags(tfm, ~0); | 509 | crypto_aead_clear_flags(tfm, ~0); |
| 511 | if (template[i].wk) | 510 | if (template[i].wk) |
| @@ -569,12 +568,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
| 569 | break; | 568 | break; |
| 570 | case -EINPROGRESS: | 569 | case -EINPROGRESS: |
| 571 | case -EBUSY: | 570 | case -EBUSY: |
| 572 | ret = wait_for_completion_interruptible( | 571 | wait_for_completion(&result.completion); |
| 573 | &result.completion); | 572 | reinit_completion(&result.completion); |
| 574 | if (!ret && !(ret = result.err)) { | 573 | ret = result.err; |
| 575 | reinit_completion(&result.completion); | 574 | if (!ret) |
| 576 | break; | 575 | break; |
| 577 | } | ||
| 578 | case -EBADMSG: | 576 | case -EBADMSG: |
| 579 | if (template[i].novrfy) | 577 | if (template[i].novrfy) |
| 580 | /* verification failure was expected */ | 578 | /* verification failure was expected */ |
| @@ -720,12 +718,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
| 720 | break; | 718 | break; |
| 721 | case -EINPROGRESS: | 719 | case -EINPROGRESS: |
| 722 | case -EBUSY: | 720 | case -EBUSY: |
| 723 | ret = wait_for_completion_interruptible( | 721 | wait_for_completion(&result.completion); |
| 724 | &result.completion); | 722 | reinit_completion(&result.completion); |
| 725 | if (!ret && !(ret = result.err)) { | 723 | ret = result.err; |
| 726 | reinit_completion(&result.completion); | 724 | if (!ret) |
| 727 | break; | 725 | break; |
| 728 | } | ||
| 729 | case -EBADMSG: | 726 | case -EBADMSG: |
| 730 | if (template[i].novrfy) | 727 | if (template[i].novrfy) |
| 731 | /* verification failure was expected */ | 728 | /* verification failure was expected */ |
| @@ -1002,12 +999,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc, | |||
| 1002 | break; | 999 | break; |
| 1003 | case -EINPROGRESS: | 1000 | case -EINPROGRESS: |
| 1004 | case -EBUSY: | 1001 | case -EBUSY: |
| 1005 | ret = wait_for_completion_interruptible( | 1002 | wait_for_completion(&result.completion); |
| 1006 | &result.completion); | 1003 | reinit_completion(&result.completion); |
| 1007 | if (!ret && !((ret = result.err))) { | 1004 | ret = result.err; |
| 1008 | reinit_completion(&result.completion); | 1005 | if (!ret) |
| 1009 | break; | 1006 | break; |
| 1010 | } | ||
| 1011 | /* fall through */ | 1007 | /* fall through */ |
| 1012 | default: | 1008 | default: |
| 1013 | pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n", | 1009 | pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n", |
| @@ -1097,12 +1093,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc, | |||
| 1097 | break; | 1093 | break; |
| 1098 | case -EINPROGRESS: | 1094 | case -EINPROGRESS: |
| 1099 | case -EBUSY: | 1095 | case -EBUSY: |
| 1100 | ret = wait_for_completion_interruptible( | 1096 | wait_for_completion(&result.completion); |
| 1101 | &result.completion); | 1097 | reinit_completion(&result.completion); |
| 1102 | if (!ret && !((ret = result.err))) { | 1098 | ret = result.err; |
| 1103 | reinit_completion(&result.completion); | 1099 | if (!ret) |
| 1104 | break; | 1100 | break; |
| 1105 | } | ||
| 1106 | /* fall through */ | 1101 | /* fall through */ |
| 1107 | default: | 1102 | default: |
| 1108 | pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n", | 1103 | pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n", |
| @@ -3299,6 +3294,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
| 3299 | }, { | 3294 | }, { |
| 3300 | .alg = "rfc4106(gcm(aes))", | 3295 | .alg = "rfc4106(gcm(aes))", |
| 3301 | .test = alg_test_aead, | 3296 | .test = alg_test_aead, |
| 3297 | .fips_allowed = 1, | ||
| 3302 | .suite = { | 3298 | .suite = { |
| 3303 | .aead = { | 3299 | .aead = { |
| 3304 | .enc = { | 3300 | .enc = { |
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 1500cfd799a7..32a8a867f7f8 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c | |||
| @@ -42,6 +42,7 @@ | |||
| 42 | #include <linux/delay.h> | 42 | #include <linux/delay.h> |
| 43 | #include <linux/slab.h> | 43 | #include <linux/slab.h> |
| 44 | #include <linux/random.h> | 44 | #include <linux/random.h> |
| 45 | #include <linux/err.h> | ||
| 45 | #include <asm/uaccess.h> | 46 | #include <asm/uaccess.h> |
| 46 | 47 | ||
| 47 | 48 | ||
| @@ -53,7 +54,10 @@ | |||
| 53 | static struct hwrng *current_rng; | 54 | static struct hwrng *current_rng; |
| 54 | static struct task_struct *hwrng_fill; | 55 | static struct task_struct *hwrng_fill; |
| 55 | static LIST_HEAD(rng_list); | 56 | static LIST_HEAD(rng_list); |
| 57 | /* Protects rng_list and current_rng */ | ||
| 56 | static DEFINE_MUTEX(rng_mutex); | 58 | static DEFINE_MUTEX(rng_mutex); |
| 59 | /* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */ | ||
| 60 | static DEFINE_MUTEX(reading_mutex); | ||
| 57 | static int data_avail; | 61 | static int data_avail; |
| 58 | static u8 *rng_buffer, *rng_fillbuf; | 62 | static u8 *rng_buffer, *rng_fillbuf; |
| 59 | static unsigned short current_quality; | 63 | static unsigned short current_quality; |
| @@ -66,6 +70,8 @@ module_param(default_quality, ushort, 0644); | |||
| 66 | MODULE_PARM_DESC(default_quality, | 70 | MODULE_PARM_DESC(default_quality, |
| 67 | "default entropy content of hwrng per mill"); | 71 | "default entropy content of hwrng per mill"); |
| 68 | 72 | ||
| 73 | static void drop_current_rng(void); | ||
| 74 | static int hwrng_init(struct hwrng *rng); | ||
| 69 | static void start_khwrngd(void); | 75 | static void start_khwrngd(void); |
| 70 | 76 | ||
| 71 | static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, | 77 | static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, |
| @@ -81,13 +87,83 @@ static void add_early_randomness(struct hwrng *rng) | |||
| 81 | unsigned char bytes[16]; | 87 | unsigned char bytes[16]; |
| 82 | int bytes_read; | 88 | int bytes_read; |
| 83 | 89 | ||
| 90 | mutex_lock(&reading_mutex); | ||
| 84 | bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); | 91 | bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); |
| 92 | mutex_unlock(&reading_mutex); | ||
| 85 | if (bytes_read > 0) | 93 | if (bytes_read > 0) |
| 86 | add_device_randomness(bytes, bytes_read); | 94 | add_device_randomness(bytes, bytes_read); |
| 87 | } | 95 | } |
| 88 | 96 | ||
| 89 | static inline int hwrng_init(struct hwrng *rng) | 97 | static inline void cleanup_rng(struct kref *kref) |
| 90 | { | 98 | { |
| 99 | struct hwrng *rng = container_of(kref, struct hwrng, ref); | ||
| 100 | |||
| 101 | if (rng->cleanup) | ||
| 102 | rng->cleanup(rng); | ||
| 103 | |||
| 104 | complete(&rng->cleanup_done); | ||
| 105 | } | ||
| 106 | |||
| 107 | static int set_current_rng(struct hwrng *rng) | ||
| 108 | { | ||
| 109 | int err; | ||
| 110 | |||
| 111 | BUG_ON(!mutex_is_locked(&rng_mutex)); | ||
| 112 | |||
| 113 | err = hwrng_init(rng); | ||
| 114 | if (err) | ||
| 115 | return err; | ||
| 116 | |||
| 117 | drop_current_rng(); | ||
| 118 | current_rng = rng; | ||
| 119 | |||
| 120 | return 0; | ||
| 121 | } | ||
| 122 | |||
| 123 | static void drop_current_rng(void) | ||
| 124 | { | ||
| 125 | BUG_ON(!mutex_is_locked(&rng_mutex)); | ||
| 126 | if (!current_rng) | ||
| 127 | return; | ||
| 128 | |||
| 129 | /* decrease last reference for triggering the cleanup */ | ||
| 130 | kref_put(¤t_rng->ref, cleanup_rng); | ||
| 131 | current_rng = NULL; | ||
| 132 | } | ||
| 133 | |||
| 134 | /* Returns ERR_PTR(), NULL or refcounted hwrng */ | ||
| 135 | static struct hwrng *get_current_rng(void) | ||
| 136 | { | ||
| 137 | struct hwrng *rng; | ||
| 138 | |||
| 139 | if (mutex_lock_interruptible(&rng_mutex)) | ||
| 140 | return ERR_PTR(-ERESTARTSYS); | ||
| 141 | |||
| 142 | rng = current_rng; | ||
| 143 | if (rng) | ||
| 144 | kref_get(&rng->ref); | ||
| 145 | |||
| 146 | mutex_unlock(&rng_mutex); | ||
| 147 | return rng; | ||
| 148 | } | ||
| 149 | |||
| 150 | static void put_rng(struct hwrng *rng) | ||
| 151 | { | ||
| 152 | /* | ||
| 153 | * Hold rng_mutex here so we serialize in case they set_current_rng | ||
| 154 | * on rng again immediately. | ||
| 155 | */ | ||
| 156 | mutex_lock(&rng_mutex); | ||
| 157 | if (rng) | ||
| 158 | kref_put(&rng->ref, cleanup_rng); | ||
| 159 | mutex_unlock(&rng_mutex); | ||
| 160 | } | ||
| 161 | |||
| 162 | static int hwrng_init(struct hwrng *rng) | ||
| 163 | { | ||
| 164 | if (kref_get_unless_zero(&rng->ref)) | ||
| 165 | goto skip_init; | ||
| 166 | |||
| 91 | if (rng->init) { | 167 | if (rng->init) { |
| 92 | int ret; | 168 | int ret; |
| 93 | 169 | ||
| @@ -95,6 +171,11 @@ static inline int hwrng_init(struct hwrng *rng) | |||
| 95 | if (ret) | 171 | if (ret) |
| 96 | return ret; | 172 | return ret; |
| 97 | } | 173 | } |
| 174 | |||
| 175 | kref_init(&rng->ref); | ||
| 176 | reinit_completion(&rng->cleanup_done); | ||
| 177 | |||
| 178 | skip_init: | ||
| 98 | add_early_randomness(rng); | 179 | add_early_randomness(rng); |
| 99 | 180 | ||
| 100 | current_quality = rng->quality ? : default_quality; | 181 | current_quality = rng->quality ? : default_quality; |
| @@ -108,12 +189,6 @@ static inline int hwrng_init(struct hwrng *rng) | |||
| 108 | return 0; | 189 | return 0; |
| 109 | } | 190 | } |
| 110 | 191 | ||
| 111 | static inline void hwrng_cleanup(struct hwrng *rng) | ||
| 112 | { | ||
| 113 | if (rng && rng->cleanup) | ||
| 114 | rng->cleanup(rng); | ||
| 115 | } | ||
| 116 | |||
| 117 | static int rng_dev_open(struct inode *inode, struct file *filp) | 192 | static int rng_dev_open(struct inode *inode, struct file *filp) |
| 118 | { | 193 | { |
| 119 | /* enforce read-only access to this chrdev */ | 194 | /* enforce read-only access to this chrdev */ |
| @@ -128,6 +203,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, | |||
| 128 | int wait) { | 203 | int wait) { |
| 129 | int present; | 204 | int present; |
| 130 | 205 | ||
| 206 | BUG_ON(!mutex_is_locked(&reading_mutex)); | ||
| 131 | if (rng->read) | 207 | if (rng->read) |
| 132 | return rng->read(rng, (void *)buffer, size, wait); | 208 | return rng->read(rng, (void *)buffer, size, wait); |
| 133 | 209 | ||
| @@ -148,25 +224,27 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, | |||
| 148 | ssize_t ret = 0; | 224 | ssize_t ret = 0; |
| 149 | int err = 0; | 225 | int err = 0; |
| 150 | int bytes_read, len; | 226 | int bytes_read, len; |
| 227 | struct hwrng *rng; | ||
| 151 | 228 | ||
| 152 | while (size) { | 229 | while (size) { |
| 153 | if (mutex_lock_interruptible(&rng_mutex)) { | 230 | rng = get_current_rng(); |
| 154 | err = -ERESTARTSYS; | 231 | if (IS_ERR(rng)) { |
| 232 | err = PTR_ERR(rng); | ||
| 155 | goto out; | 233 | goto out; |
| 156 | } | 234 | } |
| 157 | 235 | if (!rng) { | |
| 158 | if (!current_rng) { | ||
| 159 | err = -ENODEV; | 236 | err = -ENODEV; |
| 160 | goto out_unlock; | 237 | goto out; |
| 161 | } | 238 | } |
| 162 | 239 | ||
| 240 | mutex_lock(&reading_mutex); | ||
| 163 | if (!data_avail) { | 241 | if (!data_avail) { |
| 164 | bytes_read = rng_get_data(current_rng, rng_buffer, | 242 | bytes_read = rng_get_data(rng, rng_buffer, |
| 165 | rng_buffer_size(), | 243 | rng_buffer_size(), |
| 166 | !(filp->f_flags & O_NONBLOCK)); | 244 | !(filp->f_flags & O_NONBLOCK)); |
| 167 | if (bytes_read < 0) { | 245 | if (bytes_read < 0) { |
| 168 | err = bytes_read; | 246 | err = bytes_read; |
| 169 | goto out_unlock; | 247 | goto out_unlock_reading; |
| 170 | } | 248 | } |
| 171 | data_avail = bytes_read; | 249 | data_avail = bytes_read; |
| 172 | } | 250 | } |
| @@ -174,7 +252,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, | |||
| 174 | if (!data_avail) { | 252 | if (!data_avail) { |
| 175 | if (filp->f_flags & O_NONBLOCK) { | 253 | if (filp->f_flags & O_NONBLOCK) { |
| 176 | err = -EAGAIN; | 254 | err = -EAGAIN; |
| 177 | goto out_unlock; | 255 | goto out_unlock_reading; |
| 178 | } | 256 | } |
| 179 | } else { | 257 | } else { |
| 180 | len = data_avail; | 258 | len = data_avail; |
| @@ -186,14 +264,15 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, | |||
| 186 | if (copy_to_user(buf + ret, rng_buffer + data_avail, | 264 | if (copy_to_user(buf + ret, rng_buffer + data_avail, |
| 187 | len)) { | 265 | len)) { |
| 188 | err = -EFAULT; | 266 | err = -EFAULT; |
| 189 | goto out_unlock; | 267 | goto out_unlock_reading; |
| 190 | } | 268 | } |
| 191 | 269 | ||
| 192 | size -= len; | 270 | size -= len; |
| 193 | ret += len; | 271 | ret += len; |
| 194 | } | 272 | } |
| 195 | 273 | ||
| 196 | mutex_unlock(&rng_mutex); | 274 | mutex_unlock(&reading_mutex); |
| 275 | put_rng(rng); | ||
| 197 | 276 | ||
| 198 | if (need_resched()) | 277 | if (need_resched()) |
| 199 | schedule_timeout_interruptible(1); | 278 | schedule_timeout_interruptible(1); |
| @@ -205,8 +284,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, | |||
| 205 | } | 284 | } |
| 206 | out: | 285 | out: |
| 207 | return ret ? : err; | 286 | return ret ? : err; |
| 208 | out_unlock: | 287 | |
| 209 | mutex_unlock(&rng_mutex); | 288 | out_unlock_reading: |
| 289 | mutex_unlock(&reading_mutex); | ||
| 290 | put_rng(rng); | ||
| 210 | goto out; | 291 | goto out; |
| 211 | } | 292 | } |
| 212 | 293 | ||
| @@ -239,16 +320,9 @@ static ssize_t hwrng_attr_current_store(struct device *dev, | |||
| 239 | err = -ENODEV; | 320 | err = -ENODEV; |
| 240 | list_for_each_entry(rng, &rng_list, list) { | 321 | list_for_each_entry(rng, &rng_list, list) { |
| 241 | if (strcmp(rng->name, buf) == 0) { | 322 | if (strcmp(rng->name, buf) == 0) { |
| 242 | if (rng == current_rng) { | ||
| 243 | err = 0; | ||
| 244 | break; | ||
| 245 | } | ||
| 246 | err = hwrng_init(rng); | ||
| 247 | if (err) | ||
| 248 | break; | ||
| 249 | hwrng_cleanup(current_rng); | ||
| 250 | current_rng = rng; | ||
| 251 | err = 0; | 323 | err = 0; |
| 324 | if (rng != current_rng) | ||
| 325 | err = set_current_rng(rng); | ||
| 252 | break; | 326 | break; |
| 253 | } | 327 | } |
| 254 | } | 328 | } |
| @@ -261,17 +335,15 @@ static ssize_t hwrng_attr_current_show(struct device *dev, | |||
| 261 | struct device_attribute *attr, | 335 | struct device_attribute *attr, |
| 262 | char *buf) | 336 | char *buf) |
| 263 | { | 337 | { |
| 264 | int err; | ||
| 265 | ssize_t ret; | 338 | ssize_t ret; |
| 266 | const char *name = "none"; | 339 | struct hwrng *rng; |
| 267 | 340 | ||
| 268 | err = mutex_lock_interruptible(&rng_mutex); | 341 | rng = get_current_rng(); |
| 269 | if (err) | 342 | if (IS_ERR(rng)) |
| 270 | return -ERESTARTSYS; | 343 | return PTR_ERR(rng); |
| 271 | if (current_rng) | 344 | |
| 272 | name = current_rng->name; | 345 | ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none"); |
| 273 | ret = snprintf(buf, PAGE_SIZE, "%s\n", name); | 346 | put_rng(rng); |
| 274 | mutex_unlock(&rng_mutex); | ||
| 275 | 347 | ||
| 276 | return ret; | 348 | return ret; |
| 277 | } | 349 | } |
| @@ -305,14 +377,14 @@ static DEVICE_ATTR(rng_available, S_IRUGO, | |||
| 305 | NULL); | 377 | NULL); |
| 306 | 378 | ||
| 307 | 379 | ||
| 308 | static void unregister_miscdev(void) | 380 | static void __exit unregister_miscdev(void) |
| 309 | { | 381 | { |
| 310 | device_remove_file(rng_miscdev.this_device, &dev_attr_rng_available); | 382 | device_remove_file(rng_miscdev.this_device, &dev_attr_rng_available); |
| 311 | device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current); | 383 | device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current); |
| 312 | misc_deregister(&rng_miscdev); | 384 | misc_deregister(&rng_miscdev); |
| 313 | } | 385 | } |
| 314 | 386 | ||
| 315 | static int register_miscdev(void) | 387 | static int __init register_miscdev(void) |
| 316 | { | 388 | { |
| 317 | int err; | 389 | int err; |
| 318 | 390 | ||
| @@ -342,15 +414,22 @@ static int hwrng_fillfn(void *unused) | |||
| 342 | long rc; | 414 | long rc; |
| 343 | 415 | ||
| 344 | while (!kthread_should_stop()) { | 416 | while (!kthread_should_stop()) { |
| 345 | if (!current_rng) | 417 | struct hwrng *rng; |
| 418 | |||
| 419 | rng = get_current_rng(); | ||
| 420 | if (IS_ERR(rng) || !rng) | ||
| 346 | break; | 421 | break; |
| 347 | rc = rng_get_data(current_rng, rng_fillbuf, | 422 | mutex_lock(&reading_mutex); |
| 423 | rc = rng_get_data(rng, rng_fillbuf, | ||
| 348 | rng_buffer_size(), 1); | 424 | rng_buffer_size(), 1); |
| 425 | mutex_unlock(&reading_mutex); | ||
| 426 | put_rng(rng); | ||
| 349 | if (rc <= 0) { | 427 | if (rc <= 0) { |
| 350 | pr_warn("hwrng: no data available\n"); | 428 | pr_warn("hwrng: no data available\n"); |
| 351 | msleep_interruptible(10000); | 429 | msleep_interruptible(10000); |
| 352 | continue; | 430 | continue; |
| 353 | } | 431 | } |
| 432 | /* Outside lock, sure, but y'know: randomness. */ | ||
| 354 | add_hwgenerator_randomness((void *)rng_fillbuf, rc, | 433 | add_hwgenerator_randomness((void *)rng_fillbuf, rc, |
| 355 | rc * current_quality * 8 >> 10); | 434 | rc * current_quality * 8 >> 10); |
| 356 | } | 435 | } |
| @@ -400,23 +479,16 @@ int hwrng_register(struct hwrng *rng) | |||
| 400 | goto out_unlock; | 479 | goto out_unlock; |
| 401 | } | 480 | } |
| 402 | 481 | ||
| 482 | init_completion(&rng->cleanup_done); | ||
| 483 | complete(&rng->cleanup_done); | ||
| 484 | |||
| 403 | old_rng = current_rng; | 485 | old_rng = current_rng; |
| 404 | if (!old_rng) { | ||
| 405 | err = hwrng_init(rng); | ||
| 406 | if (err) | ||
| 407 | goto out_unlock; | ||
| 408 | current_rng = rng; | ||
| 409 | } | ||
| 410 | err = 0; | 486 | err = 0; |
| 411 | if (!old_rng) { | 487 | if (!old_rng) { |
| 412 | err = register_miscdev(); | 488 | err = set_current_rng(rng); |
| 413 | if (err) { | 489 | if (err) |
| 414 | hwrng_cleanup(rng); | ||
| 415 | current_rng = NULL; | ||
| 416 | goto out_unlock; | 490 | goto out_unlock; |
| 417 | } | ||
| 418 | } | 491 | } |
| 419 | INIT_LIST_HEAD(&rng->list); | ||
| 420 | list_add_tail(&rng->list, &rng_list); | 492 | list_add_tail(&rng->list, &rng_list); |
| 421 | 493 | ||
| 422 | if (old_rng && !rng->init) { | 494 | if (old_rng && !rng->init) { |
| @@ -439,42 +511,49 @@ EXPORT_SYMBOL_GPL(hwrng_register); | |||
| 439 | 511 | ||
| 440 | void hwrng_unregister(struct hwrng *rng) | 512 | void hwrng_unregister(struct hwrng *rng) |
| 441 | { | 513 | { |
| 442 | int err; | ||
| 443 | |||
| 444 | mutex_lock(&rng_mutex); | 514 | mutex_lock(&rng_mutex); |
| 445 | 515 | ||
| 446 | list_del(&rng->list); | 516 | list_del(&rng->list); |
| 447 | if (current_rng == rng) { | 517 | if (current_rng == rng) { |
| 448 | hwrng_cleanup(rng); | 518 | drop_current_rng(); |
| 449 | if (list_empty(&rng_list)) { | 519 | if (!list_empty(&rng_list)) { |
| 450 | current_rng = NULL; | 520 | struct hwrng *tail; |
| 451 | } else { | 521 | |
| 452 | current_rng = list_entry(rng_list.prev, struct hwrng, list); | 522 | tail = list_entry(rng_list.prev, struct hwrng, list); |
| 453 | err = hwrng_init(current_rng); | 523 | |
| 454 | if (err) | 524 | set_current_rng(tail); |
| 455 | current_rng = NULL; | ||
| 456 | } | 525 | } |
| 457 | } | 526 | } |
| 527 | |||
| 458 | if (list_empty(&rng_list)) { | 528 | if (list_empty(&rng_list)) { |
| 459 | unregister_miscdev(); | 529 | mutex_unlock(&rng_mutex); |
| 460 | if (hwrng_fill) | 530 | if (hwrng_fill) |
| 461 | kthread_stop(hwrng_fill); | 531 | kthread_stop(hwrng_fill); |
| 462 | } | 532 | } else |
| 533 | mutex_unlock(&rng_mutex); | ||
| 463 | 534 | ||
| 464 | mutex_unlock(&rng_mutex); | 535 | wait_for_completion(&rng->cleanup_done); |
| 465 | } | 536 | } |
| 466 | EXPORT_SYMBOL_GPL(hwrng_unregister); | 537 | EXPORT_SYMBOL_GPL(hwrng_unregister); |
| 467 | 538 | ||
| 468 | static void __exit hwrng_exit(void) | 539 | static int __init hwrng_modinit(void) |
| 540 | { | ||
| 541 | return register_miscdev(); | ||
| 542 | } | ||
| 543 | |||
| 544 | static void __exit hwrng_modexit(void) | ||
| 469 | { | 545 | { |
| 470 | mutex_lock(&rng_mutex); | 546 | mutex_lock(&rng_mutex); |
| 471 | BUG_ON(current_rng); | 547 | BUG_ON(current_rng); |
| 472 | kfree(rng_buffer); | 548 | kfree(rng_buffer); |
| 473 | kfree(rng_fillbuf); | 549 | kfree(rng_fillbuf); |
| 474 | mutex_unlock(&rng_mutex); | 550 | mutex_unlock(&rng_mutex); |
| 551 | |||
| 552 | unregister_miscdev(); | ||
| 475 | } | 553 | } |
| 476 | 554 | ||
| 477 | module_exit(hwrng_exit); | 555 | module_init(hwrng_modinit); |
| 556 | module_exit(hwrng_modexit); | ||
| 478 | 557 | ||
| 479 | MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); | 558 | MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); |
| 480 | MODULE_LICENSE("GPL"); | 559 | MODULE_LICENSE("GPL"); |
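Because the misc device is now registered at module load and unregistered at module exit, hwrng_unregister() only has to guarantee that the departing device is idle, which it does by blocking on cleanup_done. That completion is signalled from the kref release path, roughly as sketched below (the callback itself is defined in an earlier hunk; field names as assumed above):

#include <linux/completion.h>

/* Sketch of the release callback paired with the wait_for_completion()
 * in hwrng_unregister(); continues the assumptions of the sketches above. */
static void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	/* wake any hwrng_unregister() waiting for the last user to finish */
	complete(&rng->cleanup_done);
}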
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 72295ea2fd1c..3fa2f8a009b3 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c | |||
| @@ -39,7 +39,6 @@ struct virtrng_info { | |||
| 39 | bool hwrng_removed; | 39 | bool hwrng_removed; |
| 40 | }; | 40 | }; |
| 41 | 41 | ||
| 42 | |||
| 43 | static void random_recv_done(struct virtqueue *vq) | 42 | static void random_recv_done(struct virtqueue *vq) |
| 44 | { | 43 | { |
| 45 | struct virtrng_info *vi = vq->vdev->priv; | 44 | struct virtrng_info *vi = vq->vdev->priv; |
diff --git a/drivers/crypto/amcc/crypto4xx_sa.c b/drivers/crypto/amcc/crypto4xx_sa.c index de8a7a48775a..69182e2cc3ea 100644 --- a/drivers/crypto/amcc/crypto4xx_sa.c +++ b/drivers/crypto/amcc/crypto4xx_sa.c | |||
| @@ -34,29 +34,6 @@ | |||
| 34 | #include "crypto4xx_sa.h" | 34 | #include "crypto4xx_sa.h" |
| 35 | #include "crypto4xx_core.h" | 35 | #include "crypto4xx_core.h" |
| 36 | 36 | ||
| 37 | u32 get_dynamic_sa_offset_iv_field(struct crypto4xx_ctx *ctx) | ||
| 38 | { | ||
| 39 | u32 offset; | ||
| 40 | union dynamic_sa_contents cts; | ||
| 41 | |||
| 42 | if (ctx->direction == DIR_INBOUND) | ||
| 43 | cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents; | ||
| 44 | else | ||
| 45 | cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents; | ||
| 46 | offset = cts.bf.key_size | ||
| 47 | + cts.bf.inner_size | ||
| 48 | + cts.bf.outer_size | ||
| 49 | + cts.bf.spi | ||
| 50 | + cts.bf.seq_num0 | ||
| 51 | + cts.bf.seq_num1 | ||
| 52 | + cts.bf.seq_num_mask0 | ||
| 53 | + cts.bf.seq_num_mask1 | ||
| 54 | + cts.bf.seq_num_mask2 | ||
| 55 | + cts.bf.seq_num_mask3; | ||
| 56 | |||
| 57 | return sizeof(struct dynamic_sa_ctl) + offset * 4; | ||
| 58 | } | ||
| 59 | |||
| 60 | u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx) | 37 | u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx) |
| 61 | { | 38 | { |
| 62 | u32 offset; | 39 | u32 offset; |
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 53d1c330f8a8..6597aac9905d 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c | |||
| @@ -673,9 +673,9 @@ err_map_out: | |||
| 673 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, | 673 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, |
| 674 | DMA_TO_DEVICE); | 674 | DMA_TO_DEVICE); |
| 675 | err_map_in: | 675 | err_map_in: |
| 676 | err_alloc: | ||
| 676 | free_page((unsigned long)dd->buf_out); | 677 | free_page((unsigned long)dd->buf_out); |
| 677 | free_page((unsigned long)dd->buf_in); | 678 | free_page((unsigned long)dd->buf_in); |
| 678 | err_alloc: | ||
| 679 | if (err) | 679 | if (err) |
| 680 | pr_err("error: %d\n", err); | 680 | pr_err("error: %d\n", err); |
| 681 | return err; | 681 | return err; |
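The only change in this atmel-aes hunk is where the err_alloc label sits, but the move matters: with the label above the free_page() calls, a failed dma_map_single() now falls through to the frees instead of jumping past them and leaking both pages (atmel-tdes below gets the identical fix). A minimal standalone sketch of the unwind ordering the move restores; the struct and function names here are illustrative, not the driver's:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

struct example_bufs {
	void *buf_in, *buf_out;
	dma_addr_t dma_in, dma_out;
};

static int example_buf_setup(struct device *dev, struct example_bufs *b)
{
	int err = -ENOMEM;

	b->buf_in  = (void *)__get_free_page(GFP_KERNEL);
	b->buf_out = (void *)__get_free_page(GFP_KERNEL);
	if (!b->buf_in || !b->buf_out)
		goto err_alloc;

	b->dma_in = dma_map_single(dev, b->buf_in, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, b->dma_in)) {
		err = -EINVAL;
		goto err_alloc;			/* pages must still be freed */
	}

	b->dma_out = dma_map_single(dev, b->buf_out, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, b->dma_out)) {
		err = -EINVAL;
		goto err_map_out;
	}

	return 0;

err_map_out:
	dma_unmap_single(dev, b->dma_in, PAGE_SIZE, DMA_TO_DEVICE);
err_alloc:
	free_page((unsigned long)b->buf_out);	/* free_page(0) is a no-op */
	free_page((unsigned long)b->buf_in);
	return err;
}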
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index d94f07c78e19..34db04addc18 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c | |||
| @@ -102,10 +102,6 @@ struct atmel_sha_ctx { | |||
| 102 | struct atmel_sha_dev *dd; | 102 | struct atmel_sha_dev *dd; |
| 103 | 103 | ||
| 104 | unsigned long flags; | 104 | unsigned long flags; |
| 105 | |||
| 106 | /* fallback stuff */ | ||
| 107 | struct crypto_shash *fallback; | ||
| 108 | |||
| 109 | }; | 105 | }; |
| 110 | 106 | ||
| 111 | #define ATMEL_SHA_QUEUE_LENGTH 50 | 107 | #define ATMEL_SHA_QUEUE_LENGTH 50 |
| @@ -974,19 +970,8 @@ static int atmel_sha_digest(struct ahash_request *req) | |||
| 974 | return atmel_sha_init(req) ?: atmel_sha_finup(req); | 970 | return atmel_sha_init(req) ?: atmel_sha_finup(req); |
| 975 | } | 971 | } |
| 976 | 972 | ||
| 977 | static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) | 973 | static int atmel_sha_cra_init(struct crypto_tfm *tfm) |
| 978 | { | 974 | { |
| 979 | struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm); | ||
| 980 | const char *alg_name = crypto_tfm_alg_name(tfm); | ||
| 981 | |||
| 982 | /* Allocate a fallback and abort if it failed. */ | ||
| 983 | tctx->fallback = crypto_alloc_shash(alg_name, 0, | ||
| 984 | CRYPTO_ALG_NEED_FALLBACK); | ||
| 985 | if (IS_ERR(tctx->fallback)) { | ||
| 986 | pr_err("atmel-sha: fallback driver '%s' could not be loaded.\n", | ||
| 987 | alg_name); | ||
| 988 | return PTR_ERR(tctx->fallback); | ||
| 989 | } | ||
| 990 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 975 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
| 991 | sizeof(struct atmel_sha_reqctx) + | 976 | sizeof(struct atmel_sha_reqctx) + |
| 992 | SHA_BUFFER_LEN + SHA512_BLOCK_SIZE); | 977 | SHA_BUFFER_LEN + SHA512_BLOCK_SIZE); |
| @@ -994,19 +979,6 @@ static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) | |||
| 994 | return 0; | 979 | return 0; |
| 995 | } | 980 | } |
| 996 | 981 | ||
| 997 | static int atmel_sha_cra_init(struct crypto_tfm *tfm) | ||
| 998 | { | ||
| 999 | return atmel_sha_cra_init_alg(tfm, NULL); | ||
| 1000 | } | ||
| 1001 | |||
| 1002 | static void atmel_sha_cra_exit(struct crypto_tfm *tfm) | ||
| 1003 | { | ||
| 1004 | struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm); | ||
| 1005 | |||
| 1006 | crypto_free_shash(tctx->fallback); | ||
| 1007 | tctx->fallback = NULL; | ||
| 1008 | } | ||
| 1009 | |||
| 1010 | static struct ahash_alg sha_1_256_algs[] = { | 982 | static struct ahash_alg sha_1_256_algs[] = { |
| 1011 | { | 983 | { |
| 1012 | .init = atmel_sha_init, | 984 | .init = atmel_sha_init, |
| @@ -1020,14 +992,12 @@ static struct ahash_alg sha_1_256_algs[] = { | |||
| 1020 | .cra_name = "sha1", | 992 | .cra_name = "sha1", |
| 1021 | .cra_driver_name = "atmel-sha1", | 993 | .cra_driver_name = "atmel-sha1", |
| 1022 | .cra_priority = 100, | 994 | .cra_priority = 100, |
| 1023 | .cra_flags = CRYPTO_ALG_ASYNC | | 995 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 1024 | CRYPTO_ALG_NEED_FALLBACK, | ||
| 1025 | .cra_blocksize = SHA1_BLOCK_SIZE, | 996 | .cra_blocksize = SHA1_BLOCK_SIZE, |
| 1026 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), | 997 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), |
| 1027 | .cra_alignmask = 0, | 998 | .cra_alignmask = 0, |
| 1028 | .cra_module = THIS_MODULE, | 999 | .cra_module = THIS_MODULE, |
| 1029 | .cra_init = atmel_sha_cra_init, | 1000 | .cra_init = atmel_sha_cra_init, |
| 1030 | .cra_exit = atmel_sha_cra_exit, | ||
| 1031 | } | 1001 | } |
| 1032 | } | 1002 | } |
| 1033 | }, | 1003 | }, |
| @@ -1043,14 +1013,12 @@ static struct ahash_alg sha_1_256_algs[] = { | |||
| 1043 | .cra_name = "sha256", | 1013 | .cra_name = "sha256", |
| 1044 | .cra_driver_name = "atmel-sha256", | 1014 | .cra_driver_name = "atmel-sha256", |
| 1045 | .cra_priority = 100, | 1015 | .cra_priority = 100, |
| 1046 | .cra_flags = CRYPTO_ALG_ASYNC | | 1016 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 1047 | CRYPTO_ALG_NEED_FALLBACK, | ||
| 1048 | .cra_blocksize = SHA256_BLOCK_SIZE, | 1017 | .cra_blocksize = SHA256_BLOCK_SIZE, |
| 1049 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), | 1018 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), |
| 1050 | .cra_alignmask = 0, | 1019 | .cra_alignmask = 0, |
| 1051 | .cra_module = THIS_MODULE, | 1020 | .cra_module = THIS_MODULE, |
| 1052 | .cra_init = atmel_sha_cra_init, | 1021 | .cra_init = atmel_sha_cra_init, |
| 1053 | .cra_exit = atmel_sha_cra_exit, | ||
| 1054 | } | 1022 | } |
| 1055 | } | 1023 | } |
| 1056 | }, | 1024 | }, |
| @@ -1068,14 +1036,12 @@ static struct ahash_alg sha_224_alg = { | |||
| 1068 | .cra_name = "sha224", | 1036 | .cra_name = "sha224", |
| 1069 | .cra_driver_name = "atmel-sha224", | 1037 | .cra_driver_name = "atmel-sha224", |
| 1070 | .cra_priority = 100, | 1038 | .cra_priority = 100, |
| 1071 | .cra_flags = CRYPTO_ALG_ASYNC | | 1039 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 1072 | CRYPTO_ALG_NEED_FALLBACK, | ||
| 1073 | .cra_blocksize = SHA224_BLOCK_SIZE, | 1040 | .cra_blocksize = SHA224_BLOCK_SIZE, |
| 1074 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), | 1041 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), |
| 1075 | .cra_alignmask = 0, | 1042 | .cra_alignmask = 0, |
| 1076 | .cra_module = THIS_MODULE, | 1043 | .cra_module = THIS_MODULE, |
| 1077 | .cra_init = atmel_sha_cra_init, | 1044 | .cra_init = atmel_sha_cra_init, |
| 1078 | .cra_exit = atmel_sha_cra_exit, | ||
| 1079 | } | 1045 | } |
| 1080 | } | 1046 | } |
| 1081 | }; | 1047 | }; |
| @@ -1093,14 +1059,12 @@ static struct ahash_alg sha_384_512_algs[] = { | |||
| 1093 | .cra_name = "sha384", | 1059 | .cra_name = "sha384", |
| 1094 | .cra_driver_name = "atmel-sha384", | 1060 | .cra_driver_name = "atmel-sha384", |
| 1095 | .cra_priority = 100, | 1061 | .cra_priority = 100, |
| 1096 | .cra_flags = CRYPTO_ALG_ASYNC | | 1062 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 1097 | CRYPTO_ALG_NEED_FALLBACK, | ||
| 1098 | .cra_blocksize = SHA384_BLOCK_SIZE, | 1063 | .cra_blocksize = SHA384_BLOCK_SIZE, |
| 1099 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), | 1064 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), |
| 1100 | .cra_alignmask = 0x3, | 1065 | .cra_alignmask = 0x3, |
| 1101 | .cra_module = THIS_MODULE, | 1066 | .cra_module = THIS_MODULE, |
| 1102 | .cra_init = atmel_sha_cra_init, | 1067 | .cra_init = atmel_sha_cra_init, |
| 1103 | .cra_exit = atmel_sha_cra_exit, | ||
| 1104 | } | 1068 | } |
| 1105 | } | 1069 | } |
| 1106 | }, | 1070 | }, |
| @@ -1116,14 +1080,12 @@ static struct ahash_alg sha_384_512_algs[] = { | |||
| 1116 | .cra_name = "sha512", | 1080 | .cra_name = "sha512", |
| 1117 | .cra_driver_name = "atmel-sha512", | 1081 | .cra_driver_name = "atmel-sha512", |
| 1118 | .cra_priority = 100, | 1082 | .cra_priority = 100, |
| 1119 | .cra_flags = CRYPTO_ALG_ASYNC | | 1083 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 1120 | CRYPTO_ALG_NEED_FALLBACK, | ||
| 1121 | .cra_blocksize = SHA512_BLOCK_SIZE, | 1084 | .cra_blocksize = SHA512_BLOCK_SIZE, |
| 1122 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), | 1085 | .cra_ctxsize = sizeof(struct atmel_sha_ctx), |
| 1123 | .cra_alignmask = 0x3, | 1086 | .cra_alignmask = 0x3, |
| 1124 | .cra_module = THIS_MODULE, | 1087 | .cra_module = THIS_MODULE, |
| 1125 | .cra_init = atmel_sha_cra_init, | 1088 | .cra_init = atmel_sha_cra_init, |
| 1126 | .cra_exit = atmel_sha_cra_exit, | ||
| 1127 | } | 1089 | } |
| 1128 | } | 1090 | } |
| 1129 | }, | 1091 | }, |
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index 5e7c896cde30..258772d9b22f 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c | |||
| @@ -376,9 +376,9 @@ err_map_out: | |||
| 376 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, | 376 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, |
| 377 | DMA_TO_DEVICE); | 377 | DMA_TO_DEVICE); |
| 378 | err_map_in: | 378 | err_map_in: |
| 379 | err_alloc: | ||
| 379 | free_page((unsigned long)dd->buf_out); | 380 | free_page((unsigned long)dd->buf_out); |
| 380 | free_page((unsigned long)dd->buf_in); | 381 | free_page((unsigned long)dd->buf_in); |
| 381 | err_alloc: | ||
| 382 | if (err) | 382 | if (err) |
| 383 | pr_err("error: %d\n", err); | 383 | pr_err("error: %d\n", err); |
| 384 | return err; | 384 | return err; |
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c index 9ae149bddb6e..d9af9403ab6c 100644 --- a/drivers/crypto/bfin_crc.c +++ b/drivers/crypto/bfin_crc.c | |||
| @@ -110,7 +110,7 @@ static int sg_count(struct scatterlist *sg_list) | |||
| 110 | 110 | ||
| 111 | while (!sg_is_last(sg)) { | 111 | while (!sg_is_last(sg)) { |
| 112 | sg_nents++; | 112 | sg_nents++; |
| 113 | sg = scatterwalk_sg_next(sg); | 113 | sg = sg_next(sg); |
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | return sg_nents; | 116 | return sg_nents; |
| @@ -744,7 +744,7 @@ static int __init bfin_crypto_crc_mod_init(void) | |||
| 744 | 744 | ||
| 745 | ret = platform_driver_register(&bfin_crypto_crc_driver); | 745 | ret = platform_driver_register(&bfin_crypto_crc_driver); |
| 746 | if (ret) { | 746 | if (ret) { |
| 747 | pr_info(KERN_ERR "unable to register driver\n"); | 747 | pr_err("unable to register driver\n"); |
| 748 | return ret; | 748 | return ret; |
| 749 | } | 749 | } |
| 750 | 750 | ||
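bfin_crc, like the caam, ixp4xx, nx, omap-aes and omap-des hunks further down, is converted from the crypto-local scatterwalk_sg_next() to the generic sg_next(), which already knows how to hop over chain entries in a chained scatterlist, so the walking loops keep their shape. As a standalone sketch of the counting loop above:

#include <linux/scatterlist.h>

/* Count the entries of a (possibly chained) scatterlist. sg_next()
 * skips the chain links itself, so no special casing is needed. */
static int example_sg_count(struct scatterlist *sg_list)
{
	struct scatterlist *sg = sg_list;
	int nents = 1;

	while (!sg_is_last(sg)) {
		nents++;
		sg = sg_next(sg);
	}

	return nents;
}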
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 3187400daf31..29071a156cbe 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
| @@ -2532,7 +2532,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 2532 | in_options = 0; | 2532 | in_options = 0; |
| 2533 | } else { | 2533 | } else { |
| 2534 | src_dma = edesc->sec4_sg_dma; | 2534 | src_dma = edesc->sec4_sg_dma; |
| 2535 | sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents; | 2535 | sec4_sg_index += edesc->src_nents + 1; |
| 2536 | in_options = LDST_SGF; | 2536 | in_options = LDST_SGF; |
| 2537 | } | 2537 | } |
| 2538 | append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); | 2538 | append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); |
| @@ -2714,10 +2714,10 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 2714 | if (!all_contig) { | 2714 | if (!all_contig) { |
| 2715 | if (!is_gcm) { | 2715 | if (!is_gcm) { |
| 2716 | sg_to_sec4_sg(req->assoc, | 2716 | sg_to_sec4_sg(req->assoc, |
| 2717 | (assoc_nents ? : 1), | 2717 | assoc_nents, |
| 2718 | edesc->sec4_sg + | 2718 | edesc->sec4_sg + |
| 2719 | sec4_sg_index, 0); | 2719 | sec4_sg_index, 0); |
| 2720 | sec4_sg_index += assoc_nents ? : 1; | 2720 | sec4_sg_index += assoc_nents; |
| 2721 | } | 2721 | } |
| 2722 | 2722 | ||
| 2723 | dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, | 2723 | dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, |
| @@ -2726,17 +2726,17 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 2726 | 2726 | ||
| 2727 | if (is_gcm) { | 2727 | if (is_gcm) { |
| 2728 | sg_to_sec4_sg(req->assoc, | 2728 | sg_to_sec4_sg(req->assoc, |
| 2729 | (assoc_nents ? : 1), | 2729 | assoc_nents, |
| 2730 | edesc->sec4_sg + | 2730 | edesc->sec4_sg + |
| 2731 | sec4_sg_index, 0); | 2731 | sec4_sg_index, 0); |
| 2732 | sec4_sg_index += assoc_nents ? : 1; | 2732 | sec4_sg_index += assoc_nents; |
| 2733 | } | 2733 | } |
| 2734 | 2734 | ||
| 2735 | sg_to_sec4_sg_last(req->src, | 2735 | sg_to_sec4_sg_last(req->src, |
| 2736 | (src_nents ? : 1), | 2736 | src_nents, |
| 2737 | edesc->sec4_sg + | 2737 | edesc->sec4_sg + |
| 2738 | sec4_sg_index, 0); | 2738 | sec4_sg_index, 0); |
| 2739 | sec4_sg_index += src_nents ? : 1; | 2739 | sec4_sg_index += src_nents; |
| 2740 | } | 2740 | } |
| 2741 | if (dst_nents) { | 2741 | if (dst_nents) { |
| 2742 | sg_to_sec4_sg_last(req->dst, dst_nents, | 2742 | sg_to_sec4_sg_last(req->dst, dst_nents, |
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 70f1e6f37336..efba4ccd4fac 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
| @@ -175,13 +175,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, | |||
| 175 | { | 175 | { |
| 176 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); | 176 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); |
| 177 | struct caam_ctrl __iomem *ctrl; | 177 | struct caam_ctrl __iomem *ctrl; |
| 178 | struct rng4tst __iomem *r4tst; | ||
| 179 | u32 *desc, status, rdsta_val; | 178 | u32 *desc, status, rdsta_val; |
| 180 | int ret = 0, sh_idx; | 179 | int ret = 0, sh_idx; |
| 181 | 180 | ||
| 182 | ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; | 181 | ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; |
| 183 | r4tst = &ctrl->r4tst[0]; | ||
| 184 | |||
| 185 | desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL); | 182 | desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL); |
| 186 | if (!desc) | 183 | if (!desc) |
| 187 | return -ENOMEM; | 184 | return -ENOMEM; |
| @@ -209,8 +206,7 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, | |||
| 209 | * without any error (HW optimizations for later | 206 | * without any error (HW optimizations for later |
| 210 | * CAAM eras), then try again. | 207 | * CAAM eras), then try again. |
| 211 | */ | 208 | */ |
| 212 | rdsta_val = | 209 | rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; |
| 213 | rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; | ||
| 214 | if (status || !(rdsta_val & (1 << sh_idx))) | 210 | if (status || !(rdsta_val & (1 << sh_idx))) |
| 215 | ret = -EAGAIN; | 211 | ret = -EAGAIN; |
| 216 | if (ret) | 212 | if (ret) |
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 66d73bf54166..33e41ea83fcc 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c | |||
| @@ -151,10 +151,15 @@ static void report_ccb_status(struct device *jrdev, const u32 status, | |||
| 151 | else | 151 | else |
| 152 | snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id); | 152 | snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id); |
| 153 | 153 | ||
| 154 | dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", | 154 | /* |
| 155 | status, error, idx_str, idx, | 155 | * CCB ICV check failures are part of normal operation life; |
| 156 | cha_str, cha_err_code, | 156 | * we leave the upper layers to do what they want with them. |
| 157 | err_str, err_err_code); | 157 | */ |
| 158 | if (err_id != JRSTA_CCBERR_ERRID_ICVCHK) | ||
| 159 | dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", | ||
| 160 | status, error, idx_str, idx, | ||
| 161 | cha_str, cha_err_code, | ||
| 162 | err_str, err_err_code); | ||
| 158 | } | 163 | } |
| 159 | 164 | ||
| 160 | static void report_jump_status(struct device *jrdev, const u32 status, | 165 | static void report_jump_status(struct device *jrdev, const u32 status, |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 9b3ef1bc9bd7..b8b5d47acd7a 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
| @@ -384,30 +384,28 @@ static int caam_jr_init(struct device *dev) | |||
| 384 | if (error) { | 384 | if (error) { |
| 385 | dev_err(dev, "can't connect JobR %d interrupt (%d)\n", | 385 | dev_err(dev, "can't connect JobR %d interrupt (%d)\n", |
| 386 | jrp->ridx, jrp->irq); | 386 | jrp->ridx, jrp->irq); |
| 387 | irq_dispose_mapping(jrp->irq); | 387 | goto out_kill_deq; |
| 388 | jrp->irq = 0; | ||
| 389 | return -EINVAL; | ||
| 390 | } | 388 | } |
| 391 | 389 | ||
| 392 | error = caam_reset_hw_jr(dev); | 390 | error = caam_reset_hw_jr(dev); |
| 393 | if (error) | 391 | if (error) |
| 394 | return error; | 392 | goto out_free_irq; |
| 395 | 393 | ||
| 394 | error = -ENOMEM; | ||
| 396 | jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, | 395 | jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, |
| 397 | &inpbusaddr, GFP_KERNEL); | 396 | &inpbusaddr, GFP_KERNEL); |
| 397 | if (!jrp->inpring) | ||
| 398 | goto out_free_irq; | ||
| 398 | 399 | ||
| 399 | jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) * | 400 | jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) * |
| 400 | JOBR_DEPTH, &outbusaddr, GFP_KERNEL); | 401 | JOBR_DEPTH, &outbusaddr, GFP_KERNEL); |
| 402 | if (!jrp->outring) | ||
| 403 | goto out_free_inpring; | ||
| 401 | 404 | ||
| 402 | jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH, | 405 | jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH, |
| 403 | GFP_KERNEL); | 406 | GFP_KERNEL); |
| 404 | 407 | if (!jrp->entinfo) | |
| 405 | if ((jrp->inpring == NULL) || (jrp->outring == NULL) || | 408 | goto out_free_outring; |
| 406 | (jrp->entinfo == NULL)) { | ||
| 407 | dev_err(dev, "can't allocate job rings for %d\n", | ||
| 408 | jrp->ridx); | ||
| 409 | return -ENOMEM; | ||
| 410 | } | ||
| 411 | 409 | ||
| 412 | for (i = 0; i < JOBR_DEPTH; i++) | 410 | for (i = 0; i < JOBR_DEPTH; i++) |
| 413 | jrp->entinfo[i].desc_addr_dma = !0; | 411 | jrp->entinfo[i].desc_addr_dma = !0; |
| @@ -434,6 +432,19 @@ static int caam_jr_init(struct device *dev) | |||
| 434 | (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); | 432 | (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); |
| 435 | 433 | ||
| 436 | return 0; | 434 | return 0; |
| 435 | |||
| 436 | out_free_outring: | ||
| 437 | dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, | ||
| 438 | jrp->outring, outbusaddr); | ||
| 439 | out_free_inpring: | ||
| 440 | dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, | ||
| 441 | jrp->inpring, inpbusaddr); | ||
| 442 | dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx); | ||
| 443 | out_free_irq: | ||
| 444 | free_irq(jrp->irq, dev); | ||
| 445 | out_kill_deq: | ||
| 446 | tasklet_kill(&jrp->irqtask); | ||
| 447 | return error; | ||
| 437 | } | 448 | } |
| 438 | 449 | ||
| 439 | 450 | ||
| @@ -484,8 +495,10 @@ static int caam_jr_probe(struct platform_device *pdev) | |||
| 484 | 495 | ||
| 485 | /* Now do the platform independent part */ | 496 | /* Now do the platform independent part */ |
| 486 | error = caam_jr_init(jrdev); /* now turn on hardware */ | 497 | error = caam_jr_init(jrdev); /* now turn on hardware */ |
| 487 | if (error) | 498 | if (error) { |
| 499 | irq_dispose_mapping(jrpriv->irq); | ||
| 488 | return error; | 500 | return error; |
| 501 | } | ||
| 489 | 502 | ||
| 490 | jrpriv->dev = jrdev; | 503 | jrpriv->dev = jrdev; |
| 491 | spin_lock(&driver_data.jr_alloc_lock); | 504 | spin_lock(&driver_data.jr_alloc_lock); |
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h index ce28a563effc..3b918218aa4c 100644 --- a/drivers/crypto/caam/sg_sw_sec4.h +++ b/drivers/crypto/caam/sg_sw_sec4.h | |||
| @@ -37,7 +37,7 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count, | |||
| 37 | dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), | 37 | dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), |
| 38 | sg_dma_len(sg), offset); | 38 | sg_dma_len(sg), offset); |
| 39 | sec4_sg_ptr++; | 39 | sec4_sg_ptr++; |
| 40 | sg = scatterwalk_sg_next(sg); | 40 | sg = sg_next(sg); |
| 41 | sg_count--; | 41 | sg_count--; |
| 42 | } | 42 | } |
| 43 | return sec4_sg_ptr - 1; | 43 | return sec4_sg_ptr - 1; |
| @@ -67,7 +67,7 @@ static inline int __sg_count(struct scatterlist *sg_list, int nbytes, | |||
| 67 | nbytes -= sg->length; | 67 | nbytes -= sg->length; |
| 68 | if (!sg_is_last(sg) && (sg + 1)->length == 0) | 68 | if (!sg_is_last(sg) && (sg + 1)->length == 0) |
| 69 | *chained = true; | 69 | *chained = true; |
| 70 | sg = scatterwalk_sg_next(sg); | 70 | sg = sg_next(sg); |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | return sg_nents; | 73 | return sg_nents; |
| @@ -93,7 +93,7 @@ static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg, | |||
| 93 | int i; | 93 | int i; |
| 94 | for (i = 0; i < nents; i++) { | 94 | for (i = 0; i < nents; i++) { |
| 95 | dma_map_sg(dev, sg, 1, dir); | 95 | dma_map_sg(dev, sg, 1, dir); |
| 96 | sg = scatterwalk_sg_next(sg); | 96 | sg = sg_next(sg); |
| 97 | } | 97 | } |
| 98 | } else { | 98 | } else { |
| 99 | dma_map_sg(dev, sg, nents, dir); | 99 | dma_map_sg(dev, sg, nents, dir); |
| @@ -109,7 +109,7 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg, | |||
| 109 | int i; | 109 | int i; |
| 110 | for (i = 0; i < nents; i++) { | 110 | for (i = 0; i < nents; i++) { |
| 111 | dma_unmap_sg(dev, sg, 1, dir); | 111 | dma_unmap_sg(dev, sg, 1, dir); |
| 112 | sg = scatterwalk_sg_next(sg); | 112 | sg = sg_next(sg); |
| 113 | } | 113 | } |
| 114 | } else { | 114 | } else { |
| 115 | dma_unmap_sg(dev, sg, nents, dir); | 115 | dma_unmap_sg(dev, sg, nents, dir); |
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index c6e6171eb6d3..ca29c120b85f 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c | |||
| @@ -583,6 +583,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp) | |||
| 583 | #ifdef CONFIG_X86 | 583 | #ifdef CONFIG_X86 |
| 584 | static const struct x86_cpu_id ccp_support[] = { | 584 | static const struct x86_cpu_id ccp_support[] = { |
| 585 | { X86_VENDOR_AMD, 22, }, | 585 | { X86_VENDOR_AMD, 22, }, |
| 586 | { }, | ||
| 586 | }; | 587 | }; |
| 587 | #endif | 588 | #endif |
| 588 | 589 | ||
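The one-line ccp fix adds the empty terminating entry that x86_match_cpu() relies on: the matcher walks the table until it finds an all-zero element, so a table without a sentinel is scanned past its end. The pattern in isolation:

#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <linux/mod_devicetable.h>

/* ID tables scanned by x86_match_cpu() must end with an empty sentinel. */
static const struct x86_cpu_id example_cpu_ids[] = {
	{ X86_VENDOR_AMD, 22, },	/* family 16h */
	{ },				/* terminator, do not drop */
};

static bool example_cpu_supported(void)
{
	return x86_match_cpu(example_cpu_ids) != NULL;
}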
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index f757a0f428bd..48f453555f1f 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
| @@ -784,7 +784,7 @@ static struct buffer_desc *chainup_buffers(struct device *dev, | |||
| 784 | struct buffer_desc *buf, gfp_t flags, | 784 | struct buffer_desc *buf, gfp_t flags, |
| 785 | enum dma_data_direction dir) | 785 | enum dma_data_direction dir) |
| 786 | { | 786 | { |
| 787 | for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) { | 787 | for (; nbytes > 0; sg = sg_next(sg)) { |
| 788 | unsigned len = min(nbytes, sg->length); | 788 | unsigned len = min(nbytes, sg->length); |
| 789 | struct buffer_desc *next_buf; | 789 | struct buffer_desc *next_buf; |
| 790 | u32 next_buf_phys; | 790 | u32 next_buf_phys; |
| @@ -982,7 +982,7 @@ static int hmac_inconsistent(struct scatterlist *sg, unsigned start, | |||
| 982 | break; | 982 | break; |
| 983 | 983 | ||
| 984 | offset += sg->length; | 984 | offset += sg->length; |
| 985 | sg = scatterwalk_sg_next(sg); | 985 | sg = sg_next(sg); |
| 986 | } | 986 | } |
| 987 | return (start + nbytes > offset + sg->length); | 987 | return (start + nbytes > offset + sg->length); |
| 988 | } | 988 | } |
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index a392465d3e3f..1da6dc59d0dd 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c | |||
| @@ -177,7 +177,7 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, | |||
| 177 | break; | 177 | break; |
| 178 | 178 | ||
| 179 | offset += sg_src->length; | 179 | offset += sg_src->length; |
| 180 | sg_src = scatterwalk_sg_next(sg_src); | 180 | sg_src = sg_next(sg_src); |
| 181 | } | 181 | } |
| 182 | 182 | ||
| 183 | /* start - offset is the number of bytes to advance in the scatterlist | 183 | /* start - offset is the number of bytes to advance in the scatterlist |
| @@ -187,9 +187,9 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, | |||
| 187 | while (len && (nx_sg - nx_dst) < sglen) { | 187 | while (len && (nx_sg - nx_dst) < sglen) { |
| 188 | n = scatterwalk_clamp(&walk, len); | 188 | n = scatterwalk_clamp(&walk, len); |
| 189 | if (!n) { | 189 | if (!n) { |
| 190 | /* In cases where we have a scatterlist chain, scatterwalk_sg_next | 190 | /* In cases where we have a scatterlist chain, sg_next |
| 191 | * handles it properly */ | 191 | * handles it properly */ |
| 192 | scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg)); | 192 | scatterwalk_start(&walk, sg_next(walk.sg)); |
| 193 | n = scatterwalk_clamp(&walk, len); | 193 | n = scatterwalk_clamp(&walk, len); |
| 194 | } | 194 | } |
| 195 | dst = scatterwalk_map(&walk); | 195 | dst = scatterwalk_map(&walk); |
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index f79dd410dede..42f95a4326b0 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c | |||
| @@ -994,7 +994,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id) | |||
| 994 | 994 | ||
| 995 | scatterwalk_advance(&dd->in_walk, 4); | 995 | scatterwalk_advance(&dd->in_walk, 4); |
| 996 | if (dd->in_sg->length == _calc_walked(in)) { | 996 | if (dd->in_sg->length == _calc_walked(in)) { |
| 997 | dd->in_sg = scatterwalk_sg_next(dd->in_sg); | 997 | dd->in_sg = sg_next(dd->in_sg); |
| 998 | if (dd->in_sg) { | 998 | if (dd->in_sg) { |
| 999 | scatterwalk_start(&dd->in_walk, | 999 | scatterwalk_start(&dd->in_walk, |
| 1000 | dd->in_sg); | 1000 | dd->in_sg); |
| @@ -1026,7 +1026,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id) | |||
| 1026 | *dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i)); | 1026 | *dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i)); |
| 1027 | scatterwalk_advance(&dd->out_walk, 4); | 1027 | scatterwalk_advance(&dd->out_walk, 4); |
| 1028 | if (dd->out_sg->length == _calc_walked(out)) { | 1028 | if (dd->out_sg->length == _calc_walked(out)) { |
| 1029 | dd->out_sg = scatterwalk_sg_next(dd->out_sg); | 1029 | dd->out_sg = sg_next(dd->out_sg); |
| 1030 | if (dd->out_sg) { | 1030 | if (dd->out_sg) { |
| 1031 | scatterwalk_start(&dd->out_walk, | 1031 | scatterwalk_start(&dd->out_walk, |
| 1032 | dd->out_sg); | 1032 | dd->out_sg); |
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index e350f5be4d2e..46307098f8ba 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c | |||
| @@ -921,7 +921,7 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id) | |||
| 921 | 921 | ||
| 922 | scatterwalk_advance(&dd->in_walk, 4); | 922 | scatterwalk_advance(&dd->in_walk, 4); |
| 923 | if (dd->in_sg->length == _calc_walked(in)) { | 923 | if (dd->in_sg->length == _calc_walked(in)) { |
| 924 | dd->in_sg = scatterwalk_sg_next(dd->in_sg); | 924 | dd->in_sg = sg_next(dd->in_sg); |
| 925 | if (dd->in_sg) { | 925 | if (dd->in_sg) { |
| 926 | scatterwalk_start(&dd->in_walk, | 926 | scatterwalk_start(&dd->in_walk, |
| 927 | dd->in_sg); | 927 | dd->in_sg); |
| @@ -953,7 +953,7 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id) | |||
| 953 | *dst = omap_des_read(dd, DES_REG_DATA_N(dd, i)); | 953 | *dst = omap_des_read(dd, DES_REG_DATA_N(dd, i)); |
| 954 | scatterwalk_advance(&dd->out_walk, 4); | 954 | scatterwalk_advance(&dd->out_walk, 4); |
| 955 | if (dd->out_sg->length == _calc_walked(out)) { | 955 | if (dd->out_sg->length == _calc_walked(out)) { |
| 956 | dd->out_sg = scatterwalk_sg_next(dd->out_sg); | 956 | dd->out_sg = sg_next(dd->out_sg); |
| 957 | if (dd->out_sg) { | 957 | if (dd->out_sg) { |
| 958 | scatterwalk_start(&dd->out_walk, | 958 | scatterwalk_start(&dd->out_walk, |
| 959 | dd->out_sg); | 959 | dd->out_sg); |
| @@ -965,9 +965,9 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id) | |||
| 965 | } | 965 | } |
| 966 | } | 966 | } |
| 967 | 967 | ||
| 968 | dd->total -= DES_BLOCK_SIZE; | 968 | BUG_ON(dd->total < DES_BLOCK_SIZE); |
| 969 | 969 | ||
| 970 | BUG_ON(dd->total < 0); | 970 | dd->total -= DES_BLOCK_SIZE; |
| 971 | 971 | ||
| 972 | /* Clear IRQ status */ | 972 | /* Clear IRQ status */ |
| 973 | status &= ~DES_REG_IRQ_DATA_OUT; | 973 | status &= ~DES_REG_IRQ_DATA_OUT; |
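The omap-des hunk reorders the sanity check because dd->total is unsigned: after the subtraction, "dd->total < 0" can never be true, and an underflow just wraps to a huge value. Checking that a full block is still pending before subtracting catches the inconsistency while it is still visible. In miniature:

#include <linux/bug.h>
#include <linux/types.h>

#define EXAMPLE_BLOCK_SIZE 8	/* DES block size */

static size_t example_consume_block(size_t total)
{
	/* An unsigned total can never be negative, so the check must come
	 * before the subtraction, not after it. */
	BUG_ON(total < EXAMPLE_BLOCK_SIZE);

	return total - EXAMPLE_BLOCK_SIZE;
}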
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index 2ed425664a16..19c0efa29ab3 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h | |||
| @@ -47,7 +47,6 @@ | |||
| 47 | #ifndef ADF_ACCEL_DEVICES_H_ | 47 | #ifndef ADF_ACCEL_DEVICES_H_ |
| 48 | #define ADF_ACCEL_DEVICES_H_ | 48 | #define ADF_ACCEL_DEVICES_H_ |
| 49 | #include <linux/module.h> | 49 | #include <linux/module.h> |
| 50 | #include <linux/atomic.h> | ||
| 51 | #include <linux/list.h> | 50 | #include <linux/list.h> |
| 52 | #include <linux/proc_fs.h> | 51 | #include <linux/proc_fs.h> |
| 53 | #include <linux/io.h> | 52 | #include <linux/io.h> |
| @@ -148,6 +147,11 @@ struct adf_hw_device_data { | |||
| 148 | int (*alloc_irq)(struct adf_accel_dev *accel_dev); | 147 | int (*alloc_irq)(struct adf_accel_dev *accel_dev); |
| 149 | void (*free_irq)(struct adf_accel_dev *accel_dev); | 148 | void (*free_irq)(struct adf_accel_dev *accel_dev); |
| 150 | void (*enable_error_correction)(struct adf_accel_dev *accel_dev); | 149 | void (*enable_error_correction)(struct adf_accel_dev *accel_dev); |
| 150 | int (*init_admin_comms)(struct adf_accel_dev *accel_dev); | ||
| 151 | void (*exit_admin_comms)(struct adf_accel_dev *accel_dev); | ||
| 152 | int (*init_arb)(struct adf_accel_dev *accel_dev); | ||
| 153 | void (*exit_arb)(struct adf_accel_dev *accel_dev); | ||
| 154 | void (*enable_ints)(struct adf_accel_dev *accel_dev); | ||
| 151 | const char *fw_name; | 155 | const char *fw_name; |
| 152 | uint32_t pci_dev_id; | 156 | uint32_t pci_dev_id; |
| 153 | uint32_t fuses; | 157 | uint32_t fuses; |
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index 10ce4a2854ab..fa1fef824de2 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c | |||
| @@ -82,28 +82,15 @@ struct adf_reset_dev_data { | |||
| 82 | struct work_struct reset_work; | 82 | struct work_struct reset_work; |
| 83 | }; | 83 | }; |
| 84 | 84 | ||
| 85 | #define PPDSTAT_OFFSET 0x7E | ||
| 86 | static void adf_dev_restore(struct adf_accel_dev *accel_dev) | 85 | static void adf_dev_restore(struct adf_accel_dev *accel_dev) |
| 87 | { | 86 | { |
| 88 | struct pci_dev *pdev = accel_to_pci_dev(accel_dev); | 87 | struct pci_dev *pdev = accel_to_pci_dev(accel_dev); |
| 89 | struct pci_dev *parent = pdev->bus->self; | 88 | struct pci_dev *parent = pdev->bus->self; |
| 90 | uint16_t ppdstat = 0, bridge_ctl = 0; | 89 | uint16_t bridge_ctl = 0; |
| 91 | int pending = 0; | ||
| 92 | 90 | ||
| 93 | pr_info("QAT: Resetting device qat_dev%d\n", accel_dev->accel_id); | 91 | pr_info("QAT: Resetting device qat_dev%d\n", accel_dev->accel_id); |
| 94 | pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat); | ||
| 95 | pending = ppdstat & PCI_EXP_DEVSTA_TRPND; | ||
| 96 | if (pending) { | ||
| 97 | int ctr = 0; | ||
| 98 | |||
| 99 | do { | ||
| 100 | msleep(100); | ||
| 101 | pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat); | ||
| 102 | pending = ppdstat & PCI_EXP_DEVSTA_TRPND; | ||
| 103 | } while (pending && ctr++ < 10); | ||
| 104 | } | ||
| 105 | 92 | ||
| 106 | if (pending) | 93 | if (!pci_wait_for_pending_transaction(pdev)) |
| 107 | pr_info("QAT: Transaction still in progress. Proceeding\n"); | 94 | pr_info("QAT: Transaction still in progress. Proceeding\n"); |
| 108 | 95 | ||
| 109 | pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl); | 96 | pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl); |
| @@ -125,8 +112,9 @@ static void adf_device_reset_worker(struct work_struct *work) | |||
| 125 | 112 | ||
| 126 | adf_dev_restarting_notify(accel_dev); | 113 | adf_dev_restarting_notify(accel_dev); |
| 127 | adf_dev_stop(accel_dev); | 114 | adf_dev_stop(accel_dev); |
| 115 | adf_dev_shutdown(accel_dev); | ||
| 128 | adf_dev_restore(accel_dev); | 116 | adf_dev_restore(accel_dev); |
| 129 | if (adf_dev_start(accel_dev)) { | 117 | if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) { |
| 130 | /* The device hanged and we can't restart it so stop here */ | 118 | /* The device hanged and we can't restart it so stop here */ |
| 131 | dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); | 119 | dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); |
| 132 | kfree(reset_data); | 120 | kfree(reset_data); |
| @@ -148,8 +136,8 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, | |||
| 148 | { | 136 | { |
| 149 | struct adf_reset_dev_data *reset_data; | 137 | struct adf_reset_dev_data *reset_data; |
| 150 | 138 | ||
| 151 | if (adf_dev_started(accel_dev) && | 139 | if (!adf_dev_started(accel_dev) || |
| 152 | !test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) | 140 | test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) |
| 153 | return 0; | 141 | return 0; |
| 154 | 142 | ||
| 155 | set_bit(ADF_STATUS_RESTARTING, &accel_dev->status); | 143 | set_bit(ADF_STATUS_RESTARTING, &accel_dev->status); |
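The adf_aer change drops the hand-rolled poll of the PCIe device status register in favour of pci_wait_for_pending_transaction(), the PCI core helper that performs the same bounded wait for outstanding transactions and returns non-zero once none are pending. The calling pattern is simply:

#include <linux/pci.h>

/* Sketch: quiesce outstanding PCIe transactions before a bridge reset. */
static void example_quiesce_before_reset(struct pci_dev *pdev)
{
	/* returns 1 when no transactions are pending, 0 on timeout */
	if (!pci_wait_for_pending_transaction(pdev))
		dev_info(&pdev->dev,
			 "transactions still pending, resetting anyway\n");
}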
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c index aba7f1d043fb..de16da9070a5 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/qat/qat_common/adf_cfg.c | |||
| @@ -50,6 +50,7 @@ | |||
| 50 | #include <linux/seq_file.h> | 50 | #include <linux/seq_file.h> |
| 51 | #include "adf_accel_devices.h" | 51 | #include "adf_accel_devices.h" |
| 52 | #include "adf_cfg.h" | 52 | #include "adf_cfg.h" |
| 53 | #include "adf_common_drv.h" | ||
| 53 | 54 | ||
| 54 | static DEFINE_MUTEX(qat_cfg_read_lock); | 55 | static DEFINE_MUTEX(qat_cfg_read_lock); |
| 55 | 56 | ||
| @@ -159,6 +160,7 @@ void adf_cfg_del_all(struct adf_accel_dev *accel_dev) | |||
| 159 | down_write(&dev_cfg_data->lock); | 160 | down_write(&dev_cfg_data->lock); |
| 160 | adf_cfg_section_del_all(&dev_cfg_data->sec_list); | 161 | adf_cfg_section_del_all(&dev_cfg_data->sec_list); |
| 161 | up_write(&dev_cfg_data->lock); | 162 | up_write(&dev_cfg_data->lock); |
| 163 | clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); | ||
| 162 | } | 164 | } |
| 163 | 165 | ||
| 164 | /** | 166 | /** |
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 5e8f9d431e5d..a62e485c8786 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h | |||
| @@ -93,7 +93,7 @@ int adf_service_unregister(struct service_hndl *service); | |||
| 93 | int adf_dev_init(struct adf_accel_dev *accel_dev); | 93 | int adf_dev_init(struct adf_accel_dev *accel_dev); |
| 94 | int adf_dev_start(struct adf_accel_dev *accel_dev); | 94 | int adf_dev_start(struct adf_accel_dev *accel_dev); |
| 95 | int adf_dev_stop(struct adf_accel_dev *accel_dev); | 95 | int adf_dev_stop(struct adf_accel_dev *accel_dev); |
| 96 | int adf_dev_shutdown(struct adf_accel_dev *accel_dev); | 96 | void adf_dev_shutdown(struct adf_accel_dev *accel_dev); |
| 97 | 97 | ||
| 98 | int adf_ctl_dev_register(void); | 98 | int adf_ctl_dev_register(void); |
| 99 | void adf_ctl_dev_unregister(void); | 99 | void adf_ctl_dev_unregister(void); |
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index 7ee93f881db6..74207a6f0516 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c | |||
| @@ -282,6 +282,8 @@ static int adf_ctl_stop_devices(uint32_t id) | |||
| 282 | if (adf_dev_stop(accel_dev)) { | 282 | if (adf_dev_stop(accel_dev)) { |
| 283 | pr_err("QAT: Failed to stop qat_dev%d\n", id); | 283 | pr_err("QAT: Failed to stop qat_dev%d\n", id); |
| 284 | ret = -EFAULT; | 284 | ret = -EFAULT; |
| 285 | } else { | ||
| 286 | adf_dev_shutdown(accel_dev); | ||
| 285 | } | 287 | } |
| 286 | } | 288 | } |
| 287 | } | 289 | } |
| @@ -343,7 +345,9 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd, | |||
| 343 | if (!adf_dev_started(accel_dev)) { | 345 | if (!adf_dev_started(accel_dev)) { |
| 344 | pr_info("QAT: Starting acceleration device qat_dev%d.\n", | 346 | pr_info("QAT: Starting acceleration device qat_dev%d.\n", |
| 345 | ctl_data->device_id); | 347 | ctl_data->device_id); |
| 346 | ret = adf_dev_start(accel_dev); | 348 | ret = adf_dev_init(accel_dev); |
| 349 | if (!ret) | ||
| 350 | ret = adf_dev_start(accel_dev); | ||
| 347 | } else { | 351 | } else { |
| 348 | pr_info("QAT: Acceleration device qat_dev%d already started.\n", | 352 | pr_info("QAT: Acceleration device qat_dev%d already started.\n", |
| 349 | ctl_data->device_id); | 353 | ctl_data->device_id); |
| @@ -351,6 +355,7 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd, | |||
| 351 | if (ret) { | 355 | if (ret) { |
| 352 | pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id); | 356 | pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id); |
| 353 | adf_dev_stop(accel_dev); | 357 | adf_dev_stop(accel_dev); |
| 358 | adf_dev_shutdown(accel_dev); | ||
| 354 | } | 359 | } |
| 355 | out: | 360 | out: |
| 356 | kfree(ctl_data); | 361 | kfree(ctl_data); |
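With adf_dev_shutdown() split out as a separate, void-returning step, the control paths pair the lifecycle calls symmetrically: init then start on the way up, stop then shutdown on the way down, and a failed start-up is unwound with both. A compact sketch of that pairing; the lifecycle helpers are the ones exported through adf_common_drv.h, while the wrapper functions are illustrative:

#include "adf_accel_devices.h"
#include "adf_common_drv.h"

/* Illustrative wrappers around the exported QAT lifecycle helpers. */
static int example_qat_bring_up(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_dev_init(accel_dev);		/* rings, admin comms, arbiter */
	if (!ret)
		ret = adf_dev_start(accel_dev);	/* AEs plus registered services */

	if (ret) {
		/* unwind whatever did come up */
		adf_dev_stop(accel_dev);
		adf_dev_shutdown(accel_dev);
	}

	return ret;
}

static void example_qat_bring_down(struct adf_accel_dev *accel_dev)
{
	if (!adf_dev_stop(accel_dev))
		adf_dev_shutdown(accel_dev);
}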
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index 5c0e47a00a87..8f0ca498ab87 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c | |||
| @@ -108,26 +108,47 @@ int adf_service_unregister(struct service_hndl *service) | |||
| 108 | EXPORT_SYMBOL_GPL(adf_service_unregister); | 108 | EXPORT_SYMBOL_GPL(adf_service_unregister); |
| 109 | 109 | ||
| 110 | /** | 110 | /** |
| 111 | * adf_dev_start() - Start acceleration service for the given accel device | 111 | * adf_dev_init() - Init data structures and services for the given accel device |
| 112 | * @accel_dev: Pointer to acceleration device. | 112 | * @accel_dev: Pointer to acceleration device. |
| 113 | * | 113 | * |
| 114 | * Function notifies all the registered services that the acceleration device | 114 | * Initialize the ring data structures and the admin comms and arbitration |
| 115 | * is ready to be used. | 115 | * services. |
| 116 | * To be used by QAT device specific drivers. | ||
| 117 | * | 116 | * |
| 118 | * Return: 0 on success, error code otherwise. | 117 | * Return: 0 on success, error code otherwise. |
| 119 | */ | 118 | */ |
| 120 | int adf_dev_start(struct adf_accel_dev *accel_dev) | 119 | int adf_dev_init(struct adf_accel_dev *accel_dev) |
| 121 | { | 120 | { |
| 122 | struct service_hndl *service; | 121 | struct service_hndl *service; |
| 123 | struct list_head *list_itr; | 122 | struct list_head *list_itr; |
| 124 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | 123 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; |
| 125 | 124 | ||
| 125 | if (!hw_data) { | ||
| 126 | dev_err(&GET_DEV(accel_dev), | ||
| 127 | "QAT: Failed to init device - hw_data not set\n"); | ||
| 128 | return -EFAULT; | ||
| 129 | } | ||
| 130 | |||
| 126 | if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) { | 131 | if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) { |
| 127 | pr_info("QAT: Device not configured\n"); | 132 | pr_info("QAT: Device not configured\n"); |
| 128 | return -EFAULT; | 133 | return -EFAULT; |
| 129 | } | 134 | } |
| 130 | set_bit(ADF_STATUS_STARTING, &accel_dev->status); | 135 | |
| 136 | if (adf_init_etr_data(accel_dev)) { | ||
| 137 | dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n"); | ||
| 138 | return -EFAULT; | ||
| 139 | } | ||
| 140 | |||
| 141 | if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) { | ||
| 142 | dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n"); | ||
| 143 | return -EFAULT; | ||
| 144 | } | ||
| 145 | |||
| 146 | if (hw_data->init_arb && hw_data->init_arb(accel_dev)) { | ||
| 147 | dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n"); | ||
| 148 | return -EFAULT; | ||
| 149 | } | ||
| 150 | |||
| 151 | hw_data->enable_ints(accel_dev); | ||
| 131 | 152 | ||
| 132 | if (adf_ae_init(accel_dev)) { | 153 | if (adf_ae_init(accel_dev)) { |
| 133 | pr_err("QAT: Failed to initialise Acceleration Engine\n"); | 154 | pr_err("QAT: Failed to initialise Acceleration Engine\n"); |
| @@ -178,6 +199,27 @@ int adf_dev_start(struct adf_accel_dev *accel_dev) | |||
| 178 | 199 | ||
| 179 | hw_data->enable_error_correction(accel_dev); | 200 | hw_data->enable_error_correction(accel_dev); |
| 180 | 201 | ||
| 202 | return 0; | ||
| 203 | } | ||
| 204 | EXPORT_SYMBOL_GPL(adf_dev_init); | ||
| 205 | |||
| 206 | /** | ||
| 207 | * adf_dev_start() - Start acceleration service for the given accel device | ||
| 208 | * @accel_dev: Pointer to acceleration device. | ||
| 209 | * | ||
| 210 | * Function notifies all the registered services that the acceleration device | ||
| 211 | * is ready to be used. | ||
| 212 | * To be used by QAT device specific drivers. | ||
| 213 | * | ||
| 214 | * Return: 0 on success, error code otherwise. | ||
| 215 | */ | ||
| 216 | int adf_dev_start(struct adf_accel_dev *accel_dev) | ||
| 217 | { | ||
| 218 | struct service_hndl *service; | ||
| 219 | struct list_head *list_itr; | ||
| 220 | |||
| 221 | set_bit(ADF_STATUS_STARTING, &accel_dev->status); | ||
| 222 | |||
| 181 | if (adf_ae_start(accel_dev)) { | 223 | if (adf_ae_start(accel_dev)) { |
| 182 | pr_err("QAT: AE Start Failed\n"); | 224 | pr_err("QAT: AE Start Failed\n"); |
| 183 | return -EFAULT; | 225 | return -EFAULT; |
| @@ -232,16 +274,15 @@ EXPORT_SYMBOL_GPL(adf_dev_start); | |||
| 232 | */ | 274 | */ |
| 233 | int adf_dev_stop(struct adf_accel_dev *accel_dev) | 275 | int adf_dev_stop(struct adf_accel_dev *accel_dev) |
| 234 | { | 276 | { |
| 235 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
| 236 | struct service_hndl *service; | 277 | struct service_hndl *service; |
| 237 | struct list_head *list_itr; | 278 | struct list_head *list_itr; |
| 238 | int ret, wait = 0; | 279 | bool wait = false; |
| 280 | int ret; | ||
| 239 | 281 | ||
| 240 | if (!adf_dev_started(accel_dev) && | 282 | if (!adf_dev_started(accel_dev) && |
| 241 | !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) { | 283 | !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) { |
| 242 | return 0; | 284 | return 0; |
| 243 | } | 285 | } |
| 244 | clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); | ||
| 245 | clear_bit(ADF_STATUS_STARTING, &accel_dev->status); | 286 | clear_bit(ADF_STATUS_STARTING, &accel_dev->status); |
| 246 | clear_bit(ADF_STATUS_STARTED, &accel_dev->status); | 287 | clear_bit(ADF_STATUS_STARTED, &accel_dev->status); |
| 247 | 288 | ||
| @@ -258,7 +299,7 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev) | |||
| 258 | if (!ret) { | 299 | if (!ret) { |
| 259 | clear_bit(accel_dev->accel_id, &service->start_status); | 300 | clear_bit(accel_dev->accel_id, &service->start_status); |
| 260 | } else if (ret == -EAGAIN) { | 301 | } else if (ret == -EAGAIN) { |
| 261 | wait = 1; | 302 | wait = true; |
| 262 | clear_bit(accel_dev->accel_id, &service->start_status); | 303 | clear_bit(accel_dev->accel_id, &service->start_status); |
| 263 | } | 304 | } |
| 264 | } | 305 | } |
| @@ -278,13 +319,36 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev) | |||
| 278 | if (wait) | 319 | if (wait) |
| 279 | msleep(100); | 320 | msleep(100); |
| 280 | 321 | ||
| 281 | if (adf_dev_started(accel_dev)) { | 322 | if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) { |
| 282 | if (adf_ae_stop(accel_dev)) | 323 | if (adf_ae_stop(accel_dev)) |
| 283 | pr_err("QAT: failed to stop AE\n"); | 324 | pr_err("QAT: failed to stop AE\n"); |
| 284 | else | 325 | else |
| 285 | clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); | 326 | clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); |
| 286 | } | 327 | } |
| 287 | 328 | ||
| 329 | return 0; | ||
| 330 | } | ||
| 331 | EXPORT_SYMBOL_GPL(adf_dev_stop); | ||
| 332 | |||
| 333 | /** | ||
| 334 | * adf_dev_shutdown() - shutdown acceleration services and data structures | ||
| 335 | * @accel_dev: Pointer to acceleration device | ||
| 336 | * | ||
| 337 | * Cleanup the ring data structures and the admin comms and arbitration | ||
| 338 | * services. | ||
| 339 | */ | ||
| 340 | void adf_dev_shutdown(struct adf_accel_dev *accel_dev) | ||
| 341 | { | ||
| 342 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
| 343 | struct service_hndl *service; | ||
| 344 | struct list_head *list_itr; | ||
| 345 | |||
| 346 | if (!hw_data) { | ||
| 347 | dev_err(&GET_DEV(accel_dev), | ||
| 348 | "QAT: Failed to shutdown device - hw_data not set\n"); | ||
| 349 | return; | ||
| 350 | } | ||
| 351 | |||
| 288 | if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) { | 352 | if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) { |
| 289 | if (adf_ae_fw_release(accel_dev)) | 353 | if (adf_ae_fw_release(accel_dev)) |
| 290 | pr_err("QAT: Failed to release the ucode\n"); | 354 | pr_err("QAT: Failed to release the ucode\n"); |
| @@ -335,9 +399,15 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev) | |||
| 335 | if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) | 399 | if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) |
| 336 | adf_cfg_del_all(accel_dev); | 400 | adf_cfg_del_all(accel_dev); |
| 337 | 401 | ||
| 338 | return 0; | 402 | if (hw_data->exit_arb) |
| 403 | hw_data->exit_arb(accel_dev); | ||
| 404 | |||
| 405 | if (hw_data->exit_admin_comms) | ||
| 406 | hw_data->exit_admin_comms(accel_dev); | ||
| 407 | |||
| 408 | adf_cleanup_etr_data(accel_dev); | ||
| 339 | } | 409 | } |
| 340 | EXPORT_SYMBOL_GPL(adf_dev_stop); | 410 | EXPORT_SYMBOL_GPL(adf_dev_shutdown); |
| 341 | 411 | ||
| 342 | int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) | 412 | int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) |
| 343 | { | 413 | { |
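adf_dev_init() and adf_dev_shutdown() drive the generation-specific pieces through the new optional hooks in struct adf_hw_device_data (init_admin_comms, init_arb, enable_ints and their exit counterparts), so the common code only invokes a hook the device-specific driver actually filled in. Reduced to its essentials, the pattern looks like this (wrapper names illustrative):

#include "adf_accel_devices.h"

/* Sketch of the optional-hook pattern used by adf_dev_init()/_shutdown(). */
static int example_init_hw_services(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	/* only call a hook if the device-specific driver provided one */
	if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev))
		return -EFAULT;

	if (hw_data->init_arb && hw_data->init_arb(accel_dev))
		return -EFAULT;

	hw_data->enable_ints(accel_dev);
	return 0;
}

static void example_exit_hw_services(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	/* teardown mirrors setup, in reverse order */
	if (hw_data->exit_arb)
		hw_data->exit_arb(accel_dev);
	if (hw_data->exit_admin_comms)
		hw_data->exit_admin_comms(accel_dev);
}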
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h index c40546079981..a4869627fd57 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_internal.h +++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h | |||
| @@ -48,7 +48,6 @@ | |||
| 48 | #define ADF_TRANSPORT_INTRN_H | 48 | #define ADF_TRANSPORT_INTRN_H |
| 49 | 49 | ||
| 50 | #include <linux/interrupt.h> | 50 | #include <linux/interrupt.h> |
| 51 | #include <linux/atomic.h> | ||
| 52 | #include <linux/spinlock_types.h> | 51 | #include <linux/spinlock_types.h> |
| 53 | #include "adf_transport.h" | 52 | #include "adf_transport.h" |
| 54 | 53 | ||
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h index 5031f8c10d75..68f191b653b0 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h | |||
| @@ -301,5 +301,5 @@ struct icp_qat_hw_cipher_aes256_f8 { | |||
| 301 | 301 | ||
| 302 | struct icp_qat_hw_cipher_algo_blk { | 302 | struct icp_qat_hw_cipher_algo_blk { |
| 303 | struct icp_qat_hw_cipher_aes256_f8 aes; | 303 | struct icp_qat_hw_cipher_aes256_f8 aes; |
| 304 | }; | 304 | } __aligned(64); |
| 305 | #endif | 305 | #endif |
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 19eea1c832ac..1dc5b0a17cf7 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c | |||
| @@ -63,15 +63,15 @@ | |||
| 63 | #include "icp_qat_fw.h" | 63 | #include "icp_qat_fw.h" |
| 64 | #include "icp_qat_fw_la.h" | 64 | #include "icp_qat_fw_la.h" |
| 65 | 65 | ||
| 66 | #define QAT_AES_HW_CONFIG_ENC(alg) \ | 66 | #define QAT_AES_HW_CONFIG_CBC_ENC(alg) \ |
| 67 | ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ | 67 | ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ |
| 68 | ICP_QAT_HW_CIPHER_NO_CONVERT, \ | 68 | ICP_QAT_HW_CIPHER_NO_CONVERT, \ |
| 69 | ICP_QAT_HW_CIPHER_ENCRYPT) | 69 | ICP_QAT_HW_CIPHER_ENCRYPT) |
| 70 | 70 | ||
| 71 | #define QAT_AES_HW_CONFIG_DEC(alg) \ | 71 | #define QAT_AES_HW_CONFIG_CBC_DEC(alg) \ |
| 72 | ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ | 72 | ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ |
| 73 | ICP_QAT_HW_CIPHER_KEY_CONVERT, \ | 73 | ICP_QAT_HW_CIPHER_KEY_CONVERT, \ |
| 74 | ICP_QAT_HW_CIPHER_DECRYPT) | 74 | ICP_QAT_HW_CIPHER_DECRYPT) |
| 75 | 75 | ||
| 76 | static atomic_t active_dev; | 76 | static atomic_t active_dev; |
| 77 | 77 | ||
| @@ -102,25 +102,31 @@ struct qat_alg_cd { | |||
| 102 | }; | 102 | }; |
| 103 | } __aligned(64); | 103 | } __aligned(64); |
| 104 | 104 | ||
| 105 | #define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk) | 105 | struct qat_alg_aead_ctx { |
| 106 | |||
| 107 | struct qat_auth_state { | ||
| 108 | uint8_t data[MAX_AUTH_STATE_SIZE + 64]; | ||
| 109 | } __aligned(64); | ||
| 110 | |||
| 111 | struct qat_alg_session_ctx { | ||
| 112 | struct qat_alg_cd *enc_cd; | 106 | struct qat_alg_cd *enc_cd; |
| 113 | dma_addr_t enc_cd_paddr; | ||
| 114 | struct qat_alg_cd *dec_cd; | 107 | struct qat_alg_cd *dec_cd; |
| 108 | dma_addr_t enc_cd_paddr; | ||
| 115 | dma_addr_t dec_cd_paddr; | 109 | dma_addr_t dec_cd_paddr; |
| 116 | struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl; | 110 | struct icp_qat_fw_la_bulk_req enc_fw_req; |
| 117 | struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl; | 111 | struct icp_qat_fw_la_bulk_req dec_fw_req; |
| 118 | struct qat_crypto_instance *inst; | ||
| 119 | struct crypto_tfm *tfm; | ||
| 120 | struct crypto_shash *hash_tfm; | 112 | struct crypto_shash *hash_tfm; |
| 121 | enum icp_qat_hw_auth_algo qat_hash_alg; | 113 | enum icp_qat_hw_auth_algo qat_hash_alg; |
| 114 | struct qat_crypto_instance *inst; | ||
| 115 | struct crypto_tfm *tfm; | ||
| 122 | uint8_t salt[AES_BLOCK_SIZE]; | 116 | uint8_t salt[AES_BLOCK_SIZE]; |
| 123 | spinlock_t lock; /* protects qat_alg_session_ctx struct */ | 117 | spinlock_t lock; /* protects qat_alg_aead_ctx struct */ |
| 118 | }; | ||
| 119 | |||
| 120 | struct qat_alg_ablkcipher_ctx { | ||
| 121 | struct icp_qat_hw_cipher_algo_blk *enc_cd; | ||
| 122 | struct icp_qat_hw_cipher_algo_blk *dec_cd; | ||
| 123 | dma_addr_t enc_cd_paddr; | ||
| 124 | dma_addr_t dec_cd_paddr; | ||
| 125 | struct icp_qat_fw_la_bulk_req enc_fw_req; | ||
| 126 | struct icp_qat_fw_la_bulk_req dec_fw_req; | ||
| 127 | struct qat_crypto_instance *inst; | ||
| 128 | struct crypto_tfm *tfm; | ||
| 129 | spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */ | ||
| 124 | }; | 130 | }; |
| 125 | 131 | ||
| 126 | static int get_current_node(void) | 132 | static int get_current_node(void) |
| @@ -144,43 +150,37 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) | |||
| 144 | } | 150 | } |
| 145 | 151 | ||
| 146 | static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, | 152 | static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, |
| 147 | struct qat_alg_session_ctx *ctx, | 153 | struct qat_alg_aead_ctx *ctx, |
| 148 | const uint8_t *auth_key, | 154 | const uint8_t *auth_key, |
| 149 | unsigned int auth_keylen) | 155 | unsigned int auth_keylen) |
| 150 | { | 156 | { |
| 151 | struct qat_auth_state auth_state; | ||
| 152 | SHASH_DESC_ON_STACK(shash, ctx->hash_tfm); | 157 | SHASH_DESC_ON_STACK(shash, ctx->hash_tfm); |
| 153 | struct sha1_state sha1; | 158 | struct sha1_state sha1; |
| 154 | struct sha256_state sha256; | 159 | struct sha256_state sha256; |
| 155 | struct sha512_state sha512; | 160 | struct sha512_state sha512; |
| 156 | int block_size = crypto_shash_blocksize(ctx->hash_tfm); | 161 | int block_size = crypto_shash_blocksize(ctx->hash_tfm); |
| 157 | int digest_size = crypto_shash_digestsize(ctx->hash_tfm); | 162 | int digest_size = crypto_shash_digestsize(ctx->hash_tfm); |
| 158 | uint8_t *ipad = auth_state.data; | 163 | char ipad[block_size]; |
| 159 | uint8_t *opad = ipad + block_size; | 164 | char opad[block_size]; |
| 160 | __be32 *hash_state_out; | 165 | __be32 *hash_state_out; |
| 161 | __be64 *hash512_state_out; | 166 | __be64 *hash512_state_out; |
| 162 | int i, offset; | 167 | int i, offset; |
| 163 | 168 | ||
| 164 | memzero_explicit(auth_state.data, MAX_AUTH_STATE_SIZE + 64); | 169 | memset(ipad, 0, block_size); |
| 170 | memset(opad, 0, block_size); | ||
| 165 | shash->tfm = ctx->hash_tfm; | 171 | shash->tfm = ctx->hash_tfm; |
| 166 | shash->flags = 0x0; | 172 | shash->flags = 0x0; |
| 167 | 173 | ||
| 168 | if (auth_keylen > block_size) { | 174 | if (auth_keylen > block_size) { |
| 169 | char buff[SHA512_BLOCK_SIZE]; | ||
| 170 | int ret = crypto_shash_digest(shash, auth_key, | 175 | int ret = crypto_shash_digest(shash, auth_key, |
| 171 | auth_keylen, buff); | 176 | auth_keylen, ipad); |
| 172 | if (ret) | 177 | if (ret) |
| 173 | return ret; | 178 | return ret; |
| 174 | 179 | ||
| 175 | memcpy(ipad, buff, digest_size); | 180 | memcpy(opad, ipad, digest_size); |
| 176 | memcpy(opad, buff, digest_size); | ||
| 177 | memzero_explicit(ipad + digest_size, block_size - digest_size); | ||
| 178 | memzero_explicit(opad + digest_size, block_size - digest_size); | ||
| 179 | } else { | 181 | } else { |
| 180 | memcpy(ipad, auth_key, auth_keylen); | 182 | memcpy(ipad, auth_key, auth_keylen); |
| 181 | memcpy(opad, auth_key, auth_keylen); | 183 | memcpy(opad, auth_key, auth_keylen); |
| 182 | memzero_explicit(ipad + auth_keylen, block_size - auth_keylen); | ||
| 183 | memzero_explicit(opad + auth_keylen, block_size - auth_keylen); | ||
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | for (i = 0; i < block_size; i++) { | 186 | for (i = 0; i < block_size; i++) { |
| @@ -267,8 +267,6 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header) | |||
| 267 | header->comn_req_flags = | 267 | header->comn_req_flags = |
| 268 | ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR, | 268 | ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR, |
| 269 | QAT_COMN_PTR_TYPE_SGL); | 269 | QAT_COMN_PTR_TYPE_SGL); |
| 270 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, | ||
| 271 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER); | ||
| 272 | ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags, | 270 | ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags, |
| 273 | ICP_QAT_FW_LA_PARTIAL_NONE); | 271 | ICP_QAT_FW_LA_PARTIAL_NONE); |
| 274 | ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, | 272 | ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, |
| @@ -279,8 +277,9 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header) | |||
| 279 | ICP_QAT_FW_LA_NO_UPDATE_STATE); | 277 | ICP_QAT_FW_LA_NO_UPDATE_STATE); |
| 280 | } | 278 | } |
| 281 | 279 | ||
| 282 | static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx, | 280 | static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx, |
| 283 | int alg, struct crypto_authenc_keys *keys) | 281 | int alg, |
| 282 | struct crypto_authenc_keys *keys) | ||
| 284 | { | 283 | { |
| 285 | struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); | 284 | struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); |
| 286 | unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; | 285 | unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; |
| @@ -289,7 +288,7 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx, | |||
| 289 | struct icp_qat_hw_auth_algo_blk *hash = | 288 | struct icp_qat_hw_auth_algo_blk *hash = |
| 290 | (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx + | 289 | (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx + |
| 291 | sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen); | 290 | sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen); |
| 292 | struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl; | 291 | struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req; |
| 293 | struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; | 292 | struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; |
| 294 | struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; | 293 | struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; |
| 295 | void *ptr = &req_tmpl->cd_ctrl; | 294 | void *ptr = &req_tmpl->cd_ctrl; |
| @@ -297,7 +296,7 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx, | |||
| 297 | struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; | 296 | struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; |
| 298 | 297 | ||
| 299 | /* CD setup */ | 298 | /* CD setup */ |
| 300 | cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg); | 299 | cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg); |
| 301 | memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); | 300 | memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); |
| 302 | hash->sha.inner_setup.auth_config.config = | 301 | hash->sha.inner_setup.auth_config.config = |
| 303 | ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, | 302 | ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, |
| @@ -311,6 +310,8 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx, | |||
| 311 | /* Request setup */ | 310 | /* Request setup */ |
| 312 | qat_alg_init_common_hdr(header); | 311 | qat_alg_init_common_hdr(header); |
| 313 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH; | 312 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH; |
| 313 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, | ||
| 314 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER); | ||
| 314 | ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, | 315 | ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, |
| 315 | ICP_QAT_FW_LA_RET_AUTH_RES); | 316 | ICP_QAT_FW_LA_RET_AUTH_RES); |
| 316 | ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, | 317 | ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, |
| @@ -356,8 +357,9 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx, | |||
| 356 | return 0; | 357 | return 0; |
| 357 | } | 358 | } |
| 358 | 359 | ||
| 359 | static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx, | 360 | static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx, |
| 360 | int alg, struct crypto_authenc_keys *keys) | 361 | int alg, |
| 362 | struct crypto_authenc_keys *keys) | ||
| 361 | { | 363 | { |
| 362 | struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); | 364 | struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); |
| 363 | unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; | 365 | unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; |
| @@ -367,7 +369,7 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx, | |||
| 367 | (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx + | 369 | (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx + |
| 368 | sizeof(struct icp_qat_hw_auth_setup) + | 370 | sizeof(struct icp_qat_hw_auth_setup) + |
| 369 | roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2); | 371 | roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2); |
| 370 | struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl; | 372 | struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req; |
| 371 | struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; | 373 | struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; |
| 372 | struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; | 374 | struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; |
| 373 | void *ptr = &req_tmpl->cd_ctrl; | 375 | void *ptr = &req_tmpl->cd_ctrl; |
| @@ -379,7 +381,7 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx, | |||
| 379 | sizeof(struct icp_qat_fw_la_cipher_req_params)); | 381 | sizeof(struct icp_qat_fw_la_cipher_req_params)); |
| 380 | 382 | ||
| 381 | /* CD setup */ | 383 | /* CD setup */ |
| 382 | cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg); | 384 | cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg); |
| 383 | memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); | 385 | memcpy(cipher->aes.key, keys->enckey, keys->enckeylen); |
| 384 | hash->sha.inner_setup.auth_config.config = | 386 | hash->sha.inner_setup.auth_config.config = |
| 385 | ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, | 387 | ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, |
| @@ -394,6 +396,8 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx, | |||
| 394 | /* Request setup */ | 396 | /* Request setup */ |
| 395 | qat_alg_init_common_hdr(header); | 397 | qat_alg_init_common_hdr(header); |
| 396 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER; | 398 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER; |
| 399 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, | ||
| 400 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER); | ||
| 397 | ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, | 401 | ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, |
| 398 | ICP_QAT_FW_LA_NO_RET_AUTH_RES); | 402 | ICP_QAT_FW_LA_NO_RET_AUTH_RES); |
| 399 | ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, | 403 | ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, |
| @@ -444,36 +448,91 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx, | |||
| 444 | return 0; | 448 | return 0; |
| 445 | } | 449 | } |
| 446 | 450 | ||
| 447 | static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx, | 451 | static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx, |
| 448 | const uint8_t *key, unsigned int keylen) | 452 | struct icp_qat_fw_la_bulk_req *req, |
| 453 | struct icp_qat_hw_cipher_algo_blk *cd, | ||
| 454 | const uint8_t *key, unsigned int keylen) | ||
| 449 | { | 455 | { |
| 450 | struct crypto_authenc_keys keys; | 456 | struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; |
| 451 | int alg; | 457 | struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr; |
| 458 | struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl; | ||
| 452 | 459 | ||
| 453 | if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE)) | 460 | memcpy(cd->aes.key, key, keylen); |
| 454 | return -EFAULT; | 461 | qat_alg_init_common_hdr(header); |
| 462 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER; | ||
| 463 | cd_pars->u.s.content_desc_params_sz = | ||
| 464 | sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3; | ||
| 465 | /* Cipher CD config setup */ | ||
| 466 | cd_ctrl->cipher_key_sz = keylen >> 3; | ||
| 467 | cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3; | ||
| 468 | cd_ctrl->cipher_cfg_offset = 0; | ||
| 469 | ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); | ||
| 470 | ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); | ||
| 471 | } | ||
| 455 | 472 | ||
| 456 | if (crypto_authenc_extractkeys(&keys, key, keylen)) | 473 | static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx, |
| 457 | goto bad_key; | 474 | int alg, const uint8_t *key, |
| 475 | unsigned int keylen) | ||
| 476 | { | ||
| 477 | struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd; | ||
| 478 | struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req; | ||
| 479 | struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; | ||
| 480 | |||
| 481 | qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen); | ||
| 482 | cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr; | ||
| 483 | enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg); | ||
| 484 | } | ||
| 458 | 485 | ||
| 459 | switch (keys.enckeylen) { | 486 | static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx, |
| 487 | int alg, const uint8_t *key, | ||
| 488 | unsigned int keylen) | ||
| 489 | { | ||
| 490 | struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd; | ||
| 491 | struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req; | ||
| 492 | struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; | ||
| 493 | |||
| 494 | qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen); | ||
| 495 | cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr; | ||
| 496 | dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg); | ||
| 497 | } | ||
| 498 | |||
| 499 | static int qat_alg_validate_key(int key_len, int *alg) | ||
| 500 | { | ||
| 501 | switch (key_len) { | ||
| 460 | case AES_KEYSIZE_128: | 502 | case AES_KEYSIZE_128: |
| 461 | alg = ICP_QAT_HW_CIPHER_ALGO_AES128; | 503 | *alg = ICP_QAT_HW_CIPHER_ALGO_AES128; |
| 462 | break; | 504 | break; |
| 463 | case AES_KEYSIZE_192: | 505 | case AES_KEYSIZE_192: |
| 464 | alg = ICP_QAT_HW_CIPHER_ALGO_AES192; | 506 | *alg = ICP_QAT_HW_CIPHER_ALGO_AES192; |
| 465 | break; | 507 | break; |
| 466 | case AES_KEYSIZE_256: | 508 | case AES_KEYSIZE_256: |
| 467 | alg = ICP_QAT_HW_CIPHER_ALGO_AES256; | 509 | *alg = ICP_QAT_HW_CIPHER_ALGO_AES256; |
| 468 | break; | 510 | break; |
| 469 | default: | 511 | default: |
| 470 | goto bad_key; | 512 | return -EINVAL; |
| 471 | } | 513 | } |
| 514 | return 0; | ||
| 515 | } | ||
| 472 | 516 | ||
| 473 | if (qat_alg_init_enc_session(ctx, alg, &keys)) | 517 | static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx, |
| 518 | const uint8_t *key, unsigned int keylen) | ||
| 519 | { | ||
| 520 | struct crypto_authenc_keys keys; | ||
| 521 | int alg; | ||
| 522 | |||
| 523 | if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE)) | ||
| 524 | return -EFAULT; | ||
| 525 | |||
| 526 | if (crypto_authenc_extractkeys(&keys, key, keylen)) | ||
| 527 | goto bad_key; | ||
| 528 | |||
| 529 | if (qat_alg_validate_key(keys.enckeylen, &alg)) | ||
| 530 | goto bad_key; | ||
| 531 | |||
| 532 | if (qat_alg_aead_init_enc_session(ctx, alg, &keys)) | ||
| 474 | goto error; | 533 | goto error; |
| 475 | 534 | ||
| 476 | if (qat_alg_init_dec_session(ctx, alg, &keys)) | 535 | if (qat_alg_aead_init_dec_session(ctx, alg, &keys)) |
| 477 | goto error; | 536 | goto error; |
| 478 | 537 | ||
| 479 | return 0; | 538 | return 0; |
| @@ -484,22 +543,37 @@ error: | |||
| 484 | return -EFAULT; | 543 | return -EFAULT; |
| 485 | } | 544 | } |
| 486 | 545 | ||
| 487 | static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key, | 546 | static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx, |
| 488 | unsigned int keylen) | 547 | const uint8_t *key, |
| 548 | unsigned int keylen) | ||
| 549 | { | ||
| 550 | int alg; | ||
| 551 | |||
| 552 | if (qat_alg_validate_key(keylen, &alg)) | ||
| 553 | goto bad_key; | ||
| 554 | |||
| 555 | qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen); | ||
| 556 | qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen); | ||
| 557 | return 0; | ||
| 558 | bad_key: | ||
| 559 | crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 560 | return -EINVAL; | ||
| 561 | } | ||
| 562 | |||
| 563 | static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, | ||
| 564 | unsigned int keylen) | ||
| 489 | { | 565 | { |
| 490 | struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm); | 566 | struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); |
| 491 | struct device *dev; | 567 | struct device *dev; |
| 492 | 568 | ||
| 493 | spin_lock(&ctx->lock); | 569 | spin_lock(&ctx->lock); |
| 494 | if (ctx->enc_cd) { | 570 | if (ctx->enc_cd) { |
| 495 | /* rekeying */ | 571 | /* rekeying */ |
| 496 | dev = &GET_DEV(ctx->inst->accel_dev); | 572 | dev = &GET_DEV(ctx->inst->accel_dev); |
| 497 | memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd)); | 573 | memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); |
| 498 | memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd)); | 574 | memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); |
| 499 | memzero_explicit(&ctx->enc_fw_req_tmpl, | 575 | memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); |
| 500 | sizeof(struct icp_qat_fw_la_bulk_req)); | 576 | memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); |
| 501 | memzero_explicit(&ctx->dec_fw_req_tmpl, | ||
| 502 | sizeof(struct icp_qat_fw_la_bulk_req)); | ||
| 503 | } else { | 577 | } else { |
| 504 | /* new key */ | 578 | /* new key */ |
| 505 | int node = get_current_node(); | 579 | int node = get_current_node(); |
| @@ -512,16 +586,14 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key, | |||
| 512 | 586 | ||
| 513 | dev = &GET_DEV(inst->accel_dev); | 587 | dev = &GET_DEV(inst->accel_dev); |
| 514 | ctx->inst = inst; | 588 | ctx->inst = inst; |
| 515 | ctx->enc_cd = dma_zalloc_coherent(dev, | 589 | ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), |
| 516 | sizeof(struct qat_alg_cd), | ||
| 517 | &ctx->enc_cd_paddr, | 590 | &ctx->enc_cd_paddr, |
| 518 | GFP_ATOMIC); | 591 | GFP_ATOMIC); |
| 519 | if (!ctx->enc_cd) { | 592 | if (!ctx->enc_cd) { |
| 520 | spin_unlock(&ctx->lock); | 593 | spin_unlock(&ctx->lock); |
| 521 | return -ENOMEM; | 594 | return -ENOMEM; |
| 522 | } | 595 | } |
| 523 | ctx->dec_cd = dma_zalloc_coherent(dev, | 596 | ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), |
| 524 | sizeof(struct qat_alg_cd), | ||
| 525 | &ctx->dec_cd_paddr, | 597 | &ctx->dec_cd_paddr, |
| 526 | GFP_ATOMIC); | 598 | GFP_ATOMIC); |
| 527 | if (!ctx->dec_cd) { | 599 | if (!ctx->dec_cd) { |
| @@ -530,18 +602,18 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key, | |||
| 530 | } | 602 | } |
| 531 | } | 603 | } |
| 532 | spin_unlock(&ctx->lock); | 604 | spin_unlock(&ctx->lock); |
| 533 | if (qat_alg_init_sessions(ctx, key, keylen)) | 605 | if (qat_alg_aead_init_sessions(ctx, key, keylen)) |
| 534 | goto out_free_all; | 606 | goto out_free_all; |
| 535 | 607 | ||
| 536 | return 0; | 608 | return 0; |
| 537 | 609 | ||
| 538 | out_free_all: | 610 | out_free_all: |
| 539 | memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd)); | 611 | memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd)); |
| 540 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), | 612 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), |
| 541 | ctx->dec_cd, ctx->dec_cd_paddr); | 613 | ctx->dec_cd, ctx->dec_cd_paddr); |
| 542 | ctx->dec_cd = NULL; | 614 | ctx->dec_cd = NULL; |
| 543 | out_free_enc: | 615 | out_free_enc: |
| 544 | memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd)); | 616 | memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd)); |
| 545 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), | 617 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), |
| 546 | ctx->enc_cd, ctx->enc_cd_paddr); | 618 | ctx->enc_cd, ctx->enc_cd_paddr); |
| 547 | ctx->enc_cd = NULL; | 619 | ctx->enc_cd = NULL; |
| @@ -557,7 +629,8 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, | |||
| 557 | dma_addr_t blp = qat_req->buf.blp; | 629 | dma_addr_t blp = qat_req->buf.blp; |
| 558 | dma_addr_t blpout = qat_req->buf.bloutp; | 630 | dma_addr_t blpout = qat_req->buf.bloutp; |
| 559 | size_t sz = qat_req->buf.sz; | 631 | size_t sz = qat_req->buf.sz; |
| 560 | int i, bufs = bl->num_bufs; | 632 | size_t sz_out = qat_req->buf.sz_out; |
| 633 | int i; | ||
| 561 | 634 | ||
| 562 | for (i = 0; i < bl->num_bufs; i++) | 635 | for (i = 0; i < bl->num_bufs; i++) |
| 563 | dma_unmap_single(dev, bl->bufers[i].addr, | 636 | dma_unmap_single(dev, bl->bufers[i].addr, |
| @@ -567,14 +640,14 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, | |||
| 567 | kfree(bl); | 640 | kfree(bl); |
| 568 | if (blp != blpout) { | 641 | if (blp != blpout) { |
| 569 | /* If out of place operation dma unmap only data */ | 642 | /* If out of place operation dma unmap only data */ |
| 570 | int bufless = bufs - blout->num_mapped_bufs; | 643 | int bufless = blout->num_bufs - blout->num_mapped_bufs; |
| 571 | 644 | ||
| 572 | for (i = bufless; i < bufs; i++) { | 645 | for (i = bufless; i < blout->num_bufs; i++) { |
| 573 | dma_unmap_single(dev, blout->bufers[i].addr, | 646 | dma_unmap_single(dev, blout->bufers[i].addr, |
| 574 | blout->bufers[i].len, | 647 | blout->bufers[i].len, |
| 575 | DMA_BIDIRECTIONAL); | 648 | DMA_BIDIRECTIONAL); |
| 576 | } | 649 | } |
| 577 | dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE); | 650 | dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE); |
| 578 | kfree(blout); | 651 | kfree(blout); |
| 579 | } | 652 | } |
| 580 | } | 653 | } |
| @@ -587,19 +660,20 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
| 587 | struct qat_crypto_request *qat_req) | 660 | struct qat_crypto_request *qat_req) |
| 588 | { | 661 | { |
| 589 | struct device *dev = &GET_DEV(inst->accel_dev); | 662 | struct device *dev = &GET_DEV(inst->accel_dev); |
| 590 | int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc); | 663 | int i, bufs = 0, sg_nctr = 0; |
| 664 | int n = sg_nents(sgl), assoc_n = sg_nents(assoc); | ||
| 591 | struct qat_alg_buf_list *bufl; | 665 | struct qat_alg_buf_list *bufl; |
| 592 | struct qat_alg_buf_list *buflout = NULL; | 666 | struct qat_alg_buf_list *buflout = NULL; |
| 593 | dma_addr_t blp; | 667 | dma_addr_t blp; |
| 594 | dma_addr_t bloutp = 0; | 668 | dma_addr_t bloutp = 0; |
| 595 | struct scatterlist *sg; | 669 | struct scatterlist *sg; |
| 596 | size_t sz = sizeof(struct qat_alg_buf_list) + | 670 | size_t sz_out, sz = sizeof(struct qat_alg_buf_list) + |
| 597 | ((1 + n + assoc_n) * sizeof(struct qat_alg_buf)); | 671 | ((1 + n + assoc_n) * sizeof(struct qat_alg_buf)); |
| 598 | 672 | ||
| 599 | if (unlikely(!n)) | 673 | if (unlikely(!n)) |
| 600 | return -EINVAL; | 674 | return -EINVAL; |
| 601 | 675 | ||
| 602 | bufl = kmalloc_node(sz, GFP_ATOMIC, | 676 | bufl = kzalloc_node(sz, GFP_ATOMIC, |
| 603 | dev_to_node(&GET_DEV(inst->accel_dev))); | 677 | dev_to_node(&GET_DEV(inst->accel_dev))); |
| 604 | if (unlikely(!bufl)) | 678 | if (unlikely(!bufl)) |
| 605 | return -ENOMEM; | 679 | return -ENOMEM; |
| @@ -620,15 +694,20 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
| 620 | goto err; | 694 | goto err; |
| 621 | bufs++; | 695 | bufs++; |
| 622 | } | 696 | } |
| 623 | bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen, | 697 | if (ivlen) { |
| 624 | DMA_BIDIRECTIONAL); | 698 | bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen, |
| 625 | bufl->bufers[bufs].len = ivlen; | 699 | DMA_BIDIRECTIONAL); |
| 626 | if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) | 700 | bufl->bufers[bufs].len = ivlen; |
| 627 | goto err; | 701 | if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) |
| 628 | bufs++; | 702 | goto err; |
| 703 | bufs++; | ||
| 704 | } | ||
| 629 | 705 | ||
| 630 | for_each_sg(sgl, sg, n, i) { | 706 | for_each_sg(sgl, sg, n, i) { |
| 631 | int y = i + bufs; | 707 | int y = sg_nctr + bufs; |
| 708 | |||
| 709 | if (!sg->length) | ||
| 710 | continue; | ||
| 632 | 711 | ||
| 633 | bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), | 712 | bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), |
| 634 | sg->length, | 713 | sg->length, |
| @@ -636,8 +715,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
| 636 | bufl->bufers[y].len = sg->length; | 715 | bufl->bufers[y].len = sg->length; |
| 637 | if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) | 716 | if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) |
| 638 | goto err; | 717 | goto err; |
| 718 | sg_nctr++; | ||
| 639 | } | 719 | } |
| 640 | bufl->num_bufs = n + bufs; | 720 | bufl->num_bufs = sg_nctr + bufs; |
| 641 | qat_req->buf.bl = bufl; | 721 | qat_req->buf.bl = bufl; |
| 642 | qat_req->buf.blp = blp; | 722 | qat_req->buf.blp = blp; |
| 643 | qat_req->buf.sz = sz; | 723 | qat_req->buf.sz = sz; |
| @@ -645,11 +725,15 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
| 645 | if (sgl != sglout) { | 725 | if (sgl != sglout) { |
| 646 | struct qat_alg_buf *bufers; | 726 | struct qat_alg_buf *bufers; |
| 647 | 727 | ||
| 648 | buflout = kmalloc_node(sz, GFP_ATOMIC, | 728 | n = sg_nents(sglout); |
| 729 | sz_out = sizeof(struct qat_alg_buf_list) + | ||
| 730 | ((1 + n + assoc_n) * sizeof(struct qat_alg_buf)); | ||
| 731 | sg_nctr = 0; | ||
| 732 | buflout = kzalloc_node(sz_out, GFP_ATOMIC, | ||
| 649 | dev_to_node(&GET_DEV(inst->accel_dev))); | 733 | dev_to_node(&GET_DEV(inst->accel_dev))); |
| 650 | if (unlikely(!buflout)) | 734 | if (unlikely(!buflout)) |
| 651 | goto err; | 735 | goto err; |
| 652 | bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE); | 736 | bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); |
| 653 | if (unlikely(dma_mapping_error(dev, bloutp))) | 737 | if (unlikely(dma_mapping_error(dev, bloutp))) |
| 654 | goto err; | 738 | goto err; |
| 655 | bufers = buflout->bufers; | 739 | bufers = buflout->bufers; |
| @@ -660,60 +744,62 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
| 660 | bufers[i].addr = bufl->bufers[i].addr; | 744 | bufers[i].addr = bufl->bufers[i].addr; |
| 661 | } | 745 | } |
| 662 | for_each_sg(sglout, sg, n, i) { | 746 | for_each_sg(sglout, sg, n, i) { |
| 663 | int y = i + bufs; | 747 | int y = sg_nctr + bufs; |
| 748 | |||
| 749 | if (!sg->length) | ||
| 750 | continue; | ||
| 664 | 751 | ||
| 665 | bufers[y].addr = dma_map_single(dev, sg_virt(sg), | 752 | bufers[y].addr = dma_map_single(dev, sg_virt(sg), |
| 666 | sg->length, | 753 | sg->length, |
| 667 | DMA_BIDIRECTIONAL); | 754 | DMA_BIDIRECTIONAL); |
| 668 | buflout->bufers[y].len = sg->length; | ||
| 669 | if (unlikely(dma_mapping_error(dev, bufers[y].addr))) | 755 | if (unlikely(dma_mapping_error(dev, bufers[y].addr))) |
| 670 | goto err; | 756 | goto err; |
| 757 | bufers[y].len = sg->length; | ||
| 758 | sg_nctr++; | ||
| 671 | } | 759 | } |
| 672 | buflout->num_bufs = n + bufs; | 760 | buflout->num_bufs = sg_nctr + bufs; |
| 673 | buflout->num_mapped_bufs = n; | 761 | buflout->num_mapped_bufs = sg_nctr; |
| 674 | qat_req->buf.blout = buflout; | 762 | qat_req->buf.blout = buflout; |
| 675 | qat_req->buf.bloutp = bloutp; | 763 | qat_req->buf.bloutp = bloutp; |
| 764 | qat_req->buf.sz_out = sz_out; | ||
| 676 | } else { | 765 | } else { |
| 677 | /* Otherwise set the src and dst to the same address */ | 766 | /* Otherwise set the src and dst to the same address */ |
| 678 | qat_req->buf.bloutp = qat_req->buf.blp; | 767 | qat_req->buf.bloutp = qat_req->buf.blp; |
| 768 | qat_req->buf.sz_out = 0; | ||
| 679 | } | 769 | } |
| 680 | return 0; | 770 | return 0; |
| 681 | err: | 771 | err: |
| 682 | dev_err(dev, "Failed to map buf for dma\n"); | 772 | dev_err(dev, "Failed to map buf for dma\n"); |
| 683 | for_each_sg(sgl, sg, n + bufs, i) { | 773 | sg_nctr = 0; |
| 684 | if (!dma_mapping_error(dev, bufl->bufers[i].addr)) { | 774 | for (i = 0; i < n + bufs; i++) |
| 775 | if (!dma_mapping_error(dev, bufl->bufers[i].addr)) | ||
| 685 | dma_unmap_single(dev, bufl->bufers[i].addr, | 776 | dma_unmap_single(dev, bufl->bufers[i].addr, |
| 686 | bufl->bufers[i].len, | 777 | bufl->bufers[i].len, |
| 687 | DMA_BIDIRECTIONAL); | 778 | DMA_BIDIRECTIONAL); |
| 688 | } | 779 | |
| 689 | } | ||
| 690 | if (!dma_mapping_error(dev, blp)) | 780 | if (!dma_mapping_error(dev, blp)) |
| 691 | dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); | 781 | dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); |
| 692 | kfree(bufl); | 782 | kfree(bufl); |
| 693 | if (sgl != sglout && buflout) { | 783 | if (sgl != sglout && buflout) { |
| 694 | for_each_sg(sglout, sg, n, i) { | 784 | n = sg_nents(sglout); |
| 695 | int y = i + bufs; | 785 | for (i = bufs; i < n + bufs; i++) |
| 696 | 786 | if (!dma_mapping_error(dev, buflout->bufers[i].addr)) | |
| 697 | if (!dma_mapping_error(dev, buflout->bufers[y].addr)) | 787 | dma_unmap_single(dev, buflout->bufers[i].addr, |
| 698 | dma_unmap_single(dev, buflout->bufers[y].addr, | 788 | buflout->bufers[i].len, |
| 699 | buflout->bufers[y].len, | ||
| 700 | DMA_BIDIRECTIONAL); | 789 | DMA_BIDIRECTIONAL); |
| 701 | } | ||
| 702 | if (!dma_mapping_error(dev, bloutp)) | 790 | if (!dma_mapping_error(dev, bloutp)) |
| 703 | dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE); | 791 | dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); |
| 704 | kfree(buflout); | 792 | kfree(buflout); |
| 705 | } | 793 | } |
| 706 | return -ENOMEM; | 794 | return -ENOMEM; |
| 707 | } | 795 | } |
| 708 | 796 | ||
| 709 | void qat_alg_callback(void *resp) | 797 | static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp, |
| 798 | struct qat_crypto_request *qat_req) | ||
| 710 | { | 799 | { |
| 711 | struct icp_qat_fw_la_resp *qat_resp = resp; | 800 | struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx; |
| 712 | struct qat_crypto_request *qat_req = | ||
| 713 | (void *)(__force long)qat_resp->opaque_data; | ||
| 714 | struct qat_alg_session_ctx *ctx = qat_req->ctx; | ||
| 715 | struct qat_crypto_instance *inst = ctx->inst; | 801 | struct qat_crypto_instance *inst = ctx->inst; |
| 716 | struct aead_request *areq = qat_req->areq; | 802 | struct aead_request *areq = qat_req->aead_req; |
| 717 | uint8_t stat_filed = qat_resp->comn_resp.comn_status; | 803 | uint8_t stat_filed = qat_resp->comn_resp.comn_status; |
| 718 | int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); | 804 | int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); |
| 719 | 805 | ||
| @@ -723,11 +809,35 @@ void qat_alg_callback(void *resp) | |||
| 723 | areq->base.complete(&areq->base, res); | 809 | areq->base.complete(&areq->base, res); |
| 724 | } | 810 | } |
| 725 | 811 | ||
| 726 | static int qat_alg_dec(struct aead_request *areq) | 812 | static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp, |
| 813 | struct qat_crypto_request *qat_req) | ||
| 814 | { | ||
| 815 | struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx; | ||
| 816 | struct qat_crypto_instance *inst = ctx->inst; | ||
| 817 | struct ablkcipher_request *areq = qat_req->ablkcipher_req; | ||
| 818 | uint8_t stat_filed = qat_resp->comn_resp.comn_status; | ||
| 819 | int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); | ||
| 820 | |||
| 821 | qat_alg_free_bufl(inst, qat_req); | ||
| 822 | if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) | ||
| 823 | res = -EINVAL; | ||
| 824 | areq->base.complete(&areq->base, res); | ||
| 825 | } | ||
| 826 | |||
| 827 | void qat_alg_callback(void *resp) | ||
| 828 | { | ||
| 829 | struct icp_qat_fw_la_resp *qat_resp = resp; | ||
| 830 | struct qat_crypto_request *qat_req = | ||
| 831 | (void *)(__force long)qat_resp->opaque_data; | ||
| 832 | |||
| 833 | qat_req->cb(qat_resp, qat_req); | ||
| 834 | } | ||
| 835 | |||
| 836 | static int qat_alg_aead_dec(struct aead_request *areq) | ||
| 727 | { | 837 | { |
| 728 | struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq); | 838 | struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq); |
| 729 | struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); | 839 | struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); |
| 730 | struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); | 840 | struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); |
| 731 | struct qat_crypto_request *qat_req = aead_request_ctx(areq); | 841 | struct qat_crypto_request *qat_req = aead_request_ctx(areq); |
| 732 | struct icp_qat_fw_la_cipher_req_params *cipher_param; | 842 | struct icp_qat_fw_la_cipher_req_params *cipher_param; |
| 733 | struct icp_qat_fw_la_auth_req_params *auth_param; | 843 | struct icp_qat_fw_la_auth_req_params *auth_param; |
| @@ -741,9 +851,10 @@ static int qat_alg_dec(struct aead_request *areq) | |||
| 741 | return ret; | 851 | return ret; |
| 742 | 852 | ||
| 743 | msg = &qat_req->req; | 853 | msg = &qat_req->req; |
| 744 | *msg = ctx->dec_fw_req_tmpl; | 854 | *msg = ctx->dec_fw_req; |
| 745 | qat_req->ctx = ctx; | 855 | qat_req->aead_ctx = ctx; |
| 746 | qat_req->areq = areq; | 856 | qat_req->aead_req = areq; |
| 857 | qat_req->cb = qat_aead_alg_callback; | ||
| 747 | qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; | 858 | qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; |
| 748 | qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; | 859 | qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; |
| 749 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; | 860 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; |
| @@ -766,12 +877,12 @@ static int qat_alg_dec(struct aead_request *areq) | |||
| 766 | return -EINPROGRESS; | 877 | return -EINPROGRESS; |
| 767 | } | 878 | } |
| 768 | 879 | ||
| 769 | static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv, | 880 | static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv, |
| 770 | int enc_iv) | 881 | int enc_iv) |
| 771 | { | 882 | { |
| 772 | struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq); | 883 | struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq); |
| 773 | struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); | 884 | struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); |
| 774 | struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); | 885 | struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); |
| 775 | struct qat_crypto_request *qat_req = aead_request_ctx(areq); | 886 | struct qat_crypto_request *qat_req = aead_request_ctx(areq); |
| 776 | struct icp_qat_fw_la_cipher_req_params *cipher_param; | 887 | struct icp_qat_fw_la_cipher_req_params *cipher_param; |
| 777 | struct icp_qat_fw_la_auth_req_params *auth_param; | 888 | struct icp_qat_fw_la_auth_req_params *auth_param; |
| @@ -784,9 +895,10 @@ static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv, | |||
| 784 | return ret; | 895 | return ret; |
| 785 | 896 | ||
| 786 | msg = &qat_req->req; | 897 | msg = &qat_req->req; |
| 787 | *msg = ctx->enc_fw_req_tmpl; | 898 | *msg = ctx->enc_fw_req; |
| 788 | qat_req->ctx = ctx; | 899 | qat_req->aead_ctx = ctx; |
| 789 | qat_req->areq = areq; | 900 | qat_req->aead_req = areq; |
| 901 | qat_req->cb = qat_aead_alg_callback; | ||
| 790 | qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; | 902 | qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; |
| 791 | qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; | 903 | qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; |
| 792 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; | 904 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; |
| @@ -815,31 +927,168 @@ static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv, | |||
| 815 | return -EINPROGRESS; | 927 | return -EINPROGRESS; |
| 816 | } | 928 | } |
| 817 | 929 | ||
| 818 | static int qat_alg_enc(struct aead_request *areq) | 930 | static int qat_alg_aead_enc(struct aead_request *areq) |
| 819 | { | 931 | { |
| 820 | return qat_alg_enc_internal(areq, areq->iv, 0); | 932 | return qat_alg_aead_enc_internal(areq, areq->iv, 0); |
| 821 | } | 933 | } |
| 822 | 934 | ||
| 823 | static int qat_alg_genivenc(struct aead_givcrypt_request *req) | 935 | static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req) |
| 824 | { | 936 | { |
| 825 | struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq); | 937 | struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq); |
| 826 | struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); | 938 | struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); |
| 827 | struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); | 939 | struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); |
| 828 | __be64 seq; | 940 | __be64 seq; |
| 829 | 941 | ||
| 830 | memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE); | 942 | memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE); |
| 831 | seq = cpu_to_be64(req->seq); | 943 | seq = cpu_to_be64(req->seq); |
| 832 | memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t), | 944 | memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t), |
| 833 | &seq, sizeof(uint64_t)); | 945 | &seq, sizeof(uint64_t)); |
| 834 | return qat_alg_enc_internal(&req->areq, req->giv, 1); | 946 | return qat_alg_aead_enc_internal(&req->areq, req->giv, 1); |
| 835 | } | 947 | } |
| 836 | 948 | ||
| 837 | static int qat_alg_init(struct crypto_tfm *tfm, | 949 | static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, |
| 838 | enum icp_qat_hw_auth_algo hash, const char *hash_name) | 950 | const uint8_t *key, |
| 951 | unsigned int keylen) | ||
| 839 | { | 952 | { |
| 840 | struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); | 953 | struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); |
| 954 | struct device *dev; | ||
| 955 | |||
| 956 | spin_lock(&ctx->lock); | ||
| 957 | if (ctx->enc_cd) { | ||
| 958 | /* rekeying */ | ||
| 959 | dev = &GET_DEV(ctx->inst->accel_dev); | ||
| 960 | memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); | ||
| 961 | memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); | ||
| 962 | memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); | ||
| 963 | memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); | ||
| 964 | } else { | ||
| 965 | /* new key */ | ||
| 966 | int node = get_current_node(); | ||
| 967 | struct qat_crypto_instance *inst = | ||
| 968 | qat_crypto_get_instance_node(node); | ||
| 969 | if (!inst) { | ||
| 970 | spin_unlock(&ctx->lock); | ||
| 971 | return -EINVAL; | ||
| 972 | } | ||
| 973 | |||
| 974 | dev = &GET_DEV(inst->accel_dev); | ||
| 975 | ctx->inst = inst; | ||
| 976 | ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), | ||
| 977 | &ctx->enc_cd_paddr, | ||
| 978 | GFP_ATOMIC); | ||
| 979 | if (!ctx->enc_cd) { | ||
| 980 | spin_unlock(&ctx->lock); | ||
| 981 | return -ENOMEM; | ||
| 982 | } | ||
| 983 | ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), | ||
| 984 | &ctx->dec_cd_paddr, | ||
| 985 | GFP_ATOMIC); | ||
| 986 | if (!ctx->dec_cd) { | ||
| 987 | spin_unlock(&ctx->lock); | ||
| 988 | goto out_free_enc; | ||
| 989 | } | ||
| 990 | } | ||
| 991 | spin_unlock(&ctx->lock); | ||
| 992 | if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen)) | ||
| 993 | goto out_free_all; | ||
| 994 | |||
| 995 | return 0; | ||
| 996 | |||
| 997 | out_free_all: | ||
| 998 | memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd)); | ||
| 999 | dma_free_coherent(dev, sizeof(*ctx->enc_cd), | ||
| 1000 | ctx->dec_cd, ctx->dec_cd_paddr); | ||
| 1001 | ctx->dec_cd = NULL; | ||
| 1002 | out_free_enc: | ||
| 1003 | memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd)); | ||
| 1004 | dma_free_coherent(dev, sizeof(*ctx->dec_cd), | ||
| 1005 | ctx->enc_cd, ctx->enc_cd_paddr); | ||
| 1006 | ctx->enc_cd = NULL; | ||
| 1007 | return -ENOMEM; | ||
| 1008 | } | ||
| 1009 | |||
| 1010 | static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req) | ||
| 1011 | { | ||
| 1012 | struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); | ||
| 1013 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm); | ||
| 1014 | struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 1015 | struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); | ||
| 1016 | struct icp_qat_fw_la_cipher_req_params *cipher_param; | ||
| 1017 | struct icp_qat_fw_la_bulk_req *msg; | ||
| 1018 | int ret, ctr = 0; | ||
| 1019 | |||
| 1020 | ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst, | ||
| 1021 | NULL, 0, qat_req); | ||
| 1022 | if (unlikely(ret)) | ||
| 1023 | return ret; | ||
| 1024 | |||
| 1025 | msg = &qat_req->req; | ||
| 1026 | *msg = ctx->enc_fw_req; | ||
| 1027 | qat_req->ablkcipher_ctx = ctx; | ||
| 1028 | qat_req->ablkcipher_req = req; | ||
| 1029 | qat_req->cb = qat_ablkcipher_alg_callback; | ||
| 1030 | qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; | ||
| 1031 | qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; | ||
| 1032 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; | ||
| 1033 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; | ||
| 1034 | cipher_param->cipher_length = req->nbytes; | ||
| 1035 | cipher_param->cipher_offset = 0; | ||
| 1036 | memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE); | ||
| 1037 | do { | ||
| 1038 | ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); | ||
| 1039 | } while (ret == -EAGAIN && ctr++ < 10); | ||
| 1040 | |||
| 1041 | if (ret == -EAGAIN) { | ||
| 1042 | qat_alg_free_bufl(ctx->inst, qat_req); | ||
| 1043 | return -EBUSY; | ||
| 1044 | } | ||
| 1045 | return -EINPROGRESS; | ||
| 1046 | } | ||
| 1047 | |||
| 1048 | static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) | ||
| 1049 | { | ||
| 1050 | struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); | ||
| 1051 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm); | ||
| 1052 | struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 1053 | struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); | ||
| 1054 | struct icp_qat_fw_la_cipher_req_params *cipher_param; | ||
| 1055 | struct icp_qat_fw_la_bulk_req *msg; | ||
| 1056 | int ret, ctr = 0; | ||
| 1057 | |||
| 1058 | ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst, | ||
| 1059 | NULL, 0, qat_req); | ||
| 1060 | if (unlikely(ret)) | ||
| 1061 | return ret; | ||
| 1062 | |||
| 1063 | msg = &qat_req->req; | ||
| 1064 | *msg = ctx->dec_fw_req; | ||
| 1065 | qat_req->ablkcipher_ctx = ctx; | ||
| 1066 | qat_req->ablkcipher_req = req; | ||
| 1067 | qat_req->cb = qat_ablkcipher_alg_callback; | ||
| 1068 | qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req; | ||
| 1069 | qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; | ||
| 1070 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; | ||
| 1071 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; | ||
| 1072 | cipher_param->cipher_length = req->nbytes; | ||
| 1073 | cipher_param->cipher_offset = 0; | ||
| 1074 | memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE); | ||
| 1075 | do { | ||
| 1076 | ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); | ||
| 1077 | } while (ret == -EAGAIN && ctr++ < 10); | ||
| 1078 | |||
| 1079 | if (ret == -EAGAIN) { | ||
| 1080 | qat_alg_free_bufl(ctx->inst, qat_req); | ||
| 1081 | return -EBUSY; | ||
| 1082 | } | ||
| 1083 | return -EINPROGRESS; | ||
| 1084 | } | ||
| 1085 | |||
| 1086 | static int qat_alg_aead_init(struct crypto_tfm *tfm, | ||
| 1087 | enum icp_qat_hw_auth_algo hash, | ||
| 1088 | const char *hash_name) | ||
| 1089 | { | ||
| 1090 | struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 841 | 1091 | ||
| 842 | memzero_explicit(ctx, sizeof(*ctx)); | ||
| 843 | ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); | 1092 | ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); |
| 844 | if (IS_ERR(ctx->hash_tfm)) | 1093 | if (IS_ERR(ctx->hash_tfm)) |
| 845 | return -EFAULT; | 1094 | return -EFAULT; |
| @@ -851,24 +1100,24 @@ static int qat_alg_init(struct crypto_tfm *tfm, | |||
| 851 | return 0; | 1100 | return 0; |
| 852 | } | 1101 | } |
| 853 | 1102 | ||
| 854 | static int qat_alg_sha1_init(struct crypto_tfm *tfm) | 1103 | static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm) |
| 855 | { | 1104 | { |
| 856 | return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1"); | 1105 | return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1"); |
| 857 | } | 1106 | } |
| 858 | 1107 | ||
| 859 | static int qat_alg_sha256_init(struct crypto_tfm *tfm) | 1108 | static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm) |
| 860 | { | 1109 | { |
| 861 | return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256"); | 1110 | return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256"); |
| 862 | } | 1111 | } |
| 863 | 1112 | ||
| 864 | static int qat_alg_sha512_init(struct crypto_tfm *tfm) | 1113 | static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm) |
| 865 | { | 1114 | { |
| 866 | return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512"); | 1115 | return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512"); |
| 867 | } | 1116 | } |
| 868 | 1117 | ||
| 869 | static void qat_alg_exit(struct crypto_tfm *tfm) | 1118 | static void qat_alg_aead_exit(struct crypto_tfm *tfm) |
| 870 | { | 1119 | { |
| 871 | struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm); | 1120 | struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); |
| 872 | struct qat_crypto_instance *inst = ctx->inst; | 1121 | struct qat_crypto_instance *inst = ctx->inst; |
| 873 | struct device *dev; | 1122 | struct device *dev; |
| 874 | 1123 | ||
| @@ -880,36 +1129,74 @@ static void qat_alg_exit(struct crypto_tfm *tfm) | |||
| 880 | 1129 | ||
| 881 | dev = &GET_DEV(inst->accel_dev); | 1130 | dev = &GET_DEV(inst->accel_dev); |
| 882 | if (ctx->enc_cd) { | 1131 | if (ctx->enc_cd) { |
| 883 | memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd)); | 1132 | memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd)); |
| 884 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), | 1133 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), |
| 885 | ctx->enc_cd, ctx->enc_cd_paddr); | 1134 | ctx->enc_cd, ctx->enc_cd_paddr); |
| 886 | } | 1135 | } |
| 887 | if (ctx->dec_cd) { | 1136 | if (ctx->dec_cd) { |
| 888 | memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd)); | 1137 | memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd)); |
| 889 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), | 1138 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), |
| 890 | ctx->dec_cd, ctx->dec_cd_paddr); | 1139 | ctx->dec_cd, ctx->dec_cd_paddr); |
| 891 | } | 1140 | } |
| 892 | qat_crypto_put_instance(inst); | 1141 | qat_crypto_put_instance(inst); |
| 893 | } | 1142 | } |
| 894 | 1143 | ||
| 1144 | static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm) | ||
| 1145 | { | ||
| 1146 | struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 1147 | |||
| 1148 | spin_lock_init(&ctx->lock); | ||
| 1149 | tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + | ||
| 1150 | sizeof(struct qat_crypto_request); | ||
| 1151 | ctx->tfm = tfm; | ||
| 1152 | return 0; | ||
| 1153 | } | ||
| 1154 | |||
| 1155 | static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm) | ||
| 1156 | { | ||
| 1157 | struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 1158 | struct qat_crypto_instance *inst = ctx->inst; | ||
| 1159 | struct device *dev; | ||
| 1160 | |||
| 1161 | if (!inst) | ||
| 1162 | return; | ||
| 1163 | |||
| 1164 | dev = &GET_DEV(inst->accel_dev); | ||
| 1165 | if (ctx->enc_cd) { | ||
| 1166 | memset(ctx->enc_cd, 0, | ||
| 1167 | sizeof(struct icp_qat_hw_cipher_algo_blk)); | ||
| 1168 | dma_free_coherent(dev, | ||
| 1169 | sizeof(struct icp_qat_hw_cipher_algo_blk), | ||
| 1170 | ctx->enc_cd, ctx->enc_cd_paddr); | ||
| 1171 | } | ||
| 1172 | if (ctx->dec_cd) { | ||
| 1173 | memset(ctx->dec_cd, 0, | ||
| 1174 | sizeof(struct icp_qat_hw_cipher_algo_blk)); | ||
| 1175 | dma_free_coherent(dev, | ||
| 1176 | sizeof(struct icp_qat_hw_cipher_algo_blk), | ||
| 1177 | ctx->dec_cd, ctx->dec_cd_paddr); | ||
| 1178 | } | ||
| 1179 | qat_crypto_put_instance(inst); | ||
| 1180 | } | ||
| 1181 | |||
| 895 | static struct crypto_alg qat_algs[] = { { | 1182 | static struct crypto_alg qat_algs[] = { { |
| 896 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | 1183 | .cra_name = "authenc(hmac(sha1),cbc(aes))", |
| 897 | .cra_driver_name = "qat_aes_cbc_hmac_sha1", | 1184 | .cra_driver_name = "qat_aes_cbc_hmac_sha1", |
| 898 | .cra_priority = 4001, | 1185 | .cra_priority = 4001, |
| 899 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 1186 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
| 900 | .cra_blocksize = AES_BLOCK_SIZE, | 1187 | .cra_blocksize = AES_BLOCK_SIZE, |
| 901 | .cra_ctxsize = sizeof(struct qat_alg_session_ctx), | 1188 | .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), |
| 902 | .cra_alignmask = 0, | 1189 | .cra_alignmask = 0, |
| 903 | .cra_type = &crypto_aead_type, | 1190 | .cra_type = &crypto_aead_type, |
| 904 | .cra_module = THIS_MODULE, | 1191 | .cra_module = THIS_MODULE, |
| 905 | .cra_init = qat_alg_sha1_init, | 1192 | .cra_init = qat_alg_aead_sha1_init, |
| 906 | .cra_exit = qat_alg_exit, | 1193 | .cra_exit = qat_alg_aead_exit, |
| 907 | .cra_u = { | 1194 | .cra_u = { |
| 908 | .aead = { | 1195 | .aead = { |
| 909 | .setkey = qat_alg_setkey, | 1196 | .setkey = qat_alg_aead_setkey, |
| 910 | .decrypt = qat_alg_dec, | 1197 | .decrypt = qat_alg_aead_dec, |
| 911 | .encrypt = qat_alg_enc, | 1198 | .encrypt = qat_alg_aead_enc, |
| 912 | .givencrypt = qat_alg_genivenc, | 1199 | .givencrypt = qat_alg_aead_genivenc, |
| 913 | .ivsize = AES_BLOCK_SIZE, | 1200 | .ivsize = AES_BLOCK_SIZE, |
| 914 | .maxauthsize = SHA1_DIGEST_SIZE, | 1201 | .maxauthsize = SHA1_DIGEST_SIZE, |
| 915 | }, | 1202 | }, |
| @@ -920,18 +1207,18 @@ static struct crypto_alg qat_algs[] = { { | |||
| 920 | .cra_priority = 4001, | 1207 | .cra_priority = 4001, |
| 921 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 1208 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
| 922 | .cra_blocksize = AES_BLOCK_SIZE, | 1209 | .cra_blocksize = AES_BLOCK_SIZE, |
| 923 | .cra_ctxsize = sizeof(struct qat_alg_session_ctx), | 1210 | .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), |
| 924 | .cra_alignmask = 0, | 1211 | .cra_alignmask = 0, |
| 925 | .cra_type = &crypto_aead_type, | 1212 | .cra_type = &crypto_aead_type, |
| 926 | .cra_module = THIS_MODULE, | 1213 | .cra_module = THIS_MODULE, |
| 927 | .cra_init = qat_alg_sha256_init, | 1214 | .cra_init = qat_alg_aead_sha256_init, |
| 928 | .cra_exit = qat_alg_exit, | 1215 | .cra_exit = qat_alg_aead_exit, |
| 929 | .cra_u = { | 1216 | .cra_u = { |
| 930 | .aead = { | 1217 | .aead = { |
| 931 | .setkey = qat_alg_setkey, | 1218 | .setkey = qat_alg_aead_setkey, |
| 932 | .decrypt = qat_alg_dec, | 1219 | .decrypt = qat_alg_aead_dec, |
| 933 | .encrypt = qat_alg_enc, | 1220 | .encrypt = qat_alg_aead_enc, |
| 934 | .givencrypt = qat_alg_genivenc, | 1221 | .givencrypt = qat_alg_aead_genivenc, |
| 935 | .ivsize = AES_BLOCK_SIZE, | 1222 | .ivsize = AES_BLOCK_SIZE, |
| 936 | .maxauthsize = SHA256_DIGEST_SIZE, | 1223 | .maxauthsize = SHA256_DIGEST_SIZE, |
| 937 | }, | 1224 | }, |
| @@ -942,22 +1229,44 @@ static struct crypto_alg qat_algs[] = { { | |||
| 942 | .cra_priority = 4001, | 1229 | .cra_priority = 4001, |
| 943 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 1230 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, |
| 944 | .cra_blocksize = AES_BLOCK_SIZE, | 1231 | .cra_blocksize = AES_BLOCK_SIZE, |
| 945 | .cra_ctxsize = sizeof(struct qat_alg_session_ctx), | 1232 | .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), |
| 946 | .cra_alignmask = 0, | 1233 | .cra_alignmask = 0, |
| 947 | .cra_type = &crypto_aead_type, | 1234 | .cra_type = &crypto_aead_type, |
| 948 | .cra_module = THIS_MODULE, | 1235 | .cra_module = THIS_MODULE, |
| 949 | .cra_init = qat_alg_sha512_init, | 1236 | .cra_init = qat_alg_aead_sha512_init, |
| 950 | .cra_exit = qat_alg_exit, | 1237 | .cra_exit = qat_alg_aead_exit, |
| 951 | .cra_u = { | 1238 | .cra_u = { |
| 952 | .aead = { | 1239 | .aead = { |
| 953 | .setkey = qat_alg_setkey, | 1240 | .setkey = qat_alg_aead_setkey, |
| 954 | .decrypt = qat_alg_dec, | 1241 | .decrypt = qat_alg_aead_dec, |
| 955 | .encrypt = qat_alg_enc, | 1242 | .encrypt = qat_alg_aead_enc, |
| 956 | .givencrypt = qat_alg_genivenc, | 1243 | .givencrypt = qat_alg_aead_genivenc, |
| 957 | .ivsize = AES_BLOCK_SIZE, | 1244 | .ivsize = AES_BLOCK_SIZE, |
| 958 | .maxauthsize = SHA512_DIGEST_SIZE, | 1245 | .maxauthsize = SHA512_DIGEST_SIZE, |
| 959 | }, | 1246 | }, |
| 960 | }, | 1247 | }, |
| 1248 | }, { | ||
| 1249 | .cra_name = "cbc(aes)", | ||
| 1250 | .cra_driver_name = "qat_aes_cbc", | ||
| 1251 | .cra_priority = 4001, | ||
| 1252 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 1253 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 1254 | .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx), | ||
| 1255 | .cra_alignmask = 0, | ||
| 1256 | .cra_type = &crypto_ablkcipher_type, | ||
| 1257 | .cra_module = THIS_MODULE, | ||
| 1258 | .cra_init = qat_alg_ablkcipher_init, | ||
| 1259 | .cra_exit = qat_alg_ablkcipher_exit, | ||
| 1260 | .cra_u = { | ||
| 1261 | .ablkcipher = { | ||
| 1262 | .setkey = qat_alg_ablkcipher_setkey, | ||
| 1263 | .decrypt = qat_alg_ablkcipher_decrypt, | ||
| 1264 | .encrypt = qat_alg_ablkcipher_encrypt, | ||
| 1265 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 1266 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 1267 | .ivsize = AES_BLOCK_SIZE, | ||
| 1268 | }, | ||
| 1269 | }, | ||
| 961 | } }; | 1270 | } }; |
| 962 | 1271 | ||
| 963 | int qat_algs_register(void) | 1272 | int qat_algs_register(void) |
| @@ -966,8 +1275,11 @@ int qat_algs_register(void) | |||
| 966 | int i; | 1275 | int i; |
| 967 | 1276 | ||
| 968 | for (i = 0; i < ARRAY_SIZE(qat_algs); i++) | 1277 | for (i = 0; i < ARRAY_SIZE(qat_algs); i++) |
| 969 | qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD | | 1278 | qat_algs[i].cra_flags = |
| 970 | CRYPTO_ALG_ASYNC; | 1279 | (qat_algs[i].cra_type == &crypto_aead_type) ? |
| 1280 | CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC : | ||
| 1281 | CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; | ||
| 1282 | |||
| 971 | return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); | 1283 | return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); |
| 972 | } | 1284 | } |
| 973 | return 0; | 1285 | return 0; |
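Note: the qat_algs.c hunks above factor the AES key-length check out into qat_alg_validate_key() so the authenc setkey path and the new cbc(aes) ablkcipher setkey path share one validator. The following is a minimal standalone sketch of that pattern only; the constants and names (demo_*) are illustrative stand-ins, not the driver's headers or APIs.

#include <stdio.h>

/* Illustrative stand-ins for the driver's cipher-algorithm identifiers. */
enum demo_cipher_alg { DEMO_AES128, DEMO_AES192, DEMO_AES256 };

static int demo_validate_key(int key_len, enum demo_cipher_alg *alg)
{
	switch (key_len) {
	case 16: *alg = DEMO_AES128; break;
	case 24: *alg = DEMO_AES192; break;
	case 32: *alg = DEMO_AES256; break;
	default: return -1;	/* reject anything that is not an AES key size */
	}
	return 0;
}

int main(void)
{
	enum demo_cipher_alg alg;

	/* Both setkey paths would call the shared helper before building
	 * their encrypt/decrypt content descriptors. */
	if (demo_validate_key(32, &alg) == 0)
		printf("key accepted, alg id %d\n", (int)alg);
	return 0;
}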
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h index ab8468d11ddb..d503007b49e6 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.h +++ b/drivers/crypto/qat/qat_common/qat_crypto.h | |||
| @@ -72,12 +72,24 @@ struct qat_crypto_request_buffs { | |||
| 72 | struct qat_alg_buf_list *blout; | 72 | struct qat_alg_buf_list *blout; |
| 73 | dma_addr_t bloutp; | 73 | dma_addr_t bloutp; |
| 74 | size_t sz; | 74 | size_t sz; |
| 75 | size_t sz_out; | ||
| 75 | }; | 76 | }; |
| 76 | 77 | ||
| 78 | struct qat_crypto_request; | ||
| 79 | |||
| 77 | struct qat_crypto_request { | 80 | struct qat_crypto_request { |
| 78 | struct icp_qat_fw_la_bulk_req req; | 81 | struct icp_qat_fw_la_bulk_req req; |
| 79 | struct qat_alg_session_ctx *ctx; | 82 | union { |
| 80 | struct aead_request *areq; | 83 | struct qat_alg_aead_ctx *aead_ctx; |
| 84 | struct qat_alg_ablkcipher_ctx *ablkcipher_ctx; | ||
| 85 | }; | ||
| 86 | union { | ||
| 87 | struct aead_request *aead_req; | ||
| 88 | struct ablkcipher_request *ablkcipher_req; | ||
| 89 | }; | ||
| 81 | struct qat_crypto_request_buffs buf; | 90 | struct qat_crypto_request_buffs buf; |
| 91 | void (*cb)(struct icp_qat_fw_la_resp *resp, | ||
| 92 | struct qat_crypto_request *req); | ||
| 82 | }; | 93 | }; |
| 94 | |||
| 83 | #endif | 95 | #endif |
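Note: the qat_crypto.h change above replaces the single AEAD context/request pair in struct qat_crypto_request with unions plus a per-request completion callback, so qat_alg_callback() no longer needs to know which algorithm type issued the request. Below is a simplified, self-contained sketch of that dispatch pattern; the demo_* structs and functions are made-up illustrations, not the driver's types.

#include <stdio.h>

struct demo_response { int status; };

struct demo_request {
	/* Each submitter installs its own callback; the completion path
	 * invokes it without inspecting the request type. */
	void (*cb)(struct demo_response *resp, struct demo_request *req);
	union {
		int aead_data;
		int blkcipher_data;
	};
};

static void demo_aead_done(struct demo_response *resp, struct demo_request *req)
{
	printf("aead request %d done, status %d\n", req->aead_data, resp->status);
}

static void demo_blkcipher_done(struct demo_response *resp, struct demo_request *req)
{
	printf("blkcipher request %d done, status %d\n", req->blkcipher_data, resp->status);
}

/* Generic completion handler, analogous in shape to qat_alg_callback(). */
static void demo_callback(struct demo_response *resp, struct demo_request *req)
{
	req->cb(resp, req);
}

int main(void)
{
	struct demo_response ok = { .status = 0 };
	struct demo_request r1 = { .cb = demo_aead_done, .aead_data = 1 };
	struct demo_request r2 = { .cb = demo_blkcipher_done, .blkcipher_data = 2 };

	demo_callback(&ok, &r1);
	demo_callback(&ok, &r2);
	return 0;
}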
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index ef05825cc651..6a735d5c0e37 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | |||
| @@ -46,6 +46,7 @@ | |||
| 46 | */ | 46 | */ |
| 47 | #include <adf_accel_devices.h> | 47 | #include <adf_accel_devices.h> |
| 48 | #include "adf_dh895xcc_hw_data.h" | 48 | #include "adf_dh895xcc_hw_data.h" |
| 49 | #include "adf_common_drv.h" | ||
| 49 | #include "adf_drv.h" | 50 | #include "adf_drv.h" |
| 50 | 51 | ||
| 51 | /* Worker thread to service arbiter mappings based on dev SKUs */ | 52 | /* Worker thread to service arbiter mappings based on dev SKUs */ |
| @@ -182,6 +183,19 @@ static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) | |||
| 182 | } | 183 | } |
| 183 | } | 184 | } |
| 184 | 185 | ||
| 186 | static void adf_enable_ints(struct adf_accel_dev *accel_dev) | ||
| 187 | { | ||
| 188 | void __iomem *addr; | ||
| 189 | |||
| 190 | addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr; | ||
| 191 | |||
| 192 | /* Enable bundle and misc interrupts */ | ||
| 193 | ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET, | ||
| 194 | ADF_DH895XCC_SMIA0_MASK); | ||
| 195 | ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET, | ||
| 196 | ADF_DH895XCC_SMIA1_MASK); | ||
| 197 | } | ||
| 198 | |||
| 185 | void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) | 199 | void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) |
| 186 | { | 200 | { |
| 187 | hw_data->dev_class = &dh895xcc_class; | 201 | hw_data->dev_class = &dh895xcc_class; |
| @@ -206,6 +220,11 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) | |||
| 206 | hw_data->get_misc_bar_id = get_misc_bar_id; | 220 | hw_data->get_misc_bar_id = get_misc_bar_id; |
| 207 | hw_data->get_sku = get_sku; | 221 | hw_data->get_sku = get_sku; |
| 208 | hw_data->fw_name = ADF_DH895XCC_FW; | 222 | hw_data->fw_name = ADF_DH895XCC_FW; |
| 223 | hw_data->init_admin_comms = adf_init_admin_comms; | ||
| 224 | hw_data->exit_admin_comms = adf_exit_admin_comms; | ||
| 225 | hw_data->init_arb = adf_init_arb; | ||
| 226 | hw_data->exit_arb = adf_exit_arb; | ||
| 227 | hw_data->enable_ints = adf_enable_ints; | ||
| 209 | } | 228 | } |
| 210 | 229 | ||
| 211 | void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) | 230 | void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) |
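Note: the adf_dh895xcc_hw_data.c hunks above move the admin-comms, arbiter and interrupt-enable routines into the per-device hw_data ops table so common init/shutdown code, rather than the PCI probe path, drives them. A rough sketch of that table-of-callbacks pattern follows; the demo_* names stand in for the adf_hw_device_data hooks and are not the driver's actual symbols.

#include <stdio.h>

struct demo_hw_ops {
	int  (*init_admin)(void);
	void (*enable_ints)(void);
};

static int demo_init_admin(void)   { printf("admin comms up\n"); return 0; }
static void demo_enable_ints(void) { printf("bundle + misc interrupts enabled\n"); }

/* Device-specific setup only fills in the ops table ... */
static void demo_init_hw_data(struct demo_hw_ops *ops)
{
	ops->init_admin = demo_init_admin;
	ops->enable_ints = demo_enable_ints;
}

/* ... and common bring-up code calls whichever hooks are present. */
static int demo_dev_init(const struct demo_hw_ops *ops)
{
	if (ops->init_admin && ops->init_admin())
		return -1;
	if (ops->enable_ints)
		ops->enable_ints();
	return 0;
}

int main(void)
{
	struct demo_hw_ops ops = { 0 };

	demo_init_hw_data(&ops);
	return demo_dev_init(&ops);
}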
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index 948f66be262b..8ffdb95c9804 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c | |||
| @@ -90,9 +90,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) | |||
| 90 | struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; | 90 | struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; |
| 91 | int i; | 91 | int i; |
| 92 | 92 | ||
| 93 | adf_exit_admin_comms(accel_dev); | 93 | adf_dev_shutdown(accel_dev); |
| 94 | adf_exit_arb(accel_dev); | ||
| 95 | adf_cleanup_etr_data(accel_dev); | ||
| 96 | 94 | ||
| 97 | for (i = 0; i < ADF_PCI_MAX_BARS; i++) { | 95 | for (i = 0; i < ADF_PCI_MAX_BARS; i++) { |
| 98 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; | 96 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; |
| @@ -119,7 +117,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) | |||
| 119 | kfree(accel_dev); | 117 | kfree(accel_dev); |
| 120 | } | 118 | } |
| 121 | 119 | ||
| 122 | static int qat_dev_start(struct adf_accel_dev *accel_dev) | 120 | static int adf_dev_configure(struct adf_accel_dev *accel_dev) |
| 123 | { | 121 | { |
| 124 | int cpus = num_online_cpus(); | 122 | int cpus = num_online_cpus(); |
| 125 | int banks = GET_MAX_BANKS(accel_dev); | 123 | int banks = GET_MAX_BANKS(accel_dev); |
| @@ -206,7 +204,7 @@ static int qat_dev_start(struct adf_accel_dev *accel_dev) | |||
| 206 | goto err; | 204 | goto err; |
| 207 | 205 | ||
| 208 | set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); | 206 | set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); |
| 209 | return adf_dev_start(accel_dev); | 207 | return 0; |
| 210 | err: | 208 | err: |
| 211 | dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n"); | 209 | dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n"); |
| 212 | return -EINVAL; | 210 | return -EINVAL; |
| @@ -217,7 +215,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 217 | struct adf_accel_dev *accel_dev; | 215 | struct adf_accel_dev *accel_dev; |
| 218 | struct adf_accel_pci *accel_pci_dev; | 216 | struct adf_accel_pci *accel_pci_dev; |
| 219 | struct adf_hw_device_data *hw_data; | 217 | struct adf_hw_device_data *hw_data; |
| 220 | void __iomem *pmisc_bar_addr = NULL; | ||
| 221 | char name[ADF_DEVICE_NAME_LENGTH]; | 218 | char name[ADF_DEVICE_NAME_LENGTH]; |
| 222 | unsigned int i, bar_nr; | 219 | unsigned int i, bar_nr; |
| 223 | int ret; | 220 | int ret; |
| @@ -347,8 +344,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 347 | ret = -EFAULT; | 344 | ret = -EFAULT; |
| 348 | goto out_err; | 345 | goto out_err; |
| 349 | } | 346 | } |
| 350 | if (i == ADF_DH895XCC_PMISC_BAR) | ||
| 351 | pmisc_bar_addr = bar->virt_addr; | ||
| 352 | } | 347 | } |
| 353 | pci_set_master(pdev); | 348 | pci_set_master(pdev); |
| 354 | 349 | ||
| @@ -358,36 +353,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 358 | goto out_err; | 353 | goto out_err; |
| 359 | } | 354 | } |
| 360 | 355 | ||
| 361 | if (adf_init_etr_data(accel_dev)) { | ||
| 362 | dev_err(&pdev->dev, "Failed initialize etr\n"); | ||
| 363 | ret = -EFAULT; | ||
| 364 | goto out_err; | ||
| 365 | } | ||
| 366 | |||
| 367 | if (adf_init_admin_comms(accel_dev)) { | ||
| 368 | dev_err(&pdev->dev, "Failed initialize admin comms\n"); | ||
| 369 | ret = -EFAULT; | ||
| 370 | goto out_err; | ||
| 371 | } | ||
| 372 | |||
| 373 | if (adf_init_arb(accel_dev)) { | ||
| 374 | dev_err(&pdev->dev, "Failed initialize hw arbiter\n"); | ||
| 375 | ret = -EFAULT; | ||
| 376 | goto out_err; | ||
| 377 | } | ||
| 378 | if (pci_save_state(pdev)) { | 356 | if (pci_save_state(pdev)) { |
| 379 | dev_err(&pdev->dev, "Failed to save pci state\n"); | 357 | dev_err(&pdev->dev, "Failed to save pci state\n"); |
| 380 | ret = -ENOMEM; | 358 | ret = -ENOMEM; |
| 381 | goto out_err; | 359 | goto out_err; |
| 382 | } | 360 | } |
| 383 | 361 | ||
| 384 | /* Enable bundle and misc interrupts */ | 362 | ret = adf_dev_configure(accel_dev); |
| 385 | ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET, | 363 | if (ret) |
| 386 | ADF_DH895XCC_SMIA0_MASK); | 364 | goto out_err; |
| 387 | ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET, | 365 | |
| 388 | ADF_DH895XCC_SMIA1_MASK); | 366 | ret = adf_dev_init(accel_dev); |
| 367 | if (ret) | ||
| 368 | goto out_err; | ||
| 389 | 369 | ||
| 390 | ret = qat_dev_start(accel_dev); | 370 | ret = adf_dev_start(accel_dev); |
| 391 | if (ret) { | 371 | if (ret) { |
| 392 | adf_dev_stop(accel_dev); | 372 | adf_dev_stop(accel_dev); |
| 393 | goto out_err; | 373 | goto out_err; |
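After this change the tail of adf_probe() reduces to three calls: write the device configuration, run the common init path, then start the device. A condensed view of that control flow (not the full probe function, which also maps BARs and saves PCI state):

/* Condensed sketch of the probe sequence introduced by this patch. */
static int demo_probe_tail(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_dev_configure(accel_dev);	/* per-device cfg entries */
	if (ret)
		return ret;

	ret = adf_dev_init(accel_dev);		/* common init via hw_data hooks */
	if (ret)
		return ret;

	ret = adf_dev_start(accel_dev);
	if (ret)
		adf_dev_stop(accel_dev);
	return ret;
}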
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c index 0fb21e13f247..378cb768647f 100644 --- a/drivers/crypto/qce/dma.c +++ b/drivers/crypto/qce/dma.c | |||
| @@ -64,7 +64,7 @@ int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents, | |||
| 64 | err = dma_map_sg(dev, sg, 1, dir); | 64 | err = dma_map_sg(dev, sg, 1, dir); |
| 65 | if (!err) | 65 | if (!err) |
| 66 | return -EFAULT; | 66 | return -EFAULT; |
| 67 | sg = scatterwalk_sg_next(sg); | 67 | sg = sg_next(sg); |
| 68 | } | 68 | } |
| 69 | } else { | 69 | } else { |
| 70 | err = dma_map_sg(dev, sg, nents, dir); | 70 | err = dma_map_sg(dev, sg, nents, dir); |
| @@ -81,7 +81,7 @@ void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents, | |||
| 81 | if (chained) | 81 | if (chained) |
| 82 | while (sg) { | 82 | while (sg) { |
| 83 | dma_unmap_sg(dev, sg, 1, dir); | 83 | dma_unmap_sg(dev, sg, 1, dir); |
| 84 | sg = scatterwalk_sg_next(sg); | 84 | sg = sg_next(sg); |
| 85 | } | 85 | } |
| 86 | else | 86 | else |
| 87 | dma_unmap_sg(dev, sg, nents, dir); | 87 | dma_unmap_sg(dev, sg, nents, dir); |
| @@ -100,7 +100,7 @@ int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained) | |||
| 100 | nbytes -= sg->length; | 100 | nbytes -= sg->length; |
| 101 | if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained) | 101 | if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained) |
| 102 | *chained = true; | 102 | *chained = true; |
| 103 | sg = scatterwalk_sg_next(sg); | 103 | sg = sg_next(sg); |
| 104 | } | 104 | } |
| 105 | 105 | ||
| 106 | return nents; | 106 | return nents; |
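The driver-local scatterwalk helper is being replaced with sg_next() from <linux/scatterlist.h>, which already knows how to step across chained scatterlist entries. A minimal sketch of the resulting walk, mirroring what qce_countsg() does above:

#include <linux/scatterlist.h>

/* Count how many scatterlist entries are needed to cover nbytes;
 * sg_next() transparently follows chain entries. */
static int demo_count_sg(struct scatterlist *sg, int nbytes)
{
	int nents = 0;

	while (nbytes > 0 && sg) {
		nbytes -= sg->length;
		nents++;
		sg = sg_next(sg);
	}
	return nents;
}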
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index f3385934eed2..5c5df1d17f90 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c | |||
| @@ -285,7 +285,7 @@ static int qce_ahash_update(struct ahash_request *req) | |||
| 285 | break; | 285 | break; |
| 286 | len += sg_dma_len(sg); | 286 | len += sg_dma_len(sg); |
| 287 | sg_last = sg; | 287 | sg_last = sg; |
| 288 | sg = scatterwalk_sg_next(sg); | 288 | sg = sg_next(sg); |
| 289 | } | 289 | } |
| 290 | 290 | ||
| 291 | if (!sg_last) | 291 | if (!sg_last) |
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 220b92f7eabc..290a7f0a681f 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c | |||
| @@ -940,7 +940,7 @@ static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes) | |||
| 940 | break; | 940 | break; |
| 941 | } | 941 | } |
| 942 | nbytes -= sg->length; | 942 | nbytes -= sg->length; |
| 943 | sg = scatterwalk_sg_next(sg); | 943 | sg = sg_next(sg); |
| 944 | } | 944 | } |
| 945 | 945 | ||
| 946 | return nbytes; | 946 | return nbytes; |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 067ec2193d71..ebbae8d3ce0d 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
| @@ -743,7 +743,7 @@ static int talitos_map_sg(struct device *dev, struct scatterlist *sg, | |||
| 743 | if (unlikely(chained)) | 743 | if (unlikely(chained)) |
| 744 | while (sg) { | 744 | while (sg) { |
| 745 | dma_map_sg(dev, sg, 1, dir); | 745 | dma_map_sg(dev, sg, 1, dir); |
| 746 | sg = scatterwalk_sg_next(sg); | 746 | sg = sg_next(sg); |
| 747 | } | 747 | } |
| 748 | else | 748 | else |
| 749 | dma_map_sg(dev, sg, nents, dir); | 749 | dma_map_sg(dev, sg, nents, dir); |
| @@ -755,7 +755,7 @@ static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg, | |||
| 755 | { | 755 | { |
| 756 | while (sg) { | 756 | while (sg) { |
| 757 | dma_unmap_sg(dev, sg, 1, dir); | 757 | dma_unmap_sg(dev, sg, 1, dir); |
| 758 | sg = scatterwalk_sg_next(sg); | 758 | sg = sg_next(sg); |
| 759 | } | 759 | } |
| 760 | } | 760 | } |
| 761 | 761 | ||
| @@ -915,7 +915,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, | |||
| 915 | link_tbl_ptr->j_extent = 0; | 915 | link_tbl_ptr->j_extent = 0; |
| 916 | link_tbl_ptr++; | 916 | link_tbl_ptr++; |
| 917 | cryptlen -= sg_dma_len(sg); | 917 | cryptlen -= sg_dma_len(sg); |
| 918 | sg = scatterwalk_sg_next(sg); | 918 | sg = sg_next(sg); |
| 919 | } | 919 | } |
| 920 | 920 | ||
| 921 | /* adjust (decrease) last one (or two) entry's len to cryptlen */ | 921 | /* adjust (decrease) last one (or two) entry's len to cryptlen */ |
| @@ -1102,7 +1102,7 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained) | |||
| 1102 | nbytes -= sg->length; | 1102 | nbytes -= sg->length; |
| 1103 | if (!sg_is_last(sg) && (sg + 1)->length == 0) | 1103 | if (!sg_is_last(sg) && (sg + 1)->length == 0) |
| 1104 | *chained = true; | 1104 | *chained = true; |
| 1105 | sg = scatterwalk_sg_next(sg); | 1105 | sg = sg_next(sg); |
| 1106 | } | 1106 | } |
| 1107 | 1107 | ||
| 1108 | return sg_nents; | 1108 | return sg_nents; |
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index f831bb952b2f..d594ae962ed2 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c | |||
| @@ -479,13 +479,13 @@ static void cryp_dma_setup_channel(struct cryp_device_data *device_data, | |||
| 479 | .dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO, | 479 | .dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO, |
| 480 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, | 480 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, |
| 481 | .dst_maxburst = 4, | 481 | .dst_maxburst = 4, |
| 482 | }; | 482 | }; |
| 483 | struct dma_slave_config cryp2mem = { | 483 | struct dma_slave_config cryp2mem = { |
| 484 | .direction = DMA_DEV_TO_MEM, | 484 | .direction = DMA_DEV_TO_MEM, |
| 485 | .src_addr = device_data->phybase + CRYP_DMA_RX_FIFO, | 485 | .src_addr = device_data->phybase + CRYP_DMA_RX_FIFO, |
| 486 | .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, | 486 | .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, |
| 487 | .src_maxburst = 4, | 487 | .src_maxburst = 4, |
| 488 | }; | 488 | }; |
| 489 | 489 | ||
| 490 | dma_cap_zero(device_data->dma.mask); | 490 | dma_cap_zero(device_data->dma.mask); |
| 491 | dma_cap_set(DMA_SLAVE, device_data->dma.mask); | 491 | dma_cap_set(DMA_SLAVE, device_data->dma.mask); |
| @@ -814,7 +814,7 @@ static int get_nents(struct scatterlist *sg, int nbytes) | |||
| 814 | 814 | ||
| 815 | while (nbytes > 0) { | 815 | while (nbytes > 0) { |
| 816 | nbytes -= sg->length; | 816 | nbytes -= sg->length; |
| 817 | sg = scatterwalk_sg_next(sg); | 817 | sg = sg_next(sg); |
| 818 | nents++; | 818 | nents++; |
| 819 | } | 819 | } |
| 820 | 820 | ||
| @@ -1774,8 +1774,8 @@ static int ux500_cryp_resume(struct device *dev) | |||
| 1774 | static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume); | 1774 | static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume); |
| 1775 | 1775 | ||
| 1776 | static const struct of_device_id ux500_cryp_match[] = { | 1776 | static const struct of_device_id ux500_cryp_match[] = { |
| 1777 | { .compatible = "stericsson,ux500-cryp" }, | 1777 | { .compatible = "stericsson,ux500-cryp" }, |
| 1778 | { }, | 1778 | { }, |
| 1779 | }; | 1779 | }; |
| 1780 | 1780 | ||
| 1781 | static struct platform_driver cryp_driver = { | 1781 | static struct platform_driver cryp_driver = { |
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 88ea64e9a91c..178525e5f430 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h | |||
| @@ -50,6 +50,7 @@ struct af_alg_type { | |||
| 50 | void (*release)(void *private); | 50 | void (*release)(void *private); |
| 51 | int (*setkey)(void *private, const u8 *key, unsigned int keylen); | 51 | int (*setkey)(void *private, const u8 *key, unsigned int keylen); |
| 52 | int (*accept)(void *private, struct sock *sk); | 52 | int (*accept)(void *private, struct sock *sk); |
| 53 | int (*setauthsize)(void *private, unsigned int authsize); | ||
| 53 | 54 | ||
| 54 | struct proto_ops *ops; | 55 | struct proto_ops *ops; |
| 55 | struct module *owner; | 56 | struct module *owner; |
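The new setauthsize hook gives an af_alg type a way to pass a user-requested authentication-tag size down to the underlying transform. A hedged sketch of what an AEAD type's hook could look like, assuming 'private' is the crypto_aead handle created by the type's bind path (the function name is made up):

#include <crypto/aead.h>

/* Hypothetical hook body for an AEAD af_alg type. */
static int demo_aead_setauthsize(void *private, unsigned int authsize)
{
	return crypto_aead_setauthsize(private, authsize);
}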
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 7ef512f8631c..20e4226a2e14 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h | |||
| @@ -33,21 +33,13 @@ static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num, | |||
| 33 | sg1[num - 1].page_link |= 0x01; | 33 | sg1[num - 1].page_link |= 0x01; |
| 34 | } | 34 | } |
| 35 | 35 | ||
| 36 | static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg) | ||
| 37 | { | ||
| 38 | if (sg_is_last(sg)) | ||
| 39 | return NULL; | ||
| 40 | |||
| 41 | return (++sg)->length ? sg : sg_chain_ptr(sg); | ||
| 42 | } | ||
| 43 | |||
| 44 | static inline void scatterwalk_crypto_chain(struct scatterlist *head, | 36 | static inline void scatterwalk_crypto_chain(struct scatterlist *head, |
| 45 | struct scatterlist *sg, | 37 | struct scatterlist *sg, |
| 46 | int chain, int num) | 38 | int chain, int num) |
| 47 | { | 39 | { |
| 48 | if (chain) { | 40 | if (chain) { |
| 49 | head->length += sg->length; | 41 | head->length += sg->length; |
| 50 | sg = scatterwalk_sg_next(sg); | 42 | sg = sg_next(sg); |
| 51 | } | 43 | } |
| 52 | 44 | ||
| 53 | if (sg) | 45 | if (sg) |
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 9c8776d0ada8..fb5ef16d6a12 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
| @@ -1147,7 +1147,7 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req) | |||
| 1147 | * cipher operation completes. | 1147 | * cipher operation completes. |
| 1148 | * | 1148 | * |
| 1149 | * The callback function is registered with the ablkcipher_request handle and | 1149 | * The callback function is registered with the ablkcipher_request handle and |
| 1150 | * must comply with the following template: | 1150 | * must comply with the following template |
| 1151 | * | 1151 | * |
| 1152 | * void callback_function(struct crypto_async_request *req, int error) | 1152 | * void callback_function(struct crypto_async_request *req, int error) |
| 1153 | */ | 1153 | */ |
| @@ -1174,7 +1174,7 @@ static inline void ablkcipher_request_set_callback( | |||
| 1174 | * | 1174 | * |
| 1175 | * For encryption, the source is treated as the plaintext and the | 1175 | * For encryption, the source is treated as the plaintext and the |
| 1176 | * destination is the ciphertext. For a decryption operation, the use is | 1176 | * destination is the ciphertext. For a decryption operation, the use is |
| 1177 | * reversed: the source is the ciphertext and the destination is the plaintext. | 1177 | * reversed - the source is the ciphertext and the destination is the plaintext. |
| 1178 | */ | 1178 | */ |
| 1179 | static inline void ablkcipher_request_set_crypt( | 1179 | static inline void ablkcipher_request_set_crypt( |
| 1180 | struct ablkcipher_request *req, | 1180 | struct ablkcipher_request *req, |
| @@ -1412,6 +1412,9 @@ static inline int crypto_aead_encrypt(struct aead_request *req) | |||
| 1412 | */ | 1412 | */ |
| 1413 | static inline int crypto_aead_decrypt(struct aead_request *req) | 1413 | static inline int crypto_aead_decrypt(struct aead_request *req) |
| 1414 | { | 1414 | { |
| 1415 | if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req))) | ||
| 1416 | return -EINVAL; | ||
| 1417 | |||
| 1415 | return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); | 1418 | return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); |
| 1416 | } | 1419 | } |
| 1417 | 1420 | ||
| @@ -1506,7 +1509,7 @@ static inline void aead_request_free(struct aead_request *req) | |||
| 1506 | * completes | 1509 | * completes |
| 1507 | * | 1510 | * |
| 1508 | * The callback function is registered with the aead_request handle and | 1511 | * The callback function is registered with the aead_request handle and |
| 1509 | * must comply with the following template: | 1512 | * must comply with the following template |
| 1510 | * | 1513 | * |
| 1511 | * void callback_function(struct crypto_async_request *req, int error) | 1514 | * void callback_function(struct crypto_async_request *req, int error) |
| 1512 | */ | 1515 | */ |
| @@ -1533,7 +1536,7 @@ static inline void aead_request_set_callback(struct aead_request *req, | |||
| 1533 | * | 1536 | * |
| 1534 | * For encryption, the source is treated as the plaintext and the | 1537 | * For encryption, the source is treated as the plaintext and the |
| 1535 | * destination is the ciphertext. For a decryption operation, the use is | 1538 | * destination is the ciphertext. For a decryption operation, the use is |
| 1536 | * reversed: the source is the ciphertext and the destination is the plaintext. | 1539 | * reversed - the source is the ciphertext and the destination is the plaintext. |
| 1537 | * | 1540 | * |
| 1538 | * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption, | 1541 | * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption, |
| 1539 | * the caller must concatenate the ciphertext followed by the | 1542 | * the caller must concatenate the ciphertext followed by the |
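With the added check, crypto_aead_decrypt() rejects a request whose cryptlen cannot even contain the authentication tag before the driver's ->decrypt() is ever invoked. A small caller-side illustration, assuming a transform whose authsize is 16 bytes (the helper and buffer names are made up):

/* Illustration: 10 bytes of "ciphertext" is shorter than the 16-byte tag,
 * so the inline wrapper returns -EINVAL without calling the driver. */
static int demo_short_decrypt(struct aead_request *req,
			      struct scatterlist *src,
			      struct scatterlist *dst, u8 *iv)
{
	aead_request_set_crypt(req, src, dst, 10, iv);
	return crypto_aead_decrypt(req);
}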
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 914bb08cd738..eb7b414d232b 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h | |||
| @@ -12,8 +12,10 @@ | |||
| 12 | #ifndef LINUX_HWRANDOM_H_ | 12 | #ifndef LINUX_HWRANDOM_H_ |
| 13 | #define LINUX_HWRANDOM_H_ | 13 | #define LINUX_HWRANDOM_H_ |
| 14 | 14 | ||
| 15 | #include <linux/completion.h> | ||
| 15 | #include <linux/types.h> | 16 | #include <linux/types.h> |
| 16 | #include <linux/list.h> | 17 | #include <linux/list.h> |
| 18 | #include <linux/kref.h> | ||
| 17 | 19 | ||
| 18 | /** | 20 | /** |
| 19 | * struct hwrng - Hardware Random Number Generator driver | 21 | * struct hwrng - Hardware Random Number Generator driver |
| @@ -44,6 +46,8 @@ struct hwrng { | |||
| 44 | 46 | ||
| 45 | /* internal. */ | 47 | /* internal. */ |
| 46 | struct list_head list; | 48 | struct list_head list; |
| 49 | struct kref ref; | ||
| 50 | struct completion cleanup_done; | ||
| 47 | }; | 51 | }; |
| 48 | 52 | ||
| 49 | /** Register a new Hardware Random Number Generator driver. */ | 53 | /** Register a new Hardware Random Number Generator driver. */ |
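The kref and completion added to struct hwrng let the core hold the generator alive while it is being read and let unregistration wait until the last user drops its reference. A hedged sketch of that pattern (helper names are hypothetical; the actual core code may differ):

#include <linux/hw_random.h>
#include <linux/kref.h>
#include <linux/completion.h>

/* Hypothetical release callback: wake the unregister path once the last
 * reference to the generator is gone. */
static void demo_hwrng_release(struct kref *ref)
{
	struct hwrng *rng = container_of(ref, struct hwrng, ref);

	complete(&rng->cleanup_done);
}

static void demo_hwrng_put(struct hwrng *rng)
{
	kref_put(&rng->ref, demo_hwrng_release);
}

/* Hypothetical unregister tail: drop our reference, then block until the
 * last reader finishes. */
static void demo_hwrng_wait_for_users(struct hwrng *rng)
{
	demo_hwrng_put(rng);
	wait_for_completion(&rng->cleanup_done);
}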
diff --git a/lib/string.c b/lib/string.c index cdd97f431ae2..ce81aaec3839 100644 --- a/lib/string.c +++ b/lib/string.c | |||
| @@ -596,6 +596,11 @@ EXPORT_SYMBOL(memset); | |||
| 596 | * @s: Pointer to the start of the area. | 596 | * @s: Pointer to the start of the area. |
| 597 | * @count: The size of the area. | 597 | * @count: The size of the area. |
| 598 | * | 598 | * |
| 599 | * Note: usually using memset() is just fine (!), but in cases | ||
| 600 | * where clearing out _local_ data at the end of a scope is | ||
| 601 | * necessary, memzero_explicit() should be used instead in | ||
| 602 | * order to prevent the compiler from optimising away zeroing. | ||
| 603 | * | ||
| 599 | * memzero_explicit() doesn't need an arch-specific version as | 604 | * memzero_explicit() doesn't need an arch-specific version as |
| 600 | * it just invokes the one of memset() implicitly. | 605 | * it just invokes the one of memset() implicitly. |
| 601 | */ | 606 | */ |
