-rw-r--r--   crypto/hmac.c                     2
-rw-r--r--   drivers/crypto/caam/ctrl.c        2
-rw-r--r--   drivers/crypto/vmx/aesp8-ppc.pl   2
-rw-r--r--   drivers/crypto/vmx/ghash.c      211
-rw-r--r--   include/crypto/hash.h             8
5 files changed, 97 insertions, 128 deletions
diff --git a/crypto/hmac.c b/crypto/hmac.c
index a68c1266121f..241b1868c1d0 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -157,6 +157,8 @@ static int hmac_init_tfm(struct crypto_tfm *tfm)
 
         parent->descsize = sizeof(struct shash_desc) +
                            crypto_shash_descsize(hash);
+        if (WARN_ON(parent->descsize > HASH_MAX_DESCSIZE))
+                return -EINVAL;
 
         ctx->hash = hash;
         return 0;
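Note: the new check matters because shash descriptors are routinely placed on the stack with a fixed worst-case size (see the HASH_MAX_DESCSIZE / SHASH_DESC_ON_STACK hunk in include/crypto/hash.h at the end of this diff), and hmac nests the underlying hash's descriptor inside its own. Below is a rough user-space sketch of the invariant hmac_init_tfm() now enforces; the structure layout and the sizes are illustrative stand-ins modelled on the hash.h hunk, not the kernel's real definitions.

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins only -- not the kernel's real structures. */
struct shash_desc_hdr { void *tfm; };           /* fixed descriptor header */
#define MAX_INNER_STATE  360                    /* worst-case inner hash state */
#define MAX_DESCSIZE     (sizeof(struct shash_desc_hdr) + MAX_INNER_STATE)

/* hmac's per-request state is a whole nested descriptor for the inner hash. */
static size_t hmac_descsize(size_t inner_descsize)
{
        return sizeof(struct shash_desc_hdr) + inner_descsize;
}

int main(void)
{
        size_t inner = 104;                     /* a made-up mid-sized hash state */
        size_t nested = hmac_descsize(inner);

        /* Same invariant as the new WARN_ON in hmac_init_tfm(). */
        if (nested > MAX_DESCSIZE) {
                fprintf(stderr, "hmac(<hash>) would overflow an on-stack descriptor\n");
                return 1;
        }
        printf("nested descsize %zu fits within %zu\n", nested, (size_t)MAX_DESCSIZE);
        return 0;
}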
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index e2ba3d202da5..fec39c35c877 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -469,7 +469,7 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl)
 }
 
 /*
- * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6DQ)
+ * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6QP)
  * have an issue wherein AXI bus transactions may not occur in the correct
  * order. This isn't a problem running single descriptors, but can be if
  * running multiple concurrent descriptors. Reworking the driver to throttle
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index de78282b8f44..9c6b5c1d6a1a 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1357,7 +1357,7 @@ Loop_ctr32_enc:
         addi            $idx,$idx,16
         bdnz            Loop_ctr32_enc
 
-        vadduwm         $ivec,$ivec,$one
+        vadduqm         $ivec,$ivec,$one
         vmr             $dat,$inptail
         lvx             $inptail,0,$inp
         addi            $inp,$inp,16
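Note: the single changed instruction switches the counter-mode IV increment from an element-wise add of 32-bit words (vadduwm) to one 128-bit quadword add (vadduqm), so a carry out of the low word now propagates through the whole IV instead of being lost when the low 32 bits wrap. A small C sketch of the arithmetic difference, assuming $one carries the value 1 in its least-significant element (this models the behaviour, not the VSX instructions themselves):

#include <stdint.h>
#include <stdio.h>

/* 128-bit counter as four 32-bit words, ctr[3] least significant. */
static void add_word(uint32_t ctr[4])   /* vadduwm-style: per-word, no carry between words */
{
        ctr[3] += 1;                    /* wraps silently at 2^32 */
}

static void add_quad(uint32_t ctr[4])   /* vadduqm-style: one 128-bit add */
{
        for (int i = 3; i >= 0; i--)
                if (++ctr[i] != 0)      /* stop once there is no carry */
                        break;
}

int main(void)
{
        uint32_t a[4] = { 0, 0, 0, 0xffffffff };
        uint32_t b[4] = { 0, 0, 0, 0xffffffff };

        add_word(a);                    /* -> 00000000 00000000 00000000 00000000 */
        add_quad(b);                    /* -> 00000000 00000000 00000001 00000000 */

        printf("word add: %08x %08x %08x %08x\n", a[0], a[1], a[2], a[3]);
        printf("quad add: %08x %08x %08x %08x\n", b[0], b[1], b[2], b[3]);
        return 0;
}

The two counters only diverge once the low word overflows, which is precisely the case the quadword add fixes.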
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index b5a6883bb09e..14807ac2e3b9 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -1,22 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
 /**
  * GHASH routines supporting VMX instructions on the Power 8
  *
- * Copyright (C) 2015 International Business Machines Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 only.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright (C) 2015, 2019 International Business Machines Inc.
  *
  * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ *
+ * Extended by Daniel Axtens <dja@axtens.net> to replace the fallback
+ * mechanism. The new approach is based on arm64 code, which is:
+ *   Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
  */
 
 #include <linux/types.h>
@@ -38,70 +30,25 @@ void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
                   const u8 *in, size_t len);
 
 struct p8_ghash_ctx {
+        /* key used by vector asm */
         u128 htable[16];
-        struct crypto_shash *fallback;
+        /* key used by software fallback */
+        be128 key;
 };
 
 struct p8_ghash_desc_ctx {
         u64 shash[2];
         u8 buffer[GHASH_DIGEST_SIZE];
         int bytes;
-        struct shash_desc fallback_desc;
 };
 
-static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
-{
-        const char *alg = "ghash-generic";
-        struct crypto_shash *fallback;
-        struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
-        struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
-
-        fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
-        if (IS_ERR(fallback)) {
-                printk(KERN_ERR
-                       "Failed to allocate transformation for '%s': %ld\n",
-                       alg, PTR_ERR(fallback));
-                return PTR_ERR(fallback);
-        }
-
-        crypto_shash_set_flags(fallback,
-                               crypto_shash_get_flags((struct crypto_shash
-                                                       *) tfm));
-
-        /* Check if the descsize defined in the algorithm is still enough. */
-        if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
-            + crypto_shash_descsize(fallback)) {
-                printk(KERN_ERR
-                       "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
-                       alg,
-                       shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
-                       crypto_shash_descsize(fallback));
-                return -EINVAL;
-        }
-        ctx->fallback = fallback;
-
-        return 0;
-}
-
-static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
-{
-        struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
-
-        if (ctx->fallback) {
-                crypto_free_shash(ctx->fallback);
-                ctx->fallback = NULL;
-        }
-}
-
 static int p8_ghash_init(struct shash_desc *desc)
 {
-        struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
         struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
 
         dctx->bytes = 0;
         memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
-        dctx->fallback_desc.tfm = ctx->fallback;
-        return crypto_shash_init(&dctx->fallback_desc);
+        return 0;
 }
 
 static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -119,7 +66,51 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
         disable_kernel_vsx();
         pagefault_enable();
         preempt_enable();
-        return crypto_shash_setkey(ctx->fallback, key, keylen);
+
+        memcpy(&ctx->key, key, GHASH_BLOCK_SIZE);
+
+        return 0;
+}
+
+static inline void __ghash_block(struct p8_ghash_ctx *ctx,
+                                 struct p8_ghash_desc_ctx *dctx)
+{
+        if (crypto_simd_usable()) {
+                preempt_disable();
+                pagefault_disable();
+                enable_kernel_vsx();
+                gcm_ghash_p8(dctx->shash, ctx->htable,
+                                dctx->buffer, GHASH_DIGEST_SIZE);
+                disable_kernel_vsx();
+                pagefault_enable();
+                preempt_enable();
+        } else {
+                crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
+                gf128mul_lle((be128 *)dctx->shash, &ctx->key);
+        }
+}
+
+static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
+                                  struct p8_ghash_desc_ctx *dctx,
+                                  const u8 *src, unsigned int srclen)
+{
+        if (crypto_simd_usable()) {
+                preempt_disable();
+                pagefault_disable();
+                enable_kernel_vsx();
+                gcm_ghash_p8(dctx->shash, ctx->htable,
+                                src, srclen);
+                disable_kernel_vsx();
+                pagefault_enable();
+                preempt_enable();
+        } else {
+                while (srclen >= GHASH_BLOCK_SIZE) {
+                        crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
+                        gf128mul_lle((be128 *)dctx->shash, &ctx->key);
+                        srclen -= GHASH_BLOCK_SIZE;
+                        src += GHASH_BLOCK_SIZE;
+                }
+        }
 }
 
 static int p8_ghash_update(struct shash_desc *desc,
@@ -129,49 +120,33 @@ static int p8_ghash_update(struct shash_desc *desc,
         struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
         struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
 
-        if (!crypto_simd_usable()) {
-                return crypto_shash_update(&dctx->fallback_desc, src,
-                                           srclen);
-        } else {
-                if (dctx->bytes) {
-                        if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
-                                memcpy(dctx->buffer + dctx->bytes, src,
-                                        srclen);
-                                dctx->bytes += srclen;
-                                return 0;
-                        }
+        if (dctx->bytes) {
+                if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
                         memcpy(dctx->buffer + dctx->bytes, src,
-                                GHASH_DIGEST_SIZE - dctx->bytes);
-                        preempt_disable();
-                        pagefault_disable();
-                        enable_kernel_vsx();
-                        gcm_ghash_p8(dctx->shash, ctx->htable,
-                                        dctx->buffer, GHASH_DIGEST_SIZE);
-                        disable_kernel_vsx();
-                        pagefault_enable();
-                        preempt_enable();
-                        src += GHASH_DIGEST_SIZE - dctx->bytes;
-                        srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
-                        dctx->bytes = 0;
-                }
-                len = srclen & ~(GHASH_DIGEST_SIZE - 1);
-                if (len) {
-                        preempt_disable();
-                        pagefault_disable();
-                        enable_kernel_vsx();
-                        gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
-                        disable_kernel_vsx();
-                        pagefault_enable();
-                        preempt_enable();
-                        src += len;
-                        srclen -= len;
-                }
-                if (srclen) {
-                        memcpy(dctx->buffer, src, srclen);
-                        dctx->bytes = srclen;
+                                srclen);
+                        dctx->bytes += srclen;
+                        return 0;
                 }
-                return 0;
+                memcpy(dctx->buffer + dctx->bytes, src,
+                        GHASH_DIGEST_SIZE - dctx->bytes);
+
+                __ghash_block(ctx, dctx);
+
+                src += GHASH_DIGEST_SIZE - dctx->bytes;
+                srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+                dctx->bytes = 0;
+        }
+        len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+        if (len) {
+                __ghash_blocks(ctx, dctx, src, len);
+                src += len;
+                srclen -= len;
         }
+        if (srclen) {
+                memcpy(dctx->buffer, src, srclen);
+                dctx->bytes = srclen;
+        }
+        return 0;
 }
 
 static int p8_ghash_final(struct shash_desc *desc, u8 *out)
@@ -180,25 +155,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
         struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
         struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
 
-        if (!crypto_simd_usable()) {
-                return crypto_shash_final(&dctx->fallback_desc, out);
-        } else {
-                if (dctx->bytes) {
-                        for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
-                                dctx->buffer[i] = 0;
-                        preempt_disable();
-                        pagefault_disable();
-                        enable_kernel_vsx();
-                        gcm_ghash_p8(dctx->shash, ctx->htable,
-                                        dctx->buffer, GHASH_DIGEST_SIZE);
-                        disable_kernel_vsx();
-                        pagefault_enable();
-                        preempt_enable();
-                        dctx->bytes = 0;
-                }
-                memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
-                return 0;
+        if (dctx->bytes) {
+                for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+                        dctx->buffer[i] = 0;
+                __ghash_block(ctx, dctx);
+                dctx->bytes = 0;
         }
+        memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+        return 0;
 }
 
 struct shash_alg p8_ghash_alg = {
@@ -213,11 +177,8 @@ struct shash_alg p8_ghash_alg = {
                  .cra_name = "ghash",
                  .cra_driver_name = "p8_ghash",
                  .cra_priority = 1000,
-                 .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
                  .cra_blocksize = GHASH_BLOCK_SIZE,
                  .cra_ctxsize = sizeof(struct p8_ghash_ctx),
                  .cra_module = THIS_MODULE,
-                 .cra_init = p8_ghash_init_tfm,
-                 .cra_exit = p8_ghash_exit_tfm,
         },
 };
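Note: with the fallback crypto_shash gone, the SIMD decision is now made per call inside __ghash_block() and __ghash_blocks(): when VSX cannot be used, the same running state is advanced with crypto_xor() plus gf128mul_lle() on the raw key saved at setkey time, so the vector and generic paths stay interchangeable mid-stream. The partial-block handling in p8_ghash_update() keeps its shape; here is a stand-alone C sketch of that buffering pattern, with hypothetical process_block()/process_blocks() helpers in place of the kernel ones.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

struct ghash_sketch {
        uint8_t buffer[BLOCK_SIZE];     /* holds an incomplete block */
        unsigned int bytes;             /* how much of it is valid   */
        unsigned long blocks;           /* just for the demo         */
};

/* Hypothetical stand-ins for __ghash_block()/__ghash_blocks(). */
static void process_block(struct ghash_sketch *s)
{
        s->blocks++;                    /* real code: one GF(2^128) step */
}

static void process_blocks(struct ghash_sketch *s, const uint8_t *src, size_t len)
{
        s->blocks += len / BLOCK_SIZE;  /* real code: bulk GHASH update */
        (void)src;
}

static void sketch_update(struct ghash_sketch *s, const uint8_t *src, size_t srclen)
{
        if (s->bytes) {
                /* Not enough to complete the buffered block: just append. */
                if (s->bytes + srclen < BLOCK_SIZE) {
                        memcpy(s->buffer + s->bytes, src, srclen);
                        s->bytes += srclen;
                        return;
                }
                /* Complete and consume the buffered block first. */
                memcpy(s->buffer + s->bytes, src, BLOCK_SIZE - s->bytes);
                process_block(s);
                src += BLOCK_SIZE - s->bytes;
                srclen -= BLOCK_SIZE - s->bytes;
                s->bytes = 0;
        }
        size_t len = srclen & ~(size_t)(BLOCK_SIZE - 1);
        if (len) {                      /* whole blocks in one go */
                process_blocks(s, src, len);
                src += len;
                srclen -= len;
        }
        if (srclen) {                   /* stash the tail for next time */
                memcpy(s->buffer, src, srclen);
                s->bytes = srclen;
        }
}

int main(void)
{
        struct ghash_sketch s = { .bytes = 0, .blocks = 0 };
        uint8_t data[40] = { 0 };

        sketch_update(&s, data, 10);    /* buffered, no block processed yet      */
        sketch_update(&s, data, 30);    /* 10 + 30 = 40 -> 2 blocks, 8 left over */
        printf("blocks=%lu buffered=%u\n", s.blocks, s.bytes);
        return 0;
}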
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index d21bea2c4382..d6702b4a457f 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -150,7 +150,13 @@ struct shash_desc {
 };
 
 #define HASH_MAX_DIGESTSIZE     64
-#define HASH_MAX_DESCSIZE       360
+
+/*
+ * Worst case is hmac(sha3-224-generic). Its context is a nested 'shash_desc'
+ * containing a 'struct sha3_state'.
+ */
+#define HASH_MAX_DESCSIZE       (sizeof(struct shash_desc) + 360)
+
 #define HASH_MAX_STATESIZE      512
 
 #define SHASH_DESC_ON_STACK(shash, ctx)                           \
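Note: a single compile-time bound is needed because SHASH_DESC_ON_STACK() lets callers reserve descriptor space on the stack without knowing a particular algorithm's descsize, so the reserve has to cover the worst case; folding sizeof(struct shash_desc) into it leaves room for the nested hmac(sha3-224-generic) case named in the comment. A sketch of the usual calling pattern (field requirements vary slightly across kernel versions, so treat this as illustrative rather than authoritative):

#include <crypto/hash.h>

/* Digest 'len' bytes with an already-allocated shash tfm (e.g. "sha256"). */
static int sketch_shash_digest(struct crypto_shash *tfm,
                               const u8 *data, unsigned int len, u8 *out)
{
        SHASH_DESC_ON_STACK(desc, tfm); /* backing storage sized by HASH_MAX_DESCSIZE */
        int err;

        desc->tfm = tfm;
        err = crypto_shash_digest(desc, data, len, out);
        shash_desc_zero(desc);          /* wipe the on-stack hash state */
        return err;
}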
