-rw-r--r--  arch/arm64/crypto/Kconfig          3
-rw-r--r--  arch/arm64/crypto/sha2-ce-glue.c  30
-rw-r--r--  arch/arm64/crypto/sha256-glue.c    1
3 files changed, 29 insertions, 5 deletions
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 5d5953545dad..8cd145f9c1ff 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -24,8 +24,9 @@ config CRYPTO_SHA1_ARM64_CE
 
 config CRYPTO_SHA2_ARM64_CE
         tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
-        depends on ARM64 && KERNEL_MODE_NEON
+        depends on KERNEL_MODE_NEON
         select CRYPTO_HASH
+        select CRYPTO_SHA256_ARM64
 
 config CRYPTO_GHASH_ARM64_CE
         tristate "GHASH (for GCM chaining mode) using ARMv8 Crypto Extensions"
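
The Kconfig hunk does two things: the redundant "ARM64 &&" is dropped from the dependency (this file is only parsed for arch/arm64 builds), and CRYPTO_SHA256_ARM64 is now selected because the glue code below gains a call into that module's scalar routine. A minimal sketch of the dependency the new select satisfies, reusing the declaration from the patch; illustrative only, not part of the change:

/* sha2-ce-glue.c calls this routine on its !may_use_simd() path. It is
 * built (and exported, see sha256-glue.c below) only when
 * CONFIG_CRYPTO_SHA256_ARM64 is enabled, so the Kconfig select keeps
 * this reference from going unresolved at module link/load time. */
asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);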
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 0ed9486f75dd..fd1ff2b13dfa 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -1,7 +1,7 @@
 /*
  * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
  *
- * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -9,6 +9,7 @@
  */
 
 #include <asm/neon.h>
+#include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
 #include <crypto/sha.h>
@@ -34,13 +35,19 @@ const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
 const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
                                                  finalize);
 
+asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
+
 static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
                             unsigned int len)
 {
         struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
+        if (!may_use_simd())
+                return sha256_base_do_update(desc, data, len,
+                                (sha256_block_fn *)sha256_block_data_order);
+
         sctx->finalize = 0;
-        kernel_neon_begin_partial(28);
+        kernel_neon_begin();
         sha256_base_do_update(desc, data, len,
                               (sha256_block_fn *)sha2_ce_transform);
         kernel_neon_end();
@@ -54,13 +61,22 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
         struct sha256_ce_state *sctx = shash_desc_ctx(desc);
         bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
 
+        if (!may_use_simd()) {
+                if (len)
+                        sha256_base_do_update(desc, data, len,
+                                (sha256_block_fn *)sha256_block_data_order);
+                sha256_base_do_finalize(desc,
+                                (sha256_block_fn *)sha256_block_data_order);
+                return sha256_base_finish(desc, out);
+        }
+
         /*
          * Allow the asm code to perform the finalization if there is no
          * partial data and the input is a round multiple of the block size.
          */
         sctx->finalize = finalize;
 
-        kernel_neon_begin_partial(28);
+        kernel_neon_begin();
         sha256_base_do_update(desc, data, len,
                               (sha256_block_fn *)sha2_ce_transform);
         if (!finalize)
@@ -74,8 +90,14 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 {
         struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
+        if (!may_use_simd()) {
+                sha256_base_do_finalize(desc,
+                                (sha256_block_fn *)sha256_block_data_order);
+                return sha256_base_finish(desc, out);
+        }
+
         sctx->finalize = 0;
-        kernel_neon_begin_partial(28);
+        kernel_neon_begin();
         sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
         kernel_neon_end();
         return sha256_base_finish(desc, out);
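
All three hooks above follow one pattern: when may_use_simd() reports that the FP/SIMD register file is off limits (for example, in hard interrupt context), the driver falls back to the scalar sha256_block_data_order routine; otherwise it brackets the Crypto Extensions transform in kernel_neon_begin()/kernel_neon_end(). A condensed sketch of the update path, assuming the driver's struct sha256_ce_state layout and the <crypto/sha256_base.h> helpers; sha256_ce_do_update is a hypothetical name, not the driver's:

#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/types.h>

struct sha256_ce_state {
        struct sha256_state     sst;
        u32                     finalize;
};

asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
                                  int blocks);
asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);

static int sha256_ce_do_update(struct shash_desc *desc, const u8 *data,
                               unsigned int len)
{
        struct sha256_ce_state *sctx = shash_desc_ctx(desc);

        if (!may_use_simd())
                /* Scalar path: touches no NEON state, safe in any context. */
                return sha256_base_do_update(desc, data, len,
                                (sha256_block_fn *)sha256_block_data_order);

        sctx->finalize = 0;     /* asm must not finalize on the update path */
        kernel_neon_begin();    /* claim the NEON unit only around the transform */
        sha256_base_do_update(desc, data, len,
                              (sha256_block_fn *)sha2_ce_transform);
        kernel_neon_end();

        return 0;
}

The (sha256_block_fn *) cast works because struct sha256_ce_state embeds, as its first member, the struct sha256_state that the base layer hands to the block function.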
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index a2226f841960..b064d925fe2a 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -29,6 +29,7 @@ MODULE_ALIAS_CRYPTO("sha256");
 
 asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
                                         unsigned int num_blks);
+EXPORT_SYMBOL(sha256_block_data_order);
 
 asmlinkage void sha256_block_neon(u32 *digest, const void *data,
                                   unsigned int num_blks);
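
EXPORT_SYMBOL() is what lets sha2-ce-glue.c, which may be built as a separate module, resolve sha256_block_data_order at load time; note the two files declare the routine with cosmetically different (const void * vs u8 const *, unsigned int vs int) but AAPCS64-compatible prototypes. A minimal sketch of consuming the exported symbol from another module; sha256_scalar_blocks is an invented helper name:

#include <linux/linkage.h>
#include <linux/types.h>

/* Extern declaration in the consuming module (here: sha2-ce). modpost and
 * the module loader resolve it against the EXPORT_SYMBOL() entry above. */
asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);

static void sha256_scalar_blocks(u32 *digest, const u8 *src, int blocks)
{
        /* No kernel_neon_begin()/end() needed: this is plain scalar code. */
        sha256_block_data_order(digest, src, blocks);
}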