author	Ard Biesheuvel <ard.biesheuvel@linaro.org>	2015-04-09 06:55:41 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2015-04-10 09:39:44 -0400
commit	dde00981e64b3c6621cafe3eea2eef6a4055208c (patch)
tree	f1c1694f6d66b592e98e5306035eae903b922707
parent	51e515faa887e40e7e30a3e13607ea6be418e4c4 (diff)
crypto: arm/sha1-ce - move SHA-1 ARMv8 implementation to base layer
This removes all the boilerplate from the existing implementation,
and replaces it with calls into the base layer.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--	arch/arm/crypto/Kconfig        |   1
-rw-r--r--	arch/arm/crypto/sha1-ce-core.S |  23
-rw-r--r--	arch/arm/crypto/sha1-ce-glue.c | 107
3 files changed, 33 insertions(+), 98 deletions(-)
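For context, the boilerplate this patch deletes is the generic partial-block bookkeeping that every glue driver used to open-code and that the shared sha1_base layer now performs. A minimal standalone sketch of that pattern (illustrative only, modeled on the update path removed from sha1-ce-glue.c below, not the actual crypto/sha1_base.h source; sha1_state_sketch and block_fn are stand-ins for the kernel's struct sha1_state and sha1_ce_transform):

#include <string.h>

#define SHA1_BLOCK_SIZE	64

struct sha1_state_sketch {
	unsigned long long count;              /* total bytes hashed so far */
	unsigned char buffer[SHA1_BLOCK_SIZE]; /* unprocessed tail bytes */
};

typedef void (*sha1_block_fn)(struct sha1_state_sketch *sst,
			      const unsigned char *src, int blocks);

/* Top up a buffered partial block, hand all whole blocks to block_fn in
 * one call, and stash whatever is left for the next update. The new
 * three-argument block function lets this live in one shared helper
 * instead of being repeated, with a special "head" case, in every driver. */
static void sha1_do_update_sketch(struct sha1_state_sketch *sctx,
				  const unsigned char *data,
				  unsigned int len, sha1_block_fn block_fn)
{
	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;

	sctx->count += len;
	if (partial + len >= SHA1_BLOCK_SIZE) {
		if (partial) {
			unsigned int p = SHA1_BLOCK_SIZE - partial;

			/* complete the buffered block and process it */
			memcpy(sctx->buffer + partial, data, p);
			block_fn(sctx, sctx->buffer, 1);
			data += p;
			len -= p;
			partial = 0;
		}
		if (len >= SHA1_BLOCK_SIZE) {
			/* process all remaining whole blocks in place */
			block_fn(sctx, data, len / SHA1_BLOCK_SIZE);
			data += len - len % SHA1_BLOCK_SIZE;
			len %= SHA1_BLOCK_SIZE;
		}
	}
	if (len)
		memcpy(sctx->buffer + partial, data, len); /* buffer the tail */
}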
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 458729d2ce22..5ed98bc6f95d 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -31,7 +31,6 @@ config CRYPTO_SHA1_ARM_CE
 	tristate "SHA1 digest algorithm (ARM v8 Crypto Extensions)"
 	depends on KERNEL_MODE_NEON
 	select CRYPTO_SHA1_ARM
-	select CRYPTO_SHA1
 	select CRYPTO_HASH
 	help
 	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
diff --git a/arch/arm/crypto/sha1-ce-core.S b/arch/arm/crypto/sha1-ce-core.S
index 4aad520935d8..b623f51ccbcf 100644
--- a/arch/arm/crypto/sha1-ce-core.S
+++ b/arch/arm/crypto/sha1-ce-core.S
@@ -61,8 +61,8 @@
 	.word		0xca62c1d6, 0xca62c1d6, 0xca62c1d6, 0xca62c1d6
 
 	/*
-	 * void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
-	 *			  u8 *head);
+	 * void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+	 *			  int blocks);
 	 */
 ENTRY(sha1_ce_transform)
 	/* load round constants */
@@ -71,23 +71,14 @@ ENTRY(sha1_ce_transform)
 	vld1.32		{k2-k3}, [ip, :128]
 
 	/* load state */
-	vld1.32		{dga}, [r2]
-	vldr		dgbs, [r2, #16]
-
-	/* load partial input (if supplied) */
-	teq		r3, #0
-	beq		0f
-	vld1.32		{q8-q9}, [r3]!
-	vld1.32		{q10-q11}, [r3]
-	teq		r0, #0
-	b		1f
+	vld1.32		{dga}, [r0]
+	vldr		dgbs, [r0, #16]
 
 	/* load input */
 0:	vld1.32		{q8-q9}, [r1]!
 	vld1.32		{q10-q11}, [r1]!
-	subs		r0, r0, #1
+	subs		r2, r2, #1
 
-1:
 #ifndef CONFIG_CPU_BIG_ENDIAN
 	vrev32.8	q8, q8
 	vrev32.8	q9, q9
@@ -128,7 +119,7 @@ ENTRY(sha1_ce_transform)
 	bne		0b
 
 	/* store new state */
-	vst1.32		{dga}, [r2]
-	vstr		dgbs, [r2, #16]
+	vst1.32		{dga}, [r0]
+	vstr		dgbs, [r0, #16]
 	bx		lr
 ENDPROC(sha1_ce_transform)
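The register renumbering in the hunks above falls straight out of the new prototype: under the AAPCS procedure-call standard the first three arguments arrive in r0, r1 and r2, so the state pointer moves from r2 to r0 and the block count from r0 to r2, while the r3 "head" argument disappears entirely. As a reading aid (this declaration mirrors the one in the patch; the register comments are mine):

/* Argument-to-register mapping implied by the new prototype (AAPCS):
 * the old code received (blocks, src, state, head) in (r0, r1, r2, r3). */
asmlinkage void sha1_ce_transform(struct sha1_state *sst, /* r0: state in/out */
				  u8 const *src,          /* r1: input blocks */
				  int blocks);            /* r2: block counter */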
diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c
index e93b24c1af1f..80bc2fcd241a 100644
--- a/arch/arm/crypto/sha1-ce-glue.c
+++ b/arch/arm/crypto/sha1-ce-glue.c
@@ -10,13 +10,13 @@
 
 #include <crypto/internal/hash.h>
 #include <crypto/sha.h>
+#include <crypto/sha1_base.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
 
 #include <asm/hwcap.h>
 #include <asm/neon.h>
 #include <asm/simd.h>
-#include <asm/unaligned.h>
 
 #include "sha1.h"
 
@@ -24,107 +24,52 @@ MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
 MODULE_LICENSE("GPL v2");
 
-asmlinkage void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
-				  u8 *head);
+asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+				  int blocks);
 
-static int sha1_init(struct shash_desc *desc)
+static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
+			  unsigned int len)
 {
 	struct sha1_state *sctx = shash_desc_ctx(desc);
 
-	*sctx = (struct sha1_state){
-		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
-	};
-	return 0;
-}
-
-static int sha1_update(struct shash_desc *desc, const u8 *data,
-		       unsigned int len)
-{
-	struct sha1_state *sctx = shash_desc_ctx(desc);
-	unsigned int partial;
-
-	if (!may_use_simd())
+	if (!may_use_simd() ||
+	    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
 		return sha1_update_arm(desc, data, len);
 
-	partial = sctx->count % SHA1_BLOCK_SIZE;
-	sctx->count += len;
-
-	if ((partial + len) >= SHA1_BLOCK_SIZE) {
-		int blocks;
+	kernel_neon_begin();
+	sha1_base_do_update(desc, data, len, sha1_ce_transform);
+	kernel_neon_end();
 
-		if (partial) {
-			int p = SHA1_BLOCK_SIZE - partial;
-
-			memcpy(sctx->buffer + partial, data, p);
-			data += p;
-			len -= p;
-		}
-
-		blocks = len / SHA1_BLOCK_SIZE;
-		len %= SHA1_BLOCK_SIZE;
-
-		kernel_neon_begin();
-		sha1_ce_transform(blocks, data, sctx->state,
-				  partial ? sctx->buffer : NULL);
-		kernel_neon_end();
-
-		data += blocks * SHA1_BLOCK_SIZE;
-		partial = 0;
-	}
-	if (len)
-		memcpy(sctx->buffer + partial, data, len);
 	return 0;
 }
 
-static int sha1_final(struct shash_desc *desc, u8 *out)
+static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
+			 unsigned int len, u8 *out)
 {
-	static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
-
-	struct sha1_state *sctx = shash_desc_ctx(desc);
-	__be64 bits = cpu_to_be64(sctx->count << 3);
-	__be32 *dst = (__be32 *)out;
-	int i;
-
-	u32 padlen = SHA1_BLOCK_SIZE
-		     - ((sctx->count + sizeof(bits)) % SHA1_BLOCK_SIZE);
-
-	sha1_update(desc, padding, padlen);
-	sha1_update(desc, (const u8 *)&bits, sizeof(bits));
-
-	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
-		put_unaligned_be32(sctx->state[i], dst++);
-
-	*sctx = (struct sha1_state){};
-	return 0;
-}
+	if (!may_use_simd())
+		return sha1_finup_arm(desc, data, len, out);
 
-static int sha1_export(struct shash_desc *desc, void *out)
-{
-	struct sha1_state *sctx = shash_desc_ctx(desc);
-	struct sha1_state *dst = out;
+	kernel_neon_begin();
+	if (len)
+		sha1_base_do_update(desc, data, len, sha1_ce_transform);
+	sha1_base_do_finalize(desc, sha1_ce_transform);
+	kernel_neon_end();
 
-	*dst = *sctx;
-	return 0;
+	return sha1_base_finish(desc, out);
 }
 
-static int sha1_import(struct shash_desc *desc, const void *in)
+static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 {
-	struct sha1_state *sctx = shash_desc_ctx(desc);
-	struct sha1_state const *src = in;
-
-	*sctx = *src;
-	return 0;
+	return sha1_ce_finup(desc, NULL, 0, out);
 }
 
 static struct shash_alg alg = {
-	.init			= sha1_init,
-	.update			= sha1_update,
-	.final			= sha1_final,
-	.export			= sha1_export,
-	.import			= sha1_import,
+	.init			= sha1_base_init,
+	.update			= sha1_ce_update,
+	.final			= sha1_ce_final,
+	.finup			= sha1_ce_finup,
 	.descsize		= sizeof(struct sha1_state),
 	.digestsize		= SHA1_DIGEST_SIZE,
-	.statesize		= sizeof(struct sha1_state),
 	.base			= {
 		.cra_name		= "sha1",
 		.cra_driver_name	= "sha1-ce",