about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Tim Chen <tim.c.chen@linux.intel.com>	2015-09-10 18:27:20 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2015-09-21 10:01:06 -0400
commit	95fca7df0b4964fbe3fe159e3d6e681e6b5b7a53 (patch)
tree	188d3095578102fba0a385c6d4745ea744a0cbf8
parent	600a2334e83d22e5c3f7ff2581f545bfc354d206 (diff)
crypto: x86/sha - glue code for Intel SHA extensions optimized SHA1 & SHA256
This patch adds the glue code to detect and utilize the Intel SHA
extensions optimized SHA1 and SHA256 update transforms when available.

This code has been tested on Broxton for functionality.

Originally-by: Chandramouli Narayanan <mouli_7982@yahoo.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--	arch/x86/crypto/sha1_ssse3_glue.c	12
-rw-r--r--	arch/x86/crypto/sha256_ssse3_glue.c	38
2 files changed, 34 insertions(+), 16 deletions(-)
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 7c48e8b20848..98be8cc17ca2 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -44,6 +44,10 @@ asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
44asmlinkage void sha1_transform_avx2(u32 *digest, const char *data, 44asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
45 unsigned int rounds); 45 unsigned int rounds);
46#endif 46#endif
47#ifdef CONFIG_AS_SHA1_NI
48asmlinkage void sha1_ni_transform(u32 *digest, const char *data,
49 unsigned int rounds);
50#endif
47 51
48static void (*sha1_transform_asm)(u32 *, const char *, unsigned int); 52static void (*sha1_transform_asm)(u32 *, const char *, unsigned int);
49 53
@@ -166,12 +170,18 @@ static int __init sha1_ssse3_mod_init(void)
166#endif 170#endif
167 } 171 }
168#endif 172#endif
173#ifdef CONFIG_AS_SHA1_NI
174 if (boot_cpu_has(X86_FEATURE_SHA_NI)) {
175 sha1_transform_asm = sha1_ni_transform;
176 algo_name = "SHA-NI";
177 }
178#endif
169 179
170 if (sha1_transform_asm) { 180 if (sha1_transform_asm) {
171 pr_info("Using %s optimized SHA-1 implementation\n", algo_name); 181 pr_info("Using %s optimized SHA-1 implementation\n", algo_name);
172 return crypto_register_shash(&alg); 182 return crypto_register_shash(&alg);
173 } 183 }
174 pr_info("Neither AVX nor AVX2 nor SSSE3 is available/usable.\n"); 184 pr_info("Neither AVX nor AVX2 nor SSSE3/SHA-NI is available/usable.\n");
175 185
176 return -ENODEV; 186 return -ENODEV;
177} 187}
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index f8097fc0d1d1..9c7b22c489f6 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -50,6 +50,10 @@ asmlinkage void sha256_transform_avx(u32 *digest, const char *data,
50asmlinkage void sha256_transform_rorx(u32 *digest, const char *data, 50asmlinkage void sha256_transform_rorx(u32 *digest, const char *data,
51 u64 rounds); 51 u64 rounds);
52#endif 52#endif
53#ifdef CONFIG_AS_SHA256_NI
54asmlinkage void sha256_ni_transform(u32 *digest, const char *data,
55 u64 rounds); /*unsigned int rounds);*/
56#endif
53 57
54static void (*sha256_transform_asm)(u32 *, const char *, u64); 58static void (*sha256_transform_asm)(u32 *, const char *, u64);
55 59
@@ -142,36 +146,40 @@ static bool __init avx_usable(void)
142 146
143static int __init sha256_ssse3_mod_init(void) 147static int __init sha256_ssse3_mod_init(void)
144{ 148{
149 char *algo;
150
145 /* test for SSSE3 first */ 151 /* test for SSSE3 first */
146 if (cpu_has_ssse3) 152 if (cpu_has_ssse3) {
147 sha256_transform_asm = sha256_transform_ssse3; 153 sha256_transform_asm = sha256_transform_ssse3;
154 algo = "SSSE3";
155 }
148 156
149#ifdef CONFIG_AS_AVX 157#ifdef CONFIG_AS_AVX
150 /* allow AVX to override SSSE3, it's a little faster */ 158 /* allow AVX to override SSSE3, it's a little faster */
151 if (avx_usable()) { 159 if (avx_usable()) {
160 sha256_transform_asm = sha256_transform_avx;
161 algo = "AVX";
152#ifdef CONFIG_AS_AVX2 162#ifdef CONFIG_AS_AVX2
153 if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2)) 163 if (boot_cpu_has(X86_FEATURE_AVX2) &&
164 boot_cpu_has(X86_FEATURE_BMI2)) {
154 sha256_transform_asm = sha256_transform_rorx; 165 sha256_transform_asm = sha256_transform_rorx;
155 else 166 algo = "AVX2";
167 }
168#endif
169 }
156#endif 170#endif
157 sha256_transform_asm = sha256_transform_avx; 171#ifdef CONFIG_AS_SHA256_NI
172 if (boot_cpu_has(X86_FEATURE_SHA_NI)) {
173 sha256_transform_asm = sha256_ni_transform;
174 algo = "SHA-256-NI";
158 } 175 }
159#endif 176#endif
160 177
161 if (sha256_transform_asm) { 178 if (sha256_transform_asm) {
162#ifdef CONFIG_AS_AVX 179 pr_info("Using %s optimized SHA-256 implementation\n", algo);
163 if (sha256_transform_asm == sha256_transform_avx)
164 pr_info("Using AVX optimized SHA-256 implementation\n");
165#ifdef CONFIG_AS_AVX2
166 else if (sha256_transform_asm == sha256_transform_rorx)
167 pr_info("Using AVX2 optimized SHA-256 implementation\n");
168#endif
169 else
170#endif
171 pr_info("Using SSSE3 optimized SHA-256 implementation\n");
172 return crypto_register_shashes(algs, ARRAY_SIZE(algs)); 180 return crypto_register_shashes(algs, ARRAY_SIZE(algs));
173 } 181 }
174 pr_info("Neither AVX nor SSSE3 is available/usable.\n"); 182 pr_info("Neither AVX nor SSSE3/SHA-NI is available/usable.\n");
175 183
176 return -ENODEV; 184 return -ENODEV;
177} 185}