diff options
| author | Mathias Krause <minipli@googlemail.com> | 2012-05-24 05:13:42 -0400 |
|---|---|---|
| committer | Herbert Xu <herbert@gondor.apana.org.au> | 2012-06-12 04:37:16 -0400 |
| commit | 65df57743924c3d13e1fa1bcf5bf70fe874fcdfd (patch) | |
| tree | 122291d27b0ea9aeed2e1e454a62f83a8aed3aa5 | |
| parent | 4e3c8a1b1c63482403e9d5e3148dee1a711e4b91 (diff) | |
crypto: sha1 - use Kbuild supplied flags for AVX test
Commit ea4d26ae ("raid5: add AVX optimized RAID5 checksumming")
introduced x86/ arch wide defines for AFLAGS and CFLAGS indicating AVX
support in binutils based on the same test we have in x86/crypto/ right
now. To minimize duplication drop our implementation in favour of the
one in x86/.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
| -rw-r--r-- | arch/x86/crypto/Makefile | 7 | ||||
| -rw-r--r-- | arch/x86/crypto/sha1_ssse3_asm.S | 2 | ||||
| -rw-r--r-- | arch/x86/crypto/sha1_ssse3_glue.c | 6 |
3 files changed, 4 insertions, 11 deletions
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index e191ac048b5..479f95a744f 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile | |||
| @@ -34,12 +34,5 @@ salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o | |||
| 34 | serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o | 34 | serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o |
| 35 | 35 | ||
| 36 | aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o | 36 | aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o |
| 37 | |||
| 38 | ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o | 37 | ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o |
| 39 | |||
| 40 | # enable AVX support only when $(AS) can actually assemble the instructions | ||
| 41 | ifeq ($(call as-instr,vpxor %xmm0$(comma)%xmm1$(comma)%xmm2,yes,no),yes) | ||
| 42 | AFLAGS_sha1_ssse3_asm.o += -DSHA1_ENABLE_AVX_SUPPORT | ||
| 43 | CFLAGS_sha1_ssse3_glue.o += -DSHA1_ENABLE_AVX_SUPPORT | ||
| 44 | endif | ||
| 45 | sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o | 38 | sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o |
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S index b2c2f57d70e..49d6987a73d 100644 --- a/arch/x86/crypto/sha1_ssse3_asm.S +++ b/arch/x86/crypto/sha1_ssse3_asm.S | |||
| @@ -468,7 +468,7 @@ W_PRECALC_SSSE3 | |||
| 468 | */ | 468 | */ |
| 469 | SHA1_VECTOR_ASM sha1_transform_ssse3 | 469 | SHA1_VECTOR_ASM sha1_transform_ssse3 |
| 470 | 470 | ||
| 471 | #ifdef SHA1_ENABLE_AVX_SUPPORT | 471 | #ifdef CONFIG_AS_AVX |
| 472 | 472 | ||
| 473 | .macro W_PRECALC_AVX | 473 | .macro W_PRECALC_AVX |
| 474 | 474 | ||
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c index f916499d0ab..4a11a9d7245 100644 --- a/arch/x86/crypto/sha1_ssse3_glue.c +++ b/arch/x86/crypto/sha1_ssse3_glue.c | |||
| @@ -35,7 +35,7 @@ | |||
| 35 | 35 | ||
| 36 | asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data, | 36 | asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data, |
| 37 | unsigned int rounds); | 37 | unsigned int rounds); |
| 38 | #ifdef SHA1_ENABLE_AVX_SUPPORT | 38 | #ifdef CONFIG_AS_AVX |
| 39 | asmlinkage void sha1_transform_avx(u32 *digest, const char *data, | 39 | asmlinkage void sha1_transform_avx(u32 *digest, const char *data, |
| 40 | unsigned int rounds); | 40 | unsigned int rounds); |
| 41 | #endif | 41 | #endif |
| @@ -184,7 +184,7 @@ static struct shash_alg alg = { | |||
| 184 | } | 184 | } |
| 185 | }; | 185 | }; |
| 186 | 186 | ||
| 187 | #ifdef SHA1_ENABLE_AVX_SUPPORT | 187 | #ifdef CONFIG_AS_AVX |
| 188 | static bool __init avx_usable(void) | 188 | static bool __init avx_usable(void) |
| 189 | { | 189 | { |
| 190 | u64 xcr0; | 190 | u64 xcr0; |
| @@ -209,7 +209,7 @@ static int __init sha1_ssse3_mod_init(void) | |||
| 209 | if (cpu_has_ssse3) | 209 | if (cpu_has_ssse3) |
| 210 | sha1_transform_asm = sha1_transform_ssse3; | 210 | sha1_transform_asm = sha1_transform_ssse3; |
| 211 | 211 | ||
| 212 | #ifdef SHA1_ENABLE_AVX_SUPPORT | 212 | #ifdef CONFIG_AS_AVX |
| 213 | /* allow AVX to override SSSE3, it's a little faster */ | 213 | /* allow AVX to override SSSE3, it's a little faster */ |
| 214 | if (avx_usable()) | 214 | if (avx_usable()) |
| 215 | sha1_transform_asm = sha1_transform_avx; | 215 | sha1_transform_asm = sha1_transform_avx; |
