path: root/arch/x86/crypto
author	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-14 10:57:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-14 10:57:32 -0400
commit	c7208de304ac335d5d58db346bb773a471fc636b (patch)
tree	47808484fc3ff8447fe30943a33880bae00d5fee /arch/x86/crypto
parent	15b0404272e1513940223cf9eefadfd22804a060 (diff)
parent	5367b6887e7d8c870a5da7d9b8c6e9c207684e43 (diff)
Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (22 commits)
  x86: Fix code patching for paravirt-alternatives on 486
  x86, msr: change msr-reg.o to obj-y, and export its symbols
  x86: Use hard_smp_processor_id() to get apic id for AMD K8 cpus
  x86, sched: Workaround broken sched domain creation for AMD Magny-Cours
  x86, mcheck: Use correct cpumask for shared bank4
  x86, cacheinfo: Fixup L3 cache information for AMD multi-node processors
  x86: Fix CPU llc_shared_map information for AMD Magny-Cours
  x86, msr: Fix msr-reg.S compilation with gas 2.16.1, on 32-bit too
  x86: Move kernel_fpu_using to irq_fpu_usable in asm/i387.h
  x86, msr: fix msr-reg.S compilation with gas 2.16.1
  x86, msr: Export the register-setting MSR functions via /dev/*/msr
  x86, msr: Create _on_cpu helpers for {rw,wr}msr_safe_regs()
  x86, msr: Have the _safe MSR functions return -EIO, not -EFAULT
  x86, msr: CFI annotations, cleanups for msr-reg.S
  x86, asm: Make _ASM_EXTABLE() usable from assembly code
  x86, asm: Add 32-bit versions of the combined CFI macros
  x86, AMD: Disable wrongly set X86_FEATURE_LAHF_LM CPUID bit
  x86, msr: Rewrite AMD rd/wrmsr variants
  x86, msr: Add rd/wrmsr interfaces with preset registers
  x86: add specific support for Intel Atom architecture
  ...
Diffstat (limited to 'arch/x86/crypto')
 arch/x86/crypto/aesni-intel_glue.c | 17 +++++------------
 1 file changed, 5 insertions(+), 12 deletions(-)
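
One of the commits pulled in here, "x86: Move kernel_fpu_using to irq_fpu_usable in asm/i387.h", renames the open-coded helper that the diff below deletes. As a rough sketch of the renamed helper's shape after the move (the body is an assumption, obtained by negating the kernel_fpu_using() test removed below; only the irq_fpu_usable name is confirmed by this diff):

	/* Sketch only: inferred as the logical negation of the removed
	 * kernel_fpu_using(); the real asm/i387.h definition may differ. */
	static inline bool irq_fpu_usable(void)
	{
		/* The FPU may be used unless we interrupted a context whose
		 * FPU state is live, i.e. in_interrupt() with CR0.TS clear. */
		return !(in_interrupt() && !(read_cr0() & X86_CR0_TS));
	}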
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index d3ec8d588d4b..585edebe12cf 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -59,13 +59,6 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
 
-static inline int kernel_fpu_using(void)
-{
-	if (in_interrupt() && !(read_cr0() & X86_CR0_TS))
-		return 1;
-	return 0;
-}
-
 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 {
 	unsigned long addr = (unsigned long)raw_ctx;
@@ -89,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 		return -EINVAL;
 	}
 
-	if (kernel_fpu_using())
+	if (irq_fpu_usable())
 		err = crypto_aes_expand_key(ctx, in_key, key_len);
 	else {
 		kernel_fpu_begin();
@@ -110,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (kernel_fpu_using())
+	if (irq_fpu_usable())
 		crypto_aes_encrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -123,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (kernel_fpu_using())
+	if (irq_fpu_usable())
 		crypto_aes_decrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -349,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (kernel_fpu_using()) {
+	if (irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
@@ -370,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (kernel_fpu_using()) {
+	if (irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
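
For reference, the guard pattern these call sites are built around: run the AES-NI path between kernel_fpu_begin()/kernel_fpu_end() when FPU state may be touched, and fall back to the portable C implementation otherwise. A minimal sketch, assuming irq_fpu_usable() returns true when the FPU may be used now; aesni_enc() is assumed from the surrounding file and is not shown in these hunks. (Note the hunks above are a mechanical rename that leaves the C fallback under the taken branch, which reads inverted against the new name; the tests were later negated upstream.)

	if (irq_fpu_usable()) {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);		/* SSE/AES-NI path */
		kernel_fpu_end();
	} else {
		crypto_aes_encrypt_x86(ctx, dst, src);	/* generic C fallback */
	}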