 arch/x86/crypto/aesni-intel_glue.c | 10 +++++-----
 drivers/crypto/padlock-sha.c       | 14 ++++++++++++--
 2 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 585edebe12cf..49c552c060e9 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -82,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 		return -EINVAL;
 	}
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		err = crypto_aes_expand_key(ctx, in_key, key_len);
 	else {
 		kernel_fpu_begin();
@@ -103,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		crypto_aes_encrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -116,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		crypto_aes_decrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -342,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (irq_fpu_usable()) {
+	if (!irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
@@ -363,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (irq_fpu_usable()) {
+	if (!irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
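
Note on the aesni-intel_glue.c hunks above: irq_fpu_usable() returns true when
it is safe to touch FPU/SSE state in the current context. The old tests were
inverted, so the SSE-based AES-NI path ran precisely when the FPU was
unavailable, and the ablkcipher paths deferred work to cryptd only when
deferral was unnecessary. A minimal sketch of the corrected shape of one
synchronous call site; the aesni_enc() call in the else branch is an
assumption for illustration, since the hunks cut off before that line:

	/* Sketch only: aesni_enc() stands in for the elided SSE call. */
	static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
	{
		struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

		if (!irq_fpu_usable())
			/* FPU state must not be touched here (e.g. an
			 * interrupt preempted another FPU user): fall back
			 * to the plain-C implementation. */
			crypto_aes_encrypt_x86(ctx, dst, src);
		else {
			/* Safe to use SSE, but only inside an FPU section. */
			kernel_fpu_begin();
			aesni_enc(ctx, dst, src);
			kernel_fpu_end();
		}
	}
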
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 76cb6b345e7b..0af80577dc7b 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -24,6 +24,12 @@
 #include <asm/i387.h>
 #include "padlock.h"
 
+#ifdef CONFIG_64BIT
+#define STACK_ALIGN 16
+#else
+#define STACK_ALIGN 4
+#endif
+
 struct padlock_sha_desc {
 	struct shash_desc fallback;
 };
@@ -64,7 +70,9 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
 	/* We can't store directly to *out as it may be unaligned. */
 	/* BTW Don't reduce the buffer size below 128 Bytes!
 	 * PadLock microcode needs it that big. */
-	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
 	struct sha1_state state;
 	unsigned int space;
@@ -128,7 +136,9 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
 	/* We can't store directly to *out as it may be unaligned. */
 	/* BTW Don't reduce the buffer size below 128 Bytes!
 	 * PadLock microcode needs it that big. */
-	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
 	struct sha256_state state;
 	unsigned int space;
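
Note on the padlock-sha.c hunks above: the compiler does not reliably realign
the stack for locals that request more alignment than the ABI stack alignment
provides (4 bytes on i386, 16 bytes on x86-64), so a plain
__attribute__((aligned(PADLOCK_ALIGNMENT))) buffer can end up under-aligned on
32-bit kernels. The fix over-allocates by PADLOCK_ALIGNMENT - STACK_ALIGN
bytes and aligns the pointer by hand with PTR_ALIGN(). A self-contained
user-space sketch of the same trick, with stand-in constants and a local
PTR_ALIGN definition mimicking the kernel macro:

	/* User-space demo of the over-allocate-and-align trick.
	 * WANT_ALIGN stands in for PADLOCK_ALIGNMENT. */
	#include <stdint.h>
	#include <stdio.h>

	#define WANT_ALIGN  16	/* alignment the hardware needs */
	#define STACK_ALIGN  4	/* worst-case guaranteed stack alignment (i386) */

	/* Mimics the kernel's PTR_ALIGN(): round p up to a multiple of a. */
	#define PTR_ALIGN(p, a) \
		((char *)(((uintptr_t)(p) + ((a) - 1)) & ~(uintptr_t)((a) - 1)))

	int main(void)
	{
		/* Over-allocate so rounding up never runs past the end: the
		 * aligned pointer sits at most WANT_ALIGN - STACK_ALIGN bytes
		 * into buf, leaving the full 128 bytes usable. */
		char buf[128 + WANT_ALIGN - STACK_ALIGN]
			__attribute__((aligned(STACK_ALIGN)));
		char *result = PTR_ALIGN(&buf[0], WANT_ALIGN);

		printf("buf=%p result=%p (aligned to %d)\n",
		       (void *)buf, (void *)result, WANT_ALIGN);
		return 0;
	}
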