aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorHuang Ying <ying.huang@intel.com>2009-11-03 10:55:20 -0500
committerHerbert Xu <herbert@gondor.apana.org.au>2009-11-03 10:55:20 -0500
commit01dd95827726534230d8f03f7e6faafe24e49260 (patch)
tree2b198f49ad60e96b8564897f65939c41dd7de2a3 /arch/x86
parent3b0d65969b549b796abc6f0230f6142fed365d49 (diff)
crypto: ghash-intel - Fix irq_fpu_usable usage
When renaming kernel_fpu_using to irq_fpu_usable, the semantics of the function changed too, from measuring whether the kernel is using the FPU, that is, the FPU is NOT available, to measuring whether the FPU is usable, that is, the FPU is available. But the usage of irq_fpu_usable in ghash-clmulni-intel_glue.c was not changed accordingly. This patch fixes this. Signed-off-by: Huang Ying <ying.huang@intel.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c8
1 files changed, 4 insertions, 4 deletions
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 65d409644d72..cbcc8d8ea93a 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -159,7 +159,7 @@ static int ghash_async_init(struct ahash_request *req)
159 struct ahash_request *cryptd_req = ahash_request_ctx(req); 159 struct ahash_request *cryptd_req = ahash_request_ctx(req);
160 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; 160 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
161 161
162 if (irq_fpu_usable()) { 162 if (!irq_fpu_usable()) {
163 memcpy(cryptd_req, req, sizeof(*req)); 163 memcpy(cryptd_req, req, sizeof(*req));
164 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); 164 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
165 return crypto_ahash_init(cryptd_req); 165 return crypto_ahash_init(cryptd_req);
@@ -177,7 +177,7 @@ static int ghash_async_update(struct ahash_request *req)
177{ 177{
178 struct ahash_request *cryptd_req = ahash_request_ctx(req); 178 struct ahash_request *cryptd_req = ahash_request_ctx(req);
179 179
180 if (irq_fpu_usable()) { 180 if (!irq_fpu_usable()) {
181 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 181 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
182 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); 182 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
183 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; 183 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
@@ -195,7 +195,7 @@ static int ghash_async_final(struct ahash_request *req)
195{ 195{
196 struct ahash_request *cryptd_req = ahash_request_ctx(req); 196 struct ahash_request *cryptd_req = ahash_request_ctx(req);
197 197
198 if (irq_fpu_usable()) { 198 if (!irq_fpu_usable()) {
199 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 199 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
200 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); 200 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
201 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; 201 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
@@ -216,7 +216,7 @@ static int ghash_async_digest(struct ahash_request *req)
216 struct ahash_request *cryptd_req = ahash_request_ctx(req); 216 struct ahash_request *cryptd_req = ahash_request_ctx(req);
217 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; 217 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
218 218
219 if (irq_fpu_usable()) { 219 if (!irq_fpu_usable()) {
220 memcpy(cryptd_req, req, sizeof(*req)); 220 memcpy(cryptd_req, req, sizeof(*req));
221 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); 221 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
222 return crypto_ahash_digest(cryptd_req); 222 return crypto_ahash_digest(cryptd_req);