| author | David S. Miller <davem@davemloft.net> | 2010-05-22 04:09:04 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2010-05-25 20:37:15 -0400 |
| commit | c9aa55e5271a53d28e93fa58759d318b403c15ba (patch) | |
| tree | a6726466da33e2be0ed943beb5cb2c9dc3ca146c /drivers/crypto | |
| parent | 527b9525256f97ad8d092bbfc8fdc3c5409f4a4d (diff) | |
n2_crypto: Plumb fallback ahash requests properly.
Do this by putting the async fallback request at the end of an
n2-specific ahash request context, then properly adjusting the request
private size in our ahash ->cra_init().

We also need to move the writable state bits into the n2 request
private context instead of the n2 cra_ctx.
With help from Herbert Xu.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/n2_core.c  |  95
1 file changed, 48 insertions(+), 47 deletions(-)
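In outline, the patch applies the standard way to plumb an ahash fallback: embed the fallback's ahash_request as the *last* member of a driver-private request context, then enlarge the request size in ->cra_init() so the fallback's own private context fits in the memory directly behind it. A condensed sketch, drawn from the new code in the diff below (error handling and unrelated members elided):

```c
struct n2_hash_req_ctx {
	union {				/* per-request hash state */
		struct md5_state	md5;
		struct sha1_state	sha1;
		struct sha256_state	sha256;
	} u;

	unsigned char		hash_key[64];
	unsigned char		keyed_zero_hash[32];

	/* Must stay last: crypto_ahash_set_reqsize() below reserves the
	 * fallback's own request context immediately after this member.
	 */
	struct ahash_request	fallback_req;
};

static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
	/* ... fallback_tfm allocated earlier with crypto_alloc_ahash() ... */
	crypto_ahash_set_reqsize(ahash, sizeof(struct n2_hash_req_ctx) +
					crypto_ahash_reqsize(fallback_tfm));
	ctx->fallback_tfm = fallback_tfm;
	return 0;
}

static int n2_hash_async_init(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Retarget the embedded request at the fallback and forward it. */
	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}
```

This removes the old requirement that struct n2_hash_ctx mirror the layout created by crypto_init_shash_ops_async, and, because the writable state now lives per-request rather than per-tfm, concurrent requests on the same tfm no longer share mutable state.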
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 5613b8affe11..23163fda5035 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -251,16 +251,10 @@ static void n2_base_ctx_init(struct n2_base_ctx *ctx)
 struct n2_hash_ctx {
 	struct n2_base_ctx		base;
 
-	struct crypto_ahash		*fallback;
+	struct crypto_ahash		*fallback_tfm;
+};
 
-	/* These next three members must match the layout created by
-	 * crypto_init_shash_ops_async.  This allows us to properly
-	 * plumb requests we can't do in hardware down to the fallback
-	 * operation, providing all of the data structures and layouts
-	 * expected by those paths.
-	 */
-	struct ahash_request		fallback_req;
-	struct shash_desc		fallback_desc;
+struct n2_hash_req_ctx {
 	union {
 		struct md5_state	md5;
 		struct sha1_state	sha1;
@@ -269,56 +263,62 @@ struct n2_hash_ctx {
 
 	unsigned char			hash_key[64];
 	unsigned char			keyed_zero_hash[32];
+
+	struct ahash_request		fallback_req;
 };
 
 static int n2_hash_async_init(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	return crypto_ahash_init(&ctx->fallback_req);
+	return crypto_ahash_init(&rctx->fallback_req);
 }
 
 static int n2_hash_async_update(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.nbytes = req->nbytes;
-	ctx->fallback_req.src = req->src;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
 
-	return crypto_ahash_update(&ctx->fallback_req);
+	return crypto_ahash_update(&rctx->fallback_req);
 }
 
 static int n2_hash_async_final(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.result = req->result;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.result = req->result;
 
-	return crypto_ahash_final(&ctx->fallback_req);
+	return crypto_ahash_final(&rctx->fallback_req);
 }
 
 static int n2_hash_async_finup(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.nbytes = req->nbytes;
-	ctx->fallback_req.src = req->src;
-	ctx->fallback_req.result = req->result;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
+	rctx->fallback_req.result = req->result;
 
-	return crypto_ahash_finup(&ctx->fallback_req);
+	return crypto_ahash_finup(&rctx->fallback_req);
 }
 
 static int n2_hash_cra_init(struct crypto_tfm *tfm)
@@ -338,7 +338,10 @@ static int n2_hash_cra_init(struct crypto_tfm *tfm)
 		goto out;
 	}
 
-	ctx->fallback = fallback_tfm;
+	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+					 crypto_ahash_reqsize(fallback_tfm)));
+
+	ctx->fallback_tfm = fallback_tfm;
 	return 0;
 
 out:
@@ -350,7 +353,7 @@ static void n2_hash_cra_exit(struct crypto_tfm *tfm)
 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 
-	crypto_free_ahash(ctx->fallback);
+	crypto_free_ahash(ctx->fallback_tfm);
 }
 
 static unsigned long wait_for_tail(struct spu_queue *qp)
@@ -399,14 +402,16 @@ static int n2_hash_async_digest(struct ahash_request *req,
 	 * exceed 2^16.
 	 */
 	if (unlikely(req->nbytes > (1 << 16))) {
-		ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-		ctx->fallback_req.base.flags =
+		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+
+		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+		rctx->fallback_req.base.flags =
 			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-		ctx->fallback_req.nbytes = req->nbytes;
-		ctx->fallback_req.src = req->src;
-		ctx->fallback_req.result = req->result;
+		rctx->fallback_req.nbytes = req->nbytes;
+		rctx->fallback_req.src = req->src;
+		rctx->fallback_req.result = req->result;
 
-		return crypto_ahash_digest(&ctx->fallback_req);
+		return crypto_ahash_digest(&rctx->fallback_req);
 	}
 
 	n2_base_ctx_init(&ctx->base);
@@ -472,9 +477,8 @@ out:
 
 static int n2_md5_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct md5_state *m = &ctx->u.md5;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct md5_state *m = &rctx->u.md5;
 
 	if (unlikely(req->nbytes == 0)) {
 		static const char md5_zero[MD5_DIGEST_SIZE] = {
@@ -497,9 +501,8 @@ static int n2_md5_async_digest(struct ahash_request *req)
 
 static int n2_sha1_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha1_state *s = &ctx->u.sha1;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *s = &rctx->u.sha1;
 
 	if (unlikely(req->nbytes == 0)) {
 		static const char sha1_zero[SHA1_DIGEST_SIZE] = {
@@ -524,9 +527,8 @@ static int n2_sha1_async_digest(struct ahash_request *req)
 
 static int n2_sha256_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha256_state *s = &ctx->u.sha256;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *s = &rctx->u.sha256;
 
 	if (req->nbytes == 0) {
 		static const char sha256_zero[SHA256_DIGEST_SIZE] = {
@@ -555,9 +557,8 @@ static int n2_sha256_async_digest(struct ahash_request *req)
 
 static int n2_sha224_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha256_state *s = &ctx->u.sha256;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *s = &rctx->u.sha256;
 
 	if (req->nbytes == 0) {
 		static const char sha224_zero[SHA224_DIGEST_SIZE] = {