author		Herbert Xu <herbert@gondor.apana.org.au>	2007-03-23 23:35:34 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2007-05-02 00:38:30 -0400
commit		32e3983fe590ac4cd70c7728eb330d43cef031a7 (patch)
tree		9e239c4d2f208578fd400c2abb31e8ae7788de4b /crypto/blkcipher.c
parent		03f5d8cedb31deb558cd97095730cbc8bc54b12a (diff)
[CRYPTO] api: Add async block cipher interface
This patch adds the frontend interface for asynchronous block ciphers. In addition to the usual block cipher parameters, there is a callback function pointer and a data pointer. The callback will be invoked only if the encrypt/decrypt handlers return -EINPROGRESS. In other words, if the return value is zero, the completion handler (or the equivalent code) needs to be invoked by the caller.

The request structure is allocated and freed by the caller. Its size is determined by calling crypto_ablkcipher_reqsize(). The helpers ablkcipher_request_alloc/ablkcipher_request_free can be used to manage the memory for a request.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
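For illustration, here is a minimal sketch of how a caller might drive this interface, put together from the helpers named above plus the request setup helpers introduced by the same interface (ablkcipher_request_set_callback, ablkcipher_request_set_crypt, crypto_ablkcipher_encrypt). The demo_* names, the completion-based wait, and the way the tfm, scatterlists and IV are obtained are assumptions for the example, not part of this patch:

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/slab.h>

/* Runs only when crypto_ablkcipher_encrypt() returned -EINPROGRESS. */
static void demo_complete(struct crypto_async_request *req, int err)
{
	struct completion *done = req->data;

	/* A real caller would also record "err" somewhere before waking up. */
	complete(done);
}

/* Hypothetical caller: tfm, src/dst scatterlists, iv and an initialised
 * "done" completion are assumed to be set up by the surrounding code. */
static int demo_encrypt(struct crypto_ablkcipher *tfm,
			struct scatterlist *src, struct scatterlist *dst,
			unsigned int nbytes, void *iv,
			struct completion *done)
{
	struct ablkcipher_request *req;
	int err;

	/* Sized via crypto_ablkcipher_reqsize() inside the helper. */
	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					demo_complete, done);
	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS) {
		/* Asynchronous path: the callback signals completion. */
		wait_for_completion(done);
		err = 0;
	}
	/* err == 0 here means the cipher ran synchronously and the caller
	 * itself is responsible for any completion handling. */

	ablkcipher_request_free(req);
	return err;
}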
Diffstat (limited to 'crypto/blkcipher.c')
-rw-r--r--	crypto/blkcipher.c	70
1 file changed, 65 insertions(+), 5 deletions(-)
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index bf459179efe3..8edf40c835a7 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -349,13 +349,48 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key,
 	return cipher->setkey(tfm, key, keylen);
 }
 
+static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
+}
+
+static int async_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
+	struct blkcipher_desc desc = {
+		.tfm = __crypto_blkcipher_cast(tfm),
+		.info = req->info,
+		.flags = req->base.flags,
+	};
+
+
+	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
+}
+
+static int async_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
+	struct blkcipher_desc desc = {
+		.tfm = __crypto_blkcipher_cast(tfm),
+		.info = req->info,
+		.flags = req->base.flags,
+	};
+
+	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
+}
+
 static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
 					     u32 mask)
 {
 	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
 	unsigned int len = alg->cra_ctxsize;
 
-	if (cipher->ivsize) {
+	type ^= CRYPTO_ALG_ASYNC;
+	mask &= CRYPTO_ALG_ASYNC;
+	if ((type & mask) && cipher->ivsize) {
 		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
 		len += cipher->ivsize;
 	}
@@ -363,16 +398,26 @@ static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
 	return len;
 }
 
-static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
+{
+	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
+	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
+
+	crt->setkey = async_setkey;
+	crt->encrypt = async_encrypt;
+	crt->decrypt = async_decrypt;
+	crt->ivsize = alg->ivsize;
+
+	return 0;
+}
+
+static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
 {
 	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
 	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
 	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
 	unsigned long addr;
 
-	if (alg->ivsize > PAGE_SIZE / 8)
-		return -EINVAL;
-
 	crt->setkey = setkey;
 	crt->encrypt = alg->encrypt;
 	crt->decrypt = alg->decrypt;
@@ -385,6 +430,21 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
 	return 0;
 }
 
+static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+{
+	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
+
+	if (alg->ivsize > PAGE_SIZE / 8)
+		return -EINVAL;
+
+	type ^= CRYPTO_ALG_ASYNC;
+	mask &= CRYPTO_ALG_ASYNC;
+	if (type & mask)
+		return crypto_init_blkcipher_ops_sync(tfm);
+	else
+		return crypto_init_blkcipher_ops_async(tfm);
+}
+
 static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
 	__attribute__ ((unused));
 static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)