author    Linus Torvalds <torvalds@linux-foundation.org>  2010-08-04 18:23:14 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-08-04 18:23:14 -0400
commit    b7c8e55db7141dcbb9d5305a3260fa0ed62a1bcc
tree      59fbd52d8e80e5a83d9747961d28aaf4d400613a  /drivers/crypto
parent    ffd386a9a8273dcfa61705d0b349eebc7525ef87
parent    4015d9a865e3bcc42d88bedc8ce1551000bab664
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (39 commits)
  random: Reorder struct entropy_store to remove padding on 64bits
  padata: update API documentation
  padata: Remove padata_get_cpumask
  crypto: pcrypt - Update pcrypt cpumask according to the padata cpumask notifier
  crypto: pcrypt - Rename pcrypt_instance
  padata: Pass the padata cpumasks to the cpumask_change_notifier chain
  padata: Rearrange set_cpumask functions
  padata: Rename padata_alloc functions
  crypto: pcrypt - Dont calulate a callback cpu on empty callback cpumask
  padata: Check for valid cpumasks
  padata: Allocate cpumask dependend recources in any case
  padata: Fix cpu index counting
  crypto: geode_aes - Convert pci_table entries to PCI_VDEVICE (if PCI_ANY_ID is used)
  pcrypt: Added sysfs interface to pcrypt
  padata: Added sysfs primitives to padata subsystem
  padata: Make two separate cpumasks
  padata: update documentation
  padata: simplify serialization mechanism
  padata: make padata_do_parallel to return zero on success
  padata: Handle empty padata cpumasks
  ...
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/geode-aes.c |   2
-rw-r--r--  drivers/crypto/hifn_795x.c |   4
-rw-r--r--  drivers/crypto/mv_cesa.c   |  10
-rw-r--r--  drivers/crypto/n2_core.c   | 415
-rw-r--r--  drivers/crypto/omap-sham.c |   1
-rw-r--r--  drivers/crypto/talitos.c   |  77
6 files changed, 342 insertions(+), 167 deletions(-)
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 09389dd2f96b..219d09cbb0d1 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -573,7 +573,7 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
 }
 
 static struct pci_device_id geode_aes_tbl[] = {
-	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, PCI_ANY_ID, PCI_ANY_ID} ,
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), } ,
 	{ 0, }
 };
 
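
For context: PCI_VDEVICE() is the <linux/pci.h> helper that fills a struct pci_device_id from a vendor/device pair and wildcards the remaining match fields. Its expansion is roughly the following (paraphrased, so treat it as an approximation rather than the exact header text of this kernel version):

	#define PCI_VDEVICE(vendor, device) \
		PCI_VENDOR_ID_##vendor, (device), \
		PCI_ANY_ID, PCI_ANY_ID, 0, 0

so the converted table entry still matches any subvendor/subdevice, exactly like the open-coded initializer it replaces.
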
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 16fce3aadf4d..e449ac5627a5 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2018,7 +2018,6 @@ static void hifn_flush(struct hifn_device *dev)
 {
 	unsigned long flags;
 	struct crypto_async_request *async_req;
-	struct hifn_context *ctx;
 	struct ablkcipher_request *req;
 	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 	int i;
@@ -2035,7 +2034,6 @@ static void hifn_flush(struct hifn_device *dev)
 
 	spin_lock_irqsave(&dev->lock, flags);
 	while ((async_req = crypto_dequeue_request(&dev->queue))) {
-		ctx = crypto_tfm_ctx(async_req->tfm);
 		req = container_of(async_req, struct ablkcipher_request, base);
 		spin_unlock_irqrestore(&dev->lock, flags);
 
@@ -2139,7 +2137,6 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
 static int hifn_process_queue(struct hifn_device *dev)
 {
 	struct crypto_async_request *async_req, *backlog;
-	struct hifn_context *ctx;
 	struct ablkcipher_request *req;
 	unsigned long flags;
 	int err = 0;
@@ -2156,7 +2153,6 @@ static int hifn_process_queue(struct hifn_device *dev)
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
 
-		ctx = crypto_tfm_ctx(async_req->tfm);
 		req = container_of(async_req, struct ablkcipher_request, base);
 
 		err = hifn_handle_req(req);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index e095422b58dd..7d279e578df5 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1055,20 +1055,20 @@ static int mv_probe(struct platform_device *pdev)
 	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
 	if (IS_ERR(cp->queue_th)) {
 		ret = PTR_ERR(cp->queue_th);
-		goto err_thread;
+		goto err_unmap_sram;
 	}
 
 	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
 			cp);
 	if (ret)
-		goto err_unmap_sram;
+		goto err_thread;
 
 	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
 	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
 
 	ret = crypto_register_alg(&mv_aes_alg_ecb);
 	if (ret)
-		goto err_reg;
+		goto err_irq;
 
 	ret = crypto_register_alg(&mv_aes_alg_cbc);
 	if (ret)
@@ -1091,9 +1091,9 @@ static int mv_probe(struct platform_device *pdev)
 	return 0;
 err_unreg_ecb:
 	crypto_unregister_alg(&mv_aes_alg_ecb);
-err_thread:
+err_irq:
 	free_irq(irq, cp);
-err_reg:
+err_thread:
 	kthread_stop(cp->queue_th);
 err_unmap_sram:
 	iounmap(cp->sram);
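
The two hunks above make the error labels in mv_probe() unwind in the reverse order of setup: a failing kthread_run() now skips free_irq()/kthread_stop() entirely, and a failing request_irq() only stops the thread. A stand-alone sketch of the same pattern, with stand-in resources instead of the driver's (illustrative only, not driver code):

	#include <stdlib.h>

	/* Mimics the probe/unwind shape of mv_probe(); malloc() stands in
	 * for the SRAM mapping, kthread and IRQ purely for illustration. */
	static int demo_probe(void)
	{
		void *sram, *thread, *irq;
		int ret = -1;

		sram = malloc(16);          /* like mapping the SRAM    */
		if (!sram)
			goto err_none;
		thread = malloc(16);        /* like kthread_run()       */
		if (!thread)
			goto err_unmap_sram;
		irq = malloc(16);           /* like request_irq()       */
		if (!irq)
			goto err_thread;

		free(irq);
		free(thread);
		free(sram);
		return 0;

	err_thread:                         /* undo in reverse order    */
		free(thread);
	err_unmap_sram:
		free(sram);
	err_none:
		return ret;
	}

	int main(void)
	{
		return demo_probe();
	}

Each label releases only what was successfully acquired before the corresponding failure point, which is exactly the ordering the relabelling restores.
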
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 23163fda5035..b99c38f23d61 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -239,21 +239,57 @@ static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
 }
 #endif
 
-struct n2_base_ctx {
-	struct list_head list;
+struct n2_ahash_alg {
+	struct list_head entry;
+	const char *hash_zero;
+	const u32 *hash_init;
+	u8 hw_op_hashsz;
+	u8 digest_size;
+	u8 auth_type;
+	u8 hmac_type;
+	struct ahash_alg alg;
 };
 
-static void n2_base_ctx_init(struct n2_base_ctx *ctx)
+static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
 {
-	INIT_LIST_HEAD(&ctx->list);
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct ahash_alg *ahash_alg;
+
+	ahash_alg = container_of(alg, struct ahash_alg, halg.base);
+
+	return container_of(ahash_alg, struct n2_ahash_alg, alg);
 }
 
-struct n2_hash_ctx {
-	struct n2_base_ctx base;
+struct n2_hmac_alg {
+	const char *child_alg;
+	struct n2_ahash_alg derived;
+};
+
+static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct ahash_alg *ahash_alg;
+
+	ahash_alg = container_of(alg, struct ahash_alg, halg.base);
+
+	return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
+}
 
+struct n2_hash_ctx {
 	struct crypto_ahash *fallback_tfm;
 };
 
+#define N2_HASH_KEY_MAX	32 /* HW limit for all HMAC requests */
+
+struct n2_hmac_ctx {
+	struct n2_hash_ctx base;
+
+	struct crypto_shash *child_shash;
+
+	int hash_key_len;
+	unsigned char hash_key[N2_HASH_KEY_MAX];
+};
+
 struct n2_hash_req_ctx {
 	union {
 		struct md5_state md5;
@@ -261,9 +297,6 @@ struct n2_hash_req_ctx {
 		struct sha256_state sha256;
 	} u;
 
-	unsigned char hash_key[64];
-	unsigned char keyed_zero_hash[32];
-
 	struct ahash_request fallback_req;
 };
 
@@ -356,6 +389,94 @@ static void n2_hash_cra_exit(struct crypto_tfm *tfm)
 	crypto_free_ahash(ctx->fallback_tfm);
 }
 
+static int n2_hmac_cra_init(struct crypto_tfm *tfm)
+{
+	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
+	struct crypto_ahash *fallback_tfm;
+	struct crypto_shash *child_shash;
+	int err;
+
+	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+					  CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback_tfm)) {
+		pr_warning("Fallback driver '%s' could not be loaded!\n",
+			   fallback_driver_name);
+		err = PTR_ERR(fallback_tfm);
+		goto out;
+	}
+
+	child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
+	if (IS_ERR(child_shash)) {
+		pr_warning("Child shash '%s' could not be loaded!\n",
+			   n2alg->child_alg);
+		err = PTR_ERR(child_shash);
+		goto out_free_fallback;
+	}
+
+	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+					 crypto_ahash_reqsize(fallback_tfm)));
+
+	ctx->child_shash = child_shash;
+	ctx->base.fallback_tfm = fallback_tfm;
+	return 0;
+
+out_free_fallback:
+	crypto_free_ahash(fallback_tfm);
+
+out:
+	return err;
+}
+
+static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
+
+	crypto_free_ahash(ctx->base.fallback_tfm);
+	crypto_free_shash(ctx->child_shash);
+}
+
+static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
+				unsigned int keylen)
+{
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct crypto_shash *child_shash = ctx->child_shash;
+	struct crypto_ahash *fallback_tfm;
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(child_shash)];
+	} desc;
+	int err, bs, ds;
+
+	fallback_tfm = ctx->base.fallback_tfm;
+	err = crypto_ahash_setkey(fallback_tfm, key, keylen);
+	if (err)
+		return err;
+
+	desc.shash.tfm = child_shash;
+	desc.shash.flags = crypto_ahash_get_flags(tfm) &
+		CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	bs = crypto_shash_blocksize(child_shash);
+	ds = crypto_shash_digestsize(child_shash);
+	BUG_ON(ds > N2_HASH_KEY_MAX);
+	if (keylen > bs) {
+		err = crypto_shash_digest(&desc.shash, key, keylen,
+					  ctx->hash_key);
+		if (err)
+			return err;
+		keylen = ds;
+	} else if (keylen <= N2_HASH_KEY_MAX)
+		memcpy(ctx->hash_key, key, keylen);
+
+	ctx->hash_key_len = keylen;
+
+	return err;
+}
+
 static unsigned long wait_for_tail(struct spu_queue *qp)
 {
 	unsigned long head, hv_ret;
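
The setkey path added above applies the usual HMAC key rule (RFC 2104) before anything reaches the hardware: the raw key always goes to the fallback tfm, and the copy kept in ctx->hash_key is first shortened with the child shash when it exceeds the block size. For example, an 80-byte key for hmac(sha256) (64-byte block) becomes its 32-byte SHA-256 digest, which also fits the N2_HASH_KEY_MAX limit of 32 bytes; a 48-byte key, by contrast, is neither hashed (it does not exceed the block size) nor copied (it exceeds N2_HASH_KEY_MAX), so hash_key_len stays above the limit and n2_hmac_async_digest() below routes such requests to the software fallback.
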
@@ -385,12 +506,12 @@ static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
 	return hv_ret;
 }
 
-static int n2_hash_async_digest(struct ahash_request *req,
-				unsigned int auth_type, unsigned int digest_size,
-				unsigned int result_size, void *hash_loc)
+static int n2_do_async_digest(struct ahash_request *req,
+			      unsigned int auth_type, unsigned int digest_size,
+			      unsigned int result_size, void *hash_loc,
+			      unsigned long auth_key, unsigned int auth_key_len)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct cwq_initial_entry *ent;
 	struct crypto_hash_walk walk;
 	struct spu_queue *qp;
@@ -403,6 +524,7 @@ static int n2_hash_async_digest(struct ahash_request *req,
 	 */
 	if (unlikely(req->nbytes > (1 << 16))) {
 		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
 		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
 		rctx->fallback_req.base.flags =
@@ -414,8 +536,6 @@ static int n2_hash_async_digest(struct ahash_request *req,
 		return crypto_ahash_digest(&rctx->fallback_req);
 	}
 
-	n2_base_ctx_init(&ctx->base);
-
 	nbytes = crypto_hash_walk_first(req, &walk);
 
 	cpu = get_cpu();
@@ -430,13 +550,13 @@ static int n2_hash_async_digest(struct ahash_request *req,
 	 */
 	ent = qp->q + qp->tail;
 
-	ent->control = control_word_base(nbytes, 0, 0,
+	ent->control = control_word_base(nbytes, auth_key_len, 0,
 					 auth_type, digest_size,
 					 false, true, false, false,
 					 OPCODE_INPLACE_BIT |
 					 OPCODE_AUTH_MAC);
 	ent->src_addr = __pa(walk.data);
-	ent->auth_key_addr = 0UL;
+	ent->auth_key_addr = auth_key;
 	ent->auth_iv_addr = __pa(hash_loc);
 	ent->final_auth_state_addr = 0UL;
 	ent->enc_key_addr = 0UL;
@@ -475,114 +595,55 @@ out:
 	return err;
 }
 
-static int n2_md5_async_digest(struct ahash_request *req)
+static int n2_hash_async_digest(struct ahash_request *req)
 {
+	struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
 	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
-	struct md5_state *m = &rctx->u.md5;
+	int ds;
 
+	ds = n2alg->digest_size;
 	if (unlikely(req->nbytes == 0)) {
-		static const char md5_zero[MD5_DIGEST_SIZE] = {
-			0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
-			0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
-		};
-
-		memcpy(req->result, md5_zero, MD5_DIGEST_SIZE);
+		memcpy(req->result, n2alg->hash_zero, ds);
 		return 0;
 	}
-	m->hash[0] = cpu_to_le32(0x67452301);
-	m->hash[1] = cpu_to_le32(0xefcdab89);
-	m->hash[2] = cpu_to_le32(0x98badcfe);
-	m->hash[3] = cpu_to_le32(0x10325476);
+	memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
 
-	return n2_hash_async_digest(req, AUTH_TYPE_MD5,
-				    MD5_DIGEST_SIZE, MD5_DIGEST_SIZE,
-				    m->hash);
+	return n2_do_async_digest(req, n2alg->auth_type,
+				  n2alg->hw_op_hashsz, ds,
+				  &rctx->u, 0UL, 0);
 }
 
-static int n2_sha1_async_digest(struct ahash_request *req)
+static int n2_hmac_async_digest(struct ahash_request *req)
 {
+	struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
 	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
-	struct sha1_state *s = &rctx->u.sha1;
-
-	if (unlikely(req->nbytes == 0)) {
-		static const char sha1_zero[SHA1_DIGEST_SIZE] = {
-			0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
-			0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
-			0x07, 0x09
-		};
-
-		memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
-		return 0;
-	}
-	s->state[0] = SHA1_H0;
-	s->state[1] = SHA1_H1;
-	s->state[2] = SHA1_H2;
-	s->state[3] = SHA1_H3;
-	s->state[4] = SHA1_H4;
-
-	return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
-				    SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
-				    s->state);
-}
-
-static int n2_sha256_async_digest(struct ahash_request *req)
-{
-	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
-	struct sha256_state *s = &rctx->u.sha256;
-
-	if (req->nbytes == 0) {
-		static const char sha256_zero[SHA256_DIGEST_SIZE] = {
-			0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
-			0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
-			0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
-			0x1b, 0x78, 0x52, 0xb8, 0x55
-		};
-
-		memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
-		return 0;
-	}
-	s->state[0] = SHA256_H0;
-	s->state[1] = SHA256_H1;
-	s->state[2] = SHA256_H2;
-	s->state[3] = SHA256_H3;
-	s->state[4] = SHA256_H4;
-	s->state[5] = SHA256_H5;
-	s->state[6] = SHA256_H6;
-	s->state[7] = SHA256_H7;
-
-	return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
-				    SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
-				    s->state);
-}
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
+	int ds;
 
-static int n2_sha224_async_digest(struct ahash_request *req)
-{
-	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
-	struct sha256_state *s = &rctx->u.sha256;
+	ds = n2alg->derived.digest_size;
+	if (unlikely(req->nbytes == 0) ||
+	    unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
+		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	if (req->nbytes == 0) {
-		static const char sha224_zero[SHA224_DIGEST_SIZE] = {
-			0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
-			0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
-			0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
-			0x2f
-		};
+		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+		rctx->fallback_req.base.flags =
+			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+		rctx->fallback_req.nbytes = req->nbytes;
+		rctx->fallback_req.src = req->src;
+		rctx->fallback_req.result = req->result;
 
-		memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
-		return 0;
+		return crypto_ahash_digest(&rctx->fallback_req);
 	}
-	s->state[0] = SHA224_H0;
-	s->state[1] = SHA224_H1;
-	s->state[2] = SHA224_H2;
-	s->state[3] = SHA224_H3;
-	s->state[4] = SHA224_H4;
-	s->state[5] = SHA224_H5;
-	s->state[6] = SHA224_H6;
-	s->state[7] = SHA224_H7;
+	memcpy(&rctx->u, n2alg->derived.hash_init,
+	       n2alg->derived.hw_op_hashsz);
 
-	return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
-				    SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
-				    s->state);
+	return n2_do_async_digest(req, n2alg->derived.hmac_type,
+				  n2alg->derived.hw_op_hashsz, ds,
+				  &rctx->u,
+				  __pa(&ctx->hash_key),
+				  ctx->hash_key_len);
 }
 
 struct n2_cipher_context {
@@ -1209,35 +1270,92 @@ static LIST_HEAD(cipher_algs);
 
 struct n2_hash_tmpl {
 	const char *name;
-	int (*digest)(struct ahash_request *req);
+	const char *hash_zero;
+	const u32 *hash_init;
+	u8 hw_op_hashsz;
 	u8 digest_size;
 	u8 block_size;
+	u8 auth_type;
+	u8 hmac_type;
+};
+
+static const char md5_zero[MD5_DIGEST_SIZE] = {
+	0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+	0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+};
+static const u32 md5_init[MD5_HASH_WORDS] = {
+	cpu_to_le32(0x67452301),
+	cpu_to_le32(0xefcdab89),
+	cpu_to_le32(0x98badcfe),
+	cpu_to_le32(0x10325476),
+};
+static const char sha1_zero[SHA1_DIGEST_SIZE] = {
+	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
+	0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
+	0x07, 0x09
 };
+static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
+	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
+};
+static const char sha256_zero[SHA256_DIGEST_SIZE] = {
+	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
+	0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
+	0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
+	0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
+	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
+};
+static const char sha224_zero[SHA224_DIGEST_SIZE] = {
+	0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+	0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+	0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+	0x2f
+};
+static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
+	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
+};
+
 static const struct n2_hash_tmpl hash_tmpls[] = {
 	{ .name = "md5",
-	  .digest = n2_md5_async_digest,
+	  .hash_zero = md5_zero,
+	  .hash_init = md5_init,
+	  .auth_type = AUTH_TYPE_MD5,
+	  .hmac_type = AUTH_TYPE_HMAC_MD5,
+	  .hw_op_hashsz = MD5_DIGEST_SIZE,
 	  .digest_size = MD5_DIGEST_SIZE,
 	  .block_size = MD5_HMAC_BLOCK_SIZE },
 	{ .name = "sha1",
-	  .digest = n2_sha1_async_digest,
+	  .hash_zero = sha1_zero,
+	  .hash_init = sha1_init,
+	  .auth_type = AUTH_TYPE_SHA1,
+	  .hmac_type = AUTH_TYPE_HMAC_SHA1,
+	  .hw_op_hashsz = SHA1_DIGEST_SIZE,
 	  .digest_size = SHA1_DIGEST_SIZE,
 	  .block_size = SHA1_BLOCK_SIZE },
 	{ .name = "sha256",
-	  .digest = n2_sha256_async_digest,
+	  .hash_zero = sha256_zero,
+	  .hash_init = sha256_init,
+	  .auth_type = AUTH_TYPE_SHA256,
+	  .hmac_type = AUTH_TYPE_HMAC_SHA256,
+	  .hw_op_hashsz = SHA256_DIGEST_SIZE,
 	  .digest_size = SHA256_DIGEST_SIZE,
 	  .block_size = SHA256_BLOCK_SIZE },
 	{ .name = "sha224",
-	  .digest = n2_sha224_async_digest,
+	  .hash_zero = sha224_zero,
+	  .hash_init = sha224_init,
+	  .auth_type = AUTH_TYPE_SHA256,
+	  .hmac_type = AUTH_TYPE_RESERVED,
+	  .hw_op_hashsz = SHA256_DIGEST_SIZE,
 	  .digest_size = SHA224_DIGEST_SIZE,
 	  .block_size = SHA224_BLOCK_SIZE },
 };
 #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
 
-struct n2_ahash_alg {
-	struct list_head entry;
-	struct ahash_alg alg;
-};
 static LIST_HEAD(ahash_algs);
+static LIST_HEAD(hmac_algs);
 
 static int algs_registered;
 
@@ -1245,12 +1363,18 @@ static void __n2_unregister_algs(void)
 {
 	struct n2_cipher_alg *cipher, *cipher_tmp;
 	struct n2_ahash_alg *alg, *alg_tmp;
+	struct n2_hmac_alg *hmac, *hmac_tmp;
 
 	list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
 		crypto_unregister_alg(&cipher->alg);
 		list_del(&cipher->entry);
 		kfree(cipher);
 	}
+	list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
+		crypto_unregister_ahash(&hmac->derived.alg);
+		list_del(&hmac->derived.entry);
+		kfree(hmac);
+	}
 	list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
 		crypto_unregister_ahash(&alg->alg);
 		list_del(&alg->entry);
@@ -1290,8 +1414,49 @@ static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
 	list_add(&p->entry, &cipher_algs);
 	err = crypto_register_alg(alg);
 	if (err) {
+		pr_err("%s alg registration failed\n", alg->cra_name);
 		list_del(&p->entry);
 		kfree(p);
+	} else {
+		pr_info("%s alg registered\n", alg->cra_name);
+	}
+	return err;
+}
+
+static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
+{
+	struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+	struct ahash_alg *ahash;
+	struct crypto_alg *base;
+	int err;
+
+	if (!p)
+		return -ENOMEM;
+
+	p->child_alg = n2ahash->alg.halg.base.cra_name;
+	memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
+	INIT_LIST_HEAD(&p->derived.entry);
+
+	ahash = &p->derived.alg;
+	ahash->digest = n2_hmac_async_digest;
+	ahash->setkey = n2_hmac_async_setkey;
+
+	base = &ahash->halg.base;
+	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
+	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
+
+	base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
+	base->cra_init = n2_hmac_cra_init;
+	base->cra_exit = n2_hmac_cra_exit;
+
+	list_add(&p->derived.entry, &hmac_algs);
+	err = crypto_register_ahash(ahash);
+	if (err) {
+		pr_err("%s alg registration failed\n", base->cra_name);
+		list_del(&p->derived.entry);
+		kfree(p);
+	} else {
+		pr_info("%s alg registered\n", base->cra_name);
 	}
 	return err;
 }
@@ -1307,12 +1472,19 @@ static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
 	if (!p)
 		return -ENOMEM;
 
+	p->hash_zero = tmpl->hash_zero;
+	p->hash_init = tmpl->hash_init;
+	p->auth_type = tmpl->auth_type;
+	p->hmac_type = tmpl->hmac_type;
+	p->hw_op_hashsz = tmpl->hw_op_hashsz;
+	p->digest_size = tmpl->digest_size;
+
 	ahash = &p->alg;
 	ahash->init = n2_hash_async_init;
 	ahash->update = n2_hash_async_update;
 	ahash->final = n2_hash_async_final;
 	ahash->finup = n2_hash_async_finup;
-	ahash->digest = tmpl->digest;
+	ahash->digest = n2_hash_async_digest;
 
 	halg = &ahash->halg;
 	halg->digestsize = tmpl->digest_size;
@@ -1331,9 +1503,14 @@ static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
 	list_add(&p->entry, &ahash_algs);
 	err = crypto_register_ahash(ahash);
 	if (err) {
+		pr_err("%s alg registration failed\n", base->cra_name);
 		list_del(&p->entry);
 		kfree(p);
+	} else {
+		pr_info("%s alg registered\n", base->cra_name);
 	}
+	if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
+		err = __n2_register_one_hmac(p);
 	return err;
 }
 
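
With the template-driven registration above, each base hash also gains an "hmac(<alg>)" ahash (driver name "hmac-<alg>-n2") unless its hmac_type is AUTH_TYPE_RESERVED, as it is for sha224. A minimal sketch of how a kernel-side caller could exercise one of these through the generic ahash API (not taken from the patch; the helper name and buffers are made up, and the asynchronous -EINPROGRESS/-EBUSY completion handling is elided):

	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <crypto/hash.h>

	static int demo_hmac_sha1(const u8 *key, unsigned int keylen,
				  const u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_ahash *tfm;
		struct ahash_request *req;
		struct scatterlist sg;
		int err;

		/* Resolves to hmac-sha1-n2 when this driver's priority wins. */
		tfm = crypto_alloc_ahash("hmac(sha1)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_ahash_setkey(tfm, key, keylen);
		if (err)
			goto out_free_tfm;

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		sg_init_one(&sg, data, len);
		ahash_request_set_callback(req, 0, NULL, NULL);
		ahash_request_set_crypt(req, &sg, out, len);

		/* A real caller must also wait for async completion here. */
		err = crypto_ahash_digest(req);

		ahash_request_free(req);
	out_free_tfm:
		crypto_free_ahash(tfm);
		return err;
	}
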
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 8b034337793f..7d1485676886 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -15,7 +15,6 @@
 
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
-#include <linux/version.h>
 #include <linux/err.h>
 #include <linux/device.h>
 #include <linux/module.h>
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index bd78acf3c365..97f4af1d8a64 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -720,7 +720,6 @@ struct talitos_ctx {
 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
 
 struct talitos_ahash_req_ctx {
-	u64 count;
 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
 	unsigned int hw_context_size;
 	u8 buf[HASH_MAX_BLOCK_SIZE];
@@ -729,6 +728,7 @@ struct talitos_ahash_req_ctx {
 	unsigned int first;
 	unsigned int last;
 	unsigned int to_hash_later;
+	u64 nbuf;
 	struct scatterlist bufsl[2];
 	struct scatterlist *psrc;
 };
@@ -1613,6 +1613,7 @@ static void ahash_done(struct device *dev,
 	if (!req_ctx->last && req_ctx->to_hash_later) {
 		/* Position any partial block for next update/final/finup */
 		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
+		req_ctx->nbuf = req_ctx->to_hash_later;
 	}
 	common_nonsnoop_hash_unmap(dev, edesc, areq);
 
@@ -1728,7 +1729,7 @@ static int ahash_init(struct ahash_request *areq)
 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
 
 	/* Initialize the context */
-	req_ctx->count = 0;
+	req_ctx->nbuf = 0;
 	req_ctx->first = 1; /* first indicates h/w must init its context */
 	req_ctx->swinit = 0; /* assume h/w init of context */
 	req_ctx->hw_context_size =
@@ -1776,52 +1777,54 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
 	unsigned int nbytes_to_hash;
 	unsigned int to_hash_later;
-	unsigned int index;
+	unsigned int nsg;
 	int chained;
 
-	index = req_ctx->count & (blocksize - 1);
-	req_ctx->count += nbytes;
-
-	if (!req_ctx->last && (index + nbytes) < blocksize) {
-		/* Buffer the partial block */
+	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
+		/* Buffer up to one whole block */
 		sg_copy_to_buffer(areq->src,
 				  sg_count(areq->src, nbytes, &chained),
-				  req_ctx->buf + index, nbytes);
+				  req_ctx->buf + req_ctx->nbuf, nbytes);
+		req_ctx->nbuf += nbytes;
 		return 0;
 	}
 
-	if (index) {
-		/* partial block from previous update; chain it in. */
-		sg_init_table(req_ctx->bufsl, (nbytes) ? 2 : 1);
-		sg_set_buf(req_ctx->bufsl, req_ctx->buf, index);
-		if (nbytes)
-			scatterwalk_sg_chain(req_ctx->bufsl, 2,
-					     areq->src);
+	/* At least (blocksize + 1) bytes are available to hash */
+	nbytes_to_hash = nbytes + req_ctx->nbuf;
+	to_hash_later = nbytes_to_hash & (blocksize - 1);
+
+	if (req_ctx->last)
+		to_hash_later = 0;
+	else if (to_hash_later)
+		/* There is a partial block. Hash the full block(s) now */
+		nbytes_to_hash -= to_hash_later;
+	else {
+		/* Keep one block buffered */
+		nbytes_to_hash -= blocksize;
+		to_hash_later = blocksize;
+	}
+
+	/* Chain in any previously buffered data */
+	if (req_ctx->nbuf) {
+		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
+		sg_init_table(req_ctx->bufsl, nsg);
+		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
+		if (nsg > 1)
+			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
 		req_ctx->psrc = req_ctx->bufsl;
-	} else {
+	} else
 		req_ctx->psrc = areq->src;
+
+	if (to_hash_later) {
+		int nents = sg_count(areq->src, nbytes, &chained);
+		sg_copy_end_to_buffer(areq->src, nents,
+				      req_ctx->bufnext,
+				      to_hash_later,
+				      nbytes - to_hash_later);
 	}
-	nbytes_to_hash = index + nbytes;
-	if (!req_ctx->last) {
-		to_hash_later = (nbytes_to_hash & (blocksize - 1));
-		if (to_hash_later) {
-			int nents;
-			/* Must copy to_hash_later bytes from the end
-			 * to bufnext (a partial block) for later.
-			 */
-			nents = sg_count(areq->src, nbytes, &chained);
-			sg_copy_end_to_buffer(areq->src, nents,
-					      req_ctx->bufnext,
-					      to_hash_later,
-					      nbytes - to_hash_later);
-
-			/* Adjust count for what will be hashed now */
-			nbytes_to_hash -= to_hash_later;
-		}
-		req_ctx->to_hash_later = to_hash_later;
-	}
+	req_ctx->to_hash_later = to_hash_later;
 
-	/* allocate extended descriptor */
+	/* Allocate extended descriptor */
 	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
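
The rewritten ahash_process_req() above replaces the running byte count with nbuf, the number of bytes currently buffered, and always keeps either the trailing partial block or one full block for a later pass. A worked example of the new arithmetic (numbers are illustrative; a 64-byte block size is assumed):

	/*
	 * nbuf = 10 bytes already buffered, update arrives with nbytes = 150:
	 *   nbytes_to_hash = 150 + 10       = 160
	 *   to_hash_later  = 160 & (64 - 1) =  32
	 * so 128 bytes are hashed now and 32 bytes are copied to bufnext
	 * for the next update/final.  Had the total been an exact multiple
	 * of the block size (and this not the final request), one whole
	 * block (64 bytes) would be held back instead, so a later "last"
	 * pass always has data left to hash.
	 */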