author:    Dmitry Kasatkin <dmitry.kasatkin@nokia.com>  2010-11-30 03:13:29 -0500
committer: Herbert Xu <herbert@gondor.apana.org.au>     2010-12-02 03:37:06 -0500
commit:    21fe9767f3bd56fd9a271dc43b93cd4608d47f4a (patch)
tree:      05dc1fc0e660088fba7ad8b4645b8564a6cf7883
parent:    eeb2b202c5b886b76c3bfa76f47e450fa69389fb (diff)
crypto: omap-aes - error handling implementation improved
The previous version had no error handling;
a request could remain uncompleted.
Also, in the case of a DMA error, FLAGS_INIT is cleared
so that the accelerator will be initialized again.
Buffer size alignment is checked.
Signed-off-by: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--  drivers/crypto/omap-aes.c | 134
1 file changed, 93 insertions(+), 41 deletions(-)
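In short: the DMA completion callback now records any channel error in dd->err and clears FLAGS_INIT so the accelerator is re-initialized for the next request, and the done tasklet always completes the request, folding the callback's error into the result. A condensed sketch of that flow (abridged from the diff below, not the literal driver source):

	/* Abridged from the patch: how a DMA error reaches the caller. */
	static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
	{
		struct omap_aes_dev *dd = data;

		if (ch_status != OMAP_DMA_BLOCK_IRQ) {
			dd->err = -EIO;			/* remember the failure */
			dd->flags &= ~FLAGS_INIT;	/* force hw re-init next time */
		} else if (lch == dd->dma_lch_in) {
			return;				/* wait for the out channel */
		}
		tasklet_schedule(&dd->done_task);
	}

	static void omap_aes_done_task(unsigned long data)
	{
		struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
		int err;

		err = omap_aes_crypt_dma_stop(dd);
		err = dd->err ? : err;		/* callback error takes precedence */

		if (dd->total && !err) {
			err = omap_aes_crypt_dma_start(dd);
			if (!err)
				return;		/* next chunk started; not done yet */
		}

		/* On error or completion, always finish the request, then run the queue. */
		omap_aes_finish_req(dd, err);
		omap_aes_handle_queue(dd, NULL);
	}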
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 2d8f72eaf896..704cc701ab42 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -103,14 +103,16 @@ struct omap_aes_dev {
 	struct omap_aes_ctx	*ctx;
 	struct device		*dev;
 	unsigned long		flags;
+	int			err;
 
 	u32			*iv;
 	u32			ctrl;
 
 	spinlock_t		lock;
 	struct crypto_queue	queue;
 
-	struct tasklet_struct	task;
+	struct tasklet_struct	done_task;
+	struct tasklet_struct	queue_task;
 
 	struct ablkcipher_request	*req;
 	size_t				total;
@@ -198,24 +200,30 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
 			return -ETIMEDOUT;
 		}
 		dd->flags |= FLAGS_INIT;
+		dd->err = 0;
 	}
 
 	return 0;
 }
 
-static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
+static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 {
 	unsigned int key32;
-	int i;
+	int i, err, init = dd->flags & FLAGS_INIT;
 	u32 val, mask;
 
+	err = omap_aes_hw_init(dd);
+	if (err)
+		return err;
+
 	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
 	if (dd->flags & FLAGS_CBC)
 		val |= AES_REG_CTRL_CBC;
 	if (dd->flags & FLAGS_ENCRYPT)
 		val |= AES_REG_CTRL_DIRECTION;
 
-	if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
+	/* check if hw state & mode have not changed */
+	if (init && dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
 	    !(dd->ctx->flags & FLAGS_NEW_KEY))
 		goto out;
 
@@ -257,6 +265,8 @@ out:
 	/* start DMA or disable idle mode */
 	omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
 			    AES_REG_MASK_START);
+
+	return 0;
 }
 
 static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
@@ -284,8 +294,16 @@ static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
 {
 	struct omap_aes_dev *dd = data;
 
-	if (lch == dd->dma_lch_out)
-		tasklet_schedule(&dd->task);
+	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
+		pr_err("omap-aes DMA error status: 0x%hx\n", ch_status);
+		dd->err = -EIO;
+		dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+	} else if (lch == dd->dma_lch_in) {
+		return;
+	}
+
+	/* dma_lch_out - completed */
+	tasklet_schedule(&dd->done_task);
 }
 
 static int omap_aes_dma_init(struct omap_aes_dev *dd)
291 | static int omap_aes_dma_init(struct omap_aes_dev *dd) | 309 | static int omap_aes_dma_init(struct omap_aes_dev *dd) |
@@ -390,6 +408,11 @@ static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf, | |||
390 | if (!count) | 408 | if (!count) |
391 | return off; | 409 | return off; |
392 | 410 | ||
411 | /* | ||
412 | * buflen and total are AES_BLOCK_SIZE size aligned, | ||
413 | * so count should be also aligned | ||
414 | */ | ||
415 | |||
393 | sg_copy_buf(buf + off, *sg, *offset, count, out); | 416 | sg_copy_buf(buf + off, *sg, *offset, count, out); |
394 | 417 | ||
395 | off += count; | 418 | off += count; |
@@ -415,6 +438,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
 	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct omap_aes_dev *dd = ctx->dd;
 	int len32;
+	int err;
 
 	pr_debug("len: %d\n", length);
 
@@ -454,11 +478,13 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
 	omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC,
 				 dma_addr_out, 0, 0);
 
+	err = omap_aes_write_ctrl(dd);
+	if (err)
+		return err;
+
 	omap_start_dma(dd->dma_lch_in);
 	omap_start_dma(dd->dma_lch_out);
 
-	omap_aes_write_ctrl(dd);
-
 	return 0;
 }
 
@@ -484,8 +510,10 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
 		count = min(dd->total, sg_dma_len(dd->in_sg));
 		count = min(count, sg_dma_len(dd->out_sg));
 
-		if (count != dd->total)
+		if (count != dd->total) {
+			pr_err("request length != buffer length\n");
 			return -EINVAL;
+		}
 
 		pr_debug("fast\n");
 
@@ -521,25 +549,28 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
 
 	dd->total -= count;
 
-	err = omap_aes_hw_init(dd);
-
 	err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);
+	if (err) {
+		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+	}
 
 	return err;
 }
 
 static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 {
+	struct ablkcipher_request *req = dd->req;
 	struct omap_aes_ctx *ctx;
 
 	pr_debug("err: %d\n", err);
 
 	dd->flags &= ~FLAGS_BUSY;
 
-	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req));
+	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
 
-	if (!dd->total)
-		dd->req->base.complete(&dd->req->base, err);
+	if (req->base.complete)
+		req->base.complete(&req->base, err);
 }
 
 static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -551,11 +582,11 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 
 	omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);
 
-	clk_disable(dd->iclk);
-
 	omap_stop_dma(dd->dma_lch_in);
 	omap_stop_dma(dd->dma_lch_out);
 
+	clk_disable(dd->iclk);
+
 	if (dd->flags & FLAGS_FAST) {
 		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
 		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
@@ -572,27 +603,24 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 		}
 	}
 
-	if (err || !dd->total)
-		omap_aes_finish_req(dd, err);
-
 	return err;
 }
 
-static int omap_aes_handle_req(struct omap_aes_dev *dd,
+static int omap_aes_handle_queue(struct omap_aes_dev *dd,
 			       struct ablkcipher_request *req)
 {
 	struct crypto_async_request *async_req, *backlog;
 	struct omap_aes_ctx *ctx;
 	struct omap_aes_reqctx *rctx;
 	unsigned long flags;
-	int err = 0;
+	int err, ret = 0;
 
 	spin_lock_irqsave(&dd->lock, flags);
 	if (req)
-		err = ablkcipher_enqueue_request(&dd->queue, req);
+		ret = ablkcipher_enqueue_request(&dd->queue, req);
 	if (dd->flags & FLAGS_BUSY) {
 		spin_unlock_irqrestore(&dd->lock, flags);
-		return err;
+		return ret;
 	}
 	backlog = crypto_get_backlog(&dd->queue);
 	async_req = crypto_dequeue_request(&dd->queue);
@@ -601,7 +629,7 @@ static int omap_aes_handle_req(struct omap_aes_dev *dd,
 	spin_unlock_irqrestore(&dd->lock, flags);
 
 	if (!async_req)
-		return 0;
+		return ret;
 
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
@@ -636,30 +664,46 @@ static int omap_aes_handle_req(struct omap_aes_dev *dd,
 		ctx->flags |= FLAGS_NEW_KEY;
 	}
 
-	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
-		pr_err("request size is not exact amount of AES blocks\n");
-
-	omap_aes_crypt_dma_start(dd);
+	err = omap_aes_crypt_dma_start(dd);
+	if (err) {
+		/* aes_task will not finish it, so do it here */
+		omap_aes_finish_req(dd, err);
+		tasklet_schedule(&dd->queue_task);
+	}
 
-	return err;
+	return ret; /* return ret, which is enqueue return value */
 }
 
-static void omap_aes_task(unsigned long data)
+static void omap_aes_done_task(unsigned long data)
 {
 	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
+	int err;
 
 	pr_debug("enter\n");
 
-	omap_aes_crypt_dma_stop(dd);
+	err = omap_aes_crypt_dma_stop(dd);
 
-	if (dd->total)
-		omap_aes_crypt_dma_start(dd);
-	else
-		omap_aes_handle_req(dd, NULL);
+	err = dd->err ? : err;
+
+	if (dd->total && !err) {
+		err = omap_aes_crypt_dma_start(dd);
+		if (!err)
+			return; /* DMA started. Not fininishing. */
+	}
+
+	omap_aes_finish_req(dd, err);
+	omap_aes_handle_queue(dd, NULL);
 
 	pr_debug("exit\n");
 }
 
+static void omap_aes_queue_task(unsigned long data)
+{
+	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
+
+	omap_aes_handle_queue(dd, NULL);
+}
+
 static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
 	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
@@ -671,13 +715,18 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 		  !!(mode & FLAGS_ENCRYPT),
 		  !!(mode & FLAGS_CBC));
 
+	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+		pr_err("request size is not exact amount of AES blocks\n");
+		return -EINVAL;
+	}
+
 	dd = omap_aes_find_dev(ctx);
 	if (!dd)
 		return -ENODEV;
 
 	rctx->mode = mode;
 
-	return omap_aes_handle_req(dd, req);
+	return omap_aes_handle_queue(dd, req);
 }
 
 /* ********************** ALG API ************************************ */
@@ -843,7 +892,8 @@ static int omap_aes_probe(struct platform_device *pdev)
 		 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
 	clk_disable(dd->iclk);
 
-	tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd);
+	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
+	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
 
 	err = omap_aes_dma_init(dd);
 	if (err)
@@ -870,7 +920,8 @@ err_algs:
 		crypto_unregister_alg(&algs[j]);
 	omap_aes_dma_cleanup(dd);
 err_dma:
-	tasklet_kill(&dd->task);
+	tasklet_kill(&dd->done_task);
+	tasklet_kill(&dd->queue_task);
 	iounmap(dd->io_base);
 err_io:
 	clk_put(dd->iclk);
@@ -897,7 +948,8 @@ static int omap_aes_remove(struct platform_device *pdev)
 	for (i = 0; i < ARRAY_SIZE(algs); i++)
 		crypto_unregister_alg(&algs[i]);
 
-	tasklet_kill(&dd->task);
+	tasklet_kill(&dd->done_task);
+	tasklet_kill(&dd->queue_task);
 	omap_aes_dma_cleanup(dd);
 	iounmap(dd->io_base);
 	clk_put(dd->iclk);
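
A note on `err = dd->err ? : err;` in omap_aes_done_task(): this is GNU C's binary conditional operator, where `a ? : b` means `a ? a : b` with `a` evaluated only once, so an error recorded by the DMA callback takes precedence over the return value of omap_aes_crypt_dma_stop(). A minimal standalone illustration (hypothetical values, buildable with gcc):

	#include <stdio.h>

	int main(void)
	{
		int dma_err = -5;	/* e.g. -EIO recorded by the DMA callback */
		int stop_err = -1;	/* whatever omap_aes_crypt_dma_stop() returned */

		/* GNU extension: a ? : b evaluates to a if a is non-zero, else b. */
		int err = dma_err ? : stop_err;

		printf("err = %d\n", err);	/* prints -5: the callback error wins */
		return 0;
	}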