author     Dmitry Kasatkin <dmitry.kasatkin@nokia.com>   2010-11-30 03:13:28 -0500
committer  Herbert Xu <herbert@gondor.apana.org.au>      2010-12-02 03:37:06 -0500
commit     eeb2b202c5b886b76c3bfa76f47e450fa69389fb (patch)
tree       8d7c63122112203a5227639e42e418b068f0d1d5 /drivers/crypto
parent     3bd2e2216bc82a83fc5048f8e61d2d22dd5d9cda (diff)
crypto: omap-aes - redundant locking is removed
Submitting a request involved taking the queue lock twice: once to enqueue and once to dequeue. Both are now done under a single lock acquisition, and FLAGS_BUSY is handled under that same lock.

Signed-off-by: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
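The pattern this patch introduces — enqueue the new request, test FLAGS_BUSY, and dequeue the next request, all under dd->lock — can be illustrated outside the kernel. The sketch below is a minimal userspace model of that single-lock handler, not driver code: a pthread mutex stands in for the spinlock, and the names (struct engine, queue_push, queue_pop, engine_handle_req) are hypothetical, chosen only to mirror the shape of omap_aes_handle_req() after this change.

/*
 * Simplified userspace model of the single-lock enqueue/dispatch pattern.
 * All names here (engine, queue_push, queue_pop, ...) are hypothetical;
 * only the locking shape mirrors the patch.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct request { int id; struct request *next; };

struct engine {
        pthread_mutex_t lock;
        struct request *head, *tail;    /* pending requests */
        bool busy;                      /* FLAGS_BUSY equivalent */
};

static void queue_push(struct engine *e, struct request *req)
{
        req->next = NULL;
        if (e->tail)
                e->tail->next = req;
        else
                e->head = req;
        e->tail = req;
}

static struct request *queue_pop(struct engine *e)
{
        struct request *req = e->head;

        if (req) {
                e->head = req->next;
                if (!e->head)
                        e->tail = NULL;
        }
        return req;
}

/* Enqueue (if req != NULL), then dispatch one request - all under one lock. */
static int engine_handle_req(struct engine *e, struct request *req)
{
        struct request *next;

        pthread_mutex_lock(&e->lock);
        if (req)
                queue_push(e, req);
        if (e->busy) {                  /* someone else is driving the hardware */
                pthread_mutex_unlock(&e->lock);
                return 0;
        }
        next = queue_pop(e);
        if (next)
                e->busy = true;         /* claimed while still holding the lock */
        pthread_mutex_unlock(&e->lock);

        if (!next)
                return 0;

        printf("processing request %d\n", next->id);    /* start "DMA" here */
        return 0;
}

int main(void)
{
        static struct engine e = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct request r1 = { .id = 1 }, r2 = { .id = 2 };

        engine_handle_req(&e, &r1);     /* starts r1, sets busy */
        engine_handle_req(&e, &r2);     /* only queues r2: engine is busy */

        /* completion path: clear busy, then drive the queue again */
        pthread_mutex_lock(&e.lock);
        e.busy = false;
        pthread_mutex_unlock(&e.lock);
        engine_handle_req(&e, NULL);    /* starts r2 */

        return 0;
}

As in the driver after this patch, a caller that finds the engine busy simply leaves its request on the queue, and the completion path clears the busy flag and calls the handler again with a NULL request to pick up whatever is pending.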
Diffstat (limited to 'drivers/crypto')
-rw-r--r--   drivers/crypto/omap-aes.c   |   70
1 file changed, 32 insertions(+), 38 deletions(-)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 41c91f3c7f14..2d8f72eaf896 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -78,7 +78,7 @@
 #define FLAGS_NEW_IV            BIT(5)
 #define FLAGS_INIT              BIT(6)
 #define FLAGS_FAST              BIT(7)
-#define FLAGS_BUSY              8
+#define FLAGS_BUSY              BIT(8)
 
 struct omap_aes_ctx {
         struct omap_aes_dev *dd;
@@ -179,9 +179,8 @@ static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit)
 
 static int omap_aes_hw_init(struct omap_aes_dev *dd)
 {
-        int err = 0;
-
         clk_enable(dd->iclk);
+
         if (!(dd->flags & FLAGS_INIT)) {
                 /* is it necessary to reset before every operation? */
                 omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
@@ -193,18 +192,15 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
                 __asm__ __volatile__("nop");
                 __asm__ __volatile__("nop");
 
-                err = omap_aes_wait(dd, AES_REG_SYSSTATUS,
-                                AES_REG_SYSSTATUS_RESETDONE);
-                if (!err)
-                        dd->flags |= FLAGS_INIT;
+                if (omap_aes_wait(dd, AES_REG_SYSSTATUS,
+                                AES_REG_SYSSTATUS_RESETDONE)) {
+                        clk_disable(dd->iclk);
+                        return -ETIMEDOUT;
+                }
+                dd->flags |= FLAGS_INIT;
         }
 
-        return err;
-}
-
-static void omap_aes_hw_cleanup(struct omap_aes_dev *dd)
-{
-        clk_disable(dd->iclk);
+        return 0;
 }
 
 static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
@@ -538,6 +534,8 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 
         pr_debug("err: %d\n", err);
 
+        dd->flags &= ~FLAGS_BUSY;
+
         ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req));
 
         if (!dd->total)
@@ -553,7 +551,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 
         omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);
 
-        omap_aes_hw_cleanup(dd);
+        clk_disable(dd->iclk);
 
         omap_stop_dma(dd->dma_lch_in);
         omap_stop_dma(dd->dma_lch_out);
@@ -580,22 +578,26 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
         return err;
 }
 
-static int omap_aes_handle_req(struct omap_aes_dev *dd)
+static int omap_aes_handle_req(struct omap_aes_dev *dd,
+                               struct ablkcipher_request *req)
 {
         struct crypto_async_request *async_req, *backlog;
         struct omap_aes_ctx *ctx;
         struct omap_aes_reqctx *rctx;
-        struct ablkcipher_request *req;
         unsigned long flags;
-
-        if (dd->total)
-                goto start;
+        int err = 0;
 
         spin_lock_irqsave(&dd->lock, flags);
+        if (req)
+                err = ablkcipher_enqueue_request(&dd->queue, req);
+        if (dd->flags & FLAGS_BUSY) {
+                spin_unlock_irqrestore(&dd->lock, flags);
+                return err;
+        }
         backlog = crypto_get_backlog(&dd->queue);
         async_req = crypto_dequeue_request(&dd->queue);
-        if (!async_req)
-                clear_bit(FLAGS_BUSY, &dd->flags);
+        if (async_req)
+                dd->flags |= FLAGS_BUSY;
         spin_unlock_irqrestore(&dd->lock, flags);
 
         if (!async_req)
@@ -637,20 +639,23 @@ static int omap_aes_handle_req(struct omap_aes_dev *dd)
         if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
                 pr_err("request size is not exact amount of AES blocks\n");
 
-start:
-        return omap_aes_crypt_dma_start(dd);
+        omap_aes_crypt_dma_start(dd);
+
+        return err;
 }
 
 static void omap_aes_task(unsigned long data)
 {
         struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
-        int err;
 
         pr_debug("enter\n");
 
-        err = omap_aes_crypt_dma_stop(dd);
+        omap_aes_crypt_dma_stop(dd);
 
-        err = omap_aes_handle_req(dd);
+        if (dd->total)
+                omap_aes_crypt_dma_start(dd);
+        else
+                omap_aes_handle_req(dd, NULL);
 
         pr_debug("exit\n");
 }
@@ -661,8 +666,6 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
                         crypto_ablkcipher_reqtfm(req));
         struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
         struct omap_aes_dev *dd;
-        unsigned long flags;
-        int err;
 
         pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
                  !!(mode & FLAGS_ENCRYPT),
@@ -674,16 +677,7 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 
         rctx->mode = mode;
 
-        spin_lock_irqsave(&dd->lock, flags);
-        err = ablkcipher_enqueue_request(&dd->queue, req);
-        spin_unlock_irqrestore(&dd->lock, flags);
-
-        if (!test_and_set_bit(FLAGS_BUSY, &dd->flags))
-                omap_aes_handle_req(dd);
-
-        pr_debug("exit\n");
-
-        return err;
+        return omap_aes_handle_req(dd, req);
 }
 
 /* ********************** ALG API ************************************ */