author		Dmitry Kasatkin <dmitry.kasatkin@nokia.com>	2011-06-02 14:10:05 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2011-06-29 19:44:02 -0400
commit		ea1fd2246f3c3c6b739529db2a547fa080cf09a3
tree		529edffe38e89d187362192654e9f8950bceb936 /drivers/crypto
parent		c3304721ed666804395fc340f1aa347b18f1dda0
crypto: omap-sham - replace flags bit mask with bit number
Flag masks cannot be used with the atomic bit operations, which take a
bit number rather than a mask. This patch therefore changes the flag
masks to bit numbers. The atomic bit operations will be used by
following patches.
Signed-off-by: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
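
For context on the reasoning in the message: the kernel's atomic bitops (set_bit(), clear_bit(), test_and_set_bit(), ...) take a bit number plus a pointer to an unsigned long, not a bit mask, so mask-style defines such as 0x1000 cannot be passed to them. The sketch below is illustrative only and not part of the patch; the flags word and the function are hypothetical stand-ins for dd->flags/ctx->flags, while BIT() and the bitop calls are the standard <linux/bitops.h> interfaces. It shows how bit-number defines keep the existing non-atomic mask tests working (via BIT()) while enabling the atomic operations that the follow-up patches switch to.

#include <linux/bitops.h>	/* BIT(), set_bit(), clear_bit(), test_and_set_bit() */
#include <linux/errno.h>

/* Bit numbers as introduced by this patch (not masks). */
#define FLAGS_BUSY	0
#define FLAGS_FINAL	1

static unsigned long example_flags;	/* hypothetical stand-in for dd->flags */

static int example_claim_device(void)
{
	/* Non-atomic mask test: build the mask from the bit number with BIT(). */
	if (example_flags & BIT(FLAGS_FINAL))
		return 0;

	/*
	 * Atomic bitops want the bit number itself; with the old mask-style
	 * define (FLAGS_BUSY 0x1000) this call would operate on the wrong bit.
	 */
	if (test_and_set_bit(FLAGS_BUSY, &example_flags))
		return -EBUSY;		/* already claimed */

	/* ... do work ... */

	clear_bit(FLAGS_BUSY, &example_flags);	/* atomic release */
	return 0;
}
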
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/omap-sham.c	102
1 file changed, 52 insertions(+), 50 deletions(-)
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index ac12a608d502..64698adec0c1 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -72,17 +72,19 @@
 
 #define DEFAULT_TIMEOUT_INTERVAL	HZ
 
-#define FLAGS_FINUP		0x0002
-#define FLAGS_FINAL		0x0004
-#define FLAGS_SG		0x0008
-#define FLAGS_SHA1		0x0010
-#define FLAGS_DMA_ACTIVE	0x0020
-#define FLAGS_OUTPUT_READY	0x0040
-#define FLAGS_INIT		0x0100
-#define FLAGS_CPU		0x0200
-#define FLAGS_HMAC		0x0400
-#define FLAGS_ERROR		0x0800
-#define FLAGS_BUSY		0x1000
+/* mostly device flags */
+#define FLAGS_BUSY		0
+#define FLAGS_FINAL		1
+#define FLAGS_DMA_ACTIVE	2
+#define FLAGS_OUTPUT_READY	3
+#define FLAGS_INIT		4
+#define FLAGS_CPU		5
+/* context flags */
+#define FLAGS_FINUP		16
+#define FLAGS_SG		17
+#define FLAGS_SHA1		18
+#define FLAGS_HMAC		19
+#define FLAGS_ERROR		20
 
 #define OP_UPDATE	1
 #define OP_FINAL	2
@@ -223,7 +225,7 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
 	if (!hash)
 		return;
 
-	if (likely(ctx->flags & FLAGS_SHA1)) {
+	if (likely(ctx->flags & BIT(FLAGS_SHA1))) {
 		/* SHA1 results are in big endian */
 		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
 			hash[i] = be32_to_cpu(in[i]);
@@ -238,7 +240,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
 {
 	clk_enable(dd->iclk);
 
-	if (!(dd->flags & FLAGS_INIT)) {
+	if (!(dd->flags & BIT(FLAGS_INIT))) {
 		omap_sham_write_mask(dd, SHA_REG_MASK,
 			SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
 
@@ -246,7 +248,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
 					SHA_REG_SYSSTATUS_RESETDONE))
 			return -ETIMEDOUT;
 
-		dd->flags |= FLAGS_INIT;
+		dd->flags |= BIT(FLAGS_INIT);
 		dd->err = 0;
 	}
 
@@ -269,7 +271,7 @@ static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
 	 * Setting ALGO_CONST only for the first iteration
 	 * and CLOSE_HASH only for the last one.
 	 */
-	if (ctx->flags & FLAGS_SHA1)
+	if (ctx->flags & BIT(FLAGS_SHA1))
 		val |= SHA_REG_CTRL_ALGO;
 	if (!ctx->digcnt)
 		val |= SHA_REG_CTRL_ALGO_CONST;
@@ -301,7 +303,7 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
 		return -ETIMEDOUT;
 
 	if (final)
-		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+		ctx->flags |= BIT(FLAGS_FINAL); /* catch last interrupt */
 
 	len32 = DIV_ROUND_UP(length, sizeof(u32));
 
@@ -334,9 +336,9 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
 	ctx->digcnt += length;
 
 	if (final)
-		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+		ctx->flags |= BIT(FLAGS_FINAL); /* catch last interrupt */
 
-	dd->flags |= FLAGS_DMA_ACTIVE;
+	dd->flags |= BIT(FLAGS_DMA_ACTIVE);
 
 	omap_start_dma(dd->dma_lch);
 
@@ -392,7 +394,7 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
 		return -EINVAL;
 	}
 
-	ctx->flags &= ~FLAGS_SG;
+	ctx->flags &= ~BIT(FLAGS_SG);
 
 	/* next call does not fail... so no unmap in the case of error */
 	return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
@@ -406,7 +408,7 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 
 	omap_sham_append_sg(ctx);
 
-	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
 
 	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
 					 ctx->bufcnt, ctx->digcnt, final);
@@ -452,7 +454,7 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 	length = min(ctx->total, sg->length);
 
 	if (sg_is_last(sg)) {
-		if (!(ctx->flags & FLAGS_FINUP)) {
+		if (!(ctx->flags & BIT(FLAGS_FINUP))) {
 			/* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
 			tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
 			/* without finup() we need one block to close hash */
@@ -467,12 +469,12 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 			return -EINVAL;
 	}
 
-	ctx->flags |= FLAGS_SG;
+	ctx->flags |= BIT(FLAGS_SG);
 
 	ctx->total -= length;
 	ctx->offset = length; /* offset where to start slow */
 
-	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
 
 	/* next call does not fail... so no unmap in the case of error */
 	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
@@ -495,7 +497,7 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 
 	omap_stop_dma(dd->dma_lch);
-	if (ctx->flags & FLAGS_SG) {
+	if (ctx->flags & BIT(FLAGS_SG)) {
 		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
 		if (ctx->sg->length == ctx->offset) {
 			ctx->sg = sg_next(ctx->sg);
@@ -537,18 +539,18 @@ static int omap_sham_init(struct ahash_request *req)
 		 crypto_ahash_digestsize(tfm));
 
 	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
-		ctx->flags |= FLAGS_SHA1;
+		ctx->flags |= BIT(FLAGS_SHA1);
 
 	ctx->bufcnt = 0;
 	ctx->digcnt = 0;
 	ctx->buflen = BUFLEN;
 
-	if (tctx->flags & FLAGS_HMAC) {
+	if (tctx->flags & BIT(FLAGS_HMAC)) {
 		struct omap_sham_hmac_ctx *bctx = tctx->base;
 
 		memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
 		ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
-		ctx->flags |= FLAGS_HMAC;
+		ctx->flags |= BIT(FLAGS_HMAC);
 	}
 
 	return 0;
@@ -562,9 +564,9 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
 	int err;
 
 	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
-		 ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);
+		 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
 
-	if (ctx->flags & FLAGS_CPU)
+	if (ctx->flags & BIT(FLAGS_CPU))
 		err = omap_sham_update_cpu(dd);
 	else
 		err = omap_sham_update_dma_start(dd);
@@ -624,7 +626,7 @@ static int omap_sham_finish(struct ahash_request *req)
 
 	if (ctx->digcnt) {
 		omap_sham_copy_ready_hash(req);
-		if (ctx->flags & FLAGS_HMAC)
+		if (ctx->flags & BIT(FLAGS_HMAC))
 			err = omap_sham_finish_hmac(req);
 	}
 
@@ -640,14 +642,14 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
 
 	if (!err) {
 		omap_sham_copy_hash(req, 1);
-		if (ctx->flags & FLAGS_FINAL)
+		if (ctx->flags & BIT(FLAGS_FINAL))
 			err = omap_sham_finish(req);
 	} else {
-		ctx->flags |= FLAGS_ERROR;
+		ctx->flags |= BIT(FLAGS_ERROR);
 	}
 
 	clk_disable(dd->iclk);
-	dd->flags &= ~FLAGS_BUSY;
+	dd->flags &= ~BIT(FLAGS_BUSY);
 
 	if (req->base.complete)
 		req->base.complete(&req->base, err);
@@ -664,14 +666,14 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
 	spin_lock_irqsave(&dd->lock, flags);
 	if (req)
 		ret = ahash_enqueue_request(&dd->queue, req);
-	if (dd->flags & FLAGS_BUSY) {
+	if (dd->flags & BIT(FLAGS_BUSY)) {
 		spin_unlock_irqrestore(&dd->lock, flags);
 		return ret;
 	}
 	backlog = crypto_get_backlog(&dd->queue);
 	async_req = crypto_dequeue_request(&dd->queue);
 	if (async_req)
-		dd->flags |= FLAGS_BUSY;
+		dd->flags |= BIT(FLAGS_BUSY);
 	spin_unlock_irqrestore(&dd->lock, flags);
 
 	if (!async_req)
@@ -707,7 +709,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
 
 	if (ctx->op == OP_UPDATE) {
 		err = omap_sham_update_req(dd);
-		if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
+		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
 			/* no final() after finup() */
 			err = omap_sham_final_req(dd);
 	} else if (ctx->op == OP_FINAL) {
@@ -747,7 +749,7 @@ static int omap_sham_update(struct ahash_request *req)
 	ctx->sg = req->src;
 	ctx->offset = 0;
 
-	if (ctx->flags & FLAGS_FINUP) {
+	if (ctx->flags & BIT(FLAGS_FINUP)) {
 		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
 			/*
 			* OMAP HW accel works only with buffers >= 9
@@ -760,7 +762,7 @@ static int omap_sham_update(struct ahash_request *req)
 			/*
 			* faster to use CPU for short transfers
 			*/
-			ctx->flags |= FLAGS_CPU;
+			ctx->flags |= BIT(FLAGS_CPU);
 		}
 	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
 		omap_sham_append_sg(ctx);
@@ -797,9 +799,9 @@ static int omap_sham_final(struct ahash_request *req)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 
-	ctx->flags |= FLAGS_FINUP;
+	ctx->flags |= BIT(FLAGS_FINUP);
 
-	if (ctx->flags & FLAGS_ERROR)
+	if (ctx->flags & BIT(FLAGS_ERROR))
 		return 0; /* uncompleted hash is not needed */
 
 	/* OMAP HW accel works only with buffers >= 9 */
@@ -818,7 +820,7 @@ static int omap_sham_finup(struct ahash_request *req)
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	int err1, err2;
 
-	ctx->flags |= FLAGS_FINUP;
+	ctx->flags |= BIT(FLAGS_FINUP);
 
 	err1 = omap_sham_update(req);
 	if (err1 == -EINPROGRESS || err1 == -EBUSY)
@@ -890,7 +892,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
 
 	if (alg_base) {
 		struct omap_sham_hmac_ctx *bctx = tctx->base;
-		tctx->flags |= FLAGS_HMAC;
+		tctx->flags |= BIT(FLAGS_HMAC);
 		bctx->shash = crypto_alloc_shash(alg_base, 0,
 						CRYPTO_ALG_NEED_FALLBACK);
 		if (IS_ERR(bctx->shash)) {
@@ -927,7 +929,7 @@ static void omap_sham_cra_exit(struct crypto_tfm *tfm)
 	crypto_free_shash(tctx->fallback);
 	tctx->fallback = NULL;
 
-	if (tctx->flags & FLAGS_HMAC) {
+	if (tctx->flags & BIT(FLAGS_HMAC)) {
 		struct omap_sham_hmac_ctx *bctx = tctx->base;
 		crypto_free_shash(bctx->shash);
 	}
@@ -1035,13 +1037,13 @@ static void omap_sham_done_task(unsigned long data)
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	int ready = 0, err = 0;
 
-	if (ctx->flags & FLAGS_OUTPUT_READY) {
-		ctx->flags &= ~FLAGS_OUTPUT_READY;
+	if (ctx->flags & BIT(FLAGS_OUTPUT_READY)) {
+		ctx->flags &= ~BIT(FLAGS_OUTPUT_READY);
 		ready = 1;
 	}
 
-	if (dd->flags & FLAGS_DMA_ACTIVE) {
-		dd->flags &= ~FLAGS_DMA_ACTIVE;
+	if (dd->flags & BIT(FLAGS_DMA_ACTIVE)) {
+		dd->flags &= ~BIT(FLAGS_DMA_ACTIVE);
 		omap_sham_update_dma_stop(dd);
 		if (!dd->err)
 			err = omap_sham_update_dma_start(dd);
@@ -1075,7 +1077,7 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id)
 		return IRQ_HANDLED;
 	}
 
-	if (unlikely(ctx->flags & FLAGS_FINAL))
+	if (unlikely(ctx->flags & BIT(FLAGS_FINAL)))
 		/* final -> allow device to go to power-saving mode */
 		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
 
@@ -1083,7 +1085,7 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id)
 			 SHA_REG_CTRL_OUTPUT_READY);
 	omap_sham_read(dd, SHA_REG_CTRL);
 
-	ctx->flags |= FLAGS_OUTPUT_READY;
+	ctx->flags |= BIT(FLAGS_OUTPUT_READY);
 	dd->err = 0;
 	tasklet_schedule(&dd->done_task);
 
@@ -1097,7 +1099,7 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
 	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
 		pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
 		dd->err = -EIO;
-		dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+		dd->flags &= ~BIT(FLAGS_INIT); /* request to re-initialize */
 	}
 
 	tasklet_schedule(&dd->done_task);