author		Cyrille Pitchen <cyrille.pitchen@atmel.com>	2015-12-17 11:48:41 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2015-12-23 05:19:52 -0500
commit		77dacf5fc511484eab47f802d7369c03175c2b9e (patch)
tree		0e35923c98df61386825260aefdb8d949ac0dcac /drivers/crypto/atmel-aes.c
parent		794595d2047a31702905b3666145c6a59bfee472 (diff)
crypto: atmel-aes - simplify the configuration of the AES IP
This patch reworks the AES_FLAGS_* to simplify the configuration of the
AES IP.
Signed-off-by: Cyrille Pitchen <cyrille.pitchen@atmel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
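The core idea of the rework is visible in the hunks below: instead of driver-private BIT(n) flags that atmel_aes_write_ctrl() had to translate into AES Mode Register (AES_MR) fields through a chain of if/else tests, the request flags are now stored directly in their AES_MR bit positions, so the register value falls out of a single mask. The following stand-alone sketch shows that pattern only; the DEMO_MR_* field values are illustrative placeholders (the real definitions come from the driver's register header, which is not part of this diff), and demo_build_mr() is a hypothetical helper, not a function in the driver.

/* Sketch: request flags mirror the hardware mode-register layout. */
#include <stdio.h>

#define DEMO_MR_CYPHER_ENC   (1u << 0)    /* bit [0]: encrypt vs. decrypt   */
#define DEMO_MR_OPMOD_MASK   (7u << 12)   /* bits [14:12]: operation mode   */
#define DEMO_MR_OPMOD_ECB    (0u << 12)
#define DEMO_MR_OPMOD_CBC    (1u << 12)
#define DEMO_MR_OPMOD_CFB    (3u << 12)
#define DEMO_MR_CFBS_MASK    (7u << 16)   /* bits [18:16]: CFB segment size */
#define DEMO_MR_CFBS_128b    (0u << 16)
#define DEMO_MR_CFBS_8b      (4u << 16)

/* Request flags reuse the register layout verbatim ... */
#define DEMO_FLAGS_ENCRYPT    DEMO_MR_CYPHER_ENC
#define DEMO_FLAGS_ECB        DEMO_MR_OPMOD_ECB
#define DEMO_FLAGS_CBC        DEMO_MR_OPMOD_CBC
#define DEMO_FLAGS_CFB8       (DEMO_MR_OPMOD_CFB | DEMO_MR_CFBS_8b)
#define DEMO_FLAGS_MODE_MASK  (DEMO_MR_OPMOD_MASK | DEMO_MR_CFBS_MASK | \
                               DEMO_FLAGS_ENCRYPT)

/* ... so deriving the mode-register value needs no per-mode branching. */
static unsigned int demo_build_mr(unsigned int flags)
{
        return flags & DEMO_FLAGS_MODE_MASK;
}

int main(void)
{
        printf("CBC encrypt  -> MR = 0x%08x\n",
               demo_build_mr(DEMO_FLAGS_CBC | DEMO_FLAGS_ENCRYPT));
        printf("CFB8 decrypt -> MR = 0x%08x\n",
               demo_build_mr(DEMO_FLAGS_CFB8));
        return 0;
}

This is exactly what the patch does with AES_FLAGS_MODE_MASK in atmel_aes_write_ctrl(): valmr |= dd->flags & AES_FLAGS_MODE_MASK replaces the old per-mode branch ladder.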
Diffstat (limited to 'drivers/crypto/atmel-aes.c')
-rw-r--r--	drivers/crypto/atmel-aes.c	216
1 file changed, 93 insertions(+), 123 deletions(-)
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index f1ea9c893561..c10c54ccc606 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -48,22 +48,28 @@
 #define CFB64_BLOCK_SIZE	8
 
 /* AES flags */
-#define AES_FLAGS_MODE_MASK	0x03ff
-#define AES_FLAGS_ENCRYPT	BIT(0)
-#define AES_FLAGS_CBC		BIT(1)
-#define AES_FLAGS_CFB		BIT(2)
-#define AES_FLAGS_CFB8		BIT(3)
-#define AES_FLAGS_CFB16		BIT(4)
-#define AES_FLAGS_CFB32		BIT(5)
-#define AES_FLAGS_CFB64		BIT(6)
-#define AES_FLAGS_CFB128	BIT(7)
-#define AES_FLAGS_OFB		BIT(8)
-#define AES_FLAGS_CTR		BIT(9)
+/* Reserve bits [18:16] [14:12] [0] for mode (same as for AES_MR) */
+#define AES_FLAGS_ENCRYPT	AES_MR_CYPHER_ENC
+#define AES_FLAGS_OPMODE_MASK	(AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
+#define AES_FLAGS_ECB		AES_MR_OPMOD_ECB
+#define AES_FLAGS_CBC		AES_MR_OPMOD_CBC
+#define AES_FLAGS_OFB		AES_MR_OPMOD_OFB
+#define AES_FLAGS_CFB128	(AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
+#define AES_FLAGS_CFB64		(AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
+#define AES_FLAGS_CFB32		(AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
+#define AES_FLAGS_CFB16		(AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
+#define AES_FLAGS_CFB8		(AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
+#define AES_FLAGS_CTR		AES_MR_OPMOD_CTR
 
-#define AES_FLAGS_INIT		BIT(16)
-#define AES_FLAGS_DMA		BIT(17)
-#define AES_FLAGS_BUSY		BIT(18)
-#define AES_FLAGS_FAST		BIT(19)
+#define AES_FLAGS_MODE_MASK	(AES_FLAGS_OPMODE_MASK |	\
+				 AES_FLAGS_ENCRYPT)
+
+#define AES_FLAGS_INIT		BIT(2)
+#define AES_FLAGS_BUSY		BIT(3)
+#define AES_FLAGS_DMA		BIT(4)
+#define AES_FLAGS_FAST		BIT(5)
+
+#define AES_FLAGS_PERSISTENT	(AES_FLAGS_INIT | AES_FLAGS_BUSY)
 
 #define ATMEL_AES_QUEUE_LENGTH	50
 
@@ -306,6 +312,13 @@ static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
 	return 0;
 }
 
+static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
+				      const struct atmel_aes_reqctx *rctx)
+{
+	/* Clear all but persistent flags and set request flags. */
+	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
+}
+
 static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
 {
 	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
@@ -329,6 +342,34 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
 {
 	struct scatterlist sg[2];
 	struct dma_async_tx_descriptor *in_desc, *out_desc;
+	enum dma_slave_buswidth addr_width;
+	u32 maxburst;
+
+	switch (dd->ctx->block_size) {
+	case CFB8_BLOCK_SIZE:
+		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+		maxburst = 1;
+		break;
+
+	case CFB16_BLOCK_SIZE:
+		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		maxburst = 1;
+		break;
+
+	case CFB32_BLOCK_SIZE:
+	case CFB64_BLOCK_SIZE:
+		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		maxburst = 1;
+		break;
+
+	case AES_BLOCK_SIZE:
+		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		maxburst = dd->caps.max_burst_size;
+		break;
+
+	default:
+		return -EINVAL;
+	}
 
 	dd->dma_size = length;
 
@@ -337,35 +378,13 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
 	dma_sync_single_for_device(dd->dev, dma_addr_out, length,
 				   DMA_FROM_DEVICE);
 
-	if (dd->flags & AES_FLAGS_CFB8) {
-		dd->dma_lch_in.dma_conf.dst_addr_width =
-			DMA_SLAVE_BUSWIDTH_1_BYTE;
-		dd->dma_lch_out.dma_conf.src_addr_width =
-			DMA_SLAVE_BUSWIDTH_1_BYTE;
-	} else if (dd->flags & AES_FLAGS_CFB16) {
-		dd->dma_lch_in.dma_conf.dst_addr_width =
-			DMA_SLAVE_BUSWIDTH_2_BYTES;
-		dd->dma_lch_out.dma_conf.src_addr_width =
-			DMA_SLAVE_BUSWIDTH_2_BYTES;
-	} else {
-		dd->dma_lch_in.dma_conf.dst_addr_width =
-			DMA_SLAVE_BUSWIDTH_4_BYTES;
-		dd->dma_lch_out.dma_conf.src_addr_width =
-			DMA_SLAVE_BUSWIDTH_4_BYTES;
-	}
+	dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
+	dd->dma_lch_in.dma_conf.src_maxburst = maxburst;
+	dd->dma_lch_in.dma_conf.dst_maxburst = maxburst;
 
-	if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
-			AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
-		dd->dma_lch_in.dma_conf.src_maxburst = 1;
-		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
-		dd->dma_lch_out.dma_conf.src_maxburst = 1;
-		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
-	} else {
-		dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
-		dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
-		dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
-		dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
-	}
+	dd->dma_lch_out.dma_conf.src_addr_width = addr_width;
+	dd->dma_lch_out.dma_conf.src_maxburst = maxburst;
+	dd->dma_lch_out.dma_conf.dst_maxburst = maxburst;
 
 	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
 	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
@@ -521,30 +540,7 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
 	else
 		valmr |= AES_MR_KEYSIZE_256;
 
-	if (dd->flags & AES_FLAGS_CBC) {
-		valmr |= AES_MR_OPMOD_CBC;
-	} else if (dd->flags & AES_FLAGS_CFB) {
-		valmr |= AES_MR_OPMOD_CFB;
-		if (dd->flags & AES_FLAGS_CFB8)
-			valmr |= AES_MR_CFBS_8b;
-		else if (dd->flags & AES_FLAGS_CFB16)
-			valmr |= AES_MR_CFBS_16b;
-		else if (dd->flags & AES_FLAGS_CFB32)
-			valmr |= AES_MR_CFBS_32b;
-		else if (dd->flags & AES_FLAGS_CFB64)
-			valmr |= AES_MR_CFBS_64b;
-		else if (dd->flags & AES_FLAGS_CFB128)
-			valmr |= AES_MR_CFBS_128b;
-	} else if (dd->flags & AES_FLAGS_OFB) {
-		valmr |= AES_MR_OPMOD_OFB;
-	} else if (dd->flags & AES_FLAGS_CTR) {
-		valmr |= AES_MR_OPMOD_CTR;
-	} else {
-		valmr |= AES_MR_OPMOD_ECB;
-	}
-
-	if (dd->flags & AES_FLAGS_ENCRYPT)
-		valmr |= AES_MR_CYPHER_ENC;
+	valmr |= dd->flags & AES_FLAGS_MODE_MASK;
 
 	if (use_dma) {
 		valmr |= AES_MR_SMOD_IDATAR0;
@@ -559,11 +555,8 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
 	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
 			  dd->ctx->keylen >> 2);
 
-	if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
-	    (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
-	    iv) {
+	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
 		atmel_aes_write_n(dd, AES_IVR(0), iv, 4);
-	}
 }
 
 static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
@@ -617,8 +610,7 @@ static int atmel_aes_start(struct atmel_aes_dev *dd)
 	dd->out_sg = req->dst;
 
 	rctx = ablkcipher_request_ctx(req);
-	rctx->mode &= AES_FLAGS_MODE_MASK;
-	dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
+	atmel_aes_set_mode(dd, rctx);
 
 	err = atmel_aes_hw_init(dd);
 	if (!err) {
@@ -728,36 +720,26 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 	struct atmel_aes_dev *dd;
 
-	if (mode & AES_FLAGS_CFB8) {
-		if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
-			pr_err("request size is not exact amount of CFB8 blocks\n");
-			return -EINVAL;
-		}
+	switch (mode & AES_FLAGS_OPMODE_MASK) {
+	case AES_FLAGS_CFB8:
 		ctx->block_size = CFB8_BLOCK_SIZE;
-	} else if (mode & AES_FLAGS_CFB16) {
-		if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
-			pr_err("request size is not exact amount of CFB16 blocks\n");
-			return -EINVAL;
-		}
+		break;
+
+	case AES_FLAGS_CFB16:
 		ctx->block_size = CFB16_BLOCK_SIZE;
-	} else if (mode & AES_FLAGS_CFB32) {
-		if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
-			pr_err("request size is not exact amount of CFB32 blocks\n");
-			return -EINVAL;
-		}
+		break;
+
+	case AES_FLAGS_CFB32:
 		ctx->block_size = CFB32_BLOCK_SIZE;
-	} else if (mode & AES_FLAGS_CFB64) {
-		if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) {
-			pr_err("request size is not exact amount of CFB64 blocks\n");
-			return -EINVAL;
-		}
+		break;
+
+	case AES_FLAGS_CFB64:
 		ctx->block_size = CFB64_BLOCK_SIZE;
-	} else {
-		if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
-			pr_err("request size is not exact amount of AES blocks\n");
-			return -EINVAL;
-		}
+		break;
+
+	default:
 		ctx->block_size = AES_BLOCK_SIZE;
+		break;
 	}
 
 	dd = atmel_aes_find_dev(ctx);
@@ -857,14 +839,12 @@ static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 
 static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT);
+	return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
 }
 
 static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
 {
-	return atmel_aes_crypt(req,
-		0);
+	return atmel_aes_crypt(req, AES_FLAGS_ECB);
 }
 
 static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
@@ -893,62 +873,52 @@ static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
 
 static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
 }
 
 static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_CFB | AES_FLAGS_CFB128);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB128);
 }
 
 static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
 }
 
 static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_CFB | AES_FLAGS_CFB64);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB64);
 }
 
 static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
 }
 
 static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_CFB | AES_FLAGS_CFB32);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB32);
 }
 
 static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
 }
 
 static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_CFB | AES_FLAGS_CFB16);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB16);
 }
 
 static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
 }
 
 static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_CFB | AES_FLAGS_CFB8);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB8);
 }
 
 static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)