Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig                    |  13
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c       |  15
-rw-r--r--  drivers/crypto/caam/Kconfig               |   8
-rw-r--r--  drivers/crypto/caam/Makefile              |   3
-rw-r--r--  drivers/crypto/caam/caamalg.c             |  80
-rw-r--r--  drivers/crypto/caam/caamhash.c            |  70
-rw-r--r--  drivers/crypto/caam/ctrl.c                |  77
-rw-r--r--  drivers/crypto/caam/desc_constr.h         |   1
-rw-r--r--  drivers/crypto/caam/intern.h              |   5
-rw-r--r--  drivers/crypto/caam/jr.c                  |  67
-rw-r--r--  drivers/crypto/caam/jr.h                  |   2
-rw-r--r--  drivers/crypto/caam/key_gen.c             |   6
-rw-r--r--  drivers/crypto/caam/regs.h                |  12
-rw-r--r--  drivers/crypto/nx/nx-aes-cbc.c            |  57
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c            | 283
-rw-r--r--  drivers/crypto/nx/nx-aes-ctr.c            |  52
-rw-r--r--  drivers/crypto/nx/nx-aes-ecb.c            |  50
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c            | 296
-rw-r--r--  drivers/crypto/nx/nx-aes-xcbc.c           | 205
-rw-r--r--  drivers/crypto/nx/nx-sha256.c             | 124
-rw-r--r--  drivers/crypto/nx/nx-sha512.c             | 131
-rw-r--r--  drivers/crypto/nx/nx.c                    |  35
-rw-r--r--  drivers/crypto/nx/nx.h                    |   3
-rw-r--r--  drivers/crypto/omap-aes.c                 | 468
-rw-r--r--  drivers/crypto/omap-sham.c                | 382
-rw-r--r--  drivers/crypto/sahara.c                   |   4
-rw-r--r--  drivers/crypto/tegra-aes.c                |   6
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c     | 586
28 files changed, 1890 insertions, 1151 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 8ff7c230d82e..f4fd837bcb82 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -242,17 +242,20 @@ config CRYPTO_DEV_PPC4XX
242 This option allows you to have support for AMCC crypto acceleration. 242 This option allows you to have support for AMCC crypto acceleration.
243 243
244config CRYPTO_DEV_OMAP_SHAM 244config CRYPTO_DEV_OMAP_SHAM
245 tristate "Support for OMAP SHA1/MD5 hw accelerator" 245 tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
246 depends on ARCH_OMAP2 || ARCH_OMAP3 246 depends on ARCH_OMAP2PLUS
247 select CRYPTO_SHA1 247 select CRYPTO_SHA1
248 select CRYPTO_MD5 248 select CRYPTO_MD5
249 select CRYPTO_SHA256
250 select CRYPTO_SHA512
251 select CRYPTO_HMAC
249 help 252 help
250 OMAP processors have SHA1/MD5 hw accelerator. Select this if you 253 OMAP processors have MD5/SHA1/SHA2 hw accelerator. Select this if you
251 want to use the OMAP module for SHA1/MD5 algorithms. 254 want to use the OMAP module for MD5/SHA1/SHA2 algorithms.
252 255
253config CRYPTO_DEV_OMAP_AES 256config CRYPTO_DEV_OMAP_AES
254 tristate "Support for OMAP AES hw engine" 257 tristate "Support for OMAP AES hw engine"
255 depends on ARCH_OMAP2 || ARCH_OMAP3 258 depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS
256 select CRYPTO_AES 259 select CRYPTO_AES
257 select CRYPTO_BLKCIPHER2 260 select CRYPTO_BLKCIPHER2
258 help 261 help
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index a33243c17b00..4afca3968773 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -32,10 +32,10 @@
32#include "crypto4xx_sa.h" 32#include "crypto4xx_sa.h"
33#include "crypto4xx_core.h" 33#include "crypto4xx_core.h"
34 34
35void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h, 35static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
36 u32 save_iv, u32 ld_h, u32 ld_iv, u32 hdr_proc, 36 u32 save_iv, u32 ld_h, u32 ld_iv,
37 u32 h, u32 c, u32 pad_type, u32 op_grp, u32 op, 37 u32 hdr_proc, u32 h, u32 c, u32 pad_type,
38 u32 dir) 38 u32 op_grp, u32 op, u32 dir)
39{ 39{
40 sa->sa_command_0.w = 0; 40 sa->sa_command_0.w = 0;
41 sa->sa_command_0.bf.save_hash_state = save_h; 41 sa->sa_command_0.bf.save_hash_state = save_h;
@@ -52,9 +52,10 @@ void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
52 sa->sa_command_0.bf.dir = dir; 52 sa->sa_command_0.bf.dir = dir;
53} 53}
54 54
55void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, u32 hmac_mc, 55static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
56 u32 cfb, u32 esn, u32 sn_mask, u32 mute, 56 u32 hmac_mc, u32 cfb, u32 esn,
57 u32 cp_pad, u32 cp_pay, u32 cp_hdr) 57 u32 sn_mask, u32 mute, u32 cp_pad,
58 u32 cp_pay, u32 cp_hdr)
58{ 59{
59 sa->sa_command_1.w = 0; 60 sa->sa_command_1.w = 0;
60 sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2; 61 sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index b44091c47f75..ca89f6b84b06 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -98,3 +98,11 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
98 98
99 To compile this as a module, choose M here: the module 99 To compile this as a module, choose M here: the module
100 will be called caamrng. 100 will be called caamrng.
101
102config CRYPTO_DEV_FSL_CAAM_DEBUG
103 bool "Enable debug output in CAAM driver"
104 depends on CRYPTO_DEV_FSL_CAAM
105 default n
106 help
107 Selecting this will enable printing of various debug
108 information in the CAAM driver.
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index b1eb44838db5..d56bd0ec65d8 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -1,6 +1,9 @@
1# 1#
2# Makefile for the CAAM backend and dependent components 2# Makefile for the CAAM backend and dependent components
3# 3#
4ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
5 EXTRA_CFLAGS := -DDEBUG
6endif
4 7
5obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o 8obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
6obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o 9obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
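The new Kconfig option and the EXTRA_CFLAGS line above only add -DDEBUG to the compiler flags; all of the extra output in the CAAM sources is gated on that define at compile time. A minimal userspace sketch of the same pattern, assuming nothing beyond the standard C preprocessor (the debug() macro name matches the driver, everything else here is illustrative):

#include <stdio.h>

/* With -DDEBUG on the command line the message is printed; without it
 * the macro expands to nothing, which is how the CAAM sources turn
 * their debug()/print_hex_dump() blocks on and off. The driver's own
 * macro calls printk() instead of printf(). */
#ifdef DEBUG
#define debug(fmt, ...)	printf(fmt, ##__VA_ARGS__)
#else
#define debug(fmt, ...)
#endif

int main(void)
{
	debug("descriptor length: %d words\n", 6);
	return 0;
}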
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index bf416a8391a7..7c63b72ecd75 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -65,8 +65,6 @@
65#define CAAM_MAX_IV_LENGTH 16 65#define CAAM_MAX_IV_LENGTH 16
66 66
67/* length of descriptors text */ 67/* length of descriptors text */
68#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
69
70#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) 68#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
71#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ) 69#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
72#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ) 70#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
@@ -84,8 +82,6 @@
84 82
85#ifdef DEBUG 83#ifdef DEBUG
86/* for print_hex_dumps with line references */ 84/* for print_hex_dumps with line references */
87#define xstr(s) str(s)
88#define str(s) #s
89#define debug(format, arg...) printk(format, arg) 85#define debug(format, arg...) printk(format, arg)
90#else 86#else
91#define debug(format, arg...) 87#define debug(format, arg...)
@@ -285,7 +281,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
285 return -ENOMEM; 281 return -ENOMEM;
286 } 282 }
287#ifdef DEBUG 283#ifdef DEBUG
288 print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ", 284 print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
289 DUMP_PREFIX_ADDRESS, 16, 4, desc, 285 DUMP_PREFIX_ADDRESS, 16, 4, desc,
290 desc_bytes(desc), 1); 286 desc_bytes(desc), 1);
291#endif 287#endif
@@ -353,7 +349,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
353 return -ENOMEM; 349 return -ENOMEM;
354 } 350 }
355#ifdef DEBUG 351#ifdef DEBUG
356 print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ", 352 print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
357 DUMP_PREFIX_ADDRESS, 16, 4, desc, 353 DUMP_PREFIX_ADDRESS, 16, 4, desc,
358 desc_bytes(desc), 1); 354 desc_bytes(desc), 1);
359#endif 355#endif
@@ -436,7 +432,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
436 return -ENOMEM; 432 return -ENOMEM;
437 } 433 }
438#ifdef DEBUG 434#ifdef DEBUG
439 print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ", 435 print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
440 DUMP_PREFIX_ADDRESS, 16, 4, desc, 436 DUMP_PREFIX_ADDRESS, 16, 4, desc,
441 desc_bytes(desc), 1); 437 desc_bytes(desc), 1);
442#endif 438#endif
@@ -500,7 +496,7 @@ static int aead_setkey(struct crypto_aead *aead,
500 keylen, enckeylen, authkeylen); 496 keylen, enckeylen, authkeylen);
501 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", 497 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
502 ctx->split_key_len, ctx->split_key_pad_len); 498 ctx->split_key_len, ctx->split_key_pad_len);
503 print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", 499 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
504 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 500 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
505#endif 501#endif
506 502
@@ -519,7 +515,7 @@ static int aead_setkey(struct crypto_aead *aead,
519 return -ENOMEM; 515 return -ENOMEM;
520 } 516 }
521#ifdef DEBUG 517#ifdef DEBUG
522 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", 518 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
523 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 519 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
524 ctx->split_key_pad_len + enckeylen, 1); 520 ctx->split_key_pad_len + enckeylen, 1);
525#endif 521#endif
@@ -549,7 +545,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
549 u32 *desc; 545 u32 *desc;
550 546
551#ifdef DEBUG 547#ifdef DEBUG
552 print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", 548 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
553 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 549 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
554#endif 550#endif
555 551
@@ -598,7 +594,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
598 return -ENOMEM; 594 return -ENOMEM;
599 } 595 }
600#ifdef DEBUG 596#ifdef DEBUG
601 print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ", 597 print_hex_dump(KERN_ERR,
598 "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
602 DUMP_PREFIX_ADDRESS, 16, 4, desc, 599 DUMP_PREFIX_ADDRESS, 16, 4, desc,
603 desc_bytes(desc), 1); 600 desc_bytes(desc), 1);
604#endif 601#endif
@@ -643,7 +640,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
643 } 640 }
644 641
645#ifdef DEBUG 642#ifdef DEBUG
646 print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ", 643 print_hex_dump(KERN_ERR,
644 "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
647 DUMP_PREFIX_ADDRESS, 16, 4, desc, 645 DUMP_PREFIX_ADDRESS, 16, 4, desc,
648 desc_bytes(desc), 1); 646 desc_bytes(desc), 1);
649#endif 647#endif
@@ -780,13 +778,13 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
780 aead_unmap(jrdev, edesc, req); 778 aead_unmap(jrdev, edesc, req);
781 779
782#ifdef DEBUG 780#ifdef DEBUG
783 print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", 781 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
784 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), 782 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
785 req->assoclen , 1); 783 req->assoclen , 1);
786 print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", 784 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
787 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize, 785 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
788 edesc->src_nents ? 100 : ivsize, 1); 786 edesc->src_nents ? 100 : ivsize, 1);
789 print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", 787 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
790 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 788 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
791 edesc->src_nents ? 100 : req->cryptlen + 789 edesc->src_nents ? 100 : req->cryptlen +
792 ctx->authsize + 4, 1); 790 ctx->authsize + 4, 1);
@@ -814,10 +812,10 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
814 offsetof(struct aead_edesc, hw_desc)); 812 offsetof(struct aead_edesc, hw_desc));
815 813
816#ifdef DEBUG 814#ifdef DEBUG
817 print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", 815 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
818 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 816 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
819 ivsize, 1); 817 ivsize, 1);
820 print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", 818 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
821 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), 819 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
822 req->cryptlen, 1); 820 req->cryptlen, 1);
823#endif 821#endif
@@ -837,7 +835,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
837 err = -EBADMSG; 835 err = -EBADMSG;
838 836
839#ifdef DEBUG 837#ifdef DEBUG
840 print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ", 838 print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
841 DUMP_PREFIX_ADDRESS, 16, 4, 839 DUMP_PREFIX_ADDRESS, 16, 4,
842 ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)), 840 ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
843 sizeof(struct iphdr) + req->assoclen + 841 sizeof(struct iphdr) + req->assoclen +
@@ -845,7 +843,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
845 ctx->authsize + 36, 1); 843 ctx->authsize + 36, 1);
846 if (!err && edesc->sec4_sg_bytes) { 844 if (!err && edesc->sec4_sg_bytes) {
847 struct scatterlist *sg = sg_last(req->src, edesc->src_nents); 845 struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
848 print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", 846 print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
849 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), 847 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
850 sg->length + ctx->authsize + 16, 1); 848 sg->length + ctx->authsize + 16, 1);
851 } 849 }
@@ -878,10 +876,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
878 } 876 }
879 877
880#ifdef DEBUG 878#ifdef DEBUG
881 print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", 879 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
882 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 880 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
883 edesc->src_nents > 1 ? 100 : ivsize, 1); 881 edesc->src_nents > 1 ? 100 : ivsize, 1);
884 print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", 882 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
885 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 883 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
886 edesc->dst_nents > 1 ? 100 : req->nbytes, 1); 884 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
887#endif 885#endif
@@ -913,10 +911,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
913 } 911 }
914 912
915#ifdef DEBUG 913#ifdef DEBUG
916 print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", 914 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
917 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 915 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
918 ivsize, 1); 916 ivsize, 1);
919 print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", 917 print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
920 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 918 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
921 edesc->dst_nents > 1 ? 100 : req->nbytes, 1); 919 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
922#endif 920#endif
@@ -947,16 +945,16 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
947#ifdef DEBUG 945#ifdef DEBUG
948 debug("assoclen %d cryptlen %d authsize %d\n", 946 debug("assoclen %d cryptlen %d authsize %d\n",
949 req->assoclen, req->cryptlen, authsize); 947 req->assoclen, req->cryptlen, authsize);
950 print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", 948 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
951 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), 949 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
952 req->assoclen , 1); 950 req->assoclen , 1);
953 print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", 951 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
954 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 952 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
955 edesc->src_nents ? 100 : ivsize, 1); 953 edesc->src_nents ? 100 : ivsize, 1);
956 print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", 954 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
957 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 955 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
958 edesc->src_nents ? 100 : req->cryptlen, 1); 956 edesc->src_nents ? 100 : req->cryptlen, 1);
959 print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", 957 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
960 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, 958 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
961 desc_bytes(sh_desc), 1); 959 desc_bytes(sh_desc), 1);
962#endif 960#endif
@@ -1025,15 +1023,15 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1025#ifdef DEBUG 1023#ifdef DEBUG
1026 debug("assoclen %d cryptlen %d authsize %d\n", 1024 debug("assoclen %d cryptlen %d authsize %d\n",
1027 req->assoclen, req->cryptlen, authsize); 1025 req->assoclen, req->cryptlen, authsize);
1028 print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", 1026 print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
1029 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), 1027 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1030 req->assoclen , 1); 1028 req->assoclen , 1);
1031 print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", 1029 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
1032 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); 1030 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
1033 print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", 1031 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
1034 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1032 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1035 edesc->src_nents > 1 ? 100 : req->cryptlen, 1); 1033 edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
1036 print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", 1034 print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
1037 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, 1035 DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1038 desc_bytes(sh_desc), 1); 1036 desc_bytes(sh_desc), 1);
1039#endif 1037#endif
@@ -1086,10 +1084,10 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1086 int len, sec4_sg_index = 0; 1084 int len, sec4_sg_index = 0;
1087 1085
1088#ifdef DEBUG 1086#ifdef DEBUG
1089 print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", 1087 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
1090 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 1088 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1091 ivsize, 1); 1089 ivsize, 1);
1092 print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", 1090 print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
1093 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1091 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1094 edesc->src_nents ? 100 : req->nbytes, 1); 1092 edesc->src_nents ? 100 : req->nbytes, 1);
1095#endif 1093#endif
@@ -1247,7 +1245,7 @@ static int aead_encrypt(struct aead_request *req)
1247 init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, 1245 init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
1248 all_contig, true); 1246 all_contig, true);
1249#ifdef DEBUG 1247#ifdef DEBUG
1250 print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", 1248 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1251 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1249 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1252 desc_bytes(edesc->hw_desc), 1); 1250 desc_bytes(edesc->hw_desc), 1);
1253#endif 1251#endif
@@ -1281,7 +1279,7 @@ static int aead_decrypt(struct aead_request *req)
1281 return PTR_ERR(edesc); 1279 return PTR_ERR(edesc);
1282 1280
1283#ifdef DEBUG 1281#ifdef DEBUG
1284 print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ", 1282 print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
1285 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1283 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1286 req->cryptlen, 1); 1284 req->cryptlen, 1);
1287#endif 1285#endif
@@ -1290,7 +1288,7 @@ static int aead_decrypt(struct aead_request *req)
1290 init_aead_job(ctx->sh_desc_dec, 1288 init_aead_job(ctx->sh_desc_dec,
1291 ctx->sh_desc_dec_dma, edesc, req, all_contig, false); 1289 ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
1292#ifdef DEBUG 1290#ifdef DEBUG
1293 print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", 1291 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1294 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1292 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1295 desc_bytes(edesc->hw_desc), 1); 1293 desc_bytes(edesc->hw_desc), 1);
1296#endif 1294#endif
@@ -1437,7 +1435,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
1437 return PTR_ERR(edesc); 1435 return PTR_ERR(edesc);
1438 1436
1439#ifdef DEBUG 1437#ifdef DEBUG
1440 print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ", 1438 print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
1441 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), 1439 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1442 req->cryptlen, 1); 1440 req->cryptlen, 1);
1443#endif 1441#endif
@@ -1446,7 +1444,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
1446 init_aead_giv_job(ctx->sh_desc_givenc, 1444 init_aead_giv_job(ctx->sh_desc_givenc,
1447 ctx->sh_desc_givenc_dma, edesc, req, contig); 1445 ctx->sh_desc_givenc_dma, edesc, req, contig);
1448#ifdef DEBUG 1446#ifdef DEBUG
1449 print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", 1447 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1450 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1448 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1451 desc_bytes(edesc->hw_desc), 1); 1449 desc_bytes(edesc->hw_desc), 1);
1452#endif 1450#endif
@@ -1546,7 +1544,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1546 edesc->iv_dma = iv_dma; 1544 edesc->iv_dma = iv_dma;
1547 1545
1548#ifdef DEBUG 1546#ifdef DEBUG
1549 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ", 1547 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
1550 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, 1548 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1551 sec4_sg_bytes, 1); 1549 sec4_sg_bytes, 1);
1552#endif 1550#endif
@@ -1575,7 +1573,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req)
1575 init_ablkcipher_job(ctx->sh_desc_enc, 1573 init_ablkcipher_job(ctx->sh_desc_enc,
1576 ctx->sh_desc_enc_dma, edesc, req, iv_contig); 1574 ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1577#ifdef DEBUG 1575#ifdef DEBUG
1578 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", 1576 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1579 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1577 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1580 desc_bytes(edesc->hw_desc), 1); 1578 desc_bytes(edesc->hw_desc), 1);
1581#endif 1579#endif
@@ -1613,7 +1611,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
1613 ctx->sh_desc_dec_dma, edesc, req, iv_contig); 1611 ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1614 desc = edesc->hw_desc; 1612 desc = edesc->hw_desc;
1615#ifdef DEBUG 1613#ifdef DEBUG
1616 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", 1614 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1617 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1615 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1618 desc_bytes(edesc->hw_desc), 1); 1616 desc_bytes(edesc->hw_desc), 1);
1619#endif 1617#endif
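A recurring change in this file (and in caamhash.c and key_gen.c below) is dropping the driver-local xstr()/str() helpers in favour of the kernel's __stringify() from <linux/stringify.h> when building print_hex_dump() prefixes. The two-level expansion is what turns __LINE__ into its numeric value before it is stringified. A stand-alone sketch of the idea; the two macro definitions mirror the kernel's, the rest is illustrative:

#include <stdio.h>

/* Userspace stand-in for <linux/stringify.h>: the outer macro expands
 * its argument (so __LINE__ becomes a number) before the inner macro
 * turns it into a string literal. */
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

int main(void)
{
	/* Prints something like "ctx.key@13: " -- the same prefix shape
	 * the CAAM DEBUG dumps pass to print_hex_dump(). */
	printf("ctx.key@" __stringify(__LINE__) ": \n");
	return 0;
}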
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 84573b4d6f92..e732bd962e98 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -72,8 +72,6 @@
72#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE 72#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
73 73
74/* length of descriptors text */ 74/* length of descriptors text */
75#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
76
77#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ) 75#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
78#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) 76#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
79#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) 77#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
@@ -91,8 +89,6 @@
91 89
92#ifdef DEBUG 90#ifdef DEBUG
93/* for print_hex_dumps with line references */ 91/* for print_hex_dumps with line references */
94#define xstr(s) str(s)
95#define str(s) #s
96#define debug(format, arg...) printk(format, arg) 92#define debug(format, arg...) printk(format, arg)
97#else 93#else
98#define debug(format, arg...) 94#define debug(format, arg...)
@@ -331,7 +327,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
331 return -ENOMEM; 327 return -ENOMEM;
332 } 328 }
333#ifdef DEBUG 329#ifdef DEBUG
334 print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ", 330 print_hex_dump(KERN_ERR,
331 "ahash update shdesc@"__stringify(__LINE__)": ",
335 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 332 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
336#endif 333#endif
337 334
@@ -349,7 +346,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
349 return -ENOMEM; 346 return -ENOMEM;
350 } 347 }
351#ifdef DEBUG 348#ifdef DEBUG
352 print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ", 349 print_hex_dump(KERN_ERR,
350 "ahash update first shdesc@"__stringify(__LINE__)": ",
353 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 351 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
354#endif 352#endif
355 353
@@ -366,7 +364,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
366 return -ENOMEM; 364 return -ENOMEM;
367 } 365 }
368#ifdef DEBUG 366#ifdef DEBUG
369 print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ", 367 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
370 DUMP_PREFIX_ADDRESS, 16, 4, desc, 368 DUMP_PREFIX_ADDRESS, 16, 4, desc,
371 desc_bytes(desc), 1); 369 desc_bytes(desc), 1);
372#endif 370#endif
@@ -384,7 +382,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
384 return -ENOMEM; 382 return -ENOMEM;
385 } 383 }
386#ifdef DEBUG 384#ifdef DEBUG
387 print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ", 385 print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
388 DUMP_PREFIX_ADDRESS, 16, 4, desc, 386 DUMP_PREFIX_ADDRESS, 16, 4, desc,
389 desc_bytes(desc), 1); 387 desc_bytes(desc), 1);
390#endif 388#endif
@@ -403,7 +401,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
403 return -ENOMEM; 401 return -ENOMEM;
404 } 402 }
405#ifdef DEBUG 403#ifdef DEBUG
406 print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ", 404 print_hex_dump(KERN_ERR,
405 "ahash digest shdesc@"__stringify(__LINE__)": ",
407 DUMP_PREFIX_ADDRESS, 16, 4, desc, 406 DUMP_PREFIX_ADDRESS, 16, 4, desc,
408 desc_bytes(desc), 1); 407 desc_bytes(desc), 1);
409#endif 408#endif
@@ -464,9 +463,9 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
464 LDST_SRCDST_BYTE_CONTEXT); 463 LDST_SRCDST_BYTE_CONTEXT);
465 464
466#ifdef DEBUG 465#ifdef DEBUG
467 print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ", 466 print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
468 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); 467 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
469 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", 468 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
470 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 469 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
471#endif 470#endif
472 471
@@ -479,7 +478,8 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
479 wait_for_completion_interruptible(&result.completion); 478 wait_for_completion_interruptible(&result.completion);
480 ret = result.err; 479 ret = result.err;
481#ifdef DEBUG 480#ifdef DEBUG
482 print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ", 481 print_hex_dump(KERN_ERR,
482 "digested key@"__stringify(__LINE__)": ",
483 DUMP_PREFIX_ADDRESS, 16, 4, key_in, 483 DUMP_PREFIX_ADDRESS, 16, 4, key_in,
484 digestsize, 1); 484 digestsize, 1);
485#endif 485#endif
@@ -530,7 +530,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
530#ifdef DEBUG 530#ifdef DEBUG
531 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", 531 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
532 ctx->split_key_len, ctx->split_key_pad_len); 532 ctx->split_key_len, ctx->split_key_pad_len);
533 print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", 533 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
534 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 534 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
535#endif 535#endif
536 536
@@ -545,7 +545,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
545 return -ENOMEM; 545 return -ENOMEM;
546 } 546 }
547#ifdef DEBUG 547#ifdef DEBUG
548 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", 548 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
549 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 549 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
550 ctx->split_key_pad_len, 1); 550 ctx->split_key_pad_len, 1);
551#endif 551#endif
@@ -638,11 +638,11 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
638 kfree(edesc); 638 kfree(edesc);
639 639
640#ifdef DEBUG 640#ifdef DEBUG
641 print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", 641 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
642 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 642 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
643 ctx->ctx_len, 1); 643 ctx->ctx_len, 1);
644 if (req->result) 644 if (req->result)
645 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", 645 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
646 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 646 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
647 digestsize, 1); 647 digestsize, 1);
648#endif 648#endif
@@ -676,11 +676,11 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
676 kfree(edesc); 676 kfree(edesc);
677 677
678#ifdef DEBUG 678#ifdef DEBUG
679 print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", 679 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
680 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 680 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
681 ctx->ctx_len, 1); 681 ctx->ctx_len, 1);
682 if (req->result) 682 if (req->result)
683 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", 683 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
684 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 684 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
685 digestsize, 1); 685 digestsize, 1);
686#endif 686#endif
@@ -714,11 +714,11 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
714 kfree(edesc); 714 kfree(edesc);
715 715
716#ifdef DEBUG 716#ifdef DEBUG
717 print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", 717 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
718 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 718 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
719 ctx->ctx_len, 1); 719 ctx->ctx_len, 1);
720 if (req->result) 720 if (req->result)
721 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", 721 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
722 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 722 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
723 digestsize, 1); 723 digestsize, 1);
724#endif 724#endif
@@ -752,11 +752,11 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
752 kfree(edesc); 752 kfree(edesc);
753 753
754#ifdef DEBUG 754#ifdef DEBUG
755 print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ", 755 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
756 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 756 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
757 ctx->ctx_len, 1); 757 ctx->ctx_len, 1);
758 if (req->result) 758 if (req->result)
759 print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ", 759 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
760 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 760 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
761 digestsize, 1); 761 digestsize, 1);
762#endif 762#endif
@@ -852,7 +852,7 @@ static int ahash_update_ctx(struct ahash_request *req)
852 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); 852 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
853 853
854#ifdef DEBUG 854#ifdef DEBUG
855 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", 855 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
856 DUMP_PREFIX_ADDRESS, 16, 4, desc, 856 DUMP_PREFIX_ADDRESS, 16, 4, desc,
857 desc_bytes(desc), 1); 857 desc_bytes(desc), 1);
858#endif 858#endif
@@ -871,9 +871,9 @@ static int ahash_update_ctx(struct ahash_request *req)
871 *next_buflen = last_buflen; 871 *next_buflen = last_buflen;
872 } 872 }
873#ifdef DEBUG 873#ifdef DEBUG
874 print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ", 874 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
875 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 875 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
876 print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ", 876 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
877 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 877 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
878 *next_buflen, 1); 878 *next_buflen, 1);
879#endif 879#endif
@@ -937,7 +937,7 @@ static int ahash_final_ctx(struct ahash_request *req)
937 digestsize); 937 digestsize);
938 938
939#ifdef DEBUG 939#ifdef DEBUG
940 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", 940 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
941 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 941 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
942#endif 942#endif
943 943
@@ -1016,7 +1016,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
1016 digestsize); 1016 digestsize);
1017 1017
1018#ifdef DEBUG 1018#ifdef DEBUG
1019 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", 1019 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1020 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1020 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1021#endif 1021#endif
1022 1022
@@ -1086,7 +1086,7 @@ static int ahash_digest(struct ahash_request *req)
1086 digestsize); 1086 digestsize);
1087 1087
1088#ifdef DEBUG 1088#ifdef DEBUG
1089 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", 1089 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1090 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1090 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1091#endif 1091#endif
1092 1092
@@ -1140,7 +1140,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
1140 edesc->src_nents = 0; 1140 edesc->src_nents = 0;
1141 1141
1142#ifdef DEBUG 1142#ifdef DEBUG
1143 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", 1143 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1144 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1144 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1145#endif 1145#endif
1146 1146
@@ -1228,7 +1228,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
1228 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1228 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1229 1229
1230#ifdef DEBUG 1230#ifdef DEBUG
1231 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", 1231 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1232 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1232 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1233 desc_bytes(desc), 1); 1233 desc_bytes(desc), 1);
1234#endif 1234#endif
@@ -1250,9 +1250,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
1250 *next_buflen = 0; 1250 *next_buflen = 0;
1251 } 1251 }
1252#ifdef DEBUG 1252#ifdef DEBUG
1253 print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ", 1253 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
1254 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 1254 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1255 print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ", 1255 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1256 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1256 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1257 *next_buflen, 1); 1257 *next_buflen, 1);
1258#endif 1258#endif
@@ -1321,7 +1321,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
1321 digestsize); 1321 digestsize);
1322 1322
1323#ifdef DEBUG 1323#ifdef DEBUG
1324 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", 1324 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1325 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1325 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1326#endif 1326#endif
1327 1327
@@ -1414,7 +1414,7 @@ static int ahash_update_first(struct ahash_request *req)
1414 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1414 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1415 1415
1416#ifdef DEBUG 1416#ifdef DEBUG
1417 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", 1417 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1418 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1418 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1419 desc_bytes(desc), 1); 1419 desc_bytes(desc), 1);
1420#endif 1420#endif
@@ -1438,7 +1438,7 @@ static int ahash_update_first(struct ahash_request *req)
1438 sg_copy(next_buf, req->src, req->nbytes); 1438 sg_copy(next_buf, req->src, req->nbytes);
1439 } 1439 }
1440#ifdef DEBUG 1440#ifdef DEBUG
1441 print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ", 1441 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1442 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1442 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1443 *next_buflen, 1); 1443 *next_buflen, 1);
1444#endif 1444#endif
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index f5d6deced1cb..b010d42a1803 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -75,55 +75,53 @@ static void build_instantiation_desc(u32 *desc)
75 OP_ALG_RNG4_SK); 75 OP_ALG_RNG4_SK);
76} 76}
77 77
78struct instantiate_result { 78static int instantiate_rng(struct device *ctrldev)
79 struct completion completion;
80 int err;
81};
82
83static void rng4_init_done(struct device *dev, u32 *desc, u32 err,
84 void *context)
85{
86 struct instantiate_result *instantiation = context;
87
88 if (err) {
89 char tmp[CAAM_ERROR_STR_MAX];
90
91 dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
92 }
93
94 instantiation->err = err;
95 complete(&instantiation->completion);
96}
97
98static int instantiate_rng(struct device *jrdev)
99{ 79{
100 struct instantiate_result instantiation; 80 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
101 81 struct caam_full __iomem *topregs;
102 dma_addr_t desc_dma; 82 unsigned int timeout = 100000;
103 u32 *desc; 83 u32 *desc;
104 int ret; 84 int i, ret = 0;
105 85
106 desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA); 86 desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
107 if (!desc) { 87 if (!desc) {
108 dev_err(jrdev, "cannot allocate RNG init descriptor memory\n"); 88 dev_err(ctrldev, "can't allocate RNG init descriptor memory\n");
109 return -ENOMEM; 89 return -ENOMEM;
110 } 90 }
111
112 build_instantiation_desc(desc); 91 build_instantiation_desc(desc);
113 desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE); 92
114 init_completion(&instantiation.completion); 93 /* Set the bit to request direct access to DECO0 */
115 ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation); 94 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
116 if (!ret) { 95 setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
117 wait_for_completion_interruptible(&instantiation.completion); 96
118 ret = instantiation.err; 97 while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
119 if (ret) 98 --timeout)
120 dev_err(jrdev, "unable to instantiate RNG\n"); 99 cpu_relax();
100
101 if (!timeout) {
102 dev_err(ctrldev, "failed to acquire DECO 0\n");
103 ret = -EIO;
104 goto out;
121 } 105 }
122 106
123 dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE); 107 for (i = 0; i < desc_len(desc); i++)
108 topregs->deco.descbuf[i] = *(desc + i);
124 109
125 kfree(desc); 110 wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR);
126 111
112 timeout = 10000000;
113 while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) &&
114 --timeout)
115 cpu_relax();
116
117 if (!timeout) {
118 dev_err(ctrldev, "failed to instantiate RNG\n");
119 ret = -EIO;
120 }
121
122 clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
123out:
124 kfree(desc);
127 return ret; 125 return ret;
128} 126}
129 127
@@ -303,7 +301,7 @@ static int caam_probe(struct platform_device *pdev)
303 if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 && 301 if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 &&
304 !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) { 302 !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) {
305 kick_trng(pdev); 303 kick_trng(pdev);
306 ret = instantiate_rng(ctrlpriv->jrdev[0]); 304 ret = instantiate_rng(dev);
307 if (ret) { 305 if (ret) {
308 caam_remove(pdev); 306 caam_remove(pdev);
309 return ret; 307 return ret;
@@ -315,9 +313,6 @@ static int caam_probe(struct platform_device *pdev)
315 313
316 /* NOTE: RTIC detection ought to go here, around Si time */ 314 /* NOTE: RTIC detection ought to go here, around Si time */
317 315
318 /* Initialize queue allocator lock */
319 spin_lock_init(&ctrlpriv->jr_alloc_lock);
320
321 caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id); 316 caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
322 317
323 /* Report "alive" for developer to see */ 318 /* Report "alive" for developer to see */
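The reworked instantiate_rng() above drives DECO0 directly instead of submitting through a job ring: it requests access via DECORR_RQD0ENABLE, busy-waits with a bounded timeout for DECORR_DEN0, copies the descriptor into the DECO buffer, kicks it off through jr_ctl_hi, then polls desc_dbg until DECO_DBG_VALID drops. A compact userspace sketch of that bounded-polling idiom, with a plain pointer standing in for the driver's register accessors (wait_bit_set() and the sample values are illustrative, not driver API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Test a bit in a (typically MMIO) status word and give up after a
 * fixed number of iterations instead of spinning forever; the real
 * loop also calls cpu_relax() in the body. */
static bool wait_bit_set(volatile uint32_t *reg, uint32_t mask,
			 unsigned int timeout)
{
	while (!(*reg & mask) && --timeout)
		;				/* cpu_relax() in the driver */
	return timeout != 0;			/* false => timed out, -EIO path */
}

int main(void)
{
	volatile uint32_t fake_status = 0x00010000;	/* pretend DECORR_DEN0 is set */

	if (!wait_bit_set(&fake_status, 0x00010000, 100000))
		fprintf(stderr, "timed out acquiring DECO0\n");
	else
		printf("DECO0 granted\n");
	return 0;
}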
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index fe3bfd1b08ca..cd5f678847ce 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -10,6 +10,7 @@
10#define CAAM_CMD_SZ sizeof(u32) 10#define CAAM_CMD_SZ sizeof(u32)
11#define CAAM_PTR_SZ sizeof(dma_addr_t) 11#define CAAM_PTR_SZ sizeof(dma_addr_t)
12#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) 12#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
13#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
13 14
14#ifdef DEBUG 15#ifdef DEBUG
15#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ 16#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index e4a16b741371..34c4b9f7fbfa 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -9,9 +9,6 @@
9#ifndef INTERN_H 9#ifndef INTERN_H
10#define INTERN_H 10#define INTERN_H
11 11
12#define JOBR_UNASSIGNED 0
13#define JOBR_ASSIGNED 1
14
15/* Currently comes from Kconfig param as a ^2 (driver-required) */ 12/* Currently comes from Kconfig param as a ^2 (driver-required) */
16#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE) 13#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
17 14
@@ -46,7 +43,6 @@ struct caam_drv_private_jr {
46 struct caam_job_ring __iomem *rregs; /* JobR's register space */ 43 struct caam_job_ring __iomem *rregs; /* JobR's register space */
47 struct tasklet_struct irqtask; 44 struct tasklet_struct irqtask;
48 int irq; /* One per queue */ 45 int irq; /* One per queue */
49 int assign; /* busy/free */
50 46
51 /* Job ring info */ 47 /* Job ring info */
52 int ringsize; /* Size of rings (assume input = output) */ 48 int ringsize; /* Size of rings (assume input = output) */
@@ -68,7 +64,6 @@ struct caam_drv_private {
68 64
69 struct device *dev; 65 struct device *dev;
70 struct device **jrdev; /* Alloc'ed array per sub-device */ 66 struct device **jrdev; /* Alloc'ed array per sub-device */
71 spinlock_t jr_alloc_lock;
72 struct platform_device *pdev; 67 struct platform_device *pdev;
73 68
74 /* Physical-presence section */ 69 /* Physical-presence section */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index b4aa773ecbc8..105ba4da6180 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -126,72 +126,6 @@ static void caam_jr_dequeue(unsigned long devarg)
126} 126}
127 127
128/** 128/**
129 * caam_jr_register() - Alloc a ring for someone to use as needed. Returns
130 * an ordinal of the rings allocated, else returns -ENODEV if no rings
131 * are available.
132 * @ctrldev: points to the controller level dev (parent) that
133 * owns rings available for use.
134 * @dev: points to where a pointer to the newly allocated queue's
135 * dev can be written to if successful.
136 **/
137int caam_jr_register(struct device *ctrldev, struct device **rdev)
138{
139 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
140 struct caam_drv_private_jr *jrpriv = NULL;
141 int ring;
142
143 /* Lock, if free ring - assign, unlock */
144 spin_lock(&ctrlpriv->jr_alloc_lock);
145 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
146 jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
147 if (jrpriv->assign == JOBR_UNASSIGNED) {
148 jrpriv->assign = JOBR_ASSIGNED;
149 *rdev = ctrlpriv->jrdev[ring];
150 spin_unlock(&ctrlpriv->jr_alloc_lock);
151 return ring;
152 }
153 }
154
155 /* If assigned, write dev where caller needs it */
156 spin_unlock(&ctrlpriv->jr_alloc_lock);
157 *rdev = NULL;
158
159 return -ENODEV;
160}
161EXPORT_SYMBOL(caam_jr_register);
162
163/**
164 * caam_jr_deregister() - Deregister an API and release the queue.
165 * Returns 0 if OK, -EBUSY if queue still contains pending entries
166 * or unprocessed results at the time of the call
167 * @dev - points to the dev that identifies the queue to
168 * be released.
169 **/
170int caam_jr_deregister(struct device *rdev)
171{
172 struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
173 struct caam_drv_private *ctrlpriv;
174
175 /* Get the owning controller's private space */
176 ctrlpriv = dev_get_drvdata(jrpriv->parentdev);
177
178 /*
179 * Make sure ring empty before release
180 */
181 if (rd_reg32(&jrpriv->rregs->outring_used) ||
182 (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH))
183 return -EBUSY;
184
185 /* Release ring */
186 spin_lock(&ctrlpriv->jr_alloc_lock);
187 jrpriv->assign = JOBR_UNASSIGNED;
188 spin_unlock(&ctrlpriv->jr_alloc_lock);
189
190 return 0;
191}
192EXPORT_SYMBOL(caam_jr_deregister);
193
194/**
195 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, 129 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
196 * -EBUSY if the queue is full, -EIO if it cannot map the caller's 130 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
197 * descriptor. 131 * descriptor.
@@ -379,7 +313,6 @@ static int caam_jr_init(struct device *dev)
379 (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | 313 (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
380 (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); 314 (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
381 315
382 jrp->assign = JOBR_UNASSIGNED;
383 return 0; 316 return 0;
384} 317}
385 318
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
index c23df395b622..9d8741a59037 100644
--- a/drivers/crypto/caam/jr.h
+++ b/drivers/crypto/caam/jr.h
@@ -8,8 +8,6 @@
8#define JR_H 8#define JR_H
9 9
10/* Prototypes for backend-level services exposed to APIs */ 10/* Prototypes for backend-level services exposed to APIs */
11int caam_jr_register(struct device *ctrldev, struct device **rdev);
12int caam_jr_deregister(struct device *rdev);
13int caam_jr_enqueue(struct device *dev, u32 *desc, 11int caam_jr_enqueue(struct device *dev, u32 *desc,
14 void (*cbk)(struct device *dev, u32 *desc, u32 status, 12 void (*cbk)(struct device *dev, u32 *desc, u32 status,
15 void *areq), 13 void *areq),
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 87138d2adb5f..ea2e406610eb 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -95,9 +95,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
95 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); 95 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
96 96
97#ifdef DEBUG 97#ifdef DEBUG
98 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", 98 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
99 DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); 99 DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
100 print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", 100 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
101 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 101 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
102#endif 102#endif
103 103
@@ -110,7 +110,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
110 wait_for_completion_interruptible(&result.completion); 110 wait_for_completion_interruptible(&result.completion);
111 ret = result.err; 111 ret = result.err;
112#ifdef DEBUG 112#ifdef DEBUG
113 print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", 113 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
114 DUMP_PREFIX_ADDRESS, 16, 4, key_out, 114 DUMP_PREFIX_ADDRESS, 16, 4, key_out,
115 split_key_pad_len, 1); 115 split_key_pad_len, 1);
116#endif 116#endif
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index c09142fc13e3..4455396918de 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -341,6 +341,8 @@ struct caam_ctrl {
341#define MCFGR_DMA_RESET 0x10000000 341#define MCFGR_DMA_RESET 0x10000000
342#define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */ 342#define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */
343#define SCFGR_RDBENABLE 0x00000400 343#define SCFGR_RDBENABLE 0x00000400
344#define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */
345#define DECORR_DEN0 0x00010000 /* DECO0 available for access*/
344 346
345/* AXI read cache control */ 347/* AXI read cache control */
346#define MCFGR_ARCACHE_SHIFT 12 348#define MCFGR_ARCACHE_SHIFT 12
@@ -703,9 +705,16 @@ struct caam_deco {
703 struct deco_sg_table sctr_tbl[4]; /* DxSTR - Scatter Tables */ 705 struct deco_sg_table sctr_tbl[4]; /* DxSTR - Scatter Tables */
704 u32 rsvd29[48]; 706 u32 rsvd29[48];
705 u32 descbuf[64]; /* DxDESB - Descriptor buffer */ 707 u32 descbuf[64]; /* DxDESB - Descriptor buffer */
706 u32 rsvd30[320]; 708 u32 rscvd30[193];
709 u32 desc_dbg; /* DxDDR - DECO Debug Register */
710 u32 rsvd31[126];
707}; 711};
708 712
713/* DECO DBG Register Valid Bit*/
714#define DECO_DBG_VALID 0x80000000
715#define DECO_JQCR_WHL 0x20000000
716#define DECO_JQCR_FOUR 0x10000000
717
709/* 718/*
710 * Current top-level view of memory map is: 719 * Current top-level view of memory map is:
711 * 720 *
@@ -733,6 +742,7 @@ struct caam_full {
733 u64 rsvd[512]; 742 u64 rsvd[512];
734 struct caam_assurance assure; 743 struct caam_assurance assure;
735 struct caam_queue_if qi; 744 struct caam_queue_if qi;
745 struct caam_deco deco;
736}; 746};
737 747
738#endif /* REGS_H */ 748#endif /* REGS_H */
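One detail worth checking in the regs.h hunk: the old rsvd30[320] is split into 193 reserved words, the DxDDR debug register, and 126 more reserved words, so struct caam_deco keeps its size while desc_dbg lands at the intended offset. A throwaway compile-time check of that arithmetic (C11 static_assert, purely illustrative):

#include <assert.h>

/* 193 reserved words + 1 debug word (desc_dbg) + 126 reserved words
 * must still cover the 320 words previously reserved as rsvd30[320]. */
static_assert(193 + 1 + 126 == 320, "caam_deco DECO block size changed");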
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index 35d483f8db66..cc00b52306ba 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -70,35 +70,52 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
70{ 70{
71 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); 71 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
73 unsigned long irq_flags;
74 unsigned int processed = 0, to_process;
75 u32 max_sg_len;
73 int rc; 76 int rc;
74 77
75 if (nbytes > nx_ctx->ap->databytelen) 78 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
76 return -EINVAL; 79
80 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
81 nx_ctx->ap->sglen);
77 82
78 if (enc) 83 if (enc)
79 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; 84 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
80 else 85 else
81 NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; 86 NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
82 87
83 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, 88 do {
84 csbcpb->cpb.aes_cbc.iv); 89 to_process = min_t(u64, nbytes - processed,
85 if (rc) 90 nx_ctx->ap->databytelen);
86 goto out; 91 to_process = min_t(u64, to_process,
87 92 NX_PAGE_SIZE * (max_sg_len - 1));
88 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { 93 to_process = to_process & ~(AES_BLOCK_SIZE - 1);
89 rc = -EINVAL; 94
90 goto out; 95 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
91 } 96 processed, csbcpb->cpb.aes_cbc.iv);
92 97 if (rc)
93 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 98 goto out;
94 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); 99
95 if (rc) 100 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
96 goto out; 101 rc = -EINVAL;
97 102 goto out;
98 atomic_inc(&(nx_ctx->stats->aes_ops)); 103 }
99 atomic64_add(csbcpb->csb.processed_byte_count, 104
100 &(nx_ctx->stats->aes_bytes)); 105 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
106 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
107 if (rc)
108 goto out;
109
110 memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
111 atomic_inc(&(nx_ctx->stats->aes_ops));
112 atomic64_add(csbcpb->csb.processed_byte_count,
113 &(nx_ctx->stats->aes_bytes));
114
115 processed += to_process;
116 } while (processed < nbytes);
101out: 117out:
118 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
102 return rc; 119 return rc;
103} 120}
104 121
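The cbc_aes_nx_crypt() rework above replaces the single-shot "fail if nbytes > databytelen" check with a loop that feeds the coprocessor one bounded chunk at a time, copying the returned chaining value back into desc->info so the next chunk continues the CBC chain. The clamping arithmetic is the interesting part; a self-contained sketch of it, where the function name and the sample limits are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define AES_BLOCK_SIZE	16

/* Each pass is limited by the coprocessor's per-call byte limit and by
 * how many scatter/gather entries fit in one page, then rounded down
 * to a whole number of AES blocks -- the same three clamps applied to
 * to_process in the loop above. */
static uint64_t chunk_len(uint64_t remaining, uint64_t databytelen,
			  uint64_t page_size, uint32_t max_sg_len)
{
	uint64_t n = remaining;

	if (n > databytelen)
		n = databytelen;
	if (n > page_size * (max_sg_len - 1))
		n = page_size * (max_sg_len - 1);
	return n & ~(uint64_t)(AES_BLOCK_SIZE - 1);
}

int main(void)
{
	uint64_t processed = 0, nbytes = 1 << 20;	/* 1 MiB request */

	while (processed < nbytes) {
		uint64_t step = chunk_len(nbytes - processed, 65536, 4096, 32);
		/* ...submit 'step' bytes to the coprocessor here... */
		processed += step;
	}
	printf("processed %llu bytes\n", (unsigned long long)processed);
	return 0;
}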
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index ef5eae6d1400..5ecd4c2414aa 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -179,13 +179,26 @@ static int generate_pat(u8 *iv,
179 struct nx_sg *nx_insg = nx_ctx->in_sg; 179 struct nx_sg *nx_insg = nx_ctx->in_sg;
180 struct nx_sg *nx_outsg = nx_ctx->out_sg; 180 struct nx_sg *nx_outsg = nx_ctx->out_sg;
181 unsigned int iauth_len = 0; 181 unsigned int iauth_len = 0;
182 struct vio_pfo_op *op = NULL;
183 u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL; 182 u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
184 int rc; 183 int rc;
185 184
186 /* zero the ctr value */ 185 /* zero the ctr value */
187 memset(iv + 15 - iv[0], 0, iv[0] + 1); 186 memset(iv + 15 - iv[0], 0, iv[0] + 1);
188 187
188 /* page 78 of nx_wb.pdf has,
189 * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes
190 * in length. If a full message is used, the AES CCA implementation
191 * restricts the maximum AAD length to 2^32 -1 bytes.
192 * If partial messages are used, the implementation supports
193 * 2^64 -1 bytes maximum AAD length.
194 *
195 * However, in the cryptoapi's aead_request structure,
196 * assoclen is an unsigned int, thus it cannot hold a length
197 * value greater than 2^32 - 1.
198 * Thus the AAD is further constrained by this and is never
199 * greater than 2^32.
200 */
201
189 if (!req->assoclen) { 202 if (!req->assoclen) {
190 b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; 203 b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
191 } else if (req->assoclen <= 14) { 204 } else if (req->assoclen <= 14) {
@@ -195,7 +208,46 @@ static int generate_pat(u8 *iv,
195 b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; 208 b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
196 b1 = nx_ctx->priv.ccm.iauth_tag; 209 b1 = nx_ctx->priv.ccm.iauth_tag;
197 iauth_len = req->assoclen; 210 iauth_len = req->assoclen;
211 } else if (req->assoclen <= 65280) {
212 /* if associated data is less than (2^16 - 2^8), we construct
213 * B1 differently and feed in the associated data to a CCA
214 * operation */
215 b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
216 b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
217 iauth_len = 14;
218 } else {
219 b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
220 b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
221 iauth_len = 10;
222 }
223
224 /* generate B0 */
225 rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
226 if (rc)
227 return rc;
228
229 /* generate B1:
230 * add control info for associated data
231 * RFC 3610 and NIST Special Publication 800-38C
232 */
233 if (b1) {
234 memset(b1, 0, 16);
235 if (req->assoclen <= 65280) {
236 *(u16 *)b1 = (u16)req->assoclen;
237 scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
238 iauth_len, SCATTERWALK_FROM_SG);
239 } else {
240 *(u16 *)b1 = (u16)(0xfffe);
241 *(u32 *)&b1[2] = (u32)req->assoclen;
242 scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
243 iauth_len, SCATTERWALK_FROM_SG);
244 }
245 }
198 246
247 /* now copy any remaining AAD to scatterlist and call nx... */
248 if (!req->assoclen) {
249 return rc;
250 } else if (req->assoclen <= 14) {
199 nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen); 251 nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen);
200 nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16, 252 nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16,
201 nx_ctx->ap->sglen); 253 nx_ctx->ap->sglen);
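For context on the B1 block that generate_pat() now fills in: RFC 3610 prefixes the associated data with a 2-byte length when it is below 2^16 - 2^8 and with the marker 0xfffe plus a 4-byte length otherwise, which is the pair of branches added above (the driver's plain u16/u32 casts rely on running on big-endian Power). A self-contained sketch of that prefix encoding, with the big-endian byte stores written out explicitly:

#include <stdint.h>
#include <string.h>

/* Fill the AAD-length prefix of CCM block B1 as RFC 3610 describes; the
 * threshold mirrors the req->assoclen <= 65280 branch above.  Returns the
 * number of prefix bytes (2 or 6); the AAD itself follows the prefix.
 */
static unsigned int ccm_b1_prefix(uint8_t b1[16], uint32_t assoclen)
{
	memset(b1, 0, 16);

	if (assoclen <= 65280) {	/* 2^16 - 2^8 */
		b1[0] = assoclen >> 8;
		b1[1] = assoclen & 0xff;
		return 2;
	}

	b1[0] = 0xff;
	b1[1] = 0xfe;
	b1[2] = assoclen >> 24;
	b1[3] = (assoclen >> 16) & 0xff;
	b1[4] = (assoclen >> 8) & 0xff;
	b1[5] = assoclen & 0xff;
	return 6;
}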
@@ -210,56 +262,74 @@ static int generate_pat(u8 *iv,
210 NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT; 262 NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
211 NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE; 263 NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;
212 264
213 op = &nx_ctx->op;
214 result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac; 265 result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;
215 } else if (req->assoclen <= 65280) {
216 /* if associated data is less than (2^16 - 2^8), we construct
217 * B1 differently and feed in the associated data to a CCA
218 * operation */
219 b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
220 b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
221 iauth_len = 14;
222 266
223 /* remaining assoc data must have scatterlist built for it */ 267 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
224 nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, 268 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
225 req->assoc, iauth_len, 269 if (rc)
226 req->assoclen - iauth_len); 270 return rc;
227 nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) * 271
228 sizeof(struct nx_sg); 272 atomic_inc(&(nx_ctx->stats->aes_ops));
273 atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
229 274
230 op = &nx_ctx->op_aead;
231 result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
232 } else { 275 } else {
233 /* if associated data is less than (2^32), we construct B1 276 u32 max_sg_len;
234 * differently yet again and feed in the associated data to a 277 unsigned int processed = 0, to_process;
235 * CCA operation */ 278
236 pr_err("associated data len is %u bytes (returning -EINVAL)\n", 279 /* page_limit: number of sg entries that fit on one page */
237 req->assoclen); 280 max_sg_len = min_t(u32,
238 rc = -EINVAL; 281 nx_driver.of.max_sg_len/sizeof(struct nx_sg),
239 } 282 nx_ctx->ap->sglen);
283
284 processed += iauth_len;
285
286 do {
287 to_process = min_t(u32, req->assoclen - processed,
288 nx_ctx->ap->databytelen);
289 to_process = min_t(u64, to_process,
290 NX_PAGE_SIZE * (max_sg_len - 1));
291
292 if ((to_process + processed) < req->assoclen) {
293 NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
294 NX_FDM_INTERMEDIATE;
295 } else {
296 NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
297 ~NX_FDM_INTERMEDIATE;
298 }
299
300 nx_insg = nx_walk_and_build(nx_ctx->in_sg,
301 nx_ctx->ap->sglen,
302 req->assoc, processed,
303 to_process);
304
305 nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
306 sizeof(struct nx_sg);
240 307
241 rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0); 308 result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
242 if (rc)
243 goto done;
244 309
245 if (b1) { 310 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
246 memset(b1, 0, 16); 311 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
247 *(u16 *)b1 = (u16)req->assoclen; 312 if (rc)
313 return rc;
248 314
249 scatterwalk_map_and_copy(b1 + 2, req->assoc, 0, 315 memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
250 iauth_len, SCATTERWALK_FROM_SG); 316 nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
317 AES_BLOCK_SIZE);
251 318
252 rc = nx_hcall_sync(nx_ctx, op, 319 NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;
253 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
254 if (rc)
255 goto done;
256 320
257 atomic_inc(&(nx_ctx->stats->aes_ops)); 321 atomic_inc(&(nx_ctx->stats->aes_ops));
258 atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); 322 atomic64_add(req->assoclen,
323 &(nx_ctx->stats->aes_bytes));
259 324
260 memcpy(out, result, AES_BLOCK_SIZE); 325 processed += to_process;
326 } while (processed < req->assoclen);
327
328 result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
261 } 329 }
262done: 330
331 memcpy(out, result, AES_BLOCK_SIZE);
332
263 return rc; 333 return rc;
264} 334}
265 335
@@ -271,10 +341,12 @@ static int ccm_nx_decrypt(struct aead_request *req,
271 unsigned int nbytes = req->cryptlen; 341 unsigned int nbytes = req->cryptlen;
272 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); 342 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
273 struct nx_ccm_priv *priv = &nx_ctx->priv.ccm; 343 struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
344 unsigned long irq_flags;
345 unsigned int processed = 0, to_process;
346 u32 max_sg_len;
274 int rc = -1; 347 int rc = -1;
275 348
276 if (nbytes > nx_ctx->ap->databytelen) 349 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
277 return -EINVAL;
278 350
279 nbytes -= authsize; 351 nbytes -= authsize;
280 352
@@ -288,26 +360,61 @@ static int ccm_nx_decrypt(struct aead_request *req,
288 if (rc) 360 if (rc)
289 goto out; 361 goto out;
290 362
291 rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes, 363 /* page_limit: number of sg entries that fit on one page */
292 csbcpb->cpb.aes_ccm.iv_or_ctr); 364 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
293 if (rc) 365 nx_ctx->ap->sglen);
294 goto out;
295 366
296 NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; 367 do {
297 NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_INTERMEDIATE; 368
369 /* to_process: the AES_BLOCK_SIZE data chunk to process in this
370 * update. This value is bound by sg list limits.
371 */
372 to_process = min_t(u64, nbytes - processed,
373 nx_ctx->ap->databytelen);
374 to_process = min_t(u64, to_process,
375 NX_PAGE_SIZE * (max_sg_len - 1));
376
377 if ((to_process + processed) < nbytes)
378 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
379 else
380 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
381
382 NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
383
384 rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
385 to_process, processed,
386 csbcpb->cpb.aes_ccm.iv_or_ctr);
387 if (rc)
388 goto out;
298 389
299 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 390 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
300 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); 391 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
301 if (rc) 392 if (rc)
302 goto out; 393 goto out;
303 394
304 atomic_inc(&(nx_ctx->stats->aes_ops)); 395 /* for partial completion, copy following for next
305 atomic64_add(csbcpb->csb.processed_byte_count, 396 * entry into loop...
306 &(nx_ctx->stats->aes_bytes)); 397 */
398 memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
399 memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
400 csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
401 memcpy(csbcpb->cpb.aes_ccm.in_s0,
402 csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);
403
404 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
405
406 /* update stats */
407 atomic_inc(&(nx_ctx->stats->aes_ops));
408 atomic64_add(csbcpb->csb.processed_byte_count,
409 &(nx_ctx->stats->aes_bytes));
410
411 processed += to_process;
412 } while (processed < nbytes);
307 413
308 rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, 414 rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
309 authsize) ? -EBADMSG : 0; 415 authsize) ? -EBADMSG : 0;
310out: 416out:
417 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
311 return rc; 418 return rc;
312} 419}
313 420
@@ -318,38 +425,76 @@ static int ccm_nx_encrypt(struct aead_request *req,
318 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 425 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
319 unsigned int nbytes = req->cryptlen; 426 unsigned int nbytes = req->cryptlen;
320 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); 427 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
428 unsigned long irq_flags;
429 unsigned int processed = 0, to_process;
430 u32 max_sg_len;
321 int rc = -1; 431 int rc = -1;
322 432
323 if (nbytes > nx_ctx->ap->databytelen) 433 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
324 return -EINVAL;
325 434
326 rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, 435 rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
327 csbcpb->cpb.aes_ccm.in_pat_or_b0); 436 csbcpb->cpb.aes_ccm.in_pat_or_b0);
328 if (rc) 437 if (rc)
329 goto out; 438 goto out;
330 439
331 rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes, 440 /* page_limit: number of sg entries that fit on one page */
332 csbcpb->cpb.aes_ccm.iv_or_ctr); 441 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
333 if (rc) 442 nx_ctx->ap->sglen);
334 goto out; 443
444 do {
445 /* to_process: the AES_BLOCK_SIZE data chunk to process in this
446 * update. This value is bound by sg list limits.
447 */
448 to_process = min_t(u64, nbytes - processed,
449 nx_ctx->ap->databytelen);
450 to_process = min_t(u64, to_process,
451 NX_PAGE_SIZE * (max_sg_len - 1));
452
453 if ((to_process + processed) < nbytes)
454 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
455 else
456 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
457
458 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
459
460 rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
461 to_process, processed,
462 csbcpb->cpb.aes_ccm.iv_or_ctr);
463 if (rc)
464 goto out;
335 465
336 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; 466 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
337 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; 467 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
468 if (rc)
469 goto out;
338 470
339 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 471 /* for partial completion, copy following for next
340 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); 472 * entry into loop...
341 if (rc) 473 */
342 goto out; 474 memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
475 memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
476 csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
477 memcpy(csbcpb->cpb.aes_ccm.in_s0,
478 csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);
343 479
344 atomic_inc(&(nx_ctx->stats->aes_ops)); 480 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
345 atomic64_add(csbcpb->csb.processed_byte_count, 481
346 &(nx_ctx->stats->aes_bytes)); 482 /* update stats */
483 atomic_inc(&(nx_ctx->stats->aes_ops));
484 atomic64_add(csbcpb->csb.processed_byte_count,
485 &(nx_ctx->stats->aes_bytes));
486
487 processed += to_process;
488
489 } while (processed < nbytes);
347 490
348 /* copy out the auth tag */ 491 /* copy out the auth tag */
349 scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac, 492 scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
350 req->dst, nbytes, authsize, 493 req->dst, nbytes, authsize,
351 SCATTERWALK_TO_SG); 494 SCATTERWALK_TO_SG);
495
352out: 496out:
497 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
353 return rc; 498 return rc;
354} 499}
355 500
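The encrypt and decrypt loops above keep the hardware state coherent across passes by feeding each pass's outputs back as the next pass's inputs before setting NX_FDM_CONTINUATION. A compact sketch of that carry step, using a hypothetical struct that mirrors only the CPB fields involved:

#include <string.h>

#define AES_BLOCK_SIZE 16

/* Hypothetical mirror of only the CPB fields the CCM loops touch. */
struct ccm_hw_state {
	unsigned char in_pat_or_b0[AES_BLOCK_SIZE];
	unsigned char out_pat_or_mac[AES_BLOCK_SIZE];
	unsigned char in_s0[AES_BLOCK_SIZE];
	unsigned char out_s0[AES_BLOCK_SIZE];
	unsigned char out_ctr[AES_BLOCK_SIZE];
};

/* Fold one pass's outputs back into the next pass's inputs. */
static void ccm_carry_state(struct ccm_hw_state *st, unsigned char *iv)
{
	memcpy(iv, st->out_ctr, AES_BLOCK_SIZE);	/* next counter value */
	memcpy(st->in_pat_or_b0, st->out_pat_or_mac,	/* running MAC */
	       AES_BLOCK_SIZE);
	memcpy(st->in_s0, st->out_s0, AES_BLOCK_SIZE);	/* S0 carry */
}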
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index b6286f14680b..a37d009dc75c 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -88,30 +88,48 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
88{ 88{
89 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); 89 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
90 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 90 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
91 unsigned long irq_flags;
92 unsigned int processed = 0, to_process;
93 u32 max_sg_len;
91 int rc; 94 int rc;
92 95
93 if (nbytes > nx_ctx->ap->databytelen) 96 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
94 return -EINVAL;
95 97
96 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, 98 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
97 csbcpb->cpb.aes_ctr.iv); 99 nx_ctx->ap->sglen);
98 if (rc)
99 goto out;
100 100
101 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { 101 do {
102 rc = -EINVAL; 102 to_process = min_t(u64, nbytes - processed,
103 goto out; 103 nx_ctx->ap->databytelen);
104 } 104 to_process = min_t(u64, to_process,
105 NX_PAGE_SIZE * (max_sg_len - 1));
106 to_process = to_process & ~(AES_BLOCK_SIZE - 1);
107
108 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
109 processed, csbcpb->cpb.aes_ctr.iv);
110 if (rc)
111 goto out;
112
113 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
114 rc = -EINVAL;
115 goto out;
116 }
117
118 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
119 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
120 if (rc)
121 goto out;
122
123 memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
105 124
106 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 125 atomic_inc(&(nx_ctx->stats->aes_ops));
107 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); 126 atomic64_add(csbcpb->csb.processed_byte_count,
108 if (rc) 127 &(nx_ctx->stats->aes_bytes));
109 goto out;
110 128
111 atomic_inc(&(nx_ctx->stats->aes_ops)); 129 processed += to_process;
112 atomic64_add(csbcpb->csb.processed_byte_count, 130 } while (processed < nbytes);
113 &(nx_ctx->stats->aes_bytes));
114out: 131out:
132 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
115 return rc; 133 return rc;
116} 134}
117 135
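CTR additionally rounds each pass down to a whole number of AES blocks with to_process & ~(AES_BLOCK_SIZE - 1), so the counter advances by an exact block count per hcall. The rounding in isolation, valid only because the block size is a power of two:

#define AES_BLOCK_SIZE 16

/* Round a byte count down to a whole number of AES blocks. */
static unsigned long round_down_to_block(unsigned long nbytes)
{
	return nbytes & ~(unsigned long)(AES_BLOCK_SIZE - 1);
}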
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 7bbc9a81da21..85a8d23cf29d 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -70,34 +70,52 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
70{ 70{
71 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); 71 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
73 unsigned long irq_flags;
74 unsigned int processed = 0, to_process;
75 u32 max_sg_len;
73 int rc; 76 int rc;
74 77
75 if (nbytes > nx_ctx->ap->databytelen) 78 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
76 return -EINVAL; 79
80 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
81 nx_ctx->ap->sglen);
77 82
78 if (enc) 83 if (enc)
79 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; 84 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
80 else 85 else
81 NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; 86 NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
82 87
83 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, NULL); 88 do {
84 if (rc) 89 to_process = min_t(u64, nbytes - processed,
85 goto out; 90 nx_ctx->ap->databytelen);
91 to_process = min_t(u64, to_process,
92 NX_PAGE_SIZE * (max_sg_len - 1));
93 to_process = to_process & ~(AES_BLOCK_SIZE - 1);
86 94
87 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { 95 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
88 rc = -EINVAL; 96 processed, NULL);
89 goto out; 97 if (rc)
90 } 98 goto out;
99
100 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
101 rc = -EINVAL;
102 goto out;
103 }
104
105 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
106 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
107 if (rc)
108 goto out;
109
110 atomic_inc(&(nx_ctx->stats->aes_ops));
111 atomic64_add(csbcpb->csb.processed_byte_count,
112 &(nx_ctx->stats->aes_bytes));
91 113
92 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 114 processed += to_process;
93 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); 115 } while (processed < nbytes);
94 if (rc)
95 goto out;
96 116
97 atomic_inc(&(nx_ctx->stats->aes_ops));
98 atomic64_add(csbcpb->csb.processed_byte_count,
99 &(nx_ctx->stats->aes_bytes));
100out: 117out:
118 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
101 return rc; 119 return rc;
102} 120}
103 121
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 6cca6c392b00..025d9a8d5b19 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -125,38 +125,187 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
125 struct aead_request *req, 125 struct aead_request *req,
126 u8 *out) 126 u8 *out)
127{ 127{
128 int rc;
128 struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; 129 struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
129 int rc = -EINVAL;
130 struct scatter_walk walk; 130 struct scatter_walk walk;
131 struct nx_sg *nx_sg = nx_ctx->in_sg; 131 struct nx_sg *nx_sg = nx_ctx->in_sg;
132 unsigned int nbytes = req->assoclen;
133 unsigned int processed = 0, to_process;
134 u32 max_sg_len;
132 135
133 if (req->assoclen > nx_ctx->ap->databytelen) 136 if (nbytes <= AES_BLOCK_SIZE) {
134 goto out;
135
136 if (req->assoclen <= AES_BLOCK_SIZE) {
137 scatterwalk_start(&walk, req->assoc); 137 scatterwalk_start(&walk, req->assoc);
138 scatterwalk_copychunks(out, &walk, req->assoclen, 138 scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
139 SCATTERWALK_FROM_SG);
140 scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); 139 scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
141 140 return 0;
142 rc = 0;
143 goto out;
144 } 141 }
145 142
146 nx_sg = nx_walk_and_build(nx_sg, nx_ctx->ap->sglen, req->assoc, 0, 143 NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;
147 req->assoclen); 144
148 nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg); 145 /* page_limit: number of sg entries that fit on one page */
146 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
147 nx_ctx->ap->sglen);
148
149 do {
150 /*
151 * to_process: the data chunk to process in this update.
152 * This value is bound by sg list limits.
153 */
154 to_process = min_t(u64, nbytes - processed,
155 nx_ctx->ap->databytelen);
156 to_process = min_t(u64, to_process,
157 NX_PAGE_SIZE * (max_sg_len - 1));
158
159 if ((to_process + processed) < nbytes)
160 NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
161 else
162 NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;
163
164 nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
165 req->assoc, processed, to_process);
166 nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
167 * sizeof(struct nx_sg);
168
169 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
170 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
171 if (rc)
172 return rc;
173
174 memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
175 csbcpb_aead->cpb.aes_gca.out_pat,
176 AES_BLOCK_SIZE);
177 NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;
178
179 atomic_inc(&(nx_ctx->stats->aes_ops));
180 atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
181
182 processed += to_process;
183 } while (processed < nbytes);
184
185 memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);
186
187 return rc;
188}
189
190static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
191{
192 int rc;
193 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
194 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
195 struct nx_sg *nx_sg;
196 unsigned int nbytes = req->assoclen;
197 unsigned int processed = 0, to_process;
198 u32 max_sg_len;
199
200 /* Set GMAC mode */
201 csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;
202
203 NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
204
205 /* page_limit: number of sg entries that fit on one page */
206 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
207 nx_ctx->ap->sglen);
208
209 /* Copy IV */
210 memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);
211
212 do {
213 /*
214 * to_process: the data chunk to process in this update.
215 * This value is bound by sg list limits.
216 */
217 to_process = min_t(u64, nbytes - processed,
218 nx_ctx->ap->databytelen);
219 to_process = min_t(u64, to_process,
220 NX_PAGE_SIZE * (max_sg_len - 1));
221
222 if ((to_process + processed) < nbytes)
223 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
224 else
225 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
226
227 nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
228 req->assoc, processed, to_process);
229 nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
230 * sizeof(struct nx_sg);
231
232 csbcpb->cpb.aes_gcm.bit_length_data = 0;
233 csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;
234
235 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
236 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
237 if (rc)
238 goto out;
239
240 memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
241 csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
242 memcpy(csbcpb->cpb.aes_gcm.in_s0,
243 csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);
244
245 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
246
247 atomic_inc(&(nx_ctx->stats->aes_ops));
248 atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
249
250 processed += to_process;
251 } while (processed < nbytes);
252
253out:
254 /* Restore GCM mode */
255 csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
256 return rc;
257}
258
259static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
260 int enc)
261{
262 int rc;
263 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
264 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
265 char out[AES_BLOCK_SIZE];
266 struct nx_sg *in_sg, *out_sg;
267
268 /* For scenarios where the input message is zero length, AES CTR mode
269 * may be used. Set the source data to be a single block (16B) of all
270 * zeros, and set the input IV value to be the same as the GMAC IV
271 * value. - nx_wb 4.8.1.3 */
272
273 /* Change to ECB mode */
274 csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
275 memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
276 sizeof(csbcpb->cpb.aes_ecb.key));
277 if (enc)
278 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
279 else
280 NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
281
282 /* Encrypt the counter/IV */
283 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
284 AES_BLOCK_SIZE, nx_ctx->ap->sglen);
285 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, sizeof(out),
286 nx_ctx->ap->sglen);
287 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
288 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
149 289
150 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead, 290 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
151 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); 291 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
152 if (rc) 292 if (rc)
153 goto out; 293 goto out;
154
155 atomic_inc(&(nx_ctx->stats->aes_ops)); 294 atomic_inc(&(nx_ctx->stats->aes_ops));
156 atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
157 295
158 memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE); 296 /* Copy out the auth tag */
297 memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
298 crypto_aead_authsize(crypto_aead_reqtfm(req)));
159out: 299out:
300 /* Restore GCM mode */
301 csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
302
303 /*
304 * The ECB key uses the same region as the GCM AAD and counter, so it's safe
305 * to just fill it with zeroes.
306 */
307 memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));
308
160 return rc; 309 return rc;
161} 310}
162 311
@@ -166,88 +315,104 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
166 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 315 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
167 struct blkcipher_desc desc; 316 struct blkcipher_desc desc;
168 unsigned int nbytes = req->cryptlen; 317 unsigned int nbytes = req->cryptlen;
318 unsigned int processed = 0, to_process;
319 unsigned long irq_flags;
320 u32 max_sg_len;
169 int rc = -EINVAL; 321 int rc = -EINVAL;
170 322
171 if (nbytes > nx_ctx->ap->databytelen) 323 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
172 goto out;
173 324
174 desc.info = nx_ctx->priv.gcm.iv; 325 desc.info = nx_ctx->priv.gcm.iv;
175 /* initialize the counter */ 326 /* initialize the counter */
176 *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; 327 *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
177 328
178 /* For scenarios where the input message is zero length, AES CTR mode
179 * may be used. Set the source data to be a single block (16B) of all
180 * zeros, and set the input IV value to be the same as the GMAC IV
181 * value. - nx_wb 4.8.1.3 */
182 if (nbytes == 0) { 329 if (nbytes == 0) {
183 char src[AES_BLOCK_SIZE] = {}; 330 if (req->assoclen == 0)
184 struct scatterlist sg; 331 rc = gcm_empty(req, &desc, enc);
185 332 else
186 desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0); 333 rc = gmac(req, &desc);
187 if (IS_ERR(desc.tfm)) { 334 if (rc)
188 rc = -ENOMEM;
189 goto out; 335 goto out;
190 }
191
192 crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key,
193 NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 :
194 NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32);
195
196 sg_init_one(&sg, src, AES_BLOCK_SIZE);
197 if (enc)
198 crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg,
199 AES_BLOCK_SIZE);
200 else 336 else
201 crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg, 337 goto mac;
202 AES_BLOCK_SIZE);
203 crypto_free_blkcipher(desc.tfm);
204
205 rc = 0;
206 goto out;
207 } 338 }
208 339
209 desc.tfm = (struct crypto_blkcipher *)req->base.tfm; 340 /* Process associated data */
210
211 csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8; 341 csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
212
213 if (req->assoclen) { 342 if (req->assoclen) {
214 rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad); 343 rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
215 if (rc) 344 if (rc)
216 goto out; 345 goto out;
217 } 346 }
218 347
219 if (enc) 348 /* Set flags for encryption */
349 NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
350 if (enc) {
220 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; 351 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
221 else 352 } else {
353 NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
222 nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req)); 354 nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
355 }
223 356
224 csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; 357 /* page_limit: number of sg entries that fit on one page */
358 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
359 nx_ctx->ap->sglen);
360
361 do {
362 /*
363 * to_process: the data chunk to process in this update.
364 * This value is bound by sg list limits.
365 */
366 to_process = min_t(u64, nbytes - processed,
367 nx_ctx->ap->databytelen);
368 to_process = min_t(u64, to_process,
369 NX_PAGE_SIZE * (max_sg_len - 1));
370
371 if ((to_process + processed) < nbytes)
372 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
373 else
374 NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
225 375
226 rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes, 376 csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
227 csbcpb->cpb.aes_gcm.iv_or_cnt); 377 desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
228 if (rc) 378 rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
229 goto out; 379 req->src, to_process, processed,
380 csbcpb->cpb.aes_gcm.iv_or_cnt);
381 if (rc)
382 goto out;
230 383
231 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 384 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
232 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); 385 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
233 if (rc) 386 if (rc)
234 goto out; 387 goto out;
235 388
236 atomic_inc(&(nx_ctx->stats->aes_ops)); 389 memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
237 atomic64_add(csbcpb->csb.processed_byte_count, 390 memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
238 &(nx_ctx->stats->aes_bytes)); 391 csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
392 memcpy(csbcpb->cpb.aes_gcm.in_s0,
393 csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);
394
395 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
396
397 atomic_inc(&(nx_ctx->stats->aes_ops));
398 atomic64_add(csbcpb->csb.processed_byte_count,
399 &(nx_ctx->stats->aes_bytes));
400
401 processed += to_process;
402 } while (processed < nbytes);
239 403
404mac:
240 if (enc) { 405 if (enc) {
241 /* copy out the auth tag */ 406 /* copy out the auth tag */
242 scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac, 407 scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
243 req->dst, nbytes, 408 req->dst, nbytes,
244 crypto_aead_authsize(crypto_aead_reqtfm(req)), 409 crypto_aead_authsize(crypto_aead_reqtfm(req)),
245 SCATTERWALK_TO_SG); 410 SCATTERWALK_TO_SG);
246 } else if (req->assoclen) { 411 } else {
247 u8 *itag = nx_ctx->priv.gcm.iauth_tag; 412 u8 *itag = nx_ctx->priv.gcm.iauth_tag;
248 u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac; 413 u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;
249 414
250 scatterwalk_map_and_copy(itag, req->dst, nbytes, 415 scatterwalk_map_and_copy(itag, req->src, nbytes,
251 crypto_aead_authsize(crypto_aead_reqtfm(req)), 416 crypto_aead_authsize(crypto_aead_reqtfm(req)),
252 SCATTERWALK_FROM_SG); 417 SCATTERWALK_FROM_SG);
253 rc = memcmp(itag, otag, 418 rc = memcmp(itag, otag,
@@ -255,6 +420,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
255 -EBADMSG : 0; 420 -EBADMSG : 0;
256 } 421 }
257out: 422out:
423 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
258 return rc; 424 return rc;
259} 425}
260 426
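The new zero-length handling splits into two cases: AAD but no plaintext degenerates to GMAC over the AAD, while no AAD and no plaintext reduces to encrypting the preset counter block, which gcm_empty() obtains by temporarily switching the hardware to ECB (nx_wb 4.8.1.3). A software sketch of the second case, with a hypothetical single-block AES callback standing in for the hardware operation:

#include <string.h>

#define AES_BLOCK_SIZE 16

/* Hypothetical single-block AES primitive; any AES implementation fits. */
typedef void (*aes_encrypt_block_fn)(const unsigned char *key,
				     unsigned int key_len,
				     const unsigned char in[AES_BLOCK_SIZE],
				     unsigned char out[AES_BLOCK_SIZE]);

/* GCM with no AAD and no plaintext: the tag is E_K(J0), i.e. the IV block
 * with its counter field preset to 1, run once through the block cipher.
 * That is what gcm_empty() asks the hardware for via the ECB detour.
 * taglen must not exceed AES_BLOCK_SIZE.
 */
static void gcm_empty_tag(aes_encrypt_block_fn aes_enc,
			  const unsigned char *key, unsigned int key_len,
			  const unsigned char j0[AES_BLOCK_SIZE],
			  unsigned char *tag, unsigned int taglen)
{
	unsigned char block[AES_BLOCK_SIZE];

	aes_enc(key, key_len, j0, block);
	memcpy(tag, block, taglen);
}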
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 93923e4628c0..03c4bf57d066 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -56,6 +56,77 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
56 return 0; 56 return 0;
57} 57}
58 58
59/*
60 * Based on RFC 3566, for a zero-length message:
61 *
62 * n = 1
63 * K1 = E(K, 0x01010101010101010101010101010101)
64 * K3 = E(K, 0x03030303030303030303030303030303)
65 * E[0] = 0x00000000000000000000000000000000
66 * M[1] = 0x80000000000000000000000000000000 (0 length message with padding)
67 * E[1] = E(K1, M[1] ^ E[0] ^ K3)
68 * Tag = E[1]
69 */
70static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
71{
72 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
73 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
74 struct nx_sg *in_sg, *out_sg;
75 u8 keys[2][AES_BLOCK_SIZE];
76 u8 key[32];
77 int rc = 0;
78
79 /* Change to ECB mode */
80 csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
81 memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE);
82 memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE);
83 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
84
85 /* K1 and K3 base patterns */
86 memset(keys[0], 0x01, sizeof(keys[0]));
87 memset(keys[1], 0x03, sizeof(keys[1]));
88
89 /* Generate K1 and K3 encrypting the patterns */
90 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, sizeof(keys),
91 nx_ctx->ap->sglen);
92 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, sizeof(keys),
93 nx_ctx->ap->sglen);
94 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
95 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
96
97 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
98 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
99 if (rc)
100 goto out;
101 atomic_inc(&(nx_ctx->stats->aes_ops));
102
103 /* XOR K3 with the padding for a 0 length message */
104 keys[1][0] ^= 0x80;
105
106 /* Encrypt the final result */
107 memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
108 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], sizeof(keys[1]),
109 nx_ctx->ap->sglen);
110 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
111 nx_ctx->ap->sglen);
112 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
113 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
114
115 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
116 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
117 if (rc)
118 goto out;
119 atomic_inc(&(nx_ctx->stats->aes_ops));
120
121out:
122 /* Restore XCBC mode */
123 csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
124 memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE);
125 NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
126
127 return rc;
128}
129
59static int nx_xcbc_init(struct shash_desc *desc) 130static int nx_xcbc_init(struct shash_desc *desc)
60{ 131{
61 struct xcbc_state *sctx = shash_desc_ctx(desc); 132 struct xcbc_state *sctx = shash_desc_ctx(desc);
@@ -88,76 +159,99 @@ static int nx_xcbc_update(struct shash_desc *desc,
88 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 159 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
89 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 160 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
90 struct nx_sg *in_sg; 161 struct nx_sg *in_sg;
91 u32 to_process, leftover; 162 u32 to_process, leftover, total;
163 u32 max_sg_len;
164 unsigned long irq_flags;
92 int rc = 0; 165 int rc = 0;
93 166
94 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 167 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
95 /* we've hit the nx chip previously and we're updating again, 168
96 * so copy over the partial digest */ 169
97 memcpy(csbcpb->cpb.aes_xcbc.cv, 170 total = sctx->count + len;
98 csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
99 }
100 171
101 /* 2 cases for total data len: 172 /* 2 cases for total data len:
102 * 1: <= AES_BLOCK_SIZE: copy into state, return 0 173 * 1: <= AES_BLOCK_SIZE: copy into state, return 0
103 * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover 174 * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
104 */ 175 */
105 if (len + sctx->count <= AES_BLOCK_SIZE) { 176 if (total <= AES_BLOCK_SIZE) {
106 memcpy(sctx->buffer + sctx->count, data, len); 177 memcpy(sctx->buffer + sctx->count, data, len);
107 sctx->count += len; 178 sctx->count += len;
108 goto out; 179 goto out;
109 } 180 }
110 181
111 /* to_process: the AES_BLOCK_SIZE data chunk to process in this 182 in_sg = nx_ctx->in_sg;
112 * update */ 183 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
113 to_process = (sctx->count + len) & ~(AES_BLOCK_SIZE - 1); 184 nx_ctx->ap->sglen);
114 leftover = (sctx->count + len) & (AES_BLOCK_SIZE - 1); 185
115 186 do {
116 /* the hardware will not accept a 0 byte operation for this algorithm 187
117 * and the operation MUST be finalized to be correct. So if we happen 188 /* to_process: the AES_BLOCK_SIZE data chunk to process in this
118 * to get an update that falls on a block sized boundary, we must 189 * update */
119 * save off the last block to finalize with later. */ 190 to_process = min_t(u64, total, nx_ctx->ap->databytelen);
120 if (!leftover) { 191 to_process = min_t(u64, to_process,
121 to_process -= AES_BLOCK_SIZE; 192 NX_PAGE_SIZE * (max_sg_len - 1));
122 leftover = AES_BLOCK_SIZE; 193 to_process = to_process & ~(AES_BLOCK_SIZE - 1);
123 } 194 leftover = total - to_process;
124 195
125 if (sctx->count) { 196 /* the hardware will not accept a 0 byte operation for this
126 in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buffer, 197 * algorithm and the operation MUST be finalized to be correct.
127 sctx->count, nx_ctx->ap->sglen); 198 * So if we happen to get an update that falls on a block sized
128 in_sg = nx_build_sg_list(in_sg, (u8 *)data, 199 * boundary, we must save off the last block to finalize with
129 to_process - sctx->count, 200 * later. */
130 nx_ctx->ap->sglen); 201 if (!leftover) {
202 to_process -= AES_BLOCK_SIZE;
203 leftover = AES_BLOCK_SIZE;
204 }
205
206 if (sctx->count) {
207 in_sg = nx_build_sg_list(nx_ctx->in_sg,
208 (u8 *) sctx->buffer,
209 sctx->count,
210 max_sg_len);
211 }
212 in_sg = nx_build_sg_list(in_sg,
213 (u8 *) data,
214 to_process - sctx->count,
215 max_sg_len);
131 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * 216 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
132 sizeof(struct nx_sg); 217 sizeof(struct nx_sg);
133 } else {
134 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, to_process,
135 nx_ctx->ap->sglen);
136 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
137 sizeof(struct nx_sg);
138 }
139 218
140 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 219 /* we've hit the nx chip previously and we're updating again,
220 * so copy over the partial digest */
221 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
222 memcpy(csbcpb->cpb.aes_xcbc.cv,
223 csbcpb->cpb.aes_xcbc.out_cv_mac,
224 AES_BLOCK_SIZE);
225 }
226
227 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
228 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
229 rc = -EINVAL;
230 goto out;
231 }
232
233 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
234 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
235 if (rc)
236 goto out;
141 237
142 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { 238 atomic_inc(&(nx_ctx->stats->aes_ops));
143 rc = -EINVAL;
144 goto out;
145 }
146 239
147 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 240 /* everything after the first update is continuation */
148 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); 241 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
149 if (rc)
150 goto out;
151 242
152 atomic_inc(&(nx_ctx->stats->aes_ops)); 243 total -= to_process;
244 data += to_process - sctx->count;
245 sctx->count = 0;
246 in_sg = nx_ctx->in_sg;
247 } while (leftover > AES_BLOCK_SIZE);
153 248
154 /* copy the leftover back into the state struct */ 249 /* copy the leftover back into the state struct */
155 memcpy(sctx->buffer, data + len - leftover, leftover); 250 memcpy(sctx->buffer, data, leftover);
156 sctx->count = leftover; 251 sctx->count = leftover;
157 252
158 /* everything after the first update is continuation */
159 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
160out: 253out:
254 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
161 return rc; 255 return rc;
162} 256}
163 257
@@ -167,21 +261,23 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
167 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 261 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
168 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 262 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
169 struct nx_sg *in_sg, *out_sg; 263 struct nx_sg *in_sg, *out_sg;
264 unsigned long irq_flags;
170 int rc = 0; 265 int rc = 0;
171 266
267 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
268
172 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 269 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
173 /* we've hit the nx chip previously, now we're finalizing, 270 /* we've hit the nx chip previously, now we're finalizing,
174 * so copy over the partial digest */ 271 * so copy over the partial digest */
175 memcpy(csbcpb->cpb.aes_xcbc.cv, 272 memcpy(csbcpb->cpb.aes_xcbc.cv,
176 csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); 273 csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
177 } else if (sctx->count == 0) { 274 } else if (sctx->count == 0) {
178 /* we've never seen an update, so this is a 0 byte op. The 275 /*
179 * hardware cannot handle a 0 byte op, so just copy out the 276 * we've never seen an update, so this is a 0 byte op. The
180 * known 0 byte result. This is cheaper than allocating a 277 * hardware cannot handle a 0 byte op, so just use ECB to
181 * software context to do a 0 byte op */ 278 * generate the hash.
182 u8 data[] = { 0x75, 0xf0, 0x25, 0x1d, 0x52, 0x8a, 0xc0, 0x1c, 279 */
183 0x45, 0x73, 0xdf, 0xd5, 0x84, 0xd7, 0x9f, 0x29 }; 280 rc = nx_xcbc_empty(desc, out);
184 memcpy(out, data, sizeof(data));
185 goto out; 281 goto out;
186 } 282 }
187 283
@@ -211,6 +307,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
211 307
212 memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); 308 memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
213out: 309out:
310 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
214 return rc; 311 return rc;
215} 312}
216 313
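nx_xcbc_empty() replaces the old hard-coded constant with the RFC 3566 derivation spelled out in its comment: derive K1 and K3 by encrypting the 0x01... and 0x03... patterns under the key, then encrypt the padded empty block XORed with K3 under K1. The same computation as a standalone sketch, again with a hypothetical block-cipher callback in place of the hardware ECB operation:

#include <string.h>

#define AES_BLOCK_SIZE 16

/* Hypothetical single-block AES primitive, as in the GCM sketch. */
typedef void (*aes_encrypt_block_fn)(const unsigned char *key,
				     unsigned int key_len,
				     const unsigned char in[AES_BLOCK_SIZE],
				     unsigned char out[AES_BLOCK_SIZE]);

/* AES-XCBC-MAC of the empty message, following RFC 3566. */
static void xcbc_empty_tag(aes_encrypt_block_fn aes_enc,
			   const unsigned char key[AES_BLOCK_SIZE],
			   unsigned char tag[AES_BLOCK_SIZE])
{
	unsigned char k1[AES_BLOCK_SIZE], k3[AES_BLOCK_SIZE];
	unsigned char m1[AES_BLOCK_SIZE];
	int i;

	memset(m1, 0x01, sizeof(m1));
	aes_enc(key, AES_BLOCK_SIZE, m1, k1);	/* K1 = E(K, 0x01..01) */
	memset(m1, 0x03, sizeof(m1));
	aes_enc(key, AES_BLOCK_SIZE, m1, k3);	/* K3 = E(K, 0x03..03) */

	memset(m1, 0, sizeof(m1));
	m1[0] = 0x80;				/* padded zero-length message */
	for (i = 0; i < AES_BLOCK_SIZE; i++)
		m1[i] ^= k3[i];			/* M[1] ^ E[0] ^ K3, E[0] = 0 */

	aes_enc(k1, AES_BLOCK_SIZE, m1, tag);	/* Tag = E[1] */
}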
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 67024f2f0b78..da0b24a7633f 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -55,71 +55,91 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
55 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 55 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
56 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 56 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
57 struct nx_sg *in_sg; 57 struct nx_sg *in_sg;
58 u64 to_process, leftover; 58 u64 to_process, leftover, total;
59 u32 max_sg_len;
60 unsigned long irq_flags;
59 int rc = 0; 61 int rc = 0;
60 62
61 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 63 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
62 /* we've hit the nx chip previously and we're updating again,
63 * so copy over the partial digest */
64 memcpy(csbcpb->cpb.sha256.input_partial_digest,
65 csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
66 }
67 64
68 /* 2 cases for total data len: 65 /* 2 cases for total data len:
69 * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0 66 * 1: < SHA256_BLOCK_SIZE: copy into state, return 0
70 * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover 67 * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
71 */ 68 */
72 if (len + sctx->count < SHA256_BLOCK_SIZE) { 69 total = sctx->count + len;
70 if (total < SHA256_BLOCK_SIZE) {
73 memcpy(sctx->buf + sctx->count, data, len); 71 memcpy(sctx->buf + sctx->count, data, len);
74 sctx->count += len; 72 sctx->count += len;
75 goto out; 73 goto out;
76 } 74 }
77 75
78 /* to_process: the SHA256_BLOCK_SIZE data chunk to process in this 76 in_sg = nx_ctx->in_sg;
79 * update */ 77 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
80 to_process = (sctx->count + len) & ~(SHA256_BLOCK_SIZE - 1); 78 nx_ctx->ap->sglen);
81 leftover = (sctx->count + len) & (SHA256_BLOCK_SIZE - 1); 79
82 80 do {
83 if (sctx->count) { 81 /*
84 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, 82 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
85 sctx->count, nx_ctx->ap->sglen); 83 * this update. This value is also restricted by the sg list
86 in_sg = nx_build_sg_list(in_sg, (u8 *)data, 84 * limits.
85 */
86 to_process = min_t(u64, total, nx_ctx->ap->databytelen);
87 to_process = min_t(u64, to_process,
88 NX_PAGE_SIZE * (max_sg_len - 1));
89 to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
90 leftover = total - to_process;
91
92 if (sctx->count) {
93 in_sg = nx_build_sg_list(nx_ctx->in_sg,
94 (u8 *) sctx->buf,
95 sctx->count, max_sg_len);
96 }
97 in_sg = nx_build_sg_list(in_sg, (u8 *) data,
87 to_process - sctx->count, 98 to_process - sctx->count,
88 nx_ctx->ap->sglen); 99 max_sg_len);
89 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * 100 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
90 sizeof(struct nx_sg); 101 sizeof(struct nx_sg);
91 } else {
92 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
93 to_process, nx_ctx->ap->sglen);
94 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
95 sizeof(struct nx_sg);
96 }
97 102
98 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 103 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
104 /*
105 * we've hit the nx chip previously and we're updating
106 * again, so copy over the partial digest.
107 */
108 memcpy(csbcpb->cpb.sha256.input_partial_digest,
109 csbcpb->cpb.sha256.message_digest,
110 SHA256_DIGEST_SIZE);
111 }
99 112
100 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { 113 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
101 rc = -EINVAL; 114 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
102 goto out; 115 rc = -EINVAL;
103 } 116 goto out;
117 }
104 118
105 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 119 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
106 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); 120 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
107 if (rc) 121 if (rc)
108 goto out; 122 goto out;
109 123
110 atomic_inc(&(nx_ctx->stats->sha256_ops)); 124 atomic_inc(&(nx_ctx->stats->sha256_ops));
125 csbcpb->cpb.sha256.message_bit_length += (u64)
126 (csbcpb->cpb.sha256.spbc * 8);
127
128 /* everything after the first update is continuation */
129 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
130
131 total -= to_process;
132 data += to_process - sctx->count;
133 sctx->count = 0;
134 in_sg = nx_ctx->in_sg;
135 } while (leftover >= SHA256_BLOCK_SIZE);
111 136
112 /* copy the leftover back into the state struct */ 137 /* copy the leftover back into the state struct */
113 if (leftover) 138 if (leftover)
114 memcpy(sctx->buf, data + len - leftover, leftover); 139 memcpy(sctx->buf, data, leftover);
115 sctx->count = leftover; 140 sctx->count = leftover;
116
117 csbcpb->cpb.sha256.message_bit_length += (u64)
118 (csbcpb->cpb.sha256.spbc * 8);
119
120 /* everything after the first update is continuation */
121 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
122out: 141out:
142 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
123 return rc; 143 return rc;
124} 144}
125 145
@@ -129,8 +149,13 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
129 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 149 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
130 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 150 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
131 struct nx_sg *in_sg, *out_sg; 151 struct nx_sg *in_sg, *out_sg;
152 u32 max_sg_len;
153 unsigned long irq_flags;
132 int rc; 154 int rc;
133 155
156 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
157
158 max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
134 159
135 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 160 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
136 /* we've hit the nx chip previously, now we're finalizing, 161 /* we've hit the nx chip previously, now we're finalizing,
@@ -146,9 +171,9 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
146 csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8); 171 csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8);
147 172
148 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, 173 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
149 sctx->count, nx_ctx->ap->sglen); 174 sctx->count, max_sg_len);
150 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE, 175 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE,
151 nx_ctx->ap->sglen); 176 max_sg_len);
152 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 177 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
153 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 178 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
154 179
@@ -168,6 +193,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
168 &(nx_ctx->stats->sha256_bytes)); 193 &(nx_ctx->stats->sha256_bytes));
169 memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); 194 memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
170out: 195out:
196 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
171 return rc; 197 return rc;
172} 198}
173 199
@@ -177,6 +203,9 @@ static int nx_sha256_export(struct shash_desc *desc, void *out)
177 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 203 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
178 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 204 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
179 struct sha256_state *octx = out; 205 struct sha256_state *octx = out;
206 unsigned long irq_flags;
207
208 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
180 209
181 octx->count = sctx->count + 210 octx->count = sctx->count +
182 (csbcpb->cpb.sha256.message_bit_length / 8); 211 (csbcpb->cpb.sha256.message_bit_length / 8);
@@ -199,6 +228,7 @@ static int nx_sha256_export(struct shash_desc *desc, void *out)
199 octx->state[7] = SHA256_H7; 228 octx->state[7] = SHA256_H7;
200 } 229 }
201 230
231 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
202 return 0; 232 return 0;
203} 233}
204 234
@@ -208,6 +238,9 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in)
208 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 238 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
209 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 239 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
210 const struct sha256_state *ictx = in; 240 const struct sha256_state *ictx = in;
241 unsigned long irq_flags;
242
243 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
211 244
212 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); 245 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
213 246
@@ -222,6 +255,7 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in)
222 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 255 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
223 } 256 }
224 257
258 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
225 return 0; 259 return 0;
226} 260}
227 261
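The rewritten SHA-256 update buffers sub-block input and, once a full block is available, processes the largest block-aligned span it can, keeping the remainder for later; the caller's data pointer advances by to_process minus whatever was already buffered. A toy example of that bookkeeping (the databytelen and sg caps applied by the real loop are omitted here):

#include <stdio.h>

#define SHA256_BLOCK_SIZE 64ul

int main(void)
{
	unsigned long buffered = 13, len = 1000;	/* illustrative sizes */
	unsigned long total = buffered + len;
	unsigned long to_process = total & ~(SHA256_BLOCK_SIZE - 1);
	unsigned long leftover = total - to_process;
	/* bytes consumed from the caller's data pointer in this pass */
	unsigned long consumed = to_process - buffered;

	printf("process %lu bytes, advance data by %lu, keep %lu buffered\n",
	       to_process, consumed, leftover);
	return 0;
}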
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 08eee1122349..4ae5b0f221d5 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -55,73 +55,93 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
55 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 55 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
56 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 56 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
57 struct nx_sg *in_sg; 57 struct nx_sg *in_sg;
58 u64 to_process, leftover, spbc_bits; 58 u64 to_process, leftover, total, spbc_bits;
59 u32 max_sg_len;
60 unsigned long irq_flags;
59 int rc = 0; 61 int rc = 0;
60 62
61 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 63 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
62 /* we've hit the nx chip previously and we're updating again,
63 * so copy over the partial digest */
64 memcpy(csbcpb->cpb.sha512.input_partial_digest,
65 csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
66 }
67 64
68 /* 2 cases for total data len: 65 /* 2 cases for total data len:
69 * 1: <= SHA512_BLOCK_SIZE: copy into state, return 0 66 * 1: < SHA512_BLOCK_SIZE: copy into state, return 0
70 * 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover 67 * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
71 */ 68 */
72 if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) { 69 total = sctx->count[0] + len;
70 if (total < SHA512_BLOCK_SIZE) {
73 memcpy(sctx->buf + sctx->count[0], data, len); 71 memcpy(sctx->buf + sctx->count[0], data, len);
74 sctx->count[0] += len; 72 sctx->count[0] += len;
75 goto out; 73 goto out;
76 } 74 }
77 75
78 /* to_process: the SHA512_BLOCK_SIZE data chunk to process in this 76 in_sg = nx_ctx->in_sg;
79 * update */ 77 max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
80 to_process = (sctx->count[0] + len) & ~(SHA512_BLOCK_SIZE - 1); 78 nx_ctx->ap->sglen);
81 leftover = (sctx->count[0] + len) & (SHA512_BLOCK_SIZE - 1); 79
82 80 do {
83 if (sctx->count[0]) { 81 /*
84 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf, 82 * to_process: the SHA512_BLOCK_SIZE data chunk to process in
85 sctx->count[0], nx_ctx->ap->sglen); 83 * this update. This value is also restricted by the sg list
86 in_sg = nx_build_sg_list(in_sg, (u8 *)data, 84 * limits.
85 */
86 to_process = min_t(u64, total, nx_ctx->ap->databytelen);
87 to_process = min_t(u64, to_process,
88 NX_PAGE_SIZE * (max_sg_len - 1));
89 to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
90 leftover = total - to_process;
91
92 if (sctx->count[0]) {
93 in_sg = nx_build_sg_list(nx_ctx->in_sg,
94 (u8 *) sctx->buf,
95 sctx->count[0], max_sg_len);
96 }
97 in_sg = nx_build_sg_list(in_sg, (u8 *) data,
87 to_process - sctx->count[0], 98 to_process - sctx->count[0],
88 nx_ctx->ap->sglen); 99 max_sg_len);
89 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
90 sizeof(struct nx_sg);
91 } else {
92 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
93 to_process, nx_ctx->ap->sglen);
94 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * 100 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
95 sizeof(struct nx_sg); 101 sizeof(struct nx_sg);
96 }
97 102
98 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 103 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
104 /*
105 * we've hit the nx chip previously and we're updating
106 * again, so copy over the partial digest.
107 */
108 memcpy(csbcpb->cpb.sha512.input_partial_digest,
109 csbcpb->cpb.sha512.message_digest,
110 SHA512_DIGEST_SIZE);
111 }
99 112
100 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { 113 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
101 rc = -EINVAL; 114 if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
102 goto out; 115 rc = -EINVAL;
103 } 116 goto out;
104 117 }
105 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 118
106 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); 119 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
107 if (rc) 120 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
108 goto out; 121 if (rc)
122 goto out;
123
124 atomic_inc(&(nx_ctx->stats->sha512_ops));
125 spbc_bits = csbcpb->cpb.sha512.spbc * 8;
126 csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
127 if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
128 csbcpb->cpb.sha512.message_bit_length_hi++;
129
130 /* everything after the first update is continuation */
131 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
109 132
110 atomic_inc(&(nx_ctx->stats->sha512_ops)); 133 total -= to_process;
134 data += to_process - sctx->count[0];
135 sctx->count[0] = 0;
136 in_sg = nx_ctx->in_sg;
137 } while (leftover >= SHA512_BLOCK_SIZE);
111 138
112 /* copy the leftover back into the state struct */ 139 /* copy the leftover back into the state struct */
113 if (leftover) 140 if (leftover)
114 memcpy(sctx->buf, data + len - leftover, leftover); 141 memcpy(sctx->buf, data, leftover);
115 sctx->count[0] = leftover; 142 sctx->count[0] = leftover;
116
117 spbc_bits = csbcpb->cpb.sha512.spbc * 8;
118 csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
119 if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
120 csbcpb->cpb.sha512.message_bit_length_hi++;
121
122 /* everything after the first update is continuation */
123 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
124out: 143out:
144 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
125 return rc; 145 return rc;
126} 146}
127 147
@@ -131,9 +151,15 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
131 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 151 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
132 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 152 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
133 struct nx_sg *in_sg, *out_sg; 153 struct nx_sg *in_sg, *out_sg;
154 u32 max_sg_len;
134 u64 count0; 155 u64 count0;
156 unsigned long irq_flags;
135 int rc; 157 int rc;
136 158
159 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
160
161 max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
162
137 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 163 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
138 /* we've hit the nx chip previously, now we're finalizing, 164 /* we've hit the nx chip previously, now we're finalizing,
139 * so copy over the partial digest */ 165 * so copy over the partial digest */
@@ -152,9 +178,9 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
152 csbcpb->cpb.sha512.message_bit_length_hi++; 178 csbcpb->cpb.sha512.message_bit_length_hi++;
153 179
154 in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0], 180 in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0],
155 nx_ctx->ap->sglen); 181 max_sg_len);
156 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE, 182 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE,
157 nx_ctx->ap->sglen); 183 max_sg_len);
158 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 184 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
159 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 185 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
160 186
@@ -174,6 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
174 200
175 memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); 201 memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
176out: 202out:
203 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
177 return rc; 204 return rc;
178} 205}
179 206
@@ -183,6 +210,9 @@ static int nx_sha512_export(struct shash_desc *desc, void *out)
183 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 210 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
184 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 211 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
185 struct sha512_state *octx = out; 212 struct sha512_state *octx = out;
213 unsigned long irq_flags;
214
215 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
186 216
187 /* move message_bit_length (128 bits) into count and convert its value 217 /* move message_bit_length (128 bits) into count and convert its value
188 * to bytes */ 218 * to bytes */
@@ -214,6 +244,7 @@ static int nx_sha512_export(struct shash_desc *desc, void *out)
214 octx->state[7] = SHA512_H7; 244 octx->state[7] = SHA512_H7;
215 } 245 }
216 246
247 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
217 return 0; 248 return 0;
218} 249}
219 250
@@ -223,6 +254,9 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in)
223 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 254 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
224 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 255 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
225 const struct sha512_state *ictx = in; 256 const struct sha512_state *ictx = in;
257 unsigned long irq_flags;
258
259 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
226 260
227 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); 261 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
228 sctx->count[0] = ictx->count[0] & 0x3f; 262 sctx->count[0] = ictx->count[0] & 0x3f;
@@ -240,6 +274,7 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in)
240 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 274 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
241 } 275 }
242 276
277 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
243 return 0; 278 return 0;
244} 279}
245 280
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index bbdab6e5ccf0..5533fe31c90d 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -61,8 +61,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
61 61
62 do { 62 do {
63 rc = vio_h_cop_sync(viodev, op); 63 rc = vio_h_cop_sync(viodev, op);
64 } while ((rc == -EBUSY && !may_sleep && retries--) || 64 } while (rc == -EBUSY && !may_sleep && retries--);
65 (rc == -EBUSY && may_sleep && cond_resched()));
66 65
67 if (rc) { 66 if (rc) {
68 dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d " 67 dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
@@ -114,13 +113,29 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
114 * have been described (or @sgmax elements have been written), the 113 * have been described (or @sgmax elements have been written), the
115 * loop ends. min_t is used to ensure @end_addr falls on the same page 114 * loop ends. min_t is used to ensure @end_addr falls on the same page
116 * as sg_addr, if not, we need to create another nx_sg element for the 115 * as sg_addr, if not, we need to create another nx_sg element for the
117 * data on the next page */ 116 * data on the next page.
117 *
118 * Also when using vmalloc'ed data, every time that a system page
119 * boundary is crossed the physical address needs to be re-calculated.
120 */
118 for (sg = sg_head; sg_len < len; sg++) { 121 for (sg = sg_head; sg_len < len; sg++) {
122 u64 next_page;
123
119 sg->addr = sg_addr; 124 sg->addr = sg_addr;
120 sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), end_addr); 125 sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
121 sg->len = sg_addr - sg->addr; 126 end_addr);
127
128 next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
129 sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
122 sg_len += sg->len; 130 sg_len += sg->len;
123 131
132 if (sg_addr >= next_page &&
133 is_vmalloc_addr(start_addr + sg_len)) {
134 sg_addr = page_to_phys(vmalloc_to_page(
135 start_addr + sg_len));
136 end_addr = sg_addr + len - sg_len;
137 }
138
124 if ((sg - sg_head) == sgmax) { 139 if ((sg - sg_head) == sgmax) {
125 pr_err("nx: scatter/gather list overflow, pid: %d\n", 140 pr_err("nx: scatter/gather list overflow, pid: %d\n",
126 current->pid); 141 current->pid);
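
The hunk above changes nx_build_sg_list() so that no scatter/gather element spans a system page, and so that vmalloc'ed buffers get their physical address recomputed at every page crossing. A minimal user-space sketch of just the page-splitting arithmetic; PAGE_SIZE, struct seg and split_by_page() are illustrative stand-ins, not driver code:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE ((uint64_t)4096)

struct seg { uint64_t addr; uint64_t len; };

/* Split [addr, addr + len) into segments that never cross a page. */
static int split_by_page(uint64_t addr, uint64_t len,
			 struct seg *seg, int max_segs)
{
	uint64_t end = addr + len;
	int n = 0;

	while (addr < end && n < max_segs) {
		/* end of the current page, clamped to the buffer end */
		uint64_t next_page = (addr & ~(PAGE_SIZE - 1)) + PAGE_SIZE;
		uint64_t stop = next_page < end ? next_page : end;

		seg[n].addr = addr;
		seg[n].len = stop - addr;
		addr = stop;
		n++;
	}
	return addr == end ? n : -1;	/* -1 models the overflow pr_err() */
}

int main(void)
{
	struct seg s[8];
	int n = split_by_page(0x1000ff0, 0x30, s, 8);

	for (int i = 0; i < n; i++)
		printf("seg %d: addr=%#llx len=%llu\n", i,
		       (unsigned long long)s[i].addr,
		       (unsigned long long)s[i].len);
	return 0;
}

The sample buffer straddles one page boundary, so two elements come out: 16 bytes up to the boundary and 32 bytes after it, mirroring the per-page elements the driver now hands to the coprocessor.
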
@@ -196,6 +211,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
196 * @dst: destination scatterlist 211 * @dst: destination scatterlist
197 * @src: source scatterlist 212 * @src: source scatterlist
198 * @nbytes: length of data described in the scatterlists 213 * @nbytes: length of data described in the scatterlists
214 * @offset: number of bytes to fast-forward past at the beginning of
215 * scatterlists.
199 * @iv: destination for the iv data, if the algorithm requires it 216 * @iv: destination for the iv data, if the algorithm requires it
200 * 217 *
201 * This is common code shared by all the AES algorithms. It uses the block 218 * This is common code shared by all the AES algorithms. It uses the block
@@ -207,6 +224,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
207 struct scatterlist *dst, 224 struct scatterlist *dst,
208 struct scatterlist *src, 225 struct scatterlist *src,
209 unsigned int nbytes, 226 unsigned int nbytes,
227 unsigned int offset,
210 u8 *iv) 228 u8 *iv)
211{ 229{
212 struct nx_sg *nx_insg = nx_ctx->in_sg; 230 struct nx_sg *nx_insg = nx_ctx->in_sg;
@@ -215,8 +233,10 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
215 if (iv) 233 if (iv)
216 memcpy(iv, desc->info, AES_BLOCK_SIZE); 234 memcpy(iv, desc->info, AES_BLOCK_SIZE);
217 235
218 nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes); 236 nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src,
219 nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes); 237 offset, nbytes);
238 nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst,
239 offset, nbytes);
220 240
221 /* these lengths should be negative, which will indicate to phyp that 241 /* these lengths should be negative, which will indicate to phyp that
222 * the input and output parameters are scatterlists, not linear 242 * the input and output parameters are scatterlists, not linear
@@ -235,6 +255,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
235 */ 255 */
236void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function) 256void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
237{ 257{
258 spin_lock_init(&nx_ctx->lock);
238 memset(nx_ctx->kmem, 0, nx_ctx->kmem_len); 259 memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
239 nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT; 260 nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;
240 261
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 3232b182dd28..befda07ca1da 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -117,6 +117,7 @@ struct nx_ctr_priv {
117}; 117};
118 118
119struct nx_crypto_ctx { 119struct nx_crypto_ctx {
120 spinlock_t lock; /* synchronize access to the context */
120 void *kmem; /* unaligned, kmalloc'd buffer */ 121 void *kmem; /* unaligned, kmalloc'd buffer */
121 size_t kmem_len; /* length of kmem */ 122 size_t kmem_len; /* length of kmem */
122 struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */ 123 struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */
@@ -155,7 +156,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
155struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32); 156struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32);
156int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *, 157int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
157 struct scatterlist *, struct scatterlist *, unsigned int, 158 struct scatterlist *, struct scatterlist *, unsigned int,
158 u8 *); 159 unsigned int, u8 *);
159struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int, 160struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
160 struct scatterlist *, unsigned int, 161 struct scatterlist *, unsigned int,
161 unsigned int); 162 unsigned int);
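
The prototypes keep their shape, but nx_crypto_ctx gains the lock that the hash and cipher entry points now take with spin_lock_irqsave() before touching the shared csbcpb, as the nx-sha512 hunks earlier show. A rough user-space analogue of that per-context serialization; the pthread mutex stands in for the kernel spinlock, and struct ctx, ctx_op() and worker() are invented for the demo (build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t lock;
	unsigned long msg_bits;		/* models csbcpb message-length state */
};

static void ctx_op(struct ctx *c, unsigned long nbits)
{
	pthread_mutex_lock(&c->lock);	/* spin_lock_irqsave() in the driver */
	c->msg_bits += nbits;		/* touch shared per-tfm state */
	pthread_mutex_unlock(&c->lock);	/* spin_unlock_irqrestore() */
}

static void *worker(void *arg)
{
	for (int i = 0; i < 100000; i++)
		ctx_op(arg, 8);
	return NULL;
}

int main(void)
{
	struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER, .msg_bits = 0 };
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, &c);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	printf("msg_bits = %lu (expect %d)\n", c.msg_bits, 4 * 100000 * 8);
	return 0;
}

Without the lock the concurrent updates of msg_bits would race, much like two requests on the same tfm could previously corrupt the csbcpb bookkeeping.
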
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 5f7980586850..ce791c2f81f7 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -13,7 +13,9 @@
13 * 13 *
14 */ 14 */
15 15
16#define pr_fmt(fmt) "%s: " fmt, __func__ 16#define pr_fmt(fmt) "%20s: " fmt, __func__
17#define prn(num) pr_debug(#num "=%d\n", num)
18#define prx(num) pr_debug(#num "=%x\n", num)
17 19
18#include <linux/err.h> 20#include <linux/err.h>
19#include <linux/module.h> 21#include <linux/module.h>
@@ -38,6 +40,8 @@
38#define DST_MAXBURST 4 40#define DST_MAXBURST 4
39#define DMA_MIN (DST_MAXBURST * sizeof(u32)) 41#define DMA_MIN (DST_MAXBURST * sizeof(u32))
40 42
43#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
44
41/* OMAP TRM gives bitfields as start:end, where start is the higher bit 45/* OMAP TRM gives bitfields as start:end, where start is the higher bit
42 number. For example 7:0 */ 46 number. For example 7:0 */
43#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) 47#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
@@ -74,6 +78,10 @@
74 78
75#define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04)) 79#define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04))
76 80
81#define AES_REG_IRQ_STATUS(dd) ((dd)->pdata->irq_status_ofs)
82#define AES_REG_IRQ_ENABLE(dd) ((dd)->pdata->irq_enable_ofs)
83#define AES_REG_IRQ_DATA_IN BIT(1)
84#define AES_REG_IRQ_DATA_OUT BIT(2)
77#define DEFAULT_TIMEOUT (5*HZ) 85#define DEFAULT_TIMEOUT (5*HZ)
78 86
79#define FLAGS_MODE_MASK 0x000f 87#define FLAGS_MODE_MASK 0x000f
@@ -86,6 +94,8 @@
86#define FLAGS_FAST BIT(5) 94#define FLAGS_FAST BIT(5)
87#define FLAGS_BUSY BIT(6) 95#define FLAGS_BUSY BIT(6)
88 96
97#define AES_BLOCK_WORDS (AES_BLOCK_SIZE >> 2)
98
89struct omap_aes_ctx { 99struct omap_aes_ctx {
90 struct omap_aes_dev *dd; 100 struct omap_aes_dev *dd;
91 101
@@ -119,6 +129,8 @@ struct omap_aes_pdata {
119 u32 data_ofs; 129 u32 data_ofs;
120 u32 rev_ofs; 130 u32 rev_ofs;
121 u32 mask_ofs; 131 u32 mask_ofs;
132 u32 irq_enable_ofs;
133 u32 irq_status_ofs;
122 134
123 u32 dma_enable_in; 135 u32 dma_enable_in;
124 u32 dma_enable_out; 136 u32 dma_enable_out;
@@ -146,25 +158,32 @@ struct omap_aes_dev {
146 struct tasklet_struct queue_task; 158 struct tasklet_struct queue_task;
147 159
148 struct ablkcipher_request *req; 160 struct ablkcipher_request *req;
161
162 /*
163 * total is used by PIO mode for bookkeeping, so keep a copy in
164 * total_save; it is needed later to calculate the page order
165 */
149 size_t total; 166 size_t total;
167 size_t total_save;
168
150 struct scatterlist *in_sg; 169 struct scatterlist *in_sg;
151 struct scatterlist in_sgl;
152 size_t in_offset;
153 struct scatterlist *out_sg; 170 struct scatterlist *out_sg;
171
172 /* Buffers for copying for unaligned cases */
173 struct scatterlist in_sgl;
154 struct scatterlist out_sgl; 174 struct scatterlist out_sgl;
155 size_t out_offset; 175 struct scatterlist *orig_out;
176 int sgs_copied;
156 177
157 size_t buflen; 178 struct scatter_walk in_walk;
158 void *buf_in; 179 struct scatter_walk out_walk;
159 size_t dma_size;
160 int dma_in; 180 int dma_in;
161 struct dma_chan *dma_lch_in; 181 struct dma_chan *dma_lch_in;
162 dma_addr_t dma_addr_in;
163 void *buf_out;
164 int dma_out; 182 int dma_out;
165 struct dma_chan *dma_lch_out; 183 struct dma_chan *dma_lch_out;
166 dma_addr_t dma_addr_out; 184 int in_sg_len;
167 185 int out_sg_len;
186 int pio_only;
168 const struct omap_aes_pdata *pdata; 187 const struct omap_aes_pdata *pdata;
169}; 188};
170 189
@@ -172,16 +191,36 @@ struct omap_aes_dev {
172static LIST_HEAD(dev_list); 191static LIST_HEAD(dev_list);
173static DEFINE_SPINLOCK(list_lock); 192static DEFINE_SPINLOCK(list_lock);
174 193
194#ifdef DEBUG
195#define omap_aes_read(dd, offset) \
196({ \
197 int _read_ret; \
198 _read_ret = __raw_readl(dd->io_base + offset); \
199 pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n", \
200 offset, _read_ret); \
201 _read_ret; \
202})
203#else
175static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset) 204static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
176{ 205{
177 return __raw_readl(dd->io_base + offset); 206 return __raw_readl(dd->io_base + offset);
178} 207}
208#endif
179 209
210#ifdef DEBUG
211#define omap_aes_write(dd, offset, value) \
212 do { \
213 pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n", \
214 offset, value); \
215 __raw_writel(value, dd->io_base + offset); \
216 } while (0)
217#else
180static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset, 218static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
181 u32 value) 219 u32 value)
182{ 220{
183 __raw_writel(value, dd->io_base + offset); 221 __raw_writel(value, dd->io_base + offset);
184} 222}
223#endif
185 224
186static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset, 225static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
187 u32 value, u32 mask) 226 u32 value, u32 mask)
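
Under DEBUG the accessors above become macros so every register access can be traced without changing any call site; the read side relies on a GCC statement expression so it still yields a value. A self-contained sketch of the same pattern, where regs[] plays the memory-mapped window and all names are illustrative (compile with -DDEBUG to see the traces):

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[64];	/* fake register file standing in for io_base */

#ifdef DEBUG
#define reg_read(off) \
({ \
	uint32_t _v = regs[(off) / 4]; \
	printf("read  %#x -> %#x\n", (unsigned)(off), (unsigned)_v); \
	_v; \
})
#define reg_write(off, val) \
	do { \
		printf("write %#x <- %#x\n", (unsigned)(off), (unsigned)(val)); \
		regs[(off) / 4] = (val); \
	} while (0)
#else
static inline uint32_t reg_read(uint32_t off) { return regs[off / 4]; }
static inline void reg_write(uint32_t off, uint32_t val) { regs[off / 4] = val; }
#endif

int main(void)
{
	reg_write(0x40, 0xdeadbeef);
	printf("readback: %#x\n", (unsigned)reg_read(0x40));
	return 0;
}

The do { } while (0) wrapper on the write side keeps the macro usable as a single statement, matching the driver's version.
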
@@ -323,33 +362,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
323 dd->dma_lch_out = NULL; 362 dd->dma_lch_out = NULL;
324 dd->dma_lch_in = NULL; 363 dd->dma_lch_in = NULL;
325 364
326 dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
327 dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
328 dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
329 dd->buflen &= ~(AES_BLOCK_SIZE - 1);
330
331 if (!dd->buf_in || !dd->buf_out) {
332 dev_err(dd->dev, "unable to alloc pages.\n");
333 goto err_alloc;
334 }
335
336 /* MAP here */
337 dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
338 DMA_TO_DEVICE);
339 if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
340 dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
341 err = -EINVAL;
342 goto err_map_in;
343 }
344
345 dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
346 DMA_FROM_DEVICE);
347 if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
348 dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
349 err = -EINVAL;
350 goto err_map_out;
351 }
352
353 dma_cap_zero(mask); 365 dma_cap_zero(mask);
354 dma_cap_set(DMA_SLAVE, mask); 366 dma_cap_set(DMA_SLAVE, mask);
355 367
@@ -376,14 +388,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
376err_dma_out: 388err_dma_out:
377 dma_release_channel(dd->dma_lch_in); 389 dma_release_channel(dd->dma_lch_in);
378err_dma_in: 390err_dma_in:
379 dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
380 DMA_FROM_DEVICE);
381err_map_out:
382 dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
383err_map_in:
384 free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
385 free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
386err_alloc:
387 if (err) 391 if (err)
388 pr_err("error: %d\n", err); 392 pr_err("error: %d\n", err);
389 return err; 393 return err;
@@ -393,11 +397,6 @@ static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
393{ 397{
394 dma_release_channel(dd->dma_lch_out); 398 dma_release_channel(dd->dma_lch_out);
395 dma_release_channel(dd->dma_lch_in); 399 dma_release_channel(dd->dma_lch_in);
396 dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
397 DMA_FROM_DEVICE);
398 dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
399 free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
400 free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
401} 400}
402 401
403static void sg_copy_buf(void *buf, struct scatterlist *sg, 402static void sg_copy_buf(void *buf, struct scatterlist *sg,
@@ -414,59 +413,27 @@ static void sg_copy_buf(void *buf, struct scatterlist *sg,
414 scatterwalk_done(&walk, out, 0); 413 scatterwalk_done(&walk, out, 0);
415} 414}
416 415
417static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
418 size_t buflen, size_t total, int out)
419{
420 unsigned int count, off = 0;
421
422 while (buflen && total) {
423 count = min((*sg)->length - *offset, total);
424 count = min(count, buflen);
425
426 if (!count)
427 return off;
428
429 /*
430 * buflen and total are AES_BLOCK_SIZE size aligned,
431 * so count should be also aligned
432 */
433
434 sg_copy_buf(buf + off, *sg, *offset, count, out);
435
436 off += count;
437 buflen -= count;
438 *offset += count;
439 total -= count;
440
441 if (*offset == (*sg)->length) {
442 *sg = sg_next(*sg);
443 if (*sg)
444 *offset = 0;
445 else
446 total = 0;
447 }
448 }
449
450 return off;
451}
452
453static int omap_aes_crypt_dma(struct crypto_tfm *tfm, 416static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
454 struct scatterlist *in_sg, struct scatterlist *out_sg) 417 struct scatterlist *in_sg, struct scatterlist *out_sg,
418 int in_sg_len, int out_sg_len)
455{ 419{
456 struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); 420 struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
457 struct omap_aes_dev *dd = ctx->dd; 421 struct omap_aes_dev *dd = ctx->dd;
458 struct dma_async_tx_descriptor *tx_in, *tx_out; 422 struct dma_async_tx_descriptor *tx_in, *tx_out;
459 struct dma_slave_config cfg; 423 struct dma_slave_config cfg;
460 dma_addr_t dma_addr_in = sg_dma_address(in_sg); 424 int ret;
461 int ret, length = sg_dma_len(in_sg);
462 425
463 pr_debug("len: %d\n", length); 426 if (dd->pio_only) {
427 scatterwalk_start(&dd->in_walk, dd->in_sg);
428 scatterwalk_start(&dd->out_walk, dd->out_sg);
464 429
465 dd->dma_size = length; 430 /* Enable DATAIN interrupt and let it take
431 care of the rest */
432 omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
433 return 0;
434 }
466 435
467 if (!(dd->flags & FLAGS_FAST)) 436 dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);
468 dma_sync_single_for_device(dd->dev, dma_addr_in, length,
469 DMA_TO_DEVICE);
470 437
471 memset(&cfg, 0, sizeof(cfg)); 438 memset(&cfg, 0, sizeof(cfg));
472 439
@@ -485,7 +452,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
485 return ret; 452 return ret;
486 } 453 }
487 454
488 tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1, 455 tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
489 DMA_MEM_TO_DEV, 456 DMA_MEM_TO_DEV,
490 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 457 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
491 if (!tx_in) { 458 if (!tx_in) {
@@ -504,7 +471,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
504 return ret; 471 return ret;
505 } 472 }
506 473
507 tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1, 474 tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
508 DMA_DEV_TO_MEM, 475 DMA_DEV_TO_MEM,
509 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 476 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
510 if (!tx_out) { 477 if (!tx_out) {
@@ -522,7 +489,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
522 dma_async_issue_pending(dd->dma_lch_out); 489 dma_async_issue_pending(dd->dma_lch_out);
523 490
524 /* start DMA */ 491 /* start DMA */
525 dd->pdata->trigger(dd, length); 492 dd->pdata->trigger(dd, dd->total);
526 493
527 return 0; 494 return 0;
528} 495}
@@ -531,93 +498,32 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
531{ 498{
532 struct crypto_tfm *tfm = crypto_ablkcipher_tfm( 499 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
533 crypto_ablkcipher_reqtfm(dd->req)); 500 crypto_ablkcipher_reqtfm(dd->req));
534 int err, fast = 0, in, out; 501 int err;
535 size_t count;
536 dma_addr_t addr_in, addr_out;
537 struct scatterlist *in_sg, *out_sg;
538 int len32;
539 502
540 pr_debug("total: %d\n", dd->total); 503 pr_debug("total: %d\n", dd->total);
541 504
542 if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) { 505 if (!dd->pio_only) {
543 /* check for alignment */ 506 err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
544 in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)); 507 DMA_TO_DEVICE);
545 out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));
546
547 fast = in && out;
548 }
549
550 if (fast) {
551 count = min(dd->total, sg_dma_len(dd->in_sg));
552 count = min(count, sg_dma_len(dd->out_sg));
553
554 if (count != dd->total) {
555 pr_err("request length != buffer length\n");
556 return -EINVAL;
557 }
558
559 pr_debug("fast\n");
560
561 err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
562 if (!err) { 508 if (!err) {
563 dev_err(dd->dev, "dma_map_sg() error\n"); 509 dev_err(dd->dev, "dma_map_sg() error\n");
564 return -EINVAL; 510 return -EINVAL;
565 } 511 }
566 512
567 err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); 513 err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
514 DMA_FROM_DEVICE);
568 if (!err) { 515 if (!err) {
569 dev_err(dd->dev, "dma_map_sg() error\n"); 516 dev_err(dd->dev, "dma_map_sg() error\n");
570 dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
571 return -EINVAL; 517 return -EINVAL;
572 } 518 }
573
574 addr_in = sg_dma_address(dd->in_sg);
575 addr_out = sg_dma_address(dd->out_sg);
576
577 in_sg = dd->in_sg;
578 out_sg = dd->out_sg;
579
580 dd->flags |= FLAGS_FAST;
581
582 } else {
583 /* use cache buffers */
584 count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
585 dd->buflen, dd->total, 0);
586
587 len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;
588
589 /*
590 * The data going into the AES module has been copied
591 * to a local buffer and the data coming out will go
592 * into a local buffer so set up local SG entries for
593 * both.
594 */
595 sg_init_table(&dd->in_sgl, 1);
596 dd->in_sgl.offset = dd->in_offset;
597 sg_dma_len(&dd->in_sgl) = len32;
598 sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;
599
600 sg_init_table(&dd->out_sgl, 1);
601 dd->out_sgl.offset = dd->out_offset;
602 sg_dma_len(&dd->out_sgl) = len32;
603 sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;
604
605 in_sg = &dd->in_sgl;
606 out_sg = &dd->out_sgl;
607
608 addr_in = dd->dma_addr_in;
609 addr_out = dd->dma_addr_out;
610
611 dd->flags &= ~FLAGS_FAST;
612
613 } 519 }
614 520
615 dd->total -= count; 521 err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
616 522 dd->out_sg_len);
617 err = omap_aes_crypt_dma(tfm, in_sg, out_sg); 523 if (err && !dd->pio_only) {
618 if (err) { 524 dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
619 dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); 525 dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
620 dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); 526 DMA_FROM_DEVICE);
621 } 527 }
622 528
623 return err; 529 return err;
@@ -637,7 +543,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
637static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) 543static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
638{ 544{
639 int err = 0; 545 int err = 0;
640 size_t count;
641 546
642 pr_debug("total: %d\n", dd->total); 547 pr_debug("total: %d\n", dd->total);
643 548
@@ -646,23 +551,49 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
646 dmaengine_terminate_all(dd->dma_lch_in); 551 dmaengine_terminate_all(dd->dma_lch_in);
647 dmaengine_terminate_all(dd->dma_lch_out); 552 dmaengine_terminate_all(dd->dma_lch_out);
648 553
649 if (dd->flags & FLAGS_FAST) { 554 return err;
650 dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); 555}
651 dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); 556
652 } else { 557int omap_aes_check_aligned(struct scatterlist *sg)
653 dma_sync_single_for_device(dd->dev, dd->dma_addr_out, 558{
654 dd->dma_size, DMA_FROM_DEVICE); 559 while (sg) {
655 560 if (!IS_ALIGNED(sg->offset, 4))
656 /* copy data */ 561 return -1;
657 count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out, 562 if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
658 dd->buflen, dd->dma_size, 1); 563 return -1;
659 if (count != dd->dma_size) { 564 sg = sg_next(sg);
660 err = -EINVAL;
661 pr_err("not all data converted: %u\n", count);
662 }
663 } 565 }
566 return 0;
567}
664 568
665 return err; 569int omap_aes_copy_sgs(struct omap_aes_dev *dd)
570{
571 void *buf_in, *buf_out;
572 int pages;
573
574 pages = get_order(dd->total);
575
576 buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
577 buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
578
579 if (!buf_in || !buf_out) {
580 pr_err("Couldn't allocated pages for unaligned cases.\n");
581 return -1;
582 }
583
584 dd->orig_out = dd->out_sg;
585
586 sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
587
588 sg_init_table(&dd->in_sgl, 1);
589 sg_set_buf(&dd->in_sgl, buf_in, dd->total);
590 dd->in_sg = &dd->in_sgl;
591
592 sg_init_table(&dd->out_sgl, 1);
593 sg_set_buf(&dd->out_sgl, buf_out, dd->total);
594 dd->out_sg = &dd->out_sgl;
595
596 return 0;
666} 597}
667 598
668static int omap_aes_handle_queue(struct omap_aes_dev *dd, 599static int omap_aes_handle_queue(struct omap_aes_dev *dd,
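
omap_aes_check_aligned() and omap_aes_copy_sgs() above implement the new policy: if any scatterlist entry is not 4-byte aligned or not a multiple of the AES block size, the whole request is copied into freshly allocated contiguous pages, and the result is copied back to the original destination in omap_aes_done_task(). A tiny model of the alignment test only; struct ent and the sample data are invented for illustration:

#include <stdio.h>
#include <stdbool.h>

#define AES_BLOCK_SIZE 16

struct ent { unsigned int offset; unsigned int length; };

static bool sg_is_aligned(const struct ent *e, int n)
{
	for (int i = 0; i < n; i++) {
		if (e[i].offset & 3)			/* !IS_ALIGNED(offset, 4) */
			return false;
		if (e[i].length & (AES_BLOCK_SIZE - 1))	/* !IS_ALIGNED(length, 16) */
			return false;
	}
	return true;
}

int main(void)
{
	struct ent ok[]  = { { 0, 64 }, { 4, 32 } };
	struct ent bad[] = { { 0, 64 }, { 6, 20 } };

	printf("ok : %s\n", sg_is_aligned(ok, 2)  ? "DMA directly" : "copy to bounce buffer");
	printf("bad: %s\n", sg_is_aligned(bad, 2) ? "DMA directly" : "copy to bounce buffer");
	return 0;
}

In the driver the same test gates whether omap_aes_copy_sgs() builds the single-entry in_sgl/out_sgl bounce lists before the transfer starts.
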
@@ -698,11 +629,23 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
698 /* assign new request to device */ 629 /* assign new request to device */
699 dd->req = req; 630 dd->req = req;
700 dd->total = req->nbytes; 631 dd->total = req->nbytes;
701 dd->in_offset = 0; 632 dd->total_save = req->nbytes;
702 dd->in_sg = req->src; 633 dd->in_sg = req->src;
703 dd->out_offset = 0;
704 dd->out_sg = req->dst; 634 dd->out_sg = req->dst;
705 635
636 if (omap_aes_check_aligned(dd->in_sg) ||
637 omap_aes_check_aligned(dd->out_sg)) {
638 if (omap_aes_copy_sgs(dd))
639 pr_err("Failed to copy SGs for unaligned cases\n");
640 dd->sgs_copied = 1;
641 } else {
642 dd->sgs_copied = 0;
643 }
644
645 dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
646 dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
647 BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
648
706 rctx = ablkcipher_request_ctx(req); 649 rctx = ablkcipher_request_ctx(req);
707 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); 650 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
708 rctx->mode &= FLAGS_MODE_MASK; 651 rctx->mode &= FLAGS_MODE_MASK;
@@ -726,21 +669,32 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
726static void omap_aes_done_task(unsigned long data) 669static void omap_aes_done_task(unsigned long data)
727{ 670{
728 struct omap_aes_dev *dd = (struct omap_aes_dev *)data; 671 struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
729 int err; 672 void *buf_in, *buf_out;
730 673 int pages;
731 pr_debug("enter\n"); 674
675 pr_debug("enter done_task\n");
676
677 if (!dd->pio_only) {
678 dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
679 DMA_FROM_DEVICE);
680 dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
681 dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
682 DMA_FROM_DEVICE);
683 omap_aes_crypt_dma_stop(dd);
684 }
732 685
733 err = omap_aes_crypt_dma_stop(dd); 686 if (dd->sgs_copied) {
687 buf_in = sg_virt(&dd->in_sgl);
688 buf_out = sg_virt(&dd->out_sgl);
734 689
735 err = dd->err ? : err; 690 sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
736 691
737 if (dd->total && !err) { 692 pages = get_order(dd->total_save);
738 err = omap_aes_crypt_dma_start(dd); 693 free_pages((unsigned long)buf_in, pages);
739 if (!err) 694 free_pages((unsigned long)buf_out, pages);
740 return; /* DMA started. Not fininishing. */
741 } 695 }
742 696
743 omap_aes_finish_req(dd, err); 697 omap_aes_finish_req(dd, 0);
744 omap_aes_handle_queue(dd, NULL); 698 omap_aes_handle_queue(dd, NULL);
745 699
746 pr_debug("exit\n"); 700 pr_debug("exit\n");
@@ -1002,6 +956,8 @@ static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
1002 .data_ofs = 0x60, 956 .data_ofs = 0x60,
1003 .rev_ofs = 0x80, 957 .rev_ofs = 0x80,
1004 .mask_ofs = 0x84, 958 .mask_ofs = 0x84,
959 .irq_status_ofs = 0x8c,
960 .irq_enable_ofs = 0x90,
1005 .dma_enable_in = BIT(5), 961 .dma_enable_in = BIT(5),
1006 .dma_enable_out = BIT(6), 962 .dma_enable_out = BIT(6),
1007 .major_mask = 0x0700, 963 .major_mask = 0x0700,
@@ -1010,6 +966,90 @@ static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
1010 .minor_shift = 0, 966 .minor_shift = 0,
1011}; 967};
1012 968
969static irqreturn_t omap_aes_irq(int irq, void *dev_id)
970{
971 struct omap_aes_dev *dd = dev_id;
972 u32 status, i;
973 u32 *src, *dst;
974
975 status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
976 if (status & AES_REG_IRQ_DATA_IN) {
977 omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
978
979 BUG_ON(!dd->in_sg);
980
981 BUG_ON(_calc_walked(in) > dd->in_sg->length);
982
983 src = sg_virt(dd->in_sg) + _calc_walked(in);
984
985 for (i = 0; i < AES_BLOCK_WORDS; i++) {
986 omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
987
988 scatterwalk_advance(&dd->in_walk, 4);
989 if (dd->in_sg->length == _calc_walked(in)) {
990 dd->in_sg = scatterwalk_sg_next(dd->in_sg);
991 if (dd->in_sg) {
992 scatterwalk_start(&dd->in_walk,
993 dd->in_sg);
994 src = sg_virt(dd->in_sg) +
995 _calc_walked(in);
996 }
997 } else {
998 src++;
999 }
1000 }
1001
1002 /* Clear IRQ status */
1003 status &= ~AES_REG_IRQ_DATA_IN;
1004 omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
1005
1006 /* Enable DATA_OUT interrupt */
1007 omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);
1008
1009 } else if (status & AES_REG_IRQ_DATA_OUT) {
1010 omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
1011
1012 BUG_ON(!dd->out_sg);
1013
1014 BUG_ON(_calc_walked(out) > dd->out_sg->length);
1015
1016 dst = sg_virt(dd->out_sg) + _calc_walked(out);
1017
1018 for (i = 0; i < AES_BLOCK_WORDS; i++) {
1019 *dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
1020 scatterwalk_advance(&dd->out_walk, 4);
1021 if (dd->out_sg->length == _calc_walked(out)) {
1022 dd->out_sg = scatterwalk_sg_next(dd->out_sg);
1023 if (dd->out_sg) {
1024 scatterwalk_start(&dd->out_walk,
1025 dd->out_sg);
1026 dst = sg_virt(dd->out_sg) +
1027 _calc_walked(out);
1028 }
1029 } else {
1030 dst++;
1031 }
1032 }
1033
1034 dd->total -= AES_BLOCK_SIZE;
1035
1036 BUG_ON(dd->total < 0);
1037
1038 /* Clear IRQ status */
1039 status &= ~AES_REG_IRQ_DATA_OUT;
1040 omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
1041
1042 if (!dd->total)
1043 /* All bytes read! */
1044 tasklet_schedule(&dd->done_task);
1045 else
1046 /* Enable DATA_IN interrupt for next block */
1047 omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
1048 }
1049
1050 return IRQ_HANDLED;
1051}
1052
1013static const struct of_device_id omap_aes_of_match[] = { 1053static const struct of_device_id omap_aes_of_match[] = {
1014 { 1054 {
1015 .compatible = "ti,omap2-aes", 1055 .compatible = "ti,omap2-aes",
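
The interrupt handler above drives the new PIO path: on DATA_IN it writes one 16-byte block, four words at a time, into the data registers, then switches to the DATA_OUT interrupt to read the processed block back, repeating until dd->total reaches zero. A user-space model of that block-by-block movement; data_reg[] and pio_one_block() are stand-ins for the hardware interface, not driver code:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define AES_BLOCK_SIZE  16
#define AES_BLOCK_WORDS (AES_BLOCK_SIZE / 4)

static uint32_t data_reg[AES_BLOCK_WORDS];	/* fake DATA_0..DATA_3 registers */

static void pio_one_block(const uint32_t *src, uint32_t *dst)
{
	/* DATA_IN interrupt: feed one block into the engine */
	for (int i = 0; i < AES_BLOCK_WORDS; i++)
		data_reg[i] = src[i];

	/* DATA_OUT interrupt: read the processed block back */
	for (int i = 0; i < AES_BLOCK_WORDS; i++)
		dst[i] = data_reg[i];	/* identity here; hardware would encrypt */
}

int main(void)
{
	uint32_t in[8], out[8];		/* two AES blocks worth of words */
	size_t nblocks = sizeof(in) / AES_BLOCK_SIZE;

	for (unsigned i = 0; i < 8; i++)
		in[i] = 0x11111111u * (i + 1);

	for (size_t b = 0; b < nblocks; b++)
		pio_one_block(in + b * AES_BLOCK_WORDS,
			      out + b * AES_BLOCK_WORDS);

	printf("moved %zu blocks: %s\n", nblocks,
	       memcmp(in, out, sizeof(in)) ? "mismatch" : "match");
	return 0;
}

In the real handler the scatterwalk keeps src/dst pointing into the current scatterlist entry across block and entry boundaries; the model only shows the per-interrupt block granularity.
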
@@ -1115,10 +1155,10 @@ static int omap_aes_probe(struct platform_device *pdev)
1115 struct omap_aes_dev *dd; 1155 struct omap_aes_dev *dd;
1116 struct crypto_alg *algp; 1156 struct crypto_alg *algp;
1117 struct resource res; 1157 struct resource res;
1118 int err = -ENOMEM, i, j; 1158 int err = -ENOMEM, i, j, irq = -1;
1119 u32 reg; 1159 u32 reg;
1120 1160
1121 dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL); 1161 dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
1122 if (dd == NULL) { 1162 if (dd == NULL) {
1123 dev_err(dev, "unable to alloc data struct.\n"); 1163 dev_err(dev, "unable to alloc data struct.\n");
1124 goto err_data; 1164 goto err_data;
@@ -1158,8 +1198,23 @@ static int omap_aes_probe(struct platform_device *pdev)
1158 tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd); 1198 tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
1159 1199
1160 err = omap_aes_dma_init(dd); 1200 err = omap_aes_dma_init(dd);
1161 if (err) 1201 if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
1162 goto err_dma; 1202 dd->pio_only = 1;
1203
1204 irq = platform_get_irq(pdev, 0);
1205 if (irq < 0) {
1206 dev_err(dev, "can't get IRQ resource\n");
1207 goto err_irq;
1208 }
1209
1210 err = devm_request_irq(dev, irq, omap_aes_irq, 0,
1211 dev_name(dev), dd);
1212 if (err) {
1213 dev_err(dev, "Unable to grab omap-aes IRQ\n");
1214 goto err_irq;
1215 }
1216 }
1217
1163 1218
1164 INIT_LIST_HEAD(&dd->list); 1219 INIT_LIST_HEAD(&dd->list);
1165 spin_lock(&list_lock); 1220 spin_lock(&list_lock);
@@ -1187,13 +1242,13 @@ err_algs:
1187 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) 1242 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
1188 crypto_unregister_alg( 1243 crypto_unregister_alg(
1189 &dd->pdata->algs_info[i].algs_list[j]); 1244 &dd->pdata->algs_info[i].algs_list[j]);
1190 omap_aes_dma_cleanup(dd); 1245 if (!dd->pio_only)
1191err_dma: 1246 omap_aes_dma_cleanup(dd);
1247err_irq:
1192 tasklet_kill(&dd->done_task); 1248 tasklet_kill(&dd->done_task);
1193 tasklet_kill(&dd->queue_task); 1249 tasklet_kill(&dd->queue_task);
1194 pm_runtime_disable(dev); 1250 pm_runtime_disable(dev);
1195err_res: 1251err_res:
1196 kfree(dd);
1197 dd = NULL; 1252 dd = NULL;
1198err_data: 1253err_data:
1199 dev_err(dev, "initialization failed.\n"); 1254 dev_err(dev, "initialization failed.\n");
@@ -1221,7 +1276,6 @@ static int omap_aes_remove(struct platform_device *pdev)
1221 tasklet_kill(&dd->queue_task); 1276 tasklet_kill(&dd->queue_task);
1222 omap_aes_dma_cleanup(dd); 1277 omap_aes_dma_cleanup(dd);
1223 pm_runtime_disable(dd->dev); 1278 pm_runtime_disable(dd->dev);
1224 kfree(dd);
1225 dd = NULL; 1279 dd = NULL;
1226 1280
1227 return 0; 1281 return 0;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 4bb67652c200..8bdde57f6bb1 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -44,17 +44,13 @@
44#include <crypto/hash.h> 44#include <crypto/hash.h>
45#include <crypto/internal/hash.h> 45#include <crypto/internal/hash.h>
46 46
47#define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE
48#define MD5_DIGEST_SIZE 16 47#define MD5_DIGEST_SIZE 16
49 48
50#define DST_MAXBURST 16
51#define DMA_MIN (DST_MAXBURST * sizeof(u32))
52
53#define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04)) 49#define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04))
54#define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04)) 50#define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04))
55#define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs) 51#define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs)
56 52
57#define SHA_REG_ODIGEST(x) (0x00 + ((x) * 0x04)) 53#define SHA_REG_ODIGEST(dd, x) ((dd)->pdata->odigest_ofs + ((x) * 0x04))
58 54
59#define SHA_REG_CTRL 0x18 55#define SHA_REG_CTRL 0x18
60#define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5) 56#define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5)
@@ -75,18 +71,21 @@
75#define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs) 71#define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs)
76#define SHA_REG_SYSSTATUS_RESETDONE (1 << 0) 72#define SHA_REG_SYSSTATUS_RESETDONE (1 << 0)
77 73
78#define SHA_REG_MODE 0x44 74#define SHA_REG_MODE(dd) ((dd)->pdata->mode_ofs)
79#define SHA_REG_MODE_HMAC_OUTER_HASH (1 << 7) 75#define SHA_REG_MODE_HMAC_OUTER_HASH (1 << 7)
80#define SHA_REG_MODE_HMAC_KEY_PROC (1 << 5) 76#define SHA_REG_MODE_HMAC_KEY_PROC (1 << 5)
81#define SHA_REG_MODE_CLOSE_HASH (1 << 4) 77#define SHA_REG_MODE_CLOSE_HASH (1 << 4)
82#define SHA_REG_MODE_ALGO_CONSTANT (1 << 3) 78#define SHA_REG_MODE_ALGO_CONSTANT (1 << 3)
83#define SHA_REG_MODE_ALGO_MASK (3 << 1)
84#define SHA_REG_MODE_ALGO_MD5_128 (0 << 1)
85#define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1)
86#define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1)
87#define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1)
88 79
89#define SHA_REG_LENGTH 0x48 80#define SHA_REG_MODE_ALGO_MASK (7 << 0)
81#define SHA_REG_MODE_ALGO_MD5_128 (0 << 1)
82#define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1)
83#define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1)
84#define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1)
85#define SHA_REG_MODE_ALGO_SHA2_384 (1 << 0)
86#define SHA_REG_MODE_ALGO_SHA2_512 (3 << 0)
87
88#define SHA_REG_LENGTH(dd) ((dd)->pdata->length_ofs)
90 89
91#define SHA_REG_IRQSTATUS 0x118 90#define SHA_REG_IRQSTATUS 0x118
92#define SHA_REG_IRQSTATUS_CTX_RDY (1 << 3) 91#define SHA_REG_IRQSTATUS_CTX_RDY (1 << 3)
@@ -117,18 +116,16 @@
117#define FLAGS_SG 17 116#define FLAGS_SG 17
118 117
119#define FLAGS_MODE_SHIFT 18 118#define FLAGS_MODE_SHIFT 18
120#define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK \ 119#define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT)
121 << (FLAGS_MODE_SHIFT - 1)) 120#define FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
122#define FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 \ 121#define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
123 << (FLAGS_MODE_SHIFT - 1)) 122#define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
124#define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 \ 123#define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
125 << (FLAGS_MODE_SHIFT - 1)) 124#define FLAGS_MODE_SHA384 (SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
126#define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 \ 125#define FLAGS_MODE_SHA512 (SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)
127 << (FLAGS_MODE_SHIFT - 1)) 126
128#define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 \ 127#define FLAGS_HMAC 21
129 << (FLAGS_MODE_SHIFT - 1)) 128#define FLAGS_ERROR 22
130#define FLAGS_HMAC 20
131#define FLAGS_ERROR 21
132 129
133#define OP_UPDATE 1 130#define OP_UPDATE 1
134#define OP_FINAL 2 131#define OP_FINAL 2
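
The reworked macros above widen the algorithm field to the 3-bit encoding used by the OMAP4/OMAP5 MODE register (bit 0 selects the SHA-384/512 variants) and store that code at FLAGS_MODE_SHIFT without the old off-by-one shift. A small worked example that only reproduces the macro arithmetic; the program itself is illustrative:

#include <stdio.h>

#define SHA_REG_MODE_ALGO_MASK		(7 << 0)
#define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)
#define SHA_REG_MODE_ALGO_SHA2_512	(3 << 0)

#define FLAGS_MODE_SHIFT	18
#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA512	(SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)

int main(void)
{
	unsigned long flags = FLAGS_MODE_SHA512;

	/* what omap_sham_write_ctrl_omap4() writes into MODE.ALGO */
	unsigned int algo = (flags & FLAGS_MODE_MASK) >> FLAGS_MODE_SHIFT;

	printf("FLAGS_MODE_SHA256 = %#lx\n", (unsigned long)FLAGS_MODE_SHA256);
	printf("FLAGS_MODE_SHA512 = %#lx\n", (unsigned long)FLAGS_MODE_SHA512);
	printf("MODE.ALGO for SHA-512 = %#x\n", algo);
	return 0;
}

With the mask now covering all three algorithm bits, SHA-256 (code 6) and SHA-512 (code 3) stay distinct both in ctx->flags and in the value written back to the register.
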
@@ -145,7 +142,7 @@ struct omap_sham_reqctx {
145 unsigned long flags; 142 unsigned long flags;
146 unsigned long op; 143 unsigned long op;
147 144
148 u8 digest[SHA256_DIGEST_SIZE] OMAP_ALIGNED; 145 u8 digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
149 size_t digcnt; 146 size_t digcnt;
150 size_t bufcnt; 147 size_t bufcnt;
151 size_t buflen; 148 size_t buflen;
@@ -162,8 +159,8 @@ struct omap_sham_reqctx {
162 159
163struct omap_sham_hmac_ctx { 160struct omap_sham_hmac_ctx {
164 struct crypto_shash *shash; 161 struct crypto_shash *shash;
165 u8 ipad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED; 162 u8 ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
166 u8 opad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED; 163 u8 opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
167}; 164};
168 165
169struct omap_sham_ctx { 166struct omap_sham_ctx {
@@ -205,6 +202,8 @@ struct omap_sham_pdata {
205 u32 rev_ofs; 202 u32 rev_ofs;
206 u32 mask_ofs; 203 u32 mask_ofs;
207 u32 sysstatus_ofs; 204 u32 sysstatus_ofs;
205 u32 mode_ofs;
206 u32 length_ofs;
208 207
209 u32 major_mask; 208 u32 major_mask;
210 u32 major_shift; 209 u32 major_shift;
@@ -223,6 +222,7 @@ struct omap_sham_dev {
223 unsigned int dma; 222 unsigned int dma;
224 struct dma_chan *dma_lch; 223 struct dma_chan *dma_lch;
225 struct tasklet_struct done_task; 224 struct tasklet_struct done_task;
225 u8 polling_mode;
226 226
227 unsigned long flags; 227 unsigned long flags;
228 struct crypto_queue queue; 228 struct crypto_queue queue;
@@ -306,9 +306,9 @@ static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
306 for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { 306 for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
307 if (out) 307 if (out)
308 opad[i] = omap_sham_read(dd, 308 opad[i] = omap_sham_read(dd,
309 SHA_REG_ODIGEST(i)); 309 SHA_REG_ODIGEST(dd, i));
310 else 310 else
311 omap_sham_write(dd, SHA_REG_ODIGEST(i), 311 omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
312 opad[i]); 312 opad[i]);
313 } 313 }
314 } 314 }
@@ -342,6 +342,12 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
342 case FLAGS_MODE_SHA256: 342 case FLAGS_MODE_SHA256:
343 d = SHA256_DIGEST_SIZE / sizeof(u32); 343 d = SHA256_DIGEST_SIZE / sizeof(u32);
344 break; 344 break;
345 case FLAGS_MODE_SHA384:
346 d = SHA384_DIGEST_SIZE / sizeof(u32);
347 break;
348 case FLAGS_MODE_SHA512:
349 d = SHA512_DIGEST_SIZE / sizeof(u32);
350 break;
345 default: 351 default:
346 d = 0; 352 d = 0;
347 } 353 }
@@ -404,6 +410,30 @@ static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
404 return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY); 410 return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
405} 411}
406 412
413static int get_block_size(struct omap_sham_reqctx *ctx)
414{
415 int d;
416
417 switch (ctx->flags & FLAGS_MODE_MASK) {
418 case FLAGS_MODE_MD5:
419 case FLAGS_MODE_SHA1:
420 d = SHA1_BLOCK_SIZE;
421 break;
422 case FLAGS_MODE_SHA224:
423 case FLAGS_MODE_SHA256:
424 d = SHA256_BLOCK_SIZE;
425 break;
426 case FLAGS_MODE_SHA384:
427 case FLAGS_MODE_SHA512:
428 d = SHA512_BLOCK_SIZE;
429 break;
430 default:
431 d = 0;
432 }
433
434 return d;
435}
436
407static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset, 437static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
408 u32 *value, int count) 438 u32 *value, int count)
409{ 439{
@@ -422,20 +452,24 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
422 * CLOSE_HASH only for the last one. Note that flags mode bits 452 * CLOSE_HASH only for the last one. Note that flags mode bits
423 * correspond to algorithm encoding in mode register. 453 * correspond to algorithm encoding in mode register.
424 */ 454 */
425 val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT - 1); 455 val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
426 if (!ctx->digcnt) { 456 if (!ctx->digcnt) {
427 struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); 457 struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
428 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); 458 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
429 struct omap_sham_hmac_ctx *bctx = tctx->base; 459 struct omap_sham_hmac_ctx *bctx = tctx->base;
460 int bs, nr_dr;
430 461
431 val |= SHA_REG_MODE_ALGO_CONSTANT; 462 val |= SHA_REG_MODE_ALGO_CONSTANT;
432 463
433 if (ctx->flags & BIT(FLAGS_HMAC)) { 464 if (ctx->flags & BIT(FLAGS_HMAC)) {
465 bs = get_block_size(ctx);
466 nr_dr = bs / (2 * sizeof(u32));
434 val |= SHA_REG_MODE_HMAC_KEY_PROC; 467 val |= SHA_REG_MODE_HMAC_KEY_PROC;
435 omap_sham_write_n(dd, SHA_REG_ODIGEST(0), 468 omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
436 (u32 *)bctx->ipad, 469 (u32 *)bctx->ipad, nr_dr);
437 SHA1_BLOCK_SIZE / sizeof(u32)); 470 omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
438 ctx->digcnt += SHA1_BLOCK_SIZE; 471 (u32 *)bctx->ipad + nr_dr, nr_dr);
472 ctx->digcnt += bs;
439 } 473 }
440 } 474 }
441 475
@@ -451,7 +485,7 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
451 SHA_REG_MODE_HMAC_KEY_PROC; 485 SHA_REG_MODE_HMAC_KEY_PROC;
452 486
453 dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags); 487 dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
454 omap_sham_write_mask(dd, SHA_REG_MODE, val, mask); 488 omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
455 omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY); 489 omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
456 omap_sham_write_mask(dd, SHA_REG_MASK(dd), 490 omap_sham_write_mask(dd, SHA_REG_MASK(dd),
457 SHA_REG_MASK_IT_EN | 491 SHA_REG_MASK_IT_EN |
@@ -461,7 +495,7 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
461 495
462static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length) 496static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
463{ 497{
464 omap_sham_write(dd, SHA_REG_LENGTH, length); 498 omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
465} 499}
466 500
467static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd) 501static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
@@ -474,7 +508,7 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
474 size_t length, int final) 508 size_t length, int final)
475{ 509{
476 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 510 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
477 int count, len32; 511 int count, len32, bs32, offset = 0;
478 const u32 *buffer = (const u32 *)buf; 512 const u32 *buffer = (const u32 *)buf;
479 513
480 dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", 514 dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
@@ -486,18 +520,23 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
486 /* should be non-zero before next lines to disable clocks later */ 520 /* should be non-zero before next lines to disable clocks later */
487 ctx->digcnt += length; 521 ctx->digcnt += length;
488 522
489 if (dd->pdata->poll_irq(dd))
490 return -ETIMEDOUT;
491
492 if (final) 523 if (final)
493 set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ 524 set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
494 525
495 set_bit(FLAGS_CPU, &dd->flags); 526 set_bit(FLAGS_CPU, &dd->flags);
496 527
497 len32 = DIV_ROUND_UP(length, sizeof(u32)); 528 len32 = DIV_ROUND_UP(length, sizeof(u32));
529 bs32 = get_block_size(ctx) / sizeof(u32);
498 530
499 for (count = 0; count < len32; count++) 531 while (len32) {
500 omap_sham_write(dd, SHA_REG_DIN(dd, count), buffer[count]); 532 if (dd->pdata->poll_irq(dd))
533 return -ETIMEDOUT;
534
535 for (count = 0; count < min(len32, bs32); count++, offset++)
536 omap_sham_write(dd, SHA_REG_DIN(dd, count),
537 buffer[offset]);
538 len32 -= min(len32, bs32);
539 }
501 540
502 return -EINPROGRESS; 541 return -EINPROGRESS;
503} 542}
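
omap_sham_xmit_cpu() above now polls the input-ready condition once per block and feeds at most one block's worth of words per iteration, which the larger SHA-384/512 block size and the DMA-less polling mode require. A simplified stand-alone sketch of that loop; poll_ready() and the shortened BS32 value are assumptions for the demo:

#include <stdio.h>
#include <stdint.h>

#define BS32 16		/* shortened "block size in words" for the demo */

static int poll_ready(void)
{
	return 1;	/* the hardware handshake; always ready in this model */
}

static int xmit_cpu(const uint32_t *buf, unsigned int len32)
{
	unsigned int offset = 0;

	while (len32) {
		unsigned int chunk = len32 < BS32 ? len32 : BS32;

		if (!poll_ready())
			return -1;	/* -ETIMEDOUT in the driver */

		for (unsigned int i = 0; i < chunk; i++, offset++)
			printf("DIN[%u] <- %#x\n", i, (unsigned)buf[offset]);

		len32 -= chunk;
	}
	return 0;
}

int main(void)
{
	uint32_t msg[20];

	for (unsigned i = 0; i < 20; i++)
		msg[i] = 0x1000 + i;

	return xmit_cpu(msg, 20);	/* one full chunk of 16 words, then 4 */
}
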
@@ -516,7 +555,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
516 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 555 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
517 struct dma_async_tx_descriptor *tx; 556 struct dma_async_tx_descriptor *tx;
518 struct dma_slave_config cfg; 557 struct dma_slave_config cfg;
519 int len32, ret; 558 int len32, ret, dma_min = get_block_size(ctx);
520 559
521 dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", 560 dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
522 ctx->digcnt, length, final); 561 ctx->digcnt, length, final);
@@ -525,7 +564,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
525 564
526 cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0); 565 cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
527 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 566 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
528 cfg.dst_maxburst = DST_MAXBURST; 567 cfg.dst_maxburst = dma_min / DMA_SLAVE_BUSWIDTH_4_BYTES;
529 568
530 ret = dmaengine_slave_config(dd->dma_lch, &cfg); 569 ret = dmaengine_slave_config(dd->dma_lch, &cfg);
531 if (ret) { 570 if (ret) {
@@ -533,7 +572,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
533 return ret; 572 return ret;
534 } 573 }
535 574
536 len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN; 575 len32 = DIV_ROUND_UP(length, dma_min) * dma_min;
537 576
538 if (is_sg) { 577 if (is_sg) {
539 /* 578 /*
@@ -666,14 +705,14 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
666/* Start address alignment */ 705/* Start address alignment */
667#define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32))) 706#define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32)))
668/* SHA1 block size alignment */ 707/* SHA1 block size alignment */
669#define SG_SA(sg) (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE)) 708#define SG_SA(sg, bs) (IS_ALIGNED(sg->length, bs))
670 709
671static int omap_sham_update_dma_start(struct omap_sham_dev *dd) 710static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
672{ 711{
673 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 712 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
674 unsigned int length, final, tail; 713 unsigned int length, final, tail;
675 struct scatterlist *sg; 714 struct scatterlist *sg;
676 int ret; 715 int ret, bs;
677 716
678 if (!ctx->total) 717 if (!ctx->total)
679 return 0; 718 return 0;
@@ -687,30 +726,31 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
687 * the dmaengine infrastructure will calculate that it needs 726 * the dmaengine infrastructure will calculate that it needs
688 * to transfer 0 frames which ultimately fails. 727 * to transfer 0 frames which ultimately fails.
689 */ 728 */
690 if (ctx->total < (DST_MAXBURST * sizeof(u32))) 729 if (ctx->total < get_block_size(ctx))
691 return omap_sham_update_dma_slow(dd); 730 return omap_sham_update_dma_slow(dd);
692 731
693 dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", 732 dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
694 ctx->digcnt, ctx->bufcnt, ctx->total); 733 ctx->digcnt, ctx->bufcnt, ctx->total);
695 734
696 sg = ctx->sg; 735 sg = ctx->sg;
736 bs = get_block_size(ctx);
697 737
698 if (!SG_AA(sg)) 738 if (!SG_AA(sg))
699 return omap_sham_update_dma_slow(dd); 739 return omap_sham_update_dma_slow(dd);
700 740
701 if (!sg_is_last(sg) && !SG_SA(sg)) 741 if (!sg_is_last(sg) && !SG_SA(sg, bs))
702 /* size is not SHA1_BLOCK_SIZE aligned */ 742 /* size is not BLOCK_SIZE aligned */
703 return omap_sham_update_dma_slow(dd); 743 return omap_sham_update_dma_slow(dd);
704 744
705 length = min(ctx->total, sg->length); 745 length = min(ctx->total, sg->length);
706 746
707 if (sg_is_last(sg)) { 747 if (sg_is_last(sg)) {
708 if (!(ctx->flags & BIT(FLAGS_FINUP))) { 748 if (!(ctx->flags & BIT(FLAGS_FINUP))) {
709 /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */ 749 /* not last sg must be BLOCK_SIZE aligned */
710 tail = length & (SHA1_MD5_BLOCK_SIZE - 1); 750 tail = length & (bs - 1);
711 /* without finup() we need one block to close hash */ 751 /* without finup() we need one block to close hash */
712 if (!tail) 752 if (!tail)
713 tail = SHA1_MD5_BLOCK_SIZE; 753 tail = bs;
714 length -= tail; 754 length -= tail;
715 } 755 }
716 } 756 }
@@ -737,13 +777,22 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
737static int omap_sham_update_cpu(struct omap_sham_dev *dd) 777static int omap_sham_update_cpu(struct omap_sham_dev *dd)
738{ 778{
739 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 779 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
740 int bufcnt; 780 int bufcnt, final;
781
782 if (!ctx->total)
783 return 0;
741 784
742 omap_sham_append_sg(ctx); 785 omap_sham_append_sg(ctx);
786
787 final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
788
789 dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n",
790 ctx->bufcnt, ctx->digcnt, final);
791
743 bufcnt = ctx->bufcnt; 792 bufcnt = ctx->bufcnt;
744 ctx->bufcnt = 0; 793 ctx->bufcnt = 0;
745 794
746 return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1); 795 return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final);
747} 796}
748 797
749static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) 798static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
@@ -773,6 +822,7 @@ static int omap_sham_init(struct ahash_request *req)
773 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); 822 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
774 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 823 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
775 struct omap_sham_dev *dd = NULL, *tmp; 824 struct omap_sham_dev *dd = NULL, *tmp;
825 int bs = 0;
776 826
777 spin_lock_bh(&sham.lock); 827 spin_lock_bh(&sham.lock);
778 if (!tctx->dd) { 828 if (!tctx->dd) {
@@ -796,15 +846,27 @@ static int omap_sham_init(struct ahash_request *req)
796 switch (crypto_ahash_digestsize(tfm)) { 846 switch (crypto_ahash_digestsize(tfm)) {
797 case MD5_DIGEST_SIZE: 847 case MD5_DIGEST_SIZE:
798 ctx->flags |= FLAGS_MODE_MD5; 848 ctx->flags |= FLAGS_MODE_MD5;
849 bs = SHA1_BLOCK_SIZE;
799 break; 850 break;
800 case SHA1_DIGEST_SIZE: 851 case SHA1_DIGEST_SIZE:
801 ctx->flags |= FLAGS_MODE_SHA1; 852 ctx->flags |= FLAGS_MODE_SHA1;
853 bs = SHA1_BLOCK_SIZE;
802 break; 854 break;
803 case SHA224_DIGEST_SIZE: 855 case SHA224_DIGEST_SIZE:
804 ctx->flags |= FLAGS_MODE_SHA224; 856 ctx->flags |= FLAGS_MODE_SHA224;
857 bs = SHA224_BLOCK_SIZE;
805 break; 858 break;
806 case SHA256_DIGEST_SIZE: 859 case SHA256_DIGEST_SIZE:
807 ctx->flags |= FLAGS_MODE_SHA256; 860 ctx->flags |= FLAGS_MODE_SHA256;
861 bs = SHA256_BLOCK_SIZE;
862 break;
863 case SHA384_DIGEST_SIZE:
864 ctx->flags |= FLAGS_MODE_SHA384;
865 bs = SHA384_BLOCK_SIZE;
866 break;
867 case SHA512_DIGEST_SIZE:
868 ctx->flags |= FLAGS_MODE_SHA512;
869 bs = SHA512_BLOCK_SIZE;
808 break; 870 break;
809 } 871 }
810 872
@@ -816,8 +878,8 @@ static int omap_sham_init(struct ahash_request *req)
816 if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { 878 if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
817 struct omap_sham_hmac_ctx *bctx = tctx->base; 879 struct omap_sham_hmac_ctx *bctx = tctx->base;
818 880
819 memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE); 881 memcpy(ctx->buffer, bctx->ipad, bs);
820 ctx->bufcnt = SHA1_MD5_BLOCK_SIZE; 882 ctx->bufcnt = bs;
821 } 883 }
822 884
823 ctx->flags |= BIT(FLAGS_HMAC); 885 ctx->flags |= BIT(FLAGS_HMAC);
@@ -853,8 +915,11 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
853 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 915 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
854 int err = 0, use_dma = 1; 916 int err = 0, use_dma = 1;
855 917
856 if (ctx->bufcnt <= DMA_MIN) 918 if ((ctx->bufcnt <= get_block_size(ctx)) || dd->polling_mode)
857 /* faster to handle last block with cpu */ 919 /*
920 * faster to handle last block with cpu or
921 * use cpu when dma is not present.
922 */
858 use_dma = 0; 923 use_dma = 0;
859 924
860 if (use_dma) 925 if (use_dma)
@@ -1006,6 +1071,8 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
1006static int omap_sham_update(struct ahash_request *req) 1071static int omap_sham_update(struct ahash_request *req)
1007{ 1072{
1008 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 1073 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1074 struct omap_sham_dev *dd = ctx->dd;
1075 int bs = get_block_size(ctx);
1009 1076
1010 if (!req->nbytes) 1077 if (!req->nbytes)
1011 return 0; 1078 return 0;
@@ -1023,10 +1090,12 @@ static int omap_sham_update(struct ahash_request *req)
1023 */ 1090 */
1024 omap_sham_append_sg(ctx); 1091 omap_sham_append_sg(ctx);
1025 return 0; 1092 return 0;
1026 } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) { 1093 } else if ((ctx->bufcnt + ctx->total <= bs) ||
1094 dd->polling_mode) {
1027 /* 1095 /*
1028 * faster to use CPU for short transfers 1096 * faster to use CPU for short transfers or
1029 */ 1097 * use cpu when dma is not present.
1098 */
1030 ctx->flags |= BIT(FLAGS_CPU); 1099 ctx->flags |= BIT(FLAGS_CPU);
1031 } 1100 }
1032 } else if (ctx->bufcnt + ctx->total < ctx->buflen) { 1101 } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
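
Together with the final_req change earlier, the update path above now falls back to the CPU both for short transfers and whenever dd->polling_mode indicates there is no usable DMA channel. A deliberately simplified decision sketch under those assumptions; pick_path(), the enum and the thresholds are illustrative, and the real code also buffers data on the finup path when it still fits:

#include <stdio.h>
#include <stdbool.h>

enum path { PATH_BUFFER, PATH_CPU, PATH_DMA };

static enum path pick_path(unsigned int bufcnt, unsigned int total,
			   unsigned int bs, unsigned int buflen,
			   bool finup, bool polling_mode)
{
	if (finup) {
		if (bufcnt + total <= bs || polling_mode)
			return PATH_CPU;	/* short final chunk, or no DMA */
		return PATH_DMA;
	}
	if (bufcnt + total < buflen)
		return PATH_BUFFER;		/* just append, hash later */
	return PATH_DMA;
}

int main(void)
{
	printf("finup, 64 bytes, 64-byte block  -> %d\n",
	       pick_path(0, 64, 64, 512, true, false));		/* 1 = PATH_CPU */
	printf("finup, 4096 bytes, polling mode -> %d\n",
	       pick_path(0, 4096, 64, 512, true, true));	/* 1 = PATH_CPU */
	printf("update, 256 bytes               -> %d\n",
	       pick_path(0, 256, 64, 512, false, false));	/* 0 = PATH_BUFFER */
	return 0;
}
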
@@ -1214,6 +1283,16 @@ static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
1214 return omap_sham_cra_init_alg(tfm, "md5"); 1283 return omap_sham_cra_init_alg(tfm, "md5");
1215} 1284}
1216 1285
1286static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
1287{
1288 return omap_sham_cra_init_alg(tfm, "sha384");
1289}
1290
1291static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
1292{
1293 return omap_sham_cra_init_alg(tfm, "sha512");
1294}
1295
1217static void omap_sham_cra_exit(struct crypto_tfm *tfm) 1296static void omap_sham_cra_exit(struct crypto_tfm *tfm)
1218{ 1297{
1219 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); 1298 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
@@ -1422,6 +1501,101 @@ static struct ahash_alg algs_sha224_sha256[] = {
1422}, 1501},
1423}; 1502};
1424 1503
1504static struct ahash_alg algs_sha384_sha512[] = {
1505{
1506 .init = omap_sham_init,
1507 .update = omap_sham_update,
1508 .final = omap_sham_final,
1509 .finup = omap_sham_finup,
1510 .digest = omap_sham_digest,
1511 .halg.digestsize = SHA384_DIGEST_SIZE,
1512 .halg.base = {
1513 .cra_name = "sha384",
1514 .cra_driver_name = "omap-sha384",
1515 .cra_priority = 100,
1516 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1517 CRYPTO_ALG_ASYNC |
1518 CRYPTO_ALG_NEED_FALLBACK,
1519 .cra_blocksize = SHA384_BLOCK_SIZE,
1520 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1521 .cra_alignmask = 0,
1522 .cra_module = THIS_MODULE,
1523 .cra_init = omap_sham_cra_init,
1524 .cra_exit = omap_sham_cra_exit,
1525 }
1526},
1527{
1528 .init = omap_sham_init,
1529 .update = omap_sham_update,
1530 .final = omap_sham_final,
1531 .finup = omap_sham_finup,
1532 .digest = omap_sham_digest,
1533 .halg.digestsize = SHA512_DIGEST_SIZE,
1534 .halg.base = {
1535 .cra_name = "sha512",
1536 .cra_driver_name = "omap-sha512",
1537 .cra_priority = 100,
1538 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1539 CRYPTO_ALG_ASYNC |
1540 CRYPTO_ALG_NEED_FALLBACK,
1541 .cra_blocksize = SHA512_BLOCK_SIZE,
1542 .cra_ctxsize = sizeof(struct omap_sham_ctx),
1543 .cra_alignmask = 0,
1544 .cra_module = THIS_MODULE,
1545 .cra_init = omap_sham_cra_init,
1546 .cra_exit = omap_sham_cra_exit,
1547 }
1548},
1549{
1550 .init = omap_sham_init,
1551 .update = omap_sham_update,
1552 .final = omap_sham_final,
1553 .finup = omap_sham_finup,
1554 .digest = omap_sham_digest,
1555 .setkey = omap_sham_setkey,
1556 .halg.digestsize = SHA384_DIGEST_SIZE,
1557 .halg.base = {
1558 .cra_name = "hmac(sha384)",
1559 .cra_driver_name = "omap-hmac-sha384",
1560 .cra_priority = 100,
1561 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1562 CRYPTO_ALG_ASYNC |
1563 CRYPTO_ALG_NEED_FALLBACK,
1564 .cra_blocksize = SHA384_BLOCK_SIZE,
1565 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1566 sizeof(struct omap_sham_hmac_ctx),
1567 .cra_alignmask = OMAP_ALIGN_MASK,
1568 .cra_module = THIS_MODULE,
1569 .cra_init = omap_sham_cra_sha384_init,
1570 .cra_exit = omap_sham_cra_exit,
1571 }
1572},
1573{
1574 .init = omap_sham_init,
1575 .update = omap_sham_update,
1576 .final = omap_sham_final,
1577 .finup = omap_sham_finup,
1578 .digest = omap_sham_digest,
1579 .setkey = omap_sham_setkey,
1580 .halg.digestsize = SHA512_DIGEST_SIZE,
1581 .halg.base = {
1582 .cra_name = "hmac(sha512)",
1583 .cra_driver_name = "omap-hmac-sha512",
1584 .cra_priority = 100,
1585 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1586 CRYPTO_ALG_ASYNC |
1587 CRYPTO_ALG_NEED_FALLBACK,
1588 .cra_blocksize = SHA512_BLOCK_SIZE,
1589 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1590 sizeof(struct omap_sham_hmac_ctx),
1591 .cra_alignmask = OMAP_ALIGN_MASK,
1592 .cra_module = THIS_MODULE,
1593 .cra_init = omap_sham_cra_sha512_init,
1594 .cra_exit = omap_sham_cra_exit,
1595 }
1596},
1597};
1598
1425static void omap_sham_done_task(unsigned long data) 1599static void omap_sham_done_task(unsigned long data)
1426{ 1600{
1427 struct omap_sham_dev *dd = (struct omap_sham_dev *)data; 1601 struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
@@ -1433,8 +1607,12 @@ static void omap_sham_done_task(unsigned long data)
1433 } 1607 }
1434 1608
1435 if (test_bit(FLAGS_CPU, &dd->flags)) { 1609 if (test_bit(FLAGS_CPU, &dd->flags)) {
1436 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) 1610 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
1437 goto finish; 1611 /* hash or semi-hash ready */
1612 err = omap_sham_update_cpu(dd);
1613 if (err != -EINPROGRESS)
1614 goto finish;
1615 }
1438 } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { 1616 } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
1439 if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { 1617 if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
1440 omap_sham_update_dma_stop(dd); 1618 omap_sham_update_dma_stop(dd);
@@ -1548,11 +1726,54 @@ static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
1548 .poll_irq = omap_sham_poll_irq_omap4, 1726 .poll_irq = omap_sham_poll_irq_omap4,
1549 .intr_hdlr = omap_sham_irq_omap4, 1727 .intr_hdlr = omap_sham_irq_omap4,
1550 .idigest_ofs = 0x020, 1728 .idigest_ofs = 0x020,
1729 .odigest_ofs = 0x0,
1551 .din_ofs = 0x080, 1730 .din_ofs = 0x080,
1552 .digcnt_ofs = 0x040, 1731 .digcnt_ofs = 0x040,
1553 .rev_ofs = 0x100, 1732 .rev_ofs = 0x100,
1554 .mask_ofs = 0x110, 1733 .mask_ofs = 0x110,
1555 .sysstatus_ofs = 0x114, 1734 .sysstatus_ofs = 0x114,
1735 .mode_ofs = 0x44,
1736 .length_ofs = 0x48,
1737 .major_mask = 0x0700,
1738 .major_shift = 8,
1739 .minor_mask = 0x003f,
1740 .minor_shift = 0,
1741};
1742
1743static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
1744 {
1745 .algs_list = algs_sha1_md5,
1746 .size = ARRAY_SIZE(algs_sha1_md5),
1747 },
1748 {
1749 .algs_list = algs_sha224_sha256,
1750 .size = ARRAY_SIZE(algs_sha224_sha256),
1751 },
1752 {
1753 .algs_list = algs_sha384_sha512,
1754 .size = ARRAY_SIZE(algs_sha384_sha512),
1755 },
1756};
1757
1758static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
1759 .algs_info = omap_sham_algs_info_omap5,
1760 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap5),
1761 .flags = BIT(FLAGS_AUTO_XOR),
1762 .digest_size = SHA512_DIGEST_SIZE,
1763 .copy_hash = omap_sham_copy_hash_omap4,
1764 .write_ctrl = omap_sham_write_ctrl_omap4,
1765 .trigger = omap_sham_trigger_omap4,
1766 .poll_irq = omap_sham_poll_irq_omap4,
1767 .intr_hdlr = omap_sham_irq_omap4,
1768 .idigest_ofs = 0x240,
1769 .odigest_ofs = 0x200,
1770 .din_ofs = 0x080,
1771 .digcnt_ofs = 0x280,
1772 .rev_ofs = 0x100,
1773 .mask_ofs = 0x110,
1774 .sysstatus_ofs = 0x114,
1775 .mode_ofs = 0x284,
1776 .length_ofs = 0x288,
1556 .major_mask = 0x0700, 1777 .major_mask = 0x0700,
1557 .major_shift = 8, 1778 .major_shift = 8,
1558 .minor_mask = 0x003f, 1779 .minor_mask = 0x003f,
@@ -1568,6 +1789,10 @@ static const struct of_device_id omap_sham_of_match[] = {
1568 .compatible = "ti,omap4-sham", 1789 .compatible = "ti,omap4-sham",
1569 .data = &omap_sham_pdata_omap4, 1790 .data = &omap_sham_pdata_omap4,
1570 }, 1791 },
1792 {
1793 .compatible = "ti,omap5-sham",
1794 .data = &omap_sham_pdata_omap5,
1795 },
1571 {}, 1796 {},
1572}; 1797};
1573MODULE_DEVICE_TABLE(of, omap_sham_of_match); 1798MODULE_DEVICE_TABLE(of, omap_sham_of_match);
@@ -1667,7 +1892,7 @@ static int omap_sham_probe(struct platform_device *pdev)
1667 int err, i, j; 1892 int err, i, j;
1668 u32 rev; 1893 u32 rev;
1669 1894
1670 dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL); 1895 dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
1671 if (dd == NULL) { 1896 if (dd == NULL) {
1672 dev_err(dev, "unable to alloc data struct.\n"); 1897 dev_err(dev, "unable to alloc data struct.\n");
1673 err = -ENOMEM; 1898 err = -ENOMEM;
@@ -1684,20 +1909,21 @@ static int omap_sham_probe(struct platform_device *pdev)
1684 err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) : 1909 err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
1685 omap_sham_get_res_pdev(dd, pdev, &res); 1910 omap_sham_get_res_pdev(dd, pdev, &res);
1686 if (err) 1911 if (err)
1687 goto res_err; 1912 goto data_err;
1688 1913
1689 dd->io_base = devm_ioremap_resource(dev, &res); 1914 dd->io_base = devm_ioremap_resource(dev, &res);
1690 if (IS_ERR(dd->io_base)) { 1915 if (IS_ERR(dd->io_base)) {
1691 err = PTR_ERR(dd->io_base); 1916 err = PTR_ERR(dd->io_base);
1692 goto res_err; 1917 goto data_err;
1693 } 1918 }
1694 dd->phys_base = res.start; 1919 dd->phys_base = res.start;
1695 1920
1696 err = request_irq(dd->irq, dd->pdata->intr_hdlr, IRQF_TRIGGER_LOW, 1921 err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
1697 dev_name(dev), dd); 1922 IRQF_TRIGGER_NONE, dev_name(dev), dd);
1698 if (err) { 1923 if (err) {
1699 dev_err(dev, "unable to request irq.\n"); 1924 dev_err(dev, "unable to request irq %d, err = %d\n",
1700 goto res_err; 1925 dd->irq, err);
1926 goto data_err;
1701 } 1927 }
1702 1928
1703 dma_cap_zero(mask); 1929 dma_cap_zero(mask);
@@ -1706,10 +1932,8 @@ static int omap_sham_probe(struct platform_device *pdev)
1706 dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn, 1932 dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
1707 &dd->dma, dev, "rx"); 1933 &dd->dma, dev, "rx");
1708 if (!dd->dma_lch) { 1934 if (!dd->dma_lch) {
1709 dev_err(dev, "unable to obtain RX DMA engine channel %u\n", 1935 dd->polling_mode = 1;
1710 dd->dma); 1936 dev_dbg(dev, "using polling mode instead of dma\n");
1711 err = -ENXIO;
1712 goto dma_err;
1713 } 1937 }
1714 1938
1715 dd->flags |= dd->pdata->flags; 1939 dd->flags |= dd->pdata->flags;
@@ -1747,11 +1971,6 @@ err_algs:
1747 &dd->pdata->algs_info[i].algs_list[j]); 1971 &dd->pdata->algs_info[i].algs_list[j]);
1748 pm_runtime_disable(dev); 1972 pm_runtime_disable(dev);
1749 dma_release_channel(dd->dma_lch); 1973 dma_release_channel(dd->dma_lch);
1750dma_err:
1751 free_irq(dd->irq, dd);
1752res_err:
1753 kfree(dd);
1754 dd = NULL;
1755data_err: 1974data_err:
1756 dev_err(dev, "initialization failed.\n"); 1975 dev_err(dev, "initialization failed.\n");
1757 1976
@@ -1776,9 +1995,6 @@ static int omap_sham_remove(struct platform_device *pdev)
1776 tasklet_kill(&dd->done_task); 1995 tasklet_kill(&dd->done_task);
1777 pm_runtime_disable(&pdev->dev); 1996 pm_runtime_disable(&pdev->dev);
1778 dma_release_channel(dd->dma_lch); 1997 dma_release_channel(dd->dma_lch);
1779 free_irq(dd->irq, dd);
1780 kfree(dd);
1781 dd = NULL;
1782 1998
1783 return 0; 1999 return 0;
1784} 2000}
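The omap-sham probe rework above (devm_kzalloc, devm_ioremap_resource, devm_request_irq, and the removal of the res_err/dma_err unwind labels) leans on the kernel's managed-resource idiom: anything obtained through a devm_* helper is released automatically when probe fails or the device is unbound. The sketch below is not taken from the patch; the example_* names and the trivial interrupt handler are placeholders, and it only illustrates why the explicit kfree()/free_irq() cleanup can disappear.

/*
 * Minimal devm-style probe sketch; the example_* identifiers are
 * hypothetical stand-ins, not code from omap-sham.c.
 */
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_dev {
	void __iomem *io_base;
};

static irqreturn_t example_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct example_dev *ed;
	struct resource *res;
	int irq, err;

	ed = devm_kzalloc(dev, sizeof(*ed), GFP_KERNEL);
	if (!ed)
		return -ENOMEM;			/* nothing to unwind */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ed->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ed->io_base))
		return PTR_ERR(ed->io_base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = devm_request_irq(dev, irq, example_irq_handler, 0,
			       dev_name(dev), ed);
	if (err)
		return err;

	platform_set_drvdata(pdev, ed);
	return 0;	/* all resources are device-managed, no goto labels */
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.driver	= {
		.name = "example-sham",
	},
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");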
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index c3dc1c04a5df..d7bb8bac36e9 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -417,7 +417,7 @@ static void sahara_aes_done_task(unsigned long data)
417 dev->req->base.complete(&dev->req->base, dev->error); 417 dev->req->base.complete(&dev->req->base, dev->error);
418} 418}
419 419
420void sahara_watchdog(unsigned long data) 420static void sahara_watchdog(unsigned long data)
421{ 421{
422 struct sahara_dev *dev = (struct sahara_dev *)data; 422 struct sahara_dev *dev = (struct sahara_dev *)data;
423 unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS); 423 unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
@@ -955,7 +955,7 @@ static int sahara_probe(struct platform_device *pdev)
955 dev->hw_link[0] = dma_alloc_coherent(&pdev->dev, 955 dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
956 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), 956 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
957 &dev->hw_phys_link[0], GFP_KERNEL); 957 &dev->hw_phys_link[0], GFP_KERNEL);
958 if (!dev->hw_link) { 958 if (!dev->hw_link[0]) {
959 dev_err(&pdev->dev, "Could not allocate hw links\n"); 959 dev_err(&pdev->dev, "Could not allocate hw links\n");
960 err = -ENOMEM; 960 err = -ENOMEM;
961 goto err_link; 961 goto err_link;
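The sahara change above is a plain allocation-check fix: hw_link is an array member, so !dev->hw_link can never be true, and only the pointer actually returned by dma_alloc_coherent(), dev->hw_link[0], can report failure. A minimal sketch of the corrected test, using made-up example_* types rather than the driver's own:

/*
 * Sketch only; the example_* types are hypothetical stand-ins for the
 * sahara structures.  The point is which expression can become NULL.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct example_hw_link;				/* opaque descriptor type */

struct example_dev {
	struct example_hw_link *hw_link[20];	/* &hw_link[0] is never NULL ... */
	dma_addr_t hw_phys_link[20];		/* ... but its contents may be */
};

static int example_alloc_links(struct device *dev, struct example_dev *ed,
			       size_t size)
{
	ed->hw_link[0] = dma_alloc_coherent(dev, size, &ed->hw_phys_link[0],
					    GFP_KERNEL);
	if (!ed->hw_link[0])	/* test the returned pointer, not the array */
		return -ENOMEM;

	return 0;
}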
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index 85ea7525fa36..2d58da972ae2 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -275,7 +275,7 @@ static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
275 value = aes_readl(dd, TEGRA_AES_INTR_STATUS); 275 value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
276 eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD; 276 eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
277 icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD; 277 icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
278 } while (eng_busy & (!icq_empty)); 278 } while (eng_busy && !icq_empty);
279 aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR); 279 aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR);
280 } 280 }
281 281
@@ -365,7 +365,7 @@ static int aes_set_key(struct tegra_aes_dev *dd)
365 eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD; 365 eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
366 icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD; 366 icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
367 dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD; 367 dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD;
368 } while (eng_busy & (!icq_empty) & dma_busy); 368 } while (eng_busy && !icq_empty && dma_busy);
369 369
370 /* settable command to get key into internal registers */ 370 /* settable command to get key into internal registers */
371 value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT | 371 value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT |
@@ -379,7 +379,7 @@ static int aes_set_key(struct tegra_aes_dev *dd)
379 value = aes_readl(dd, TEGRA_AES_INTR_STATUS); 379 value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
380 eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD; 380 eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
381 icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD; 381 icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
382 } while (eng_busy & (!icq_empty)); 382 } while (eng_busy && !icq_empty);
383 383
384 return 0; 384 return 0;
385} 385}
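All three tegra-aes hunks above fix the same operator slip in the status-polling loops: eng_busy & (!icq_empty) bitwise-ANDs a multi-bit BUSY flag with a 0/1 truth value, so the condition is zero whenever the flag sits anywhere other than bit 0 and the loop gives up after a single register read. The standalone sketch below uses made-up flag values, not the real Tegra register layout:

/* Hypothetical flag values, chosen only to show the & vs && difference. */
#include <stdio.h>

int main(void)
{
	unsigned int eng_busy  = 0x80;	/* BUSY flag set, in bit 7 */
	unsigned int icq_empty = 0x00;	/* command queue still draining */

	/* 0x80 & 1 == 0: the original do/while condition is false and the
	 * polling loop stops after one pass even though the engine is busy. */
	printf("bitwise : 0x%x\n", eng_busy & (!icq_empty));

	/* 1 && 1 == 1: the corrected condition keeps polling while the
	 * engine is busy and the command queue has not drained. */
	printf("logical : %d\n", eng_busy && !icq_empty);

	return 0;
}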
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 33693d966b6a..1c73f4fbc252 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -11,6 +11,8 @@
11 * License terms: GNU General Public License (GPL) version 2 11 * License terms: GNU General Public License (GPL) version 2
12 */ 12 */
13 13
14#define pr_fmt(fmt) "hashX hashX: " fmt
15
14#include <linux/clk.h> 16#include <linux/clk.h>
15#include <linux/device.h> 17#include <linux/device.h>
16#include <linux/err.h> 18#include <linux/err.h>
@@ -35,8 +37,6 @@
35 37
36#include "hash_alg.h" 38#include "hash_alg.h"
37 39
38#define DEV_DBG_NAME "hashX hashX:"
39
40static int hash_mode; 40static int hash_mode;
41module_param(hash_mode, int, 0); 41module_param(hash_mode, int, 0);
42MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); 42MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
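The pr_fmt() definition that replaces DEV_DBG_NAME above works because printk.h expands pr_err(), pr_debug() and friends as printk(<level> pr_fmt(fmt), ...), so a prefix defined once before the driver's includes lands on every message without being spliced into each format string by hand. A minimal, self-contained module sketch of the mechanism (the exampledrv name is made up):

/* pr_fmt() must be defined before the includes that pull in printk.h,
 * exactly as the hash_core.c change above does. */
#define pr_fmt(fmt) "exampledrv: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
	pr_info("loaded\n");		/* logs "exampledrv: loaded" */
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
	pr_info("unloaded\n");		/* logs "exampledrv: unloaded" */
}
module_exit(example_exit);

MODULE_LICENSE("GPL");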
@@ -44,13 +44,13 @@ MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
44/** 44/**
45 * Pre-calculated empty message digests. 45 * Pre-calculated empty message digests.
46 */ 46 */
47static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { 47static const u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
48 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 48 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
49 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 49 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
50 0xaf, 0xd8, 0x07, 0x09 50 0xaf, 0xd8, 0x07, 0x09
51}; 51};
52 52
53static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { 53static const u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
54 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 54 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
55 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 55 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
56 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 56 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
@@ -58,14 +58,14 @@ static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
58}; 58};
59 59
60/* HMAC-SHA1, no key */ 60/* HMAC-SHA1, no key */
61static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { 61static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
62 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08, 62 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
63 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63, 63 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
64 0x70, 0x69, 0x0e, 0x1d 64 0x70, 0x69, 0x0e, 0x1d
65}; 65};
66 66
67/* HMAC-SHA256, no key */ 67/* HMAC-SHA256, no key */
68static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = { 68static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
69 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec, 69 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
70 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5, 70 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
71 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53, 71 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
@@ -97,7 +97,7 @@ static struct hash_driver_data driver_data;
97 * 97 *
98 */ 98 */
99static void hash_messagepad(struct hash_device_data *device_data, 99static void hash_messagepad(struct hash_device_data *device_data,
100 const u32 *message, u8 index_bytes); 100 const u32 *message, u8 index_bytes);
101 101
102/** 102/**
103 * release_hash_device - Releases a previously allocated hash device. 103 * release_hash_device - Releases a previously allocated hash device.
@@ -119,7 +119,7 @@ static void release_hash_device(struct hash_device_data *device_data)
119} 119}
120 120
121static void hash_dma_setup_channel(struct hash_device_data *device_data, 121static void hash_dma_setup_channel(struct hash_device_data *device_data,
122 struct device *dev) 122 struct device *dev)
123{ 123{
124 struct hash_platform_data *platform_data = dev->platform_data; 124 struct hash_platform_data *platform_data = dev->platform_data;
125 struct dma_slave_config conf = { 125 struct dma_slave_config conf = {
@@ -127,7 +127,7 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data,
127 .dst_addr = device_data->phybase + HASH_DMA_FIFO, 127 .dst_addr = device_data->phybase + HASH_DMA_FIFO,
128 .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, 128 .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
129 .dst_maxburst = 16, 129 .dst_maxburst = 16,
130 }; 130 };
131 131
132 dma_cap_zero(device_data->dma.mask); 132 dma_cap_zero(device_data->dma.mask);
133 dma_cap_set(DMA_SLAVE, device_data->dma.mask); 133 dma_cap_set(DMA_SLAVE, device_data->dma.mask);
@@ -135,8 +135,8 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data,
135 device_data->dma.cfg_mem2hash = platform_data->mem_to_engine; 135 device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
136 device_data->dma.chan_mem2hash = 136 device_data->dma.chan_mem2hash =
137 dma_request_channel(device_data->dma.mask, 137 dma_request_channel(device_data->dma.mask,
138 platform_data->dma_filter, 138 platform_data->dma_filter,
139 device_data->dma.cfg_mem2hash); 139 device_data->dma.cfg_mem2hash);
140 140
141 dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf); 141 dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);
142 142
@@ -145,21 +145,21 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data,
145 145
146static void hash_dma_callback(void *data) 146static void hash_dma_callback(void *data)
147{ 147{
148 struct hash_ctx *ctx = (struct hash_ctx *) data; 148 struct hash_ctx *ctx = data;
149 149
150 complete(&ctx->device->dma.complete); 150 complete(&ctx->device->dma.complete);
151} 151}
152 152
153static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, 153static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
154 int len, enum dma_data_direction direction) 154 int len, enum dma_data_direction direction)
155{ 155{
156 struct dma_async_tx_descriptor *desc = NULL; 156 struct dma_async_tx_descriptor *desc = NULL;
157 struct dma_chan *channel = NULL; 157 struct dma_chan *channel = NULL;
158 dma_cookie_t cookie; 158 dma_cookie_t cookie;
159 159
160 if (direction != DMA_TO_DEVICE) { 160 if (direction != DMA_TO_DEVICE) {
161 dev_err(ctx->device->dev, "[%s] Invalid DMA direction", 161 dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
162 __func__); 162 __func__);
163 return -EFAULT; 163 return -EFAULT;
164 } 164 }
165 165
@@ -172,20 +172,19 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
172 direction); 172 direction);
173 173
174 if (!ctx->device->dma.sg_len) { 174 if (!ctx->device->dma.sg_len) {
175 dev_err(ctx->device->dev, 175 dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
176 "[%s]: Could not map the sg list (TO_DEVICE)", 176 __func__);
177 __func__);
178 return -EFAULT; 177 return -EFAULT;
179 } 178 }
180 179
181 dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " 180 dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
182 "(TO_DEVICE)", __func__); 181 __func__);
183 desc = dmaengine_prep_slave_sg(channel, 182 desc = dmaengine_prep_slave_sg(channel,
184 ctx->device->dma.sg, ctx->device->dma.sg_len, 183 ctx->device->dma.sg, ctx->device->dma.sg_len,
185 direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); 184 direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
186 if (!desc) { 185 if (!desc) {
187 dev_err(ctx->device->dev, 186 dev_err(ctx->device->dev,
188 "[%s]: device_prep_slave_sg() failed!", __func__); 187 "%s: device_prep_slave_sg() failed!\n", __func__);
189 return -EFAULT; 188 return -EFAULT;
190 } 189 }
191 190
@@ -205,17 +204,16 @@ static void hash_dma_done(struct hash_ctx *ctx)
205 chan = ctx->device->dma.chan_mem2hash; 204 chan = ctx->device->dma.chan_mem2hash;
206 dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); 205 dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
207 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, 206 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
208 ctx->device->dma.sg_len, DMA_TO_DEVICE); 207 ctx->device->dma.sg_len, DMA_TO_DEVICE);
209
210} 208}
211 209
212static int hash_dma_write(struct hash_ctx *ctx, 210static int hash_dma_write(struct hash_ctx *ctx,
213 struct scatterlist *sg, int len) 211 struct scatterlist *sg, int len)
214{ 212{
215 int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); 213 int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
216 if (error) { 214 if (error) {
217 dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() " 215 dev_dbg(ctx->device->dev,
218 "failed", __func__); 216 "%s: hash_set_dma_transfer() failed\n", __func__);
219 return error; 217 return error;
220 } 218 }
221 219
@@ -245,19 +243,18 @@ static int get_empty_message_digest(
245 if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { 243 if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
246 if (HASH_ALGO_SHA1 == ctx->config.algorithm) { 244 if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
247 memcpy(zero_hash, &zero_message_hash_sha1[0], 245 memcpy(zero_hash, &zero_message_hash_sha1[0],
248 SHA1_DIGEST_SIZE); 246 SHA1_DIGEST_SIZE);
249 *zero_hash_size = SHA1_DIGEST_SIZE; 247 *zero_hash_size = SHA1_DIGEST_SIZE;
250 *zero_digest = true; 248 *zero_digest = true;
251 } else if (HASH_ALGO_SHA256 == 249 } else if (HASH_ALGO_SHA256 ==
252 ctx->config.algorithm) { 250 ctx->config.algorithm) {
253 memcpy(zero_hash, &zero_message_hash_sha256[0], 251 memcpy(zero_hash, &zero_message_hash_sha256[0],
254 SHA256_DIGEST_SIZE); 252 SHA256_DIGEST_SIZE);
255 *zero_hash_size = SHA256_DIGEST_SIZE; 253 *zero_hash_size = SHA256_DIGEST_SIZE;
256 *zero_digest = true; 254 *zero_digest = true;
257 } else { 255 } else {
258 dev_err(device_data->dev, "[%s] " 256 dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
259 "Incorrect algorithm!" 257 __func__);
260 , __func__);
261 ret = -EINVAL; 258 ret = -EINVAL;
262 goto out; 259 goto out;
263 } 260 }
@@ -265,25 +262,24 @@ static int get_empty_message_digest(
265 if (!ctx->keylen) { 262 if (!ctx->keylen) {
266 if (HASH_ALGO_SHA1 == ctx->config.algorithm) { 263 if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
267 memcpy(zero_hash, &zero_message_hmac_sha1[0], 264 memcpy(zero_hash, &zero_message_hmac_sha1[0],
268 SHA1_DIGEST_SIZE); 265 SHA1_DIGEST_SIZE);
269 *zero_hash_size = SHA1_DIGEST_SIZE; 266 *zero_hash_size = SHA1_DIGEST_SIZE;
270 *zero_digest = true; 267 *zero_digest = true;
271 } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { 268 } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
272 memcpy(zero_hash, &zero_message_hmac_sha256[0], 269 memcpy(zero_hash, &zero_message_hmac_sha256[0],
273 SHA256_DIGEST_SIZE); 270 SHA256_DIGEST_SIZE);
274 *zero_hash_size = SHA256_DIGEST_SIZE; 271 *zero_hash_size = SHA256_DIGEST_SIZE;
275 *zero_digest = true; 272 *zero_digest = true;
276 } else { 273 } else {
277 dev_err(device_data->dev, "[%s] " 274 dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
278 "Incorrect algorithm!" 275 __func__);
279 , __func__);
280 ret = -EINVAL; 276 ret = -EINVAL;
281 goto out; 277 goto out;
282 } 278 }
283 } else { 279 } else {
284 dev_dbg(device_data->dev, "[%s] Continue hash " 280 dev_dbg(device_data->dev,
285 "calculation, since hmac key available", 281 "%s: Continue hash calculation, since hmac key available\n",
286 __func__); 282 __func__);
287 } 283 }
288 } 284 }
289out: 285out:
@@ -299,9 +295,8 @@ out:
299 * This function request for disabling power (regulator) and clock, 295 * This function request for disabling power (regulator) and clock,
300 * and could also save current hw state. 296 * and could also save current hw state.
301 */ 297 */
302static int hash_disable_power( 298static int hash_disable_power(struct hash_device_data *device_data,
303 struct hash_device_data *device_data, 299 bool save_device_state)
304 bool save_device_state)
305{ 300{
306 int ret = 0; 301 int ret = 0;
307 struct device *dev = device_data->dev; 302 struct device *dev = device_data->dev;
@@ -319,7 +314,7 @@ static int hash_disable_power(
319 clk_disable(device_data->clk); 314 clk_disable(device_data->clk);
320 ret = regulator_disable(device_data->regulator); 315 ret = regulator_disable(device_data->regulator);
321 if (ret) 316 if (ret)
322 dev_err(dev, "[%s] regulator_disable() failed!", __func__); 317 dev_err(dev, "%s: regulator_disable() failed!\n", __func__);
323 318
324 device_data->power_state = false; 319 device_data->power_state = false;
325 320
@@ -337,9 +332,8 @@ out:
337 * This function request for enabling power (regulator) and clock, 332 * This function request for enabling power (regulator) and clock,
338 * and could also restore a previously saved hw state. 333 * and could also restore a previously saved hw state.
339 */ 334 */
340static int hash_enable_power( 335static int hash_enable_power(struct hash_device_data *device_data,
341 struct hash_device_data *device_data, 336 bool restore_device_state)
342 bool restore_device_state)
343{ 337{
344 int ret = 0; 338 int ret = 0;
345 struct device *dev = device_data->dev; 339 struct device *dev = device_data->dev;
@@ -348,14 +342,13 @@ static int hash_enable_power(
348 if (!device_data->power_state) { 342 if (!device_data->power_state) {
349 ret = regulator_enable(device_data->regulator); 343 ret = regulator_enable(device_data->regulator);
350 if (ret) { 344 if (ret) {
351 dev_err(dev, "[%s]: regulator_enable() failed!", 345 dev_err(dev, "%s: regulator_enable() failed!\n",
352 __func__); 346 __func__);
353 goto out; 347 goto out;
354 } 348 }
355 ret = clk_enable(device_data->clk); 349 ret = clk_enable(device_data->clk);
356 if (ret) { 350 if (ret) {
357 dev_err(dev, "[%s]: clk_enable() failed!", 351 dev_err(dev, "%s: clk_enable() failed!\n", __func__);
358 __func__);
359 ret = regulator_disable( 352 ret = regulator_disable(
360 device_data->regulator); 353 device_data->regulator);
361 goto out; 354 goto out;
@@ -366,8 +359,7 @@ static int hash_enable_power(
366 if (device_data->restore_dev_state) { 359 if (device_data->restore_dev_state) {
367 if (restore_device_state) { 360 if (restore_device_state) {
368 device_data->restore_dev_state = false; 361 device_data->restore_dev_state = false;
369 hash_resume_state(device_data, 362 hash_resume_state(device_data, &device_data->state);
370 &device_data->state);
371 } 363 }
372 } 364 }
373out: 365out:
@@ -447,7 +439,7 @@ static int hash_get_device_data(struct hash_ctx *ctx,
447 * spec or due to a bug in the hw. 439 * spec or due to a bug in the hw.
448 */ 440 */
449static void hash_hw_write_key(struct hash_device_data *device_data, 441static void hash_hw_write_key(struct hash_device_data *device_data,
450 const u8 *key, unsigned int keylen) 442 const u8 *key, unsigned int keylen)
451{ 443{
452 u32 word = 0; 444 u32 word = 0;
453 int nwords = 1; 445 int nwords = 1;
@@ -491,14 +483,14 @@ static void hash_hw_write_key(struct hash_device_data *device_data,
491 * calculation. 483 * calculation.
492 */ 484 */
493static int init_hash_hw(struct hash_device_data *device_data, 485static int init_hash_hw(struct hash_device_data *device_data,
494 struct hash_ctx *ctx) 486 struct hash_ctx *ctx)
495{ 487{
496 int ret = 0; 488 int ret = 0;
497 489
498 ret = hash_setconfiguration(device_data, &ctx->config); 490 ret = hash_setconfiguration(device_data, &ctx->config);
499 if (ret) { 491 if (ret) {
500 dev_err(device_data->dev, "[%s] hash_setconfiguration() " 492 dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
501 "failed!", __func__); 493 __func__);
502 return ret; 494 return ret;
503 } 495 }
504 496
@@ -528,9 +520,8 @@ static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
528 size -= sg->length; 520 size -= sg->length;
529 521
530 /* hash_set_dma_transfer will align last nent */ 522 /* hash_set_dma_transfer will align last nent */
531 if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) 523 if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
532 || (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && 524 (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
533 size > 0))
534 aligned_data = false; 525 aligned_data = false;
535 526
536 sg = sg_next(sg); 527 sg = sg_next(sg);
@@ -585,21 +576,17 @@ static int hash_init(struct ahash_request *req)
585 if (req->nbytes < HASH_DMA_ALIGN_SIZE) { 576 if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
586 req_ctx->dma_mode = false; /* Don't use DMA */ 577 req_ctx->dma_mode = false; /* Don't use DMA */
587 578
588 pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct " 579 pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
589 "to CPU mode for data size < %d", 580 __func__, HASH_DMA_ALIGN_SIZE);
590 __func__, HASH_DMA_ALIGN_SIZE);
591 } else { 581 } else {
592 if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE && 582 if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
593 hash_dma_valid_data(req->src, 583 hash_dma_valid_data(req->src, req->nbytes)) {
594 req->nbytes)) {
595 req_ctx->dma_mode = true; 584 req_ctx->dma_mode = true;
596 } else { 585 } else {
597 req_ctx->dma_mode = false; 586 req_ctx->dma_mode = false;
598 pr_debug(DEV_DBG_NAME " [%s] DMA mode, but use" 587 pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
599 " CPU mode for datalength < %d" 588 __func__,
600 " or non-aligned data, except " 589 HASH_DMA_PERFORMANCE_MIN_SIZE);
601 "in last nent", __func__,
602 HASH_DMA_PERFORMANCE_MIN_SIZE);
603 } 590 }
604 } 591 }
605 } 592 }
@@ -614,9 +601,8 @@ static int hash_init(struct ahash_request *req)
614 * the HASH hardware. 601 * the HASH hardware.
615 * 602 *
616 */ 603 */
617static void hash_processblock( 604static void hash_processblock(struct hash_device_data *device_data,
618 struct hash_device_data *device_data, 605 const u32 *message, int length)
619 const u32 *message, int length)
620{ 606{
621 int len = length / HASH_BYTES_PER_WORD; 607 int len = length / HASH_BYTES_PER_WORD;
622 /* 608 /*
@@ -641,7 +627,7 @@ static void hash_processblock(
641 * 627 *
642 */ 628 */
643static void hash_messagepad(struct hash_device_data *device_data, 629static void hash_messagepad(struct hash_device_data *device_data,
644 const u32 *message, u8 index_bytes) 630 const u32 *message, u8 index_bytes)
645{ 631{
646 int nwords = 1; 632 int nwords = 1;
647 633
@@ -666,15 +652,13 @@ static void hash_messagepad(struct hash_device_data *device_data,
666 652
667 /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ 653 /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
668 HASH_SET_NBLW(index_bytes * 8); 654 HASH_SET_NBLW(index_bytes * 8);
669 dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__, 655 dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
670 readl_relaxed(&device_data->base->din), 656 __func__, readl_relaxed(&device_data->base->din),
671 (int)(readl_relaxed(&device_data->base->str) & 657 readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
672 HASH_STR_NBLW_MASK));
673 HASH_SET_DCAL; 658 HASH_SET_DCAL;
674 dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d", 659 dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
675 __func__, readl_relaxed(&device_data->base->din), 660 __func__, readl_relaxed(&device_data->base->din),
676 (int)(readl_relaxed(&device_data->base->str) & 661 readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
677 HASH_STR_NBLW_MASK));
678 662
679 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) 663 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
680 cpu_relax(); 664 cpu_relax();
@@ -704,7 +688,7 @@ static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
704 * @config: Pointer to a configuration structure. 688 * @config: Pointer to a configuration structure.
705 */ 689 */
706int hash_setconfiguration(struct hash_device_data *device_data, 690int hash_setconfiguration(struct hash_device_data *device_data,
707 struct hash_config *config) 691 struct hash_config *config)
708{ 692{
709 int ret = 0; 693 int ret = 0;
710 694
@@ -731,8 +715,8 @@ int hash_setconfiguration(struct hash_device_data *device_data,
731 break; 715 break;
732 716
733 default: 717 default:
734 dev_err(device_data->dev, "[%s] Incorrect algorithm.", 718 dev_err(device_data->dev, "%s: Incorrect algorithm\n",
735 __func__); 719 __func__);
736 return -EPERM; 720 return -EPERM;
737 } 721 }
738 722
@@ -744,23 +728,22 @@ int hash_setconfiguration(struct hash_device_data *device_data,
744 HASH_CLEAR_BITS(&device_data->base->cr, 728 HASH_CLEAR_BITS(&device_data->base->cr,
745 HASH_CR_MODE_MASK); 729 HASH_CR_MODE_MASK);
746 else if (HASH_OPER_MODE_HMAC == config->oper_mode) { 730 else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
747 HASH_SET_BITS(&device_data->base->cr, 731 HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
748 HASH_CR_MODE_MASK);
749 if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) { 732 if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
750 /* Truncate key to blocksize */ 733 /* Truncate key to blocksize */
751 dev_dbg(device_data->dev, "[%s] LKEY set", __func__); 734 dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
752 HASH_SET_BITS(&device_data->base->cr, 735 HASH_SET_BITS(&device_data->base->cr,
753 HASH_CR_LKEY_MASK); 736 HASH_CR_LKEY_MASK);
754 } else { 737 } else {
755 dev_dbg(device_data->dev, "[%s] LKEY cleared", 738 dev_dbg(device_data->dev, "%s: LKEY cleared\n",
756 __func__); 739 __func__);
757 HASH_CLEAR_BITS(&device_data->base->cr, 740 HASH_CLEAR_BITS(&device_data->base->cr,
758 HASH_CR_LKEY_MASK); 741 HASH_CR_LKEY_MASK);
759 } 742 }
760 } else { /* Wrong hash mode */ 743 } else { /* Wrong hash mode */
761 ret = -EPERM; 744 ret = -EPERM;
762 dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", 745 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
763 __func__); 746 __func__);
764 } 747 }
765 return ret; 748 return ret;
766} 749}
@@ -793,8 +776,9 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
793} 776}
794 777
795static int hash_process_data(struct hash_device_data *device_data, 778static int hash_process_data(struct hash_device_data *device_data,
796 struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, 779 struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
797 int msg_length, u8 *data_buffer, u8 *buffer, u8 *index) 780 int msg_length, u8 *data_buffer, u8 *buffer,
781 u8 *index)
798{ 782{
799 int ret = 0; 783 int ret = 0;
800 u32 count; 784 u32 count;
@@ -809,24 +793,23 @@ static int hash_process_data(struct hash_device_data *device_data,
809 msg_length = 0; 793 msg_length = 0;
810 } else { 794 } else {
811 if (req_ctx->updated) { 795 if (req_ctx->updated) {
812
813 ret = hash_resume_state(device_data, 796 ret = hash_resume_state(device_data,
814 &device_data->state); 797 &device_data->state);
815 memmove(req_ctx->state.buffer, 798 memmove(req_ctx->state.buffer,
816 device_data->state.buffer, 799 device_data->state.buffer,
817 HASH_BLOCK_SIZE / sizeof(u32)); 800 HASH_BLOCK_SIZE / sizeof(u32));
818 if (ret) { 801 if (ret) {
819 dev_err(device_data->dev, "[%s] " 802 dev_err(device_data->dev,
820 "hash_resume_state()" 803 "%s: hash_resume_state() failed!\n",
821 " failed!", __func__); 804 __func__);
822 goto out; 805 goto out;
823 } 806 }
824 } else { 807 } else {
825 ret = init_hash_hw(device_data, ctx); 808 ret = init_hash_hw(device_data, ctx);
826 if (ret) { 809 if (ret) {
827 dev_err(device_data->dev, "[%s] " 810 dev_err(device_data->dev,
828 "init_hash_hw()" 811 "%s: init_hash_hw() failed!\n",
829 " failed!", __func__); 812 __func__);
830 goto out; 813 goto out;
831 } 814 }
832 req_ctx->updated = 1; 815 req_ctx->updated = 1;
@@ -838,22 +821,21 @@ static int hash_process_data(struct hash_device_data *device_data,
838 * HW peripheral, otherwise we first copy data 821 * HW peripheral, otherwise we first copy data
839 * to a local buffer 822 * to a local buffer
840 */ 823 */
841 if ((0 == (((u32)data_buffer) % 4)) 824 if ((0 == (((u32)data_buffer) % 4)) &&
842 && (0 == *index)) 825 (0 == *index))
843 hash_processblock(device_data, 826 hash_processblock(device_data,
844 (const u32 *) 827 (const u32 *)data_buffer,
845 data_buffer, HASH_BLOCK_SIZE); 828 HASH_BLOCK_SIZE);
846 else { 829 else {
847 for (count = 0; count < 830 for (count = 0;
848 (u32)(HASH_BLOCK_SIZE - 831 count < (u32)(HASH_BLOCK_SIZE - *index);
849 *index); 832 count++) {
850 count++) {
851 buffer[*index + count] = 833 buffer[*index + count] =
852 *(data_buffer + count); 834 *(data_buffer + count);
853 } 835 }
854 hash_processblock(device_data, 836 hash_processblock(device_data,
855 (const u32 *)buffer, 837 (const u32 *)buffer,
856 HASH_BLOCK_SIZE); 838 HASH_BLOCK_SIZE);
857 } 839 }
858 hash_incrementlength(req_ctx, HASH_BLOCK_SIZE); 840 hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
859 data_buffer += (HASH_BLOCK_SIZE - *index); 841 data_buffer += (HASH_BLOCK_SIZE - *index);
@@ -865,12 +847,11 @@ static int hash_process_data(struct hash_device_data *device_data,
865 &device_data->state); 847 &device_data->state);
866 848
867 memmove(device_data->state.buffer, 849 memmove(device_data->state.buffer,
868 req_ctx->state.buffer, 850 req_ctx->state.buffer,
869 HASH_BLOCK_SIZE / sizeof(u32)); 851 HASH_BLOCK_SIZE / sizeof(u32));
870 if (ret) { 852 if (ret) {
871 dev_err(device_data->dev, "[%s] " 853 dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
872 "hash_save_state()" 854 __func__);
873 " failed!", __func__);
874 goto out; 855 goto out;
875 } 856 }
876 } 857 }
@@ -898,25 +879,24 @@ static int hash_dma_final(struct ahash_request *req)
898 if (ret) 879 if (ret)
899 return ret; 880 return ret;
900 881
901 dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); 882 dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);
902 883
903 if (req_ctx->updated) { 884 if (req_ctx->updated) {
904 ret = hash_resume_state(device_data, &device_data->state); 885 ret = hash_resume_state(device_data, &device_data->state);
905 886
906 if (ret) { 887 if (ret) {
907 dev_err(device_data->dev, "[%s] hash_resume_state() " 888 dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
908 "failed!", __func__); 889 __func__);
909 goto out; 890 goto out;
910 } 891 }
911
912 } 892 }
913 893
914 if (!req_ctx->updated) { 894 if (!req_ctx->updated) {
915 ret = hash_setconfiguration(device_data, &ctx->config); 895 ret = hash_setconfiguration(device_data, &ctx->config);
916 if (ret) { 896 if (ret) {
917 dev_err(device_data->dev, "[%s] " 897 dev_err(device_data->dev,
918 "hash_setconfiguration() failed!", 898 "%s: hash_setconfiguration() failed!\n",
919 __func__); 899 __func__);
920 goto out; 900 goto out;
921 } 901 }
922 902
@@ -926,9 +906,9 @@ static int hash_dma_final(struct ahash_request *req)
926 HASH_CR_DMAE_MASK); 906 HASH_CR_DMAE_MASK);
927 } else { 907 } else {
928 HASH_SET_BITS(&device_data->base->cr, 908 HASH_SET_BITS(&device_data->base->cr,
929 HASH_CR_DMAE_MASK); 909 HASH_CR_DMAE_MASK);
930 HASH_SET_BITS(&device_data->base->cr, 910 HASH_SET_BITS(&device_data->base->cr,
931 HASH_CR_PRIVN_MASK); 911 HASH_CR_PRIVN_MASK);
932 } 912 }
933 913
934 HASH_INITIALIZE; 914 HASH_INITIALIZE;
@@ -944,16 +924,16 @@ static int hash_dma_final(struct ahash_request *req)
944 /* Store the nents in the dma struct. */ 924 /* Store the nents in the dma struct. */
945 ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL); 925 ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
946 if (!ctx->device->dma.nents) { 926 if (!ctx->device->dma.nents) {
947 dev_err(device_data->dev, "[%s] " 927 dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n",
948 "ctx->device->dma.nents = 0", __func__); 928 __func__);
949 ret = ctx->device->dma.nents; 929 ret = ctx->device->dma.nents;
950 goto out; 930 goto out;
951 } 931 }
952 932
953 bytes_written = hash_dma_write(ctx, req->src, req->nbytes); 933 bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
954 if (bytes_written != req->nbytes) { 934 if (bytes_written != req->nbytes) {
955 dev_err(device_data->dev, "[%s] " 935 dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
956 "hash_dma_write() failed!", __func__); 936 __func__);
957 ret = bytes_written; 937 ret = bytes_written;
958 goto out; 938 goto out;
959 } 939 }
@@ -968,8 +948,8 @@ static int hash_dma_final(struct ahash_request *req)
968 unsigned int keylen = ctx->keylen; 948 unsigned int keylen = ctx->keylen;
969 u8 *key = ctx->key; 949 u8 *key = ctx->key;
970 950
971 dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, 951 dev_dbg(device_data->dev, "%s: keylen: %d\n",
972 ctx->keylen); 952 __func__, ctx->keylen);
973 hash_hw_write_key(device_data, key, keylen); 953 hash_hw_write_key(device_data, key, keylen);
974 } 954 }
975 955
@@ -1004,14 +984,14 @@ static int hash_hw_final(struct ahash_request *req)
1004 if (ret) 984 if (ret)
1005 return ret; 985 return ret;
1006 986
1007 dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx); 987 dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);
1008 988
1009 if (req_ctx->updated) { 989 if (req_ctx->updated) {
1010 ret = hash_resume_state(device_data, &device_data->state); 990 ret = hash_resume_state(device_data, &device_data->state);
1011 991
1012 if (ret) { 992 if (ret) {
1013 dev_err(device_data->dev, "[%s] hash_resume_state() " 993 dev_err(device_data->dev,
1014 "failed!", __func__); 994 "%s: hash_resume_state() failed!\n", __func__);
1015 goto out; 995 goto out;
1016 } 996 }
1017 } else if (req->nbytes == 0 && ctx->keylen == 0) { 997 } else if (req->nbytes == 0 && ctx->keylen == 0) {
@@ -1025,31 +1005,33 @@ static int hash_hw_final(struct ahash_request *req)
1025 ret = get_empty_message_digest(device_data, &zero_hash[0], 1005 ret = get_empty_message_digest(device_data, &zero_hash[0],
1026 &zero_hash_size, &zero_digest); 1006 &zero_hash_size, &zero_digest);
1027 if (!ret && likely(zero_hash_size == ctx->digestsize) && 1007 if (!ret && likely(zero_hash_size == ctx->digestsize) &&
1028 zero_digest) { 1008 zero_digest) {
1029 memcpy(req->result, &zero_hash[0], ctx->digestsize); 1009 memcpy(req->result, &zero_hash[0], ctx->digestsize);
1030 goto out; 1010 goto out;
1031 } else if (!ret && !zero_digest) { 1011 } else if (!ret && !zero_digest) {
1032 dev_dbg(device_data->dev, "[%s] HMAC zero msg with " 1012 dev_dbg(device_data->dev,
1033 "key, continue...", __func__); 1013 "%s: HMAC zero msg with key, continue...\n",
1014 __func__);
1034 } else { 1015 } else {
1035 dev_err(device_data->dev, "[%s] ret=%d, or wrong " 1016 dev_err(device_data->dev,
1036 "digest size? %s", __func__, ret, 1017 "%s: ret=%d, or wrong digest size? %s\n",
1037 (zero_hash_size == ctx->digestsize) ? 1018 __func__, ret,
1038 "true" : "false"); 1019 zero_hash_size == ctx->digestsize ?
1020 "true" : "false");
1039 /* Return error */ 1021 /* Return error */
1040 goto out; 1022 goto out;
1041 } 1023 }
1042 } else if (req->nbytes == 0 && ctx->keylen > 0) { 1024 } else if (req->nbytes == 0 && ctx->keylen > 0) {
1043 dev_err(device_data->dev, "[%s] Empty message with " 1025 dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
1044 "keylength > 0, NOT supported.", __func__); 1026 __func__);
1045 goto out; 1027 goto out;
1046 } 1028 }
1047 1029
1048 if (!req_ctx->updated) { 1030 if (!req_ctx->updated) {
1049 ret = init_hash_hw(device_data, ctx); 1031 ret = init_hash_hw(device_data, ctx);
1050 if (ret) { 1032 if (ret) {
1051 dev_err(device_data->dev, "[%s] init_hash_hw() " 1033 dev_err(device_data->dev,
1052 "failed!", __func__); 1034 "%s: init_hash_hw() failed!\n", __func__);
1053 goto out; 1035 goto out;
1054 } 1036 }
1055 } 1037 }
@@ -1067,8 +1049,8 @@ static int hash_hw_final(struct ahash_request *req)
1067 unsigned int keylen = ctx->keylen; 1049 unsigned int keylen = ctx->keylen;
1068 u8 *key = ctx->key; 1050 u8 *key = ctx->key;
1069 1051
1070 dev_dbg(device_data->dev, "[%s] keylen: %d", __func__, 1052 dev_dbg(device_data->dev, "%s: keylen: %d\n",
1071 ctx->keylen); 1053 __func__, ctx->keylen);
1072 hash_hw_write_key(device_data, key, keylen); 1054 hash_hw_write_key(device_data, key, keylen);
1073 } 1055 }
1074 1056
@@ -1115,10 +1097,8 @@ int hash_hw_update(struct ahash_request *req)
1115 /* Check if ctx->state.length + msg_length 1097 /* Check if ctx->state.length + msg_length
1116 overflows */ 1098 overflows */
1117 if (msg_length > (req_ctx->state.length.low_word + msg_length) && 1099 if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
1118 HASH_HIGH_WORD_MAX_VAL == 1100 HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
1119 req_ctx->state.length.high_word) { 1101 pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
1120 pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!",
1121 __func__);
1122 return -EPERM; 1102 return -EPERM;
1123 } 1103 }
1124 1104
@@ -1133,8 +1113,8 @@ int hash_hw_update(struct ahash_request *req)
1133 data_buffer, buffer, &index); 1113 data_buffer, buffer, &index);
1134 1114
1135 if (ret) { 1115 if (ret) {
1136 dev_err(device_data->dev, "[%s] hash_internal_hw_" 1116 dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n",
1137 "update() failed!", __func__); 1117 __func__);
1138 goto out; 1118 goto out;
1139 } 1119 }
1140 1120
@@ -1142,9 +1122,8 @@ int hash_hw_update(struct ahash_request *req)
1142 } 1122 }
1143 1123
1144 req_ctx->state.index = index; 1124 req_ctx->state.index = index;
1145 dev_dbg(device_data->dev, "[%s] indata length=%d, bin=%d))", 1125 dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
1146 __func__, req_ctx->state.index, 1126 __func__, req_ctx->state.index, req_ctx->state.bit_index);
1147 req_ctx->state.bit_index);
1148 1127
1149out: 1128out:
1150 release_hash_device(device_data); 1129 release_hash_device(device_data);
@@ -1158,23 +1137,23 @@ out:
1158 * @device_state: The state to be restored in the hash hardware 1137 * @device_state: The state to be restored in the hash hardware
1159 */ 1138 */
1160int hash_resume_state(struct hash_device_data *device_data, 1139int hash_resume_state(struct hash_device_data *device_data,
1161 const struct hash_state *device_state) 1140 const struct hash_state *device_state)
1162{ 1141{
1163 u32 temp_cr; 1142 u32 temp_cr;
1164 s32 count; 1143 s32 count;
1165 int hash_mode = HASH_OPER_MODE_HASH; 1144 int hash_mode = HASH_OPER_MODE_HASH;
1166 1145
1167 if (NULL == device_state) { 1146 if (NULL == device_state) {
1168 dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", 1147 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1169 __func__); 1148 __func__);
1170 return -EPERM; 1149 return -EPERM;
1171 } 1150 }
1172 1151
1173 /* Check correctness of index and length members */ 1152 /* Check correctness of index and length members */
1174 if (device_state->index > HASH_BLOCK_SIZE 1153 if (device_state->index > HASH_BLOCK_SIZE ||
1175 || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { 1154 (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
1176 dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", 1155 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1177 __func__); 1156 __func__);
1178 return -EPERM; 1157 return -EPERM;
1179 } 1158 }
1180 1159
@@ -1198,7 +1177,7 @@ int hash_resume_state(struct hash_device_data *device_data,
1198 break; 1177 break;
1199 1178
1200 writel_relaxed(device_state->csr[count], 1179 writel_relaxed(device_state->csr[count],
1201 &device_data->base->csrx[count]); 1180 &device_data->base->csrx[count]);
1202 } 1181 }
1203 1182
1204 writel_relaxed(device_state->csfull, &device_data->base->csfull); 1183 writel_relaxed(device_state->csfull, &device_data->base->csfull);
@@ -1216,15 +1195,15 @@ int hash_resume_state(struct hash_device_data *device_data,
1216 * @device_state: The strucure where the hardware state should be saved. 1195 * @device_state: The strucure where the hardware state should be saved.
1217 */ 1196 */
1218int hash_save_state(struct hash_device_data *device_data, 1197int hash_save_state(struct hash_device_data *device_data,
1219 struct hash_state *device_state) 1198 struct hash_state *device_state)
1220{ 1199{
1221 u32 temp_cr; 1200 u32 temp_cr;
1222 u32 count; 1201 u32 count;
1223 int hash_mode = HASH_OPER_MODE_HASH; 1202 int hash_mode = HASH_OPER_MODE_HASH;
1224 1203
1225 if (NULL == device_state) { 1204 if (NULL == device_state) {
1226 dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!", 1205 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1227 __func__); 1206 __func__);
1228 return -ENOTSUPP; 1207 return -ENOTSUPP;
1229 } 1208 }
1230 1209
@@ -1270,20 +1249,18 @@ int hash_save_state(struct hash_device_data *device_data,
1270int hash_check_hw(struct hash_device_data *device_data) 1249int hash_check_hw(struct hash_device_data *device_data)
1271{ 1250{
1272 /* Checking Peripheral Ids */ 1251 /* Checking Peripheral Ids */
1273 if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) 1252 if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
1274 && HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) 1253 HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
1275 && HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) 1254 HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
1276 && HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) 1255 HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
1277 && HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) 1256 HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
1278 && HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) 1257 HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
1279 && HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) 1258 HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
1280 && HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3) 1259 HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
1281 ) {
1282 return 0; 1260 return 0;
1283 } 1261 }
1284 1262
1285 dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!", 1263 dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
1286 __func__);
1287 return -ENOTSUPP; 1264 return -ENOTSUPP;
1288} 1265}
1289 1266
@@ -1294,14 +1271,14 @@ int hash_check_hw(struct hash_device_data *device_data)
1294 * @algorithm: The algorithm in use. 1271 * @algorithm: The algorithm in use.
1295 */ 1272 */
1296void hash_get_digest(struct hash_device_data *device_data, 1273void hash_get_digest(struct hash_device_data *device_data,
1297 u8 *digest, int algorithm) 1274 u8 *digest, int algorithm)
1298{ 1275{
1299 u32 temp_hx_val, count; 1276 u32 temp_hx_val, count;
1300 int loop_ctr; 1277 int loop_ctr;
1301 1278
1302 if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) { 1279 if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
1303 dev_err(device_data->dev, "[%s] Incorrect algorithm %d", 1280 dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
1304 __func__, algorithm); 1281 __func__, algorithm);
1305 return; 1282 return;
1306 } 1283 }
1307 1284
@@ -1310,8 +1287,8 @@ void hash_get_digest(struct hash_device_data *device_data,
1310 else 1287 else
1311 loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32); 1288 loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
1312 1289
1313 dev_dbg(device_data->dev, "[%s] digest array:(0x%x)", 1290 dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n",
1314 __func__, (u32) digest); 1291 __func__, (u32) digest);
1315 1292
1316 /* Copy result into digest array */ 1293 /* Copy result into digest array */
1317 for (count = 0; count < loop_ctr; count++) { 1294 for (count = 0; count < loop_ctr; count++) {
@@ -1337,8 +1314,7 @@ static int ahash_update(struct ahash_request *req)
1337 /* Skip update for DMA, all data will be passed to DMA in final */ 1314 /* Skip update for DMA, all data will be passed to DMA in final */
1338 1315
1339 if (ret) { 1316 if (ret) {
1340 pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!", 1317 pr_err("%s: hash_hw_update() failed!\n", __func__);
1341 __func__);
1342 } 1318 }
1343 1319
1344 return ret; 1320 return ret;
@@ -1353,7 +1329,7 @@ static int ahash_final(struct ahash_request *req)
1353 int ret = 0; 1329 int ret = 0;
1354 struct hash_req_ctx *req_ctx = ahash_request_ctx(req); 1330 struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1355 1331
1356 pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes); 1332 pr_debug("%s: data size: %d\n", __func__, req->nbytes);
1357 1333
1358 if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode) 1334 if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
1359 ret = hash_dma_final(req); 1335 ret = hash_dma_final(req);
@@ -1361,15 +1337,14 @@ static int ahash_final(struct ahash_request *req)
1361 ret = hash_hw_final(req); 1337 ret = hash_hw_final(req);
1362 1338
1363 if (ret) { 1339 if (ret) {
1364 pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed", 1340 pr_err("%s: hash_hw/dma_final() failed\n", __func__);
1365 __func__);
1366 } 1341 }
1367 1342
1368 return ret; 1343 return ret;
1369} 1344}
1370 1345
1371static int hash_setkey(struct crypto_ahash *tfm, 1346static int hash_setkey(struct crypto_ahash *tfm,
1372 const u8 *key, unsigned int keylen, int alg) 1347 const u8 *key, unsigned int keylen, int alg)
1373{ 1348{
1374 int ret = 0; 1349 int ret = 0;
1375 struct hash_ctx *ctx = crypto_ahash_ctx(tfm); 1350 struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
@@ -1379,8 +1354,8 @@ static int hash_setkey(struct crypto_ahash *tfm,
1379 */ 1354 */
1380 ctx->key = kmemdup(key, keylen, GFP_KERNEL); 1355 ctx->key = kmemdup(key, keylen, GFP_KERNEL);
1381 if (!ctx->key) { 1356 if (!ctx->key) {
1382 pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key " 1357 pr_err("%s: Failed to allocate ctx->key for %d\n",
1383 "for %d\n", __func__, alg); 1358 __func__, alg);
1384 return -ENOMEM; 1359 return -ENOMEM;
1385 } 1360 }
1386 ctx->keylen = keylen; 1361 ctx->keylen = keylen;
@@ -1501,13 +1476,13 @@ out:
1501} 1476}
1502 1477
1503static int hmac_sha1_setkey(struct crypto_ahash *tfm, 1478static int hmac_sha1_setkey(struct crypto_ahash *tfm,
1504 const u8 *key, unsigned int keylen) 1479 const u8 *key, unsigned int keylen)
1505{ 1480{
1506 return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1); 1481 return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
1507} 1482}
1508 1483
1509static int hmac_sha256_setkey(struct crypto_ahash *tfm, 1484static int hmac_sha256_setkey(struct crypto_ahash *tfm,
1510 const u8 *key, unsigned int keylen) 1485 const u8 *key, unsigned int keylen)
1511{ 1486{
1512 return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256); 1487 return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
1513} 1488}
@@ -1528,7 +1503,7 @@ static int hash_cra_init(struct crypto_tfm *tfm)
1528 hash); 1503 hash);
1529 1504
1530 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1505 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1531 sizeof(struct hash_req_ctx)); 1506 sizeof(struct hash_req_ctx));
1532 1507
1533 ctx->config.data_format = HASH_DATA_8_BITS; 1508 ctx->config.data_format = HASH_DATA_8_BITS;
1534 ctx->config.algorithm = hash_alg->conf.algorithm; 1509 ctx->config.algorithm = hash_alg->conf.algorithm;
@@ -1541,98 +1516,97 @@ static int hash_cra_init(struct crypto_tfm *tfm)
1541 1516
1542static struct hash_algo_template hash_algs[] = { 1517static struct hash_algo_template hash_algs[] = {
1543 { 1518 {
1544 .conf.algorithm = HASH_ALGO_SHA1, 1519 .conf.algorithm = HASH_ALGO_SHA1,
1545 .conf.oper_mode = HASH_OPER_MODE_HASH, 1520 .conf.oper_mode = HASH_OPER_MODE_HASH,
1546 .hash = { 1521 .hash = {
1547 .init = hash_init, 1522 .init = hash_init,
1548 .update = ahash_update, 1523 .update = ahash_update,
1549 .final = ahash_final, 1524 .final = ahash_final,
1550 .digest = ahash_sha1_digest, 1525 .digest = ahash_sha1_digest,
1551 .halg.digestsize = SHA1_DIGEST_SIZE, 1526 .halg.digestsize = SHA1_DIGEST_SIZE,
1552 .halg.statesize = sizeof(struct hash_ctx), 1527 .halg.statesize = sizeof(struct hash_ctx),
1553 .halg.base = { 1528 .halg.base = {
1554 .cra_name = "sha1", 1529 .cra_name = "sha1",
1555 .cra_driver_name = "sha1-ux500", 1530 .cra_driver_name = "sha1-ux500",
1556 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1531 .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
1557 CRYPTO_ALG_ASYNC, 1532 CRYPTO_ALG_ASYNC),
1558 .cra_blocksize = SHA1_BLOCK_SIZE, 1533 .cra_blocksize = SHA1_BLOCK_SIZE,
1559 .cra_ctxsize = sizeof(struct hash_ctx), 1534 .cra_ctxsize = sizeof(struct hash_ctx),
1560 .cra_init = hash_cra_init, 1535 .cra_init = hash_cra_init,
1561 .cra_module = THIS_MODULE, 1536 .cra_module = THIS_MODULE,
1562 } 1537 }
1563 } 1538 }
1564 }, 1539 },
1565 { 1540 {
1566 .conf.algorithm = HASH_ALGO_SHA256, 1541 .conf.algorithm = HASH_ALGO_SHA256,
1567 .conf.oper_mode = HASH_OPER_MODE_HASH, 1542 .conf.oper_mode = HASH_OPER_MODE_HASH,
1568 .hash = { 1543 .hash = {
1569 .init = hash_init, 1544 .init = hash_init,
1570 .update = ahash_update, 1545 .update = ahash_update,
1571 .final = ahash_final, 1546 .final = ahash_final,
1572 .digest = ahash_sha256_digest, 1547 .digest = ahash_sha256_digest,
1573 .halg.digestsize = SHA256_DIGEST_SIZE, 1548 .halg.digestsize = SHA256_DIGEST_SIZE,
1574 .halg.statesize = sizeof(struct hash_ctx), 1549 .halg.statesize = sizeof(struct hash_ctx),
1575 .halg.base = { 1550 .halg.base = {
1576 .cra_name = "sha256", 1551 .cra_name = "sha256",
1577 .cra_driver_name = "sha256-ux500", 1552 .cra_driver_name = "sha256-ux500",
1578 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1553 .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
1579 CRYPTO_ALG_ASYNC, 1554 CRYPTO_ALG_ASYNC),
1580 .cra_blocksize = SHA256_BLOCK_SIZE, 1555 .cra_blocksize = SHA256_BLOCK_SIZE,
1581 .cra_ctxsize = sizeof(struct hash_ctx), 1556 .cra_ctxsize = sizeof(struct hash_ctx),
1582 .cra_type = &crypto_ahash_type, 1557 .cra_type = &crypto_ahash_type,
1583 .cra_init = hash_cra_init, 1558 .cra_init = hash_cra_init,
1584 .cra_module = THIS_MODULE, 1559 .cra_module = THIS_MODULE,
1585 }
1586 } 1560 }
1587 1561 }
1588 }, 1562 },
1589 { 1563 {
1590 .conf.algorithm = HASH_ALGO_SHA1, 1564 .conf.algorithm = HASH_ALGO_SHA1,
1591 .conf.oper_mode = HASH_OPER_MODE_HMAC, 1565 .conf.oper_mode = HASH_OPER_MODE_HMAC,
1592 .hash = { 1566 .hash = {
1593 .init = hash_init, 1567 .init = hash_init,
1594 .update = ahash_update, 1568 .update = ahash_update,
1595 .final = ahash_final, 1569 .final = ahash_final,
1596 .digest = hmac_sha1_digest, 1570 .digest = hmac_sha1_digest,
1597 .setkey = hmac_sha1_setkey, 1571 .setkey = hmac_sha1_setkey,
1598 .halg.digestsize = SHA1_DIGEST_SIZE, 1572 .halg.digestsize = SHA1_DIGEST_SIZE,
1599 .halg.statesize = sizeof(struct hash_ctx), 1573 .halg.statesize = sizeof(struct hash_ctx),
1600 .halg.base = { 1574 .halg.base = {
1601 .cra_name = "hmac(sha1)", 1575 .cra_name = "hmac(sha1)",
1602 .cra_driver_name = "hmac-sha1-ux500", 1576 .cra_driver_name = "hmac-sha1-ux500",
1603 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1577 .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
1604 CRYPTO_ALG_ASYNC, 1578 CRYPTO_ALG_ASYNC),
1605 .cra_blocksize = SHA1_BLOCK_SIZE, 1579 .cra_blocksize = SHA1_BLOCK_SIZE,
1606 .cra_ctxsize = sizeof(struct hash_ctx), 1580 .cra_ctxsize = sizeof(struct hash_ctx),
1607 .cra_type = &crypto_ahash_type, 1581 .cra_type = &crypto_ahash_type,
1608 .cra_init = hash_cra_init, 1582 .cra_init = hash_cra_init,
1609 .cra_module = THIS_MODULE, 1583 .cra_module = THIS_MODULE,
1610 }
1611 } 1584 }
1585 }
1612 }, 1586 },
1613 { 1587 {
1614 .conf.algorithm = HASH_ALGO_SHA256, 1588 .conf.algorithm = HASH_ALGO_SHA256,
1615 .conf.oper_mode = HASH_OPER_MODE_HMAC, 1589 .conf.oper_mode = HASH_OPER_MODE_HMAC,
1616 .hash = { 1590 .hash = {
1617 .init = hash_init, 1591 .init = hash_init,
1618 .update = ahash_update, 1592 .update = ahash_update,
1619 .final = ahash_final, 1593 .final = ahash_final,
1620 .digest = hmac_sha256_digest, 1594 .digest = hmac_sha256_digest,
1621 .setkey = hmac_sha256_setkey, 1595 .setkey = hmac_sha256_setkey,
1622 .halg.digestsize = SHA256_DIGEST_SIZE, 1596 .halg.digestsize = SHA256_DIGEST_SIZE,
1623 .halg.statesize = sizeof(struct hash_ctx), 1597 .halg.statesize = sizeof(struct hash_ctx),
1624 .halg.base = { 1598 .halg.base = {
1625 .cra_name = "hmac(sha256)", 1599 .cra_name = "hmac(sha256)",
1626 .cra_driver_name = "hmac-sha256-ux500", 1600 .cra_driver_name = "hmac-sha256-ux500",
1627 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1601 .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
1628 CRYPTO_ALG_ASYNC, 1602 CRYPTO_ALG_ASYNC),
1629 .cra_blocksize = SHA256_BLOCK_SIZE, 1603 .cra_blocksize = SHA256_BLOCK_SIZE,
1630 .cra_ctxsize = sizeof(struct hash_ctx), 1604 .cra_ctxsize = sizeof(struct hash_ctx),
1631 .cra_type = &crypto_ahash_type, 1605 .cra_type = &crypto_ahash_type,
1632 .cra_init = hash_cra_init, 1606 .cra_init = hash_cra_init,
1633 .cra_module = THIS_MODULE, 1607 .cra_module = THIS_MODULE,
1634 }
1635 } 1608 }
1609 }
1636 } 1610 }
1637}; 1611};
1638 1612
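The array above exposes the hardware hash engine under both the generic algorithm names ("sha1", "sha256", "hmac(sha1)", "hmac(sha256)") and the ux500-specific driver names. For orientation, here is a minimal, hypothetical in-kernel consumer that requests the "sha256-ux500" implementation explicitly; test_sha256_ux500(), ahash_complete() and struct ahash_result are illustrative names that are not part of this patch, and the completion-based wait is needed because the algorithms are flagged CRYPTO_ALG_ASYNC. This is a sketch against the ahash API of kernels of this era, not code from the driver.

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct ahash_result {
	struct completion completion;
	int err;
};

/* Completion callback; signature matches crypto_completion_t of this era. */
static void ahash_complete(struct crypto_async_request *req, int err)
{
	struct ahash_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

/* 'data' should be kmalloc()ed (not stack) so it is safe to map for DMA. */
static int test_sha256_ux500(const u8 *data, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct ahash_result result;
	int ret;

	/* Ask for the hardware implementation by its cra_driver_name. */
	tfm = crypto_alloc_ahash("sha256-ux500", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   ahash_complete, &result);

	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, digest, len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* Asynchronous completion: wait for the callback. */
		wait_for_completion(&result.completion);
		ret = result.err;
	}

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}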
@@ -1649,7 +1623,7 @@ static int ahash_algs_register_all(struct hash_device_data *device_data)
1649 ret = crypto_register_ahash(&hash_algs[i].hash); 1623 ret = crypto_register_ahash(&hash_algs[i].hash);
1650 if (ret) { 1624 if (ret) {
1651 count = i; 1625 count = i;
1652 dev_err(device_data->dev, "[%s] alg registration failed", 1626 dev_err(device_data->dev, "%s: alg registration failed\n",
1653 hash_algs[i].hash.halg.base.cra_driver_name); 1627 hash_algs[i].hash.halg.base.cra_driver_name);
1654 goto unreg; 1628 goto unreg;
1655 } 1629 }
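The hunk above only reworks the error message, but the surrounding function is a good example of the register-then-unwind idiom used throughout the crypto drivers: register each algorithm in order and, on the first failure, unregister everything that was already accepted. A simplified sketch of that idiom follows, assuming a plain array of struct ahash_alg rather than the driver's own wrapper type; register_ahash_array() is a hypothetical name, not the function body from this patch.

#include <crypto/internal/hash.h>
#include <linux/device.h>

static int register_ahash_array(struct device *dev,
				struct ahash_alg *algs, int num)
{
	int i, ret;

	for (i = 0; i < num; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret) {
			dev_err(dev, "%s: alg registration failed\n",
				algs[i].halg.base.cra_driver_name);
			goto unreg;
		}
	}
	return 0;

unreg:
	/* Roll back everything registered before the failure. */
	while (--i >= 0)
		crypto_unregister_ahash(&algs[i]);
	return ret;
}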
@@ -1683,9 +1657,8 @@ static int ux500_hash_probe(struct platform_device *pdev)
1683 struct hash_device_data *device_data; 1657 struct hash_device_data *device_data;
1684 struct device *dev = &pdev->dev; 1658 struct device *dev = &pdev->dev;
1685 1659
1686 device_data = kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC); 1660 device_data = kzalloc(sizeof(*device_data), GFP_ATOMIC);
1687 if (!device_data) { 1661 if (!device_data) {
1688 dev_dbg(dev, "[%s] kzalloc() failed!", __func__);
1689 ret = -ENOMEM; 1662 ret = -ENOMEM;
1690 goto out; 1663 goto out;
1691 } 1664 }
@@ -1695,14 +1668,14 @@ static int ux500_hash_probe(struct platform_device *pdev)
1695 1668
1696 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1669 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1697 if (!res) { 1670 if (!res) {
1698 dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__); 1671 dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
1699 ret = -ENODEV; 1672 ret = -ENODEV;
1700 goto out_kfree; 1673 goto out_kfree;
1701 } 1674 }
1702 1675
1703 res = request_mem_region(res->start, resource_size(res), pdev->name); 1676 res = request_mem_region(res->start, resource_size(res), pdev->name);
1704 if (res == NULL) { 1677 if (res == NULL) {
1705 dev_dbg(dev, "[%s] request_mem_region() failed!", __func__); 1678 dev_dbg(dev, "%s: request_mem_region() failed!\n", __func__);
1706 ret = -EBUSY; 1679 ret = -EBUSY;
1707 goto out_kfree; 1680 goto out_kfree;
1708 } 1681 }
@@ -1710,8 +1683,7 @@ static int ux500_hash_probe(struct platform_device *pdev)
1710 device_data->phybase = res->start; 1683 device_data->phybase = res->start;
1711 device_data->base = ioremap(res->start, resource_size(res)); 1684 device_data->base = ioremap(res->start, resource_size(res));
1712 if (!device_data->base) { 1685 if (!device_data->base) {
1713 dev_err(dev, "[%s] ioremap() failed!", 1686 dev_err(dev, "%s: ioremap() failed!\n", __func__);
1714 __func__);
1715 ret = -ENOMEM; 1687 ret = -ENOMEM;
1716 goto out_free_mem; 1688 goto out_free_mem;
1717 } 1689 }
@@ -1721,7 +1693,7 @@ static int ux500_hash_probe(struct platform_device *pdev)
1721 /* Enable power for HASH1 hardware block */ 1693 /* Enable power for HASH1 hardware block */
1722 device_data->regulator = regulator_get(dev, "v-ape"); 1694 device_data->regulator = regulator_get(dev, "v-ape");
1723 if (IS_ERR(device_data->regulator)) { 1695 if (IS_ERR(device_data->regulator)) {
1724 dev_err(dev, "[%s] regulator_get() failed!", __func__); 1696 dev_err(dev, "%s: regulator_get() failed!\n", __func__);
1725 ret = PTR_ERR(device_data->regulator); 1697 ret = PTR_ERR(device_data->regulator);
1726 device_data->regulator = NULL; 1698 device_data->regulator = NULL;
1727 goto out_unmap; 1699 goto out_unmap;
@@ -1730,27 +1702,27 @@ static int ux500_hash_probe(struct platform_device *pdev)
1730 /* Enable the clock for HASH1 hardware block */ 1702 /* Enable the clock for HASH1 hardware block */
1731 device_data->clk = clk_get(dev, NULL); 1703 device_data->clk = clk_get(dev, NULL);
1732 if (IS_ERR(device_data->clk)) { 1704 if (IS_ERR(device_data->clk)) {
1733 dev_err(dev, "[%s] clk_get() failed!", __func__); 1705 dev_err(dev, "%s: clk_get() failed!\n", __func__);
1734 ret = PTR_ERR(device_data->clk); 1706 ret = PTR_ERR(device_data->clk);
1735 goto out_regulator; 1707 goto out_regulator;
1736 } 1708 }
1737 1709
1738 ret = clk_prepare(device_data->clk); 1710 ret = clk_prepare(device_data->clk);
1739 if (ret) { 1711 if (ret) {
1740 dev_err(dev, "[%s] clk_prepare() failed!", __func__); 1712 dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
1741 goto out_clk; 1713 goto out_clk;
1742 } 1714 }
1743 1715
1744 /* Enable device power (and clock) */ 1716 /* Enable device power (and clock) */
1745 ret = hash_enable_power(device_data, false); 1717 ret = hash_enable_power(device_data, false);
1746 if (ret) { 1718 if (ret) {
1747 dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); 1719 dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
1748 goto out_clk_unprepare; 1720 goto out_clk_unprepare;
1749 } 1721 }
1750 1722
1751 ret = hash_check_hw(device_data); 1723 ret = hash_check_hw(device_data);
1752 if (ret) { 1724 if (ret) {
1753 dev_err(dev, "[%s] hash_check_hw() failed!", __func__); 1725 dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
1754 goto out_power; 1726 goto out_power;
1755 } 1727 }
1756 1728
@@ -1766,8 +1738,8 @@ static int ux500_hash_probe(struct platform_device *pdev)
1766 1738
1767 ret = ahash_algs_register_all(device_data); 1739 ret = ahash_algs_register_all(device_data);
1768 if (ret) { 1740 if (ret) {
1769 dev_err(dev, "[%s] ahash_algs_register_all() " 1741 dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
1770 "failed!", __func__); 1742 __func__);
1771 goto out_power; 1743 goto out_power;
1772 } 1744 }
1773 1745
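Across the probe hunks above, each acquired resource gets a matching goto label so that a failure at any stage releases exactly what was taken so far, in reverse order. A condensed, hypothetical sketch of that acquire-in-order / release-in-reverse idiom follows; example_probe() and the shortened label names are illustrative, not the driver's actual code, and the power/register steps are elided.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct clk *clk;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!request_mem_region(res->start, resource_size(res), pdev->name))
		return -EBUSY;

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		ret = -ENOMEM;
		goto out_release;
	}

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto out_unmap;
	}

	ret = clk_prepare(clk);
	if (ret)
		goto out_clk_put;

	/* ... enable power, sanity-check the IP, register algorithms ... */

	return 0;

out_clk_put:
	clk_put(clk);
out_unmap:
	iounmap(base);
out_release:
	release_mem_region(res->start, resource_size(res));
	return ret;
}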
@@ -1810,8 +1782,7 @@ static int ux500_hash_remove(struct platform_device *pdev)
1810 1782
1811 device_data = platform_get_drvdata(pdev); 1783 device_data = platform_get_drvdata(pdev);
1812 if (!device_data) { 1784 if (!device_data) {
1813 dev_err(dev, "[%s]: platform_get_drvdata() failed!", 1785 dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
1814 __func__);
1815 return -ENOMEM; 1786 return -ENOMEM;
1816 } 1787 }
1817 1788
@@ -1841,7 +1812,7 @@ static int ux500_hash_remove(struct platform_device *pdev)
1841 ahash_algs_unregister_all(device_data); 1812 ahash_algs_unregister_all(device_data);
1842 1813
1843 if (hash_disable_power(device_data, false)) 1814 if (hash_disable_power(device_data, false))
1844 dev_err(dev, "[%s]: hash_disable_power() failed", 1815 dev_err(dev, "%s: hash_disable_power() failed\n",
1845 __func__); 1816 __func__);
1846 1817
1847 clk_unprepare(device_data->clk); 1818 clk_unprepare(device_data->clk);
@@ -1870,8 +1841,8 @@ static void ux500_hash_shutdown(struct platform_device *pdev)
1870 1841
1871 device_data = platform_get_drvdata(pdev); 1842 device_data = platform_get_drvdata(pdev);
1872 if (!device_data) { 1843 if (!device_data) {
1873 dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", 1844 dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
1874 __func__); 1845 __func__);
1875 return; 1846 return;
1876 } 1847 }
1877 1848
@@ -1880,8 +1851,8 @@ static void ux500_hash_shutdown(struct platform_device *pdev)
1880 /* current_ctx allocates a device, NULL = unallocated */ 1851 /* current_ctx allocates a device, NULL = unallocated */
1881 if (!device_data->current_ctx) { 1852 if (!device_data->current_ctx) {
1882 if (down_trylock(&driver_data.device_allocation)) 1853 if (down_trylock(&driver_data.device_allocation))
1883 dev_dbg(&pdev->dev, "[%s]: Cryp still in use!" 1854 dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n",
1884 "Shutting down anyway...", __func__); 1855 __func__);
1885 /** 1856 /**
1886 * (Allocate the device) 1857 * (Allocate the device)
1887 * Need to set this to non-null (dummy) value, 1858 * Need to set this to non-null (dummy) value,
@@ -1906,8 +1877,8 @@ static void ux500_hash_shutdown(struct platform_device *pdev)
1906 release_mem_region(res->start, resource_size(res)); 1877 release_mem_region(res->start, resource_size(res));
1907 1878
1908 if (hash_disable_power(device_data, false)) 1879 if (hash_disable_power(device_data, false))
1909 dev_err(&pdev->dev, "[%s] hash_disable_power() failed", 1880 dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
1910 __func__); 1881 __func__);
1911} 1882}
1912 1883
1913/** 1884/**
@@ -1922,7 +1893,7 @@ static int ux500_hash_suspend(struct device *dev)
1922 1893
1923 device_data = dev_get_drvdata(dev); 1894 device_data = dev_get_drvdata(dev);
1924 if (!device_data) { 1895 if (!device_data) {
1925 dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__); 1896 dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
1926 return -ENOMEM; 1897 return -ENOMEM;
1927 } 1898 }
1928 1899
@@ -1933,15 +1904,16 @@ static int ux500_hash_suspend(struct device *dev)
1933 1904
1934 if (device_data->current_ctx == ++temp_ctx) { 1905 if (device_data->current_ctx == ++temp_ctx) {
1935 if (down_interruptible(&driver_data.device_allocation)) 1906 if (down_interruptible(&driver_data.device_allocation))
1936 dev_dbg(dev, "[%s]: down_interruptible() failed", 1907 dev_dbg(dev, "%s: down_interruptible() failed\n",
1937 __func__); 1908 __func__);
1938 ret = hash_disable_power(device_data, false); 1909 ret = hash_disable_power(device_data, false);
1939 1910
1940 } else 1911 } else {
1941 ret = hash_disable_power(device_data, true); 1912 ret = hash_disable_power(device_data, true);
1913 }
1942 1914
1943 if (ret) 1915 if (ret)
1944 dev_err(dev, "[%s]: hash_disable_power()", __func__); 1916 dev_err(dev, "%s: hash_disable_power()\n", __func__);
1945 1917
1946 return ret; 1918 return ret;
1947} 1919}
@@ -1958,7 +1930,7 @@ static int ux500_hash_resume(struct device *dev)
1958 1930
1959 device_data = dev_get_drvdata(dev); 1931 device_data = dev_get_drvdata(dev);
1960 if (!device_data) { 1932 if (!device_data) {
1961 dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__); 1933 dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
1962 return -ENOMEM; 1934 return -ENOMEM;
1963 } 1935 }
1964 1936
@@ -1973,7 +1945,7 @@ static int ux500_hash_resume(struct device *dev)
1973 ret = hash_enable_power(device_data, true); 1945 ret = hash_enable_power(device_data, true);
1974 1946
1975 if (ret) 1947 if (ret)
1976 dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); 1948 dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
1977 1949
1978 return ret; 1950 return ret;
1979} 1951}
@@ -1981,8 +1953,8 @@ static int ux500_hash_resume(struct device *dev)
1981static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume); 1953static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
1982 1954
1983static const struct of_device_id ux500_hash_match[] = { 1955static const struct of_device_id ux500_hash_match[] = {
1984 { .compatible = "stericsson,ux500-hash" }, 1956 { .compatible = "stericsson,ux500-hash" },
1985 { }, 1957 { },
1986}; 1958};
1987 1959
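For context on the SIMPLE_DEV_PM_OPS() line above: with CONFIG_PM_SLEEP enabled it is roughly equivalent to spelling out a struct dev_pm_ops that routes every system-sleep transition through the two callbacks. The sketch below approximates that expansion and uses a hypothetical name; it is not code from this patch. The struct platform_driver that follows then typically hooks these in via .driver.pm and .driver.of_match_table.

#include <linux/pm.h>

/* Approximate expansion of SIMPLE_DEV_PM_OPS(ux500_hash_pm, suspend, resume). */
static const struct dev_pm_ops ux500_hash_pm_expanded = {
	.suspend  = ux500_hash_suspend,
	.resume   = ux500_hash_resume,
	.freeze   = ux500_hash_suspend,
	.thaw     = ux500_hash_resume,
	.poweroff = ux500_hash_suspend,
	.restore  = ux500_hash_resume,
};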
1988static struct platform_driver hash_driver = { 1960static struct platform_driver hash_driver = {