about summary refs log tree commit diff stats
path: root/drivers/crypto
diff options
context:
space:
mode:
author: Yuan Kang <Yuan.Kang@freescale.com> 2012-06-22 20:48:46 -0400
committer: Herbert Xu <herbert@gondor.apana.org.au> 2012-06-27 02:42:05 -0400
commita299c837040bb47810b9d287dfe7deed6a254995 (patch)
tree0993aa7a03473e04576785e3450bd6b50ff815c2 /drivers/crypto
parent4c1ec1f9301549db229bc6dce916f8a99d1f82d6 (diff)
crypto: caam - link_tbl rename
- rename scatterlist and link_tbl functions - link_tbl changed to sec4_sg - sg_to_link_tbl_one changed to dma_to_sec4_sg_one, since no scatterlist is used Signed-off-by: Yuan Kang <Yuan.Kang@freescale.com> Signed-off-by: Kim Phillips <kim.phillips@freescale.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--drivers/crypto/caam/caamalg.c226
-rw-r--r--drivers/crypto/caam/desc.h6
-rw-r--r--drivers/crypto/caam/sg_sw_sec4.h (renamed from drivers/crypto/caam/sg_link_tbl.h)42
3 files changed, 137 insertions, 137 deletions
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ea0295d137a7..5ab480a12b51 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -51,7 +51,7 @@
51#include "desc_constr.h" 51#include "desc_constr.h"
52#include "jr.h" 52#include "jr.h"
53#include "error.h" 53#include "error.h"
54#include "sg_link_tbl.h" 54#include "sg_sw_sec4.h"
55#include "key_gen.h" 55#include "key_gen.h"
56 56
57/* 57/*
@@ -658,8 +658,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
658 * @dst_nents: number of segments in output scatterlist 658 * @dst_nents: number of segments in output scatterlist
659 * @iv_dma: dma address of iv for checking continuity and link table 659 * @iv_dma: dma address of iv for checking continuity and link table
660 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 660 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
661 * @link_tbl_bytes: length of dma mapped link_tbl space 661 * @sec4_sg_bytes: length of dma mapped sec4_sg space
662 * @link_tbl_dma: bus physical mapped address of h/w link table 662 * @sec4_sg_dma: bus physical mapped address of h/w link table
663 * @hw_desc: the h/w job descriptor followed by any referenced link tables 663 * @hw_desc: the h/w job descriptor followed by any referenced link tables
664 */ 664 */
665struct aead_edesc { 665struct aead_edesc {
@@ -667,9 +667,9 @@ struct aead_edesc {
667 int src_nents; 667 int src_nents;
668 int dst_nents; 668 int dst_nents;
669 dma_addr_t iv_dma; 669 dma_addr_t iv_dma;
670 int link_tbl_bytes; 670 int sec4_sg_bytes;
671 dma_addr_t link_tbl_dma; 671 dma_addr_t sec4_sg_dma;
672 struct link_tbl_entry *link_tbl; 672 struct sec4_sg_entry *sec4_sg;
673 u32 hw_desc[0]; 673 u32 hw_desc[0];
674}; 674};
675 675
@@ -679,24 +679,24 @@ struct aead_edesc {
679 * @dst_nents: number of segments in output scatterlist 679 * @dst_nents: number of segments in output scatterlist
680 * @iv_dma: dma address of iv for checking continuity and link table 680 * @iv_dma: dma address of iv for checking continuity and link table
681 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 681 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
682 * @link_tbl_bytes: length of dma mapped link_tbl space 682 * @sec4_sg_bytes: length of dma mapped sec4_sg space
683 * @link_tbl_dma: bus physical mapped address of h/w link table 683 * @sec4_sg_dma: bus physical mapped address of h/w link table
684 * @hw_desc: the h/w job descriptor followed by any referenced link tables 684 * @hw_desc: the h/w job descriptor followed by any referenced link tables
685 */ 685 */
686struct ablkcipher_edesc { 686struct ablkcipher_edesc {
687 int src_nents; 687 int src_nents;
688 int dst_nents; 688 int dst_nents;
689 dma_addr_t iv_dma; 689 dma_addr_t iv_dma;
690 int link_tbl_bytes; 690 int sec4_sg_bytes;
691 dma_addr_t link_tbl_dma; 691 dma_addr_t sec4_sg_dma;
692 struct link_tbl_entry *link_tbl; 692 struct sec4_sg_entry *sec4_sg;
693 u32 hw_desc[0]; 693 u32 hw_desc[0];
694}; 694};
695 695
696static void caam_unmap(struct device *dev, struct scatterlist *src, 696static void caam_unmap(struct device *dev, struct scatterlist *src,
697 struct scatterlist *dst, int src_nents, int dst_nents, 697 struct scatterlist *dst, int src_nents, int dst_nents,
698 dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma, 698 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
699 int link_tbl_bytes) 699 int sec4_sg_bytes)
700{ 700{
701 if (unlikely(dst != src)) { 701 if (unlikely(dst != src)) {
702 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); 702 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
@@ -707,8 +707,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
707 707
708 if (iv_dma) 708 if (iv_dma)
709 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 709 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
710 if (link_tbl_bytes) 710 if (sec4_sg_bytes)
711 dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes, 711 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
712 DMA_TO_DEVICE); 712 DMA_TO_DEVICE);
713} 713}
714 714
@@ -723,8 +723,8 @@ static void aead_unmap(struct device *dev,
723 723
724 caam_unmap(dev, req->src, req->dst, 724 caam_unmap(dev, req->src, req->dst,
725 edesc->src_nents, edesc->dst_nents, 725 edesc->src_nents, edesc->dst_nents,
726 edesc->iv_dma, ivsize, edesc->link_tbl_dma, 726 edesc->iv_dma, ivsize, edesc->sec4_sg_dma,
727 edesc->link_tbl_bytes); 727 edesc->sec4_sg_bytes);
728} 728}
729 729
730static void ablkcipher_unmap(struct device *dev, 730static void ablkcipher_unmap(struct device *dev,
@@ -736,8 +736,8 @@ static void ablkcipher_unmap(struct device *dev,
736 736
737 caam_unmap(dev, req->src, req->dst, 737 caam_unmap(dev, req->src, req->dst,
738 edesc->src_nents, edesc->dst_nents, 738 edesc->src_nents, edesc->dst_nents,
739 edesc->iv_dma, ivsize, edesc->link_tbl_dma, 739 edesc->iv_dma, ivsize, edesc->sec4_sg_dma,
740 edesc->link_tbl_bytes); 740 edesc->sec4_sg_bytes);
741} 741}
742 742
743static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 743static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
@@ -828,7 +828,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
828 sizeof(struct iphdr) + req->assoclen + 828 sizeof(struct iphdr) + req->assoclen +
829 ((req->cryptlen > 1500) ? 1500 : req->cryptlen) + 829 ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
830 ctx->authsize + 36, 1); 830 ctx->authsize + 36, 1);
831 if (!err && edesc->link_tbl_bytes) { 831 if (!err && edesc->sec4_sg_bytes) {
832 struct scatterlist *sg = sg_last(req->src, edesc->src_nents); 832 struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
833 print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", 833 print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
834 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), 834 DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
@@ -927,7 +927,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
927 u32 *desc = edesc->hw_desc; 927 u32 *desc = edesc->hw_desc;
928 u32 out_options = 0, in_options; 928 u32 out_options = 0, in_options;
929 dma_addr_t dst_dma, src_dma; 929 dma_addr_t dst_dma, src_dma;
930 int len, link_tbl_index = 0; 930 int len, sec4_sg_index = 0;
931 931
932#ifdef DEBUG 932#ifdef DEBUG
933 debug("assoclen %d cryptlen %d authsize %d\n", 933 debug("assoclen %d cryptlen %d authsize %d\n",
@@ -953,9 +953,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
953 src_dma = sg_dma_address(req->assoc); 953 src_dma = sg_dma_address(req->assoc);
954 in_options = 0; 954 in_options = 0;
955 } else { 955 } else {
956 src_dma = edesc->link_tbl_dma; 956 src_dma = edesc->sec4_sg_dma;
957 link_tbl_index += (edesc->assoc_nents ? : 1) + 1 + 957 sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
958 (edesc->src_nents ? : 1); 958 (edesc->src_nents ? : 1);
959 in_options = LDST_SGF; 959 in_options = LDST_SGF;
960 } 960 }
961 if (encrypt) 961 if (encrypt)
@@ -969,7 +969,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
969 if (all_contig) { 969 if (all_contig) {
970 dst_dma = sg_dma_address(req->src); 970 dst_dma = sg_dma_address(req->src);
971 } else { 971 } else {
972 dst_dma = src_dma + sizeof(struct link_tbl_entry) * 972 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
973 ((edesc->assoc_nents ? : 1) + 1); 973 ((edesc->assoc_nents ? : 1) + 1);
974 out_options = LDST_SGF; 974 out_options = LDST_SGF;
975 } 975 }
@@ -977,9 +977,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
977 if (!edesc->dst_nents) { 977 if (!edesc->dst_nents) {
978 dst_dma = sg_dma_address(req->dst); 978 dst_dma = sg_dma_address(req->dst);
979 } else { 979 } else {
980 dst_dma = edesc->link_tbl_dma + 980 dst_dma = edesc->sec4_sg_dma +
981 link_tbl_index * 981 sec4_sg_index *
982 sizeof(struct link_tbl_entry); 982 sizeof(struct sec4_sg_entry);
983 out_options = LDST_SGF; 983 out_options = LDST_SGF;
984 } 984 }
985 } 985 }
@@ -1005,7 +1005,7 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1005 u32 *desc = edesc->hw_desc; 1005 u32 *desc = edesc->hw_desc;
1006 u32 out_options = 0, in_options; 1006 u32 out_options = 0, in_options;
1007 dma_addr_t dst_dma, src_dma; 1007 dma_addr_t dst_dma, src_dma;
1008 int len, link_tbl_index = 0; 1008 int len, sec4_sg_index = 0;
1009 1009
1010#ifdef DEBUG 1010#ifdef DEBUG
1011 debug("assoclen %d cryptlen %d authsize %d\n", 1011 debug("assoclen %d cryptlen %d authsize %d\n",
@@ -1030,8 +1030,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1030 src_dma = sg_dma_address(req->assoc); 1030 src_dma = sg_dma_address(req->assoc);
1031 in_options = 0; 1031 in_options = 0;
1032 } else { 1032 } else {
1033 src_dma = edesc->link_tbl_dma; 1033 src_dma = edesc->sec4_sg_dma;
1034 link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents; 1034 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
1035 in_options = LDST_SGF; 1035 in_options = LDST_SGF;
1036 } 1036 }
1037 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + 1037 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
@@ -1041,13 +1041,13 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1041 dst_dma = edesc->iv_dma; 1041 dst_dma = edesc->iv_dma;
1042 } else { 1042 } else {
1043 if (likely(req->src == req->dst)) { 1043 if (likely(req->src == req->dst)) {
1044 dst_dma = src_dma + sizeof(struct link_tbl_entry) * 1044 dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
1045 edesc->assoc_nents; 1045 edesc->assoc_nents;
1046 out_options = LDST_SGF; 1046 out_options = LDST_SGF;
1047 } else { 1047 } else {
1048 dst_dma = edesc->link_tbl_dma + 1048 dst_dma = edesc->sec4_sg_dma +
1049 link_tbl_index * 1049 sec4_sg_index *
1050 sizeof(struct link_tbl_entry); 1050 sizeof(struct sec4_sg_entry);
1051 out_options = LDST_SGF; 1051 out_options = LDST_SGF;
1052 } 1052 }
1053 } 1053 }
@@ -1068,7 +1068,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1068 u32 *desc = edesc->hw_desc; 1068 u32 *desc = edesc->hw_desc;
1069 u32 out_options = 0, in_options; 1069 u32 out_options = 0, in_options;
1070 dma_addr_t dst_dma, src_dma; 1070 dma_addr_t dst_dma, src_dma;
1071 int len, link_tbl_index = 0; 1071 int len, sec4_sg_index = 0;
1072 1072
1073#ifdef DEBUG 1073#ifdef DEBUG
1074 print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", 1074 print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
@@ -1086,8 +1086,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1086 src_dma = edesc->iv_dma; 1086 src_dma = edesc->iv_dma;
1087 in_options = 0; 1087 in_options = 0;
1088 } else { 1088 } else {
1089 src_dma = edesc->link_tbl_dma; 1089 src_dma = edesc->sec4_sg_dma;
1090 link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents; 1090 sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
1091 in_options = LDST_SGF; 1091 in_options = LDST_SGF;
1092 } 1092 }
1093 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); 1093 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
@@ -1096,16 +1096,16 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1096 if (!edesc->src_nents && iv_contig) { 1096 if (!edesc->src_nents && iv_contig) {
1097 dst_dma = sg_dma_address(req->src); 1097 dst_dma = sg_dma_address(req->src);
1098 } else { 1098 } else {
1099 dst_dma = edesc->link_tbl_dma + 1099 dst_dma = edesc->sec4_sg_dma +
1100 sizeof(struct link_tbl_entry); 1100 sizeof(struct sec4_sg_entry);
1101 out_options = LDST_SGF; 1101 out_options = LDST_SGF;
1102 } 1102 }
1103 } else { 1103 } else {
1104 if (!edesc->dst_nents) { 1104 if (!edesc->dst_nents) {
1105 dst_dma = sg_dma_address(req->dst); 1105 dst_dma = sg_dma_address(req->dst);
1106 } else { 1106 } else {
1107 dst_dma = edesc->link_tbl_dma + 1107 dst_dma = edesc->sec4_sg_dma +
1108 link_tbl_index * sizeof(struct link_tbl_entry); 1108 sec4_sg_index * sizeof(struct sec4_sg_entry);
1109 out_options = LDST_SGF; 1109 out_options = LDST_SGF;
1110 } 1110 }
1111 } 1111 }
@@ -1129,7 +1129,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1129 int sgc; 1129 int sgc;
1130 bool all_contig = true; 1130 bool all_contig = true;
1131 int ivsize = crypto_aead_ivsize(aead); 1131 int ivsize = crypto_aead_ivsize(aead);
1132 int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; 1132 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1133 1133
1134 assoc_nents = sg_count(req->assoc, req->assoclen); 1134 assoc_nents = sg_count(req->assoc, req->assoclen);
1135 src_nents = sg_count(req->src, req->cryptlen); 1135 src_nents = sg_count(req->src, req->cryptlen);
@@ -1157,15 +1157,15 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1157 all_contig = false; 1157 all_contig = false;
1158 assoc_nents = assoc_nents ? : 1; 1158 assoc_nents = assoc_nents ? : 1;
1159 src_nents = src_nents ? : 1; 1159 src_nents = src_nents ? : 1;
1160 link_tbl_len = assoc_nents + 1 + src_nents; 1160 sec4_sg_len = assoc_nents + 1 + src_nents;
1161 } 1161 }
1162 link_tbl_len += dst_nents; 1162 sec4_sg_len += dst_nents;
1163 1163
1164 link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); 1164 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1165 1165
1166 /* allocate space for base edesc and hw desc commands, link tables */ 1166 /* allocate space for base edesc and hw desc commands, link tables */
1167 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + 1167 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1168 link_tbl_bytes, GFP_DMA | flags); 1168 sec4_sg_bytes, GFP_DMA | flags);
1169 if (!edesc) { 1169 if (!edesc) {
1170 dev_err(jrdev, "could not allocate extended descriptor\n"); 1170 dev_err(jrdev, "could not allocate extended descriptor\n");
1171 return ERR_PTR(-ENOMEM); 1171 return ERR_PTR(-ENOMEM);
@@ -1175,32 +1175,32 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1175 edesc->src_nents = src_nents; 1175 edesc->src_nents = src_nents;
1176 edesc->dst_nents = dst_nents; 1176 edesc->dst_nents = dst_nents;
1177 edesc->iv_dma = iv_dma; 1177 edesc->iv_dma = iv_dma;
1178 edesc->link_tbl_bytes = link_tbl_bytes; 1178 edesc->sec4_sg_bytes = sec4_sg_bytes;
1179 edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + 1179 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1180 desc_bytes; 1180 desc_bytes;
1181 edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, 1181 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1182 link_tbl_bytes, DMA_TO_DEVICE); 1182 sec4_sg_bytes, DMA_TO_DEVICE);
1183 *all_contig_ptr = all_contig; 1183 *all_contig_ptr = all_contig;
1184 1184
1185 link_tbl_index = 0; 1185 sec4_sg_index = 0;
1186 if (!all_contig) { 1186 if (!all_contig) {
1187 sg_to_link_tbl(req->assoc, 1187 sg_to_sec4_sg(req->assoc,
1188 (assoc_nents ? : 1), 1188 (assoc_nents ? : 1),
1189 edesc->link_tbl + 1189 edesc->sec4_sg +
1190 link_tbl_index, 0); 1190 sec4_sg_index, 0);
1191 link_tbl_index += assoc_nents ? : 1; 1191 sec4_sg_index += assoc_nents ? : 1;
1192 sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, 1192 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1193 iv_dma, ivsize, 0); 1193 iv_dma, ivsize, 0);
1194 link_tbl_index += 1; 1194 sec4_sg_index += 1;
1195 sg_to_link_tbl_last(req->src, 1195 sg_to_sec4_sg_last(req->src,
1196 (src_nents ? : 1), 1196 (src_nents ? : 1),
1197 edesc->link_tbl + 1197 edesc->sec4_sg +
1198 link_tbl_index, 0); 1198 sec4_sg_index, 0);
1199 link_tbl_index += src_nents ? : 1; 1199 sec4_sg_index += src_nents ? : 1;
1200 } 1200 }
1201 if (dst_nents) { 1201 if (dst_nents) {
1202 sg_to_link_tbl_last(req->dst, dst_nents, 1202 sg_to_sec4_sg_last(req->dst, dst_nents,
1203 edesc->link_tbl + link_tbl_index, 0); 1203 edesc->sec4_sg + sec4_sg_index, 0);
1204 } 1204 }
1205 1205
1206 return edesc; 1206 return edesc;
@@ -1307,7 +1307,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1307 int sgc; 1307 int sgc;
1308 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; 1308 u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1309 int ivsize = crypto_aead_ivsize(aead); 1309 int ivsize = crypto_aead_ivsize(aead);
1310 int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; 1310 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1311 1311
1312 assoc_nents = sg_count(req->assoc, req->assoclen); 1312 assoc_nents = sg_count(req->assoc, req->assoclen);
1313 src_nents = sg_count(req->src, req->cryptlen); 1313 src_nents = sg_count(req->src, req->cryptlen);
@@ -1336,22 +1336,22 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1336 contig &= ~GIV_DST_CONTIG; 1336 contig &= ~GIV_DST_CONTIG;
1337 if (unlikely(req->src != req->dst)) { 1337 if (unlikely(req->src != req->dst)) {
1338 dst_nents = dst_nents ? : 1; 1338 dst_nents = dst_nents ? : 1;
1339 link_tbl_len += 1; 1339 sec4_sg_len += 1;
1340 } 1340 }
1341 if (!(contig & GIV_SRC_CONTIG)) { 1341 if (!(contig & GIV_SRC_CONTIG)) {
1342 assoc_nents = assoc_nents ? : 1; 1342 assoc_nents = assoc_nents ? : 1;
1343 src_nents = src_nents ? : 1; 1343 src_nents = src_nents ? : 1;
1344 link_tbl_len += assoc_nents + 1 + src_nents; 1344 sec4_sg_len += assoc_nents + 1 + src_nents;
1345 if (likely(req->src == req->dst)) 1345 if (likely(req->src == req->dst))
1346 contig &= ~GIV_DST_CONTIG; 1346 contig &= ~GIV_DST_CONTIG;
1347 } 1347 }
1348 link_tbl_len += dst_nents; 1348 sec4_sg_len += dst_nents;
1349 1349
1350 link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); 1350 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1351 1351
1352 /* allocate space for base edesc and hw desc commands, link tables */ 1352 /* allocate space for base edesc and hw desc commands, link tables */
1353 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + 1353 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1354 link_tbl_bytes, GFP_DMA | flags); 1354 sec4_sg_bytes, GFP_DMA | flags);
1355 if (!edesc) { 1355 if (!edesc) {
1356 dev_err(jrdev, "could not allocate extended descriptor\n"); 1356 dev_err(jrdev, "could not allocate extended descriptor\n");
1357 return ERR_PTR(-ENOMEM); 1357 return ERR_PTR(-ENOMEM);
@@ -1361,33 +1361,33 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1361 edesc->src_nents = src_nents; 1361 edesc->src_nents = src_nents;
1362 edesc->dst_nents = dst_nents; 1362 edesc->dst_nents = dst_nents;
1363 edesc->iv_dma = iv_dma; 1363 edesc->iv_dma = iv_dma;
1364 edesc->link_tbl_bytes = link_tbl_bytes; 1364 edesc->sec4_sg_bytes = sec4_sg_bytes;
1365 edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + 1365 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1366 desc_bytes; 1366 desc_bytes;
1367 edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, 1367 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1368 link_tbl_bytes, DMA_TO_DEVICE); 1368 sec4_sg_bytes, DMA_TO_DEVICE);
1369 *contig_ptr = contig; 1369 *contig_ptr = contig;
1370 1370
1371 link_tbl_index = 0; 1371 sec4_sg_index = 0;
1372 if (!(contig & GIV_SRC_CONTIG)) { 1372 if (!(contig & GIV_SRC_CONTIG)) {
1373 sg_to_link_tbl(req->assoc, assoc_nents, 1373 sg_to_sec4_sg(req->assoc, assoc_nents,
1374 edesc->link_tbl + 1374 edesc->sec4_sg +
1375 link_tbl_index, 0); 1375 sec4_sg_index, 0);
1376 link_tbl_index += assoc_nents; 1376 sec4_sg_index += assoc_nents;
1377 sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, 1377 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1378 iv_dma, ivsize, 0); 1378 iv_dma, ivsize, 0);
1379 link_tbl_index += 1; 1379 sec4_sg_index += 1;
1380 sg_to_link_tbl_last(req->src, src_nents, 1380 sg_to_sec4_sg_last(req->src, src_nents,
1381 edesc->link_tbl + 1381 edesc->sec4_sg +
1382 link_tbl_index, 0); 1382 sec4_sg_index, 0);
1383 link_tbl_index += src_nents; 1383 sec4_sg_index += src_nents;
1384 } 1384 }
1385 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { 1385 if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1386 sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, 1386 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1387 iv_dma, ivsize, 0); 1387 iv_dma, ivsize, 0);
1388 link_tbl_index += 1; 1388 sec4_sg_index += 1;
1389 sg_to_link_tbl_last(req->dst, dst_nents, 1389 sg_to_sec4_sg_last(req->dst, dst_nents,
1390 edesc->link_tbl + link_tbl_index, 0); 1390 edesc->sec4_sg + sec4_sg_index, 0);
1391 } 1391 }
1392 1392
1393 return edesc; 1393 return edesc;
@@ -1453,13 +1453,13 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1453 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1453 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1454 CRYPTO_TFM_REQ_MAY_SLEEP)) ? 1454 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1455 GFP_KERNEL : GFP_ATOMIC; 1455 GFP_KERNEL : GFP_ATOMIC;
1456 int src_nents, dst_nents = 0, link_tbl_bytes; 1456 int src_nents, dst_nents = 0, sec4_sg_bytes;
1457 struct ablkcipher_edesc *edesc; 1457 struct ablkcipher_edesc *edesc;
1458 dma_addr_t iv_dma = 0; 1458 dma_addr_t iv_dma = 0;
1459 bool iv_contig = false; 1459 bool iv_contig = false;
1460 int sgc; 1460 int sgc;
1461 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1461 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1462 int link_tbl_index; 1462 int sec4_sg_index;
1463 1463
1464 src_nents = sg_count(req->src, req->nbytes); 1464 src_nents = sg_count(req->src, req->nbytes);
1465 1465
@@ -1485,12 +1485,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1485 iv_contig = true; 1485 iv_contig = true;
1486 else 1486 else
1487 src_nents = src_nents ? : 1; 1487 src_nents = src_nents ? : 1;
1488 link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * 1488 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1489 sizeof(struct link_tbl_entry); 1489 sizeof(struct sec4_sg_entry);
1490 1490
1491 /* allocate space for base edesc and hw desc commands, link tables */ 1491 /* allocate space for base edesc and hw desc commands, link tables */
1492 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + 1492 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
1493 link_tbl_bytes, GFP_DMA | flags); 1493 sec4_sg_bytes, GFP_DMA | flags);
1494 if (!edesc) { 1494 if (!edesc) {
1495 dev_err(jrdev, "could not allocate extended descriptor\n"); 1495 dev_err(jrdev, "could not allocate extended descriptor\n");
1496 return ERR_PTR(-ENOMEM); 1496 return ERR_PTR(-ENOMEM);
@@ -1498,31 +1498,31 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1498 1498
1499 edesc->src_nents = src_nents; 1499 edesc->src_nents = src_nents;
1500 edesc->dst_nents = dst_nents; 1500 edesc->dst_nents = dst_nents;
1501 edesc->link_tbl_bytes = link_tbl_bytes; 1501 edesc->sec4_sg_bytes = sec4_sg_bytes;
1502 edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) + 1502 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1503 desc_bytes; 1503 desc_bytes;
1504 1504
1505 link_tbl_index = 0; 1505 sec4_sg_index = 0;
1506 if (!iv_contig) { 1506 if (!iv_contig) {
1507 sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0); 1507 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1508 sg_to_link_tbl_last(req->src, src_nents, 1508 sg_to_sec4_sg_last(req->src, src_nents,
1509 edesc->link_tbl + 1, 0); 1509 edesc->sec4_sg + 1, 0);
1510 link_tbl_index += 1 + src_nents; 1510 sec4_sg_index += 1 + src_nents;
1511 } 1511 }
1512 1512
1513 if (unlikely(dst_nents)) { 1513 if (unlikely(dst_nents)) {
1514 sg_to_link_tbl_last(req->dst, dst_nents, 1514 sg_to_sec4_sg_last(req->dst, dst_nents,
1515 edesc->link_tbl + link_tbl_index, 0); 1515 edesc->sec4_sg + sec4_sg_index, 0);
1516 } 1516 }
1517 1517
1518 edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, 1518 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1519 link_tbl_bytes, DMA_TO_DEVICE); 1519 sec4_sg_bytes, DMA_TO_DEVICE);
1520 edesc->iv_dma = iv_dma; 1520 edesc->iv_dma = iv_dma;
1521 1521
1522#ifdef DEBUG 1522#ifdef DEBUG
1523 print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ", 1523 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
1524 DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl, 1524 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1525 link_tbl_bytes, 1); 1525 sec4_sg_bytes, 1);
1526#endif 1526#endif
1527 1527
1528 *iv_contig_out = iv_contig; 1528 *iv_contig_out = iv_contig;
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 48c1927dbf21..3e685062d44b 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -8,10 +8,10 @@
8#ifndef DESC_H 8#ifndef DESC_H
9#define DESC_H 9#define DESC_H
10 10
11struct link_tbl_entry { 11struct sec4_sg_entry {
12 u64 ptr; 12 u64 ptr;
13#define LINK_TBL_LEN_FIN 0x40000000 13#define SEC4_SG_LEN_FIN 0x40000000
14#define LINK_TBL_LEN_EXT 0x80000000 14#define SEC4_SG_LEN_EXT 0x80000000
15 u32 len; 15 u32 len;
16 u8 reserved; 16 u8 reserved;
17 u8 buf_pool_id; 17 u8 buf_pool_id;
diff --git a/drivers/crypto/caam/sg_link_tbl.h b/drivers/crypto/caam/sg_sw_sec4.h
index 6df434922460..a6ad7a443213 100644
--- a/drivers/crypto/caam/sg_link_tbl.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -5,23 +5,23 @@
5 * 5 *
6 */ 6 */
7 7
8struct link_tbl_entry; 8struct sec4_sg_entry;
9 9
10/* 10/*
11 * convert single dma address to h/w link table format 11 * convert single dma address to h/w link table format
12 */ 12 */
13static inline void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr, 13static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
14 dma_addr_t dma, u32 len, u32 offset) 14 dma_addr_t dma, u32 len, u32 offset)
15{ 15{
16 link_tbl_ptr->ptr = dma; 16 sec4_sg_ptr->ptr = dma;
17 link_tbl_ptr->len = len; 17 sec4_sg_ptr->len = len;
18 link_tbl_ptr->reserved = 0; 18 sec4_sg_ptr->reserved = 0;
19 link_tbl_ptr->buf_pool_id = 0; 19 sec4_sg_ptr->buf_pool_id = 0;
20 link_tbl_ptr->offset = offset; 20 sec4_sg_ptr->offset = offset;
21#ifdef DEBUG 21#ifdef DEBUG
22 print_hex_dump(KERN_ERR, "link_tbl_ptr@: ", 22 print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
23 DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr, 23 DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
24 sizeof(struct link_tbl_entry), 1); 24 sizeof(struct sec4_sg_entry), 1);
25#endif 25#endif
26} 26}
27 27
@@ -29,30 +29,30 @@ static inline void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
29 * convert scatterlist to h/w link table format 29 * convert scatterlist to h/w link table format
30 * but does not have final bit; instead, returns last entry 30 * but does not have final bit; instead, returns last entry
31 */ 31 */
32static inline struct link_tbl_entry * 32static inline struct sec4_sg_entry *
33sg_to_link_tbl(struct scatterlist *sg, int sg_count, 33sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
34 struct link_tbl_entry *link_tbl_ptr, u32 offset) 34 struct sec4_sg_entry *sec4_sg_ptr, u32 offset)
35{ 35{
36 while (sg_count) { 36 while (sg_count) {
37 sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg), 37 dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
38 sg_dma_len(sg), offset); 38 sg_dma_len(sg), offset);
39 link_tbl_ptr++; 39 sec4_sg_ptr++;
40 sg = sg_next(sg); 40 sg = sg_next(sg);
41 sg_count--; 41 sg_count--;
42 } 42 }
43 return link_tbl_ptr - 1; 43 return sec4_sg_ptr - 1;
44} 44}
45 45
46/* 46/*
47 * convert scatterlist to h/w link table format 47 * convert scatterlist to h/w link table format
48 * scatterlist must have been previously dma mapped 48 * scatterlist must have been previously dma mapped
49 */ 49 */
50static inline void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count, 50static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
51 struct link_tbl_entry *link_tbl_ptr, 51 struct sec4_sg_entry *sec4_sg_ptr,
52 u32 offset) 52 u32 offset)
53{ 53{
54 link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset); 54 sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
55 link_tbl_ptr->len |= LINK_TBL_LEN_FIN; 55 sec4_sg_ptr->len |= SEC4_SG_LEN_FIN;
56} 56}
57 57
58/* count number of elements in scatterlist */ 58/* count number of elements in scatterlist */