Diffstat (limited to 'drivers/crypto/caam/caamalg.c')
-rw-r--r--  drivers/crypto/caam/caamalg.c  |  134
1 file changed, 49 insertions(+), 85 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 7c63b72ecd75..4cf5dec826e1 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -86,6 +86,7 @@
 #else
 #define debug(format, arg...)
 #endif
+static struct list_head alg_list;
 
 /* Set DK bit in class 1 operation if shared */
 static inline void append_dec_op1(u32 *desc, u32 type)
@@ -817,7 +818,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		       ivsize, 1);
 	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
-		       req->cryptlen, 1);
+		       req->cryptlen - ctx->authsize, 1);
 #endif
 
 	if (err) {
@@ -971,12 +972,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 			 (edesc->src_nents ? : 1);
 		in_options = LDST_SGF;
 	}
-	if (encrypt)
-		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-				  req->cryptlen - authsize, in_options);
-	else
-		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-				  req->cryptlen, in_options);
+
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+			  in_options);
 
 	if (likely(req->src == req->dst)) {
 		if (all_contig) {
@@ -997,7 +995,8 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 		}
 	}
 	if (encrypt)
-		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
+				   out_options);
 	else
 		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
 				   out_options);
@@ -1047,8 +1046,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
 		in_options = LDST_SGF;
 	}
-	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-			  req->cryptlen - authsize, in_options);
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+			  in_options);
 
 	if (contig & GIV_DST_CONTIG) {
 		dst_dma = edesc->iv_dma;
@@ -1065,7 +1064,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 		}
 	}
 
-	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
+			   out_options);
 }
 
 /*
@@ -1129,7 +1129,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
  * allocate and map the aead extended descriptor
  */
 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-					   int desc_bytes, bool *all_contig_ptr)
+					   int desc_bytes, bool *all_contig_ptr,
+					   bool encrypt)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1144,12 +1145,22 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	bool assoc_chained = false, src_chained = false, dst_chained = false;
 	int ivsize = crypto_aead_ivsize(aead);
 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+	unsigned int authsize = ctx->authsize;
 
 	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
-	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+	if (unlikely(req->dst != req->src)) {
+		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+		dst_nents = sg_count(req->dst,
+				     req->cryptlen +
+					(encrypt ? authsize : (-authsize)),
+				     &dst_chained);
+	} else {
+		src_nents = sg_count(req->src,
+				     req->cryptlen +
+					(encrypt ? authsize : 0),
+				     &src_chained);
+	}
 
 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
 				 DMA_TO_DEVICE, assoc_chained);
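
The hunk above works together with the aead_encrypt()/aead_givencrypt() hunks below: once the driver stops folding ctx->authsize into req->cryptlen, the scatterlist byte counts must account for the ICV explicitly. A minimal sketch of that length rule, assuming the crypto-API convention that cryptlen excludes the ICV on encrypt and includes it on decrypt; aead_dst_len is a hypothetical helper, not part of the patch:

/*
 * Illustrative only: destination length for a CAAM AEAD job.
 * Encrypt appends the authsize-byte ICV after the ciphertext;
 * decrypt verifies and strips it.
 */
static inline unsigned int aead_dst_len(unsigned int cryptlen,
					unsigned int authsize, bool encrypt)
{
	return encrypt ? cryptlen + authsize : cryptlen - authsize;
}

This matches the sg_count() arguments above: req->cryptlen + authsize for the encrypt destination, req->cryptlen - authsize for the decrypt destination, and req->cryptlen + authsize for in-place encryption, where one buffer holds both input and output.
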
@@ -1233,11 +1244,9 @@ static int aead_encrypt(struct aead_request *req)
 	u32 *desc;
 	int ret = 0;
 
-	req->cryptlen += ctx->authsize;
-
 	/* allocate extended descriptor */
 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				 CAAM_CMD_SZ, &all_contig);
+				 CAAM_CMD_SZ, &all_contig, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1274,7 +1283,7 @@ static int aead_decrypt(struct aead_request *req)
 
 	/* allocate extended descriptor */
 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				 CAAM_CMD_SZ, &all_contig);
+				 CAAM_CMD_SZ, &all_contig, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1331,7 +1340,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
 	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
+				     &dst_chained);
 
 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
 				 DMA_TO_DEVICE, assoc_chained);
@@ -1425,8 +1435,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
 	u32 *desc;
 	int ret = 0;
 
-	req->cryptlen += ctx->authsize;
-
 	/* allocate extended descriptor */
 	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
 				     CAAM_CMD_SZ, &contig);
@@ -2057,7 +2065,6 @@ static struct caam_alg_template driver_algs[] = {
 
 struct caam_crypto_alg {
 	struct list_head entry;
-	struct device *ctrldev;
 	int class1_alg_type;
 	int class2_alg_type;
 	int alg_op;
@@ -2070,14 +2077,12 @@ static int caam_cra_init(struct crypto_tfm *tfm)
 	struct caam_crypto_alg *caam_alg =
 		 container_of(alg, struct caam_crypto_alg, crypto_alg);
 	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
-	int tgt_jr = atomic_inc_return(&priv->tfm_count);
 
-	/*
-	 * distribute tfms across job rings to ensure in-order
-	 * crypto request processing per tfm
-	 */
-	ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
+	ctx->jrdev = caam_jr_alloc();
+	if (IS_ERR(ctx->jrdev)) {
+		pr_err("Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(ctx->jrdev);
+	}
 
 	/* copy descriptor header template value */
 	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
@@ -2104,44 +2109,26 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
 				 desc_bytes(ctx->sh_desc_givenc),
 				 DMA_TO_DEVICE);
+
+	caam_jr_free(ctx->jrdev);
 }
 
 static void __exit caam_algapi_exit(void)
 {
 
-	struct device_node *dev_node;
-	struct platform_device *pdev;
-	struct device *ctrldev;
-	struct caam_drv_private *priv;
 	struct caam_crypto_alg *t_alg, *n;
 
-	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node) {
-		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-		if (!dev_node)
-			return;
-	}
-
-	pdev = of_find_device_by_node(dev_node);
-	if (!pdev)
-		return;
-
-	ctrldev = &pdev->dev;
-	of_node_put(dev_node);
-	priv = dev_get_drvdata(ctrldev);
-
-	if (!priv->alg_list.next)
+	if (!alg_list.next)
 		return;
 
-	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
+	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
 		crypto_unregister_alg(&t_alg->crypto_alg);
 		list_del(&t_alg->entry);
 		kfree(t_alg);
 	}
 }
 
-static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
-					      struct caam_alg_template
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
 					       *template)
 {
 	struct caam_crypto_alg *t_alg;
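
The hunks above move job-ring ownership from a module-wide round-robin (indexing priv->jrdev[] via tfm_count) to per-transform allocation. A sketch of the resulting acquire/release pairing, assuming the caam_jr_alloc()/caam_jr_free() prototypes from the driver's JR interface; the example_* wrappers are hypothetical:

/* Illustrative pairing: each tfm holds one job ring for its lifetime. */
static int example_acquire_ring(struct caam_ctx *ctx)
{
	ctx->jrdev = caam_jr_alloc();	/* job ring device or ERR_PTR */
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	return 0;
}

static void example_release_ring(struct caam_ctx *ctx)
{
	caam_jr_free(ctx->jrdev);	/* release in cra_exit, after unmapping */
}

Keeping all of a transform's requests on a single ring preserves the per-tfm ordering the removed comment was concerned with, without driver-private bookkeeping.
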
@@ -2149,7 +2136,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
 
 	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
 	if (!t_alg) {
-		dev_err(ctrldev, "failed to allocate t_alg\n");
+		pr_err("failed to allocate t_alg\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -2181,62 +2168,39 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
 	t_alg->class1_alg_type = template->class1_alg_type;
 	t_alg->class2_alg_type = template->class2_alg_type;
 	t_alg->alg_op = template->alg_op;
-	t_alg->ctrldev = ctrldev;
 
 	return t_alg;
 }
 
 static int __init caam_algapi_init(void)
 {
-	struct device_node *dev_node;
-	struct platform_device *pdev;
-	struct device *ctrldev;
-	struct caam_drv_private *priv;
 	int i = 0, err = 0;
 
-	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node) {
-		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-		if (!dev_node)
-			return -ENODEV;
-	}
-
-	pdev = of_find_device_by_node(dev_node);
-	if (!pdev)
-		return -ENODEV;
-
-	ctrldev = &pdev->dev;
-	priv = dev_get_drvdata(ctrldev);
-	of_node_put(dev_node);
-
-	INIT_LIST_HEAD(&priv->alg_list);
-
-	atomic_set(&priv->tfm_count, -1);
+	INIT_LIST_HEAD(&alg_list);
 
 	/* register crypto algorithms the device supports */
 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
 		/* TODO: check if h/w supports alg */
 		struct caam_crypto_alg *t_alg;
 
-		t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
+		t_alg = caam_alg_alloc(&driver_algs[i]);
 		if (IS_ERR(t_alg)) {
 			err = PTR_ERR(t_alg);
-			dev_warn(ctrldev, "%s alg allocation failed\n",
-				 driver_algs[i].driver_name);
+			pr_warn("%s alg allocation failed\n",
+				driver_algs[i].driver_name);
 			continue;
 		}
 
 		err = crypto_register_alg(&t_alg->crypto_alg);
 		if (err) {
-			dev_warn(ctrldev, "%s alg registration failed\n",
-				 t_alg->crypto_alg.cra_driver_name);
+			pr_warn("%s alg registration failed\n",
+				t_alg->crypto_alg.cra_driver_name);
 			kfree(t_alg);
 		} else
-			list_add_tail(&t_alg->entry, &priv->alg_list);
+			list_add_tail(&t_alg->entry, &alg_list);
 	}
-	if (!list_empty(&priv->alg_list))
-		dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
-			 (char *)of_get_property(dev_node, "compatible", NULL));
+	if (!list_empty(&alg_list))
+		pr_info("caam algorithms registered in /proc/crypto\n");
 
 	return err;
 }
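
One detail worth noting in caam_algapi_exit(): the if (!alg_list.next) test relies on static storage being zero-initialized, so alg_list.next stays NULL unless caam_algapi_init() reached INIT_LIST_HEAD(&alg_list). A standalone sketch of that guard (example_* names are hypothetical):

#include <linux/list.h>

static struct list_head example_list;	/* zeroed at load: .next == NULL */

static void example_exit(void)
{
	/* init may have failed before INIT_LIST_HEAD() ran; nothing to walk */
	if (!example_list.next)
		return;

	/* list head is initialized here; safe to iterate and unregister */
}

After INIT_LIST_HEAD() the head points at itself, so the NULL check only trips when initialization never happened.
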