author     Harsh Jain <harsh@chelsio.com>           2018-05-24 07:56:39 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au> 2018-05-30 12:13:48 -0400
commit     4262c98aab95119ec0810b5ec4be521dda1b28b2 (patch)
tree       8a474a99c04f0e4b2127428575c9d0d950118709
parent     335bcc4a2600f56ec3c28cf93dd9070df2576891 (diff)
crypto: chelsio - Remove separate buffer used for DMA map B0 block in CCM
Extend the memory allocated for the IV to include the B0 block, so that both can be DMA mapped in a single operation.

Signed-off-by: Harsh Jain <harsh@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c    198
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h   12
2 files changed, 97 insertions(+), 113 deletions(-)
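For context, a minimal user-space sketch (not driver code) of the buffer layout this patch moves to: the per-request IV buffer is over-allocated by the scratch-pad size, scratch_pad becomes a pointer just past the IV, and a single mapping call covers both the IV and the CCM B0 block, so the separate B0 mapping and its error path go away. All names and sizes below (fake_reqctx, fake_dma_map, IV_SIZE, SCRATCH_PAD_SIZE) are illustrative stand-ins, not the driver's identifiers.

/* Minimal sketch only; sizes and names are made up, not the driver's. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define IV_SIZE          16   /* stand-in for the driver's IV define */
#define SCRATCH_PAD_SIZE 32   /* stand-in for MAX_SCRATCH_PAD_SIZE */

struct fake_reqctx {
	uint8_t   iv[IV_SIZE + SCRATCH_PAD_SIZE]; /* IV followed by B0/scratch area */
	uint8_t  *scratch_pad;                    /* points into iv[] when B0 is used */
	size_t    b0_len;
	uintptr_t iv_dma;                         /* pretend DMA addresses */
	uintptr_t b0_dma;
};

/* Stand-in for dma_map_single(): one mapping now covers IV + B0. */
static uintptr_t fake_dma_map(void *buf, size_t len)
{
	printf("mapping %zu bytes in one operation\n", len);
	return (uintptr_t)buf;
}

static void init_request(struct fake_reqctx *r, size_t b0_len)
{
	memset(r, 0, sizeof(*r));
	r->b0_len = b0_len;
	/* Like chcr_aead_common_init(): the scratch pad lives right after the IV. */
	r->scratch_pad = b0_len ? r->iv + IV_SIZE : NULL;
	/* Like chcr_aead_dma_map(): map IV and B0 together, derive b0_dma. */
	r->iv_dma = fake_dma_map(r->iv, IV_SIZE + r->b0_len);
	r->b0_dma = b0_len ? r->iv_dma + IV_SIZE : 0;
}

int main(void)
{
	struct fake_reqctx req;

	init_request(&req, 18); /* e.g. a CCM B0 block plus the AAD length field */
	printf("iv_dma=%#lx b0_dma=%#lx\n",
	       (unsigned long)req.iv_dma, (unsigned long)req.b0_dma);
	return 0;
}

The only point of the sketch is the derived address b0_dma = iv_dma + IV_SIZE, which mirrors what chcr_aead_dma_map() does after this patch.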
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index b2bfeb251e21..b916c4eb608c 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -203,13 +203,8 @@ static inline void chcr_handle_aead_resp(struct aead_request *req,
 			      int err)
 {
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
 
-	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
-	if (reqctx->b0_dma)
-		dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
-				 reqctx->b0_len, DMA_BIDIRECTIONAL);
+	chcr_aead_common_exit(req);
 	if (reqctx->verify == VERIFY_SW) {
 		chcr_verify_tag(req, input, &err);
 		reqctx->verify = VERIFY_HW;
@@ -2178,22 +2173,35 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
 	}
 }
 
-static int chcr_aead_common_init(struct aead_request *req,
-				 unsigned short op_type)
+inline void chcr_aead_common_exit(struct aead_request *req)
+{
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
+
+	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
+}
+
+static int chcr_aead_common_init(struct aead_request *req)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
-	int error = -EINVAL;
 	unsigned int authsize = crypto_aead_authsize(tfm);
+	int error = -EINVAL;
 
 	/* validate key size */
 	if (aeadctx->enckey_len == 0)
 		goto err;
-	if (op_type && req->cryptlen < authsize)
+	if (reqctx->op && req->cryptlen < authsize)
 		goto err;
+	if (reqctx->b0_len)
+		reqctx->scratch_pad = reqctx->iv + IV;
+	else
+		reqctx->scratch_pad = NULL;
+
 	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
-				  op_type);
+				  reqctx->op);
 	if (error) {
 		error = -ENOMEM;
 		goto err;
@@ -2230,7 +2238,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
 	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
 	aead_request_set_callback(subreq, req->base.flags,
 				  req->base.complete, req->base.data);
-	 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
 				 req->iv);
 	aead_request_set_ad(subreq, req->assoclen);
 	return op_type ? crypto_aead_decrypt(subreq) :
@@ -2239,8 +2247,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
 
 static struct sk_buff *create_authenc_wr(struct aead_request *req,
 					 unsigned short qid,
-					 int size,
-					 unsigned short op_type)
+					 int size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
@@ -2264,18 +2271,20 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	if (req->cryptlen == 0)
 		return NULL;
 
-	reqctx->b0_dma = 0;
+	reqctx->b0_len = 0;
+	error = chcr_aead_common_init(req);
+	if (error)
+		return ERR_PTR(error);
+
 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
 		null = 1;
 		assoclen = 0;
+		reqctx->aad_nents = 0;
 	}
-	error = chcr_aead_common_init(req, op_type);
-	if (error)
-		return ERR_PTR(error);
 	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
 	dnents += sg_nents_xlen(req->dst, req->cryptlen +
-		(op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
+		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
 		req->assoclen);
 	dnents += MIN_AUTH_SG; // For IV
 
@@ -2292,11 +2301,10 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	transhdr_len = roundup(transhdr_len, 16);
 
 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
-				    transhdr_len, op_type)) {
+				    transhdr_len, reqctx->op)) {
 		atomic_inc(&adap->chcr_stats.fallback);
-		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
-				    op_type);
-		return ERR_PTR(chcr_aead_fallback(req, op_type));
+		chcr_aead_common_exit(req);
+		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
 	}
 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 	if (!skb) {
@@ -2306,7 +2314,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 
 	chcr_req = __skb_put_zero(skb, transhdr_len);
 
-	temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
 
 	/*
 	 * Input order is AAD,IV and Payload. where IV should be included as
@@ -2330,8 +2338,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
 	else
 		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
-	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
-					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
+	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
+					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
 					temp,
 					actx->auth_mode, aeadctx->hmac_ctrl,
 					IV >> 1);
@@ -2339,7 +2347,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 					0, 0, dst_size);
 
 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
-	if (op_type == CHCR_ENCRYPT_OP ||
+	if (reqctx->op == CHCR_ENCRYPT_OP ||
 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
@@ -2362,20 +2370,18 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	}
 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
-	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
-	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
+	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
+	chcr_add_aead_src_ent(req, ulptx, assoclen);
 	atomic_inc(&adap->chcr_stats.cipher_rqst);
 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
 		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
 		   transhdr_len, temp, 0);
 	reqctx->skb = skb;
-	reqctx->op = op_type;
 
 	return skb;
 err:
-	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
-			    op_type);
+	chcr_aead_common_exit(req);
 
 	return ERR_PTR(error);
 }
@@ -2394,11 +2400,14 @@ int chcr_aead_dma_map(struct device *dev,
 					-authsize : authsize);
 	if (!req->cryptlen || !dst_size)
 		return 0;
-	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
+	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
 					DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, reqctx->iv_dma))
 		return -ENOMEM;
-
+	if (reqctx->b0_len)
+		reqctx->b0_dma = reqctx->iv_dma + IV;
+	else
+		reqctx->b0_dma = 0;
 	if (req->src == req->dst) {
 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
 				   DMA_BIDIRECTIONAL);
@@ -2438,7 +2447,7 @@ void chcr_aead_dma_unmap(struct device *dev,
 	if (!req->cryptlen || !dst_size)
 		return;
 
-	dma_unmap_single(dev, reqctx->iv_dma, IV,
+	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
 			 DMA_BIDIRECTIONAL);
 	if (req->src == req->dst) {
 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
@@ -2453,8 +2462,7 @@ void chcr_aead_dma_unmap(struct device *dev,
 
 void chcr_add_aead_src_ent(struct aead_request *req,
 			   struct ulptx_sgl *ulptx,
-			   unsigned int assoclen,
-			   unsigned short op_type)
+			   unsigned int assoclen)
 {
 	struct ulptx_walk ulp_walk;
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2462,7 +2470,7 @@ void chcr_add_aead_src_ent(struct aead_request *req,
 	if (reqctx->imm) {
 		u8 *buf = (u8 *)ulptx;
 
-		if (reqctx->b0_dma) {
+		if (reqctx->b0_len) {
 			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
 			buf += reqctx->b0_len;
 		}
@@ -2475,7 +2483,7 @@ void chcr_add_aead_src_ent(struct aead_request *req,
 				   buf, req->cryptlen, req->assoclen);
 	} else {
 		ulptx_walk_init(&ulp_walk, ulptx);
-		if (reqctx->b0_dma)
+		if (reqctx->b0_len)
 			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
 					    &reqctx->b0_dma);
 		ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
@@ -2489,7 +2497,6 @@ void chcr_add_aead_src_ent(struct aead_request *req,
 void chcr_add_aead_dst_ent(struct aead_request *req,
 			   struct cpl_rx_phys_dsgl *phys_cpl,
 			   unsigned int assoclen,
-			   unsigned short op_type,
 			   unsigned short qid)
 {
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2499,11 +2506,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	u32 temp;
 
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
-	if (reqctx->b0_dma)
+	if (reqctx->b0_len)
 		dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
 	dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
 	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
-	temp = req->cryptlen + (op_type ? -authsize : authsize);
+	temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
 	dsgl_walk_end(&dsgl_walk, qid);
 }
@@ -2710,7 +2717,8 @@ static inline int crypto_ccm_check_iv(const u8 *iv)
 static int ccm_format_packet(struct aead_request *req,
 			     struct chcr_aead_ctx *aeadctx,
 			     unsigned int sub_type,
-			     unsigned short op_type)
+			     unsigned short op_type,
+			     unsigned int assoclen)
 {
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 	int rc = 0;
@@ -2720,13 +2728,13 @@ static int ccm_format_packet(struct aead_request *req,
 		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
 		memcpy(reqctx->iv + 4, req->iv, 8);
 		memset(reqctx->iv + 12, 0, 4);
-		*((unsigned short *)(reqctx->scratch_pad + 16)) =
-				htons(req->assoclen - 8);
 	} else {
 		memcpy(reqctx->iv, req->iv, 16);
-		*((unsigned short *)(reqctx->scratch_pad + 16)) =
-				htons(req->assoclen);
 	}
+	if (assoclen)
+		*((unsigned short *)(reqctx->scratch_pad + 16)) =
+				htons(assoclen);
+
 	generate_b0(req, aeadctx, op_type);
 	/* zero the ctr value */
 	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
@@ -2808,8 +2816,7 @@ static int aead_ccm_validate_input(unsigned short op_type,
 
 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 					   unsigned short qid,
-					   int size,
-					   unsigned short op_type)
+					   int size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
@@ -2827,22 +2834,20 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 						GFP_ATOMIC;
 	struct adapter *adap = padap(a_ctx(tfm)->dev);
 
-	reqctx->b0_dma = 0;
 	sub_type = get_aead_subtype(tfm);
 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
 		assoclen -= 8;
-	error = chcr_aead_common_init(req, op_type);
+	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
+	error = chcr_aead_common_init(req);
 	if (error)
 		return ERR_PTR(error);
 
-
-	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
-	error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
+	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
 	if (error)
 		goto err;
 	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
 	dnents += sg_nents_xlen(req->dst, req->cryptlen
-			+ (op_type ? -authsize : authsize),
+			+ (reqctx->op ? -authsize : authsize),
 			CHCR_DST_SG_SIZE, req->assoclen);
 	dnents += MIN_CCM_SG; // For IV and B0
 	dst_size = get_space_for_phys_dsgl(dnents);
@@ -2858,11 +2863,10 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	transhdr_len = roundup(transhdr_len, 16);
 
 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
-				    reqctx->b0_len, transhdr_len, op_type)) {
+				    reqctx->b0_len, transhdr_len, reqctx->op)) {
 		atomic_inc(&adap->chcr_stats.fallback);
-		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
-				    op_type);
-		return ERR_PTR(chcr_aead_fallback(req, op_type));
+		chcr_aead_common_exit(req);
+		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
 	}
 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 
@@ -2873,7 +2877,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 
 	chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
 
-	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);
+	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
 
 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
@@ -2882,21 +2886,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 
 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
-	error = ccm_format_packet(req, aeadctx, sub_type, op_type);
+	error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
 	if (error)
 		goto dstmap_fail;
-
-	reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
-					&reqctx->scratch_pad, reqctx->b0_len,
-					DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
-			      reqctx->b0_dma)) {
-		error = -ENOMEM;
-		goto dstmap_fail;
-	}
-
-	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
-	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
+	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
+	chcr_add_aead_src_ent(req, ulptx, assoclen);
 
 	atomic_inc(&adap->chcr_stats.aead_rqst);
 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
@@ -2905,20 +2899,18 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
 		    transhdr_len, temp, 0);
 	reqctx->skb = skb;
-	reqctx->op = op_type;
 
 	return skb;
 dstmap_fail:
 	kfree_skb(skb);
 err:
-	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
+	chcr_aead_common_exit(req);
 	return ERR_PTR(error);
 }
 
 static struct sk_buff *create_gcm_wr(struct aead_request *req,
 				     unsigned short qid,
-				     int size,
-				     unsigned short op_type)
+				     int size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
@@ -2938,13 +2930,13 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
 		assoclen = req->assoclen - 8;
 
-	reqctx->b0_dma = 0;
-	error = chcr_aead_common_init(req, op_type);
+	reqctx->b0_len = 0;
+	error = chcr_aead_common_init(req);
 	if (error)
 		return ERR_PTR(error);
 	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
 	dnents += sg_nents_xlen(req->dst, req->cryptlen +
-				(op_type ? -authsize : authsize),
+				(reqctx->op ? -authsize : authsize),
 				CHCR_DST_SG_SIZE, req->assoclen);
 	dnents += MIN_GCM_SG; // For IV
 	dst_size = get_space_for_phys_dsgl(dnents);
@@ -2958,11 +2950,11 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	transhdr_len += temp;
 	transhdr_len = roundup(transhdr_len, 16);
 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
-			    transhdr_len, op_type)) {
+			    transhdr_len, reqctx->op)) {
+
 		atomic_inc(&adap->chcr_stats.fallback);
-		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
-				    op_type);
-		return ERR_PTR(chcr_aead_fallback(req, op_type));
+		chcr_aead_common_exit(req);
+		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
 	}
 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 	if (!skb) {
@@ -2973,7 +2965,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	chcr_req = __skb_put_zero(skb, transhdr_len);
 
 	//Offset of tag from end
-	temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
 					a_ctx(tfm)->dev->rx_channel_id, 2,
 					(assoclen + 1));
@@ -2986,7 +2978,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 			FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
 						temp, temp);
 	chcr_req->sec_cpl.seqno_numivs =
-			FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
+			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
 					CHCR_ENCRYPT_OP) ? 1 : 0,
 					CHCR_SCMD_CIPHER_MODE_AES_GCM,
 					CHCR_SCMD_AUTH_MODE_GHASH,
@@ -3012,19 +3004,18 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
 
-	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
-	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
+	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
+	chcr_add_aead_src_ent(req, ulptx, assoclen);
 	atomic_inc(&adap->chcr_stats.aead_rqst);
 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
 		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
 		    transhdr_len, temp, reqctx->verify);
 	reqctx->skb = skb;
-	reqctx->op = op_type;
 	return skb;
 
 err:
-	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
+	chcr_aead_common_exit(req);
 	return ERR_PTR(error);
 }
 
@@ -3558,7 +3549,6 @@ out:
 }
 
 static int chcr_aead_op(struct aead_request *req,
-			unsigned short op_type,
 			int size,
 			create_wr_t create_wr_fn)
 {
@@ -3580,8 +3570,7 @@ static int chcr_aead_op(struct aead_request *req,
 	}
 
 	/* Form a WR from req */
-	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
-			   op_type);
+	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
 
 	if (IS_ERR(skb) || !skb)
 		return PTR_ERR(skb);
@@ -3598,21 +3587,19 @@ static int chcr_aead_encrypt(struct aead_request *req)
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 
 	reqctx->verify = VERIFY_HW;
+	reqctx->op = CHCR_ENCRYPT_OP;
 
 	switch (get_aead_subtype(tfm)) {
 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
-		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
-				    create_authenc_wr);
+		return chcr_aead_op(req, 0, create_authenc_wr);
 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
-		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
-				    create_aead_ccm_wr);
+		return chcr_aead_op(req, 0, create_aead_ccm_wr);
 	default:
-		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
-				    create_gcm_wr);
+		return chcr_aead_op(req, 0, create_gcm_wr);
 	}
 }
 
@@ -3630,21 +3617,18 @@ static int chcr_aead_decrypt(struct aead_request *req)
 		size = 0;
 		reqctx->verify = VERIFY_HW;
 	}
-
+	reqctx->op = CHCR_DECRYPT_OP;
 	switch (get_aead_subtype(tfm)) {
 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
-		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
-				    create_authenc_wr);
+		return chcr_aead_op(req, size, create_authenc_wr);
 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
-		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
-				    create_aead_ccm_wr);
+		return chcr_aead_op(req, size, create_aead_ccm_wr);
 	default:
-		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
-				    create_gcm_wr);
+		return chcr_aead_op(req, size, create_gcm_wr);
 	}
 }
 
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 97878d46c287..54835cb109e5 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -190,8 +190,8 @@ struct chcr_aead_reqctx {
 	short int dst_nents;
 	u16 imm;
 	u16 verify;
-	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
-	unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
+	u8 iv[CHCR_MAX_CRYPTO_IV_LEN + MAX_SCRATCH_PAD_SIZE];
+	u8 *scratch_pad;
 };
 
 struct ulptx_walk {
@@ -311,8 +311,7 @@ struct chcr_alg_template {
 
 typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
 					unsigned short qid,
-					int size,
-					unsigned short op_type);
+					int size);
 
 void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
 int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
@@ -321,10 +320,10 @@ void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req,
 			 unsigned short op_type);
 void chcr_add_aead_dst_ent(struct aead_request *req,
 			   struct cpl_rx_phys_dsgl *phys_cpl,
-			   unsigned int assoclen, unsigned short op_type,
+			   unsigned int assoclen,
 			   unsigned short qid);
 void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx,
-			   unsigned int assoclen, unsigned short op_type);
+			   unsigned int assoclen);
 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
 			     void *ulptx,
 			     struct cipher_wr_param *wrparam);
@@ -339,4 +338,5 @@ void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
 			    struct hash_wr_param *param);
 int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
 void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req);
+void chcr_aead_common_exit(struct aead_request *req);
 #endif /* __CHCR_CRYPTO_H__ */