aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHarsh Jain <harsh@chelsio.com>2018-09-19 13:12:16 -0400
committerHerbert Xu <herbert@gondor.apana.org.au>2018-09-28 00:44:34 -0400
commitadd92a817e60e308a419693413a38d9d1e663aff (patch)
tree9656fb78ffdaa7b205a56fd30dfedd273195bca8
parent13cc6f48c7434ce46ba6dbc90003a136a263d75a (diff)
crypto: chelsio - Fix memory corruption in DMA Mapped buffers.
Update PCI Id in "cpl_rx_phys_dsgl" header. In case pci_chan_id and tx_chan_id are not derived from the same queue, H/W can send a request completion indication before completing the DMA transfer. Herbert, it would be good if this fix can be merged to the stable tree. For the 4.14 kernel, it requires some updates to avoid merge conflicts. Cc: <stable@vger.kernel.org> Signed-off-by: Harsh Jain <harsh@chelsio.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c32
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h2
2 files changed, 24 insertions, 10 deletions
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 5c539af8ed60..010bbf607797 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
367 walk->to = (struct phys_sge_pairs *)(dsgl + 1); 367 walk->to = (struct phys_sge_pairs *)(dsgl + 1);
368} 368}
369 369
370static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid) 370static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
371 int pci_chan_id)
371{ 372{
372 struct cpl_rx_phys_dsgl *phys_cpl; 373 struct cpl_rx_phys_dsgl *phys_cpl;
373 374
@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
385 phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; 386 phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
386 phys_cpl->rss_hdr_int.qid = htons(qid); 387 phys_cpl->rss_hdr_int.qid = htons(qid);
387 phys_cpl->rss_hdr_int.hash_val = 0; 388 phys_cpl->rss_hdr_int.hash_val = 0;
389 phys_cpl->rss_hdr_int.channel = pci_chan_id;
388} 390}
389 391
390static inline void dsgl_walk_add_page(struct dsgl_walk *walk, 392static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
@@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
718 FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid, 720 FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
719 !!lcb, ctx->tx_qidx); 721 !!lcb, ctx->tx_qidx);
720 722
721 chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id, 723 chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
722 qid); 724 qid);
723 chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) - 725 chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
724 ((sizeof(chcr_req->wreq)) >> 4))); 726 ((sizeof(chcr_req->wreq)) >> 4)));
@@ -1339,16 +1341,23 @@ static int chcr_device_init(struct chcr_context *ctx)
1339 adap->vres.ncrypto_fc); 1341 adap->vres.ncrypto_fc);
1340 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan; 1342 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1341 txq_perchan = ntxq / u_ctx->lldi.nchan; 1343 txq_perchan = ntxq / u_ctx->lldi.nchan;
1342 rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
1343 rxq_idx += id % rxq_perchan;
1344 txq_idx = ctx->dev->tx_channel_id * txq_perchan;
1345 txq_idx += id % txq_perchan;
1346 spin_lock(&ctx->dev->lock_chcr_dev); 1344 spin_lock(&ctx->dev->lock_chcr_dev);
1347 ctx->rx_qidx = rxq_idx; 1345 ctx->tx_chan_id = ctx->dev->tx_channel_id;
1348 ctx->tx_qidx = txq_idx;
1349 ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id; 1346 ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1350 ctx->dev->rx_channel_id = 0; 1347 ctx->dev->rx_channel_id = 0;
1351 spin_unlock(&ctx->dev->lock_chcr_dev); 1348 spin_unlock(&ctx->dev->lock_chcr_dev);
1349 rxq_idx = ctx->tx_chan_id * rxq_perchan;
1350 rxq_idx += id % rxq_perchan;
1351 txq_idx = ctx->tx_chan_id * txq_perchan;
1352 txq_idx += id % txq_perchan;
1353 ctx->rx_qidx = rxq_idx;
1354 ctx->tx_qidx = txq_idx;
1355 /* Channel Id used by SGE to forward packet to Host.
1356 * Same value should be used in cpl_fw6_pld RSS_CH field
1357 * by FW. Driver programs PCI channel ID to be used in fw
1358 * at the time of queue allocation with value "pi->tx_chan"
1359 */
1360 ctx->pci_chan_id = txq_idx / txq_perchan;
1352 } 1361 }
1353out: 1362out:
1354 return err; 1363 return err;
@@ -2503,6 +2512,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
2503 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2512 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2504 struct dsgl_walk dsgl_walk; 2513 struct dsgl_walk dsgl_walk;
2505 unsigned int authsize = crypto_aead_authsize(tfm); 2514 unsigned int authsize = crypto_aead_authsize(tfm);
2515 struct chcr_context *ctx = a_ctx(tfm);
2506 u32 temp; 2516 u32 temp;
2507 2517
2508 dsgl_walk_init(&dsgl_walk, phys_cpl); 2518 dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2512,7 +2522,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
2512 dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma); 2522 dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2513 temp = req->cryptlen + (reqctx->op ? -authsize : authsize); 2523 temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
2514 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen); 2524 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
2515 dsgl_walk_end(&dsgl_walk, qid); 2525 dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2516} 2526}
2517 2527
2518void chcr_add_cipher_src_ent(struct ablkcipher_request *req, 2528void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
@@ -2544,6 +2554,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2544 unsigned short qid) 2554 unsigned short qid)
2545{ 2555{
2546 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 2556 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2557 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
2558 struct chcr_context *ctx = c_ctx(tfm);
2547 struct dsgl_walk dsgl_walk; 2559 struct dsgl_walk dsgl_walk;
2548 2560
2549 dsgl_walk_init(&dsgl_walk, phys_cpl); 2561 dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2552,7 +2564,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2552 reqctx->dstsg = dsgl_walk.last_sg; 2564 reqctx->dstsg = dsgl_walk.last_sg;
2553 reqctx->dst_ofst = dsgl_walk.last_sg_len; 2565 reqctx->dst_ofst = dsgl_walk.last_sg_len;
2554 2566
2555 dsgl_walk_end(&dsgl_walk, qid); 2567 dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2556} 2568}
2557 2569
2558void chcr_add_hash_src_ent(struct ahash_request *req, 2570void chcr_add_hash_src_ent(struct ahash_request *req,
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 54835cb109e5..0d2c70c344f3 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -255,6 +255,8 @@ struct chcr_context {
255 struct chcr_dev *dev; 255 struct chcr_dev *dev;
256 unsigned char tx_qidx; 256 unsigned char tx_qidx;
257 unsigned char rx_qidx; 257 unsigned char rx_qidx;
258 unsigned char tx_chan_id;
259 unsigned char pci_chan_id;
258 struct __crypto_ctx crypto_ctx[0]; 260 struct __crypto_ctx crypto_ctx[0];
259}; 261};
260 262