author     Atul Gupta <atul.gupta@chelsio.com>         2019-02-18 05:04:37 -0500
committer  Herbert Xu <herbert@gondor.apana.org.au>    2019-02-28 01:17:58 -0500
commit     8cd9d183731a8b54e7ca40de1c72e3c6bec40113 (patch)
tree       794efc6f36284fc23a3e57c704f604274bfdfde2 /drivers/crypto
parent     bf432e72c170f28c08bb8c0f45356a5a48a7cae2 (diff)
crypto: chelsio - Fixed Traffic Stall

Fixed a traffic stall caused by:
- sub-commands other than the last one should have the "more" bit set
- for the ESN case, the sub-command is required for linear skbs only
- also optimized the is_eth_imm() usage
Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/chelsio/chcr_ipsec.c | 33 ++++++++++++++++++++-------------
1 file changed, 20 insertions(+), 13 deletions(-)
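The hunks below make three coordinated changes: calc_tx_sec_flits() gains a bool *immediate out-parameter so the immediate-data decision is made in one place, the work-request length is precomputed once into ndesc, and a new sc_more flag keeps the sub-command "more" bit set for the linear-skb ESN case. A condensed sketch of the post-patch flow, pieced together from the hunks below (a paraphrase, not a verbatim excerpt; all identifiers come from the patch itself):

        bool immediate = false;
        u16 sc_more = 0;

        /* Now sets *immediate when the whole packet fits into the work
         * request as immediate data; returns the WR size in 8-byte flits.
         */
        flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
        ndesc = DIV_ROUND_UP(flits, 2);   /* WR length in 16-byte (LEN16) units */

        if (immediate)
                immdatalen = skb->len;

        /* ESN emits a trailing sub-command, but only for linear skbs, so
         * the "more" bit must stay set there even when the payload itself
         * is immediate data.
         */
        if (sa_entry->esn && !skb_is_nonlinear(skb))
                sc_more = 1;

        wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);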
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 0c826d0e1bfc..2f60049361ef 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -336,7 +336,8 @@ static inline int is_eth_imm(const struct sk_buff *skb,
 }
 
 static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
-                                             struct ipsec_sa_entry *sa_entry)
+                                             struct ipsec_sa_entry *sa_entry,
+                                             bool *immediate)
 {
         unsigned int kctx_len;
         unsigned int flits;
@@ -354,8 +355,10 @@ static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
          * TX Packet header plus the skb data in the Work Request.
          */
 
-        if (hdrlen)
+        if (hdrlen) {
+                *immediate = true;
                 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
+        }
 
         flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
 
@@ -418,7 +421,7 @@ inline void *copy_esn_pktxt(struct sk_buff *skb,
         iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
         memcpy(aadiv->iv, iv, 8);
 
-        if (is_eth_imm(skb, sa_entry)) {
+        if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
                 sc_imm = (struct ulptx_idata *)(pos +
                           (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
                                         sizeof(__be64)) << 3));
@@ -531,15 +534,18 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
         struct adapter *adap = pi->adapter;
         unsigned int ivsize = GCM_ESP_IV_SIZE;
         struct chcr_ipsec_wr *wr;
+        bool immediate = false;
         u16 immdatalen = 0;
         unsigned int flits;
         u32 ivinoffset;
         u32 aadstart;
         u32 aadstop;
         u32 ciphstart;
+        u16 sc_more = 0;
         u32 ivdrop = 0;
         u32 esnlen = 0;
         u32 wr_mid;
+        u16 ndesc;
         int qidx = skb_get_queue_mapping(skb);
         struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
         unsigned int kctx_len = sa_entry->kctx_len;
@@ -547,20 +553,24 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
 
         atomic_inc(&adap->chcr_stats.ipsec_cnt);
 
-        flits = calc_tx_sec_flits(skb, sa_entry);
+        flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
+        ndesc = DIV_ROUND_UP(flits, 2);
         if (sa_entry->esn)
                 ivdrop = 1;
 
-        if (is_eth_imm(skb, sa_entry))
+        if (immediate)
                 immdatalen = skb->len;
 
-        if (sa_entry->esn)
+        if (sa_entry->esn) {
                 esnlen = sizeof(struct chcr_ipsec_aadiv);
+                if (!skb_is_nonlinear(skb))
+                        sc_more = 1;
+        }
 
         /* WR Header */
         wr = (struct chcr_ipsec_wr *)pos;
         wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
-        wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+        wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);
 
         if (unlikely(credits < ETHTXQ_STOP_THRES)) {
                 netif_tx_stop_queue(q->txq);
@@ -572,10 +582,10 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
 
         /* ULPTX */
         wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
-        wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);
+        wr->req.ulptx.len = htonl(ndesc - 1);
 
         /* Sub-command */
-        wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen);
+        wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
         wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                          sizeof(wr->req.key_ctx) +
                                          kctx_len +
@@ -698,7 +708,7 @@ out_free: dev_kfree_skb_any(skb);
 
         cxgb4_reclaim_completed_tx(adap, &q->q, true);
 
-        flits = calc_tx_sec_flits(skb, sa_entry);
+        flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
         ndesc = flits_to_desc(flits);
         credits = txq_avail(&q->q) - ndesc;
 
@@ -711,9 +721,6 @@ out_free: dev_kfree_skb_any(skb);
                 return NETDEV_TX_BUSY;
         }
 
-        if (is_eth_imm(skb, sa_entry))
-                immediate = true;
-
         if (!immediate &&
             unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
                 q->mapping_err++;
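A worked example of the size arithmetic above (the byte counts are illustrative only, not taken from the driver): for an immediate-mode packet with skb->len = 100 and hdrlen = 16, calc_tx_sec_flits() returns DIV_ROUND_UP(100 + 16, 8) = 15 flits, so ndesc = DIV_ROUND_UP(15, 2) = 8, and the work request advertises LEN16 = 8 with ulptx.len = ndesc - 1 = 7. Queue credits are still charged via the separate flits_to_desc(flits) conversion in the transmit path. The final hunk can drop the standalone is_eth_imm() check because the same decision now arrives through the immediate out-parameter of calc_tx_sec_flits().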