author     Sunil Goutham <sgoutham@cavium.com>     2017-05-02 09:06:52 -0400
committer  David S. Miller <davem@davemloft.net>   2017-05-02 15:41:21 -0400
commit     0dada88b8cd74569abc3dda50f1b268a5868f6f2
tree       8f2c26a5ef9427483375de3f208357c3150f3601
parent     5e848e4c5d77438e126c97702ec3bea477f550a9
net: thunderx: Optimize CQE_TX handling
Optimized CQE handling with the changes below:
- Freeing descriptors back to the SQ in bulk, i.e. once per NAPI
  instance instead of for every CQE_TX; this reduces the number
  of atomic updates to 'sq->free_cnt' (see the sketch after the
  sign-offs below).
- Checking for errors in CQE_TX and CQE_RX before calling the
  fn()s that update error stats, i.e. reduced branching.
Also removed debug messages in the packet handling path, which
otherwise cause issues if DEBUG is enabled.
Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
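
A rough illustration of the bulk-free pattern from the first bullet: the
per-CQE atomic update on the shared free counter becomes a plain local
accumulator that is published once per poll. This is a minimal userspace
C sketch with made-up names (sq_ctx, handle_one_cqe, napi_poll), not the
driver's actual code:

#include <stdatomic.h>

struct sq_ctx {
        atomic_int free_cnt;            /* shared with the transmit path */
};

/* Stand-in for per-CQE work; returns how many SQ descriptors it frees */
static int handle_one_cqe(void)
{
        return 2;                       /* e.g. header + gather subdescs */
}

static void napi_poll(struct sq_ctx *sq, int budget)
{
        int subdesc_cnt = 0;            /* plain local, no atomic traffic */
        int i;

        for (i = 0; i < budget; i++)
                subdesc_cnt += handle_one_cqe();

        /* One atomic add per poll instead of one per CQE */
        if (subdesc_cnt)
                atomic_fetch_add(&sq->free_cnt, subdesc_cnt);
}

int main(void)
{
        struct sq_ctx sq = { .free_cnt = 0 };

        napi_poll(&sq, 8);
        return atomic_load(&sq.free_cnt) == 16 ? 0 : 1;
}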
 drivers/net/ethernet/cavium/thunder/nicvf_main.c   | 44
 drivers/net/ethernet/cavium/thunder/nicvf_queues.c |  5
 2 files changed, 21 insertions(+), 28 deletions(-)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 81a2fcb3cb1b..0d79894400ab 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -498,7 +498,7 @@ static int nicvf_init_resources(struct nicvf *nic)
 
 static void nicvf_snd_pkt_handler(struct net_device *netdev,
                                   struct cqe_send_t *cqe_tx,
-                                  int cqe_type, int budget,
+                                  int budget, int *subdesc_cnt,
                                   unsigned int *tx_pkts, unsigned int *tx_bytes)
 {
         struct sk_buff *skb = NULL;
@@ -513,12 +513,10 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
         if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
                 return;
 
-        netdev_dbg(nic->netdev,
-                   "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
-                   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
-                   cqe_tx->sqe_ptr, hdr->subdesc_cnt);
+        /* Check for errors */
+        if (cqe_tx->send_status)
+                nicvf_check_cqe_tx_errs(nic->pnicvf, cqe_tx);
 
-        nicvf_check_cqe_tx_errs(nic, cqe_tx);
         skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
         if (skb) {
                 /* Check for dummy descriptor used for HW TSO offload on 88xx */
@@ -528,12 +526,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
                          (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
                         nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
                                                  tso_sqe->subdesc_cnt);
-                        nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+                        *subdesc_cnt += tso_sqe->subdesc_cnt + 1;
                 } else {
                         nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
                                                  hdr->subdesc_cnt);
                 }
-                nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+                *subdesc_cnt += hdr->subdesc_cnt + 1;
                 prefetch(skb);
                 (*tx_pkts)++;
                 *tx_bytes += skb->len;
@@ -544,7 +542,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
                  * a SKB attached, so just free SQEs here.
                  */
                 if (!nic->hw_tso)
-                        nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+                        *subdesc_cnt += hdr->subdesc_cnt + 1;
         }
 }
 
@@ -595,9 +593,11 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
         }
 
         /* Check for errors */
-        err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
-        if (err && !cqe_rx->rb_cnt)
-                return;
+        if (cqe_rx->err_level || cqe_rx->err_opcode) {
+                err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
+                if (err && !cqe_rx->rb_cnt)
+                        return;
+        }
 
         skb = nicvf_get_rcv_skb(snic, cqe_rx);
         if (!skb) {
@@ -646,6 +646,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
 {
         int processed_cqe, work_done = 0, tx_done = 0;
         int cqe_count, cqe_head;
+        int subdesc_cnt = 0;
         struct nicvf *nic = netdev_priv(netdev);
         struct queue_set *qs = nic->qs;
         struct cmp_queue *cq = &qs->cq[cq_idx];
@@ -667,8 +668,6 @@ loop:
         cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
         cqe_head &= 0xFFFF;
 
-        netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
-                   __func__, cq_idx, cqe_count, cqe_head);
         while (processed_cqe < cqe_count) {
                 /* Get the CQ descriptor */
                 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
@@ -682,17 +681,15 @@ loop:
                         break;
                 }
 
-                netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
-                           cq_idx, cq_desc->cqe_type);
                 switch (cq_desc->cqe_type) {
                 case CQE_TYPE_RX:
                         nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
                         work_done++;
                         break;
                 case CQE_TYPE_SEND:
-                        nicvf_snd_pkt_handler(netdev,
-                                              (void *)cq_desc, CQE_TYPE_SEND,
-                                              budget, &tx_pkts, &tx_bytes);
+                        nicvf_snd_pkt_handler(netdev, (void *)cq_desc,
+                                              budget, &subdesc_cnt,
+                                              &tx_pkts, &tx_bytes);
                         tx_done++;
                         break;
                 case CQE_TYPE_INVALID:
@@ -704,9 +701,6 @@ loop:
                 }
                 processed_cqe++;
         }
-        netdev_dbg(nic->netdev,
-                   "%s CQ%d processed_cqe %d work_done %d budget %d\n",
-                   __func__, cq_idx, processed_cqe, work_done, budget);
 
         /* Ring doorbell to inform H/W to reuse processed CQEs */
         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
@@ -716,8 +710,12 @@ loop:
                 goto loop;
 
 done:
-        /* Wakeup TXQ if its stopped earlier due to SQ full */
         sq = &nic->qs->sq[cq_idx];
+        /* Update SQ's descriptor free count */
+        if (subdesc_cnt)
+                nicvf_put_sq_desc(sq, subdesc_cnt);
+
+        /* Wakeup TXQ if its stopped earlier due to SQ full */
         if (tx_done ||
             (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) {
                 netdev = nic->pnicvf->netdev;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index dfc85a169127..90c5bc7d7344 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1640,9 +1640,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
 /* Check for errors in the receive cmp.queue entry */
 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 {
-        if (!cqe_rx->err_level && !cqe_rx->err_opcode)
-                return 0;
-
         if (netif_msg_rx_err(nic))
                 netdev_err(nic->netdev,
                            "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
@@ -1731,8 +1728,6 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
 {
         switch (cqe_tx->send_status) {
-        case CQ_TX_ERROP_GOOD:
-                return 0;
         case CQ_TX_ERROP_DESC_FAULT:
                 this_cpu_inc(nic->drv_stats->tx_desc_fault);
                 break;
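
The branch-reduction half of the change works the same way in both the TX
and RX paths: the caller tests the cheap status fields inline and only pays
for the out-of-line call when the CQE actually carries an error, which is
why the "no error" early returns (the err_level/err_opcode test and the
CQ_TX_ERROP_GOOD case) can be dropped from the check functions themselves.
A minimal sketch under the same caveats (hypothetical names, not the driver
code):

#include <stdio.h>

struct cqe_send {
        unsigned int send_status;       /* 0 means the CQE completed OK */
};

/* Out-of-line error accounting; reached only on the error path now,
 * so it no longer needs a "good status" early return of its own. */
static void check_tx_errs(const struct cqe_send *cqe)
{
        fprintf(stderr, "TX error CQE, status 0x%x\n", cqe->send_status);
}

static void snd_pkt_handler(const struct cqe_send *cqe)
{
        /* Hot path: one predictable branch, no call for good CQEs */
        if (cqe->send_status)
                check_tx_errs(cqe);

        /* ... normal skb/descriptor processing would follow ... */
}

int main(void)
{
        struct cqe_send good = { 0 }, bad = { 0x82 };

        snd_pkt_handler(&good);         /* silent */
        snd_pkt_handler(&bad);          /* logs the error */
        return 0;
}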