 drivers/net/ethernet/broadcom/bnxt/bnxt.c    | 180
 drivers/net/ethernet/broadcom/bnxt/bnxt.h    |   1
 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c |  33
 3 files changed, 118 insertions(+), 96 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1500243b9886..c7e5e6f09647 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1439,7 +1439,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
 		u16 vlan_proto = tpa_info->metadata >>
 			RX_CMP_FLAGS2_METADATA_TPID_SFT;
-		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
+		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;

 		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
 	}
@@ -1623,7 +1623,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
 	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
 		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
-		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
+		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
 		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

 		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
@@ -3847,6 +3847,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
 	struct hwrm_vnic_tpa_cfg_input req = {0};

+	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+		return 0;
+
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);

 	if (tpa_flags) {
@@ -4558,18 +4561,17 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
 	return rc;
 }

-static int
-bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
-			   int ring_grps, int cp_rings, int vnics)
+static void
+__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
+			     int tx_rings, int rx_rings, int ring_grps,
+			     int cp_rings, int vnics)
 {
-	struct hwrm_func_cfg_input req = {0};
 	u32 enables = 0;
-	int rc;

-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(0xffff);
+	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
+	req->fid = cpu_to_le16(0xffff);
 	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
-	req.num_tx_rings = cpu_to_le16(tx_rings);
+	req->num_tx_rings = cpu_to_le16(tx_rings);
 	if (bp->flags & BNXT_FLAG_NEW_RM) {
 		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
 		enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
@@ -4578,16 +4580,53 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 			   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
 		enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;

-		req.num_rx_rings = cpu_to_le16(rx_rings);
-		req.num_hw_ring_grps = cpu_to_le16(ring_grps);
-		req.num_cmpl_rings = cpu_to_le16(cp_rings);
-		req.num_stat_ctxs = req.num_cmpl_rings;
-		req.num_vnics = cpu_to_le16(vnics);
+		req->num_rx_rings = cpu_to_le16(rx_rings);
+		req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+		req->num_cmpl_rings = cpu_to_le16(cp_rings);
+		req->num_stat_ctxs = req->num_cmpl_rings;
+		req->num_vnics = cpu_to_le16(vnics);
 	}
-	if (!enables)
+	req->enables = cpu_to_le32(enables);
+}
+
+static void
+__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
+			     struct hwrm_func_vf_cfg_input *req, int tx_rings,
+			     int rx_rings, int ring_grps, int cp_rings,
+			     int vnics)
+{
+	u32 enables = 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
+	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+	enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+			      FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+	enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+	enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+	req->num_tx_rings = cpu_to_le16(tx_rings);
+	req->num_rx_rings = cpu_to_le16(rx_rings);
+	req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+	req->num_cmpl_rings = cpu_to_le16(cp_rings);
+	req->num_stat_ctxs = req->num_cmpl_rings;
+	req->num_vnics = cpu_to_le16(vnics);
+
+	req->enables = cpu_to_le32(enables);
+}
+
+static int
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+			   int ring_grps, int cp_rings, int vnics)
+{
+	struct hwrm_func_cfg_input req = {0};
+	int rc;
+
+	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+				     cp_rings, vnics);
+	if (!req.enables)
 		return 0;

-	req.enables = cpu_to_le32(enables);
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
 		return -ENOMEM;
@@ -4604,7 +4643,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 			   int ring_grps, int cp_rings, int vnics)
 {
 	struct hwrm_func_vf_cfg_input req = {0};
-	u32 enables = 0;
 	int rc;

 	if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
@@ -4612,22 +4650,8 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 		return 0;
 	}

-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
-	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
-	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
-	enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-		   FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
-	enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
-	enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
-
-	req.num_tx_rings = cpu_to_le16(tx_rings);
-	req.num_rx_rings = cpu_to_le16(rx_rings);
-	req.num_hw_ring_grps = cpu_to_le16(ring_grps);
-	req.num_cmpl_rings = cpu_to_le16(cp_rings);
-	req.num_stat_ctxs = req.num_cmpl_rings;
-	req.num_vnics = cpu_to_le16(vnics);
-
-	req.enables = cpu_to_le32(enables);
+	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+				     cp_rings, vnics);
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
 		return -ENOMEM;
@@ -4743,39 +4767,25 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
 }

 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
-				    int ring_grps, int cp_rings)
+				    int ring_grps, int cp_rings, int vnics)
 {
 	struct hwrm_func_vf_cfg_input req = {0};
-	u32 flags, enables;
+	u32 flags;
 	int rc;

 	if (!(bp->flags & BNXT_FLAG_NEW_RM))
 		return 0;

-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+				     cp_rings, vnics);
 	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
 		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
 		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
 		FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
 		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
 		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
-	enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
-		  FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
-		  FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-		  FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
-		  FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
-		  FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;

 	req.flags = cpu_to_le32(flags);
-	req.enables = cpu_to_le32(enables);
-	req.num_tx_rings = cpu_to_le16(tx_rings);
-	req.num_rx_rings = cpu_to_le16(rx_rings);
-	req.num_cmpl_rings = cpu_to_le16(cp_rings);
-	req.num_hw_ring_grps = cpu_to_le16(ring_grps);
-	req.num_stat_ctxs = cpu_to_le16(cp_rings);
-	req.num_vnics = cpu_to_le16(1);
-	if (bp->flags & BNXT_FLAG_RFS)
-		req.num_vnics = cpu_to_le16(rx_rings + 1);
 	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
 		return -ENOMEM;
@@ -4783,38 +4793,23 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 }

 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
-				    int ring_grps, int cp_rings)
+				    int ring_grps, int cp_rings, int vnics)
 {
 	struct hwrm_func_cfg_input req = {0};
-	u32 flags, enables;
+	u32 flags;
 	int rc;

-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(0xffff);
+	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+				     cp_rings, vnics);
 	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
-	enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS;
-	req.num_tx_rings = cpu_to_le16(tx_rings);
-	if (bp->flags & BNXT_FLAG_NEW_RM) {
+	if (bp->flags & BNXT_FLAG_NEW_RM)
 		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
 			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
 			 FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
 			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
 			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
-		enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
-			   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-			   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
-			   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
-			   FUNC_CFG_REQ_ENABLES_NUM_VNICS;
-		req.num_rx_rings = cpu_to_le16(rx_rings);
-		req.num_cmpl_rings = cpu_to_le16(cp_rings);
-		req.num_hw_ring_grps = cpu_to_le16(ring_grps);
-		req.num_stat_ctxs = cpu_to_le16(cp_rings);
-		req.num_vnics = cpu_to_le16(1);
-		if (bp->flags & BNXT_FLAG_RFS)
-			req.num_vnics = cpu_to_le16(rx_rings + 1);
-	}
+
 	req.flags = cpu_to_le32(flags);
-	req.enables = cpu_to_le32(enables);
 	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
 		return -ENOMEM;
@@ -4822,17 +4817,17 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 }

 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
-				 int ring_grps, int cp_rings)
+				 int ring_grps, int cp_rings, int vnics)
 {
 	if (bp->hwrm_spec_code < 0x10801)
 		return 0;

 	if (BNXT_PF(bp))
 		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
-						ring_grps, cp_rings);
+						ring_grps, cp_rings, vnics);

 	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
-					cp_rings);
+					cp_rings, vnics);
 }

 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
@@ -5865,7 +5860,6 @@ static int bnxt_init_msix(struct bnxt *bp)
 		if (rc)
 			goto msix_setup_exit;

-		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
 		bp->cp_nr_rings = (min == 1) ?
 				  max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
 				  bp->tx_nr_rings + bp->rx_nr_rings;
@@ -5897,7 +5891,6 @@ static int bnxt_init_inta(struct bnxt *bp)
 	bp->rx_nr_rings = 1;
 	bp->tx_nr_rings = 1;
 	bp->cp_nr_rings = 1;
-	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
 	bp->flags |= BNXT_FLAG_SHARED_RINGS;
 	bp->irq_tbl[0].vector = bp->pdev->irq;
 	return 0;
@@ -7531,7 +7524,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 	int max_rx, max_tx, tx_sets = 1;
 	int tx_rings_needed;
 	int rx_rings = rx;
-	int cp, rc;
+	int cp, vnics, rc;

 	if (tcs)
 		tx_sets = tcs;
@@ -7547,10 +7540,15 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 	if (max_tx < tx_rings_needed)
 		return -ENOMEM;

+	vnics = 1;
+	if (bp->flags & BNXT_FLAG_RFS)
+		vnics += rx_rings;
+
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		rx_rings <<= 1;
 	cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
-	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp);
+	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
+				     vnics);
 }

 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -8437,13 +8435,20 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
 		return 0;

 	bnxt_hwrm_func_qcaps(bp);
-	__bnxt_close_nic(bp, true, false);
+
+	if (netif_running(bp->dev))
+		__bnxt_close_nic(bp, true, false);
+
 	bnxt_clear_int_mode(bp);
 	rc = bnxt_init_int_mode(bp);
-	if (rc)
-		dev_close(bp->dev);
-	else
-		rc = bnxt_open_nic(bp, true, false);
+
+	if (netif_running(bp->dev)) {
+		if (rc)
+			dev_close(bp->dev);
+		else
+			rc = bnxt_open_nic(bp, true, false);
+	}
+
 	return rc;
 }

@@ -8664,6 +8669,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto init_err_pci_clean;

+	/* No TC has been set yet and rings may have been trimmed due to
+	 * limited MSIX, so we re-initialize the TX rings per TC.
+	 */
+	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+
 	bnxt_get_wol_settings(bp);
 	if (bp->flags & BNXT_FLAG_WOL_CAP)
 		device_set_wakeup_enable(&pdev->dev, bp->wol);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 1989c470172c..5e3d62189cab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -189,6 +189,7 @@ struct rx_cmp_ext {
 	#define RX_CMP_FLAGS2_T_L4_CS_CALC		(0x1 << 3)
 	#define RX_CMP_FLAGS2_META_FORMAT_VLAN		(0x1 << 4)
 	__le32 rx_cmp_meta_data;
+	#define RX_CMP_FLAGS2_METADATA_TCI_MASK		0xffff
 	#define RX_CMP_FLAGS2_METADATA_VID_MASK		0xfff
 	#define RX_CMP_FLAGS2_METADATA_TPID_MASK	0xffff0000
 	#define RX_CMP_FLAGS2_METADATA_TPID_SFT		16
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index fbe6e208e17b..65c2cee35766 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -349,6 +349,9 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
 	if (rc)
 		netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
 			    __func__, flow_handle, rc);
+
+	if (rc)
+		rc = -EIO;
 	return rc;
 }

@@ -484,13 +487,15 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
 	req.action_flags = cpu_to_le16(action_flags);

 	mutex_lock(&bp->hwrm_cmd_lock);
-
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc)
 		*flow_handle = resp->flow_handle;
-
 	mutex_unlock(&bp->hwrm_cmd_lock);

+	if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
+		rc = -ENOSPC;
+	else if (rc)
+		rc = -EIO;
 	return rc;
 }

@@ -561,6 +566,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
 		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
 	mutex_unlock(&bp->hwrm_cmd_lock);

+	if (rc)
+		rc = -EIO;
 	return rc;
 }

@@ -576,6 +583,9 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
 		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+
+	if (rc)
+		rc = -EIO;
 	return rc;
 }

@@ -624,6 +634,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
 		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
 	mutex_unlock(&bp->hwrm_cmd_lock);

+	if (rc)
+		rc = -EIO;
 	return rc;
 }

@@ -639,6 +651,9 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp,
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
 		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+
+	if (rc)
+		rc = -EIO;
 	return rc;
 }

@@ -1269,11 +1284,8 @@ static int bnxt_tc_del_flow(struct bnxt *bp,
 	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
 					   &tc_flow_cmd->cookie,
 					   tc_info->flow_ht_params);
-	if (!flow_node) {
-		netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx",
-			    tc_flow_cmd->cookie);
+	if (!flow_node)
 		return -EINVAL;
-	}

 	return __bnxt_tc_del_flow(bp, flow_node);
 }
@@ -1290,11 +1302,8 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
 	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
 					   &tc_flow_cmd->cookie,
 					   tc_info->flow_ht_params);
-	if (!flow_node) {
-		netdev_info(bp->dev, "Error: no flow_node for cookie %lx",
-			    tc_flow_cmd->cookie);
+	if (!flow_node)
 		return -1;
-	}

 	flow = &flow_node->flow;
 	curr_stats = &flow->stats;
@@ -1344,8 +1353,10 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
 	} else {
 		netdev_info(bp->dev, "error rc=%d", rc);
 	}
-
 	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	if (rc)
+		rc = -EIO;
 	return rc;
 }
