about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/intel/ice/ice_lib.c
diff options
context:
space:
mode:
authorUsha Ketineni <usha.k.ketineni@intel.com>2018-10-26 14:44:35 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2018-11-13 12:09:26 -0500
commitc5a2a4a38856faed6fa6654746c838231289e8d6 (patch)
treeb2738c710b3c7ad85b4073fdfdb6e7d09de4ad32 /drivers/net/ethernet/intel/ice/ice_lib.c
parent99fc1057b4d4e2a95c24b3b1ea4d6140eb2712a2 (diff)
ice: Fix to make VLAN priority tagged traffic to appear on all TCs
This patch includes the below changes to resolve the issue of ETS bandwidth shaping not working. 1. Allocation of Tx queues is accounted for based on the enabled TCs in ice_vsi_setup_q_map() and the Tx queues on those TCs are enabled via ice_vsi_cfg_txqs() 2. Get the mapped netdev TC # for the user priority and set the priority to TC mapping for the VSI. Signed-off-by: Usha Ketineni <usha.k.ketineni@intel.com> Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com> Tested-by: Andrew Bowers <andrewx.bowers@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_lib.c')
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c121
1 file changed, 73 insertions, 48 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index f6e21363c8d6..597005f39919 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -774,11 +774,13 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
774 */ 774 */
775static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) 775static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
776{ 776{
777 u16 offset = 0, qmap = 0, numq_tc; 777 u16 offset = 0, qmap = 0, tx_count = 0;
778 u16 pow = 0, max_rss = 0, qcount;
779 u16 qcount_tx = vsi->alloc_txq; 778 u16 qcount_tx = vsi->alloc_txq;
780 u16 qcount_rx = vsi->alloc_rxq; 779 u16 qcount_rx = vsi->alloc_rxq;
780 u16 tx_numq_tc, rx_numq_tc;
781 u16 pow = 0, max_rss = 0;
781 bool ena_tc0 = false; 782 bool ena_tc0 = false;
783 u8 netdev_tc = 0;
782 int i; 784 int i;
783 785
784 /* at least TC0 should be enabled by default */ 786 /* at least TC0 should be enabled by default */
@@ -794,7 +796,12 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
794 vsi->tc_cfg.ena_tc |= 1; 796 vsi->tc_cfg.ena_tc |= 1;
795 } 797 }
796 798
797 numq_tc = qcount_rx / vsi->tc_cfg.numtc; 799 rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
800 if (!rx_numq_tc)
801 rx_numq_tc = 1;
802 tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
803 if (!tx_numq_tc)
804 tx_numq_tc = 1;
798 805
799 /* TC mapping is a function of the number of Rx queues assigned to the 806 /* TC mapping is a function of the number of Rx queues assigned to the
800 * VSI for each traffic class and the offset of these queues. 807 * VSI for each traffic class and the offset of these queues.
@@ -808,7 +815,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
808 * Setup number and offset of Rx queues for all TCs for the VSI 815 * Setup number and offset of Rx queues for all TCs for the VSI
809 */ 816 */
810 817
811 qcount = numq_tc; 818 qcount_rx = rx_numq_tc;
819
812 /* qcount will change if RSS is enabled */ 820 /* qcount will change if RSS is enabled */
813 if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) { 821 if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
814 if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) { 822 if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
@@ -816,37 +824,41 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
816 max_rss = ICE_MAX_LG_RSS_QS; 824 max_rss = ICE_MAX_LG_RSS_QS;
817 else 825 else
818 max_rss = ICE_MAX_SMALL_RSS_QS; 826 max_rss = ICE_MAX_SMALL_RSS_QS;
819 qcount = min_t(int, numq_tc, max_rss); 827 qcount_rx = min_t(int, rx_numq_tc, max_rss);
820 qcount = min_t(int, qcount, vsi->rss_size); 828 qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
821 } 829 }
822 } 830 }
823 831
824 /* find the (rounded up) power-of-2 of qcount */ 832 /* find the (rounded up) power-of-2 of qcount */
825 pow = order_base_2(qcount); 833 pow = order_base_2(qcount_rx);
826 834
827 for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { 835 for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
828 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { 836 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
829 /* TC is not enabled */ 837 /* TC is not enabled */
830 vsi->tc_cfg.tc_info[i].qoffset = 0; 838 vsi->tc_cfg.tc_info[i].qoffset = 0;
831 vsi->tc_cfg.tc_info[i].qcount = 1; 839 vsi->tc_cfg.tc_info[i].qcount_rx = 1;
840 vsi->tc_cfg.tc_info[i].qcount_tx = 1;
841 vsi->tc_cfg.tc_info[i].netdev_tc = 0;
832 ctxt->info.tc_mapping[i] = 0; 842 ctxt->info.tc_mapping[i] = 0;
833 continue; 843 continue;
834 } 844 }
835 845
836 /* TC is enabled */ 846 /* TC is enabled */
837 vsi->tc_cfg.tc_info[i].qoffset = offset; 847 vsi->tc_cfg.tc_info[i].qoffset = offset;
838 vsi->tc_cfg.tc_info[i].qcount = qcount; 848 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
849 vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
850 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
839 851
840 qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & 852 qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
841 ICE_AQ_VSI_TC_Q_OFFSET_M) | 853 ICE_AQ_VSI_TC_Q_OFFSET_M) |
842 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & 854 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
843 ICE_AQ_VSI_TC_Q_NUM_M); 855 ICE_AQ_VSI_TC_Q_NUM_M);
844 offset += qcount; 856 offset += qcount_rx;
857 tx_count += tx_numq_tc;
845 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); 858 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
846 } 859 }
847
848 vsi->num_txq = qcount_tx;
849 vsi->num_rxq = offset; 860 vsi->num_rxq = offset;
861 vsi->num_txq = tx_count;
850 862
851 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { 863 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
852 dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); 864 dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
@@ -1611,10 +1623,10 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
1611 struct ice_aqc_add_tx_qgrp *qg_buf; 1623 struct ice_aqc_add_tx_qgrp *qg_buf;
1612 struct ice_aqc_add_txqs_perq *txq; 1624 struct ice_aqc_add_txqs_perq *txq;
1613 struct ice_pf *pf = vsi->back; 1625 struct ice_pf *pf = vsi->back;
1626 u8 num_q_grps, q_idx = 0;
1614 enum ice_status status; 1627 enum ice_status status;
1615 u16 buf_len, i, pf_q; 1628 u16 buf_len, i, pf_q;
1616 int err = 0, tc = 0; 1629 int err = 0, tc = 0;
1617 u8 num_q_grps;
1618 1630
1619 buf_len = sizeof(struct ice_aqc_add_tx_qgrp); 1631 buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
1620 qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); 1632 qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
@@ -1628,38 +1640,49 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
1628 qg_buf->num_txqs = 1; 1640 qg_buf->num_txqs = 1;
1629 num_q_grps = 1; 1641 num_q_grps = 1;
1630 1642
1631 /* set up and configure the Tx queues */ 1643 /* set up and configure the Tx queues for each enabled TC */
1632 ice_for_each_txq(vsi, i) { 1644 for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
1633 struct ice_tlan_ctx tlan_ctx = { 0 }; 1645 if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
1646 break;
1634 1647
1635 pf_q = vsi->txq_map[i]; 1648 for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
1636 ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q); 1649 struct ice_tlan_ctx tlan_ctx = { 0 };
1637 /* copy context contents into the qg_buf */ 1650
1638 qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); 1651 pf_q = vsi->txq_map[q_idx];
1639 ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, 1652 ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx,
1640 ice_tlan_ctx_info); 1653 pf_q);
1654 /* copy context contents into the qg_buf */
1655 qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
1656 ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
1657 ice_tlan_ctx_info);
1658
1659 /* init queue specific tail reg. It is referred as
1660 * transmit comm scheduler queue doorbell.
1661 */
1662 vsi->tx_rings[q_idx]->tail =
1663 pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
1664 status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
1665 num_q_grps, qg_buf, buf_len,
1666 NULL);
1667 if (status) {
1668 dev_err(&vsi->back->pdev->dev,
1669 "Failed to set LAN Tx queue context, error: %d\n",
1670 status);
1671 err = -ENODEV;
1672 goto err_cfg_txqs;
1673 }
1641 1674
1642 /* init queue specific tail reg. It is referred as transmit 1675 /* Add Tx Queue TEID into the VSI Tx ring from the
1643 * comm scheduler queue doorbell. 1676 * response. This will complete configuring and
1644 */ 1677 * enabling the queue.
1645 vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); 1678 */
1646 status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, 1679 txq = &qg_buf->txqs[0];
1647 num_q_grps, qg_buf, buf_len, NULL); 1680 if (pf_q == le16_to_cpu(txq->txq_id))
1648 if (status) { 1681 vsi->tx_rings[q_idx]->txq_teid =
1649 dev_err(&vsi->back->pdev->dev, 1682 le32_to_cpu(txq->q_teid);
1650 "Failed to set LAN Tx queue context, error: %d\n",
1651 status);
1652 err = -ENODEV;
1653 goto err_cfg_txqs;
1654 }
1655 1683
1656 /* Add Tx Queue TEID into the VSI Tx ring from the response 1684 q_idx++;
1657 * This will complete configuring and enabling the queue. 1685 }
1658 */
1659 txq = &qg_buf->txqs[0];
1660 if (pf_q == le16_to_cpu(txq->txq_id))
1661 vsi->tx_rings[i]->txq_teid =
1662 le32_to_cpu(txq->q_teid);
1663 } 1686 }
1664err_cfg_txqs: 1687err_cfg_txqs:
1665 devm_kfree(&pf->pdev->dev, qg_buf); 1688 devm_kfree(&pf->pdev->dev, qg_buf);
@@ -2057,6 +2080,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
2057 /* set RSS capabilities */ 2080 /* set RSS capabilities */
2058 ice_vsi_set_rss_params(vsi); 2081 ice_vsi_set_rss_params(vsi);
2059 2082
2083 /* set tc configuration */
2084 ice_vsi_set_tc_cfg(vsi);
2085
2060 /* create the VSI */ 2086 /* create the VSI */
2061 ret = ice_vsi_init(vsi); 2087 ret = ice_vsi_init(vsi);
2062 if (ret) 2088 if (ret)
@@ -2120,11 +2146,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
2120 goto unroll_vsi_init; 2146 goto unroll_vsi_init;
2121 } 2147 }
2122 2148
2123 ice_vsi_set_tc_cfg(vsi);
2124
2125 /* configure VSI nodes based on number of queues and TC's */ 2149 /* configure VSI nodes based on number of queues and TC's */
2126 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2150 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2127 max_txqs[i] = vsi->num_txq; 2151 max_txqs[i] = pf->num_lan_tx;
2128 2152
2129 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2153 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2130 max_txqs); 2154 max_txqs);
@@ -2520,11 +2544,13 @@ int ice_vsi_release(struct ice_vsi *vsi)
2520int ice_vsi_rebuild(struct ice_vsi *vsi) 2544int ice_vsi_rebuild(struct ice_vsi *vsi)
2521{ 2545{
2522 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2546 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2547 struct ice_pf *pf;
2523 int ret, i; 2548 int ret, i;
2524 2549
2525 if (!vsi) 2550 if (!vsi)
2526 return -EINVAL; 2551 return -EINVAL;
2527 2552
2553 pf = vsi->back;
2528 ice_vsi_free_q_vectors(vsi); 2554 ice_vsi_free_q_vectors(vsi);
2529 ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); 2555 ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
2530 ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx); 2556 ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
@@ -2534,6 +2560,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
2534 ice_vsi_free_arrays(vsi, false); 2560 ice_vsi_free_arrays(vsi, false);
2535 ice_dev_onetime_setup(&vsi->back->hw); 2561 ice_dev_onetime_setup(&vsi->back->hw);
2536 ice_vsi_set_num_qs(vsi); 2562 ice_vsi_set_num_qs(vsi);
2563 ice_vsi_set_tc_cfg(vsi);
2537 2564
2538 /* Initialize VSI struct elements and create VSI in FW */ 2565 /* Initialize VSI struct elements and create VSI in FW */
2539 ret = ice_vsi_init(vsi); 2566 ret = ice_vsi_init(vsi);
@@ -2580,11 +2607,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
2580 break; 2607 break;
2581 } 2608 }
2582 2609
2583 ice_vsi_set_tc_cfg(vsi);
2584
2585 /* configure VSI nodes based on number of queues and TC's */ 2610 /* configure VSI nodes based on number of queues and TC's */
2586 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2611 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2587 max_txqs[i] = vsi->num_txq; 2612 max_txqs[i] = pf->num_lan_tx;
2588 2613
2589 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2614 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2590 max_txqs); 2615 max_txqs);