aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/intel/ice
diff options
context:
space:
mode:
authorBrett Creeley <brett.creeley@intel.com>2018-09-19 20:43:05 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2018-10-03 10:42:30 -0400
commitd2b464a7ff6cda3e1d4eb070ed6558f0cd152d1c (patch)
treea979bdbfafa97c79746f52c1dcae6243beefa2bd /drivers/net/ethernet/intel/ice
parent072f0c3db9daf7b57dfe0a5e2a5ccf42f71982f4 (diff)
ice: Add more flexibility on how we assign an ITR index
This issue came about when looking at the VF function ice_vc_cfg_irq_map_msg. Currently we are assigning the itr_setting value to the itr_idx received from the AVF driver, which is not correct and is not used for the VF flow anyway. Currently the only way we set the ITR index for both the PF and VF driver is by hard coding ICE_TX_ITR or ICE_RX_ITR for the ITR index on each q_vector. To fix this, add the member itr_idx in struct ice_ring_container. This can then be used to dynamically program the correct ITR index. This change also affected the PF driver so make the necessary changes there as well. Also, removed the itr_setting member in struct ice_ring because it is not being used meaningfully and is going to be removed in a future patch that includes dynamic ITR. On another note, this will be useful moving forward if we decide to split Rx/Tx rings on different q_vectors instead of sharing them as queue pairs. Signed-off-by: Brett Creeley <brett.creeley@intel.com> Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com> Tested-by: Andrew Bowers <andrewx.bowers@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c73
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c20
3 files changed, 59 insertions(+), 47 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 8139302cd92b..49f1940772ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1204,7 +1204,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
1204 ring->vsi = vsi; 1204 ring->vsi = vsi;
1205 ring->dev = &pf->pdev->dev; 1205 ring->dev = &pf->pdev->dev;
1206 ring->count = vsi->num_desc; 1206 ring->count = vsi->num_desc;
1207 ring->itr_setting = ICE_DFLT_TX_ITR;
1208 vsi->tx_rings[i] = ring; 1207 vsi->tx_rings[i] = ring;
1209 } 1208 }
1210 1209
@@ -1224,7 +1223,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
1224 ring->netdev = vsi->netdev; 1223 ring->netdev = vsi->netdev;
1225 ring->dev = &pf->pdev->dev; 1224 ring->dev = &pf->pdev->dev;
1226 ring->count = vsi->num_desc; 1225 ring->count = vsi->num_desc;
1227 ring->itr_setting = ICE_DFLT_RX_ITR;
1228 vsi->rx_rings[i] = ring; 1226 vsi->rx_rings[i] = ring;
1229 } 1227 }
1230 1228
@@ -1261,6 +1259,7 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
1261 tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); 1259 tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
1262 q_vector->num_ring_tx = tx_rings_per_v; 1260 q_vector->num_ring_tx = tx_rings_per_v;
1263 q_vector->tx.ring = NULL; 1261 q_vector->tx.ring = NULL;
1262 q_vector->tx.itr_idx = ICE_TX_ITR;
1264 q_base = vsi->num_txq - tx_rings_rem; 1263 q_base = vsi->num_txq - tx_rings_rem;
1265 1264
1266 for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { 1265 for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
@@ -1276,6 +1275,7 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
1276 rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); 1275 rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
1277 q_vector->num_ring_rx = rx_rings_per_v; 1276 q_vector->num_ring_rx = rx_rings_per_v;
1278 q_vector->rx.ring = NULL; 1277 q_vector->rx.ring = NULL;
1278 q_vector->rx.itr_idx = ICE_RX_ITR;
1279 q_base = vsi->num_rxq - rx_rings_rem; 1279 q_base = vsi->num_rxq - rx_rings_rem;
1280 1280
1281 for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { 1281 for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
@@ -1684,6 +1684,37 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
1684} 1684}
1685 1685
1686/** 1686/**
1687 * ice_cfg_itr - configure the initial interrupt throttle values
1688 * @hw: pointer to the HW structure
1689 * @q_vector: interrupt vector that's being configured
1690 * @vector: HW vector index to apply the interrupt throttling to
1691 *
1692 * Configure interrupt throttling values for the ring containers that are
1693 * associated with the interrupt vector passed in.
1694 */
1695static void
1696ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
1697{
1698 u8 itr_gran = hw->itr_gran;
1699
1700 if (q_vector->num_ring_rx) {
1701 struct ice_ring_container *rc = &q_vector->rx;
1702
1703 rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran);
1704 rc->latency_range = ICE_LOW_LATENCY;
1705 wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
1706 }
1707
1708 if (q_vector->num_ring_tx) {
1709 struct ice_ring_container *rc = &q_vector->tx;
1710
1711 rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran);
1712 rc->latency_range = ICE_LOW_LATENCY;
1713 wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
1714 }
1715}
1716
1717/**
1687 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW 1718 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
1688 * @vsi: the VSI being configured 1719 * @vsi: the VSI being configured
1689 */ 1720 */
@@ -1693,31 +1724,13 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
1693 u16 vector = vsi->hw_base_vector; 1724 u16 vector = vsi->hw_base_vector;
1694 struct ice_hw *hw = &pf->hw; 1725 struct ice_hw *hw = &pf->hw;
1695 u32 txq = 0, rxq = 0; 1726 u32 txq = 0, rxq = 0;
1696 int i, q, itr; 1727 int i, q;
1697 u8 itr_gran;
1698 1728
1699 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { 1729 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
1700 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 1730 struct ice_q_vector *q_vector = vsi->q_vectors[i];
1701 1731
1702 itr_gran = hw->itr_gran; 1732 ice_cfg_itr(hw, q_vector, vector);
1703 1733
1704 q_vector->intrl = ICE_DFLT_INTRL;
1705
1706 if (q_vector->num_ring_rx) {
1707 q_vector->rx.itr =
1708 ITR_TO_REG(vsi->rx_rings[rxq]->itr_setting,
1709 itr_gran);
1710 q_vector->rx.latency_range = ICE_LOW_LATENCY;
1711 }
1712
1713 if (q_vector->num_ring_tx) {
1714 q_vector->tx.itr =
1715 ITR_TO_REG(vsi->tx_rings[txq]->itr_setting,
1716 itr_gran);
1717 q_vector->tx.latency_range = ICE_LOW_LATENCY;
1718 }
1719 wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
1720 wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);
1721 wr32(hw, GLINT_RATE(vector), 1734 wr32(hw, GLINT_RATE(vector),
1722 ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); 1735 ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
1723 1736
@@ -1733,32 +1746,32 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
1733 * tracked for this PF. 1746 * tracked for this PF.
1734 */ 1747 */
1735 for (q = 0; q < q_vector->num_ring_tx; q++) { 1748 for (q = 0; q < q_vector->num_ring_tx; q++) {
1749 int itr_idx = q_vector->tx.itr_idx;
1736 u32 val; 1750 u32 val;
1737 1751
1738 itr = ICE_ITR_NONE;
1739 if (vsi->type == ICE_VSI_VF) 1752 if (vsi->type == ICE_VSI_VF)
1740 val = QINT_TQCTL_CAUSE_ENA_M | 1753 val = QINT_TQCTL_CAUSE_ENA_M |
1741 (itr << QINT_TQCTL_ITR_INDX_S) | 1754 (itr_idx << QINT_TQCTL_ITR_INDX_S) |
1742 ((i + 1) << QINT_TQCTL_MSIX_INDX_S); 1755 ((i + 1) << QINT_TQCTL_MSIX_INDX_S);
1743 else 1756 else
1744 val = QINT_TQCTL_CAUSE_ENA_M | 1757 val = QINT_TQCTL_CAUSE_ENA_M |
1745 (itr << QINT_TQCTL_ITR_INDX_S) | 1758 (itr_idx << QINT_TQCTL_ITR_INDX_S) |
1746 (vector << QINT_TQCTL_MSIX_INDX_S); 1759 (vector << QINT_TQCTL_MSIX_INDX_S);
1747 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); 1760 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
1748 txq++; 1761 txq++;
1749 } 1762 }
1750 1763
1751 for (q = 0; q < q_vector->num_ring_rx; q++) { 1764 for (q = 0; q < q_vector->num_ring_rx; q++) {
1765 int itr_idx = q_vector->rx.itr_idx;
1752 u32 val; 1766 u32 val;
1753 1767
1754 itr = ICE_ITR_NONE;
1755 if (vsi->type == ICE_VSI_VF) 1768 if (vsi->type == ICE_VSI_VF)
1756 val = QINT_RQCTL_CAUSE_ENA_M | 1769 val = QINT_RQCTL_CAUSE_ENA_M |
1757 (itr << QINT_RQCTL_ITR_INDX_S) | 1770 (itr_idx << QINT_RQCTL_ITR_INDX_S) |
1758 ((i + 1) << QINT_RQCTL_MSIX_INDX_S); 1771 ((i + 1) << QINT_RQCTL_MSIX_INDX_S);
1759 else 1772 else
1760 val = QINT_RQCTL_CAUSE_ENA_M | 1773 val = QINT_RQCTL_CAUSE_ENA_M |
1761 (itr << QINT_RQCTL_ITR_INDX_S) | 1774 (itr_idx << QINT_RQCTL_ITR_INDX_S) |
1762 (vector << QINT_RQCTL_MSIX_INDX_S); 1775 (vector << QINT_RQCTL_MSIX_INDX_S);
1763 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); 1776 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
1764 rxq++; 1777 rxq++;
@@ -2157,8 +2170,8 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
2157 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { 2170 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2158 struct ice_q_vector *q_vector = vsi->q_vectors[i]; 2171 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2159 2172
2160 wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0); 2173 wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0);
2161 wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0); 2174 wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0);
2162 for (q = 0; q < q_vector->num_ring_tx; q++) { 2175 for (q = 0; q < q_vector->num_ring_tx; q++) {
2163 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); 2176 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
2164 txq++; 2177 txq++;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index a9b92974e041..1d0f58bd389b 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -105,8 +105,9 @@ enum ice_rx_dtype {
105#define ICE_TX_ITR ICE_IDX_ITR1 105#define ICE_TX_ITR ICE_IDX_ITR1
106#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ 106#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
107#define ICE_ITR_8K 125 107#define ICE_ITR_8K 125
108#define ICE_DFLT_TX_ITR ICE_ITR_8K 108#define ICE_ITR_20K 50
109#define ICE_DFLT_RX_ITR ICE_ITR_8K 109#define ICE_DFLT_TX_ITR ICE_ITR_20K
110#define ICE_DFLT_RX_ITR ICE_ITR_20K
110/* apply ITR granularity translation to program the register. itr_gran is either 111/* apply ITR granularity translation to program the register. itr_gran is either
111 * 2 or 4 usecs so we need to divide by 2 first then shift by that value 112 * 2 or 4 usecs so we need to divide by 2 first then shift by that value
112 */ 113 */
@@ -135,13 +136,6 @@ struct ice_ring {
135 u16 q_index; /* Queue number of ring */ 136 u16 q_index; /* Queue number of ring */
136 u32 txq_teid; /* Added Tx queue TEID */ 137 u32 txq_teid; /* Added Tx queue TEID */
137 138
138 /* high bit set means dynamic, use accessor routines to read/write.
139 * hardware supports 4us/2us resolution for the ITR registers.
140 * these values always store the USER setting, and must be converted
141 * before programming to a register.
142 */
143 u16 itr_setting;
144
145 u16 count; /* Number of descriptors */ 139 u16 count; /* Number of descriptors */
146 u16 reg_idx; /* HW register index of the ring */ 140 u16 reg_idx; /* HW register index of the ring */
147 141
@@ -178,6 +172,7 @@ struct ice_ring_container {
178 unsigned int total_bytes; /* total bytes processed this int */ 172 unsigned int total_bytes; /* total bytes processed this int */
179 unsigned int total_pkts; /* total packets processed this int */ 173 unsigned int total_pkts; /* total packets processed this int */
180 enum ice_latency_range latency_range; 174 enum ice_latency_range latency_range;
175 int itr_idx; /* index in the interrupt vector */
181 u16 itr; 176 u16 itr;
182}; 177};
183 178
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 20de2034e153..c25e486706f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -1678,26 +1678,30 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
1678 /* lookout for the invalid queue index */ 1678 /* lookout for the invalid queue index */
1679 qmap = map->rxq_map; 1679 qmap = map->rxq_map;
1680 for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) { 1680 for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
1681 struct ice_q_vector *q_vector;
1682
1681 if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { 1683 if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
1682 aq_ret = ICE_ERR_PARAM; 1684 aq_ret = ICE_ERR_PARAM;
1683 goto error_param; 1685 goto error_param;
1684 } 1686 }
1685 vsi->q_vectors[i]->num_ring_rx++; 1687 q_vector = vsi->q_vectors[i];
1686 vsi->rx_rings[vsi_q_id]->itr_setting = 1688 q_vector->num_ring_rx++;
1687 map->rxitr_idx; 1689 q_vector->rx.itr_idx = map->rxitr_idx;
1688 vsi->rx_rings[vsi_q_id]->q_vector = vsi->q_vectors[i]; 1690 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
1689 } 1691 }
1690 1692
1691 qmap = map->txq_map; 1693 qmap = map->txq_map;
1692 for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) { 1694 for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
1695 struct ice_q_vector *q_vector;
1696
1693 if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { 1697 if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
1694 aq_ret = ICE_ERR_PARAM; 1698 aq_ret = ICE_ERR_PARAM;
1695 goto error_param; 1699 goto error_param;
1696 } 1700 }
1697 vsi->q_vectors[i]->num_ring_tx++; 1701 q_vector = vsi->q_vectors[i];
1698 vsi->tx_rings[vsi_q_id]->itr_setting = 1702 q_vector->num_ring_tx++;
1699 map->txitr_idx; 1703 q_vector->tx.itr_idx = map->txitr_idx;
1700 vsi->tx_rings[vsi_q_id]->q_vector = vsi->q_vectors[i]; 1704 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
1701 } 1705 }
1702 } 1706 }
1703 1707