diff options
author | Brett Creeley <brett.creeley@intel.com> | 2018-09-19 20:43:05 -0400 |
---|---|---|
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2018-10-03 10:42:30 -0400 |
commit | d2b464a7ff6cda3e1d4eb070ed6558f0cd152d1c (patch) | |
tree | a979bdbfafa97c79746f52c1dcae6243beefa2bd /drivers/net/ethernet/intel/ice/ice_lib.c | |
parent | 072f0c3db9daf7b57dfe0a5e2a5ccf42f71982f4 (diff) |
ice: Add more flexibility on how we assign an ITR index
This issue came about when looking at the VF function
ice_vc_cfg_irq_map_msg. Currently we are assigning the itr_setting value
to the itr_idx received from the AVF driver, which is not correct and is
not used for the VF flow anyway. Currently the only way we set the ITR
index for both the PF and VF driver is by hard coding ICE_TX_ITR or
ICE_RX_ITR for the ITR index on each q_vector.
To fix this, add the member itr_idx in struct ice_ring_container. This
can then be used to dynamically program the correct ITR index. This change
also affected the PF driver so make the necessary changes there as well.
Also, remove the itr_setting member in struct ice_ring because it is not
being used meaningfully; it is going to be removed in a future patch that
includes dynamic ITR.
On another note, this will be useful moving forward if we decide to split
Rx/Tx rings on different q_vectors instead of sharing them as queue pairs.
Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_lib.c')
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_lib.c | 73 |
1 file changed, 43 insertions(+), 30 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 8139302cd92b..49f1940772ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c | |||
@@ -1204,7 +1204,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) | |||
1204 | ring->vsi = vsi; | 1204 | ring->vsi = vsi; |
1205 | ring->dev = &pf->pdev->dev; | 1205 | ring->dev = &pf->pdev->dev; |
1206 | ring->count = vsi->num_desc; | 1206 | ring->count = vsi->num_desc; |
1207 | ring->itr_setting = ICE_DFLT_TX_ITR; | ||
1208 | vsi->tx_rings[i] = ring; | 1207 | vsi->tx_rings[i] = ring; |
1209 | } | 1208 | } |
1210 | 1209 | ||
@@ -1224,7 +1223,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) | |||
1224 | ring->netdev = vsi->netdev; | 1223 | ring->netdev = vsi->netdev; |
1225 | ring->dev = &pf->pdev->dev; | 1224 | ring->dev = &pf->pdev->dev; |
1226 | ring->count = vsi->num_desc; | 1225 | ring->count = vsi->num_desc; |
1227 | ring->itr_setting = ICE_DFLT_RX_ITR; | ||
1228 | vsi->rx_rings[i] = ring; | 1226 | vsi->rx_rings[i] = ring; |
1229 | } | 1227 | } |
1230 | 1228 | ||
@@ -1261,6 +1259,7 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) | |||
1261 | tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); | 1259 | tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); |
1262 | q_vector->num_ring_tx = tx_rings_per_v; | 1260 | q_vector->num_ring_tx = tx_rings_per_v; |
1263 | q_vector->tx.ring = NULL; | 1261 | q_vector->tx.ring = NULL; |
1262 | q_vector->tx.itr_idx = ICE_TX_ITR; | ||
1264 | q_base = vsi->num_txq - tx_rings_rem; | 1263 | q_base = vsi->num_txq - tx_rings_rem; |
1265 | 1264 | ||
1266 | for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { | 1265 | for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { |
@@ -1276,6 +1275,7 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) | |||
1276 | rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); | 1275 | rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); |
1277 | q_vector->num_ring_rx = rx_rings_per_v; | 1276 | q_vector->num_ring_rx = rx_rings_per_v; |
1278 | q_vector->rx.ring = NULL; | 1277 | q_vector->rx.ring = NULL; |
1278 | q_vector->rx.itr_idx = ICE_RX_ITR; | ||
1279 | q_base = vsi->num_rxq - rx_rings_rem; | 1279 | q_base = vsi->num_rxq - rx_rings_rem; |
1280 | 1280 | ||
1281 | for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { | 1281 | for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { |
@@ -1684,6 +1684,37 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) | |||
1684 | } | 1684 | } |
1685 | 1685 | ||
1686 | /** | 1686 | /** |
1687 | * ice_cfg_itr - configure the initial interrupt throttle values | ||
1688 | * @hw: pointer to the HW structure | ||
1689 | * @q_vector: interrupt vector that's being configured | ||
1690 | * @vector: HW vector index to apply the interrupt throttling to | ||
1691 | * | ||
1692 | * Configure interrupt throttling values for the ring containers that are | ||
1693 | * associated with the interrupt vector passed in. | ||
1694 | */ | ||
1695 | static void | ||
1696 | ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) | ||
1697 | { | ||
1698 | u8 itr_gran = hw->itr_gran; | ||
1699 | |||
1700 | if (q_vector->num_ring_rx) { | ||
1701 | struct ice_ring_container *rc = &q_vector->rx; | ||
1702 | |||
1703 | rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran); | ||
1704 | rc->latency_range = ICE_LOW_LATENCY; | ||
1705 | wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr); | ||
1706 | } | ||
1707 | |||
1708 | if (q_vector->num_ring_tx) { | ||
1709 | struct ice_ring_container *rc = &q_vector->tx; | ||
1710 | |||
1711 | rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran); | ||
1712 | rc->latency_range = ICE_LOW_LATENCY; | ||
1713 | wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr); | ||
1714 | } | ||
1715 | } | ||
1716 | |||
1717 | /** | ||
1687 | * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW | 1718 | * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW |
1688 | * @vsi: the VSI being configured | 1719 | * @vsi: the VSI being configured |
1689 | */ | 1720 | */ |
@@ -1693,31 +1724,13 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) | |||
1693 | u16 vector = vsi->hw_base_vector; | 1724 | u16 vector = vsi->hw_base_vector; |
1694 | struct ice_hw *hw = &pf->hw; | 1725 | struct ice_hw *hw = &pf->hw; |
1695 | u32 txq = 0, rxq = 0; | 1726 | u32 txq = 0, rxq = 0; |
1696 | int i, q, itr; | 1727 | int i, q; |
1697 | u8 itr_gran; | ||
1698 | 1728 | ||
1699 | for (i = 0; i < vsi->num_q_vectors; i++, vector++) { | 1729 | for (i = 0; i < vsi->num_q_vectors; i++, vector++) { |
1700 | struct ice_q_vector *q_vector = vsi->q_vectors[i]; | 1730 | struct ice_q_vector *q_vector = vsi->q_vectors[i]; |
1701 | 1731 | ||
1702 | itr_gran = hw->itr_gran; | 1732 | ice_cfg_itr(hw, q_vector, vector); |
1703 | 1733 | ||
1704 | q_vector->intrl = ICE_DFLT_INTRL; | ||
1705 | |||
1706 | if (q_vector->num_ring_rx) { | ||
1707 | q_vector->rx.itr = | ||
1708 | ITR_TO_REG(vsi->rx_rings[rxq]->itr_setting, | ||
1709 | itr_gran); | ||
1710 | q_vector->rx.latency_range = ICE_LOW_LATENCY; | ||
1711 | } | ||
1712 | |||
1713 | if (q_vector->num_ring_tx) { | ||
1714 | q_vector->tx.itr = | ||
1715 | ITR_TO_REG(vsi->tx_rings[txq]->itr_setting, | ||
1716 | itr_gran); | ||
1717 | q_vector->tx.latency_range = ICE_LOW_LATENCY; | ||
1718 | } | ||
1719 | wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr); | ||
1720 | wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr); | ||
1721 | wr32(hw, GLINT_RATE(vector), | 1734 | wr32(hw, GLINT_RATE(vector), |
1722 | ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); | 1735 | ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); |
1723 | 1736 | ||
@@ -1733,32 +1746,32 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) | |||
1733 | * tracked for this PF. | 1746 | * tracked for this PF. |
1734 | */ | 1747 | */ |
1735 | for (q = 0; q < q_vector->num_ring_tx; q++) { | 1748 | for (q = 0; q < q_vector->num_ring_tx; q++) { |
1749 | int itr_idx = q_vector->tx.itr_idx; | ||
1736 | u32 val; | 1750 | u32 val; |
1737 | 1751 | ||
1738 | itr = ICE_ITR_NONE; | ||
1739 | if (vsi->type == ICE_VSI_VF) | 1752 | if (vsi->type == ICE_VSI_VF) |
1740 | val = QINT_TQCTL_CAUSE_ENA_M | | 1753 | val = QINT_TQCTL_CAUSE_ENA_M | |
1741 | (itr << QINT_TQCTL_ITR_INDX_S) | | 1754 | (itr_idx << QINT_TQCTL_ITR_INDX_S) | |
1742 | ((i + 1) << QINT_TQCTL_MSIX_INDX_S); | 1755 | ((i + 1) << QINT_TQCTL_MSIX_INDX_S); |
1743 | else | 1756 | else |
1744 | val = QINT_TQCTL_CAUSE_ENA_M | | 1757 | val = QINT_TQCTL_CAUSE_ENA_M | |
1745 | (itr << QINT_TQCTL_ITR_INDX_S) | | 1758 | (itr_idx << QINT_TQCTL_ITR_INDX_S) | |
1746 | (vector << QINT_TQCTL_MSIX_INDX_S); | 1759 | (vector << QINT_TQCTL_MSIX_INDX_S); |
1747 | wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); | 1760 | wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); |
1748 | txq++; | 1761 | txq++; |
1749 | } | 1762 | } |
1750 | 1763 | ||
1751 | for (q = 0; q < q_vector->num_ring_rx; q++) { | 1764 | for (q = 0; q < q_vector->num_ring_rx; q++) { |
1765 | int itr_idx = q_vector->rx.itr_idx; | ||
1752 | u32 val; | 1766 | u32 val; |
1753 | 1767 | ||
1754 | itr = ICE_ITR_NONE; | ||
1755 | if (vsi->type == ICE_VSI_VF) | 1768 | if (vsi->type == ICE_VSI_VF) |
1756 | val = QINT_RQCTL_CAUSE_ENA_M | | 1769 | val = QINT_RQCTL_CAUSE_ENA_M | |
1757 | (itr << QINT_RQCTL_ITR_INDX_S) | | 1770 | (itr_idx << QINT_RQCTL_ITR_INDX_S) | |
1758 | ((i + 1) << QINT_RQCTL_MSIX_INDX_S); | 1771 | ((i + 1) << QINT_RQCTL_MSIX_INDX_S); |
1759 | else | 1772 | else |
1760 | val = QINT_RQCTL_CAUSE_ENA_M | | 1773 | val = QINT_RQCTL_CAUSE_ENA_M | |
1761 | (itr << QINT_RQCTL_ITR_INDX_S) | | 1774 | (itr_idx << QINT_RQCTL_ITR_INDX_S) | |
1762 | (vector << QINT_RQCTL_MSIX_INDX_S); | 1775 | (vector << QINT_RQCTL_MSIX_INDX_S); |
1763 | wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); | 1776 | wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); |
1764 | rxq++; | 1777 | rxq++; |
@@ -2157,8 +2170,8 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi) | |||
2157 | for (i = 0; i < vsi->num_q_vectors; i++, vector++) { | 2170 | for (i = 0; i < vsi->num_q_vectors; i++, vector++) { |
2158 | struct ice_q_vector *q_vector = vsi->q_vectors[i]; | 2171 | struct ice_q_vector *q_vector = vsi->q_vectors[i]; |
2159 | 2172 | ||
2160 | wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0); | 2173 | wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0); |
2161 | wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0); | 2174 | wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0); |
2162 | for (q = 0; q < q_vector->num_ring_tx; q++) { | 2175 | for (q = 0; q < q_vector->num_ring_tx; q++) { |
2163 | wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); | 2176 | wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); |
2164 | txq++; | 2177 | txq++; |