about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/net
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2011-06-10 21:45:03 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2011-07-22 01:55:56 -0400
commit08c8833b29cfa4343ff132eebc5648b234eb3f85 (patch)
tree68fac5d690c63d9f83e654b4315e801e73f2eead /drivers/net
parent82d4e46e2a398154273044dd9813206f0d85bc09 (diff)
ixgbe: add structure for containing RX/TX rings to q_vector
This patch adds support for a ring container structure to be used within the q_vector. The basic idea is to provide a means of separating the RX and TX rings while maintaining a common structure for their containment. The advantage to this is that later we should be able to pass this structure to the update_itr functions without needing to pass individual rings.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/ixgbe/ixgbe.h16
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c6
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c150
3 files changed, 88 insertions, 84 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 744b64108130..a5ed9fd69fdd 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -277,6 +277,15 @@ struct ixgbe_ring_feature {
277 int mask; 277 int mask;
278} ____cacheline_internodealigned_in_smp; 278} ____cacheline_internodealigned_in_smp;
279 279
280struct ixgbe_ring_container {
281#if MAX_RX_QUEUES > MAX_TX_QUEUES
282 DECLARE_BITMAP(idx, MAX_RX_QUEUES);
283#else
284 DECLARE_BITMAP(idx, MAX_TX_QUEUES);
285#endif
286 u8 count; /* total number of rings in vector */
287 u8 itr; /* current ITR setting for ring */
288};
280 289
281#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ 290#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
282 ? 8 : 1) 291 ? 8 : 1)
@@ -294,12 +303,7 @@ struct ixgbe_q_vector {
294 int cpu; /* CPU for DCA */ 303 int cpu; /* CPU for DCA */
295#endif 304#endif
296 struct napi_struct napi; 305 struct napi_struct napi;
297 DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */ 306 struct ixgbe_ring_container rx, tx;
298 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
299 u8 rxr_count; /* Rx ring count assigned to this vector */
300 u8 txr_count; /* Tx ring count assigned to this vector */
301 u8 tx_itr;
302 u8 rx_itr;
303 u32 eitr; 307 u32 eitr;
304 cpumask_var_t affinity_mask; 308 cpumask_var_t affinity_mask;
305 char name[IFNAMSIZ + 9]; 309 char name[IFNAMSIZ + 9];
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 074e9baf069a..cd842f9dcae5 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -2122,7 +2122,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
2122 } 2122 }
2123 2123
2124 /* if in mixed tx/rx queues per vector mode, report only rx settings */ 2124 /* if in mixed tx/rx queues per vector mode, report only rx settings */
2125 if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count) 2125 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2126 return 0; 2126 return 0;
2127 2127
2128 /* only valid if in constant ITR mode */ 2128 /* only valid if in constant ITR mode */
@@ -2187,7 +2187,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2187 bool need_reset = false; 2187 bool need_reset = false;
2188 2188
2189 /* don't accept tx specific changes if we've got mixed RxTx vectors */ 2189 /* don't accept tx specific changes if we've got mixed RxTx vectors */
2190 if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count 2190 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
2191 && ec->tx_coalesce_usecs) 2191 && ec->tx_coalesce_usecs)
2192 return -EINVAL; 2192 return -EINVAL;
2193 2193
@@ -2261,7 +2261,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2261 int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2261 int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2262 for (i = 0; i < num_vectors; i++) { 2262 for (i = 0; i < num_vectors; i++) {
2263 q_vector = adapter->q_vector[i]; 2263 q_vector = adapter->q_vector[i];
2264 if (q_vector->txr_count && !q_vector->rxr_count) 2264 if (q_vector->tx.count && !q_vector->rx.count)
2265 /* tx only */ 2265 /* tx only */
2266 q_vector->eitr = adapter->tx_eitr_param; 2266 q_vector->eitr = adapter->tx_eitr_param;
2267 else 2267 else
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 7275c5cc09b2..bd1fd8f422dc 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -959,17 +959,17 @@ static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
959 if (q_vector->cpu == cpu) 959 if (q_vector->cpu == cpu)
960 goto out_no_update; 960 goto out_no_update;
961 961
962 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 962 r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
963 for (i = 0; i < q_vector->txr_count; i++) { 963 for (i = 0; i < q_vector->tx.count; i++) {
964 ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu); 964 ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
965 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 965 r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
966 r_idx + 1); 966 r_idx + 1);
967 } 967 }
968 968
969 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 969 r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
970 for (i = 0; i < q_vector->rxr_count; i++) { 970 for (i = 0; i < q_vector->rx.count; i++) {
971 ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu); 971 ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
972 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 972 r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
973 r_idx + 1); 973 r_idx + 1);
974 } 974 }
975 975
@@ -1517,31 +1517,31 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1517 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 1517 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1518 q_vector = adapter->q_vector[v_idx]; 1518 q_vector = adapter->q_vector[v_idx];
1519 /* XXX for_each_set_bit(...) */ 1519 /* XXX for_each_set_bit(...) */
1520 r_idx = find_first_bit(q_vector->rxr_idx, 1520 r_idx = find_first_bit(q_vector->rx.idx,
1521 adapter->num_rx_queues); 1521 adapter->num_rx_queues);
1522 1522
1523 for (i = 0; i < q_vector->rxr_count; i++) { 1523 for (i = 0; i < q_vector->rx.count; i++) {
1524 u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx; 1524 u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
1525 ixgbe_set_ivar(adapter, 0, reg_idx, v_idx); 1525 ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
1526 r_idx = find_next_bit(q_vector->rxr_idx, 1526 r_idx = find_next_bit(q_vector->rx.idx,
1527 adapter->num_rx_queues, 1527 adapter->num_rx_queues,
1528 r_idx + 1); 1528 r_idx + 1);
1529 } 1529 }
1530 r_idx = find_first_bit(q_vector->txr_idx, 1530 r_idx = find_first_bit(q_vector->tx.idx,
1531 adapter->num_tx_queues); 1531 adapter->num_tx_queues);
1532 1532
1533 for (i = 0; i < q_vector->txr_count; i++) { 1533 for (i = 0; i < q_vector->tx.count; i++) {
1534 u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx; 1534 u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
1535 ixgbe_set_ivar(adapter, 1, reg_idx, v_idx); 1535 ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
1536 r_idx = find_next_bit(q_vector->txr_idx, 1536 r_idx = find_next_bit(q_vector->tx.idx,
1537 adapter->num_tx_queues, 1537 adapter->num_tx_queues,
1538 r_idx + 1); 1538 r_idx + 1);
1539 } 1539 }
1540 1540
1541 if (q_vector->txr_count && !q_vector->rxr_count) 1541 if (q_vector->tx.count && !q_vector->rx.count)
1542 /* tx only */ 1542 /* tx only */
1543 q_vector->eitr = adapter->tx_eitr_param; 1543 q_vector->eitr = adapter->tx_eitr_param;
1544 else if (q_vector->rxr_count) 1544 else if (q_vector->rx.count)
1545 /* rx or mixed */ 1545 /* rx or mixed */
1546 q_vector->eitr = adapter->rx_eitr_param; 1546 q_vector->eitr = adapter->rx_eitr_param;
1547 1547
@@ -1705,37 +1705,37 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1705 u32 new_itr; 1705 u32 new_itr;
1706 u8 current_itr, ret_itr; 1706 u8 current_itr, ret_itr;
1707 1707
1708 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 1708 r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
1709 for (i = 0; i < q_vector->txr_count; i++) { 1709 for (i = 0; i < q_vector->tx.count; i++) {
1710 struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx]; 1710 struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
1711 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 1711 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1712 q_vector->tx_itr, 1712 q_vector->tx.itr,
1713 tx_ring->total_packets, 1713 tx_ring->total_packets,
1714 tx_ring->total_bytes); 1714 tx_ring->total_bytes);
1715 /* if the result for this queue would decrease interrupt 1715 /* if the result for this queue would decrease interrupt
1716 * rate for this vector then use that result */ 1716 * rate for this vector then use that result */
1717 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ? 1717 q_vector->tx.itr = ((q_vector->tx.itr > ret_itr) ?
1718 q_vector->tx_itr - 1 : ret_itr); 1718 q_vector->tx.itr - 1 : ret_itr);
1719 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 1719 r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
1720 r_idx + 1); 1720 r_idx + 1);
1721 } 1721 }
1722 1722
1723 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1723 r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
1724 for (i = 0; i < q_vector->rxr_count; i++) { 1724 for (i = 0; i < q_vector->rx.count; i++) {
1725 struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx]; 1725 struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
1726 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 1726 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1727 q_vector->rx_itr, 1727 q_vector->rx.itr,
1728 rx_ring->total_packets, 1728 rx_ring->total_packets,
1729 rx_ring->total_bytes); 1729 rx_ring->total_bytes);
1730 /* if the result for this queue would decrease interrupt 1730 /* if the result for this queue would decrease interrupt
1731 * rate for this vector then use that result */ 1731 * rate for this vector then use that result */
1732 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ? 1732 q_vector->rx.itr = ((q_vector->rx.itr > ret_itr) ?
1733 q_vector->rx_itr - 1 : ret_itr); 1733 q_vector->rx.itr - 1 : ret_itr);
1734 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 1734 r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
1735 r_idx + 1); 1735 r_idx + 1);
1736 } 1736 }
1737 1737
1738 current_itr = max(q_vector->rx_itr, q_vector->tx_itr); 1738 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
1739 1739
1740 switch (current_itr) { 1740 switch (current_itr) {
1741 /* counts and packets in update_itr are dependent on these numbers */ 1741 /* counts and packets in update_itr are dependent on these numbers */
@@ -1995,15 +1995,15 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
1995 struct ixgbe_ring *tx_ring; 1995 struct ixgbe_ring *tx_ring;
1996 int i, r_idx; 1996 int i, r_idx;
1997 1997
1998 if (!q_vector->txr_count) 1998 if (!q_vector->tx.count)
1999 return IRQ_HANDLED; 1999 return IRQ_HANDLED;
2000 2000
2001 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 2001 r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
2002 for (i = 0; i < q_vector->txr_count; i++) { 2002 for (i = 0; i < q_vector->tx.count; i++) {
2003 tx_ring = adapter->tx_ring[r_idx]; 2003 tx_ring = adapter->tx_ring[r_idx];
2004 tx_ring->total_bytes = 0; 2004 tx_ring->total_bytes = 0;
2005 tx_ring->total_packets = 0; 2005 tx_ring->total_packets = 0;
2006 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 2006 r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
2007 r_idx + 1); 2007 r_idx + 1);
2008 } 2008 }
2009 2009
@@ -2031,16 +2031,16 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
2031 ixgbe_update_dca(q_vector); 2031 ixgbe_update_dca(q_vector);
2032#endif 2032#endif
2033 2033
2034 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 2034 r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
2035 for (i = 0; i < q_vector->rxr_count; i++) { 2035 for (i = 0; i < q_vector->rx.count; i++) {
2036 rx_ring = adapter->rx_ring[r_idx]; 2036 rx_ring = adapter->rx_ring[r_idx];
2037 rx_ring->total_bytes = 0; 2037 rx_ring->total_bytes = 0;
2038 rx_ring->total_packets = 0; 2038 rx_ring->total_packets = 0;
2039 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 2039 r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
2040 r_idx + 1); 2040 r_idx + 1);
2041 } 2041 }
2042 2042
2043 if (!q_vector->rxr_count) 2043 if (!q_vector->rx.count)
2044 return IRQ_HANDLED; 2044 return IRQ_HANDLED;
2045 2045
2046 /* EIAM disabled interrupts (on this vector) for us */ 2046 /* EIAM disabled interrupts (on this vector) for us */
@@ -2057,24 +2057,24 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
2057 int r_idx; 2057 int r_idx;
2058 int i; 2058 int i;
2059 2059
2060 if (!q_vector->txr_count && !q_vector->rxr_count) 2060 if (!q_vector->tx.count && !q_vector->rx.count)
2061 return IRQ_HANDLED; 2061 return IRQ_HANDLED;
2062 2062
2063 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 2063 r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
2064 for (i = 0; i < q_vector->txr_count; i++) { 2064 for (i = 0; i < q_vector->tx.count; i++) {
2065 ring = adapter->tx_ring[r_idx]; 2065 ring = adapter->tx_ring[r_idx];
2066 ring->total_bytes = 0; 2066 ring->total_bytes = 0;
2067 ring->total_packets = 0; 2067 ring->total_packets = 0;
2068 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 2068 r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
2069 r_idx + 1); 2069 r_idx + 1);
2070 } 2070 }
2071 2071
2072 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 2072 r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
2073 for (i = 0; i < q_vector->rxr_count; i++) { 2073 for (i = 0; i < q_vector->rx.count; i++) {
2074 ring = adapter->rx_ring[r_idx]; 2074 ring = adapter->rx_ring[r_idx];
2075 ring->total_bytes = 0; 2075 ring->total_bytes = 0;
2076 ring->total_packets = 0; 2076 ring->total_packets = 0;
2077 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 2077 r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
2078 r_idx + 1); 2078 r_idx + 1);
2079 } 2079 }
2080 2080
@@ -2106,7 +2106,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
2106 ixgbe_update_dca(q_vector); 2106 ixgbe_update_dca(q_vector);
2107#endif 2107#endif
2108 2108
2109 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 2109 r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
2110 rx_ring = adapter->rx_ring[r_idx]; 2110 rx_ring = adapter->rx_ring[r_idx];
2111 2111
2112 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); 2112 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
@@ -2147,27 +2147,27 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
2147 ixgbe_update_dca(q_vector); 2147 ixgbe_update_dca(q_vector);
2148#endif 2148#endif
2149 2149
2150 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 2150 r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
2151 for (i = 0; i < q_vector->txr_count; i++) { 2151 for (i = 0; i < q_vector->tx.count; i++) {
2152 ring = adapter->tx_ring[r_idx]; 2152 ring = adapter->tx_ring[r_idx];
2153 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); 2153 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
2154 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 2154 r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
2155 r_idx + 1); 2155 r_idx + 1);
2156 } 2156 }
2157 2157
2158 /* attempt to distribute budget to each queue fairly, but don't allow 2158 /* attempt to distribute budget to each queue fairly, but don't allow
2159 * the budget to go below 1 because we'll exit polling */ 2159 * the budget to go below 1 because we'll exit polling */
2160 budget /= (q_vector->rxr_count ?: 1); 2160 budget /= (q_vector->rx.count ?: 1);
2161 budget = max(budget, 1); 2161 budget = max(budget, 1);
2162 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 2162 r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
2163 for (i = 0; i < q_vector->rxr_count; i++) { 2163 for (i = 0; i < q_vector->rx.count; i++) {
2164 ring = adapter->rx_ring[r_idx]; 2164 ring = adapter->rx_ring[r_idx];
2165 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); 2165 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
2166 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 2166 r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
2167 r_idx + 1); 2167 r_idx + 1);
2168 } 2168 }
2169 2169
2170 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 2170 r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
2171 ring = adapter->rx_ring[r_idx]; 2171 ring = adapter->rx_ring[r_idx];
2172 /* If all Rx work done, exit the polling mode */ 2172 /* If all Rx work done, exit the polling mode */
2173 if (work_done < budget) { 2173 if (work_done < budget) {
@@ -2205,7 +2205,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
2205 ixgbe_update_dca(q_vector); 2205 ixgbe_update_dca(q_vector);
2206#endif 2206#endif
2207 2207
2208 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 2208 r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
2209 tx_ring = adapter->tx_ring[r_idx]; 2209 tx_ring = adapter->tx_ring[r_idx];
2210 2210
2211 if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) 2211 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
@@ -2230,8 +2230,8 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
2230 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; 2230 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2231 struct ixgbe_ring *rx_ring = a->rx_ring[r_idx]; 2231 struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
2232 2232
2233 set_bit(r_idx, q_vector->rxr_idx); 2233 set_bit(r_idx, q_vector->rx.idx);
2234 q_vector->rxr_count++; 2234 q_vector->rx.count++;
2235 rx_ring->q_vector = q_vector; 2235 rx_ring->q_vector = q_vector;
2236} 2236}
2237 2237
@@ -2241,8 +2241,8 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2241 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; 2241 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2242 struct ixgbe_ring *tx_ring = a->tx_ring[t_idx]; 2242 struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
2243 2243
2244 set_bit(t_idx, q_vector->txr_idx); 2244 set_bit(t_idx, q_vector->tx.idx);
2245 q_vector->txr_count++; 2245 q_vector->tx.count++;
2246 tx_ring->q_vector = q_vector; 2246 tx_ring->q_vector = q_vector;
2247} 2247}
2248 2248
@@ -2332,10 +2332,10 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2332 if (err) 2332 if (err)
2333 return err; 2333 return err;
2334 2334
2335#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \ 2335#define SET_HANDLER(_v) (((_v)->rx.count && (_v)->tx.count) \
2336 ? &ixgbe_msix_clean_many : \ 2336 ? &ixgbe_msix_clean_many : \
2337 (_v)->rxr_count ? &ixgbe_msix_clean_rx : \ 2337 (_v)->rx.count ? &ixgbe_msix_clean_rx : \
2338 (_v)->txr_count ? &ixgbe_msix_clean_tx : \ 2338 (_v)->tx.count ? &ixgbe_msix_clean_tx : \
2339 NULL) 2339 NULL)
2340 for (vector = 0; vector < q_vectors; vector++) { 2340 for (vector = 0; vector < q_vectors; vector++) {
2341 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; 2341 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
@@ -2394,16 +2394,16 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2394 u32 new_itr = q_vector->eitr; 2394 u32 new_itr = q_vector->eitr;
2395 u8 current_itr; 2395 u8 current_itr;
2396 2396
2397 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, 2397 q_vector->tx.itr = ixgbe_update_itr(adapter, new_itr,
2398 q_vector->tx_itr, 2398 q_vector->tx.itr,
2399 tx_ring->total_packets, 2399 tx_ring->total_packets,
2400 tx_ring->total_bytes); 2400 tx_ring->total_bytes);
2401 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr, 2401 q_vector->rx.itr = ixgbe_update_itr(adapter, new_itr,
2402 q_vector->rx_itr, 2402 q_vector->rx.itr,
2403 rx_ring->total_packets, 2403 rx_ring->total_packets,
2404 rx_ring->total_bytes); 2404 rx_ring->total_bytes);
2405 2405
2406 current_itr = max(q_vector->rx_itr, q_vector->tx_itr); 2406 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
2407 2407
2408 switch (current_itr) { 2408 switch (current_itr) {
2409 /* counts and packets in update_itr are dependent on these numbers */ 2409 /* counts and packets in update_itr are dependent on these numbers */
@@ -2553,10 +2553,10 @@ static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
2553 2553
2554 for (i = 0; i < q_vectors; i++) { 2554 for (i = 0; i < q_vectors; i++) {
2555 struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; 2555 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
2556 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES); 2556 bitmap_zero(q_vector->rx.idx, MAX_RX_QUEUES);
2557 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES); 2557 bitmap_zero(q_vector->tx.idx, MAX_TX_QUEUES);
2558 q_vector->rxr_count = 0; 2558 q_vector->rx.count = 0;
2559 q_vector->txr_count = 0; 2559 q_vector->tx.count = 0;
2560 } 2560 }
2561} 2561}
2562 2562
@@ -2601,8 +2601,8 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2601 i--; 2601 i--;
2602 for (; i >= 0; i--) { 2602 for (; i >= 0; i--) {
2603 /* free only the irqs that were actually requested */ 2603 /* free only the irqs that were actually requested */
2604 if (!adapter->q_vector[i]->rxr_count && 2604 if (!adapter->q_vector[i]->rx.count &&
2605 !adapter->q_vector[i]->txr_count) 2605 !adapter->q_vector[i]->tx.count)
2606 continue; 2606 continue;
2607 2607
2608 free_irq(adapter->msix_entries[i].vector, 2608 free_irq(adapter->msix_entries[i].vector,
@@ -3616,10 +3616,10 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3616 q_vector = adapter->q_vector[q_idx]; 3616 q_vector = adapter->q_vector[q_idx];
3617 napi = &q_vector->napi; 3617 napi = &q_vector->napi;
3618 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3618 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3619 if (!q_vector->rxr_count || !q_vector->txr_count) { 3619 if (!q_vector->rx.count || !q_vector->tx.count) {
3620 if (q_vector->txr_count == 1) 3620 if (q_vector->tx.count == 1)
3621 napi->poll = &ixgbe_clean_txonly; 3621 napi->poll = &ixgbe_clean_txonly;
3622 else if (q_vector->rxr_count == 1) 3622 else if (q_vector->rx.count == 1)
3623 napi->poll = &ixgbe_clean_rxonly; 3623 napi->poll = &ixgbe_clean_rxonly;
3624 } 3624 }
3625 } 3625 }
@@ -4965,7 +4965,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4965 if (!q_vector) 4965 if (!q_vector)
4966 goto err_out; 4966 goto err_out;
4967 q_vector->adapter = adapter; 4967 q_vector->adapter = adapter;
4968 if (q_vector->txr_count && !q_vector->rxr_count) 4968 if (q_vector->tx.count && !q_vector->rx.count)
4969 q_vector->eitr = adapter->tx_eitr_param; 4969 q_vector->eitr = adapter->tx_eitr_param;
4970 else 4970 else
4971 q_vector->eitr = adapter->rx_eitr_param; 4971 q_vector->eitr = adapter->rx_eitr_param;
@@ -5979,7 +5979,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
5979 /* get one bit for every active tx/rx interrupt vector */ 5979 /* get one bit for every active tx/rx interrupt vector */
5980 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 5980 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
5981 struct ixgbe_q_vector *qv = adapter->q_vector[i]; 5981 struct ixgbe_q_vector *qv = adapter->q_vector[i];
5982 if (qv->rxr_count || qv->txr_count) 5982 if (qv->rx.count || qv->tx.count)
5983 eics |= ((u64)1 << i); 5983 eics |= ((u64)1 << i);
5984 } 5984 }
5985 } 5985 }