about summary refs log tree commit diff stats
path: root/drivers/net
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2011-07-14 23:05:21 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2011-08-27 03:00:10 -0400
commitefe3d3c8ee6805c7e8b17f9aae554c04b271ab99 (patch)
treec5a976b148a7453dada76d8eb33905493354c4b4 /drivers/net
parent30065e63d8366b6ea4c8962fa255adfac157ce06 (diff)
ixgbe: convert rings from q_vector bit indexed array to linked list
This change converts the current bit array into a linked list so that the q_vectors can simply go through ring by ring and locate each ring needing to be cleaned.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c192
2 files changed, 60 insertions, 139 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 378ce46a7f92..dc3b12e15331 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -209,6 +209,7 @@ enum ixbge_ring_state_t {
209#define clear_ring_rsc_enabled(ring) \ 209#define clear_ring_rsc_enabled(ring) \
210 clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) 210 clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
211struct ixgbe_ring { 211struct ixgbe_ring {
212 struct ixgbe_ring *next; /* pointer to next ring in q_vector */
212 void *desc; /* descriptor ring memory */ 213 void *desc; /* descriptor ring memory */
213 struct device *dev; /* device for DMA mapping */ 214 struct device *dev; /* device for DMA mapping */
214 struct net_device *netdev; /* netdev ring belongs to */ 215 struct net_device *netdev; /* netdev ring belongs to */
@@ -277,11 +278,7 @@ struct ixgbe_ring_feature {
277} ____cacheline_internodealigned_in_smp; 278} ____cacheline_internodealigned_in_smp;
278 279
279struct ixgbe_ring_container { 280struct ixgbe_ring_container {
280#if MAX_RX_QUEUES > MAX_TX_QUEUES 281 struct ixgbe_ring *ring; /* pointer to linked list of rings */
281 DECLARE_BITMAP(idx, MAX_RX_QUEUES);
282#else
283 DECLARE_BITMAP(idx, MAX_TX_QUEUES);
284#endif
285 unsigned int total_bytes; /* total bytes processed this int */ 282 unsigned int total_bytes; /* total bytes processed this int */
286 unsigned int total_packets; /* total packets processed this int */ 283 unsigned int total_packets; /* total packets processed this int */
287 u16 work_limit; /* total work allowed per interrupt */ 284 u16 work_limit; /* total work allowed per interrupt */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e5a4eb62b27c..bb54d3d28419 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -974,26 +974,17 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
974static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) 974static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
975{ 975{
976 struct ixgbe_adapter *adapter = q_vector->adapter; 976 struct ixgbe_adapter *adapter = q_vector->adapter;
977 struct ixgbe_ring *ring;
977 int cpu = get_cpu(); 978 int cpu = get_cpu();
978 long r_idx;
979 int i;
980 979
981 if (q_vector->cpu == cpu) 980 if (q_vector->cpu == cpu)
982 goto out_no_update; 981 goto out_no_update;
983 982
984 r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); 983 for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
985 for (i = 0; i < q_vector->tx.count; i++) { 984 ixgbe_update_tx_dca(adapter, ring, cpu);
986 ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
987 r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
988 r_idx + 1);
989 }
990 985
991 r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); 986 for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
992 for (i = 0; i < q_vector->rx.count; i++) { 987 ixgbe_update_rx_dca(adapter, ring, cpu);
993 ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
994 r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
995 r_idx + 1);
996 }
997 988
998 q_vector->cpu = cpu; 989 q_vector->cpu = cpu;
999out_no_update: 990out_no_update:
@@ -1546,7 +1537,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int);
1546static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) 1537static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1547{ 1538{
1548 struct ixgbe_q_vector *q_vector; 1539 struct ixgbe_q_vector *q_vector;
1549 int i, q_vectors, v_idx, r_idx; 1540 int q_vectors, v_idx;
1550 u32 mask; 1541 u32 mask;
1551 1542
1552 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1543 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -1556,33 +1547,19 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1556 * corresponding register. 1547 * corresponding register.
1557 */ 1548 */
1558 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 1549 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1550 struct ixgbe_ring *ring;
1559 q_vector = adapter->q_vector[v_idx]; 1551 q_vector = adapter->q_vector[v_idx];
1560 /* XXX for_each_set_bit(...) */
1561 r_idx = find_first_bit(q_vector->rx.idx,
1562 adapter->num_rx_queues);
1563
1564 for (i = 0; i < q_vector->rx.count; i++) {
1565 u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
1566 ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
1567 r_idx = find_next_bit(q_vector->rx.idx,
1568 adapter->num_rx_queues,
1569 r_idx + 1);
1570 }
1571 r_idx = find_first_bit(q_vector->tx.idx,
1572 adapter->num_tx_queues);
1573
1574 for (i = 0; i < q_vector->tx.count; i++) {
1575 u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
1576 ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
1577 r_idx = find_next_bit(q_vector->tx.idx,
1578 adapter->num_tx_queues,
1579 r_idx + 1);
1580 }
1581 1552
1582 if (q_vector->tx.count && !q_vector->rx.count) 1553 for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
1554 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1555
1556 for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
1557 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1558
1559 if (q_vector->tx.ring && !q_vector->rx.ring)
1583 /* tx only */ 1560 /* tx only */
1584 q_vector->eitr = adapter->tx_eitr_param; 1561 q_vector->eitr = adapter->tx_eitr_param;
1585 else if (q_vector->rx.count) 1562 else if (q_vector->rx.ring)
1586 /* rx or mixed */ 1563 /* rx or mixed */
1587 q_vector->eitr = adapter->rx_eitr_param; 1564 q_vector->eitr = adapter->rx_eitr_param;
1588 1565
@@ -2006,20 +1983,10 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2006static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) 1983static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
2007{ 1984{
2008 struct ixgbe_q_vector *q_vector = data; 1985 struct ixgbe_q_vector *q_vector = data;
2009 struct ixgbe_adapter *adapter = q_vector->adapter;
2010 struct ixgbe_ring *tx_ring;
2011 int i, r_idx;
2012 1986
2013 if (!q_vector->tx.count) 1987 if (!q_vector->tx.count)
2014 return IRQ_HANDLED; 1988 return IRQ_HANDLED;
2015 1989
2016 r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
2017 for (i = 0; i < q_vector->tx.count; i++) {
2018 tx_ring = adapter->tx_ring[r_idx];
2019 r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
2020 r_idx + 1);
2021 }
2022
2023 /* EIAM disabled interrupts (on this vector) for us */ 1990 /* EIAM disabled interrupts (on this vector) for us */
2024 napi_schedule(&q_vector->napi); 1991 napi_schedule(&q_vector->napi);
2025 1992
@@ -2034,22 +2001,6 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
2034static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) 2001static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
2035{ 2002{
2036 struct ixgbe_q_vector *q_vector = data; 2003 struct ixgbe_q_vector *q_vector = data;
2037 struct ixgbe_adapter *adapter = q_vector->adapter;
2038 struct ixgbe_ring *rx_ring;
2039 int r_idx;
2040 int i;
2041
2042#ifdef CONFIG_IXGBE_DCA
2043 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2044 ixgbe_update_dca(q_vector);
2045#endif
2046
2047 r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
2048 for (i = 0; i < q_vector->rx.count; i++) {
2049 rx_ring = adapter->rx_ring[r_idx];
2050 r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
2051 r_idx + 1);
2052 }
2053 2004
2054 if (!q_vector->rx.count) 2005 if (!q_vector->rx.count)
2055 return IRQ_HANDLED; 2006 return IRQ_HANDLED;
@@ -2063,28 +2014,10 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
2063static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) 2014static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
2064{ 2015{
2065 struct ixgbe_q_vector *q_vector = data; 2016 struct ixgbe_q_vector *q_vector = data;
2066 struct ixgbe_adapter *adapter = q_vector->adapter;
2067 struct ixgbe_ring *ring;
2068 int r_idx;
2069 int i;
2070 2017
2071 if (!q_vector->tx.count && !q_vector->rx.count) 2018 if (!q_vector->tx.count && !q_vector->rx.count)
2072 return IRQ_HANDLED; 2019 return IRQ_HANDLED;
2073 2020
2074 r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
2075 for (i = 0; i < q_vector->tx.count; i++) {
2076 ring = adapter->tx_ring[r_idx];
2077 r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
2078 r_idx + 1);
2079 }
2080
2081 r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
2082 for (i = 0; i < q_vector->rx.count; i++) {
2083 ring = adapter->rx_ring[r_idx];
2084 r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
2085 r_idx + 1);
2086 }
2087
2088 /* EIAM disabled interrupts (on this vector) for us */ 2021 /* EIAM disabled interrupts (on this vector) for us */
2089 napi_schedule(&q_vector->napi); 2022 napi_schedule(&q_vector->napi);
2090 2023
@@ -2104,19 +2037,14 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
2104 struct ixgbe_q_vector *q_vector = 2037 struct ixgbe_q_vector *q_vector =
2105 container_of(napi, struct ixgbe_q_vector, napi); 2038 container_of(napi, struct ixgbe_q_vector, napi);
2106 struct ixgbe_adapter *adapter = q_vector->adapter; 2039 struct ixgbe_adapter *adapter = q_vector->adapter;
2107 struct ixgbe_ring *rx_ring = NULL;
2108 int work_done = 0; 2040 int work_done = 0;
2109 long r_idx;
2110 2041
2111#ifdef CONFIG_IXGBE_DCA 2042#ifdef CONFIG_IXGBE_DCA
2112 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 2043 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2113 ixgbe_update_dca(q_vector); 2044 ixgbe_update_dca(q_vector);
2114#endif 2045#endif
2115 2046
2116 r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); 2047 ixgbe_clean_rx_irq(q_vector, q_vector->rx.ring, &work_done, budget);
2117 rx_ring = adapter->rx_ring[r_idx];
2118
2119 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
2120 2048
2121 /* If all Rx work done, exit the polling mode */ 2049 /* If all Rx work done, exit the polling mode */
2122 if (work_done < budget) { 2050 if (work_done < budget) {
@@ -2144,38 +2072,29 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
2144 struct ixgbe_q_vector *q_vector = 2072 struct ixgbe_q_vector *q_vector =
2145 container_of(napi, struct ixgbe_q_vector, napi); 2073 container_of(napi, struct ixgbe_q_vector, napi);
2146 struct ixgbe_adapter *adapter = q_vector->adapter; 2074 struct ixgbe_adapter *adapter = q_vector->adapter;
2147 struct ixgbe_ring *ring = NULL; 2075 struct ixgbe_ring *ring;
2148 int work_done = 0, i; 2076 int work_done = 0;
2149 long r_idx; 2077 bool clean_complete = true;
2150 bool tx_clean_complete = true;
2151 2078
2152#ifdef CONFIG_IXGBE_DCA 2079#ifdef CONFIG_IXGBE_DCA
2153 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 2080 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2154 ixgbe_update_dca(q_vector); 2081 ixgbe_update_dca(q_vector);
2155#endif 2082#endif
2156 2083
2157 r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); 2084 for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
2158 for (i = 0; i < q_vector->tx.count; i++) { 2085 clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
2159 ring = adapter->tx_ring[r_idx];
2160 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
2161 r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
2162 r_idx + 1);
2163 }
2164 2086
2165 /* attempt to distribute budget to each queue fairly, but don't allow 2087 /* attempt to distribute budget to each queue fairly, but don't allow
2166 * the budget to go below 1 because we'll exit polling */ 2088 * the budget to go below 1 because we'll exit polling */
2167 budget /= (q_vector->rx.count ?: 1); 2089 budget /= (q_vector->rx.count ?: 1);
2168 budget = max(budget, 1); 2090 budget = max(budget, 1);
2169 r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); 2091
2170 for (i = 0; i < q_vector->rx.count; i++) { 2092 for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
2171 ring = adapter->rx_ring[r_idx];
2172 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); 2093 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
2173 r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
2174 r_idx + 1);
2175 }
2176 2094
2177 r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); 2095 if (!clean_complete)
2178 ring = adapter->rx_ring[r_idx]; 2096 work_done = budget;
2097
2179 /* If all Rx work done, exit the polling mode */ 2098 /* If all Rx work done, exit the polling mode */
2180 if (work_done < budget) { 2099 if (work_done < budget) {
2181 napi_complete(napi); 2100 napi_complete(napi);
@@ -2203,32 +2122,23 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
2203 struct ixgbe_q_vector *q_vector = 2122 struct ixgbe_q_vector *q_vector =
2204 container_of(napi, struct ixgbe_q_vector, napi); 2123 container_of(napi, struct ixgbe_q_vector, napi);
2205 struct ixgbe_adapter *adapter = q_vector->adapter; 2124 struct ixgbe_adapter *adapter = q_vector->adapter;
2206 struct ixgbe_ring *tx_ring = NULL;
2207 int work_done = 0;
2208 long r_idx;
2209 2125
2210#ifdef CONFIG_IXGBE_DCA 2126#ifdef CONFIG_IXGBE_DCA
2211 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 2127 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2212 ixgbe_update_dca(q_vector); 2128 ixgbe_update_dca(q_vector);
2213#endif 2129#endif
2214 2130
2215 r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); 2131 if (!ixgbe_clean_tx_irq(q_vector, q_vector->tx.ring))
2216 tx_ring = adapter->tx_ring[r_idx]; 2132 return budget;
2217
2218 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
2219 work_done = budget;
2220 2133
2221 /* If all Tx work done, exit the polling mode */ 2134 /* If all Tx work done, exit the polling mode */
2222 if (work_done < budget) { 2135 napi_complete(napi);
2223 napi_complete(napi); 2136 if (adapter->tx_itr_setting & 1)
2224 if (adapter->tx_itr_setting & 1) 2137 ixgbe_set_itr(q_vector);
2225 ixgbe_set_itr(q_vector); 2138 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2226 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2139 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
2227 ixgbe_irq_enable_queues(adapter,
2228 ((u64)1 << q_vector->v_idx));
2229 }
2230 2140
2231 return work_done; 2141 return 0;
2232} 2142}
2233 2143
2234static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, 2144static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
@@ -2237,9 +2147,10 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
2237 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; 2147 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2238 struct ixgbe_ring *rx_ring = a->rx_ring[r_idx]; 2148 struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
2239 2149
2240 set_bit(r_idx, q_vector->rx.idx);
2241 q_vector->rx.count++;
2242 rx_ring->q_vector = q_vector; 2150 rx_ring->q_vector = q_vector;
2151 rx_ring->next = q_vector->rx.ring;
2152 q_vector->rx.ring = rx_ring;
2153 q_vector->rx.count++;
2243} 2154}
2244 2155
2245static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, 2156static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
@@ -2248,9 +2159,10 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2248 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; 2159 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2249 struct ixgbe_ring *tx_ring = a->tx_ring[t_idx]; 2160 struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
2250 2161
2251 set_bit(t_idx, q_vector->tx.idx);
2252 q_vector->tx.count++;
2253 tx_ring->q_vector = q_vector; 2162 tx_ring->q_vector = q_vector;
2163 tx_ring->next = q_vector->tx.ring;
2164 q_vector->tx.ring = tx_ring;
2165 q_vector->tx.count++;
2254 q_vector->tx.work_limit = a->tx_work_limit; 2166 q_vector->tx.work_limit = a->tx_work_limit;
2255} 2167}
2256 2168
@@ -2508,14 +2420,26 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2508 2420
2509static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter) 2421static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
2510{ 2422{
2511 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2423 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2424 int i;
2425
2426 /* legacy and MSI only use one vector */
2427 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2428 q_vectors = 1;
2429
2430 for (i = 0; i < adapter->num_rx_queues; i++) {
2431 adapter->rx_ring[i]->q_vector = NULL;
2432 adapter->rx_ring[i]->next = NULL;
2433 }
2434 for (i = 0; i < adapter->num_tx_queues; i++) {
2435 adapter->tx_ring[i]->q_vector = NULL;
2436 adapter->tx_ring[i]->next = NULL;
2437 }
2512 2438
2513 for (i = 0; i < q_vectors; i++) { 2439 for (i = 0; i < q_vectors; i++) {
2514 struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; 2440 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
2515 bitmap_zero(q_vector->rx.idx, MAX_RX_QUEUES); 2441 memset(&q_vector->rx, 0, sizeof(struct ixgbe_ring_container));
2516 bitmap_zero(q_vector->tx.idx, MAX_TX_QUEUES); 2442 memset(&q_vector->tx, 0, sizeof(struct ixgbe_ring_container));
2517 q_vector->rx.count = 0;
2518 q_vector->tx.count = 0;
2519 } 2443 }
2520} 2444}
2521 2445
@@ -5923,7 +5847,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
5923 /* get one bit for every active tx/rx interrupt vector */ 5847 /* get one bit for every active tx/rx interrupt vector */
5924 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 5848 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
5925 struct ixgbe_q_vector *qv = adapter->q_vector[i]; 5849 struct ixgbe_q_vector *qv = adapter->q_vector[i];
5926 if (qv->rx.count || qv->tx.count) 5850 if (qv->rx.ring || qv->tx.ring)
5927 eics |= ((u64)1 << i); 5851 eics |= ((u64)1 << i);
5928 } 5852 }
5929 } 5853 }