about summary refs log tree commit diff stats
path: root/drivers/net/ixgbe/ixgbe_main.c
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2009-06-04 12:00:09 -0400
committerDavid S. Miller <davem@davemloft.net>2009-06-07 08:20:20 -0400
commitfe49f04aa8c0f74c363cbb1e9852a0d7769b5a99 (patch)
tree0017d7c78c5ebf7aafa6b9a1462dd768df06662c /drivers/net/ixgbe/ixgbe_main.c
parent21fa4e66bd0bedfa4ed6aa6f7008b2aff6d45c8d (diff)
ixgbe: move v_idx into q_vector and use as index only
The v_idx value was being used as both a bitmask and an index. This change makes it so that the q_vector contains the index and allows for much of the code to be simplified, since disabling a q_vector involves only clearing one bit in the interrupt bitmask.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c227
1 files changed, 111 insertions, 116 deletions
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e1efa1d7df4e..2500e8b236c6 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -186,6 +186,22 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
186 } 186 }
187} 187}
188 188
189static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
190 u64 qmask)
191{
192 u32 mask;
193
194 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
195 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
196 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
197 } else {
198 mask = (qmask & 0xFFFFFFFF);
199 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
200 mask = (qmask >> 32);
201 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
202 }
203}
204
189static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, 205static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
190 struct ixgbe_tx_buffer 206 struct ixgbe_tx_buffer
191 *tx_buffer_info) 207 *tx_buffer_info)
@@ -248,14 +264,13 @@ static void ixgbe_tx_timeout(struct net_device *netdev);
248 264
249/** 265/**
250 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes 266 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
251 * @adapter: board private structure 267 * @q_vector: structure containing interrupt and ring information
252 * @tx_ring: tx ring to clean 268 * @tx_ring: tx ring to clean
253 *
254 * returns true if transmit work is done
255 **/ 269 **/
256static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, 270static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
257 struct ixgbe_ring *tx_ring) 271 struct ixgbe_ring *tx_ring)
258{ 272{
273 struct ixgbe_adapter *adapter = q_vector->adapter;
259 struct net_device *netdev = adapter->netdev; 274 struct net_device *netdev = adapter->netdev;
260 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 275 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
261 struct ixgbe_tx_buffer *tx_buffer_info; 276 struct ixgbe_tx_buffer *tx_buffer_info;
@@ -329,18 +344,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
329 } 344 }
330 345
331 /* re-arm the interrupt */ 346 /* re-arm the interrupt */
332 if (count >= tx_ring->work_limit) { 347 if (count >= tx_ring->work_limit)
333 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 348 ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
334 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
335 tx_ring->v_idx);
336 else if (tx_ring->v_idx & 0xFFFFFFFF)
337 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0),
338 tx_ring->v_idx);
339 else
340 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1),
341 (tx_ring->v_idx >> 32));
342 }
343
344 349
345 tx_ring->total_bytes += total_bytes; 350 tx_ring->total_bytes += total_bytes;
346 tx_ring->total_packets += total_packets; 351 tx_ring->total_packets += total_packets;
@@ -875,12 +880,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
875 /* rx only */ 880 /* rx only */
876 q_vector->eitr = adapter->eitr_param; 881 q_vector->eitr = adapter->eitr_param;
877 882
878 /* 883 ixgbe_write_eitr(q_vector);
879 * since this is initial set up don't need to call
880 * ixgbe_write_eitr helper
881 */
882 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
883 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
884 } 884 }
885 885
886 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 886 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
@@ -965,17 +965,19 @@ update_itr_done:
965 965
966/** 966/**
967 * ixgbe_write_eitr - write EITR register in hardware specific way 967 * ixgbe_write_eitr - write EITR register in hardware specific way
968 * @adapter: pointer to adapter struct 968 * @q_vector: structure containing interrupt and ring information
969 * @v_idx: vector index into q_vector array
970 * @itr_reg: new value to be written in *register* format, not ints/s
971 * 969 *
972 * This function is made to be called by ethtool and by the driver 970 * This function is made to be called by ethtool and by the driver
973 * when it needs to update EITR registers at runtime. Hardware 971 * when it needs to update EITR registers at runtime. Hardware
974 * specific quirks/differences are taken care of here. 972 * specific quirks/differences are taken care of here.
975 */ 973 */
976void ixgbe_write_eitr(struct ixgbe_adapter *adapter, int v_idx, u32 itr_reg) 974void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
977{ 975{
976 struct ixgbe_adapter *adapter = q_vector->adapter;
978 struct ixgbe_hw *hw = &adapter->hw; 977 struct ixgbe_hw *hw = &adapter->hw;
978 int v_idx = q_vector->v_idx;
979 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
980
979 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 981 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
980 /* must write high and low 16 bits to reset counter */ 982 /* must write high and low 16 bits to reset counter */
981 itr_reg |= (itr_reg << 16); 983 itr_reg |= (itr_reg << 16);
@@ -994,7 +996,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
994 struct ixgbe_adapter *adapter = q_vector->adapter; 996 struct ixgbe_adapter *adapter = q_vector->adapter;
995 u32 new_itr; 997 u32 new_itr;
996 u8 current_itr, ret_itr; 998 u8 current_itr, ret_itr;
997 int i, r_idx, v_idx = q_vector->v_idx; 999 int i, r_idx;
998 struct ixgbe_ring *rx_ring, *tx_ring; 1000 struct ixgbe_ring *rx_ring, *tx_ring;
999 1001
1000 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 1002 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
@@ -1044,14 +1046,13 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1044 } 1046 }
1045 1047
1046 if (new_itr != q_vector->eitr) { 1048 if (new_itr != q_vector->eitr) {
1047 u32 itr_reg; 1049 /* do an exponential smoothing */
1050 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
1048 1051
1049 /* save the algorithm value here, not the smoothed one */ 1052 /* save the algorithm value here, not the smoothed one */
1050 q_vector->eitr = new_itr; 1053 q_vector->eitr = new_itr;
1051 /* do an exponential smoothing */ 1054
1052 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 1055 ixgbe_write_eitr(q_vector);
1053 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
1054 ixgbe_write_eitr(adapter, v_idx, itr_reg);
1055 } 1056 }
1056 1057
1057 return; 1058 return;
@@ -1130,6 +1131,40 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1130 return IRQ_HANDLED; 1131 return IRQ_HANDLED;
1131} 1132}
1132 1133
1134static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1135 u64 qmask)
1136{
1137 u32 mask;
1138
1139 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1140 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1141 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1142 } else {
1143 mask = (qmask & 0xFFFFFFFF);
1144 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
1145 mask = (qmask >> 32);
1146 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
1147 }
1148 /* skip the flush */
1149}
1150
1151static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
1152 u64 qmask)
1153{
1154 u32 mask;
1155
1156 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1157 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1158 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
1159 } else {
1160 mask = (qmask & 0xFFFFFFFF);
1161 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
1162 mask = (qmask >> 32);
1163 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
1164 }
1165 /* skip the flush */
1166}
1167
1133static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) 1168static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
1134{ 1169{
1135 struct ixgbe_q_vector *q_vector = data; 1170 struct ixgbe_q_vector *q_vector = data;
@@ -1149,7 +1184,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
1149#endif 1184#endif
1150 tx_ring->total_bytes = 0; 1185 tx_ring->total_bytes = 0;
1151 tx_ring->total_packets = 0; 1186 tx_ring->total_packets = 0;
1152 ixgbe_clean_tx_irq(adapter, tx_ring); 1187 ixgbe_clean_tx_irq(q_vector, tx_ring);
1153 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 1188 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1154 r_idx + 1); 1189 r_idx + 1);
1155 } 1190 }
@@ -1185,13 +1220,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1185 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1220 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1186 rx_ring = &(adapter->rx_ring[r_idx]); 1221 rx_ring = &(adapter->rx_ring[r_idx]);
1187 /* disable interrupts on this vector only */ 1222 /* disable interrupts on this vector only */
1188 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 1223 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
1189 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
1190 else if (rx_ring->v_idx & 0xFFFFFFFF)
1191 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), rx_ring->v_idx);
1192 else
1193 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1),
1194 (rx_ring->v_idx >> 32));
1195 napi_schedule(&q_vector->napi); 1224 napi_schedule(&q_vector->napi);
1196 1225
1197 return IRQ_HANDLED; 1226 return IRQ_HANDLED;
@@ -1205,23 +1234,6 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
1205 return IRQ_HANDLED; 1234 return IRQ_HANDLED;
1206} 1235}
1207 1236
1208static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1209 u64 qmask)
1210{
1211 u32 mask;
1212
1213 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1214 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1215 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1216 } else {
1217 mask = (qmask & 0xFFFFFFFF);
1218 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
1219 mask = (qmask >> 32);
1220 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
1221 }
1222 /* skip the flush */
1223}
1224
1225/** 1237/**
1226 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine 1238 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
1227 * @napi: napi struct with our devices info in it 1239 * @napi: napi struct with our devices info in it
@@ -1254,7 +1266,8 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1254 if (adapter->itr_setting & 1) 1266 if (adapter->itr_setting & 1)
1255 ixgbe_set_itr_msix(q_vector); 1267 ixgbe_set_itr_msix(q_vector);
1256 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1268 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1257 ixgbe_irq_enable_queues(adapter, rx_ring->v_idx); 1269 ixgbe_irq_enable_queues(adapter,
1270 ((u64)1 << q_vector->v_idx));
1258 } 1271 }
1259 1272
1260 return work_done; 1273 return work_done;
@@ -1276,7 +1289,6 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1276 struct ixgbe_ring *rx_ring = NULL; 1289 struct ixgbe_ring *rx_ring = NULL;
1277 int work_done = 0, i; 1290 int work_done = 0, i;
1278 long r_idx; 1291 long r_idx;
1279 u64 enable_mask = 0;
1280 1292
1281 /* attempt to distribute budget to each queue fairly, but don't allow 1293 /* attempt to distribute budget to each queue fairly, but don't allow
1282 * the budget to go below 1 because we'll exit polling */ 1294 * the budget to go below 1 because we'll exit polling */
@@ -1290,7 +1302,6 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1290 ixgbe_update_rx_dca(adapter, rx_ring); 1302 ixgbe_update_rx_dca(adapter, rx_ring);
1291#endif 1303#endif
1292 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); 1304 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
1293 enable_mask |= rx_ring->v_idx;
1294 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 1305 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1295 r_idx + 1); 1306 r_idx + 1);
1296 } 1307 }
@@ -1303,7 +1314,8 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1303 if (adapter->itr_setting & 1) 1314 if (adapter->itr_setting & 1)
1304 ixgbe_set_itr_msix(q_vector); 1315 ixgbe_set_itr_msix(q_vector);
1305 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1316 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1306 ixgbe_irq_enable_queues(adapter, enable_mask); 1317 ixgbe_irq_enable_queues(adapter,
1318 ((u64)1 << q_vector->v_idx));
1307 return 0; 1319 return 0;
1308 } 1320 }
1309 1321
@@ -1316,7 +1328,6 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
1316 1328
1317 set_bit(r_idx, q_vector->rxr_idx); 1329 set_bit(r_idx, q_vector->rxr_idx);
1318 q_vector->rxr_count++; 1330 q_vector->rxr_count++;
1319 a->rx_ring[r_idx].v_idx = (u64)1 << v_idx;
1320} 1331}
1321 1332
1322static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, 1333static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
@@ -1326,7 +1337,6 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
1326 1337
1327 set_bit(t_idx, q_vector->txr_idx); 1338 set_bit(t_idx, q_vector->txr_idx);
1328 q_vector->txr_count++; 1339 q_vector->txr_count++;
1329 a->tx_ring[t_idx].v_idx = (u64)1 << v_idx;
1330} 1340}
1331 1341
1332/** 1342/**
@@ -1505,14 +1515,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1505 } 1515 }
1506 1516
1507 if (new_itr != q_vector->eitr) { 1517 if (new_itr != q_vector->eitr) {
1508 u32 itr_reg; 1518 /* do an exponential smoothing */
1519 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
1509 1520
1510 /* save the algorithm value here, not the smoothed one */ 1521 /* save the algorithm value here, not the smoothed one */
1511 q_vector->eitr = new_itr; 1522 q_vector->eitr = new_itr;
1512 /* do an exponential smoothing */ 1523
1513 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 1524 ixgbe_write_eitr(q_vector);
1514 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
1515 ixgbe_write_eitr(adapter, 0, itr_reg);
1516 } 1525 }
1517 1526
1518 return; 1527 return;
@@ -2805,7 +2814,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
2805 } 2814 }
2806#endif 2815#endif
2807 2816
2808 tx_clean_complete = ixgbe_clean_tx_irq(adapter, adapter->tx_ring); 2817 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
2809 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget); 2818 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
2810 2819
2811 if (!tx_clean_complete) 2820 if (!tx_clean_complete)
@@ -3324,8 +3333,8 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
3324 if (!q_vector) 3333 if (!q_vector)
3325 goto err_out; 3334 goto err_out;
3326 q_vector->adapter = adapter; 3335 q_vector->adapter = adapter;
3327 q_vector->v_idx = q_idx;
3328 q_vector->eitr = adapter->eitr_param; 3336 q_vector->eitr = adapter->eitr_param;
3337 q_vector->v_idx = q_idx;
3329 if (q_idx < napi_vectors) 3338 if (q_idx < napi_vectors)
3330 netif_napi_add(adapter->netdev, &q_vector->napi, 3339 netif_napi_add(adapter->netdev, &q_vector->napi,
3331 (*poll), 64); 3340 (*poll), 64);
@@ -4216,57 +4225,43 @@ static void ixgbe_watchdog(unsigned long data)
4216{ 4225{
4217 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; 4226 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
4218 struct ixgbe_hw *hw = &adapter->hw; 4227 struct ixgbe_hw *hw = &adapter->hw;
4228 u64 eics = 0;
4229 int i;
4219 4230
4220 /* Do the watchdog outside of interrupt context due to the lovely 4231 /*
4221 * delays that some of the newer hardware requires */ 4232 * Do the watchdog outside of interrupt context due to the lovely
4222 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 4233 * delays that some of the newer hardware requires
4223 u64 eics = 0; 4234 */
4224 int i;
4225 4235
4226 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) 4236 if (test_bit(__IXGBE_DOWN, &adapter->state))
4227 eics |= ((u64)1 << i); 4237 goto watchdog_short_circuit;
4228 4238
4229 /* Cause software interrupt to ensure rx rings are cleaned */ 4239 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
4230 switch (hw->mac.type) { 4240 /*
4231 case ixgbe_mac_82598EB: 4241 * for legacy and MSI interrupts don't set any bits
4232 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 4242 * that are enabled for EIAM, because this operation
4233 IXGBE_WRITE_REG(hw, IXGBE_EICS, (u32)eics); 4243 * would set *both* EIMS and EICS for any bit in EIAM
4234 } else { 4244 */
4235 /* 4245 IXGBE_WRITE_REG(hw, IXGBE_EICS,
4236 * for legacy and MSI interrupts don't set any 4246 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
4237 * bits that are enabled for EIAM, because this 4247 goto watchdog_reschedule;
4238 * operation would set *both* EIMS and EICS for 4248 }
4239 * any bit in EIAM 4249
4240 */ 4250 /* get one bit for every active tx/rx interrupt vector */
4241 IXGBE_WRITE_REG(hw, IXGBE_EICS, 4251 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
4242 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); 4252 struct ixgbe_q_vector *qv = adapter->q_vector[i];
4243 } 4253 if (qv->rxr_count || qv->txr_count)
4244 break; 4254 eics |= ((u64)1 << i);
4245 case ixgbe_mac_82599EB:
4246 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4247 IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(0),
4248 (u32)(eics & 0xFFFFFFFF));
4249 IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1),
4250 (u32)(eics >> 32));
4251 } else {
4252 /*
4253 * for legacy and MSI interrupts don't set any
4254 * bits that are enabled for EIAM, because this
4255 * operation would set *both* EIMS and EICS for
4256 * any bit in EIAM
4257 */
4258 IXGBE_WRITE_REG(hw, IXGBE_EICS,
4259 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
4260 }
4261 break;
4262 default:
4263 break;
4264 }
4265 /* Reset the timer */
4266 mod_timer(&adapter->watchdog_timer,
4267 round_jiffies(jiffies + 2 * HZ));
4268 } 4255 }
4269 4256
4257 /* Cause software interrupt to ensure rx rings are cleaned */
4258 ixgbe_irq_rearm_queues(adapter, eics);
4259
4260watchdog_reschedule:
4261 /* Reset the timer */
4262 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
4263
4264watchdog_short_circuit:
4270 schedule_work(&adapter->watchdog_task); 4265 schedule_work(&adapter->watchdog_task);
4271} 4266}
4272 4267