diff options
author | Emil Tantilov <emil.s.tantilov@intel.com> | 2015-01-27 22:21:24 -0500 |
---|---|---|
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2015-02-05 22:58:44 -0500 |
commit | e08400b707739f0eca1645413924743466ea70b8 (patch) | |
tree | e897d3e8e7ed2273a75c2aa942269bfa0510e701 /drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |
parent | d9bdb57f9c9eee20835a947f2b9ece1ed2ef8485 (diff) |
ixgbevf: Add code to check for Tx hang
This patch adds code to allow for Tx hang checking. The idea is to provide
more robust debug info in the event of a transmit unit hang. Similar to the
logic in ixgbe.
CC: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c')
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 113 |
1 file changed, 95 insertions, 18 deletions
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index a4b3d66b39a0..87f9f8686b6f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
@@ -199,14 +199,64 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, | |||
199 | /* tx_buffer must be completely set up in the transmit path */ | 199 | /* tx_buffer must be completely set up in the transmit path */ |
200 | } | 200 | } |
201 | 201 | ||
202 | #define IXGBE_MAX_TXD_PWR 14 | 202 | static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring) |
203 | #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) | 203 | { |
204 | return ring->stats.packets; | ||
205 | } | ||
206 | |||
207 | static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring) | ||
208 | { | ||
209 | struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev); | ||
210 | struct ixgbe_hw *hw = &adapter->hw; | ||
211 | |||
212 | u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx)); | ||
213 | u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx)); | ||
214 | |||
215 | if (head != tail) | ||
216 | return (head < tail) ? | ||
217 | tail - head : (tail + ring->count - head); | ||
218 | |||
219 | return 0; | ||
220 | } | ||
221 | |||
222 | static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring) | ||
223 | { | ||
224 | u32 tx_done = ixgbevf_get_tx_completed(tx_ring); | ||
225 | u32 tx_done_old = tx_ring->tx_stats.tx_done_old; | ||
226 | u32 tx_pending = ixgbevf_get_tx_pending(tx_ring); | ||
227 | |||
228 | clear_check_for_tx_hang(tx_ring); | ||
204 | 229 | ||
205 | /* Tx Descriptors needed, worst case */ | 230 | /* Check for a hung queue, but be thorough. This verifies |
206 | #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) | 231 | * that a transmit has been completed since the previous |
207 | #define DESC_NEEDED (MAX_SKB_FRAGS + 4) | 232 | * check AND there is at least one packet pending. The |
233 | * ARMED bit is set to indicate a potential hang. | ||
234 | */ | ||
235 | if ((tx_done_old == tx_done) && tx_pending) { | ||
236 | /* make sure it is true for two checks in a row */ | ||
237 | return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED, | ||
238 | &tx_ring->state); | ||
239 | } | ||
240 | /* reset the countdown */ | ||
241 | clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state); | ||
242 | |||
243 | /* update completed stats and continue */ | ||
244 | tx_ring->tx_stats.tx_done_old = tx_done; | ||
245 | |||
246 | return false; | ||
247 | } | ||
248 | |||
249 | /** | ||
250 | * ixgbevf_tx_timeout - Respond to a Tx Hang | ||
251 | * @netdev: network interface device structure | ||
252 | **/ | ||
253 | static void ixgbevf_tx_timeout(struct net_device *netdev) | ||
254 | { | ||
255 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | ||
208 | 256 | ||
209 | static void ixgbevf_tx_timeout(struct net_device *netdev); | 257 | /* Do the reset outside of interrupt context */ |
258 | schedule_work(&adapter->reset_task); | ||
259 | } | ||
210 | 260 | ||
211 | /** | 261 | /** |
212 | * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes | 262 | * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes |
@@ -311,6 +361,37 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, | |||
311 | q_vector->tx.total_bytes += total_bytes; | 361 | q_vector->tx.total_bytes += total_bytes; |
312 | q_vector->tx.total_packets += total_packets; | 362 | q_vector->tx.total_packets += total_packets; |
313 | 363 | ||
364 | if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) { | ||
365 | struct ixgbe_hw *hw = &adapter->hw; | ||
366 | union ixgbe_adv_tx_desc *eop_desc; | ||
367 | |||
368 | eop_desc = tx_ring->tx_buffer_info[i].next_to_watch; | ||
369 | |||
370 | pr_err("Detected Tx Unit Hang\n" | ||
371 | " Tx Queue <%d>\n" | ||
372 | " TDH, TDT <%x>, <%x>\n" | ||
373 | " next_to_use <%x>\n" | ||
374 | " next_to_clean <%x>\n" | ||
375 | "tx_buffer_info[next_to_clean]\n" | ||
376 | " next_to_watch <%p>\n" | ||
377 | " eop_desc->wb.status <%x>\n" | ||
378 | " time_stamp <%lx>\n" | ||
379 | " jiffies <%lx>\n", | ||
380 | tx_ring->queue_index, | ||
381 | IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)), | ||
382 | IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)), | ||
383 | tx_ring->next_to_use, i, | ||
384 | eop_desc, (eop_desc ? eop_desc->wb.status : 0), | ||
385 | tx_ring->tx_buffer_info[i].time_stamp, jiffies); | ||
386 | |||
387 | netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); | ||
388 | |||
389 | /* schedule immediate reset if we believe we hung */ | ||
390 | schedule_work(&adapter->reset_task); | ||
391 | |||
392 | return true; | ||
393 | } | ||
394 | |||
314 | #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) | 395 | #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) |
315 | if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && | 396 | if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && |
316 | (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { | 397 | (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { |
@@ -1479,6 +1560,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, | |||
1479 | txdctl |= (1 << 8) | /* HTHRESH = 1 */ | 1560 | txdctl |= (1 << 8) | /* HTHRESH = 1 */ |
1480 | 32; /* PTHRESH = 32 */ | 1561 | 32; /* PTHRESH = 32 */ |
1481 | 1562 | ||
1563 | clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state); | ||
1564 | |||
1482 | IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl); | 1565 | IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl); |
1483 | 1566 | ||
1484 | /* poll to verify queue is enabled */ | 1567 | /* poll to verify queue is enabled */ |
@@ -2643,6 +2726,12 @@ static void ixgbevf_watchdog(unsigned long data) | |||
2643 | if (test_bit(__IXGBEVF_DOWN, &adapter->state)) | 2726 | if (test_bit(__IXGBEVF_DOWN, &adapter->state)) |
2644 | goto watchdog_short_circuit; | 2727 | goto watchdog_short_circuit; |
2645 | 2728 | ||
2729 | /* Force detection of hung controller */ | ||
2730 | if (netif_carrier_ok(adapter->netdev)) { | ||
2731 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
2732 | set_check_for_tx_hang(adapter->tx_ring[i]); | ||
2733 | } | ||
2734 | |||
2646 | /* get one bit for every active tx/rx interrupt vector */ | 2735 | /* get one bit for every active tx/rx interrupt vector */ |
2647 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { | 2736 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { |
2648 | struct ixgbevf_q_vector *qv = adapter->q_vector[i]; | 2737 | struct ixgbevf_q_vector *qv = adapter->q_vector[i]; |
@@ -2656,18 +2745,6 @@ watchdog_short_circuit: | |||
2656 | schedule_work(&adapter->watchdog_task); | 2745 | schedule_work(&adapter->watchdog_task); |
2657 | } | 2746 | } |
2658 | 2747 | ||
2659 | /** | ||
2660 | * ixgbevf_tx_timeout - Respond to a Tx Hang | ||
2661 | * @netdev: network interface device structure | ||
2662 | **/ | ||
2663 | static void ixgbevf_tx_timeout(struct net_device *netdev) | ||
2664 | { | ||
2665 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | ||
2666 | |||
2667 | /* Do the reset outside of interrupt context */ | ||
2668 | schedule_work(&adapter->reset_task); | ||
2669 | } | ||
2670 | |||
2671 | static void ixgbevf_reset_task(struct work_struct *work) | 2748 | static void ixgbevf_reset_task(struct work_struct *work) |
2672 | { | 2749 | { |
2673 | struct ixgbevf_adapter *adapter; | 2750 | struct ixgbevf_adapter *adapter; |