-rw-r--r--  drivers/net/ixgb/ixgb.h          1
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c  1
-rw-r--r--  drivers/net/ixgb/ixgb_main.c    37
3 files changed, 36 insertions, 3 deletions
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 50ffe90488ff..f4aba4355b19 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -171,6 +171,7 @@ struct ixgb_adapter {
 
 	/* TX */
 	struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
+	unsigned int restart_queue;
 	unsigned long timeo_start;
 	uint32_t tx_cmd_type;
 	uint64_t hw_csum_tx_good;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index cd22523fb035..82c044d6e08a 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -79,6 +79,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
 	{"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)},
 	{"tx_deferred_ok", IXGB_STAT(stats.dc)},
 	{"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
+	{"tx_restart_queue", IXGB_STAT(restart_queue) },
 	{"rx_long_length_errors", IXGB_STAT(stats.roc)},
 	{"rx_short_length_errors", IXGB_STAT(stats.ruc)},
 #ifdef NETIF_F_TSO
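
The stats table above only pairs a display string with a size and an offset into struct ixgb_adapter, which is why exporting the new counter is a one-line change. Neither IXGB_STAT() nor the read-out handler appears in this diff, so the sketch below is an assumption about their shape (the entry layout, the IXGB_STATS_LEN name and the handler body are illustrative, not quoted from the driver); it shows why a plain unsigned int such as restart_queue can be dropped into the table and still come out as the 64-bit value ethtool reports:

	/* Assumed shape of the table entries and of the IXGB_STAT()
	 * helper -- neither is shown in this diff. */
	struct ixgb_stats {
		char stat_string[ETH_GSTRING_LEN];
		int sizeof_stat;
		int stat_offset;
	};
	#define IXGB_STAT(m)	sizeof(((struct ixgb_adapter *)0)->m), \
				offsetof(struct ixgb_adapter, m)

	/* Sketch of a generic read-out loop: each counter is copied out
	 * of struct ixgb_adapter by offset, and 32-bit counters such as
	 * restart_queue are widened to the uint64_t ethtool expects. */
	static void
	ixgb_get_ethtool_stats(struct net_device *netdev,
			       struct ethtool_stats *stats, uint64_t *data)
	{
		struct ixgb_adapter *adapter = netdev_priv(netdev);
		int i;

		for (i = 0; i < IXGB_STATS_LEN; i++) {
			char *p = (char *)adapter +
				  ixgb_gstrings_stats[i].stat_offset;
			data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
				   sizeof(uint64_t)) ?
				  *(uint64_t *)p : *(uint32_t *)p;
		}
	}
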
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 70ac9d4a83bb..16317b58f741 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1411,6 +1411,37 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
 	IXGB_WRITE_REG(&adapter->hw, TDT, i);
 }
 
+static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
+{
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
+	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+
+	netif_stop_queue(netdev);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it. */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available. */
+	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! */
+	netif_start_queue(netdev);
+	++adapter->restart_queue;
+	return 0;
+}
+
+static int ixgb_maybe_stop_tx(struct net_device *netdev,
+			      struct ixgb_desc_ring *tx_ring, int size)
+{
+	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __ixgb_maybe_stop_tx(netdev, size);
+}
+
+
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
 			 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
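
For reference, TXD_USE_COUNT() is a ceiling division: the shift/mask pair only works because IXGB_MAX_DATA_PER_TXD is 1 << IXGB_MAX_TXD_PWR. A worked example, with a 16 KB per-descriptor limit assumed purely for illustration (the actual constants are defined elsewhere in the driver):

	/* Assuming IXGB_MAX_TXD_PWR == 14, IXGB_MAX_DATA_PER_TXD == 16384: */
	TXD_USE_COUNT(1500);   /* (1500 >> 14)  + 1 == 1 descriptor  */
	TXD_USE_COUNT(16384);  /* (16384 >> 14) + 0 == 1 descriptor  */
	TXD_USE_COUNT(16385);  /* (16385 >> 14) + 1 == 2 descriptors */
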
@@ -1444,7 +1475,8 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	spin_lock_irqsave(&adapter->tx_lock, flags);
 #endif
 
-	if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
+	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
+		     DESC_NEEDED))) {
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		return NETDEV_TX_BUSY;
@@ -1482,8 +1514,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 #ifdef NETIF_F_LLTX
 	/* Make sure there is space in the ring for the next send. */
-	if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
-		netif_stop_queue(netdev);
+	ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
 
 	spin_unlock_irqrestore(&adapter->tx_lock, flags);
 
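
The new helpers are only the stop half of the handshake: __ixgb_maybe_stop_tx() stops the queue, issues smp_mb(), and re-checks the free descriptor count, while the transmit-clean path (untouched by this diff) is expected to do the mirror-image check and wake the queue after reclaiming descriptors. A rough sketch of that consumer side follows; the cleaned flag, the surrounding locking and the use of DESC_NEEDED as the wake threshold are assumptions for illustration, not code from this patch:

	/* Sketch only -- not part of this patch.  Somewhere in
	 * ixgb_clean_tx_irq(), after tx_ring->next_to_clean has been
	 * advanced past the reclaimed descriptors: */
	if (unlikely(cleaned && netif_queue_stopped(netdev))) {
		/* Make the advance of next_to_clean visible before
		 * testing the stopped flag.  Paired with the smp_mb()
		 * in __ixgb_maybe_stop_tx(), this closes the window in
		 * which the stopper misses the freed space and the
		 * cleaner misses the stopped queue. */
		smp_mb();
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)
			netif_wake_queue(netdev);
	}
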