author     Jesse Brandeburg <jesse.brandeburg@intel.com>    2007-01-06 12:51:38 -0500
committer  Auke Kok <auke-jan.h.kok@intel.com>              2007-01-06 12:51:38 -0500
commit     dfd341e4e467d146901a3accb761f04fda535433 (patch)
tree       60c663f439cbfdd50eff5cdf1f76ee9436b6c6d2 /drivers/net/ixgb/ixgb_main.c
parent     5d9278537502d2e404e85485d1b905814fe728c0 (diff)
ixgb: Maybe stop TX if not enough free descriptors
A patch similar to commit 65c7973fa5b46b024f38be208aa477e8daf9a603, but now for ixgb.
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Diffstat (limited to 'drivers/net/ixgb/ixgb_main.c')
 drivers/net/ixgb/ixgb_main.c | 37 ++++++++++++++++++++++++++++++++++---
 1 file changed, 34 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 70ac9d4a83bb..16317b58f741 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1411,6 +1411,37 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
 	IXGB_WRITE_REG(&adapter->hw, TDT, i);
 }
 
+static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
+{
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
+	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+
+	netif_stop_queue(netdev);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it. */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available. */
+	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! */
+	netif_start_queue(netdev);
+	++adapter->restart_queue;
+	return 0;
+}
+
+static int ixgb_maybe_stop_tx(struct net_device *netdev,
+			      struct ixgb_desc_ring *tx_ring, int size)
+{
+	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __ixgb_maybe_stop_tx(netdev, size);
+}
+
+
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
 			 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
@@ -1444,7 +1475,8 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	spin_lock_irqsave(&adapter->tx_lock, flags);
 #endif
 
-	if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
+	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
+	             DESC_NEEDED))) {
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		return NETDEV_TX_BUSY;
@@ -1482,8 +1514,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 #ifdef NETIF_F_LLTX
 	/* Make sure there is space in the ring for the next send. */
-	if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
-		netif_stop_queue(netdev);
+	ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
 
 	spin_unlock_irqrestore(&adapter->tx_lock, flags);
 
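Note: the change boils down to a stop-then-recheck idiom: stop the queue, issue a full memory barrier (smp_mb()), then re-read the free-descriptor count, so a cleanup pass running on another CPU between the first check and the stop cannot leave the queue stopped forever. The following is a minimal user-space sketch of that idiom using C11 atomics; it is not part of the patch, and names such as fake_ring, ring_unused(), maybe_stop() and maybe_stop_slow() are illustrative stand-ins for the driver's tx_ring, IXGB_DESC_UNUSED(), ixgb_maybe_stop_tx() and __ixgb_maybe_stop_tx().

/* Illustrative sketch only -- not driver code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 256

struct fake_ring {
        atomic_int  used;       /* descriptors currently owned by "hardware" */
        atomic_bool stopped;    /* software queue state */
};

static int ring_unused(struct fake_ring *r)
{
        return RING_SIZE - atomic_load(&r->used);
}

/* Slow path: stop the queue, then re-check under a full barrier in case the
 * cleanup path freed descriptors between our first check and the stop. */
static int maybe_stop_slow(struct fake_ring *r, int needed)
{
        atomic_store(&r->stopped, true);

        /* Rough equivalent of smp_mb(): order the store of "stopped"
         * against the re-read of the free-descriptor count. */
        atomic_thread_fence(memory_order_seq_cst);

        if (ring_unused(r) < needed)
                return -1;      /* still full: stay stopped (like -EBUSY) */

        /* A reprieve: cleanup ran concurrently, so restart immediately. */
        atomic_store(&r->stopped, false);
        return 0;
}

/* Fast path: the common case has plenty of room, so skip the barrier. */
static int maybe_stop(struct fake_ring *r, int needed)
{
        if (ring_unused(r) >= needed)
                return 0;
        return maybe_stop_slow(r, needed);
}

int main(void)
{
        struct fake_ring ring = { .used = RING_SIZE - 2, .stopped = false };

        /* Asking for more descriptors than are free stops the "queue". */
        printf("maybe_stop(4) -> %d, stopped=%d\n",
               maybe_stop(&ring, 4), (int)atomic_load(&ring.stopped));

        /* After cleanup frees descriptors, the fast path succeeds again. */
        atomic_store(&ring.used, 0);
        atomic_store(&ring.stopped, false);
        printf("maybe_stop(4) -> %d, stopped=%d\n",
               maybe_stop(&ring, 4), (int)atomic_load(&ring.stopped));
        return 0;
}

As in the driver, only the slow path pays for the barrier, which is why the real code wraps the fast-path test in likely(). For the idiom to be complete, the other side of the handshake (the TX-clean path that frees descriptors and wakes the queue) needs a corresponding ordering guarantee before it tests the stopped state.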