Diffstat (limited to 'drivers/net/e1000')
-rw-r--r--  drivers/net/e1000/e1000_main.c  52
1 file changed, 42 insertions(+), 10 deletions(-)
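
In short: the patch factors the open-coded queue-stop checks in e1000_xmit_frame() out into e1000_maybe_stop_tx() and __e1000_maybe_stop_tx(), which stop the queue and re-check ring space after a memory barrier, and it reworks the wake path in e1000_clean_tx_irq() to use smp_mb() instead of taking tx_lock, closing the window where the queue could be stopped just as the cleanup path decided not to wake it.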
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index a1f5b0605363..e8a760802070 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2899,6 +2899,35 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
 	return 0;
 }
 
+static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+
+	netif_stop_queue(netdev);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it. */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available. */
+	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! */
+	netif_start_queue(netdev);
+	return 0;
+}
+
+static int e1000_maybe_stop_tx(struct net_device *netdev,
+			       struct e1000_tx_ring *tx_ring, int size)
+{
+	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __e1000_maybe_stop_tx(netdev, size);
+}
+
 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
 static int
 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
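
The two helpers above are the classic lockless stop/recheck pattern: e1000_maybe_stop_tx() tests ring space with no lock held, and the slow path stops the queue, issues a full barrier, then tests again, since the cleanup path on another CPU may free descriptors between the first test and netif_stop_queue(). A minimal userspace analogue of the producer side, sketched with C11 atomics instead of the kernel primitives (every name below is hypothetical, not from the patch):

/* Userspace sketch of the stop/recheck pattern, using C11 atomics in
 * place of smp_mb()/netif_*_queue().  All names here are hypothetical. */
#include <stdatomic.h>
#include <stdbool.h>

#define RING_SIZE 256

static atomic_uint head;           /* producer index */
static atomic_uint tail;           /* consumer index (next_to_clean) */
static atomic_bool queue_stopped;  /* stand-in for the netif queue state */

static unsigned int ring_unused(void)
{
	return RING_SIZE - (atomic_load(&head) - atomic_load(&tail));
}

/* Mirrors __e1000_maybe_stop_tx(): stop first, full fence, re-check. */
static int maybe_stop(unsigned int size)
{
	atomic_store(&queue_stopped, true);
	/* The fence orders the "stopped" store before the re-read of the
	 * ring state; without it the consumer could free space and sample
	 * the flag before our store lands, and the wakeup would be lost. */
	atomic_thread_fence(memory_order_seq_cst);
	if (ring_unused() < size)
		return -1;                        /* still full: -EBUSY */
	atomic_store(&queue_stopped, false);      /* a reprieve */
	return 0;
}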
@@ -2917,6 +2946,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int f;
 	len -= skb->data_len;
 
+	/* This goes back to the question of how to logically map a tx queue
+	 * to a flow. Right now, performance is impacted slightly negatively
+	 * if using multiple tx queues. If the stack breaks away from a
+	 * single qdisc implementation, we can look at this again. */
 	tx_ring = adapter->tx_ring;
 
 	if (unlikely(skb->len <= 0)) {
@@ -3012,8 +3045,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	/* need: count + 2 desc gap to keep tail from touching
 	 * head, otherwise try next time */
-	if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 2)) {
-		netif_stop_queue(netdev);
+	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
 		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
@@ -3060,8 +3092,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	netdev->trans_start = jiffies;
 
 	/* Make sure there is space in the ring for the next send. */
-	if (unlikely(E1000_DESC_UNUSED(tx_ring) < MAX_SKB_FRAGS + 2))
-		netif_stop_queue(netdev);
+	e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
 
 	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 	return NETDEV_TX_OK;
@@ -3556,13 +3587,14 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 	tx_ring->next_to_clean = i;
 
 #define TX_WAKE_THRESHOLD 32
-	if (unlikely(cleaned && netif_queue_stopped(netdev) &&
-		     netif_carrier_ok(netdev))) {
-		spin_lock(&tx_ring->tx_lock);
-		if (netif_queue_stopped(netdev) &&
-		    (E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))
+	if (unlikely(cleaned && netif_carrier_ok(netdev) &&
+		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (netif_queue_stopped(netdev))
 			netif_wake_queue(netdev);
-		spin_unlock(&tx_ring->tx_lock);
 	}
 
 	if (adapter->detect_tx_hung) {
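
The e1000_clean_tx_irq() hunk is the other half of the barrier pairing: next_to_clean is published first, the smp_mb() makes it visible to anyone who stops the queue afterwards, and only then is the stopped flag sampled, which is what lets the wake path drop tx_lock entirely. Continuing the hypothetical userspace sketch from the first hunk, the consumer side might look like:

#define WAKE_THRESHOLD 32  /* stand-in for TX_WAKE_THRESHOLD */

/* Mirrors the tail of e1000_clean_tx_irq(): publish progress, fence,
 * then sample the stopped flag -- no lock needed. */
static void clean_done(unsigned int new_tail)
{
	atomic_store(&tail, new_tail);            /* next_to_clean = i */
	if (ring_unused() >= WAKE_THRESHOLD) {
		/* Pairs with the fence in maybe_stop(): anyone stopping
		 * the queue after this point sees the new tail. */
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load(&queue_stopped))
			atomic_store(&queue_stopped, false);  /* wake */
	}
}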