author     Ranjit Manomohan <ranjitm@google.com>    2006-10-18 23:54:26 -0400
committer  David S. Miller <davem@davemloft.net>    2006-10-18 23:54:26 -0400
commit     42952231c6a8623117ee3cc89c82d382dc69ca30
tree       8c4db924ae0e1cebfcbedd7da75008675a36c523 /drivers/net
parent     5175c3786c244f8b689854db24c9e79b1c6a084f
[TG3]: Fix set ring params tx ring size implementation
Fix the ethtool set-ring-parameters implementation for the tg3 transmit
ring. The TX wake-up threshold is now derived from tx_pending instead of
the fixed hardware ring size, so netif_wake_queue() can still be reached
when a smaller transmit ring is configured. This prevents the interface
from locking up with small TX ring sizes.
Signed-off-by: Ranjit Manomohan <ranjitm@google.com>
Acked-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
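
For context, a minimal userspace sketch of the arithmetic behind the fix.
This is not kernel code; the 512-entry TG3_TX_RING_SIZE and the example
tx_pending value of 64 are assumptions for illustration only.

/*
 * Sketch: why a wake-up threshold fixed to the full hardware ring size
 * can wedge the queue when the user configures a smaller TX ring.
 */
#include <stdio.h>

#define TG3_TX_RING_SIZE              512   /* assumed hardware ring size */
/* Old: threshold fixed to a quarter of the full hardware ring. */
#define OLD_TX_WAKEUP_THRESH          (TG3_TX_RING_SIZE / 4)
/* New: threshold scales with the ring size configured via ethtool. */
#define NEW_TX_WAKEUP_THRESH(pending) ((pending) / 4)

int main(void)
{
	int tx_pending = 64;          /* small ring set via ethtool -G (example) */
	int tx_avail = tx_pending;    /* best case: ring completely drained */

	/* Old check: 64 > 128 can never be true, queue stays stopped. */
	printf("old threshold %d, wake possible: %s\n",
	       OLD_TX_WAKEUP_THRESH,
	       tx_avail > OLD_TX_WAKEUP_THRESH ? "yes" : "no");

	/* New check: 64 > 16, so the queue is woken once space frees up. */
	printf("new threshold %d, wake possible: %s\n",
	       NEW_TX_WAKEUP_THRESH(tx_pending),
	       tx_avail > NEW_TX_WAKEUP_THRESH(tx_pending) ? "yes" : "no");
	return 0;
}

With the old fixed threshold of 128 descriptors, an interface whose TX ring
was shrunk below 128 entries could never satisfy the wake-up condition and
stayed stopped; deriving the threshold from tp->tx_pending keeps the
condition reachable for any configured ring size.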
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/tg3.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 327836b1014e..39e483308a44 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -129,7 +129,7 @@
 #define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)
 
 /* minimum number of free TX descriptors required to wake up TX process */
-#define TG3_TX_WAKEUP_THRESH	(TG3_TX_RING_SIZE / 4)
+#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)
 
 /* number of ETHTOOL_GSTATS u64's */
 #define TG3_NUM_STATS	(sizeof(struct tg3_ethtool_stats)/sizeof(u64))
@@ -3075,10 +3075,10 @@ static void tg3_tx(struct tg3 *tp)
 	smp_mb();
 
 	if (unlikely(netif_queue_stopped(tp->dev) &&
-		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
+		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
 		netif_tx_lock(tp->dev);
 		if (netif_queue_stopped(tp->dev) &&
-		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
+		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
 			netif_wake_queue(tp->dev);
 		netif_tx_unlock(tp->dev);
 	}
@@ -3928,7 +3928,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tp->tx_prod = entry;
 	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
+		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
 			netif_wake_queue(tp->dev);
 	}
 
@@ -4143,7 +4143,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	tp->tx_prod = entry;
 	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
+		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
 			netif_wake_queue(tp->dev);
 	}
 