author	Michael Chan <mchan@broadcom.com>	2005-09-01 20:41:28 -0400
committer	David S. Miller <davem@davemloft.net>	2005-09-01 20:41:28 -0400
commit	51b9146869ab9492da785c5c9321d85f01655ab6 (patch)
tree	d95f8fda63a7e4a2508f1fc440dcc5e162140334 /drivers
parent	86d9f7f0c9cf06d7d3cfa2a9f0514cf21fa5fda1 (diff)
[TG3]: Minimize locking in TX path.
This is similar to Eric Dumazet's tx_lock patch for tg3, but takes it one step further to eliminate the tx_lock in the tx completion path when the tx queue is not stopped.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
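In rough terms, the completion path now tests netif_queue_stopped() outside the lock and only takes tx_lock to re-check and wake when the queue really is stopped, while tg3_start_xmit() re-checks availability right after stopping the queue so a completion that raced past the unlocked test cannot leave the queue stopped with room available. The following is a compileable, single-threaded user-space sketch of that handshake; every name in it (struct txq, STOP_THRESH, WAKEUP_THRESH, the pthread mutex standing in for tp->tx_lock) is illustrative, not code from tg3.c, and the real driver's memory ordering and interrupt context are not modelled.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define STOP_THRESH	18	/* stand-in for MAX_SKB_FRAGS + 1     */
#define WAKEUP_THRESH	32	/* stand-in for TG3_TX_WAKEUP_THRESH  */

struct txq {
	pthread_mutex_t	lock;	/* stand-in for tp->tx_lock            */
	bool		stopped;/* stand-in for netif_queue_stopped()  */
	unsigned int	avail;	/* stand-in for TX_BUFFS_AVAIL(tp)     */
};

/* Producer side (the tg3_start_xmit() change): stop the queue when
 * descriptors run low, then re-check in case a completion freed space
 * after the availability test but before the stop, and therefore
 * skipped the wake. */
static void xmit_one(struct txq *q)
{
	q->avail--;				/* one descriptor consumed */
	if (q->avail <= STOP_THRESH) {
		q->stopped = true;		/* netif_stop_queue()      */
		if (q->avail > WAKEUP_THRESH)
			q->stopped = false;	/* netif_wake_queue()      */
	}
}

/* Consumer side (the tg3_tx() change): lock-free unless the queue is
 * stopped; only then take the lock, re-check, and wake. */
static void complete_tx(struct txq *q, unsigned int freed)
{
	q->avail += freed;			/* tp->tx_cons advanced    */
	if (q->stopped) {			/* unlikely() in the patch */
		pthread_mutex_lock(&q->lock);
		if (q->stopped && q->avail > WAKEUP_THRESH)
			q->stopped = false;	/* netif_wake_queue()      */
		pthread_mutex_unlock(&q->lock);
	}
}

int main(void)
{
	struct txq q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.stopped = false,
		.avail = STOP_THRESH + 1,
	};

	xmit_one(&q);		/* drops to the stop threshold: queue stops */
	printf("stopped after xmit: %d\n", q.stopped);
	complete_tx(&q, 64);	/* completion frees space: queue wakes      */
	printf("stopped after completion: %d\n", q.stopped);
	return 0;
}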
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/tg3.c	24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index af8263a1580e..e877579aab38 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -121,12 +121,9 @@
 				   TG3_RX_RCB_RING_SIZE(tp))
 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
 				 TG3_TX_RING_SIZE)
-#define TX_RING_GAP(TP)	\
-	(TG3_TX_RING_SIZE - (TP)->tx_pending)
 #define TX_BUFFS_AVAIL(TP)						\
-	(((TP)->tx_cons <= (TP)->tx_prod) ?				\
-	  (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :		\
-	  (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
+	((TP)->tx_pending -						\
+	 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
 
 #define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
@@ -2880,9 +2877,13 @@ static void tg3_tx(struct tg3 *tp)
 
 	tp->tx_cons = sw_idx;
 
-	if (netif_queue_stopped(tp->dev) &&
-	    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
-		netif_wake_queue(tp->dev);
+	if (unlikely(netif_queue_stopped(tp->dev))) {
+		spin_lock(&tp->tx_lock);
+		if (netif_queue_stopped(tp->dev) &&
+		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
+			netif_wake_queue(tp->dev);
+		spin_unlock(&tp->tx_lock);
+	}
 }
 
 /* Returns size of skb allocated or < 0 on error.
@@ -3198,9 +3199,7 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 
 	/* run TX completion thread */
 	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
-		spin_lock(&tp->tx_lock);
 		tg3_tx(tp);
-		spin_unlock(&tp->tx_lock);
 	}
 
 	/* run RX thread, within the bounds set by NAPI.
@@ -3716,8 +3715,11 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
+	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
 		netif_stop_queue(dev);
+		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+			netif_wake_queue(tp->dev);
+	}
 
 out_unlock:
 	mmiowb();
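The reworked TX_BUFFS_AVAIL() in the first hunk leans on the ring size being a power of two: the in-flight descriptor count is (tx_prod - tx_cons) masked by TG3_TX_RING_SIZE - 1, and whatever is left of tx_pending is free. A small stand-alone C model of that arithmetic follows; the ring size, field names, and the sample indices are made up for illustration and are not taken from tg3.c.

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 512			/* must be a power of two        */
#define RING_MASK (RING_SIZE - 1)

struct ring {
	unsigned int prod;		/* producer index (transmit path)   */
	unsigned int cons;		/* consumer index (completion path) */
	unsigned int pending;		/* descriptors the driver may use   */
};

/* Free descriptors = budget minus in-flight; the mask handles wrap-around
 * even when prod has wrapped past cons, because the indices are unsigned. */
static unsigned int bufs_avail(const struct ring *r)
{
	return r->pending - ((r->prod - r->cons) & RING_MASK);
}

int main(void)
{
	struct ring r = { .prod = 5, .cons = 500, .pending = 511 };

	/* prod has wrapped: (5 - 500) & 511 == 17 descriptors in flight. */
	assert(bufs_avail(&r) == 511 - 17);
	printf("available descriptors: %u\n", bufs_avail(&r));
	return 0;
}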