-rw-r--r--	drivers/net/bnx2.c	31
-rw-r--r--	drivers/net/bnx2.h	6
2 files changed, 20 insertions, 17 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 3f754e6b48d6..0300a759728c 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -226,7 +226,7 @@ static struct flash_spec flash_5709 = {
 
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
-static inline u32 bnx2_tx_avail(struct bnx2 *bp)
+static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
 	u32 diff;
 
@@ -235,7 +235,7 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp)
 	/* The ring uses 256 indices for 255 entries, one of them
 	 * needs to be skipped.
 	 */
-	diff = bp->tx_prod - bp->tx_cons;
+	diff = bp->tx_prod - bnapi->tx_cons;
 	if (unlikely(diff >= TX_DESC_CNT)) {
 		diff &= 0xffff;
 		if (diff == TX_DESC_CNT)
@@ -2358,7 +2358,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
 	int tx_free_bd = 0;
 
 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
-	sw_cons = bp->tx_cons;
+	sw_cons = bnapi->tx_cons;
 
 	while (sw_cons != hw_cons) {
 		struct sw_bd *tx_buf;
@@ -2412,8 +2412,8 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
 		hw_cons = bnx2_get_hw_tx_cons(bnapi);
 	}
 
-	bp->hw_tx_cons = hw_cons;
-	bp->tx_cons = sw_cons;
+	bnapi->hw_tx_cons = hw_cons;
+	bnapi->tx_cons = sw_cons;
 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
 	 * before checking for netif_queue_stopped().  Without the
 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
@@ -2422,10 +2422,10 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
 	smp_mb();
 
 	if (unlikely(netif_queue_stopped(bp->dev)) &&
-		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
+		     (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
 		netif_tx_lock(bp->dev);
 		if ((netif_queue_stopped(bp->dev)) &&
-		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
+		    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
 			netif_wake_queue(bp->dev);
 		netif_tx_unlock(bp->dev);
 	}
@@ -2846,7 +2846,7 @@ bnx2_has_work(struct bnx2_napi *bnapi)
 	struct status_block *sblk = bp->status_blk;
 
 	if ((bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons) ||
-	    (bnx2_get_hw_tx_cons(bnapi) != bp->hw_tx_cons))
+	    (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
 		return 1;
 
 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
@@ -2876,7 +2876,7 @@ static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
 		REG_RD(bp, BNX2_HC_COMMAND);
 	}
 
-	if (bnx2_get_hw_tx_cons(bnapi) != bp->hw_tx_cons)
+	if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
 		bnx2_tx_int(bp, bnapi);
 
 	if (bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons)
@@ -4381,6 +4381,7 @@ bnx2_init_tx_ring(struct bnx2 *bp)
 {
 	struct tx_bd *txbd;
 	u32 cid;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi;
 
 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
 
@@ -4390,8 +4391,8 @@ bnx2_init_tx_ring(struct bnx2 *bp)
 	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
 
 	bp->tx_prod = 0;
-	bp->tx_cons = 0;
-	bp->hw_tx_cons = 0;
+	bnapi->tx_cons = 0;
+	bnapi->hw_tx_cons = 0;
 	bp->tx_prod_bseq = 0;
 
 	cid = TX_CID;
@@ -5440,8 +5441,10 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u32 len, vlan_tag_flags, last_frag, mss;
 	u16 prod, ring_prod;
 	int i;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi;
 
-	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(bnx2_tx_avail(bp, bnapi) <
+	    (skb_shinfo(skb)->nr_frags + 1))) {
 		netif_stop_queue(dev);
 		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
 			dev->name);
@@ -5556,9 +5559,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	bp->tx_prod = prod;
 	dev->trans_start = jiffies;
 
-	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
+	if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
 		netif_stop_queue(dev);
-		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
+		if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
 			netif_wake_queue(dev);
 	}
 
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 345b6db9a947..958fdda09c6b 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6509,6 +6509,9 @@ struct bnx2_napi {
 	struct status_block	*status_blk;
 	u32			last_status_idx;
 	u32			int_num;
+
+	u16			tx_cons;
+	u16			hw_tx_cons;
 };
 
 struct bnx2 {
@@ -6539,9 +6542,6 @@ struct bnx2 {
 	u32			tx_bidx_addr;
 	u32			tx_bseq_addr;
 
-	u16			tx_cons __attribute__((aligned(L1_CACHE_BYTES)));
-	u16			hw_tx_cons;
-
 	struct bnx2_napi	bnx2_napi;
 
 #ifdef BCM_VLAN