Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/bnx2.c	35
-rw-r--r--	drivers/net/bnx2.h	3
2 files changed, 18 insertions, 20 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 015ff7906601..da903b3ebfb0 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -107,6 +107,15 @@ static struct flash_spec flash_table[] =
 
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
+static inline u32 bnx2_tx_avail(struct bnx2 *bp)
+{
+	u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
+
+	if (diff > MAX_TX_DESC_CNT)
+		diff = (diff & MAX_TX_DESC_CNT) - 1;
+	return (bp->tx_ring_size - diff);
+}
+
 static u32
 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
 {
@@ -1338,22 +1347,19 @@ bnx2_tx_int(struct bnx2 *bp)
 		}
 	}
 
-	atomic_add(tx_free_bd, &bp->tx_avail_bd);
+	bp->tx_cons = sw_cons;
 
 	if (unlikely(netif_queue_stopped(bp->dev))) {
 		unsigned long flags;
 
 		spin_lock_irqsave(&bp->tx_lock, flags);
 		if ((netif_queue_stopped(bp->dev)) &&
-		    (atomic_read(&bp->tx_avail_bd) > MAX_SKB_FRAGS)) {
+		    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
 
 			netif_wake_queue(bp->dev);
 		}
 		spin_unlock_irqrestore(&bp->tx_lock, flags);
 	}
-
-	bp->tx_cons = sw_cons;
-
 }
 
 static inline void
@@ -2971,7 +2977,6 @@ bnx2_init_tx_ring(struct bnx2 *bp)
 	bp->tx_prod = 0;
 	bp->tx_cons = 0;
 	bp->tx_prod_bseq = 0;
-	atomic_set(&bp->tx_avail_bd, bp->tx_ring_size);
 
 	val = BNX2_L2CTX_TYPE_TYPE_L2;
 	val |= BNX2_L2CTX_TYPE_SIZE_L2;
@@ -4057,9 +4062,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u16 prod, ring_prod;
 	int i;
 
-	if (unlikely(atomic_read(&bp->tx_avail_bd) <
-	    (skb_shinfo(skb)->nr_frags + 1))) {
-
+	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
 		netif_stop_queue(dev);
 		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
 			dev->name);
@@ -4156,8 +4159,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	prod = NEXT_TX_BD(prod);
 	bp->tx_prod_bseq += skb->len;
 
-	atomic_sub(last_frag + 1, &bp->tx_avail_bd);
-
 	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
 	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
 
@@ -4166,16 +4167,14 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	bp->tx_prod = prod;
 	dev->trans_start = jiffies;
 
-	if (unlikely(atomic_read(&bp->tx_avail_bd) <= MAX_SKB_FRAGS)) {
+	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
 		unsigned long flags;
 
 		spin_lock_irqsave(&bp->tx_lock, flags);
-		if (atomic_read(&bp->tx_avail_bd) <= MAX_SKB_FRAGS) {
-			netif_stop_queue(dev);
-
-			if (atomic_read(&bp->tx_avail_bd) > MAX_SKB_FRAGS)
-				netif_wake_queue(dev);
-		}
+		netif_stop_queue(dev);
+
+		if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
+			netif_wake_queue(dev);
 		spin_unlock_irqrestore(&bp->tx_lock, flags);
 	}
 
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index e1fb099acbf2..9ad3f5740cd8 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -3841,12 +3841,12 @@ struct bnx2 {
 	struct status_block	*status_blk;
 	u32			last_status_idx;
 
-	atomic_t		tx_avail_bd;
 	struct tx_bd		*tx_desc_ring;
 	struct sw_bd		*tx_buf_ring;
 	u32			tx_prod_bseq;
 	u16			tx_prod;
 	u16			tx_cons;
+	int			tx_ring_size;
 
 #ifdef BCM_VLAN
 	struct vlan_group	*vlgrp;
@@ -3929,7 +3929,6 @@ struct bnx2 {
 	u16			fw_wr_seq;
 	u16			fw_drv_pulse_wr_seq;
 
-	int			tx_ring_size;
 	dma_addr_t		tx_desc_mapping;
 
 
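
Note on the change above (not part of the patch): the new bnx2_tx_avail() helper derives the number of free transmit buffer descriptors from bp->tx_prod and bp->tx_cons on demand, which is what lets the patch drop the atomic_t tx_avail_bd counter and the atomic_add()/atomic_sub() calls from the transmit and completion paths; each index is advanced by only one side, so no shared counter is needed. Below is a minimal user-space sketch of the same index arithmetic, not driver code. It assumes a 256-entry descriptor page (MAX_TX_DESC_CNT == 255, also used as the index mask) and a usable ring size of 255; under that assumption the "- 1" in the wrapped case would account for the last slot of each page being reserved as a chain descriptor that the producer index skips. The real constants live in bnx2.h and may differ.

#include <stdio.h>

/* Assumed values for illustration only: a 4 KiB page of 16-byte tx_bd
 * entries gives 256 descriptors per page.  The real definitions are in
 * bnx2.h. */
#define TX_DESC_CNT		256u
#define MAX_TX_DESC_CNT		(TX_DESC_CNT - 1)	/* 255, also the index mask */
#define TX_RING_IDX(x)		((unsigned int)(x) & MAX_TX_DESC_CNT)

/* Same arithmetic as the bnx2_tx_avail() added by the patch. */
static unsigned int tx_avail(unsigned int tx_prod, unsigned int tx_cons,
			     unsigned int tx_ring_size)
{
	unsigned int diff = TX_RING_IDX(tx_prod) - TX_RING_IDX(tx_cons);

	/* If the producer index has wrapped past the consumer index, the
	 * subtraction underflows; reduce it with the ring mask and drop
	 * one slot. */
	if (diff > MAX_TX_DESC_CNT)
		diff = (diff & MAX_TX_DESC_CNT) - 1;
	return tx_ring_size - diff;
}

int main(void)
{
	/* No wrap: producer 20 ahead of consumer, 235 of 255 free. */
	printf("%u\n", tx_avail(120, 100, MAX_TX_DESC_CNT));
	/* Producer wrapped: masked difference is 11, minus 1 leaves 10
	 * counted as in flight, 245 of 255 free. */
	printf("%u\n", tx_avail(5, 250, MAX_TX_DESC_CNT));
	return 0;
}

Built with any C compiler, this prints 235 and 245, i.e. 20 and 10 descriptors in flight on an assumed 255-entry usable ring.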