author    David Woodhouse <dwmw2@infradead.org>  2006-08-30 18:30:38 -0400
committer David Woodhouse <dwmw2@infradead.org>  2006-08-30 18:30:38 -0400
commit    0a7d5f8ce960e74fa22986bda4af488539796e49 (patch)
tree      e29ad17808a5c3410518e22dae8dfe94801b59f3 /drivers/net/bnx2.c
parent    0165508c80a2b5d5268d9c5dfa9b30c534a33693 (diff)
parent    dc709bd190c130b299ac19d596594256265c042a (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers/net/bnx2.c')
-rw-r--r--  drivers/net/bnx2.c  81
1 file changed, 41 insertions, 40 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 4f4db5ae503b..652eb05a6c2d 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
 
 #define DRV_MODULE_NAME		"bnx2"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.4.43"
-#define DRV_MODULE_RELDATE	"June 28, 2006"
+#define DRV_MODULE_VERSION	"1.4.44"
+#define DRV_MODULE_RELDATE	"August 10, 2006"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -209,8 +209,10 @@ MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
 {
-	u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
+	u32 diff;
 
+	smp_mb();
+	diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
 	if (diff > MAX_TX_DESC_CNT)
 		diff = (diff & MAX_TX_DESC_CNT) - 1;
 	return (bp->tx_ring_size - diff);
@@ -1569,7 +1571,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
 	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
 	unsigned long align;
 
-	skb = dev_alloc_skb(bp->rx_buf_size);
+	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
 	if (skb == NULL) {
 		return -ENOMEM;
 	}
@@ -1578,7 +1580,6 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
 		skb_reserve(skb, 8 - align);
 	}
 
-	skb->dev = bp->dev;
 	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
 		PCI_DMA_FROMDEVICE);
 
@@ -1639,7 +1640,7 @@ bnx2_tx_int(struct bnx2 *bp)
 		skb = tx_buf->skb;
 #ifdef BCM_TSO
 		/* partial BD completions possible with TSO packets */
-		if (skb_shinfo(skb)->gso_size) {
+		if (skb_is_gso(skb)) {
 			u16 last_idx, last_ring_idx;
 
 			last_idx = sw_cons +
@@ -1686,15 +1687,20 @@ bnx2_tx_int(struct bnx2 *bp)
 	}
 
 	bp->tx_cons = sw_cons;
+	/* Need to make the tx_cons update visible to bnx2_start_xmit()
+	 * before checking for netif_queue_stopped(). Without the
+	 * memory barrier, there is a small possibility that bnx2_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
 
-	if (unlikely(netif_queue_stopped(bp->dev))) {
-		spin_lock(&bp->tx_lock);
+	if (unlikely(netif_queue_stopped(bp->dev)) &&
+	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
+		netif_tx_lock(bp->dev);
 		if ((netif_queue_stopped(bp->dev)) &&
-		    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
-
+		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
 			netif_wake_queue(bp->dev);
-		}
-		spin_unlock(&bp->tx_lock);
+		netif_tx_unlock(bp->dev);
 	}
 }
 
@@ -1786,7 +1792,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
 			struct sk_buff *new_skb;
 
-			new_skb = dev_alloc_skb(len + 2);
+			new_skb = netdev_alloc_skb(bp->dev, len + 2);
 			if (new_skb == NULL)
 				goto reuse_rx;
 
@@ -1797,7 +1803,6 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 
 			skb_reserve(new_skb, 2);
 			skb_put(new_skb, len);
-			new_skb->dev = bp->dev;
 
 			bnx2_reuse_rx_skb(bp, skb,
 				sw_ring_cons, sw_ring_prod);
@@ -3503,6 +3508,8 @@ bnx2_init_tx_ring(struct bnx2 *bp)
 	struct tx_bd *txbd;
 	u32 val;
 
+	bp->tx_wake_thresh = bp->tx_ring_size / 2;
+
 	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
 
 	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
@@ -3952,7 +3959,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 		return -EINVAL;
 
 	pkt_size = 1514;
-	skb = dev_alloc_skb(pkt_size);
+	skb = netdev_alloc_skb(bp->dev, pkt_size);
 	if (!skb)
 		return -ENOMEM;
 	packet = skb_put(skb, pkt_size);
@@ -4390,10 +4397,8 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 #endif
 
 /* Called with netif_tx_lock.
- * hard_start_xmit is pseudo-lockless - a lock is only required when
- * the tx queue is full. This way, we get the benefit of lockless
- * operations most of the time without the complexities to handle
- * netif_stop_queue/wake_queue race conditions.
+ * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue().
  */
 static int
 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -4512,12 +4517,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->trans_start = jiffies;
 
 	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
-		spin_lock(&bp->tx_lock);
 		netif_stop_queue(dev);
-
-		if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
+		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
 			netif_wake_queue(dev);
-		spin_unlock(&bp->tx_lock);
 	}
 
 	return NETDEV_TX_OK;
@@ -5575,20 +5577,20 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
 	rc = pci_enable_device(pdev);
 	if (rc) {
-		printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
 		goto err_out;
 	}
 
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-		printk(KERN_ERR PFX "Cannot find PCI device base address, "
-		       "aborting.\n");
+		dev_err(&pdev->dev,
+			"Cannot find PCI device base address, aborting.\n");
 		rc = -ENODEV;
 		goto err_out_disable;
 	}
 
 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
 	if (rc) {
-		printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
+		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
 		goto err_out_disable;
 	}
 
@@ -5596,15 +5598,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
 	if (bp->pm_cap == 0) {
-		printk(KERN_ERR PFX "Cannot find power management capability, "
-		       "aborting.\n");
+		dev_err(&pdev->dev,
+			"Cannot find power management capability, aborting.\n");
 		rc = -EIO;
 		goto err_out_release;
 	}
 
 	bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
 	if (bp->pcix_cap == 0) {
-		printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
+		dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
 		rc = -EIO;
 		goto err_out_release;
 	}
@@ -5612,14 +5614,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
 		bp->flags |= USING_DAC_FLAG;
 		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
-			printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
-			       "failed, aborting.\n");
+			dev_err(&pdev->dev,
+				"pci_set_consistent_dma_mask failed, aborting.\n");
 			rc = -EIO;
 			goto err_out_release;
 		}
 	}
 	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
-		printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
+		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
 		rc = -EIO;
 		goto err_out_release;
 	}
@@ -5628,7 +5630,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->pdev = pdev;
 
 	spin_lock_init(&bp->phy_lock);
-	spin_lock_init(&bp->tx_lock);
 	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
 
 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
@@ -5639,7 +5640,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
 
 	if (!bp->regview) {
-		printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
+		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
 		rc = -ENOMEM;
 		goto err_out_release;
 	}
@@ -5711,8 +5712,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
 		!(bp->flags & PCIX_FLAG)) {
 
-		printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
-		       "aborting.\n");
+		dev_err(&pdev->dev,
+			"5706 A1 can only be used in a PCIX bus, aborting.\n");
 		goto err_out_unmap;
 	}
 
@@ -5733,7 +5734,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
-		printk(KERN_ERR PFX "Firmware not running, aborting.\n");
+		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
 		rc = -ENODEV;
 		goto err_out_unmap;
 	}
@@ -5751,7 +5752,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->mac_addr[5] = (u8) reg;
 
 	bp->tx_ring_size = MAX_TX_DESC_CNT;
-	bnx2_set_rx_ring_size(bp, 100);
+	bnx2_set_rx_ring_size(bp, 255);
 
 	bp->rx_csum = 1;
 
@@ -5895,7 +5896,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #endif
 
 	if ((rc = register_netdev(dev))) {
-		printk(KERN_ERR PFX "Cannot register net device\n");
+		dev_err(&pdev->dev, "Cannot register net device\n");
 		if (bp->regview)
 			iounmap(bp->regview);
 		pci_release_regions(pdev);