path: root/drivers/net/bnx2.c
Diffstat (limited to 'drivers/net/bnx2.c')
-rw-r--r--	drivers/net/bnx2.c	229
1 file changed, 129 insertions(+), 100 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 8acc655ec1e8..7babf6af4e28 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -14,8 +14,8 @@
 
 #define DRV_MODULE_NAME		"bnx2"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.2.19"
-#define DRV_MODULE_RELDATE	"May 23, 2005"
+#define DRV_MODULE_VERSION	"1.2.20"
+#define DRV_MODULE_RELDATE	"August 22, 2005"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -52,7 +52,6 @@ static struct {
 	{ "HP NC370i Multifunction Gigabit Server Adapter" },
 	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
 	{ "HP NC370F Multifunction Gigabit Server Adapter" },
-	{ 0 },
 	};
 
 static struct pci_device_id bnx2_pci_tbl[] = {
@@ -108,6 +107,15 @@ static struct flash_spec flash_table[] =
 
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
+static inline u32 bnx2_tx_avail(struct bnx2 *bp)
+{
+	u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
+
+	if (diff > MAX_TX_DESC_CNT)
+		diff = (diff & MAX_TX_DESC_CNT) - 1;
+	return (bp->tx_ring_size - diff);
+}
+
 static u32
 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
 {
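The new bnx2_tx_avail() helper above derives the number of free TX descriptors from the free-running tx_prod and tx_cons indices instead of the old tx_avail_bd atomic counter. Because the subtraction is done in u32, a producer that has wrapped below the consumer underflows well past MAX_TX_DESC_CNT, and the masked-and-decremented fallback brings the count back into range. Below is a standalone sketch of the same arithmetic; the constants, the TX_RING_IDX stand-in and the ring_size argument are illustrative assumptions, not the driver's real definitions.

/* Standalone demonstration of the wrap-around arithmetic used by the new
 * bnx2_tx_avail() helper.  The constants below are illustrative stand-ins:
 * a 256-entry ring page whose last slot is not a normal data descriptor,
 * so 255 usable indices per wrap.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_TX_DESC_CNT  255u                     /* assumed mask, not bnx2.h */
#define TX_RING_IDX(x)   ((x) & MAX_TX_DESC_CNT)  /* assumed index macro */

static uint32_t tx_avail(uint16_t prod, uint16_t cons, uint32_t ring_size)
{
	uint32_t diff = TX_RING_IDX(prod) - TX_RING_IDX(cons);

	/* When prod has wrapped below cons the u32 subtraction underflows;
	 * masking brings it back into range, and the extra "- 1" accounts
	 * for the ring slot the producer index skips at the wrap point.
	 */
	if (diff > MAX_TX_DESC_CNT)
		diff = (diff & MAX_TX_DESC_CNT) - 1;
	return ring_size - diff;
}

int main(void)
{
	/* no wrap: 20 descriptors in flight */
	printf("%u\n", tx_avail(120, 100, 255));  /* prints 235 */
	/* wrapped: prod restarted at 5 while cons is still at 250 */
	printf("%u\n", tx_avail(5, 250, 255));    /* prints 245 */
	return 0;
}

Compiled as an ordinary C program this prints 235 and 245 for the unwrapped and wrapped cases.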
@@ -807,7 +815,19 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
 		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
 		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
 			BMCR_ANENABLE);
-		bp->serdes_an_pending = SERDES_AN_TIMEOUT / bp->timer_interval;
+		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
+			/* Speed up link-up time when the link partner
+			 * does not autonegotiate which is very common
+			 * in blade servers. Some blade servers use
+			 * IPMI for keyboard input and it's important
+			 * to minimize link disruptions. Autoneg. involves
+			 * exchanging base pages plus 3 next pages and
+			 * normally completes in about 120 msec.
+			 */
+			bp->current_interval = SERDES_AN_TIMEOUT;
+			bp->serdes_an_pending = 1;
+			mod_timer(&bp->timer, jiffies + bp->current_interval);
+		}
 	}
 
 	return 0;
@@ -1327,22 +1347,17 @@ bnx2_tx_int(struct bnx2 *bp)
 		}
 	}
 
-	atomic_add(tx_free_bd, &bp->tx_avail_bd);
+	bp->tx_cons = sw_cons;
 
 	if (unlikely(netif_queue_stopped(bp->dev))) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&bp->tx_lock, flags);
+		spin_lock(&bp->tx_lock);
 		if ((netif_queue_stopped(bp->dev)) &&
-		    (atomic_read(&bp->tx_avail_bd) > MAX_SKB_FRAGS)) {
+		    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
 
 			netif_wake_queue(bp->dev);
 		}
-		spin_unlock_irqrestore(&bp->tx_lock, flags);
+		spin_unlock(&bp->tx_lock);
 	}
-
-	bp->tx_cons = sw_cons;
-
 }
 
 static inline void
@@ -1523,15 +1538,12 @@ bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
 
 	/* Return here if interrupt is disabled. */
-	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		return IRQ_RETVAL(1);
-	}
+	if (unlikely(atomic_read(&bp->intr_sem) != 0))
+		return IRQ_HANDLED;
 
-	if (netif_rx_schedule_prep(dev)) {
-		__netif_rx_schedule(dev);
-	}
+	netif_rx_schedule(dev);
 
-	return IRQ_RETVAL(1);
+	return IRQ_HANDLED;
 }
 
 static irqreturn_t
@@ -1549,22 +1561,19 @@ bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
 	if ((bp->status_blk->status_idx == bp->last_status_idx) ||
 	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
-		return IRQ_RETVAL(0);
+		return IRQ_NONE;
 
 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
 
 	/* Return here if interrupt is shared and is disabled. */
-	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		return IRQ_RETVAL(1);
-	}
+	if (unlikely(atomic_read(&bp->intr_sem) != 0))
+		return IRQ_HANDLED;
 
-	if (netif_rx_schedule_prep(dev)) {
-		__netif_rx_schedule(dev);
-	}
+	netif_rx_schedule(dev);
 
-	return IRQ_RETVAL(1);
+	return IRQ_HANDLED;
 }
 
 static int
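Both handler hunks above drop the opaque IRQ_RETVAL(0)/IRQ_RETVAL(1) returns in favour of IRQ_NONE and IRQ_HANDLED, and collapse the netif_rx_schedule_prep()/__netif_rx_schedule() pair into the single netif_rx_schedule() call, which performs the prep test internally. Below is a minimal sketch of the resulting handler shape against the 2.6.13-era API; the device name and mydev_irq_pending() are placeholders, not bnx2 code.

/* Sketch only: 2.6.13-era irqreturn_t / NAPI scheduling pattern.
 * mydev_irq_pending() is a placeholder for the device-specific check.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

static int mydev_irq_pending(struct net_device *dev);

static irqreturn_t mydev_interrupt(int irq, void *dev_instance,
				   struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;

	if (!mydev_irq_pending(dev))
		return IRQ_NONE;	/* shared line, not our interrupt */

	/* Ack/mask the device, then hand the work to the poll routine.
	 * netif_rx_schedule() does nothing if the device is already
	 * queued for polling, so no explicit prep test is needed here.
	 */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}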
@@ -1581,11 +1590,9 @@ bnx2_poll(struct net_device *dev, int *budget)
 	    (bp->status_blk->status_attn_bits_ack &
 	     STATUS_ATTN_BITS_LINK_STATE)) {
 
-		unsigned long flags;
-
-		spin_lock_irqsave(&bp->phy_lock, flags);
+		spin_lock(&bp->phy_lock);
 		bnx2_phy_int(bp);
-		spin_unlock_irqrestore(&bp->phy_lock, flags);
+		spin_unlock(&bp->phy_lock);
 	}
 
 	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_cons) {
@@ -1628,9 +1635,8 @@ bnx2_set_rx_mode(struct net_device *dev)
 	struct bnx2 *bp = dev->priv;
 	u32 rx_mode, sort_mode;
 	int i;
-	unsigned long flags;
 
-	spin_lock_irqsave(&bp->phy_lock, flags);
+	spin_lock_bh(&bp->phy_lock);
 
 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
@@ -1691,7 +1697,7 @@ bnx2_set_rx_mode(struct net_device *dev)
 	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
 	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
 
-	spin_unlock_irqrestore(&bp->phy_lock, flags);
+	spin_unlock_bh(&bp->phy_lock);
 }
 
 static void
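The phy_lock conversions in this patch appear to follow one pattern: code reached only from softirq context (the NAPI poll routine, and the timer once it adopts the same discipline) keeps plain spin_lock(), while process-context entry points such as the rx-mode handler above switch from spin_lock_irqsave() to spin_lock_bh(), which is sufficient once nothing takes the lock from hard-irq context. A compressed sketch of that pairing, with placeholder names:

/* Sketch of the locking pairing, not bnx2 code: foo_poll_link_event()
 * runs in softirq context, foo_set_rx_mode() in process context.
 */
#include <linux/spinlock.h>

static spinlock_t phy_lock = SPIN_LOCK_UNLOCKED;

static void foo_poll_link_event(void)		/* softirq context */
{
	spin_lock(&phy_lock);			/* softirqs do not nest on a CPU */
	/* ... handle link change ... */
	spin_unlock(&phy_lock);
}

static void foo_set_rx_mode(void)		/* process context */
{
	spin_lock_bh(&phy_lock);		/* keep the softirq users out */
	/* ... program RX filters ... */
	spin_unlock_bh(&phy_lock);
}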
@@ -2960,7 +2966,6 @@ bnx2_init_tx_ring(struct bnx2 *bp)
 	bp->tx_prod = 0;
 	bp->tx_cons = 0;
 	bp->tx_prod_bseq = 0;
-	atomic_set(&bp->tx_avail_bd, bp->tx_ring_size);
 
 	val = BNX2_L2CTX_TYPE_TYPE_L2;
 	val |= BNX2_L2CTX_TYPE_SIZE_L2;
@@ -3507,11 +3512,11 @@ bnx2_test_registers(struct bnx2 *bp)
 		rw_mask = reg_tbl[i].rw_mask;
 		ro_mask = reg_tbl[i].ro_mask;
 
-		save_val = readl((u8 *) bp->regview + offset);
+		save_val = readl(bp->regview + offset);
 
-		writel(0, (u8 *) bp->regview + offset);
+		writel(0, bp->regview + offset);
 
-		val = readl((u8 *) bp->regview + offset);
+		val = readl(bp->regview + offset);
 		if ((val & rw_mask) != 0) {
 			goto reg_test_err;
 		}
@@ -3520,9 +3525,9 @@ bnx2_test_registers(struct bnx2 *bp)
 			goto reg_test_err;
 		}
 
-		writel(0xffffffff, (u8 *) bp->regview + offset);
+		writel(0xffffffff, bp->regview + offset);
 
-		val = readl((u8 *) bp->regview + offset);
+		val = readl(bp->regview + offset);
 		if ((val & rw_mask) != rw_mask) {
 			goto reg_test_err;
 		}
@@ -3531,11 +3536,11 @@ bnx2_test_registers(struct bnx2 *bp)
 			goto reg_test_err;
 		}
 
-		writel(save_val, (u8 *) bp->regview + offset);
+		writel(save_val, bp->regview + offset);
 		continue;
 
 reg_test_err:
-		writel(save_val, (u8 *) bp->regview + offset);
+		writel(save_val, bp->regview + offset);
 		ret = -ENODEV;
 		break;
 	}
@@ -3752,10 +3757,10 @@ bnx2_test_link(struct bnx2 *bp)
 {
 	u32 bmsr;
 
-	spin_lock_irq(&bp->phy_lock);
+	spin_lock_bh(&bp->phy_lock);
 	bnx2_read_phy(bp, MII_BMSR, &bmsr);
 	bnx2_read_phy(bp, MII_BMSR, &bmsr);
-	spin_unlock_irq(&bp->phy_lock);
+	spin_unlock_bh(&bp->phy_lock);
 
 	if (bmsr & BMSR_LSTATUS) {
 		return 0;
@@ -3801,6 +3806,9 @@ bnx2_timer(unsigned long data)
 	struct bnx2 *bp = (struct bnx2 *) data;
 	u32 msg;
 
+	if (!netif_running(bp->dev))
+		return;
+
 	if (atomic_read(&bp->intr_sem) != 0)
 		goto bnx2_restart_timer;
 
@@ -3809,15 +3817,16 @@ bnx2_timer(unsigned long data)
 
 	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
 	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
-		unsigned long flags;
 
-		spin_lock_irqsave(&bp->phy_lock, flags);
+		spin_lock(&bp->phy_lock);
 		if (bp->serdes_an_pending) {
 			bp->serdes_an_pending--;
 		}
 		else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
 			u32 bmcr;
 
+			bp->current_interval = bp->timer_interval;
+
 			bnx2_read_phy(bp, MII_BMCR, &bmcr);
 
 			if (bmcr & BMCR_ANENABLE) {
@@ -3860,14 +3869,14 @@ bnx2_timer(unsigned long data)
 
 			}
 		}
+		else
+			bp->current_interval = bp->timer_interval;
 
-		spin_unlock_irqrestore(&bp->phy_lock, flags);
+		spin_unlock(&bp->phy_lock);
 	}
 
 bnx2_restart_timer:
-	bp->timer.expires = RUN_AT(bp->timer_interval);
-
-	add_timer(&bp->timer);
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
 
 /* Called with rtnl_lock */
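The timer hunks replace the expires/add_timer re-arming with mod_timer() and add bp->current_interval, so bnx2_timer() can poll quickly while SerDes autonegotiation is pending and fall back to the one-second timer_interval otherwise. Below is a minimal sketch of that self-rearming pattern against the 2.6-era timer API; every name other than init_timer(), mod_timer() and the timer_list fields is a placeholder.

/* Sketch only: a self-rearming timer with a variable interval. */
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/param.h>

struct foo {
	struct timer_list timer;
	unsigned long timer_interval;	/* normal period, e.g. HZ */
	unsigned long current_interval;	/* period used for the next re-arm */
};

static void foo_timer(unsigned long data)
{
	struct foo *fp = (struct foo *) data;

	/* ... periodic work; may shorten fp->current_interval when the
	 * next tick should come sooner, or reset it to timer_interval ...
	 */
	mod_timer(&fp->timer, jiffies + fp->current_interval);
}

static void foo_timer_setup(struct foo *fp)
{
	fp->timer_interval = HZ;
	fp->current_interval = HZ;
	init_timer(&fp->timer);
	fp->timer.data = (unsigned long) fp;
	fp->timer.function = foo_timer;
	/* first arm happens later, e.g. at open time:
	 * mod_timer(&fp->timer, jiffies + fp->current_interval);
	 */
}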
@@ -3920,12 +3929,7 @@ bnx2_open(struct net_device *dev)
 		return rc;
 	}
 
-	init_timer(&bp->timer);
-
-	bp->timer.expires = RUN_AT(bp->timer_interval);
-	bp->timer.data = (unsigned long) bp;
-	bp->timer.function = bnx2_timer;
-	add_timer(&bp->timer);
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
 
 	atomic_set(&bp->intr_sem, 0);
 
@@ -3976,12 +3980,17 @@ bnx2_reset_task(void *data)
 {
 	struct bnx2 *bp = data;
 
+	if (!netif_running(bp->dev))
+		return;
+
+	bp->in_reset_task = 1;
 	bnx2_netif_stop(bp);
 
 	bnx2_init_nic(bp);
 
 	atomic_set(&bp->intr_sem, 1);
 	bnx2_netif_start(bp);
+	bp->in_reset_task = 0;
 }
 
 static void
@@ -4041,9 +4050,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u16 prod, ring_prod;
 	int i;
 
-	if (unlikely(atomic_read(&bp->tx_avail_bd) <
-	    (skb_shinfo(skb)->nr_frags + 1))) {
-
+	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
 		netif_stop_queue(dev);
 		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
 			dev->name);
@@ -4140,8 +4147,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	prod = NEXT_TX_BD(prod);
 	bp->tx_prod_bseq += skb->len;
 
-	atomic_sub(last_frag + 1, &bp->tx_avail_bd);
-
 	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
 	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
 
@@ -4150,17 +4155,13 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	bp->tx_prod = prod;
 	dev->trans_start = jiffies;
 
-	if (unlikely(atomic_read(&bp->tx_avail_bd) <= MAX_SKB_FRAGS)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&bp->tx_lock, flags);
-		if (atomic_read(&bp->tx_avail_bd) <= MAX_SKB_FRAGS) {
-			netif_stop_queue(dev);
-
-			if (atomic_read(&bp->tx_avail_bd) > MAX_SKB_FRAGS)
-				netif_wake_queue(dev);
-		}
-		spin_unlock_irqrestore(&bp->tx_lock, flags);
+	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
+		spin_lock(&bp->tx_lock);
+		netif_stop_queue(dev);
+
+		if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
+			netif_wake_queue(dev);
+		spin_unlock(&bp->tx_lock);
 	}
 
 	return NETDEV_TX_OK;
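With the tx_avail_bd counter gone, the transmit path and the completion path coordinate through tx_prod, tx_cons and the queue state alone: bnx2_start_xmit() advances tx_prod without taking tx_lock, bnx2_tx_int() publishes tx_cons before it looks at the queue, and tx_lock is held only around the stop/wake decision so the two sides cannot both observe a full ring and leave the queue stopped. Below is a condensed sketch of the two halves of that handshake; the foo_* names are placeholders that mirror the hunks above rather than quote them, and foo_tx_avail() is a simplified stand-in for bnx2_tx_avail().

/* Sketch of the stop/wake handshake, not the driver verbatim. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

#define FOO_MAX_DESC_CNT 255u			/* illustrative ring mask */

struct foo {
	spinlock_t tx_lock;
	u16 tx_prod, tx_cons;
	u32 tx_ring_size;
};

static inline u32 foo_tx_avail(struct foo *fp)	/* simplified stand-in */
{
	u32 diff = (fp->tx_prod - fp->tx_cons) & FOO_MAX_DESC_CNT;

	return fp->tx_ring_size - diff;
}

/* xmit side: runs without tx_lock until space looks low */
static void foo_xmit_tail(struct foo *fp, struct net_device *dev)
{
	if (unlikely(foo_tx_avail(fp) <= MAX_SKB_FRAGS)) {
		spin_lock(&fp->tx_lock);
		netif_stop_queue(dev);
		/* re-check: the completion path may have advanced tx_cons
		 * between the test above and taking the lock */
		if (foo_tx_avail(fp) > MAX_SKB_FRAGS)
			netif_wake_queue(dev);
		spin_unlock(&fp->tx_lock);
	}
}

/* completion side: publish the new consumer index first, then wake */
static void foo_tx_complete_tail(struct foo *fp, struct net_device *dev,
				 u16 sw_cons)
{
	fp->tx_cons = sw_cons;
	if (unlikely(netif_queue_stopped(dev))) {
		spin_lock(&fp->tx_lock);
		if (netif_queue_stopped(dev) &&
		    foo_tx_avail(fp) > MAX_SKB_FRAGS)
			netif_wake_queue(dev);
		spin_unlock(&fp->tx_lock);
	}
}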
@@ -4173,7 +4174,13 @@ bnx2_close(struct net_device *dev)
 	struct bnx2 *bp = dev->priv;
 	u32 reset_code;
 
-	flush_scheduled_work();
+	/* Calling flush_scheduled_work() may deadlock because
+	 * linkwatch_event() may be on the workqueue and it will try to get
+	 * the rtnl_lock which we are holding.
+	 */
+	while (bp->in_reset_task)
+		msleep(1);
+
 	bnx2_netif_stop(bp);
 	del_timer_sync(&bp->timer);
 	if (bp->wol)
@@ -4390,11 +4397,11 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	bp->req_line_speed = req_line_speed;
 	bp->req_duplex = req_duplex;
 
-	spin_lock_irq(&bp->phy_lock);
+	spin_lock_bh(&bp->phy_lock);
 
 	bnx2_setup_phy(bp);
 
-	spin_unlock_irq(&bp->phy_lock);
+	spin_unlock_bh(&bp->phy_lock);
 
 	return 0;
 }
@@ -4464,19 +4471,20 @@ bnx2_nway_reset(struct net_device *dev)
 		return -EINVAL;
 	}
 
-	spin_lock_irq(&bp->phy_lock);
+	spin_lock_bh(&bp->phy_lock);
 
 	/* Force a link down visible on the other side */
 	if (bp->phy_flags & PHY_SERDES_FLAG) {
 		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
-		spin_unlock_irq(&bp->phy_lock);
+		spin_unlock_bh(&bp->phy_lock);
 
 		msleep(20);
 
-		spin_lock_irq(&bp->phy_lock);
+		spin_lock_bh(&bp->phy_lock);
 		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
-			bp->serdes_an_pending = SERDES_AN_TIMEOUT /
-				bp->timer_interval;
+			bp->current_interval = SERDES_AN_TIMEOUT;
+			bp->serdes_an_pending = 1;
+			mod_timer(&bp->timer, jiffies + bp->current_interval);
 		}
 	}
 
@@ -4484,7 +4492,7 @@ bnx2_nway_reset(struct net_device *dev)
 	bmcr &= ~BMCR_LOOPBACK;
 	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
 
-	spin_unlock_irq(&bp->phy_lock);
+	spin_unlock_bh(&bp->phy_lock);
 
 	return 0;
 }
@@ -4670,11 +4678,11 @@ bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
 		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
 	}
 
-	spin_lock_irq(&bp->phy_lock);
+	spin_lock_bh(&bp->phy_lock);
 
 	bnx2_setup_phy(bp);
 
-	spin_unlock_irq(&bp->phy_lock);
+	spin_unlock_bh(&bp->phy_lock);
 
 	return 0;
 }
@@ -4698,7 +4706,7 @@ bnx2_set_rx_csum(struct net_device *dev, u32 data)
 
 #define BNX2_NUM_STATS 45
 
-struct {
+static struct {
 	char string[ETH_GSTRING_LEN];
 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
 	{ "rx_bytes" },
@@ -4750,7 +4758,7 @@ struct {
 
 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
 
-unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
+static unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
     STATS_OFFSET32(stat_IfHCInOctets_hi),
     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
     STATS_OFFSET32(stat_IfHCOutOctets_hi),
@@ -4801,7 +4809,7 @@ unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
  * skipped because of errata.
  */
-u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
+static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
 	8,0,8,8,8,8,8,8,8,8,
 	4,0,4,4,4,4,4,4,4,4,
 	4,4,4,4,4,4,4,4,4,4,
@@ -4811,7 +4819,7 @@ u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
 
 #define BNX2_NUM_TESTS 6
 
-struct {
+static struct {
 	char string[ETH_GSTRING_LEN];
 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
 	{ "register_test (offline)" },
@@ -4910,7 +4918,7 @@ bnx2_get_ethtool_stats(struct net_device *dev,
 	struct bnx2 *bp = dev->priv;
 	int i;
 	u32 *hw_stats = (u32 *) bp->stats_blk;
-	u8 *stats_len_arr = 0;
+	u8 *stats_len_arr = NULL;
 
 	if (hw_stats == NULL) {
 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
@@ -5012,7 +5020,7 @@ static struct ethtool_ops bnx2_ethtool_ops = {
 static int
 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
+	struct mii_ioctl_data *data = if_mii(ifr);
 	struct bnx2 *bp = dev->priv;
 	int err;
 
@@ -5024,9 +5032,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	case SIOCGMIIREG: {
 		u32 mii_regval;
 
-		spin_lock_irq(&bp->phy_lock);
+		spin_lock_bh(&bp->phy_lock);
 		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
-		spin_unlock_irq(&bp->phy_lock);
+		spin_unlock_bh(&bp->phy_lock);
 
 		data->val_out = mii_regval;
 
@@ -5037,9 +5045,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		spin_lock_irq(&bp->phy_lock);
+		spin_lock_bh(&bp->phy_lock);
 		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
-		spin_unlock_irq(&bp->phy_lock);
+		spin_unlock_bh(&bp->phy_lock);
 
 		return err;
 
@@ -5057,6 +5065,9 @@ bnx2_change_mac_addr(struct net_device *dev, void *p)
 	struct sockaddr *addr = p;
 	struct bnx2 *bp = dev->priv;
 
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
+
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 	if (netif_running(dev))
 		bnx2_set_mac_addr(bp);
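The new check in bnx2_change_mac_addr() uses is_valid_ether_addr(), which rejects the all-zero address and any address with the multicast/broadcast bit set, so only a plausible unicast MAC reaches the hardware. A standalone illustration of that rule in plain C; this mirrors the kernel helper's logic rather than calling it.

#include <stdio.h>
#include <string.h>

/* Mirrors the rule enforced by the kernel's is_valid_ether_addr():
 * reject multicast (LSB of the first octet set, which covers broadcast)
 * and the all-zero address.
 */
static int valid_ether_addr(const unsigned char a[6])
{
	static const unsigned char zero[6];

	if (a[0] & 0x01)			/* multicast/broadcast */
		return 0;
	if (memcmp(a, zero, 6) == 0)		/* all zeros */
		return 0;
	return 1;
}

int main(void)
{
	const unsigned char ok[6]    = { 0x00, 0x10, 0x18, 0x01, 0x02, 0x03 };
	const unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("%d %d\n", valid_ether_addr(ok), valid_ether_addr(bcast)); /* 1 0 */
	return 0;
}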
@@ -5305,6 +5316,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->stats_ticks = 1000000 & 0xffff00;
 
 	bp->timer_interval = HZ;
+	bp->current_interval = HZ;
 
 	/* Disable WOL support if we are running on a SERDES chip. */
 	if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
@@ -5328,6 +5340,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->req_line_speed = 0;
 	if (bp->phy_flags & PHY_SERDES_FLAG) {
 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
+
+		reg = REG_RD_IND(bp, HOST_VIEW_SHMEM_BASE +
+				 BNX2_PORT_HW_CFG_CONFIG);
+		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
+		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
+			bp->autoneg = 0;
+			bp->req_line_speed = bp->line_speed = SPEED_1000;
+			bp->req_duplex = DUPLEX_FULL;
+		}
 	}
 	else {
 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
@@ -5335,11 +5356,17 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
 
+	init_timer(&bp->timer);
+	bp->timer.expires = RUN_AT(bp->timer_interval);
+	bp->timer.data = (unsigned long) bp;
+	bp->timer.function = bnx2_timer;
+
 	return 0;
 
 err_out_unmap:
 	if (bp->regview) {
 		iounmap(bp->regview);
+		bp->regview = NULL;
 	}
 
 err_out_release:
@@ -5454,6 +5481,8 @@ bnx2_remove_one(struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct bnx2 *bp = dev->priv;
 
+	flush_scheduled_work();
+
 	unregister_netdev(dev);
 
 	if (bp->regview)
@@ -5505,12 +5534,12 @@ bnx2_resume(struct pci_dev *pdev)
 }
 
 static struct pci_driver bnx2_pci_driver = {
-	name:		DRV_MODULE_NAME,
-	id_table:	bnx2_pci_tbl,
-	probe:		bnx2_init_one,
-	remove:		__devexit_p(bnx2_remove_one),
-	suspend:	bnx2_suspend,
-	resume:		bnx2_resume,
+	.name		= DRV_MODULE_NAME,
+	.id_table	= bnx2_pci_tbl,
+	.probe		= bnx2_init_one,
+	.remove		= __devexit_p(bnx2_remove_one),
+	.suspend	= bnx2_suspend,
+	.resume		= bnx2_resume,
 };
 
 static int __init bnx2_init(void)
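The final hunk converts bnx2_pci_driver from the old GCC-specific "field:" initializer syntax to C99 designated initializers, the form the kernel has standardized on; members that are not named stay zero-initialized, so optional callbacks can simply be left out. A tiny standalone example of the same construct:

#include <stdio.h>

struct driver_ops {
	const char *name;
	int (*probe)(int id);
	void (*remove)(int id);	/* left NULL below: optional callback */
};

static int my_probe(int id) { return id; }

/* C99 designated initializers: order-independent, unnamed fields are zeroed */
static struct driver_ops ops = {
	.name  = "demo",
	.probe = my_probe,
};

int main(void)
{
	printf("%s %d %p\n", ops.name, ops.probe(7), (void *) ops.remove);
	return 0;
}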