Diffstat (limited to 'drivers/net/bnx2.c')
 drivers/net/bnx2.c | 573 ++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 387 insertions(+), 186 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 08cddb6ff740..ac90a3828f69 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1,6 +1,6 @@
 /* bnx2.c: Broadcom NX2 network driver.
  *
- * Copyright (c) 2004-2009 Broadcom Corporation
+ * Copyright (c) 2004-2010 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -9,6 +9,7 @@
  * Written by: Michael Chan (mchan@broadcom.com)
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -48,7 +49,6 @@
 #include <linux/cache.h>
 #include <linux/firmware.h>
 #include <linux/log2.h>
-#include <linux/list.h>
 
 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
 #define BCM_CNIC 1
@@ -58,14 +58,13 @@
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME		"bnx2"
-#define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"2.0.2"
-#define DRV_MODULE_RELDATE	"Aug 21, 2009"
-#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j3.fw"
+#define DRV_MODULE_VERSION	"2.0.9"
+#define DRV_MODULE_RELDATE	"April 27, 2010"
+#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j6.fw"
 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
-#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j3.fw"
-#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw"
-#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-5.0.0.j3.fw"
+#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j9.fw"
+#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
+#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -247,6 +246,8 @@ static const struct flash_spec flash_5709 = {
 
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
+static void bnx2_init_napi(struct bnx2 *bp);
+
 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 {
 	u32 diff;
@@ -650,26 +651,36 @@ bnx2_napi_enable(struct bnx2 *bp)
 }
 
 static void
-bnx2_netif_stop(struct bnx2 *bp)
+bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
 {
-	bnx2_cnic_stop(bp);
-	bnx2_disable_int_sync(bp);
+	if (stop_cnic)
+		bnx2_cnic_stop(bp);
 	if (netif_running(bp->dev)) {
+		int i;
+
 		bnx2_napi_disable(bp);
 		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
+		/* prevent tx timeout */
+		for (i = 0; i < bp->dev->num_tx_queues; i++) {
+			struct netdev_queue *txq;
+
+			txq = netdev_get_tx_queue(bp->dev, i);
+			txq->trans_start = jiffies;
+		}
 	}
+	bnx2_disable_int_sync(bp);
 }
 
 static void
-bnx2_netif_start(struct bnx2 *bp)
+bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
 {
 	if (atomic_dec_and_test(&bp->intr_sem)) {
 		if (netif_running(bp->dev)) {
 			netif_tx_wake_all_queues(bp->dev);
 			bnx2_napi_enable(bp);
 			bnx2_enable_int(bp);
-			bnx2_cnic_start(bp);
+			if (start_cnic)
+				bnx2_cnic_start(bp);
 		}
 	}
 }
@@ -972,33 +983,27 @@ bnx2_report_link(struct bnx2 *bp)
 {
 	if (bp->link_up) {
 		netif_carrier_on(bp->dev);
-		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
-		       bnx2_xceiver_str(bp));
-
-		printk("%d Mbps ", bp->line_speed);
-
-		if (bp->duplex == DUPLEX_FULL)
-			printk("full duplex");
-		else
-			printk("half duplex");
+		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
+			    bnx2_xceiver_str(bp),
+			    bp->line_speed,
+			    bp->duplex == DUPLEX_FULL ? "full" : "half");
 
 		if (bp->flow_ctrl) {
 			if (bp->flow_ctrl & FLOW_CTRL_RX) {
-				printk(", receive ");
+				pr_cont(", receive ");
 				if (bp->flow_ctrl & FLOW_CTRL_TX)
-					printk("& transmit ");
+					pr_cont("& transmit ");
 			}
 			else {
-				printk(", transmit ");
+				pr_cont(", transmit ");
 			}
-			printk("flow control ON");
+			pr_cont("flow control ON");
 		}
-		printk("\n");
-	}
-	else {
+		pr_cont("\n");
+	} else {
 		netif_carrier_off(bp->dev);
-		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
+		netdev_err(bp->dev, "NIC %s Link is Down\n",
 			   bnx2_xceiver_str(bp));
 	}
 
 	bnx2_report_fw_link(bp);
@@ -1270,7 +1275,7 @@ bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
 	if (lo_water >= bp->rx_ring_size)
 		lo_water = 0;
 
-	hi_water = bp->rx_ring_size / 4;
+	hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);
 
 	if (hi_water <= lo_water)
 		lo_water = 0;
@@ -1466,6 +1471,8 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 		bmcr |= BCM5708S_BMCR_FORCE_2500;
+	} else {
+		return;
 	}
 
 	if (bp->autoneg & AUTONEG_SPEED) {
@@ -1500,6 +1507,8 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
+	} else {
+		return;
 	}
 
 	if (bp->autoneg & AUTONEG_SPEED)
@@ -2471,8 +2480,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
 	/* If we timed out, inform the firmware that this is the case. */
 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
 		if (!silent)
-			printk(KERN_ERR PFX "fw sync timeout, reset code = "
-			       "%x\n", msg_data);
+			pr_err("fw sync timeout, reset code = %x\n", msg_data);
 
 		msg_data &= ~BNX2_DRV_MSG_CODE;
 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
@@ -2588,8 +2596,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
 
 	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
 	if (good_mbuf == NULL) {
-		printk(KERN_ERR PFX "Failed to allocate memory in "
-		       "bnx2_alloc_bad_rbuf\n");
+		pr_err("Failed to allocate memory in %s\n", __func__);
 		return -ENOMEM;
 	}
 
@@ -2811,13 +2818,21 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			}
 		}
 
-		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
+		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+				 skb_headlen(skb), PCI_DMA_TODEVICE);
 
 		tx_buf->skb = NULL;
 		last = tx_buf->nr_frags;
 
 		for (i = 0; i < last; i++) {
 			sw_cons = NEXT_TX_BD(sw_cons);
+
+			pci_unmap_page(bp->pdev,
+				pci_unmap_addr(
+					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
+					mapping),
+				skb_shinfo(skb)->frags[i].size,
+				PCI_DMA_TODEVICE);
 		}
 
 		sw_cons = NEXT_TX_BD(sw_cons);
@@ -3541,9 +3556,7 @@ bnx2_set_rx_mode(struct net_device *dev)
 
 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
 
-		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
-		     i++, mclist = mclist->next) {
-
+		netdev_for_each_mc_addr(mclist, dev) {
 			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
 			bit = crc & 0xff;
 			regidx = (bit & 0xe0) >> 5;
@@ -3559,14 +3572,14 @@ bnx2_set_rx_mode(struct net_device *dev)
 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
 	}
 
-	if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
+	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
 	} else if (!(dev->flags & IFF_PROMISC)) {
 		/* Add all entries into to the match filter list */
 		i = 0;
-		list_for_each_entry(ha, &dev->uc.list, list) {
+		netdev_for_each_uc_addr(ha, dev) {
 			bnx2_set_mac_addr(bp, ha->addr,
 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
 			sort_mode |= (1 <<
@@ -3637,15 +3650,13 @@ bnx2_request_firmware(struct bnx2 *bp)
 
 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
 	if (rc) {
-		printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
-		       mips_fw_file);
+		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
 		return rc;
 	}
 
 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
 	if (rc) {
-		printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
-		       rv2p_fw_file);
+		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
 		return rc;
 	}
 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
@@ -3656,15 +3667,13 @@ bnx2_request_firmware(struct bnx2 *bp)
 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
-		printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
-		       mips_fw_file);
+		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
 		return -EINVAL;
 	}
 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
-		printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
-		       rv2p_fw_file);
+		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
 		return -EINVAL;
 	}
 
@@ -4298,7 +4307,7 @@ bnx2_init_nvram(struct bnx2 *bp)
 
 	if (j == entry_count) {
 		bp->flash_info = NULL;
-		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
+		pr_alert("Unknown flash/EEPROM type\n");
 		return -ENODEV;
 	}
 
@@ -4718,7 +4727,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 
 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
-			printk(KERN_ERR PFX "Chip reset did not complete\n");
+			pr_err("Chip reset did not complete\n");
 			return -EBUSY;
 		}
 	}
@@ -4726,7 +4735,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 	/* Make sure byte swapping is properly configured. */
 	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
 	if (val != 0x01020304) {
-		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
+		pr_err("Chip not in correct endian mode\n");
 		return -ENODEV;
 	}
 
@@ -4752,8 +4761,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 		rc = bnx2_alloc_bad_rbuf(bp);
 	}
 
-	if (bp->flags & BNX2_FLAG_USING_MSIX)
+	if (bp->flags & BNX2_FLAG_USING_MSIX) {
 		bnx2_setup_msix_tbl(bp);
+		/* Prevent MSIX table reads and write from timing out */
+		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
+			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
+	}
 
 	return rc;
 }
@@ -4921,7 +4934,7 @@ bnx2_init_chip(struct bnx2 *bp)
 			  BNX2_HC_CONFIG_COLLECT_STATS;
 	}
 
-	if (bp->irq_nvecs > 1) {
+	if (bp->flags & BNX2_FLAG_USING_MSIX) {
 		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
 		       BNX2_HC_MSIX_BIT_VECTOR_VAL);
 
@@ -5146,8 +5159,11 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
 	ring_prod = prod = rxr->rx_pg_prod;
 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
-		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
+		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
+			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
+				    ring_num, i, bp->rx_pg_ring_size);
 			break;
+		}
 		prod = NEXT_RX_BD(prod);
 		ring_prod = RX_PG_RING_IDX(prod);
 	}
@@ -5155,8 +5171,11 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
 	ring_prod = prod = rxr->rx_prod;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
+		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
+			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
+				    ring_num, i, bp->rx_ring_size);
 			break;
+		}
 		prod = NEXT_RX_BD(prod);
 		ring_prod = RX_RING_IDX(prod);
 	}
@@ -5291,17 +5310,29 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 		for (j = 0; j < TX_DESC_CNT; ) {
 			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
 			struct sk_buff *skb = tx_buf->skb;
+			int k, last;
 
 			if (skb == NULL) {
 				j++;
 				continue;
 			}
 
-			skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
+			pci_unmap_single(bp->pdev,
+					 pci_unmap_addr(tx_buf, mapping),
+					 skb_headlen(skb),
+					 PCI_DMA_TODEVICE);
 
 			tx_buf->skb = NULL;
 
-			j += skb_shinfo(skb)->nr_frags + 1;
+			last = tx_buf->nr_frags;
+			j++;
+			for (k = 0; k < last; k++, j++) {
+				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
+				pci_unmap_page(bp->pdev,
+					pci_unmap_addr(tx_buf, mapping),
+					skb_shinfo(skb)->frags[k].size,
+					PCI_DMA_TODEVICE);
+			}
 			dev_kfree_skb(skb);
 		}
 	}
@@ -5680,11 +5711,12 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	for (i = 14; i < pkt_size; i++)
 		packet[i] = (unsigned char) (i & 0xff);
 
-	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	map = pci_map_single(bp->pdev, skb->data, pkt_size,
+			     PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(bp->pdev, map)) {
 		dev_kfree_skb(skb);
 		return -EIO;
 	}
-	map = skb_shinfo(skb)->dma_head;
 
 	REG_WR(bp, BNX2_HC_COMMAND,
 	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
@@ -5719,7 +5751,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 
 	udelay(5);
 
-	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
+	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
 	dev_kfree_skb(skb);
 
 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
@@ -6104,6 +6136,10 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
 	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
 	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
 
+	/* Need to flush the previous three writes to ensure MSI-X
+	 * is setup properly */
+	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
+
 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
 		msix_ent[i].entry = i;
 		msix_ent[i].vector = 0;
@@ -6169,6 +6205,7 @@ bnx2_open(struct net_device *dev)
 	bnx2_disable_int(bp);
 
 	bnx2_setup_int_mode(bp, disable_msi);
+	bnx2_init_napi(bp);
 	bnx2_napi_enable(bp);
 	rc = bnx2_alloc_mem(bp);
 	if (rc)
@@ -6186,6 +6223,8 @@ bnx2_open(struct net_device *dev)
 
 	atomic_set(&bp->intr_sem, 0);
 
+	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
+
 	bnx2_enable_int(bp);
 
 	if (bp->flags & BNX2_FLAG_USING_MSI) {
@@ -6193,11 +6232,7 @@ bnx2_open(struct net_device *dev)
 		 * If MSI test fails, go back to INTx mode
 		 */
 		if (bnx2_test_intr(bp) != 0) {
-			printk(KERN_WARNING PFX "%s: No interrupt was generated"
-			       " using MSI, switching to INTx mode. Please"
-			       " report this failure to the PCI maintainer"
-			       " and include system chipset information.\n",
-			       bp->dev->name);
+			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
 
 			bnx2_disable_int(bp);
 			bnx2_free_irq(bp);
@@ -6217,9 +6252,9 @@ bnx2_open(struct net_device *dev)
 		}
 	}
 	if (bp->flags & BNX2_FLAG_USING_MSI)
-		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
+		netdev_info(dev, "using MSI\n");
 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
-		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
+		netdev_info(dev, "using MSIX\n");
 
 	netif_tx_start_all_queues(dev);
 
@@ -6238,15 +6273,38 @@ bnx2_reset_task(struct work_struct *work)
 {
 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
 
-	if (!netif_running(bp->dev))
+	rtnl_lock();
+	if (!netif_running(bp->dev)) {
+		rtnl_unlock();
 		return;
+	}
 
-	bnx2_netif_stop(bp);
+	bnx2_netif_stop(bp, true);
 
 	bnx2_init_nic(bp, 1);
 
 	atomic_set(&bp->intr_sem, 1);
-	bnx2_netif_start(bp);
+	bnx2_netif_start(bp, true);
+	rtnl_unlock();
+}
+
+static void
+bnx2_dump_state(struct bnx2 *bp)
+{
+	struct net_device *dev = bp->dev;
+
+	netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
+	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
+		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
+		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
+	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
+		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
+		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
+	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
+		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
+	if (bp->flags & BNX2_FLAG_USING_MSIX)
+		netdev_err(dev, "DEBUG: PBA[%08x]\n",
+			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
 }
 
 static void
@@ -6254,6 +6312,8 @@ bnx2_tx_timeout(struct net_device *dev)
 {
 	struct bnx2 *bp = netdev_priv(dev);
 
+	bnx2_dump_state(bp);
+
 	/* This allows the netif to be shutdown gracefully before resetting */
 	schedule_work(&bp->reset_task);
 }
@@ -6266,7 +6326,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
 	struct bnx2 *bp = netdev_priv(dev);
 
 	if (netif_running(dev))
-		bnx2_netif_stop(bp);
+		bnx2_netif_stop(bp, false);
 
 	bp->vlgrp = vlgrp;
 
@@ -6277,7 +6337,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
 	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
 
-	bnx2_netif_start(bp);
+	bnx2_netif_start(bp, false);
 }
 #endif
 
@@ -6298,7 +6358,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct bnx2_napi *bnapi;
 	struct bnx2_tx_ring_info *txr;
 	struct netdev_queue *txq;
-	struct skb_shared_info *sp;
 
 	/* Determine which tx ring we will be placed on */
 	i = skb_get_queue_mapping(skb);
@@ -6309,8 +6368,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(bnx2_tx_avail(bp, txr) <
 	    (skb_shinfo(skb)->nr_frags + 1))) {
 		netif_tx_stop_queue(txq);
-		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
-			dev->name);
+		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
 
 		return NETDEV_TX_BUSY;
 	}
@@ -6363,16 +6421,15 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else
 		mss = 0;
 
-	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(bp->pdev, mapping)) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
-	sp = skb_shinfo(skb);
-	mapping = sp->dma_head;
-
 	tx_buf = &txr->tx_buf_ring[ring_prod];
 	tx_buf->skb = skb;
+	pci_unmap_addr_set(tx_buf, mapping, mapping);
 
 	txbd = &txr->tx_desc_ring[ring_prod];
 
@@ -6393,7 +6450,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		txbd = &txr->tx_desc_ring[ring_prod];
 
 		len = frag->size;
-		mapping = sp->dma_maps[i];
+		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
+				       len, PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(bp->pdev, mapping))
+			goto dma_error;
+		pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
+				   mapping);
 
 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -6420,6 +6482,30 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	return NETDEV_TX_OK;
+dma_error:
+	/* save value of frag that failed */
+	last_frag = i;
+
+	/* start back at beginning and unmap skb */
+	prod = txr->tx_prod;
+	ring_prod = TX_RING_IDX(prod);
+	tx_buf = &txr->tx_buf_ring[ring_prod];
+	tx_buf->skb = NULL;
+	pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+			 skb_headlen(skb), PCI_DMA_TODEVICE);
+
+	/* unmap remaining mapped pages */
+	for (i = 0; i < last_frag; i++) {
+		prod = NEXT_TX_BD(prod);
+		ring_prod = TX_RING_IDX(prod);
+		tx_buf = &txr->tx_buf_ring[ring_prod];
+		pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+			       skb_shinfo(skb)->frags[i].size,
+			       PCI_DMA_TODEVICE);
+	}
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 /* Called with rtnl_lock */
@@ -6443,92 +6529,121 @@ bnx2_close(struct net_device *dev)
 	return 0;
 }
 
-#define GET_NET_STATS64(ctr) \
+static void
+bnx2_save_stats(struct bnx2 *bp)
+{
+	u32 *hw_stats = (u32 *) bp->stats_blk;
+	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
+	int i;
+
+	/* The 1st 10 counters are 64-bit counters */
+	for (i = 0; i < 20; i += 2) {
+		u32 hi;
+		u64 lo;
+
+		hi = temp_stats[i] + hw_stats[i];
+		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
+		if (lo > 0xffffffff)
+			hi++;
+		temp_stats[i] = hi;
+		temp_stats[i + 1] = lo & 0xffffffff;
+	}
+
+	for ( ; i < sizeof(struct statistics_block) / 4; i++)
+		temp_stats[i] += hw_stats[i];
+}
+
+#define GET_64BIT_NET_STATS64(ctr) \
 	(unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
 	(unsigned long) (ctr##_lo)
 
-#define GET_NET_STATS32(ctr) \
+#define GET_64BIT_NET_STATS32(ctr) \
 	(ctr##_lo)
 
 #if (BITS_PER_LONG == 64)
-#define GET_NET_STATS	GET_NET_STATS64
+#define GET_64BIT_NET_STATS(ctr) \
+	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
+	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
 #else
-#define GET_NET_STATS	GET_NET_STATS32
+#define GET_64BIT_NET_STATS(ctr) \
+	GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
+	GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
 #endif
 
+#define GET_32BIT_NET_STATS(ctr) \
+	(unsigned long) (bp->stats_blk->ctr + \
+			 bp->temp_stats_blk->ctr)
+
 static struct net_device_stats *
 bnx2_get_stats(struct net_device *dev)
 {
 	struct bnx2 *bp = netdev_priv(dev);
-	struct statistics_block *stats_blk = bp->stats_blk;
 	struct net_device_stats *net_stats = &dev->stats;
 
 	if (bp->stats_blk == NULL) {
 		return net_stats;
 	}
 	net_stats->rx_packets =
-		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
-		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
-		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
+		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
+		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
+		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
 
 	net_stats->tx_packets =
-		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
-		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
-		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
+		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
+		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
+		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
 
 	net_stats->rx_bytes =
-		GET_NET_STATS(stats_blk->stat_IfHCInOctets);
+		GET_64BIT_NET_STATS(stat_IfHCInOctets);
 
 	net_stats->tx_bytes =
-		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
+		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
 
 	net_stats->multicast =
-		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
+		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
 
 	net_stats->collisions =
-		(unsigned long) stats_blk->stat_EtherStatsCollisions;
+		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
 
 	net_stats->rx_length_errors =
-		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
-		stats_blk->stat_EtherStatsOverrsizePkts);
+		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
+		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
 
 	net_stats->rx_over_errors =
-		(unsigned long) (stats_blk->stat_IfInFTQDiscards +
-		stats_blk->stat_IfInMBUFDiscards);
+		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
 
 	net_stats->rx_frame_errors =
-		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
+		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
 
 	net_stats->rx_crc_errors =
-		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
+		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
 
 	net_stats->rx_errors = net_stats->rx_length_errors +
 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
 		net_stats->rx_crc_errors;
 
 	net_stats->tx_aborted_errors =
-		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
-		stats_blk->stat_Dot3StatsLateCollisions);
+		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
+		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
 
 	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
 	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
 		net_stats->tx_carrier_errors = 0;
 	else {
 		net_stats->tx_carrier_errors =
-			(unsigned long)
-			stats_blk->stat_Dot3StatsCarrierSenseErrors;
+			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
 	}
 
 	net_stats->tx_errors =
-		(unsigned long)
-		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
-		+
+		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
 		net_stats->tx_aborted_errors +
 		net_stats->tx_carrier_errors;
 
 	net_stats->rx_missed_errors =
-		(unsigned long) (stats_blk->stat_IfInFTQDiscards +
-		stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop);
+		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
+		GET_32BIT_NET_STATS(stat_FwRxDrop);
 
 	return net_stats;
 }
@@ -6622,32 +6737,15 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	if (cmd->autoneg == AUTONEG_ENABLE) {
 		autoneg |= AUTONEG_SPEED;
 
-		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
-
-		/* allow advertising 1 speed */
-		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
-			(cmd->advertising == ADVERTISED_10baseT_Full) ||
-			(cmd->advertising == ADVERTISED_100baseT_Half) ||
-			(cmd->advertising == ADVERTISED_100baseT_Full)) {
-
-			if (cmd->port == PORT_FIBRE)
-				goto err_out_unlock;
-
-			advertising = cmd->advertising;
-
-		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
-			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
-			    (cmd->port == PORT_TP))
-				goto err_out_unlock;
-		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
-			advertising = cmd->advertising;
-		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
-			goto err_out_unlock;
-		else {
-			if (cmd->port == PORT_FIBRE)
-				advertising = ETHTOOL_ALL_FIBRE_SPEED;
-			else
+		advertising = cmd->advertising;
+		if (cmd->port == PORT_TP) {
+			advertising &= ETHTOOL_ALL_COPPER_SPEED;
+			if (!advertising)
 				advertising = ETHTOOL_ALL_COPPER_SPEED;
+		} else {
+			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
+			if (!advertising)
+				advertising = ETHTOOL_ALL_FIBRE_SPEED;
 		}
 		advertising |= ADVERTISED_Autoneg;
 	}
@@ -6959,9 +7057,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
 
 	if (netif_running(bp->dev)) {
-		bnx2_netif_stop(bp);
+		bnx2_netif_stop(bp, true);
 		bnx2_init_nic(bp, 0);
-		bnx2_netif_start(bp);
+		bnx2_netif_start(bp, true);
 	}
 
 	return 0;
@@ -6988,7 +7086,10 @@ static int
 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
 {
 	if (netif_running(bp->dev)) {
-		bnx2_netif_stop(bp);
+		/* Reset will erase chipset stats; save them */
+		bnx2_save_stats(bp);
+
+		bnx2_netif_stop(bp, true);
 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
 		bnx2_free_skbs(bp);
 		bnx2_free_mem(bp);
@@ -7009,7 +7110,14 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
 			dev_close(bp->dev);
 			return rc;
 		}
-		bnx2_netif_start(bp);
+#ifdef BCM_CNIC
+		mutex_lock(&bp->cnic_lock);
+		/* Let cnic know about the new status block. */
+		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
+			bnx2_setup_cnic_irq_info(bp);
+		mutex_unlock(&bp->cnic_lock);
+#endif
+		bnx2_netif_start(bp, true);
 	}
 	return 0;
 }
@@ -7262,7 +7370,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
 		int i;
 
-		bnx2_netif_stop(bp);
+		bnx2_netif_stop(bp, true);
 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
 		bnx2_free_skbs(bp);
 
@@ -7281,7 +7389,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
 			bnx2_shutdown_chip(bp);
 		else {
 			bnx2_init_nic(bp, 1);
-			bnx2_netif_start(bp);
+			bnx2_netif_start(bp, true);
 		}
 
 		/* wait for link up */
@@ -7332,6 +7440,7 @@ bnx2_get_ethtool_stats(struct net_device *dev,
 	struct bnx2 *bp = netdev_priv(dev);
 	int i;
 	u32 *hw_stats = (u32 *) bp->stats_blk;
+	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
 	u8 *stats_len_arr = NULL;
 
 	if (hw_stats == NULL) {
@@ -7348,21 +7457,26 @@ bnx2_get_ethtool_stats(struct net_device *dev,
 		stats_len_arr = bnx2_5708_stats_len_arr;
 
 	for (i = 0; i < BNX2_NUM_STATS; i++) {
+		unsigned long offset;
+
 		if (stats_len_arr[i] == 0) {
 			/* skip this counter */
 			buf[i] = 0;
 			continue;
 		}
+
+		offset = bnx2_stats_offset_arr[i];
 		if (stats_len_arr[i] == 4) {
 			/* 4-byte counter */
-			buf[i] = (u64)
-				*(hw_stats + bnx2_stats_offset_arr[i]);
+			buf[i] = (u64) *(hw_stats + offset) +
+				 *(temp_stats + offset);
 			continue;
 		}
 		/* 8-byte counter */
-		buf[i] = (((u64) *(hw_stats +
-				   bnx2_stats_offset_arr[i])) << 32) +
-			 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
+		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
+			 *(hw_stats + offset + 1) +
+			 (((u64) *(temp_stats + offset)) << 32) +
+			 *(temp_stats + offset + 1);
 	}
 }
 
@@ -7530,7 +7644,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
 	return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
 }
 
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
 static void
 poll_bnx2(struct net_device *dev)
 {
@@ -7538,9 +7652,11 @@ poll_bnx2(struct net_device *dev)
 	int i;
 
 	for (i = 0; i < bp->irq_nvecs; i++) {
-		disable_irq(bp->irq_tbl[i].vector);
-		bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
-		enable_irq(bp->irq_tbl[i].vector);
+		struct bnx2_irq *irq = &bp->irq_tbl[i];
+
+		disable_irq(irq->vector);
+		irq->handler(irq->vector, &bp->bnx2_napi[i]);
+		enable_irq(irq->vector);
 	}
 }
 #endif
@@ -7635,6 +7751,74 @@ bnx2_get_pci_speed(struct bnx2 *bp)
 
 }
 
+static void __devinit
+bnx2_read_vpd_fw_ver(struct bnx2 *bp)
+{
+	int rc, i, j;
+	u8 *data;
+	unsigned int block_end, rosize, len;
+
+#define BNX2_VPD_NVRAM_OFFSET	0x300
+#define BNX2_VPD_LEN		128
+#define BNX2_MAX_VER_SLEN	30
+
+	data = kmalloc(256, GFP_KERNEL);
+	if (!data)
+		return;
+
+	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
+			     BNX2_VPD_LEN);
+	if (rc)
+		goto vpd_done;
+
+	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
+		data[i] = data[i + BNX2_VPD_LEN + 3];
+		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
+		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
+		data[i + 3] = data[i + BNX2_VPD_LEN];
+	}
+
+	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
+	if (i < 0)
+		goto vpd_done;
+
+	rosize = pci_vpd_lrdt_size(&data[i]);
+	i += PCI_VPD_LRDT_TAG_SIZE;
+	block_end = i + rosize;
+
+	if (block_end > BNX2_VPD_LEN)
+		goto vpd_done;
+
+	j = pci_vpd_find_info_keyword(data, i, rosize,
+				      PCI_VPD_RO_KEYWORD_MFR_ID);
+	if (j < 0)
+		goto vpd_done;
+
+	len = pci_vpd_info_field_size(&data[j]);
+
+	j += PCI_VPD_INFO_FLD_HDR_SIZE;
+	if (j + len > block_end || len != 4 ||
+	    memcmp(&data[j], "1028", 4))
+		goto vpd_done;
+
+	j = pci_vpd_find_info_keyword(data, i, rosize,
+				      PCI_VPD_RO_KEYWORD_VENDOR0);
+	if (j < 0)
+		goto vpd_done;
+
+	len = pci_vpd_info_field_size(&data[j]);
+
+	j += PCI_VPD_INFO_FLD_HDR_SIZE;
+	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
+		goto vpd_done;
+
+	memcpy(bp->fw_version, &data[j], len);
+	bp->fw_version[len] = ' ';
+
+vpd_done:
+	kfree(data);
+}
+
 static int __devinit
 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 {
@@ -7650,23 +7834,31 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->flags = 0;
 	bp->phy_flags = 0;
 
+	bp->temp_stats_blk =
+		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
+
+	if (bp->temp_stats_blk == NULL) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
 	rc = pci_enable_device(pdev);
 	if (rc) {
-		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 		goto err_out;
 	}
 
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 		dev_err(&pdev->dev,
-			"Cannot find PCI device base address, aborting.\n");
+			"Cannot find PCI device base address, aborting\n");
 		rc = -ENODEV;
 		goto err_out_disable;
 	}
 
 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
 	if (rc) {
-		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
+		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
 		goto err_out_disable;
 	}
 
@@ -7676,7 +7868,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
 	if (bp->pm_cap == 0) {
 		dev_err(&pdev->dev,
-			"Cannot find power management capability, aborting.\n");
+			"Cannot find power management capability, aborting\n");
 		rc = -EIO;
 		goto err_out_release;
 	}
@@ -7699,7 +7891,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
 
 	if (!bp->regview) {
-		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
+		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
 		rc = -ENOMEM;
 		goto err_out_release;
 	}
@@ -7719,7 +7911,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
 		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
 			dev_err(&pdev->dev,
-				"Cannot find PCIE capability, aborting.\n");
+				"Cannot find PCIE capability, aborting\n");
 			rc = -EIO;
 			goto err_out_unmap;
 		}
@@ -7730,7 +7922,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
 		if (bp->pcix_cap == 0) {
 			dev_err(&pdev->dev,
-				"Cannot find PCIX capability, aborting.\n");
+				"Cannot find PCIX capability, aborting\n");
 			rc = -EIO;
 			goto err_out_unmap;
 		}
@@ -7759,11 +7951,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
 		if (rc) {
 			dev_err(&pdev->dev,
-				"pci_set_consistent_dma_mask failed, aborting.\n");
+				"pci_set_consistent_dma_mask failed, aborting\n");
 			goto err_out_unmap;
 		}
 	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
-		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
+		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
 		goto err_out_unmap;
 	}
 
@@ -7780,7 +7972,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	    !(bp->flags & BNX2_FLAG_PCIX)) {
 
 		dev_err(&pdev->dev,
-			"5706 A1 can only be used in a PCIX bus, aborting.\n");
+			"5706 A1 can only be used in a PCIX bus, aborting\n");
 		goto err_out_unmap;
 	}
 
@@ -7803,15 +7995,23 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
-		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
+		dev_err(&pdev->dev, "Firmware not running, aborting\n");
 		rc = -ENODEV;
 		goto err_out_unmap;
 	}
 
+	bnx2_read_vpd_fw_ver(bp);
+
+	j = strlen(bp->fw_version);
 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
-	for (i = 0, j = 0; i < 3; i++) {
+	for (i = 0; i < 3 && j < 24; i++) {
 		u8 num, k, skip0;
 
+		if (i == 0) {
+			bp->fw_version[j++] = 'b';
+			bp->fw_version[j++] = 'c';
+			bp->fw_version[j++] = ' ';
+		}
 		num = (u8) (reg >> (24 - (i * 8)));
 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
 			if (num >= k || !skip0 || k == 1) {
@@ -7842,8 +8042,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
 
-		bp->fw_version[j++] = ' ';
-		for (i = 0; i < 3; i++) {
+		if (j < 32)
+			bp->fw_version[j++] = ' ';
+		for (i = 0; i < 3 && j < 28; i++) {
 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
 			reg = swab32(reg);
 			memcpy(&bp->fw_version[j], &reg, 4);
@@ -8017,7 +8218,7 @@ bnx2_init_napi(struct bnx2 *bp)
 {
 	int i;
 
-	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+	for (i = 0; i < bp->irq_nvecs; i++) {
 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 		int (*poll)(struct napi_struct *, int);
 
@@ -8045,7 +8246,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
 #ifdef BCM_VLAN
 	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
 #endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= poll_bnx2,
 #endif
 };
@@ -8067,7 +8268,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	char str[40];
 
 	if (version_printed++ == 0)
-		printk(KERN_INFO "%s", version);
+		pr_info("%s", version);
 
 	/* dev zeroed in init_etherdev */
 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
@@ -8086,7 +8287,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->ethtool_ops = &bnx2_ethtool_ops;
 
 	bp = netdev_priv(dev);
-	bnx2_init_napi(bp);
 
 	pci_set_drvdata(pdev, dev);
 
@@ -8117,15 +8317,13 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto error;
 	}
 
-	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
-		"IRQ %d, node addr %pM\n",
-		dev->name,
-		board_info[ent->driver_data].name,
-		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
-		((CHIP_ID(bp) & 0x0ff0) >> 4),
-		bnx2_bus_string(bp, str),
-		dev->base_addr,
-		bp->pdev->irq, dev->dev_addr);
+	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
+		    board_info[ent->driver_data].name,
+		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
+		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
+		    bnx2_bus_string(bp, str),
+		    dev->base_addr,
+		    bp->pdev->irq, dev->dev_addr);
 
 	return 0;
 
@@ -8162,6 +8360,8 @@ bnx2_remove_one(struct pci_dev *pdev)
 	if (bp->regview)
 		iounmap(bp->regview);
 
+	kfree(bp->temp_stats_blk);
+
 	free_netdev(dev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
@@ -8183,7 +8383,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
 		return 0;
 
 	flush_scheduled_work();
-	bnx2_netif_stop(bp);
+	bnx2_netif_stop(bp, true);
 	netif_device_detach(dev);
 	del_timer_sync(&bp->timer);
 	bnx2_shutdown_chip(bp);
@@ -8205,7 +8405,7 @@ bnx2_resume(struct pci_dev *pdev)
 	bnx2_set_power_state(bp, PCI_D0);
 	netif_device_attach(dev);
 	bnx2_init_nic(bp, 1);
-	bnx2_netif_start(bp);
+	bnx2_netif_start(bp, true);
 	return 0;
 }
 
@@ -8232,7 +8432,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
 	}
 
 	if (netif_running(dev)) {
-		bnx2_netif_stop(bp);
+		bnx2_netif_stop(bp, true);
 		del_timer_sync(&bp->timer);
 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
 	}
@@ -8258,12 +8458,13 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
 	rtnl_lock();
 	if (pci_enable_device(pdev)) {
 		dev_err(&pdev->dev,
-			"Cannot re-enable PCI device after reset.\n");
+			"Cannot re-enable PCI device after reset\n");
 		rtnl_unlock();
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 	pci_set_master(pdev);
 	pci_restore_state(pdev);
+	pci_save_state(pdev);
 
 	if (netif_running(dev)) {
 		bnx2_set_power_state(bp, PCI_D0);
@@ -8288,7 +8489,7 @@ static void bnx2_io_resume(struct pci_dev *pdev)
 
 	rtnl_lock();
 	if (netif_running(dev))
-		bnx2_netif_start(bp);
+		bnx2_netif_start(bp, true);
 
 	netif_device_attach(dev);
 	rtnl_unlock();