Diffstat (limited to 'drivers/net/bnx2.c')
-rw-r--r--  drivers/net/bnx2.c | 217 ++++++++++++++++++++++++++++++++-----
1 file changed, 197 insertions(+), 20 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 08cddb6ff74..4bfc8081292 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -59,8 +59,8 @@
 
 #define DRV_MODULE_NAME "bnx2"
 #define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "2.0.2"
-#define DRV_MODULE_RELDATE "Aug 21, 2009"
+#define DRV_MODULE_VERSION "2.0.3"
+#define DRV_MODULE_RELDATE "Dec 03, 2009"
 #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j3.fw"
 #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
 #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j3.fw"
@@ -1466,6 +1466,8 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 		bmcr |= BCM5708S_BMCR_FORCE_2500;
+	} else {
+		return;
 	}
 
 	if (bp->autoneg & AUTONEG_SPEED) {
@@ -1500,6 +1502,8 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
+	} else {
+		return;
 	}
 
 	if (bp->autoneg & AUTONEG_SPEED)
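The two hunks above fix the same latent bug in both the enable and disable paths: bmcr is assigned only in the CHIP_NUM_5709 and CHIP_NUM_5708 branches, so on any other chip the old code fell through and later wrote an uninitialized value to the PHY. A condensed sketch of the control flow (the trailing write-back is assumed from the surrounding driver code, not shown in the hunks):

	u32 bmcr;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709-specific block registers are programmed, and the
		 * branch ends by reading bmcr from the PHY */
	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;		/* new guard: leave unknown PHYs untouched */
	}

	/* without the guard, bmcr could reach this write uninitialized */
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);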
@@ -2811,13 +2815,21 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			}
 		}
 
-		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
+		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+				 skb_headlen(skb), PCI_DMA_TODEVICE);
 
 		tx_buf->skb = NULL;
 		last = tx_buf->nr_frags;
 
 		for (i = 0; i < last; i++) {
 			sw_cons = NEXT_TX_BD(sw_cons);
+
+			pci_unmap_page(bp->pdev,
+				pci_unmap_addr(
+					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
+					mapping),
+				skb_shinfo(skb)->frags[i].size,
+				PCI_DMA_TODEVICE);
 		}
 
 		sw_cons = NEXT_TX_BD(sw_cons);
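The pci_unmap_addr(tx_buf, mapping) lookups above only work because the transmit path now records each buffer's DMA address in its software BD at map time. A minimal sketch of that bookkeeping, assuming a sw_tx_bd shaped roughly like the one in bnx2.h of this period (the exact field list is an assumption):

	struct sw_tx_bd {
		struct sk_buff	*skb;
		DECLARE_PCI_UNMAP_ADDR(mapping)	/* per-BD DMA address slot */
		unsigned short	is_gso;
		unsigned short	nr_frags;
	};

	/* transmit side: map, then remember the address in the BD */
	dma_addr_t mapping = pci_map_single(bp->pdev, skb->data, len,
					    PCI_DMA_TODEVICE);
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	/* completion side: fetch the remembered address and unmap */
	pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);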
@@ -5146,8 +5158,12 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
 	ring_prod = prod = rxr->rx_pg_prod;
 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
-		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
+		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
+			printk(KERN_WARNING PFX "%s: init'ed rx page ring %d "
+						"with %d/%d pages only\n",
+			       bp->dev->name, ring_num, i, bp->rx_pg_ring_size);
 			break;
+		}
 		prod = NEXT_RX_BD(prod);
 		ring_prod = RX_PG_RING_IDX(prod);
 	}
@@ -5155,8 +5171,12 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
 	ring_prod = prod = rxr->rx_prod;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
+		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
+			printk(KERN_WARNING PFX "%s: init'ed rx ring %d with "
+						"%d/%d skbs only\n",
+			       bp->dev->name, ring_num, i, bp->rx_ring_size);
 			break;
+		}
 		prod = NEXT_RX_BD(prod);
 		ring_prod = RX_RING_IDX(prod);
 	}
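Both receive-ring loops treat allocation failure as survivable: the break leaves the ring partially populated and the new printk records how far the fill got, rather than failing the whole bring-up. The shape of the pattern, with placeholder names (alloc_one, ring_size) standing in for the two real variants:

	for (i = 0; i < ring_size; i++) {
		if (alloc_one(bp, rxr, ring_prod) < 0) {
			/* run degraded with i buffers rather than fail ifup */
			printk(KERN_WARNING PFX "%s: ring filled %d/%d only\n",
			       bp->dev->name, i, ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}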
@@ -5291,17 +5311,29 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 		for (j = 0; j < TX_DESC_CNT; ) {
 			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
 			struct sk_buff *skb = tx_buf->skb;
+			int k, last;
 
 			if (skb == NULL) {
 				j++;
 				continue;
 			}
 
-			skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
+			pci_unmap_single(bp->pdev,
+					 pci_unmap_addr(tx_buf, mapping),
+					 skb_headlen(skb),
+					 PCI_DMA_TODEVICE);
 
 			tx_buf->skb = NULL;
 
-			j += skb_shinfo(skb)->nr_frags + 1;
+			last = tx_buf->nr_frags;
+			j++;
+			for (k = 0; k < last; k++, j++) {
+				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
+				pci_unmap_page(bp->pdev,
+					pci_unmap_addr(tx_buf, mapping),
+					skb_shinfo(skb)->frags[k].size,
+					PCI_DMA_TODEVICE);
+			}
 			dev_kfree_skb(skb);
 		}
 	}
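One subtlety in the loop above: j counts descriptors linearly, so every array access goes through TX_RING_IDX() to fold it back into the ring. The last BD of each descriptor page is a chain BD pointing at the next page, which is why NEXT_TX_BD() sometimes advances by two. Paraphrasing the macros from bnx2.h of this era (quoted from memory, so treat as an assumption):

	#define MAX_TX_DESC_CNT	(TX_DESC_CNT - 1)
	#define NEXT_TX_BD(x)	(((x) & (MAX_TX_DESC_CNT - 1)) ==	\
				 (MAX_TX_DESC_CNT - 1)) ?		\
				 (x) + 2 : (x) + 1
	#define TX_RING_IDX(x)	((x) & MAX_TX_DESC_CNT)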
@@ -5680,11 +5712,12 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	for (i = 14; i < pkt_size; i++)
 		packet[i] = (unsigned char) (i & 0xff);
 
-	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	map = pci_map_single(bp->pdev, skb->data, pkt_size,
+			     PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(bp->pdev, map)) {
 		dev_kfree_skb(skb);
 		return -EIO;
 	}
-	map = skb_shinfo(skb)->dma_head;
 
 	REG_WR(bp, BNX2_HC_COMMAND,
 	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
@@ -5719,7 +5752,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 
 	udelay(5);
 
-	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
+	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
 	dev_kfree_skb(skb);
 
 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
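The removed skb_dma_map()/skb_dma_unmap() pair did its own error checking and stashed the head mapping in skb_shinfo(skb)->dma_head; with the raw PCI API the driver must check the mapping itself and keep the address in its own state. The two loopback hunks reduce to this idiom:

	dma_addr_t map = pci_map_single(bp->pdev, skb->data, pkt_size,
					PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;	/* never hand a failed mapping to the chip */
	}
	/* ... the chip transmits and loops the frame back ... */
	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);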
@@ -6238,8 +6271,11 @@ bnx2_reset_task(struct work_struct *work)
 {
 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
 
-	if (!netif_running(bp->dev))
+	rtnl_lock();
+	if (!netif_running(bp->dev)) {
+		rtnl_unlock();
 		return;
+	}
 
 	bnx2_netif_stop(bp);
 
@@ -6247,6 +6283,28 @@ bnx2_reset_task(struct work_struct *work)
 
 	atomic_set(&bp->intr_sem, 1);
 	bnx2_netif_start(bp);
+	rtnl_unlock();
+}
+
+static void
+bnx2_dump_state(struct bnx2 *bp)
+{
+	struct net_device *dev = bp->dev;
+
+	printk(KERN_ERR PFX "%s DEBUG: intr_sem[%x]\n", dev->name,
+		atomic_read(&bp->intr_sem));
+	printk(KERN_ERR PFX "%s DEBUG: EMAC_TX_STATUS[%08x] "
+		"RPM_MGMT_PKT_CTRL[%08x]\n", dev->name,
+		REG_RD(bp, BNX2_EMAC_TX_STATUS),
+		REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
+	printk(KERN_ERR PFX "%s DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
+		dev->name, bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
+		bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
+	printk(KERN_ERR PFX "%s DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
+		dev->name, REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
+	if (bp->flags & BNX2_FLAG_USING_MSIX)
+		printk(KERN_ERR PFX "%s DEBUG: PBA[%08x]\n", dev->name,
+			REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
 }
 
 static void
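Taking RTNL inside the worker closes a race with bnx2_close(), which runs under RTNL: previously reset_task could pass its netif_running() check and then reset a device that close() was concurrently tearing down. Re-checking under the lock makes check-and-reset atomic with respect to open/close, and the early-return path must drop the lock itself. In outline:

	rtnl_lock();
	if (!netif_running(bp->dev)) {	/* re-check under the same lock */
		rtnl_unlock();
		return;
	}
	/* dev_open()/dev_close() cannot run concurrently past this point */
	bnx2_netif_stop(bp);
	/* ... reset the chip ... */
	bnx2_netif_start(bp);
	rtnl_unlock();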
@@ -6254,6 +6312,8 @@ bnx2_tx_timeout(struct net_device *dev)
 {
 	struct bnx2 *bp = netdev_priv(dev);
 
+	bnx2_dump_state(bp);
+
 	/* This allows the netif to be shutdown gracefully before resetting */
 	schedule_work(&bp->reset_task);
 }
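bnx2_dump_state() can run directly here because it only reads registers, but the reset itself cannot: ndo_tx_timeout is called in atomic context, where the driver may neither sleep nor take RTNL. Hence the split into an immediate dump plus deferred work; sketched with a hypothetical function name:

	static void example_tx_timeout(struct net_device *dev)
	{
		struct bnx2 *bp = netdev_priv(dev);

		bnx2_dump_state(bp);		/* MMIO reads: safe in atomic context */
		schedule_work(&bp->reset_task);	/* sleeping part runs later, under RTNL */
	}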
@@ -6298,7 +6358,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct bnx2_napi *bnapi;
 	struct bnx2_tx_ring_info *txr;
 	struct netdev_queue *txq;
-	struct skb_shared_info *sp;
 
 	/* Determine which tx ring we will be placed on */
 	i = skb_get_queue_mapping(skb);
@@ -6363,16 +6422,15 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else
 		mss = 0;
 
-	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(bp->pdev, mapping)) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
-	sp = skb_shinfo(skb);
-	mapping = sp->dma_head;
-
 	tx_buf = &txr->tx_buf_ring[ring_prod];
 	tx_buf->skb = skb;
+	pci_unmap_addr_set(tx_buf, mapping, mapping);
 
 	txbd = &txr->tx_desc_ring[ring_prod];
 
@@ -6393,7 +6451,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		txbd = &txr->tx_desc_ring[ring_prod];
 
 		len = frag->size;
-		mapping = sp->dma_maps[i];
+		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
+				       len, PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(bp->pdev, mapping))
+			goto dma_error;
+		pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
+				   mapping);
 
 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -6420,6 +6483,30 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	return NETDEV_TX_OK;
+dma_error:
+	/* save value of frag that failed */
+	last_frag = i;
+
+	/* start back at beginning and unmap skb */
+	prod = txr->tx_prod;
+	ring_prod = TX_RING_IDX(prod);
+	tx_buf = &txr->tx_buf_ring[ring_prod];
+	tx_buf->skb = NULL;
+	pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+			 skb_headlen(skb), PCI_DMA_TODEVICE);
+
+	/* unmap remaining mapped pages */
+	for (i = 0; i < last_frag; i++) {
+		prod = NEXT_TX_BD(prod);
+		ring_prod = TX_RING_IDX(prod);
+		tx_buf = &txr->tx_buf_ring[ring_prod];
+		pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+			       skb_shinfo(skb)->frags[i].size,
+			       PCI_DMA_TODEVICE);
+	}
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 /* Called with rtnl_lock */
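The dma_error unwind works because of an ordering contract visible across the three start_xmit hunks: txr->tx_prod is only advanced after every mapping has succeeded, so on failure the code can rewind from txr->tx_prod, unmap the head BD, then unmap exactly last_frag pages (the count mapped before pci_map_page() failed). Returning NETDEV_TX_OK after dev_kfree_skb() is deliberate: the skb has been consumed, so the stack must not requeue it. The same unwind could be factored into a helper; a hypothetical sketch:

	static void example_unwind_tx_mappings(struct bnx2 *bp,
					       struct bnx2_tx_ring_info *txr,
					       struct sk_buff *skb, int last_frag)
	{
		u16 prod = txr->tx_prod;	/* head BD: producer never advanced */
		struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[TX_RING_IDX(prod)];
		int i;

		tx_buf->skb = NULL;
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		for (i = 0; i < last_frag; i++) {	/* only frags actually mapped */
			prod = NEXT_TX_BD(prod);
			tx_buf = &txr->tx_buf_ring[TX_RING_IDX(prod)];
			pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
		}
	}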
@@ -7635,6 +7722,86 @@ bnx2_get_pci_speed(struct bnx2 *bp)
 
 }
 
+static void __devinit
+bnx2_read_vpd_fw_ver(struct bnx2 *bp)
+{
+	int rc, i, v0_len = 0;
+	u8 *data;
+	u8 *v0_str = NULL;
+	bool mn_match = false;
+
+#define BNX2_VPD_NVRAM_OFFSET	0x300
+#define BNX2_VPD_LEN		128
+#define BNX2_MAX_VER_SLEN	30
+
+	data = kmalloc(256, GFP_KERNEL);
+	if (!data)
+		return;
+
+	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
+			     BNX2_VPD_LEN);
+	if (rc)
+		goto vpd_done;
+
+	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
+		data[i] = data[i + BNX2_VPD_LEN + 3];
+		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
+		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
+		data[i + 3] = data[i + BNX2_VPD_LEN];
+	}
+
+	for (i = 0; i <= BNX2_VPD_LEN - 3; ) {
+		unsigned char val = data[i];
+		unsigned int block_end;
+
+		if (val == 0x82 || val == 0x91) {
+			i = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
+			continue;
+		}
+
+		if (val != 0x90)
+			goto vpd_done;
+
+		block_end = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
+		i += 3;
+
+		if (block_end > BNX2_VPD_LEN)
+			goto vpd_done;
+
+		while (i < (block_end - 2)) {
+			int len = data[i + 2];
+
+			if (i + 3 + len > block_end)
+				goto vpd_done;
+
+			if (data[i] == 'M' && data[i + 1] == 'N') {
+				if (len != 4 ||
+				    memcmp(&data[i + 3], "1028", 4))
+					goto vpd_done;
+				mn_match = true;
+
+			} else if (data[i] == 'V' && data[i + 1] == '0') {
+				if (len > BNX2_MAX_VER_SLEN)
+					goto vpd_done;
+
+				v0_len = len;
+				v0_str = &data[i + 3];
+			}
+			i += 3 + len;
+
+			if (mn_match && v0_str) {
+				memcpy(bp->fw_version, v0_str, v0_len);
+				bp->fw_version[v0_len] = ' ';
+				goto vpd_done;
+			}
+		}
+		goto vpd_done;
+	}
+
+vpd_done:
+	kfree(data);
+}
+
 static int __devinit
 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 {
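The magic numbers in bnx2_read_vpd_fw_ver() come straight from the PCI Vital Product Data format: 0x82 tags the identifier string, 0x90 the read-only section (VPD-R), 0x91 the read-write section (VPD-W); each tag is followed by a 16-bit little-endian length, and VPD-R holds fields of a two-character keyword plus one length byte. 'MN' is the manufacturer ID, compared against "1028" (Dell's PCI vendor ID), and 'V0' is a vendor-specific string carrying the firmware version, so the VPD version is only trusted on Dell-branded boards. The byte shuffle after bnx2_nvram_read() undoes the 32-bit byte order the NVRAM interface returns. The field layout, as an illustrative (non-driver) type:

	struct vpd_ro_field {		/* illustrative only */
		u8 keyword[2];		/* e.g. 'M','N' or 'V','0' */
		u8 len;			/* payload bytes that follow */
		u8 data[];		/* field payload */
	} __attribute__((packed));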
@@ -7808,10 +7975,18 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 		goto err_out_unmap;
 	}
 
+	bnx2_read_vpd_fw_ver(bp);
+
+	j = strlen(bp->fw_version);
 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
-	for (i = 0, j = 0; i < 3; i++) {
+	for (i = 0; i < 3 && j < 24; i++) {
 		u8 num, k, skip0;
 
+		if (i == 0) {
+			bp->fw_version[j++] = 'b';
+			bp->fw_version[j++] = 'c';
+			bp->fw_version[j++] = ' ';
+		}
 		num = (u8) (reg >> (24 - (i * 8)));
 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
 			if (num >= k || !skip0 || k == 1) {
@@ -7842,8 +8017,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
 
-		bp->fw_version[j++] = ' ';
-		for (i = 0; i < 3; i++) {
+		if (j < 32)
+			bp->fw_version[j++] = ' ';
+		for (i = 0; i < 3 && j < 28; i++) {
 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
 			reg = swab32(reg);
 			memcpy(&bp->fw_version[j], &reg, 4);
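The new bounds checks exist because bp->fw_version no longer starts empty: the VPD string is written first and the bootcode ("bc x.y.z") and management-firmware strings are appended behind it. Assuming fw_version is the 32-byte array declared in bnx2.h (an assumption from memory), the limits keep every append inside the buffer:

	/* assuming: char fw_version[32];   (bnx2.h, assumed from memory)
	 * j < 24: room for "bc " plus up to three dotted version fields;
	 * j < 28: each 4-byte mfw memcpy touches fw_version[j..j+3] <= [30];
	 * j < 32: the single separator byte still fits,
	 * leaving fw_version[31] free for the terminating NUL. */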
@@ -8264,6 +8440,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
 	}
 	pci_set_master(pdev);
 	pci_restore_state(pdev);
+	pci_save_state(pdev);
 
 	if (netif_running(dev)) {
 		bnx2_set_power_state(bp, PCI_D0);
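The lone pci_save_state() after pci_restore_state() looks redundant but is not: in kernels of this vintage, restoring consumes the state saved at probe time (the device's state_saved flag is cleared), so without re-saving, a second AER slot reset on the same device would find nothing to restore. Sketch of the recovery hook with the re-arm (a paraphrase, not the full function):

	static pci_ers_result_t example_io_slot_reset(struct pci_dev *pdev)
	{
		/* ... pci_enable_device() and error checks elided ... */
		pci_set_master(pdev);
		pci_restore_state(pdev);	/* consumes the saved state */
		pci_save_state(pdev);		/* re-arm for a future recovery */
		return PCI_ERS_RESULT_RECOVERED;
	}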