Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r--  drivers/net/bnx2x_main.c  125
1 file changed, 98 insertions(+), 27 deletions(-)
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 82deea0a63f5..fce745148ff9 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -59,8 +59,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION	"1.45.20"
-#define DRV_MODULE_RELDATE	"2008/08/25"
+#define DRV_MODULE_VERSION	"1.45.22"
+#define DRV_MODULE_RELDATE	"2008/09/09"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -649,15 +649,16 @@ static void bnx2x_int_disable(struct bnx2x *bp)
 		BNX2X_ERR("BUG! proper val not read from IGU!\n");
 }
 
-static void bnx2x_int_disable_sync(struct bnx2x *bp)
+static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
 {
 	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
 	int i;
 
 	/* disable interrupt handling */
 	atomic_inc(&bp->intr_sem);
-	/* prevent the HW from sending interrupts */
-	bnx2x_int_disable(bp);
+	if (disable_hw)
+		/* prevent the HW from sending interrupts */
+		bnx2x_int_disable(bp);
 
 	/* make sure all ISRs are done */
 	if (msix) {
@@ -1027,7 +1028,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	if (unlikely(skb == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
+	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
 				 PCI_DMA_FROMDEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		dev_kfree_skb(skb);
@@ -1169,7 +1170,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 	/* move empty skb from pool to prod and map it */
 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
 	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
 	/* move partial skb from cons to pool (don't unmap yet) */
@@ -1276,7 +1277,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
 	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 
 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
@@ -1520,7 +1521,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
 				pci_unmap_single(bp->pdev,
 					pci_unmap_addr(rx_buf, mapping),
-						 bp->rx_buf_use_size,
+						 bp->rx_buf_size,
 						 PCI_DMA_FROMDEVICE);
 				skb_reserve(skb, pad);
 				skb_put(skb, len);
@@ -4229,7 +4230,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 		if (fp->tpa_state[i] == BNX2X_TPA_START)
 			pci_unmap_single(bp->pdev,
 					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_use_size,
+					 bp->rx_buf_size,
 					 PCI_DMA_FROMDEVICE);
 
 		dev_kfree_skb(skb);
@@ -4245,15 +4246,14 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 	u16 ring_prod, cqe_ring_prod;
 	int i, j;
 
-	bp->rx_buf_use_size = bp->dev->mtu;
-	bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
-	bp->rx_buf_size = bp->rx_buf_use_size + 64;
+	bp->rx_buf_size = bp->dev->mtu;
+	bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
+		BCM_RX_ETH_PAYLOAD_ALIGN;
 
 	if (bp->flags & TPA_ENABLE_FLAG) {
 		DP(NETIF_MSG_IFUP,
-		   "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
-		   bp->rx_buf_use_size, bp->rx_buf_size,
-		   bp->dev->mtu + ETH_OVREHEAD);
+		   "rx_buf_size %d effective_mtu %d\n",
+		   bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
 
 		for_each_queue(bp, j) {
 			struct bnx2x_fastpath *fp = &bp->fp[j];
@@ -4462,9 +4462,10 @@ static void bnx2x_init_context(struct bnx2x *bp)
 		context->ustorm_st_context.common.status_block_id = sb_id;
 		context->ustorm_st_context.common.flags =
 			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
-		context->ustorm_st_context.common.mc_alignment_size = 64;
+		context->ustorm_st_context.common.mc_alignment_size =
+			BCM_RX_ETH_PAYLOAD_ALIGN;
 		context->ustorm_st_context.common.bd_buff_size =
-						bp->rx_buf_use_size;
+						bp->rx_buf_size;
 		context->ustorm_st_context.common.bd_page_base_hi =
 			U64_HI(fp->rx_desc_mapping);
 		context->ustorm_st_context.common.bd_page_base_lo =
@@ -4717,7 +4718,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 	}
 
 	/* Init CQ ring mapping and aggregation size */
-	max_agg_size = min((u32)(bp->rx_buf_use_size +
+	max_agg_size = min((u32)(bp->rx_buf_size +
 				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
 			   (u32)0xffff);
 	for_each_queue(bp, i) {
@@ -5940,7 +5941,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 
 			pci_unmap_single(bp->pdev,
 					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_use_size,
+					 bp->rx_buf_size,
 					 PCI_DMA_FROMDEVICE);
 
 			rx_buf->skb = NULL;
@@ -6086,9 +6087,9 @@ static void bnx2x_netif_start(struct bnx2x *bp)
 	}
 }
 
-static void bnx2x_netif_stop(struct bnx2x *bp)
+static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 {
-	bnx2x_int_disable_sync(bp);
+	bnx2x_int_disable_sync(bp, disable_hw);
 	if (netif_running(bp->dev)) {
 		bnx2x_napi_disable(bp);
 		netif_tx_disable(bp->dev);
@@ -6475,7 +6476,7 @@ load_rings_free:
 	for_each_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 load_int_disable:
-	bnx2x_int_disable_sync(bp);
+	bnx2x_int_disable_sync(bp, 1);
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 load_error:
@@ -6650,7 +6651,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 	bnx2x_set_storm_rx_mode(bp);
 
-	bnx2x_netif_stop(bp);
+	bnx2x_netif_stop(bp, 1);
 	if (!netif_running(bp->dev))
 		bnx2x_napi_disable(bp);
 	del_timer_sync(&bp->timer);
@@ -8791,7 +8792,7 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
 	if (!netif_running(bp->dev))
 		return BNX2X_LOOPBACK_FAILED;
 
-	bnx2x_netif_stop(bp);
+	bnx2x_netif_stop(bp, 1);
 
 	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
 		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
@@ -10346,6 +10347,74 @@ static int bnx2x_resume(struct pci_dev *pdev)
 	return rc;
 }
 
+static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
+{
+	int i;
+
+	bp->state = BNX2X_STATE_ERROR;
+
+	bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+	bnx2x_netif_stop(bp, 0);
+
+	del_timer_sync(&bp->timer);
+	bp->stats_state = STATS_STATE_DISABLED;
+	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
+	/* Release IRQs */
+	bnx2x_free_irq(bp);
+
+	if (CHIP_IS_E1(bp)) {
+		struct mac_configuration_cmd *config =
+						bnx2x_sp(bp, mcast_config);
+
+		for (i = 0; i < config->hdr.length_6b; i++)
+			CAM_INVALIDATE(config->config_table[i]);
+	}
+
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
+	for_each_queue(bp, i)
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+	bnx2x_free_mem(bp);
+
+	bp->state = BNX2X_STATE_CLOSED;
+
+	netif_carrier_off(bp->dev);
+
+	return 0;
+}
+
+static void bnx2x_eeh_recover(struct bnx2x *bp)
+{
+	u32 val;
+
+	mutex_init(&bp->port.phy_mutex);
+
+	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+	bp->link_params.shmem_base = bp->common.shmem_base;
+	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
+
+	if (!bp->common.shmem_base ||
+	    (bp->common.shmem_base < 0xA0000) ||
+	    (bp->common.shmem_base >= 0xC0000)) {
+		BNX2X_DEV_INFO("MCP not active\n");
+		bp->flags |= NO_MCP_FLAG;
+		return;
+	}
+
+	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+		BNX2X_ERR("BAD MCP validity signature\n");
+
+	if (!BP_NOMCP(bp)) {
+		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
+			      & DRV_MSG_SEQ_NUMBER_MASK);
+		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+	}
+}
+
 /**
  * bnx2x_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device
@@ -10365,7 +10434,7 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
 	netif_device_detach(dev);
 
 	if (netif_running(dev))
-		bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+		bnx2x_eeh_nic_unload(bp);
 
 	pci_disable_device(pdev);
 
@@ -10420,8 +10489,10 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
 
 	rtnl_lock();
 
+	bnx2x_eeh_recover(bp);
+
 	if (netif_running(dev))
-		bnx2x_nic_load(bp, LOAD_OPEN);
+		bnx2x_nic_load(bp, LOAD_NORMAL);
 
 	netif_device_attach(dev);
 