Diffstat (limited to 'drivers/net/bnx2x_main.c')

-rw-r--r--	drivers/net/bnx2x_main.c | 251
1 file changed, 128 insertions(+), 123 deletions(-)
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 3e7dc171cdf1..a8eb3c4a47c8 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -44,7 +44,6 @@
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/checksum.h>
-#include <linux/version.h>
 #include <net/ip6_checksum.h>
 #include <linux/workqueue.h>
 #include <linux/crc32.h>
@@ -60,8 +59,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION	"1.45.17"
-#define DRV_MODULE_RELDATE	"2008/08/13"
+#define DRV_MODULE_VERSION	"1.45.21"
+#define DRV_MODULE_RELDATE	"2008/09/03"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -1028,7 +1027,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	if (unlikely(skb == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
+	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
 				 PCI_DMA_FROMDEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		dev_kfree_skb(skb);
@@ -1170,7 +1169,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 	/* move empty skb from pool to prod and map it */
 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
 	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
 	/* move partial skb from cons to pool (don't unmap yet) */
@@ -1277,7 +1276,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
 	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 
 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
@@ -1521,7 +1520,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
 			pci_unmap_single(bp->pdev,
 					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_use_size,
+					 bp->rx_buf_size,
 					 PCI_DMA_FROMDEVICE);
 			skb_reserve(skb, pad);
 			skb_put(skb, len);
@@ -1718,8 +1717,8 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 		return -EEXIST;
 	}
 
-	/* Try for 1 second every 5ms */
-	for (cnt = 0; cnt < 200; cnt++) {
+	/* Try for 5 second every 5ms */
+	for (cnt = 0; cnt < 1000; cnt++) {
 		/* Try to acquire the lock */
 		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
 		lock_status = REG_RD(bp, hw_lock_control_reg);
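Note: the retry budget grows from 200 to 1000 iterations at the same 5 ms delay per attempt, i.e. from 200 × 5 ms = 1 s to 1000 × 5 ms = 5 s before bnx2x_acquire_hw_lock() gives up on the resource.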
@@ -2551,6 +2550,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 		BNX2X_ERR("SPIO5 hw attention\n");
 
 		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 			/* Fan failure attention */
 
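Note: this hunk, together with the matching one-line additions in bnx2x_init_common() and bnx2x_init_port() below, extends fan-failure (SPIO5) handling to the BCM957710A1021G board by adding its board type to the switch cases already covering the A1022G.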
@@ -4229,7 +4229,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 		if (fp->tpa_state[i] == BNX2X_TPA_START)
 			pci_unmap_single(bp->pdev,
 					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_use_size,
+					 bp->rx_buf_size,
 					 PCI_DMA_FROMDEVICE);
 
 		dev_kfree_skb(skb);
@@ -4245,15 +4245,14 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 	u16 ring_prod, cqe_ring_prod;
 	int i, j;
 
-	bp->rx_buf_use_size = bp->dev->mtu;
-	bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
-	bp->rx_buf_size = bp->rx_buf_use_size + 64;
+	bp->rx_buf_size = bp->dev->mtu;
+	bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
+		BCM_RX_ETH_PAYLOAD_ALIGN;
 
 	if (bp->flags & TPA_ENABLE_FLAG) {
 		DP(NETIF_MSG_IFUP,
-		   "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
-		   bp->rx_buf_use_size, bp->rx_buf_size,
-		   bp->dev->mtu + ETH_OVREHEAD);
+		   "rx_buf_size %d effective_mtu %d\n",
+		   bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
 
 		for_each_queue(bp, j) {
 			struct bnx2x_fastpath *fp = &bp->fp[j];
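Note: this is the core rename of the commit. The driver previously carried two sizes per rx buffer: rx_buf_use_size, the length handed to pci_map_single(), and rx_buf_size, the skb allocation padded by a hard-coded 64 bytes of payload-alignment slack. They are collapsed into a single rx_buf_size that already includes the slack, with the magic 64 replaced by the named constant BCM_RX_ETH_PAYLOAD_ALIGN; every map/unmap site in the hunks above and below then switches to the one remaining field.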
@@ -4462,9 +4461,10 @@ static void bnx2x_init_context(struct bnx2x *bp)
 		context->ustorm_st_context.common.status_block_id = sb_id;
 		context->ustorm_st_context.common.flags =
 			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
-		context->ustorm_st_context.common.mc_alignment_size = 64;
+		context->ustorm_st_context.common.mc_alignment_size =
+			BCM_RX_ETH_PAYLOAD_ALIGN;
 		context->ustorm_st_context.common.bd_buff_size =
-			bp->rx_buf_use_size;
+			bp->rx_buf_size;
 		context->ustorm_st_context.common.bd_page_base_hi =
 			U64_HI(fp->rx_desc_mapping);
 		context->ustorm_st_context.common.bd_page_base_lo =
@@ -4606,6 +4606,17 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
 {
 	int i;
 
+	if (bp->flags & TPA_ENABLE_FLAG) {
+		struct tstorm_eth_tpa_exist tpa = {0};
+
+		tpa.tpa_exist = 1;
+
+		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
+		       ((u32 *)&tpa)[0]);
+		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
+		       ((u32 *)&tpa)[1]);
+	}
+
 	/* Zero this manually as its initialization is
 	   currently missing in the initTool */
 	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
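Note: the TPA-exists flag programming moves from bnx2x_init_common() (its removal appears further down) into bnx2x_init_internal_common(), next to the rest of the storm internal-memory defaults. The idiom itself — pushing an 8-byte firmware structure through two 32-bit register writes — can be illustrated with a minimal userspace sketch; reg_wr(), the offsets and the struct layout below are stand-ins, not the real TSTORM definitions:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for REG_WR(): the MMIO window is written 32 bits at a time. */
    static void reg_wr(uint32_t off, uint32_t val)
    {
        printf("REG_WR(+0x%x) <- 0x%08x\n", off, val);
    }

    int main(void)
    {
        /* Illustrative layout; the driver uses struct tstorm_eth_tpa_exist. */
        struct { uint32_t tpa_exist; uint32_t reserved; } tpa = { 1, 0 };
        uint32_t w[2];

        /* The driver casts &tpa to (u32 *); memcpy is the alias-safe spelling. */
        memcpy(w, &tpa, sizeof(w));
        reg_wr(0, w[0]);
        reg_wr(4, w[1]);
        return 0;
    }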
@@ -4706,7 +4717,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 	}
 
 	/* Init CQ ring mapping and aggregation size */
-	max_agg_size = min((u32)(bp->rx_buf_use_size +
+	max_agg_size = min((u32)(bp->rx_buf_size +
 				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
 			   (u32)0xffff);
 	for_each_queue(bp, i) {
@@ -5338,6 +5349,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
 	}
 
 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* Fan failure is indicated by SPIO 5 */
 		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
@@ -5364,17 +5376,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
 
 	enable_blocks_attention(bp);
 
-	if (bp->flags & TPA_ENABLE_FLAG) {
-		struct tstorm_eth_tpa_exist tmp = {0};
-
-		tmp.tpa_exist = 1;
-
-		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
-		       ((u32 *)&tmp)[0]);
-		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
-		       ((u32 *)&tmp)[1]);
-	}
-
 	if (!BP_NOMCP(bp)) {
 		bnx2x_acquire_phy_lock(bp);
 		bnx2x_common_init_phy(bp, bp->common.shmem_base);
@@ -5532,6 +5533,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
 	/* Port DMAE comes here */
 
 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* add SPIO 5 to group 0 */
 		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -5938,7 +5940,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 
 			pci_unmap_single(bp->pdev,
 					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_use_size,
+					 bp->rx_buf_size,
 					 PCI_DMA_FROMDEVICE);
 
 			rx_buf->skb = NULL;
@@ -6056,6 +6058,44 @@ static int bnx2x_req_irq(struct bnx2x *bp)
 	return rc;
 }
 
+static void bnx2x_napi_enable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_napi_disable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_netif_start(struct bnx2x *bp)
+{
+	if (atomic_dec_and_test(&bp->intr_sem)) {
+		if (netif_running(bp->dev)) {
+			if (bp->state == BNX2X_STATE_OPEN)
+				netif_wake_queue(bp->dev);
+			bnx2x_napi_enable(bp);
+			bnx2x_int_enable(bp);
+		}
+	}
+}
+
+static void bnx2x_netif_stop(struct bnx2x *bp)
+{
+	bnx2x_int_disable_sync(bp);
+	if (netif_running(bp->dev)) {
+		bnx2x_napi_disable(bp);
+		netif_tx_disable(bp->dev);
+		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
+	}
+}
+
 /*
  * Init service functions
  */
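Note: these four helpers centralise start/stop choreography that was previously open-coded at each call site and duplicated near the ethtool self-test code (removed further down). The ordering is deliberate: bnx2x_netif_stop() masks chip interrupts first and only then disables NAPI and the tx queue, while bnx2x_netif_start() enables in the reverse order, finishing with interrupts.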
@@ -6339,7 +6379,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	rc = bnx2x_init_hw(bp, load_code);
 	if (rc) {
 		BNX2X_ERR("HW init failed, aborting\n");
-		goto load_error;
+		goto load_int_disable;
 	}
 
 	/* Setup NIC internals and enable interrupts */
@@ -6351,7 +6391,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		if (!load_code) {
 			BNX2X_ERR("MCP response failure, aborting\n");
 			rc = -EBUSY;
-			goto load_int_disable;
+			goto load_rings_free;
 		}
 	}
 
@@ -6361,8 +6401,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	/* Enable Rx interrupt handling before sending the ramrod
 	   as it's completed on Rx FP queue */
-	for_each_queue(bp, i)
-		napi_enable(&bnx2x_fp(bp, i, napi));
+	bnx2x_napi_enable(bp);
 
 	/* Enable interrupt handling */
 	atomic_set(&bp->intr_sem, 0);
@@ -6370,7 +6409,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	rc = bnx2x_setup_leading(bp);
 	if (rc) {
 		BNX2X_ERR("Setup leading failed!\n");
-		goto load_stop_netif;
+		goto load_netif_stop;
 	}
 
 	if (CHIP_IS_E1H(bp))
@@ -6383,7 +6422,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	for_each_nondefault_queue(bp, i) {
 		rc = bnx2x_setup_multi(bp, i);
 		if (rc)
-			goto load_stop_netif;
+			goto load_netif_stop;
 	}
 
 	if (CHIP_IS_E1(bp))
@@ -6428,20 +6467,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	return 0;
 
-load_stop_netif:
+load_netif_stop:
+	bnx2x_napi_disable(bp);
+load_rings_free:
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
 	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 load_int_disable:
 	bnx2x_int_disable_sync(bp);
-
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
-
-	/* Free SKBs, SGEs, TPA pool and driver internals */
-	bnx2x_free_skbs(bp);
-	for_each_queue(bp, i)
-		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error:
 	bnx2x_free_mem(bp);
 
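Note: the error labels in bnx2x_nic_load() now unwind in reverse order of setup: load_netif_stop disables NAPI and falls through to load_rings_free (SKBs, SGEs, TPA pool), then load_int_disable masks interrupts and releases the IRQs, and load_error frees memory. Previously the ring cleanup sat after the IRQ release, and a bnx2x_init_hw() failure jumped straight to load_error without ever masking interrupts.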
@@ -6456,7 +6492,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
 
 	/* halt the connection */
 	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
 
 	/* Wait for completion */
 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
@@ -6614,11 +6650,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 	bnx2x_set_storm_rx_mode(bp);
 
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-	}
-
+	bnx2x_netif_stop(bp);
+	if (!netif_running(bp->dev))
+		bnx2x_napi_disable(bp);
 	del_timer_sync(&bp->timer);
 	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
 		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
@@ -6632,9 +6666,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 		smp_rmb();
 		while (BNX2X_HAS_TX_WORK(fp)) {
 
-			if (!netif_running(bp->dev))
-				bnx2x_tx_int(fp, 1000);
-
+			bnx2x_tx_int(fp, 1000);
 			if (!cnt) {
 				BNX2X_ERR("timeout waiting for queue[%d]\n",
 					  i);
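Note: two unload fixes here. The stack-level shutdown goes through the new bnx2x_netif_stop() (with NAPI additionally disabled when the interface was never up), and the tx-drain loop drops an inverted netif_running() test that previously reaped pending tx completions only while the device was *not* running.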
@@ -6650,46 +6682,12 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			smp_rmb();
 		}
 	}
-
 	/* Give HW time to discard old tx messages */
 	msleep(1);
 
-	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-	/* Disable interrupts after Tx and Rx are disabled on stack level */
-	bnx2x_int_disable_sync(bp);
-
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 
-	if (unload_mode == UNLOAD_NORMAL)
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
-	else if (bp->flags & NO_WOL_FLAG) {
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
-		if (CHIP_IS_E1H(bp))
-			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
-
-	} else if (bp->wol) {
-		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-		u8 *mac_addr = bp->dev->dev_addr;
-		u32 val;
-		/* The mac address is written to entries 1-4 to
-		   preserve entry 0 which is used by the PMF */
-		u8 entry = (BP_E1HVN(bp) + 1)*8;
-
-		val = (mac_addr[0] << 8) | mac_addr[1];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
-
-		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
-		      (mac_addr[4] << 8) | mac_addr[5];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
-
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
-
-	} else
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
 	if (CHIP_IS_E1(bp)) {
 		struct mac_configuration_cmd *config =
 			bnx2x_sp(bp, mcast_config);
@@ -6712,14 +6710,41 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
 
 	} else { /* E1H */
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
 		bnx2x_set_mac_addr_e1h(bp, 0);
 
 		for (i = 0; i < MC_HASH_SIZE; i++)
 			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
 	}
 
-	if (CHIP_IS_E1H(bp))
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+	if (unload_mode == UNLOAD_NORMAL)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+	else if (bp->flags & NO_WOL_FLAG) {
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+		if (CHIP_IS_E1H(bp))
+			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
+
+	} else if (bp->wol) {
+		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+		u8 *mac_addr = bp->dev->dev_addr;
+		u32 val;
+		/* The mac address is written to entries 1-4 to
+		   preserve entry 0 which is used by the PMF */
+		u8 entry = (BP_E1HVN(bp) + 1)*8;
+
+		val = (mac_addr[0] << 8) | mac_addr[1];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
+
+		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+		      (mac_addr[4] << 8) | mac_addr[5];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
+	} else
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
 	/* Close multi and leading connections
 	   Completions for ramrods are collected in a synchronous way */
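Note: the reset_code/WOL selection below is not new logic; it is the block removed above, moved verbatim so that it runs after the MAC filters have been cleared. The standalone E1H write that disabled NIG_REG_LLH0_FUNC_EN is folded into the E1H branch itself.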
@@ -6822,6 +6847,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 		 */
 		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+		if (val == 0x7)
+			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+
 		if (val == 0x7) {
 			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 			/* save our func */
@@ -6899,7 +6928,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 				(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
 				 DRV_MSG_SEQ_NUMBER_MASK);
 			}
-			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 		}
 	}
 
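Note: in bnx2x_undi_unload() the DORQ_REG_NORM_CID_OFST value is now checked and cleared, and the UNDI hardware lock released, immediately after the read instead of at the very end of the long reset sequence, so the lock is no longer held across the whole UNDI shutdown.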
@@ -8618,34 +8646,6 @@ test_mem_exit:
 	return rc;
 }
 
-static void bnx2x_netif_start(struct bnx2x *bp)
-{
-	int i;
-
-	if (atomic_dec_and_test(&bp->intr_sem)) {
-		if (netif_running(bp->dev)) {
-			bnx2x_int_enable(bp);
-			for_each_queue(bp, i)
-				napi_enable(&bnx2x_fp(bp, i, napi));
-			if (bp->state == BNX2X_STATE_OPEN)
-				netif_wake_queue(bp->dev);
-		}
-	}
-}
-
-static void bnx2x_netif_stop(struct bnx2x *bp)
-{
-	int i;
-
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-		for_each_queue(bp, i)
-			napi_disable(&bnx2x_fp(bp, i, napi));
-	}
-	bnx2x_int_disable_sync(bp);
-}
-
 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
 {
 	int cnt = 1000;
@@ -9251,6 +9251,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 						 napi);
 	struct bnx2x *bp = fp->bp;
 	int work_done = 0;
+	u16 rx_cons_sb;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -9266,10 +9267,16 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 	if (BNX2X_HAS_TX_WORK(fp))
 		bnx2x_tx_int(fp, budget);
 
+	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		rx_cons_sb++;
 	if (BNX2X_HAS_RX_WORK(fp))
 		work_done = bnx2x_rx_int(fp, budget);
 
 	rmb(); /* BNX2X_HAS_WORK() reads the status block */
+	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		rx_cons_sb++;
 
 	/* must not complete if we consumed full budget */
 	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
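Note: bnx2x_poll() now snapshots the status-block rx consumer index before each work check. The adjustment guards against the index landing on a completion-queue page boundary: in this driver's ring layout the last slot of each RCQ page holds a next-page pointer rather than a real CQE, so an index pointing at it is bumped past the slot (the local rx_cons_sb is presumably consumed by the BNX2X_HAS_RX_WORK()/BNX2X_HAS_WORK() macros, whose change lives in bnx2x.h outside this file). A minimal userspace sketch of the skip, with an assumed per-page descriptor count:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed value: usable CQEs per page; the real MAX_RCQ_DESC_CNT
       depends on page and descriptor size. */
    #define MAX_RCQ_DESC_CNT 127

    /* If the status-block index sits on the page's next-page element,
       step past it so it compares cleanly with the driver's consumer. */
    static uint16_t rcq_skip_next_page(uint16_t idx)
    {
        if ((idx & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
            idx++;
        return idx;
    }

    int main(void)
    {
        printf("%u -> %u\n", 127, rcq_skip_next_page(127)); /* 127 -> 128 */
        printf("%u -> %u\n", 50, rcq_skip_next_page(50));   /* unchanged  */
        return 0;
    }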
@@ -9485,8 +9492,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	fp_index = (smp_processor_id() % bp->num_queues);
 	fp = &bp->fp[fp_index];
 
-	if (unlikely(bnx2x_tx_avail(bp->fp) <
-				  (skb_shinfo(skb)->nr_frags + 3))) {
+	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
 		bp->eth_stats.driver_xoff++,
 		netif_stop_queue(dev);
 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
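Note: the one-liner in bnx2x_start_xmit() is a genuine bug fix, not a cleanup: the ring-full test evaluated bnx2x_tx_avail(bp->fp), which is always queue 0, rather than the fastpath selected for this skb, so on a multi-queue device the wrong ring's occupancy gated the queue.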
@@ -9549,7 +9555,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_bd->vlan = cpu_to_le16(pkt_prod);
 
 	if (xmit_type) {
-
 		/* turn on parsing and get a BD */
 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 		pbd = (void *)&fp->tx_desc_ring[bd_prod];