Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r--	drivers/net/bnx2x_main.c | 220
1 files changed, 113 insertions, 107 deletions
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 971576b43687..82deea0a63f5 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -59,8 +59,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION	"1.45.17"
-#define DRV_MODULE_RELDATE	"2008/08/13"
+#define DRV_MODULE_VERSION	"1.45.20"
+#define DRV_MODULE_RELDATE	"2008/08/25"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -1717,8 +1717,8 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 		return -EEXIST;
 	}
 
-	/* Try for 1 second every 5ms */
-	for (cnt = 0; cnt < 200; cnt++) {
+	/* Try for 5 second every 5ms */
+	for (cnt = 0; cnt < 1000; cnt++) {
 		/* Try to acquire the lock */
 		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
 		lock_status = REG_RD(bp, hw_lock_control_reg);
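
Note: the change above keeps the 5 ms polling interval but raises the retry count from 200 to 1000, so bnx2x_acquire_hw_lock() now waits roughly 5 seconds instead of 1 before giving up. A minimal user-space sketch of the same retry pattern, with a hypothetical try_acquire() standing in for the REG_WR/REG_RD pair on the lock register:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Hypothetical stand-in for writing the resource bit and reading the
     * lock register back; here the lock never becomes free. */
    static bool try_acquire(void)
    {
    	return false;
    }

    int main(void)
    {
    	int cnt;

    	/* 1000 attempts, 5 ms apart: roughly the 5 second budget of the new code. */
    	for (cnt = 0; cnt < 1000; cnt++) {
    		if (try_acquire()) {
    			puts("lock acquired");
    			return 0;
    		}
    		usleep(5000);	/* 5 ms between attempts */
    	}
    	puts("timeout waiting for the lock");
    	return 1;
    }
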
@@ -2550,6 +2550,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 		BNX2X_ERR("SPIO5 hw attention\n");
 
 		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 			/* Fan failure attention */
 
@@ -4605,6 +4606,17 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
 {
 	int i;
 
+	if (bp->flags & TPA_ENABLE_FLAG) {
+		struct tstorm_eth_tpa_exist tpa = {0};
+
+		tpa.tpa_exist = 1;
+
+		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
+		       ((u32 *)&tpa)[0]);
+		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
+		       ((u32 *)&tpa)[1]);
+	}
+
 	/* Zero this manually as its initialization is
 	   currently missing in the initTool */
 	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
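
Note: this block is relocated from bnx2x_init_common() (removed in a later hunk) into bnx2x_init_internal_common(), and it tells the TSTORM firmware that TPA is in use by copying the 8-byte tstorm_eth_tpa_exist structure into internal memory as two 32-bit writes. A user-space sketch of that split, using an assumed stand-in layout for the structure (the real one comes from the firmware headers):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Assumed stand-in layout: one flag padded to 8 bytes. */
    struct tpa_exist {
    	uint8_t tpa_exist;
    	uint8_t reserved[7];
    };

    /* Stand-in for REG_WR(): print the offset and the 32-bit value. */
    static void reg_wr(unsigned int offset, uint32_t val)
    {
    	printf("REG_WR(+0x%x) = 0x%08x\n", offset, (unsigned int)val);
    }

    int main(void)
    {
    	struct tpa_exist tpa = {0};
    	uint32_t words[2];

    	tpa.tpa_exist = 1;

    	/* The driver casts the struct to u32[]; memcpy does the same split here. */
    	memcpy(words, &tpa, sizeof(words));
    	reg_wr(0, words[0]);	/* TSTORM_TPA_EXIST_OFFSET */
    	reg_wr(4, words[1]);	/* TSTORM_TPA_EXIST_OFFSET + 4 */
    	return 0;
    }
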
@@ -5337,6 +5349,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
 	}
 
 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* Fan failure is indicated by SPIO 5 */
 		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
@@ -5363,17 +5376,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
 
 	enable_blocks_attention(bp);
 
-	if (bp->flags & TPA_ENABLE_FLAG) {
-		struct tstorm_eth_tpa_exist tmp = {0};
-
-		tmp.tpa_exist = 1;
-
-		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
-		       ((u32 *)&tmp)[0]);
-		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
-		       ((u32 *)&tmp)[1]);
-	}
-
 	if (!BP_NOMCP(bp)) {
 		bnx2x_acquire_phy_lock(bp);
 		bnx2x_common_init_phy(bp, bp->common.shmem_base);
@@ -5531,6 +5533,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
 	/* Port DMAE comes here */
 
 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* add SPIO 5 to group 0 */
 		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -6055,6 +6058,44 @@ static int bnx2x_req_irq(struct bnx2x *bp)
 	return rc;
 }
 
+static void bnx2x_napi_enable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_napi_disable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_netif_start(struct bnx2x *bp)
+{
+	if (atomic_dec_and_test(&bp->intr_sem)) {
+		if (netif_running(bp->dev)) {
+			if (bp->state == BNX2X_STATE_OPEN)
+				netif_wake_queue(bp->dev);
+			bnx2x_napi_enable(bp);
+			bnx2x_int_enable(bp);
+		}
+	}
+}
+
+static void bnx2x_netif_stop(struct bnx2x *bp)
+{
+	bnx2x_int_disable_sync(bp);
+	if (netif_running(bp->dev)) {
+		bnx2x_napi_disable(bp);
+		netif_tx_disable(bp->dev);
+		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
+	}
+}
+
 /*
  * Init service functions
  */
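
Note: these four helpers replace per-queue napi_enable()/napi_disable() loops that were previously open-coded at each call site (the old copies of bnx2x_netif_start()/bnx2x_netif_stop() are deleted in a later hunk). The start path only re-enables the Tx queue, NAPI, and interrupts once atomic_dec_and_test() brings the interrupt semaphore to zero, while the stop path masks interrupts first. A user-space sketch of that gating idea, with stub functions in place of the driver calls:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int intr_sem = 1;	/* >0 means the datapath is gated off */

    static void wake_tx_queue(void)   { puts("wake Tx queue"); }
    static void napi_enable_all(void) { puts("enable NAPI on every queue"); }
    static void int_enable(void)      { puts("unmask interrupts"); }

    /* Rough analogue of the netif_start() gate: only the caller that drops
     * the semaphore to zero re-enables the datapath, in the same order. */
    static void netif_start(void)
    {
    	if (atomic_fetch_sub(&intr_sem, 1) == 1) {	/* dec-and-test */
    		wake_tx_queue();
    		napi_enable_all();
    		int_enable();
    	}
    }

    int main(void)
    {
    	netif_start();	/* 1 -> 0: datapath comes back up */
    	return 0;
    }
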
@@ -6338,7 +6379,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	rc = bnx2x_init_hw(bp, load_code);
 	if (rc) {
 		BNX2X_ERR("HW init failed, aborting\n");
-		goto load_error;
+		goto load_int_disable;
 	}
 
 	/* Setup NIC internals and enable interrupts */
@@ -6350,7 +6391,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		if (!load_code) {
 			BNX2X_ERR("MCP response failure, aborting\n");
 			rc = -EBUSY;
-			goto load_int_disable;
+			goto load_rings_free;
 		}
 	}
 
@@ -6360,8 +6401,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	/* Enable Rx interrupt handling before sending the ramrod
 	   as it's completed on Rx FP queue */
-	for_each_queue(bp, i)
-		napi_enable(&bnx2x_fp(bp, i, napi));
+	bnx2x_napi_enable(bp);
 
 	/* Enable interrupt handling */
 	atomic_set(&bp->intr_sem, 0);
@@ -6369,7 +6409,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	rc = bnx2x_setup_leading(bp);
 	if (rc) {
 		BNX2X_ERR("Setup leading failed!\n");
-		goto load_stop_netif;
+		goto load_netif_stop;
 	}
 
 	if (CHIP_IS_E1H(bp))
@@ -6382,7 +6422,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		for_each_nondefault_queue(bp, i) {
 			rc = bnx2x_setup_multi(bp, i);
 			if (rc)
-				goto load_stop_netif;
+				goto load_netif_stop;
 		}
 
 	if (CHIP_IS_E1(bp))
@@ -6427,20 +6467,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	return 0;
 
-load_stop_netif:
+load_netif_stop:
+	bnx2x_napi_disable(bp);
+load_rings_free:
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
 	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 load_int_disable:
 	bnx2x_int_disable_sync(bp);
-
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
-
-	/* Free SKBs, SGEs, TPA pool and driver internals */
-	bnx2x_free_skbs(bp);
-	for_each_queue(bp, i)
-		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 load_error:
 	bnx2x_free_mem(bp);
 
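
Note: the error labels in bnx2x_nic_load() are renamed and reordered so that each failure jumps to a label that undoes only what was already set up: load_netif_stop disables NAPI, load_rings_free releases SKBs and SGE ranges, load_int_disable masks interrupts and frees the IRQs, and load_error frees memory. A generic sketch of this staged goto-unwind pattern (the setup_*/undo_* names are placeholders, not driver functions):

    #include <stdio.h>

    static int  setup_irq(void)   { puts("request IRQs");   return 0; }
    static int  setup_rings(void) { puts("allocate rings"); return 0; }
    static int  setup_hw(void)    { puts("init hardware");  return -1; }	/* pretend this fails */
    static void undo_rings(void)  { puts("free rings"); }
    static void undo_irq(void)    { puts("free IRQs"); }

    static int do_load(void)
    {
    	int rc;

    	rc = setup_irq();
    	if (rc)
    		goto load_error;
    	rc = setup_rings();
    	if (rc)
    		goto load_irq_free;
    	rc = setup_hw();
    	if (rc)
    		goto load_rings_free;	/* later failures jump to later labels */
    	return 0;

    load_rings_free:	/* unwind strictly in reverse order of setup */
    	undo_rings();
    load_irq_free:
    	undo_irq();
    load_error:
    	return rc;
    }

    int main(void)
    {
    	return do_load() ? 1 : 0;
    }
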
@@ -6455,7 +6492,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
 
 	/* halt the connection */
 	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
 
 	/* Wait for completion */
 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
@@ -6613,11 +6650,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 	bnx2x_set_storm_rx_mode(bp);
 
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-	}
-
+	bnx2x_netif_stop(bp);
+	if (!netif_running(bp->dev))
+		bnx2x_napi_disable(bp);
 	del_timer_sync(&bp->timer);
 	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
 		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
@@ -6631,9 +6666,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			smp_rmb();
 			while (BNX2X_HAS_TX_WORK(fp)) {
 
-				if (!netif_running(bp->dev))
-					bnx2x_tx_int(fp, 1000);
-
+				bnx2x_tx_int(fp, 1000);
 				if (!cnt) {
 					BNX2X_ERR("timeout waiting for queue[%d]\n",
 						  i);
@@ -6649,46 +6682,12 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			smp_rmb();
 		}
 	}
-
 	/* Give HW time to discard old tx messages */
 	msleep(1);
 
-	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-	/* Disable interrupts after Tx and Rx are disabled on stack level */
-	bnx2x_int_disable_sync(bp);
-
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 
-	if (unload_mode == UNLOAD_NORMAL)
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
-	else if (bp->flags & NO_WOL_FLAG) {
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
-		if (CHIP_IS_E1H(bp))
-			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
-
-	} else if (bp->wol) {
-		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-		u8 *mac_addr = bp->dev->dev_addr;
-		u32 val;
-		/* The mac address is written to entries 1-4 to
-		   preserve entry 0 which is used by the PMF */
-		u8 entry = (BP_E1HVN(bp) + 1)*8;
-
-		val = (mac_addr[0] << 8) | mac_addr[1];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
-
-		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
-		      (mac_addr[4] << 8) | mac_addr[5];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
-
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
-
-	} else
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
 	if (CHIP_IS_E1(bp)) {
 		struct mac_configuration_cmd *config =
 					      bnx2x_sp(bp, mcast_config);
@@ -6711,14 +6710,41 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			     U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
 
 	} else { /* E1H */
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
 		bnx2x_set_mac_addr_e1h(bp, 0);
 
 		for (i = 0; i < MC_HASH_SIZE; i++)
 			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
 	}
 
-	if (CHIP_IS_E1H(bp))
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+	if (unload_mode == UNLOAD_NORMAL)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+	else if (bp->flags & NO_WOL_FLAG) {
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+		if (CHIP_IS_E1H(bp))
+			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
+
+	} else if (bp->wol) {
+		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+		u8 *mac_addr = bp->dev->dev_addr;
+		u32 val;
+		/* The mac address is written to entries 1-4 to
+		   preserve entry 0 which is used by the PMF */
+		u8 entry = (BP_E1HVN(bp) + 1)*8;
+
+		val = (mac_addr[0] << 8) | mac_addr[1];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
+
+		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+		      (mac_addr[4] << 8) | mac_addr[5];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
+	} else
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
 	/* Close multi and leading connections
 	   Completions for ramrods are collected in a synchronous way */
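
Note: the WOL branch is moved here so that reset_code is chosen after the MAC and multicast cleanup above; it packs the 6-byte station address into two 32-bit words before writing the EMAC_REG_EMAC_MAC_MATCH entry. A user-space sketch of that packing (example address only; the driver uses bp->dev->dev_addr):

    #include <stdio.h>

    int main(void)
    {
    	unsigned char mac[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
    	unsigned int val;

    	/* First word: bytes 0-1, written at EMAC_REG_EMAC_MAC_MATCH + entry */
    	val = (mac[0] << 8) | mac[1];
    	printf("MAC_MATCH[entry]     = 0x%08x\n", val);

    	/* Second word: bytes 2-5, written at the same entry + 4 */
    	val = ((unsigned int)mac[2] << 24) | (mac[3] << 16) |
    	      (mac[4] << 8) | mac[5];
    	printf("MAC_MATCH[entry + 4] = 0x%08x\n", val);
    	return 0;
    }
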
@@ -6821,6 +6847,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 		 */
 		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+		if (val == 0x7)
+			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+
 		if (val == 0x7) {
 			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 			/* save our func */
@@ -6898,7 +6928,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 				      (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
 				       DRV_MSG_SEQ_NUMBER_MASK);
 			}
-			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 		}
 	}
 
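
Note: taken together, the two bnx2x_undi_unload() hunks shrink the UNDI hardware-lock critical section: DORQ_REG_NORM_CID_OFST is read and, if it shows the UNDI signature 0x7, cleared, and the lock is released right away instead of being held across the rest of the unload sequence. A generic user-space sketch of the narrowed critical section (a pthread mutex stands in for the hardware lock):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t undi_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int dorq_norm_cid_ofst = 0x7;	/* pretend UNDI left its footprint */

    int main(void)
    {
    	unsigned int val;

    	/* Hold the lock only around the read-and-clear of the shared value. */
    	pthread_mutex_lock(&undi_lock);
    	val = dorq_norm_cid_ofst;
    	if (val == 0x7)
    		dorq_norm_cid_ofst = 0;
    	pthread_mutex_unlock(&undi_lock);

    	/* The rest of the recovery sequence then runs without the lock held. */
    	if (val == 0x7)
    		puts("UNDI was loaded; continue the unload sequence unlocked");
    	return 0;
    }
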
@@ -8617,34 +8646,6 @@ test_mem_exit:
 	return rc;
 }
 
-static void bnx2x_netif_start(struct bnx2x *bp)
-{
-	int i;
-
-	if (atomic_dec_and_test(&bp->intr_sem)) {
-		if (netif_running(bp->dev)) {
-			bnx2x_int_enable(bp);
-			for_each_queue(bp, i)
-				napi_enable(&bnx2x_fp(bp, i, napi));
-			if (bp->state == BNX2X_STATE_OPEN)
-				netif_wake_queue(bp->dev);
-		}
-	}
-}
-
-static void bnx2x_netif_stop(struct bnx2x *bp)
-{
-	int i;
-
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-		for_each_queue(bp, i)
-			napi_disable(&bnx2x_fp(bp, i, napi));
-	}
-	bnx2x_int_disable_sync(bp);
-}
-
 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
 {
 	int cnt = 1000;
@@ -9250,6 +9251,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 						 napi);
 	struct bnx2x *bp = fp->bp;
 	int work_done = 0;
+	u16 rx_cons_sb;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -9265,10 +9267,16 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 	if (BNX2X_HAS_TX_WORK(fp))
 		bnx2x_tx_int(fp, budget);
 
+	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		rx_cons_sb++;
 	if (BNX2X_HAS_RX_WORK(fp))
 		work_done = bnx2x_rx_int(fp, budget);
 
 	rmb(); /* BNX2X_HAS_WORK() reads the status block */
+	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		rx_cons_sb++;
 
 	/* must not complete if we consumed full budget */
 	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
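
Note: the added rx_cons_sb handling reads the Rx completion-queue consumer index from the status block and, when the index lands exactly on MAX_RCQ_DESC_CNT, bumps it by one; in this driver the last slot of each RCQ page is a next-page element rather than a real completion, so an index pointing at it must be skipped before it is compared against the driver's own consumer (that comparison is outside this hunk). A user-space sketch of the rounding, assuming 128 slots per page purely for illustration:

    #include <stdio.h>

    /* Assumed geometry for illustration: 128 CQE slots per RCQ page, the last
     * of which is reserved, so MAX_RCQ_DESC_CNT would be 127 here. */
    #define RCQ_DESC_CNT		128
    #define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - 1)

    /* Mirrors the adjustment added in bnx2x_poll(). */
    static unsigned int adjust_rx_cons(unsigned int rx_cons_sb)
    {
    	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
    		rx_cons_sb++;	/* skip the reserved last slot of the page */
    	return rx_cons_sb;
    }

    int main(void)
    {
    	printf("126 -> %u\n", adjust_rx_cons(126));	/* unchanged */
    	printf("127 -> %u\n", adjust_rx_cons(127));	/* bumped to 128 */
    	printf("255 -> %u\n", adjust_rx_cons(255));	/* bumped to 256 */
    	return 0;
    }
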
@@ -9484,8 +9492,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	fp_index = (smp_processor_id() % bp->num_queues);
 	fp = &bp->fp[fp_index];
 
-	if (unlikely(bnx2x_tx_avail(bp->fp) <
-				     (skb_shinfo(skb)->nr_frags + 3))) {
+	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
 		bp->eth_stats.driver_xoff++,
 		netif_stop_queue(dev);
 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
@@ -9548,7 +9555,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_bd->vlan = cpu_to_le16(pkt_prod);
 
 	if (xmit_type) {
-
 		/* turn on parsing and get a BD */
 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 		pbd = (void *)&fp->tx_desc_ring[bd_prod];