author	Yitchak Gertner <gertner@broadcom.com>	2008-08-25 18:26:24 -0400
committer	David S. Miller <davem@davemloft.net>	2008-08-25 18:26:24 -0400
commit	65abd74dd52a79226070904f138f3f8cbcdcf10b (patch)
tree	63f03f46812fb09a9b52e35a0016351ef5109e02
parent	d101463499b769d04e37d3bcb0e0c6876780ce08 (diff)
bnx2x: NAPI and interrupts enable/disable
Fixing the order of enabling and disabling NAPI and the interrupts

Signed-off-by: Yitchak Gertner <gertner@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/bnx2x_main.c	149
1 file changed, 73 insertions(+), 76 deletions(-)
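The ordering the patch enforces can be read directly from the two new helpers: bnx2x_netif_stop() masks hardware interrupts first, then disables NAPI, then stops the TX queue, while bnx2x_netif_start() brings things back up with NAPI enabled before interrupts. The following standalone C sketch only models that call order with hypothetical stub functions that print what each step would do; it is not driver code, and the stub names are illustrative.

/*
 * Standalone model (assumption: stubs, not bnx2x driver code) of the
 * enable/disable ordering introduced by bnx2x_netif_start()/_stop().
 */
#include <stdio.h>

/* Teardown steps, in the order bnx2x_netif_stop() performs them. */
static void hw_int_disable_sync(void) { puts("  mask HW interrupts and sync"); }
static void napi_disable_all(void)    { puts("  disable NAPI on every queue"); }
static void tx_queue_disable(void)    { puts("  stop the TX queue"); }

/* Bring-up steps, in the order bnx2x_netif_start() performs them. */
static void tx_queue_wake(void)       { puts("  wake the TX queue"); }
static void napi_enable_all(void)     { puts("  enable NAPI on every queue"); }
static void hw_int_enable(void)       { puts("  unmask HW interrupts"); }

static void netif_stop_model(void)
{
	/* Interrupts go first, then NAPI, then TX, as in bnx2x_netif_stop(). */
	hw_int_disable_sync();
	napi_disable_all();
	tx_queue_disable();
}

static void netif_start_model(void)
{
	/* TX wake, then NAPI, then interrupts last, as in bnx2x_netif_start(). */
	tx_queue_wake();
	napi_enable_all();
	hw_int_enable();
}

int main(void)
{
	puts("stop sequence:");
	netif_stop_model();
	puts("start sequence:");
	netif_start_model();
	return 0;
}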
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 1c81da45f691..f0b04c98e45e 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -6058,6 +6058,44 @@ static int bnx2x_req_irq(struct bnx2x *bp)
 	return rc;
 }
 
+static void bnx2x_napi_enable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_napi_disable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_netif_start(struct bnx2x *bp)
+{
+	if (atomic_dec_and_test(&bp->intr_sem)) {
+		if (netif_running(bp->dev)) {
+			if (bp->state == BNX2X_STATE_OPEN)
+				netif_wake_queue(bp->dev);
+			bnx2x_napi_enable(bp);
+			bnx2x_int_enable(bp);
+		}
+	}
+}
+
+static void bnx2x_netif_stop(struct bnx2x *bp)
+{
+	bnx2x_int_disable_sync(bp);
+	if (netif_running(bp->dev)) {
+		bnx2x_napi_disable(bp);
+		netif_tx_disable(bp->dev);
+		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
+	}
+}
+
 /*
  * Init service functions
  */
@@ -6363,8 +6401,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	/* Enable Rx interrupt handling before sending the ramrod
 	   as it's completed on Rx FP queue */
-	for_each_queue(bp, i)
-		napi_enable(&bnx2x_fp(bp, i, napi));
+	bnx2x_napi_enable(bp);
 
 	/* Enable interrupt handling */
 	atomic_set(&bp->intr_sem, 0);
@@ -6431,8 +6468,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	return 0;
 
 load_netif_stop:
-	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
+	bnx2x_napi_disable(bp);
 load_rings_free:
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
@@ -6614,11 +6650,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 	bnx2x_set_storm_rx_mode(bp);
 
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-	}
-
+	bnx2x_netif_stop(bp);
+	if (!netif_running(bp->dev))
+		bnx2x_napi_disable(bp);
 	del_timer_sync(&bp->timer);
 	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
 		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
@@ -6632,9 +6666,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			smp_rmb();
 			while (BNX2X_HAS_TX_WORK(fp)) {
 
-				if (!netif_running(bp->dev))
-					bnx2x_tx_int(fp, 1000);
-
+				bnx2x_tx_int(fp, 1000);
 				if (!cnt) {
 					BNX2X_ERR("timeout waiting for queue[%d]\n",
 						  i);
@@ -6650,46 +6682,12 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			smp_rmb();
 		}
 	}
-
 	/* Give HW time to discard old tx messages */
 	msleep(1);
 
-	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-	/* Disable interrupts after Tx and Rx are disabled on stack level */
-	bnx2x_int_disable_sync(bp);
-
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 
-	if (unload_mode == UNLOAD_NORMAL)
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
-	else if (bp->flags & NO_WOL_FLAG) {
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
-		if (CHIP_IS_E1H(bp))
-			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
-
-	} else if (bp->wol) {
-		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-		u8 *mac_addr = bp->dev->dev_addr;
-		u32 val;
-		/* The mac address is written to entries 1-4 to
-		   preserve entry 0 which is used by the PMF */
-		u8 entry = (BP_E1HVN(bp) + 1)*8;
-
-		val = (mac_addr[0] << 8) | mac_addr[1];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
-
-		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
-		      (mac_addr[4] << 8) | mac_addr[5];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
-
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
-
-	} else
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
 	if (CHIP_IS_E1(bp)) {
 		struct mac_configuration_cmd *config =
 						bnx2x_sp(bp, mcast_config);
@@ -6712,14 +6710,41 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
 
 	} else { /* E1H */
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
 		bnx2x_set_mac_addr_e1h(bp, 0);
 
 		for (i = 0; i < MC_HASH_SIZE; i++)
 			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
 	}
 
-	if (CHIP_IS_E1H(bp))
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+	if (unload_mode == UNLOAD_NORMAL)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+	else if (bp->flags & NO_WOL_FLAG) {
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+		if (CHIP_IS_E1H(bp))
+			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
+
+	} else if (bp->wol) {
+		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+		u8 *mac_addr = bp->dev->dev_addr;
+		u32 val;
+		/* The mac address is written to entries 1-4 to
+		   preserve entry 0 which is used by the PMF */
+		u8 entry = (BP_E1HVN(bp) + 1)*8;
+
+		val = (mac_addr[0] << 8) | mac_addr[1];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
+
+		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+		      (mac_addr[4] << 8) | mac_addr[5];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
+	} else
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
 	/* Close multi and leading connections
 	   Completions for ramrods are collected in a synchronous way */
@@ -8621,34 +8646,6 @@ test_mem_exit:
 	return rc;
 }
 
-static void bnx2x_netif_start(struct bnx2x *bp)
-{
-	int i;
-
-	if (atomic_dec_and_test(&bp->intr_sem)) {
-		if (netif_running(bp->dev)) {
-			bnx2x_int_enable(bp);
-			for_each_queue(bp, i)
-				napi_enable(&bnx2x_fp(bp, i, napi));
-			if (bp->state == BNX2X_STATE_OPEN)
-				netif_wake_queue(bp->dev);
-		}
-	}
-}
-
-static void bnx2x_netif_stop(struct bnx2x *bp)
-{
-	int i;
-
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-		for_each_queue(bp, i)
-			napi_disable(&bnx2x_fp(bp, i, napi));
-	}
-	bnx2x_int_disable_sync(bp);
-}
-
 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
 {
 	int cnt = 1000;