author     Vladislav Zolotarov <vladz@broadcom.com>	2008-08-13 18:50:00 -0400
committer  David S. Miller <davem@davemloft.net>	2008-08-13 19:02:04 -0400
commit     da5a662a2326931bef25f0e534c9c1702f862399
tree       53bd5af8f3c24cdbb01959a3f3feca3af0bad655 /drivers/net
parent     471de716b782fb55ae0fdc040cf2722caffeeb94
bnx2x: Load/Unload under traffic
Few issues were found when loading and unloading under traffic:
- When receiving a Tx interrupt, call netif_wake_queue only if the queue is
  stopped and the state is open
- Check that interrupts are enabled before doing anything else in the
  msix_fp_int function
- In nic_load, enable the interrupts only when needed and ready for them
- Function stop_leading now returns a status, since it can fail
- Add a 1ms delay when unloading the driver to validate that there are no
  open transactions already started by the FW
- Split the "has work" function into Tx and Rx parts so the same functions
  are used on unload and in the interrupt path (a simplified sketch follows
  after this list)
- Do not request WoL if only resetting the device (this saves the time it
  takes the FW to set the link after reset)
- Fix the device reset after iSCSI boot and before driver load: all internal
  buffers must be cleared before the driver is loaded

Signed-off-by: Vladislav Zolotarov <vladz@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
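A minimal, compilable sketch of the split "has work" idea described above. The struct and function names here (fp_sketch, sketch_*) are stand-ins for illustration only, not the bnx2x definitions, and the le16_to_cpu() conversion the real driver applies to status-block fields is omitted:

/*
 * Sketch: split a combined "has work" test into Tx and Rx halves so the
 * unload path can poll Tx completions alone, while the NAPI poll loop
 * keeps using the combined test.
 */
#include <stdbool.h>
#include <stdint.h>

struct fp_sketch {		/* hypothetical stand-in, not bnx2x_fastpath */
	uint16_t tx_pkt_prod;	/* Tx packets queued by the driver */
	uint16_t tx_pkt_cons;	/* Tx packets completed and freed */
	uint16_t tx_cons_sb;	/* Tx consumer index from the status block */
	uint16_t rx_comp_cons;	/* Rx completions already processed */
	uint16_t rx_cons_sb;	/* Rx consumer index from the status block */
};

static bool sketch_has_tx_work(const struct fp_sketch *fp)
{
	return (fp->tx_pkt_prod != fp->tx_cons_sb) ||
	       (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

static bool sketch_has_rx_work(const struct fp_sketch *fp)
{
	return fp->rx_comp_cons != fp->rx_cons_sb;
}

/* The poll loop would use the combined test ... */
static bool sketch_has_work(const struct fp_sketch *fp)
{
	return sketch_has_rx_work(fp) || sketch_has_tx_work(fp);
}

int main(void)
{
	/* Rx work still pending, but Tx is fully drained: the unload path
	 * can stop waiting even though the combined test reports work. */
	struct fp_sketch fp = {
		.tx_pkt_prod = 5, .tx_pkt_cons = 5, .tx_cons_sb = 5,
		.rx_comp_cons = 2, .rx_cons_sb = 7,
	};

	return (sketch_has_work(&fp) && !sketch_has_tx_work(&fp)) ? 0 : 1;
}

The real macros introduced by this patch (BNX2X_HAS_TX_WORK, BNX2X_HAS_RX_WORK, BNX2X_HAS_WORK) appear in the drivers/net/bnx2x.h hunk below.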
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bnx2x.h         9
-rw-r--r--  drivers/net/bnx2x_main.c  221
-rw-r--r--  drivers/net/bnx2x_reg.h     3
3 files changed, 135 insertions, 98 deletions
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 9cbf0e82ef38..b9aa6f48ae2e 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -274,6 +274,15 @@ struct bnx2x_fastpath {
 
 #define bnx2x_fp(bp, nr, var)		(bp->fp[nr].var)
 
+#define BNX2X_HAS_TX_WORK(fp) \
+			((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || \
+			 (fp->tx_pkt_prod != fp->tx_pkt_cons))
+
+#define BNX2X_HAS_RX_WORK(fp) \
+			(fp->rx_comp_cons != le16_to_cpu(*fp->rx_cons_sb))
+
+#define BNX2X_HAS_WORK(fp)	(BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
+
 
 /* MC hsi */
 #define MAX_FETCH_BD		13	/* HW max BDs per packet */
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 6115161334a7..dfa8c7b00cb7 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -717,21 +717,6 @@ static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 	return rc;
 }
 
-static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
-{
-	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
-
-	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
-		rx_cons_sb++;
-
-	if ((fp->rx_comp_cons != rx_cons_sb) ||
-	    (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
-	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
-		return 1;
-
-	return 0;
-}
-
 static u16 bnx2x_ack_int(struct bnx2x *bp)
 {
 	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
@@ -899,6 +884,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
 	netif_tx_lock(bp->dev);
 
 	if (netif_queue_stopped(bp->dev) &&
+	    (bp->state == BNX2X_STATE_OPEN) &&
 	    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
 		netif_wake_queue(bp->dev);
 
@@ -1617,6 +1603,12 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 	struct net_device *dev = bp->dev;
 	int index = FP_IDX(fp);
 
+	/* Return here if interrupt is disabled */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+		return IRQ_HANDLED;
+	}
+
 	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
 	   index, FP_SB_ID(fp));
 	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
@@ -6230,22 +6222,24 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	if (!BP_NOMCP(bp)) {
 		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 		if (!load_code) {
-			BNX2X_ERR("MCP response failure, unloading\n");
+			BNX2X_ERR("MCP response failure, aborting\n");
 			return -EBUSY;
 		}
 		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
 			return -EBUSY; /* other port in diagnostic mode */
 
 	} else {
+		int port = BP_PORT(bp);
+
 		DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
 		   load_count[0], load_count[1], load_count[2]);
 		load_count[0]++;
-		load_count[1 + BP_PORT(bp)]++;
+		load_count[1 + port]++;
 		DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
 		   load_count[0], load_count[1], load_count[2]);
 		if (load_count[0] == 1)
 			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
-		else if (load_count[1 + BP_PORT(bp)] == 1)
+		else if (load_count[1 + port] == 1)
 			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
 		else
 			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
@@ -6294,9 +6288,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		bnx2x_fp(bp, i, disable_tpa) =
 					((bp->flags & TPA_ENABLE_FLAG) == 0);
 
-	/* Disable interrupt handling until HW is initialized */
-	atomic_set(&bp->intr_sem, 1);
-
 	if (bp->flags & USING_MSIX_FLAG) {
 		rc = bnx2x_req_msix_irqs(bp);
 		if (rc) {
@@ -6323,9 +6314,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 			goto load_error;
 		}
 
-	/* Enable interrupt handling */
-	atomic_set(&bp->intr_sem, 0);
-
 	/* Setup NIC internals and enable interrupts */
 	bnx2x_nic_init(bp, load_code);
 
@@ -6333,7 +6321,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	if (!BP_NOMCP(bp)) {
 		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
 		if (!load_code) {
-			BNX2X_ERR("MCP response failure, unloading\n");
+			BNX2X_ERR("MCP response failure, aborting\n");
 			rc = -EBUSY;
 			goto load_int_disable;
 		}
@@ -6348,11 +6336,12 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	for_each_queue(bp, i)
 		napi_enable(&bnx2x_fp(bp, i, napi));
 
+	/* Enable interrupt handling */
+	atomic_set(&bp->intr_sem, 0);
+
 	rc = bnx2x_setup_leading(bp);
 	if (rc) {
-#ifdef BNX2X_STOP_ON_ERROR
-		bp->panic = 1;
-#endif
+		BNX2X_ERR("Setup leading failed!\n");
 		goto load_stop_netif;
 	}
 
@@ -6386,7 +6375,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		break;
 
 	case LOAD_OPEN:
-		/* IRQ is only requested from bnx2x_open */
 		netif_start_queue(bp->dev);
 		bnx2x_set_rx_mode(bp->dev);
 		if (bp->flags & USING_MSIX_FLAG)
@@ -6458,7 +6446,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
 	return rc;
 }
 
-static void bnx2x_stop_leading(struct bnx2x *bp)
+static int bnx2x_stop_leading(struct bnx2x *bp)
 {
 	u16 dsb_sp_prod_idx;
 	/* if the other port is handling traffic,
@@ -6476,7 +6464,7 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
 			       &(bp->fp[0].state), 1);
 	if (rc) /* timeout */
-		return;
+		return rc;
 
 	dsb_sp_prod_idx = *bp->dsb_sp_prod;
 
@@ -6495,13 +6483,18 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
 				  *bp->dsb_sp_prod, dsb_sp_prod_idx);
 #ifdef BNX2X_STOP_ON_ERROR
 			bnx2x_panic();
+#else
+			rc = -EBUSY;
 #endif
 			break;
 		}
 		cnt--;
+		msleep(1);
 	}
 	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
 	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
+
+	return rc;
 }
 
 static void bnx2x_reset_func(struct bnx2x *bp)
@@ -6586,8 +6579,9 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
 /* msut be called with rtnl_lock */
 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 {
+	int port = BP_PORT(bp);
 	u32 reset_code = 0;
-	int i, cnt;
+	int i, cnt, rc;
 
 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
 
@@ -6604,22 +6598,17 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
-	/* Wait until all fast path tasks complete */
+	/* Wait until tx fast path tasks complete */
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
-#ifdef BNX2X_STOP_ON_ERROR
-#ifdef __powerpc64__
-		DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
-#else
-		DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
-#endif
-		   fp->tpa_queue_used);
-#endif
 		cnt = 1000;
 		smp_rmb();
-		while (bnx2x_has_work(fp)) {
-			msleep(1);
+		while (BNX2X_HAS_TX_WORK(fp)) {
+
+			if (!netif_running(bp->dev))
+				bnx2x_tx_int(fp, 1000);
+
 			if (!cnt) {
 				BNX2X_ERR("timeout waiting for queue[%d]\n",
 					  i);
@@ -6631,14 +6620,13 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 #endif
 			}
 			cnt--;
+			msleep(1);
 			smp_rmb();
 		}
 	}
 
-	/* Wait until all slow path tasks complete */
-	cnt = 1000;
-	while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
-		msleep(1);
+	/* Give HW time to discard old tx messages */
+	msleep(1);
 
 	for_each_queue(bp, i)
 		napi_disable(&bnx2x_fp(bp, i, napi));
@@ -6648,52 +6636,51 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 
-	if (bp->flags & NO_WOL_FLAG)
+	if (unload_mode == UNLOAD_NORMAL)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+	else if (bp->flags & NO_WOL_FLAG) {
 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+		if (CHIP_IS_E1H(bp))
+			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
 
-	else if (bp->wol) {
-		u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+	} else if (bp->wol) {
+		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
 		u8 *mac_addr = bp->dev->dev_addr;
 		u32 val;
-
 		/* The mac address is written to entries 1-4 to
 		   preserve entry 0 which is used by the PMF */
+		u8 entry = (BP_E1HVN(bp) + 1)*8;
+
 		val = (mac_addr[0] << 8) | mac_addr[1];
-		EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val);
+		EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);
 
 		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
 		      (mac_addr[4] << 8) | mac_addr[5];
-		EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4,
-			val);
+		EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
 
 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
 
 	} else
 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
+	if (CHIP_IS_E1H(bp))
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
 	/* Close multi and leading connections
 	   Completions for ramrods are collected in a synchronous way */
 	for_each_nondefault_queue(bp, i)
 		if (bnx2x_stop_multi(bp, i))
 			goto unload_error;
 
-	if (CHIP_IS_E1H(bp))
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0);
-
-	bnx2x_stop_leading(bp);
-#ifdef BNX2X_STOP_ON_ERROR
-	/* If ramrod completion timed out - break here! */
-	if (bp->panic) {
+	rc = bnx2x_stop_leading(bp);
+	if (rc) {
 		BNX2X_ERR("Stop leading failed!\n");
+#ifdef BNX2X_STOP_ON_ERROR
 		return -EBUSY;
-	}
+#else
+		goto unload_error;
 #endif
-
-	if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
-	    (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
-		DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
-		   "state 0x%x fp[0].state 0x%x\n",
-		   bp->state, bp->fp[0].state);
 	}
 
 unload_error:
@@ -6703,12 +6690,12 @@ unload_error:
 		DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
 		   load_count[0], load_count[1], load_count[2]);
 		load_count[0]--;
-		load_count[1 + BP_PORT(bp)]--;
+		load_count[1 + port]--;
 		DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
 		   load_count[0], load_count[1], load_count[2]);
 		if (load_count[0] == 0)
 			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
-		else if (load_count[1 + BP_PORT(bp)] == 0)
+		else if (load_count[1 + port] == 0)
 			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
 		else
 			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -6780,50 +6767,86 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 		/* Check if it is the UNDI driver
 		 * UNDI driver initializes CID offset for normal bell to 0x7
 		 */
+		bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
 		if (val == 0x7) {
 			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-			/* save our func and fw_seq */
+			/* save our func */
 			int func = BP_FUNC(bp);
-			u16 fw_seq = bp->fw_seq;
+			u32 swap_en;
+			u32 swap_val;
 
 			BNX2X_DEV_INFO("UNDI is active! reset device\n");
 
 			/* try unload UNDI on port 0 */
 			bp->func = 0;
-			bp->fw_seq = (SHMEM_RD(bp,
-					func_mb[bp->func].drv_mb_header) &
+			bp->fw_seq =
+			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
 				DRV_MSG_SEQ_NUMBER_MASK);
-
 			reset_code = bnx2x_fw_command(bp, reset_code);
-			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
 
 			/* if UNDI is loaded on the other port */
 			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
 
+				/* send "DONE" for previous unload */
+				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+
+				/* unload UNDI on port 1 */
 				bp->func = 1;
-				bp->fw_seq = (SHMEM_RD(bp,
-					func_mb[bp->func].drv_mb_header) &
+				bp->fw_seq =
+			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
 				DRV_MSG_SEQ_NUMBER_MASK);
-
-				bnx2x_fw_command(bp,
-					DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
-				bnx2x_fw_command(bp,
-					DRV_MSG_CODE_UNLOAD_DONE);
-
-				/* restore our func and fw_seq */
-				bp->func = func;
-				bp->fw_seq = fw_seq;
+				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+				bnx2x_fw_command(bp, reset_code);
 			}
 
+			REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
+				    HC_REG_CONFIG_0), 0x1000);
+
+			/* close input traffic and wait for it */
+			/* Do not rcv packets to BRB */
+			REG_WR(bp,
+			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
+				NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
+			/* Do not direct rcv packets that are not for MCP to
+			 * the BRB */
+			REG_WR(bp,
+			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
+				NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+			/* clear AEU */
+			REG_WR(bp,
+			       (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+				MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
+			msleep(10);
+
+			/* save NIG port swap info */
+			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 			/* reset device */
 			REG_WR(bp,
 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
-			       0xd3ffff7f);
+			       0xd3ffffff);
 			REG_WR(bp,
 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 			       0x1403);
+			/* take the NIG out of reset and restore swap values */
+			REG_WR(bp,
+			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
+			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
+			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
+
+			/* send unload done to the MCP */
+			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+
+			/* restore our func and fw_seq */
+			bp->func = func;
+			bp->fw_seq =
+			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+				DRV_MSG_SEQ_NUMBER_MASK);
 		}
+		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_UNDI);
 	}
 }
 
@@ -7384,6 +7407,9 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 	int func = BP_FUNC(bp);
 	int rc;
 
+	/* Disable interrupt handling until HW is initialized */
+	atomic_set(&bp->intr_sem, 1);
+
 	mutex_init(&bp->port.phy_mutex);
 
 	INIT_WORK(&bp->sp_task, bnx2x_sp_task);
@@ -9163,17 +9189,16 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 
 	bnx2x_update_fpsb_idx(fp);
 
-	if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
-	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
+	if (BNX2X_HAS_TX_WORK(fp))
 		bnx2x_tx_int(fp, budget);
 
-	if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
+	if (BNX2X_HAS_RX_WORK(fp))
 		work_done = bnx2x_rx_int(fp, budget);
 
-	rmb(); /* bnx2x_has_work() reads the status block */
+	rmb(); /* BNX2X_HAS_WORK() reads the status block */
 
 	/* must not complete if we consumed full budget */
-	if ((work_done < budget) && !bnx2x_has_work(fp)) {
+	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
 
 #ifdef BNX2X_STOP_ON_ERROR
 poll_panic:
@@ -9408,7 +9433,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
 			   "silently dropping this SKB\n");
 			dev_kfree_skb_any(skb);
-			return 0;
+			return NETDEV_TX_OK;
 		}
 	}
 
@@ -10200,7 +10225,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	netif_device_detach(dev);
 
-	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
 
 	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
 
@@ -10233,7 +10258,7 @@ static int bnx2x_resume(struct pci_dev *pdev)
 	bnx2x_set_power_state(bp, PCI_D0);
 	netif_device_attach(dev);
 
-	rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+	rc = bnx2x_nic_load(bp, LOAD_OPEN);
 
 	rtnl_unlock();
 
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 40c3a7735d25..b5313c209caa 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -1677,6 +1677,7 @@
 /* [RW 8] init credit counter for port0 in LLH */
 #define NIG_REG_LLH0_XCM_INIT_CREDIT				 0x10554
 #define NIG_REG_LLH0_XCM_MASK					 0x10130
+#define NIG_REG_LLH1_BRB1_DRV_MASK				 0x10248
 /* [RW 1] send to BRB1 if no match on any of RMP rules. */
 #define NIG_REG_LLH1_BRB1_NOT_MCP				 0x102dc
 /* [RW 2] Determine the classification participants. 0: no classification.1:
@@ -4962,6 +4963,7 @@
 #define MISC_REGISTERS_GPIO_PORT_SHIFT				 4
 #define MISC_REGISTERS_GPIO_SET_POS				 8
 #define MISC_REGISTERS_RESET_REG_1_CLEAR			 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_NIG			 (0x1<<7)
 #define MISC_REGISTERS_RESET_REG_1_SET				 0x584
 #define MISC_REGISTERS_RESET_REG_2_CLEAR			 0x598
 #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0			 (0x1<<0)
@@ -4997,6 +4999,7 @@
 #define HW_LOCK_RESOURCE_8072_MDIO				 0
 #define HW_LOCK_RESOURCE_GPIO					 1
 #define HW_LOCK_RESOURCE_SPIO					 2
+#define HW_LOCK_RESOURCE_UNDI					 5
 #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR			 (1<<18)
 #define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT			 (1<<31)
 #define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT			 (1<<9)