Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c  162
1 file changed, 115 insertions, 47 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 85dd294aeaba..621ab281ed89 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -408,8 +408,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
 	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
 
 	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
-	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
-		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
+		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
 	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
 
 #ifdef __BIG_ENDIAN
@@ -1417,7 +1417,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
 	if (!CHIP_IS_E1(bp)) {
 		/* init leading/trailing edge */
 		if (IS_MF(bp)) {
-			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
 			if (bp->port.pmf)
 				/* enable nig and gpio3 attention */
 				val |= 0x1100;
@@ -1469,7 +1469,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
 
 	/* init leading/trailing edge */
 	if (IS_MF(bp)) {
-		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
 		if (bp->port.pmf)
 			/* enable nig and gpio3 attention */
 			val |= 0x1100;
@@ -2285,7 +2285,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
 	int vn;
 
 	bp->vn_weight_sum = 0;
-	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
 		u32 vn_cfg = bp->mf_config[vn];
 		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
 				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
@@ -2318,12 +2318,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
 					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+	return 2 * vn + BP_PORT(bp);
+}
+
 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
 {
 	struct rate_shaping_vars_per_vn m_rs_vn;
 	struct fairness_vars_per_vn m_fair_vn;
 	u32 vn_cfg = bp->mf_config[vn];
-	int func = 2*vn + BP_PORT(bp);
+	int func = func_by_vn(bp, vn);
 	u16 vn_min_rate, vn_max_rate;
 	int i;
 
@@ -2420,7 +2426,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
 	 *
 	 *      and there are 2 functions per port
 	 */
-	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
 		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
 
 		if (func >= E1H_FUNC_MAX)
@@ -2452,7 +2458,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 
 	/* calculate and set min-max rate for each vn */
 	if (bp->port.pmf)
-		for (vn = VN_0; vn < E1HVN_MAX; vn++)
+		for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
 			bnx2x_init_vn_minmax(bp, vn);
 
 	/* always enable rate shaping and fairness */
@@ -2471,16 +2477,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 
 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
 {
-	int port = BP_PORT(bp);
 	int func;
 	int vn;
 
 	/* Set the attention towards other drivers on the same port */
-	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
-		if (vn == BP_E1HVN(bp))
+	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+		if (vn == BP_VN(bp))
 			continue;
 
-		func = ((vn << 1) | port);
+		func = func_by_vn(bp, vn);
 		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
 		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
 	}
@@ -2575,7 +2580,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
 	bnx2x_dcbx_pmf_update(bp);
 
 	/* enable nig attention */
-	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
+	val = (0xff0f | (1 << (BP_VN(bp) + 4)));
 	if (bp->common.int_block == INT_BLOCK_HC) {
 		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
 		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
@@ -2754,8 +2759,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
 	u16 tpa_agg_size = 0;
 
 	if (!fp->disable_tpa) {
-		pause->sge_th_hi = 250;
-		pause->sge_th_lo = 150;
+		pause->sge_th_lo = SGE_TH_LO(bp);
+		pause->sge_th_hi = SGE_TH_HI(bp);
+
+		/* validate SGE ring has enough to cross high threshold */
+		WARN_ON(bp->dropless_fc &&
+			pause->sge_th_hi + FW_PREFETCH_CNT >
+			MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
+
 		tpa_agg_size = min_t(u32,
 			(min_t(u32, 8, MAX_SKB_FRAGS) *
 			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
@@ -2769,10 +2780,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
 
 	/* pause - not for e1 */
 	if (!CHIP_IS_E1(bp)) {
-		pause->bd_th_hi = 350;
-		pause->bd_th_lo = 250;
-		pause->rcq_th_hi = 350;
-		pause->rcq_th_lo = 250;
+		pause->bd_th_lo = BD_TH_LO(bp);
+		pause->bd_th_hi = BD_TH_HI(bp);
+
+		pause->rcq_th_lo = RCQ_TH_LO(bp);
+		pause->rcq_th_hi = RCQ_TH_HI(bp);
+		/*
+		 * validate that rings have enough entries to cross
+		 * high thresholds
+		 */
+		WARN_ON(bp->dropless_fc &&
+			pause->bd_th_hi + FW_PREFETCH_CNT >
+			bp->rx_ring_size);
+		WARN_ON(bp->dropless_fc &&
+			pause->rcq_th_hi + FW_PREFETCH_CNT >
+			NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
 
 		pause->pri_map = 1;
 	}
@@ -2800,9 +2822,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
 	 * For PF Clients it should be the maximum avaliable number.
 	 * VF driver(s) may want to define it to a smaller value.
 	 */
-	rxq_init->max_tpa_queues =
-		(CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-		ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+	rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
 
 	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
 	rxq_init->fw_sb_id = fp->fw_sb_id;
@@ -4804,6 +4824,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
 	hc_sm->time_to_expire = 0xFFFFFFFF;
 }
 
+
+/* allocates state machine ids. */
+static inline
+void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+{
+	/* zero out state machine indices */
+	/* rx indices */
+	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+
+	/* tx indices */
+	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
+
+	/* map indices */
+	/* rx indices */
+	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
+		SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+
+	/* tx indices */
+	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
+		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
+		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
+		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
+		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+}
+
 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
 			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
 {
@@ -4835,6 +4886,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
 		hc_sm_p = sb_data_e2.common.state_machine;
 		sb_data_p = (u32 *)&sb_data_e2;
 		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+		bnx2x_map_sb_state_machines(sb_data_e2.index_data);
 	} else {
 		memset(&sb_data_e1x, 0,
 		       sizeof(struct hc_status_block_data_e1x));
@@ -4849,6 +4901,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
 		hc_sm_p = sb_data_e1x.common.state_machine;
 		sb_data_p = (u32 *)&sb_data_e1x;
 		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+		bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
 	}
 
 	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
@@ -5798,7 +5851,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
 	 * take the UNDI lock to protect undi_unload flow from accessing
 	 * registers while we're resetting the chip
 	 */
-	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 
 	bnx2x_reset_common(bp);
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
@@ -5810,7 +5863,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
 	}
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
 
-	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 
 	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
 
@@ -6667,12 +6720,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
 		if (CHIP_MODE_IS_4_PORT(bp))
 			dsb_idx = BP_FUNC(bp);
 		else
-			dsb_idx = BP_E1HVN(bp);
+			dsb_idx = BP_VN(bp);
 
 		prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
 			       IGU_BC_BASE_DSB_PROD + dsb_idx :
 			       IGU_NORM_BASE_DSB_PROD + dsb_idx);
 
+		/*
+		 * IGU producers come in chunks of E1HVN_MAX (4);
+		 * it does not matter what the current chip mode is.
+		 */
 		for (i = 0; i < (num_segs * E1HVN_MAX);
 		     i += E1HVN_MAX) {
 			addr = IGU_REG_PROD_CONS_MEMORY +
@@ -7566,7 +7623,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
 		u32 val;
 		/* The mac address is written to entries 1-4 to
 		   preserve entry 0 which is used by the PMF */
-		u8 entry = (BP_E1HVN(bp) + 1)*8;
+		u8 entry = (BP_VN(bp) + 1)*8;
 
 		val = (mac_addr[0] << 8) | mac_addr[1];
 		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
@@ -8542,10 +8599,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 	/* Check if there is any driver already loaded */
 	val = REG_RD(bp, MISC_REG_UNPREPARED);
 	if (val == 0x1) {
-		/* Check if it is the UNDI driver
+
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+		/*
+		 * Check if it is the UNDI driver
 		 * UNDI driver initializes CID offset for normal bell to 0x7
 		 */
-		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
 		if (val == 0x7) {
 			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -8583,9 +8642,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 			bnx2x_fw_command(bp, reset_code, 0);
 		}
 
-		/* now it's safe to release the lock */
-		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
-
 		bnx2x_undi_int_disable(bp);
 		port = BP_PORT(bp);
 
@@ -8635,8 +8691,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 			bp->fw_seq =
 			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
 			       DRV_MSG_SEQ_NUMBER_MASK);
-		} else
-			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+		}
+
+		/* now it's safe to release the lock */
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 	}
 }
 
@@ -8773,13 +8831,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
 {
 	int pfid = BP_FUNC(bp);
-	int vn = BP_E1HVN(bp);
 	int igu_sb_id;
 	u32 val;
 	u8 fid, igu_sb_cnt = 0;
 
 	bp->igu_base_sb = 0xff;
 	if (CHIP_INT_MODE_IS_BC(bp)) {
+		int vn = BP_VN(bp);
 		igu_sb_cnt = bp->igu_sb_cnt;
 		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
 			FP_SB_MAX_E1x;
@@ -9410,6 +9468,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 		bp->igu_base_sb = 0;
 	} else {
 		bp->common.int_block = INT_BLOCK_IGU;
+
+		/* do not allow device reset during IGU info processing */
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
 		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
 
 		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
@@ -9441,6 +9503,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 
 		bnx2x_get_igu_cam_info(bp);
 
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 	}
 
 	/*
@@ -9467,7 +9530,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 
 	bp->mf_ov = 0;
 	bp->mf_mode = 0;
-	vn = BP_E1HVN(bp);
+	vn = BP_VN(bp);
 
 	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
 		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -9587,13 +9650,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 	/* port info */
 	bnx2x_get_port_hwinfo(bp);
 
-	if (!BP_NOMCP(bp)) {
-		bp->fw_seq =
-		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
-		     DRV_MSG_SEQ_NUMBER_MASK);
-		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-	}
-
 	/* Get MAC addresses */
 	bnx2x_get_mac_hwinfo(bp);
 
@@ -9759,6 +9815,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 	if (!BP_NOMCP(bp))
 		bnx2x_undi_unload(bp);
 
+	/* init fw_seq after undi_unload! */
+	if (!BP_NOMCP(bp)) {
+		bp->fw_seq =
+		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+		     DRV_MSG_SEQ_NUMBER_MASK);
+		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+	}
+
 	if (CHIP_REV_IS_FPGA(bp))
 		dev_err(&bp->pdev->dev, "FPGA detected\n");
 
@@ -10253,17 +10317,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 	/* clean indirect addresses */
 	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
 			       PCICFG_VENDOR_ID_OFFSET);
-	/* Clean the following indirect addresses for all functions since it
+	/*
+	 * Clean the following indirect addresses for all functions since it
 	 * is not used by the driver.
 	 */
 	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
 	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
 	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
 	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
-	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
-	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
-	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
-	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+
+	if (CHIP_IS_E1x(bp)) {
+		REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+		REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+		REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+		REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+	}
 
 	/*
 	 * Enable internal target-read (in case we are probed after PF FLR).