Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r--	drivers/net/bnx2x_main.c	89
1 file changed, 46 insertions(+), 43 deletions(-)
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 88da14c141f4..3ca9c969a688 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1377,7 +1377,7 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 			     u16 bd_prod, u16 rx_comp_prod,
 			     u16 rx_sge_prod)
 {
-	struct tstorm_eth_rx_producers rx_prods = {0};
+	struct ustorm_eth_rx_producers rx_prods = {0};
 	int i;
 
 	/* Update producers */
@@ -1395,9 +1395,9 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 	 */
 	wmb();
 
-	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
-		REG_WR(bp, BAR_TSTRORM_INTMEM +
-		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
+	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
+		REG_WR(bp, BAR_USTRORM_INTMEM +
+		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
 		       ((u32 *)&rx_prods)[i]);
 
 	mmiowb(); /* keep prod updates ordered */
@@ -2915,7 +2915,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 		return IRQ_HANDLED;
 	}
 
-	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
+	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -3043,7 +3043,7 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
 		int rc;
 
 		ramrod_data.drv_counter = bp->stats_counter++;
-		ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
+		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
 		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
 
 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
@@ -4240,10 +4240,6 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
 		 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
 					     U_SB_ETH_RX_CQ_INDEX),
 		 bp->rx_ticks ? 0 : 1);
-	REG_WR16(bp, BAR_USTRORM_INTMEM +
-		 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
-					     U_SB_ETH_RX_BD_INDEX),
-		 bp->rx_ticks ? 0 : 1);
 
 	/* HC_INDEX_C_ETH_TX_CQ_CONS */
 	REG_WR8(bp, BAR_CSTRORM_INTMEM +
@@ -4489,25 +4485,14 @@ static void bnx2x_init_context(struct bnx2x *bp)
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		u8 sb_id = FP_SB_ID(fp);
 
-		context->xstorm_st_context.tx_bd_page_base_hi =
-						U64_HI(fp->tx_desc_mapping);
-		context->xstorm_st_context.tx_bd_page_base_lo =
-						U64_LO(fp->tx_desc_mapping);
-		context->xstorm_st_context.db_data_addr_hi =
-						U64_HI(fp->tx_prods_mapping);
-		context->xstorm_st_context.db_data_addr_lo =
-						U64_LO(fp->tx_prods_mapping);
-		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
-				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
-
 		context->ustorm_st_context.common.sb_index_numbers =
 						BNX2X_RX_SB_INDEX_NUM;
 		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
 		context->ustorm_st_context.common.status_block_id = sb_id;
 		context->ustorm_st_context.common.flags =
 			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
-		context->ustorm_st_context.common.mc_alignment_size =
-						BCM_RX_ETH_PAYLOAD_ALIGN;
+		context->ustorm_st_context.common.mc_alignment_log_size =
+						6 /*BCM_RX_ETH_PAYLOAD_ALIGN*/;
 		context->ustorm_st_context.common.bd_buff_size =
 						bp->rx_buf_size;
 		context->ustorm_st_context.common.bd_page_base_hi =
@@ -4519,13 +4504,29 @@ static void bnx2x_init_context(struct bnx2x *bp)
 				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
 				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
 			context->ustorm_st_context.common.sge_buff_size =
-						(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
+				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
+					 (u32)0xffff);
 			context->ustorm_st_context.common.sge_page_base_hi =
 						U64_HI(fp->rx_sge_mapping);
 			context->ustorm_st_context.common.sge_page_base_lo =
 						U64_LO(fp->rx_sge_mapping);
 		}
 
+		context->ustorm_ag_context.cdu_usage =
+			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
+					       CDU_REGION_NUMBER_UCM_AG,
+					       ETH_CONNECTION_TYPE);
+
+		context->xstorm_st_context.tx_bd_page_base_hi =
+						U64_HI(fp->tx_desc_mapping);
+		context->xstorm_st_context.tx_bd_page_base_lo =
+						U64_LO(fp->tx_desc_mapping);
+		context->xstorm_st_context.db_data_addr_hi =
+						U64_HI(fp->tx_prods_mapping);
+		context->xstorm_st_context.db_data_addr_lo =
+						U64_LO(fp->tx_prods_mapping);
+		context->xstorm_st_context.statistics_data = (fp->cl_id |
+				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
 		context->cstorm_st_context.sb_index_number =
 						C_SB_ETH_TX_CQ_INDEX;
 		context->cstorm_st_context.status_block_id = sb_id;
@@ -4534,10 +4535,6 @@ static void bnx2x_init_context(struct bnx2x *bp)
 			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
 					       CDU_REGION_NUMBER_XCM_AG,
 					       ETH_CONNECTION_TYPE);
-		context->ustorm_ag_context.cdu_usage =
-			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
-					       CDU_REGION_NUMBER_UCM_AG,
-					       ETH_CONNECTION_TYPE);
 	}
 }
 
@@ -4569,7 +4566,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
 #ifdef BCM_VLAN
 	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
 		tstorm_client.config_flags |=
-				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
+				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
 		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
 	}
 #endif
@@ -4690,6 +4687,9 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 		tstorm_config.config_flags = MULTI_FLAGS;
 		tstorm_config.rss_result_mask = MULTI_MASK;
 	}
+	if (IS_E1HMF(bp))
+		tstorm_config.config_flags |=
+				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
 
 	tstorm_config.leading_client_id = BP_L_ID(bp);
 
@@ -5338,8 +5338,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
 		REG_WR(bp, i, 0xc0cac01a);
 		/* TODO: replace with something meaningful */
 	}
-	if (CHIP_IS_E1H(bp))
-		bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
+	bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
 	REG_WR(bp, SRC_REG_SOFT_RST, 0);
 
 	if (sizeof(union cdu_context) != 1024)
@@ -5358,6 +5357,11 @@ static int bnx2x_init_common(struct bnx2x *bp)
 
 	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
 	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
+	/* enable context validation interrupt from CFC */
+	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+
+	/* set the thresholds to prevent CFC/CDU race */
+	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
 
 	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
 	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
@@ -5485,6 +5489,8 @@ static int bnx2x_init_port(struct bnx2x *bp)
 	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
 #endif
 	/* Port CMs come here */
+	bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
+			     (port ? XCM_PORT1_END : XCM_PORT0_END));
 
 	/* Port QM comes here */
 #ifdef BCM_ISCSI
@@ -5675,9 +5681,6 @@ static int bnx2x_init_func(struct bnx2x *bp)
 	}
 	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
 
-	if (CHIP_IS_E1H(bp))
-		REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
-
 	/* Reset PCIE errors for debug */
 	REG_WR(bp, 0x2114, 0xffffffff);
 	REG_WR(bp, 0x2120, 0xffffffff);
@@ -6171,7 +6174,7 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
 	 * unicasts 0-31:port0 32-63:port1
 	 * multicast 64-127:port0 128-191:port1
 	 */
-	config->hdr.length_6b = 2;
+	config->hdr.length = 2;
 	config->hdr.offset = port ? 32 : 0;
 	config->hdr.client_id = BP_CL_ID(bp);
 	config->hdr.reserved1 = 0;
@@ -6229,7 +6232,7 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
 	 * unicasts: by func number
 	 * multicast: 20+FUNC*20, 20 each
 	 */
-	config->hdr.length_6b = 1;
+	config->hdr.length = 1;
 	config->hdr.offset = BP_FUNC(bp);
 	config->hdr.client_id = BP_CL_ID(bp);
 	config->hdr.reserved1 = 0;
@@ -6764,10 +6767,10 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
 		bnx2x_set_mac_addr_e1(bp, 0);
 
-		for (i = 0; i < config->hdr.length_6b; i++)
+		for (i = 0; i < config->hdr.length; i++)
 			CAM_INVALIDATE(config->config_table[i]);
 
-		config->hdr.length_6b = i;
+		config->hdr.length = i;
 		if (CHIP_REV_IS_SLOW(bp))
 			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
 		else
@@ -8959,7 +8962,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
 	if (!netif_running(bp->dev))
 		return -ENODEV;
 
-	config->hdr.length_6b = 0;
+	config->hdr.length = 0;
 	if (CHIP_IS_E1(bp))
 		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
 	else
@@ -9921,7 +9924,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
 						config->config_table[i].
 						cam_entry.lsb_mac_addr);
 			}
-			old = config->hdr.length_6b;
+			old = config->hdr.length;
 			if (old > i) {
 				for (; i < old; i++) {
 					if (CAM_IS_INVALID(config->
@@ -9940,9 +9943,9 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
 			else
 				offset = BNX2X_MAX_MULTICAST*(1 + port);
 
-			config->hdr.length_6b = i;
+			config->hdr.length = i;
 			config->hdr.offset = offset;
-			config->hdr.client_id = BP_CL_ID(bp);
+			config->hdr.client_id = bp->fp->cl_id;
 			config->hdr.reserved1 = 0;
 
 			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
@@ -10487,7 +10490,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 		struct mac_configuration_cmd *config =
 						bnx2x_sp(bp, mcast_config);
 
-		for (i = 0; i < config->hdr.length_6b; i++)
+		for (i = 0; i < config->hdr.length; i++)
 			CAM_INVALIDATE(config->config_table[i]);
 	}
 