author		Vladislav Zolotarov <vladz@broadcom.com>	2011-02-06 14:25:41 -0500
committer	David S. Miller <davem@davemloft.net>	2011-02-06 14:25:41 -0500
commit		6e30dd4e3935ddb4e7dd27d5be7a6e5504e64a27
tree		9ce171d9f20bec820fe1ca0ae565ffb474e8214f
parent		a8c94b9188bf6012d9b6c3d37f324bd6c7d2924e
bnx2x: Proper netdev->ndo_set_rx_mode() implementation.
Completed the bnx2x_set_rx_mode() to a proper netdev->ndo_set_rx_mode
implementation:

- Added the missing configuration of the unicast MAC address list.
- Changed the DMAE lock (bp->dmae_mutex, now bp->dmae_lock) from a mutex
  to a spinlock, since it is now taken under netdev->addr_list_lock.

Signed-off-by: Vladislav Zolotarov <vladz@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
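Why the lock type matters: the core calls ndo_set_rx_mode() with
netdev->addr_list_lock (a spinlock) held, i.e. in atomic context, so any
lock taken on that path must be non-sleeping. A minimal userspace analogy
of the constraint (pthread primitives stand in for the kernel ones; this
is an illustration, not kernel code):

	/* Build: cc -o rxmode rxmode.c -lpthread
	 * pthread_spinlock_t stands in for netdev->addr_list_lock and the
	 * new bp->dmae_lock; a pthread mutex (which may block) would stand
	 * in for the old bp->dmae_mutex this patch removes from the path. */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_spinlock_t addr_list_lock; /* held by the net core */
	static pthread_spinlock_t dmae_lock;      /* new: never sleeps */

	static void issue_dmae(void)
	{
		pthread_spin_lock(&dmae_lock);    /* safe in atomic context */
		/* ... program the DMAE channel ... */
		pthread_spin_unlock(&dmae_lock);
	}

	static void set_rx_mode(void)             /* ndo_set_rx_mode() stand-in */
	{
		pthread_spin_lock(&addr_list_lock); /* as the core does */
		issue_dmae();                     /* must not sleep in here */
		pthread_spin_unlock(&addr_list_lock);
	}

	int main(void)
	{
		pthread_spin_init(&addr_list_lock, PTHREAD_PROCESS_PRIVATE);
		pthread_spin_init(&dmae_lock, PTHREAD_PROCESS_PRIVATE);
		set_rx_mode();
		puts("rx-mode path completed without sleeping");
		return 0;
	}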
-rw-r--r--	drivers/net/bnx2x/bnx2x.h	|  13
-rw-r--r--	drivers/net/bnx2x/bnx2x_cmn.c	|  17
-rw-r--r--	drivers/net/bnx2x/bnx2x_main.c	| 444
3 files changed, 369 insertions(+), 105 deletions(-)
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index c29b37e5e743..236d79a80624 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -129,6 +129,7 @@ void bnx2x_panic_dump(struct bnx2x *bp);
 #endif
 
 #define bnx2x_mc_addr(ha)	((ha)->addr)
+#define bnx2x_uc_addr(ha)	((ha)->addr)
 
 #define U64_LO(x)	(u32)(((u64)(x)) & 0xffffffff)
 #define U64_HI(x)	(u32)(((u64)(x)) >> 32)
@@ -816,6 +817,7 @@ struct bnx2x_slowpath {
 	struct eth_stats_query fw_stats;
 	struct mac_configuration_cmd mac_config;
 	struct mac_configuration_cmd mcast_config;
+	struct mac_configuration_cmd uc_mac_config;
 	struct client_init_ramrod_data client_init_data;
 
 	/* used by dmae command executer */
@@ -944,7 +946,7 @@ struct bnx2x {
 	struct eth_spe *spq_prod_bd;
 	struct eth_spe *spq_last_bd;
 	__le16 *dsb_sp_prod;
-	atomic_t spq_left; /* serialize spq */
+	atomic_t cq_spq_left; /* ETH_XXX ramrods credit */
 	/* used to synchronize spq accesses */
 	spinlock_t spq_lock;
 
@@ -954,6 +956,7 @@ struct bnx2x {
 	u16 eq_prod;
 	u16 eq_cons;
 	__le16 *eq_cons_sb;
+	atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
 
 	/* Flags for marking that there is a STAT_QUERY or
 	   SET_MAC ramrod pending */
@@ -1139,7 +1142,7 @@ struct bnx2x {
 
 	int dmae_ready;
 	/* used to synchronize dmae accesses */
-	struct mutex dmae_mutex;
+	spinlock_t dmae_lock;
 
 	/* used to protect the FW mail box */
 	struct mutex fw_mb_mutex;
@@ -1455,6 +1458,12 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
 void bnx2x_calc_fc_adv(struct bnx2x *bp);
 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 		  u32 data_hi, u32 data_lo, int common);
+
+/* Clears multicast and unicast list configuration in the chip. */
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_uc_list(struct bnx2x *bp);
+
 void bnx2x_update_coalesce(struct bnx2x *bp);
 int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
 
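The header change above splits the old single spq_left credit into two
pools: cq_spq_left for ETH (fast-path completion) ramrods and eq_spq_left
for COMMON (event queue) ramrods. A toy model of that accounting with C11
atomics follows; the constants and helper names are illustrative stand-ins,
and the real driver serializes posting under bp->spq_lock:

	/* Toy model of the split slow-path credit accounting: ETH ramrods
	 * draw from cq_spq_left, COMMON ramrods from eq_spq_left, and each
	 * completion returns its credit.  MAX_SPQ_PENDING / NUM_EQ_DESC are
	 * stand-in values, not the driver's. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_SPQ_PENDING 8   /* stand-in value */
	#define NUM_EQ_DESC     64  /* stand-in value */

	static atomic_int cq_spq_left = MAX_SPQ_PENDING;
	static atomic_int eq_spq_left = NUM_EQ_DESC - 1;

	static bool sp_post(bool common)
	{
		atomic_int *credit = common ? &eq_spq_left : &cq_spq_left;

		/* mirrors bnx2x_sp_post(): refuse to post on an empty pool
		 * (the driver does this check under bp->spq_lock) */
		if (atomic_load(credit) == 0)
			return false;
		atomic_fetch_sub(credit, 1);
		return true;
	}

	static void sp_completion(bool common)
	{
		/* mirrors bnx2x_sp_event()/bnx2x_eq_int(): return credit */
		atomic_fetch_add(common ? &eq_spq_left : &cq_spq_left, 1);
	}

	int main(void)
	{
		for (int i = 0; i < MAX_SPQ_PENDING + 1; i++)
			if (!sp_post(false))
				printf("ETH post %d refused: SPQ credit exhausted\n", i);
		sp_completion(false);
		printf("after one completion, cq_spq_left=%d\n",
		       atomic_load(&cq_spq_left));
		return 0;
	}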
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 844afcec79b4..6fac8e183c59 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -1452,28 +1452,35 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	bnx2x_set_eth_mac(bp, 1);
 
+	/* Clear MC configuration */
+	if (CHIP_IS_E1(bp))
+		bnx2x_invalidate_e1_mc_list(bp);
+	else
+		bnx2x_invalidate_e1h_mc_list(bp);
+
+	/* Clear UC lists configuration */
+	bnx2x_invalidate_uc_list(bp);
+
 	if (bp->port.pmf)
 		bnx2x_initial_phy_init(bp, load_mode);
 
+	/* Initialize Rx filtering */
+	bnx2x_set_rx_mode(bp->dev);
+
 	/* Start fast path */
 	switch (load_mode) {
 	case LOAD_NORMAL:
 		/* Tx queue should be only reenabled */
 		netif_tx_wake_all_queues(bp->dev);
 		/* Initialize the receive filter. */
-		bnx2x_set_rx_mode(bp->dev);
 		break;
 
 	case LOAD_OPEN:
 		netif_tx_start_all_queues(bp->dev);
 		smp_mb__after_clear_bit();
-		/* Initialize the receive filter. */
-		bnx2x_set_rx_mode(bp->dev);
 		break;
 
 	case LOAD_DIAG:
-		/* Initialize the receive filter. */
-		bnx2x_set_rx_mode(bp->dev);
 		bp->state = BNX2X_STATE_DIAG;
 		break;
 
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 722450631302..ccf2c8c61a61 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -586,7 +586,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
 		   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 
 	/* lock the dmae channel */
-	mutex_lock(&bp->dmae_mutex);
+	spin_lock_bh(&bp->dmae_lock);
 
 	/* reset completion */
 	*wb_comp = 0;
@@ -617,7 +617,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
 		   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 
 unlock:
-	mutex_unlock(&bp->dmae_mutex);
+	spin_unlock_bh(&bp->dmae_lock);
 	return rc;
 }
 
@@ -1397,7 +1397,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 	}
 
 	smp_mb__before_atomic_inc();
-	atomic_inc(&bp->spq_left);
+	atomic_inc(&bp->cq_spq_left);
 	/* push the change in fp->state and towards the memory */
 	smp_wmb();
 
@@ -2732,11 +2732,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 
 	spin_lock_bh(&bp->spq_lock);
 
-	if (!atomic_read(&bp->spq_left)) {
-		BNX2X_ERR("BUG! SPQ ring full!\n");
-		spin_unlock_bh(&bp->spq_lock);
-		bnx2x_panic();
-		return -EBUSY;
+	if (common) {
+		if (!atomic_read(&bp->eq_spq_left)) {
+			BNX2X_ERR("BUG! EQ ring full!\n");
+			spin_unlock_bh(&bp->spq_lock);
+			bnx2x_panic();
+			return -EBUSY;
+		}
+	} else if (!atomic_read(&bp->cq_spq_left)) {
+		BNX2X_ERR("BUG! SPQ ring full!\n");
+		spin_unlock_bh(&bp->spq_lock);
+		bnx2x_panic();
+		return -EBUSY;
 	}
 
 	spe = bnx2x_sp_get_next(bp);
@@ -2767,20 +2774,26 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
 
 	/* stats ramrod has it's own slot on the spq */
-	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
+	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
 		/* It's ok if the actual decrement is issued towards the memory
 		 * somewhere between the spin_lock and spin_unlock. Thus no
 		 * more explict memory barrier is needed.
 		 */
-		atomic_dec(&bp->spq_left);
+		if (common)
+			atomic_dec(&bp->eq_spq_left);
+		else
+			atomic_dec(&bp->cq_spq_left);
+	}
+
 
 	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
 	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
-	   "type(0x%x) left %x\n",
+	   "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
 	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
 	   (u32)(U64_LO(bp->spq_mapping) +
 	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
-	   HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
+	   HW_CID(bp, cid), data_hi, data_lo, type,
+	   atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
 
 	bnx2x_sp_prod_update(bp);
 	spin_unlock_bh(&bp->spq_lock);
@@ -3692,8 +3705,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
 	sw_cons = bp->eq_cons;
 	sw_prod = bp->eq_prod;
 
-	DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
-	   hw_cons, sw_cons, atomic_read(&bp->spq_left));
+	DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
+	   hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
 
 	for (; sw_cons != hw_cons;
 	     sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
@@ -3758,13 +3771,15 @@ static void bnx2x_eq_int(struct bnx2x *bp)
 	case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
 	case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
 		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
-		bp->set_mac_pending = 0;
+		if (elem->message.data.set_mac_event.echo)
+			bp->set_mac_pending = 0;
 		break;
 
 	case (EVENT_RING_OPCODE_SET_MAC |
 	      BNX2X_STATE_CLOSING_WAIT4_HALT):
 		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
-		bp->set_mac_pending = 0;
+		if (elem->message.data.set_mac_event.echo)
+			bp->set_mac_pending = 0;
 		break;
 	default:
 		/* unknown event log error and continue */
@@ -3776,7 +3791,7 @@ next_spqe:
 	} /* for */
 
 	smp_mb__before_atomic_inc();
-	atomic_add(spqe_cnt, &bp->spq_left);
+	atomic_add(spqe_cnt, &bp->eq_spq_left);
 
 	bp->eq_cons = sw_cons;
 	bp->eq_prod = sw_prod;
@@ -4209,7 +4224,7 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
 static void bnx2x_init_sp_ring(struct bnx2x *bp)
 {
 	spin_lock_init(&bp->spq_lock);
-	atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
+	atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
 
 	bp->spq_prod_idx = 0;
 	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
@@ -4234,6 +4249,9 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
 	bp->eq_cons = 0;
 	bp->eq_prod = NUM_EQ_DESC;
 	bp->eq_cons_sb = BNX2X_EQ_INDEX;
+	/* we want a warning message before it gets rought... */
+	atomic_set(&bp->eq_spq_left,
+		   min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
 }
 
 static void bnx2x_init_ind_table(struct bnx2x *bp)
@@ -5832,7 +5850,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
 	   BP_ABS_FUNC(bp), load_code);
 
 	bp->dmae_ready = 0;
-	mutex_init(&bp->dmae_mutex);
+	spin_lock_init(&bp->dmae_lock);
 	rc = bnx2x_gunzip_init(bp);
 	if (rc)
 		return rc;
@@ -6167,12 +6185,14 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
 	int ramrod_flags = WAIT_RAMROD_COMMON;
 
 	bp->set_mac_pending = 1;
-	smp_wmb();
 
 	config->hdr.length = 1;
 	config->hdr.offset = cam_offset;
 	config->hdr.client_id = 0xff;
-	config->hdr.reserved1 = 0;
+	/* Mark the single MAC configuration ramrod as opposed to a
+	 * UC/MC list configuration).
+	 */
+	config->hdr.echo = 1;
 
 	/* primary MAC */
 	config->config_table[0].msb_mac_addr =
@@ -6204,6 +6224,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
 	   config->config_table[0].middle_mac_addr,
 	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
 
+	mb();
+
 	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
 		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
 		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
@@ -6268,20 +6290,15 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
 	if (CHIP_IS_E1H(bp))
 		return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
 	else if (CHIP_MODE_IS_4_PORT(bp))
-		return BP_FUNC(bp) * 32 + rel_offset;
+		return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
 	else
-		return BP_VN(bp) * 32 + rel_offset;
+		return E2_FUNC_MAX * rel_offset + BP_VN(bp);
 }
 
 /**
  * LLH CAM line allocations: currently only iSCSI and ETH macs are
  * relevant. In addition, current implementation is tuned for a
  * single ETH MAC.
- *
- * When multiple unicast ETH MACs PF configuration in switch
- * independent mode is required (NetQ, multiple netdev MACs,
- * etc.), consider better utilisation of 16 per function MAC
- * entries in the LLH memory.
  */
 enum {
 	LLH_CAM_ISCSI_ETH_LINE = 0,
@@ -6356,14 +6373,37 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
 		bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
 	}
 }
-static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
+
+static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
+{
+	return CHIP_REV_IS_SLOW(bp) ?
+		(BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
+		(BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
+}
+
+/* set mc list, do not wait as wait implies sleep and
+ * set_rx_mode can be invoked from non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporary
+ * inconsistent CAM configuration (possible in case of a very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
 {
 	int i = 0, old;
 	struct net_device *dev = bp->dev;
+	u8 offset = bnx2x_e1_cam_mc_offset(bp);
 	struct netdev_hw_addr *ha;
 	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
 	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
 
+	if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
+		return -EINVAL;
+
 	netdev_for_each_mc_addr(ha, dev) {
 		/* copy mac */
 		config_cmd->config_table[i].msb_mac_addr =
@@ -6404,32 +6444,47 @@ static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
 		}
 	}
 
+	wmb();
+
 	config_cmd->hdr.length = i;
 	config_cmd->hdr.offset = offset;
 	config_cmd->hdr.client_id = 0xff;
-	config_cmd->hdr.reserved1 = 0;
+	/* Mark that this ramrod doesn't use bp->set_mac_pending for
+	 * synchronization.
+	 */
+	config_cmd->hdr.echo = 0;
 
-	bp->set_mac_pending = 1;
-	smp_wmb();
+	mb();
 
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
-		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
 }
-static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
+
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
 {
 	int i;
 	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
 	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
 	int ramrod_flags = WAIT_RAMROD_COMMON;
+	u8 offset = bnx2x_e1_cam_mc_offset(bp);
 
-	bp->set_mac_pending = 1;
-	smp_wmb();
-
-	for (i = 0; i < config_cmd->hdr.length; i++)
+	for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
 		SET_FLAG(config_cmd->config_table[i].flags,
 			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
 			T_ETH_MAC_COMMAND_INVALIDATE);
 
+	wmb();
+
+	config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
+	config_cmd->hdr.offset = offset;
+	config_cmd->hdr.client_id = 0xff;
+	/* We'll wait for a completion this time... */
+	config_cmd->hdr.echo = 1;
+
+	bp->set_mac_pending = 1;
+
+	mb();
+
 	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
 		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
 
@@ -6439,6 +6494,44 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
 
 }
 
+/* Accept one or more multicasts */
+static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
+{
+	struct net_device *dev = bp->dev;
+	struct netdev_hw_addr *ha;
+	u32 mc_filter[MC_HASH_SIZE];
+	u32 crc, bit, regidx;
+	int i;
+
+	memset(mc_filter, 0, 4 * MC_HASH_SIZE);
+
+	netdev_for_each_mc_addr(ha, dev) {
+		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+		   bnx2x_mc_addr(ha));
+
+		crc = crc32c_le(0, bnx2x_mc_addr(ha),
+				ETH_ALEN);
+		bit = (crc >> 24) & 0xff;
+		regidx = bit >> 5;
+		bit &= 0x1f;
+		mc_filter[regidx] |= (1 << bit);
+	}
+
+	for (i = 0; i < MC_HASH_SIZE; i++)
+		REG_WR(bp, MC_HASH_OFFSET(bp, i),
+		       mc_filter[i]);
+
+	return 0;
+}
+
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < MC_HASH_SIZE; i++)
+		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+}
+
 #ifdef BCM_CNIC
 /**
  * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
@@ -7105,20 +7198,15 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
 	/* Give HW time to discard old tx messages */
 	msleep(1);
 
-	if (CHIP_IS_E1(bp)) {
-		/* invalidate mc list,
-		 * wait and poll (interrupts are off)
-		 */
-		bnx2x_invlidate_e1_mc_list(bp);
-		bnx2x_set_eth_mac(bp, 0);
-
-	} else {
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+	bnx2x_set_eth_mac(bp, 0);
 
-		bnx2x_set_eth_mac(bp, 0);
+	bnx2x_invalidate_uc_list(bp);
 
-		for (i = 0; i < MC_HASH_SIZE; i++)
-			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+	if (CHIP_IS_E1(bp))
+		bnx2x_invalidate_e1_mc_list(bp);
+	else {
+		bnx2x_invalidate_e1h_mc_list(bp);
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 	}
 
 #ifdef BCM_CNIC
@@ -8890,12 +8978,197 @@ static int bnx2x_close(struct net_device *dev)
 	return 0;
 }
 
+#define E1_MAX_UC_LIST	29
+#define E1H_MAX_UC_LIST	30
+#define E2_MAX_UC_LIST	14
+static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
+{
+	if (CHIP_IS_E1(bp))
+		return E1_MAX_UC_LIST;
+	else if (CHIP_IS_E1H(bp))
+		return E1H_MAX_UC_LIST;
+	else
+		return E2_MAX_UC_LIST;
+}
+
+
+static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
+{
+	if (CHIP_IS_E1(bp))
+		/* CAM Entries for Port0:
+		 *      0 - prim ETH MAC
+		 *      1 - BCAST MAC
+		 *      2 - iSCSI L2 ring ETH MAC
+		 *      3-31 - UC MACs
+		 *
+		 * Port1 entries are allocated the same way starting from
+		 * entry 32.
+		 */
+		return 3 + 32 * BP_PORT(bp);
+	else if (CHIP_IS_E1H(bp)) {
+		/* CAM Entries:
+		 *      0-7  - prim ETH MAC for each function
+		 *      8-15 - iSCSI L2 ring ETH MAC for each function
+		 *      16 till 255 UC MAC lists for each function
+		 *
+		 * Remark: There is no FCoE support for E1H, thus FCoE related
+		 *         MACs are not considered.
+		 */
+		return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
+			bnx2x_max_uc_list(bp) * BP_FUNC(bp);
+	} else {
+		/* CAM Entries (there is a separate CAM per engine):
+		 *      0-4  - prim ETH MAC for each function
+		 *      4-7 - iSCSI L2 ring ETH MAC for each function
+		 *      8-11 - FIP ucast L2 MAC for each function
+		 *      12-15 - ALL_ENODE_MACS mcast MAC for each function
+		 *      16 till 71 UC MAC lists for each function
+		 */
+		u8 func_idx =
+			(CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
+
+		return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
+			bnx2x_max_uc_list(bp) * func_idx;
+	}
+}
+
+/* set uc list, do not wait as wait implies sleep and
+ * set_rx_mode can be invoked from non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporary
+ * inconsistent CAM configuration (possible in case of very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+	int i = 0, old;
+	struct net_device *dev = bp->dev;
+	u8 offset = bnx2x_uc_list_cam_offset(bp);
+	struct netdev_hw_addr *ha;
+	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+
+	if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
+		return -EINVAL;
+
+	netdev_for_each_uc_addr(ha, dev) {
+		/* copy mac */
+		config_cmd->config_table[i].msb_mac_addr =
+			swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
+		config_cmd->config_table[i].middle_mac_addr =
+			swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
+		config_cmd->config_table[i].lsb_mac_addr =
+			swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
+
+		config_cmd->config_table[i].vlan_id = 0;
+		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
+		config_cmd->config_table[i].clients_bit_vector =
+			cpu_to_le32(1 << BP_L_ID(bp));
+
+		SET_FLAG(config_cmd->config_table[i].flags,
+			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			T_ETH_MAC_COMMAND_SET);
+
+		DP(NETIF_MSG_IFUP,
+		   "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
+		   config_cmd->config_table[i].msb_mac_addr,
+		   config_cmd->config_table[i].middle_mac_addr,
+		   config_cmd->config_table[i].lsb_mac_addr);
+
+		i++;
+
+		/* Set uc MAC in NIG */
+		bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
+				     LLH_CAM_ETH_LINE + i);
+	}
+	old = config_cmd->hdr.length;
+	if (old > i) {
+		for (; i < old; i++) {
+			if (CAM_IS_INVALID(config_cmd->
+					   config_table[i])) {
+				/* already invalidated */
+				break;
+			}
+			/* invalidate */
+			SET_FLAG(config_cmd->config_table[i].flags,
+				MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+				T_ETH_MAC_COMMAND_INVALIDATE);
+		}
+	}
+
+	wmb();
+
+	config_cmd->hdr.length = i;
+	config_cmd->hdr.offset = offset;
+	config_cmd->hdr.client_id = 0xff;
+	/* Mark that this ramrod doesn't use bp->set_mac_pending for
+	 * synchronization.
+	 */
+	config_cmd->hdr.echo = 0;
+
+	mb();
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+}
+
+void bnx2x_invalidate_uc_list(struct bnx2x *bp)
+{
+	int i;
+	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+	int ramrod_flags = WAIT_RAMROD_COMMON;
+	u8 offset = bnx2x_uc_list_cam_offset(bp);
+	u8 max_list_size = bnx2x_max_uc_list(bp);
+
+	for (i = 0; i < max_list_size; i++) {
+		SET_FLAG(config_cmd->config_table[i].flags,
+			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			T_ETH_MAC_COMMAND_INVALIDATE);
+		bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
+	}
+
+	wmb();
+
+	config_cmd->hdr.length = max_list_size;
+	config_cmd->hdr.offset = offset;
+	config_cmd->hdr.client_id = 0xff;
+	/* We'll wait for a completion this time... */
+	config_cmd->hdr.echo = 1;
+
+	bp->set_mac_pending = 1;
+
+	mb();
+
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+	/* Wait for a completion */
+	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
+			  ramrod_flags);
+
+}
+
+static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+	/* some multicasts */
+	if (CHIP_IS_E1(bp)) {
+		return bnx2x_set_e1_mc_list(bp);
+	} else { /* E1H and newer */
+		return bnx2x_set_e1h_mc_list(bp);
+	}
+}
+
 /* called with netif_tx_lock from dev_mcast.c */
 void bnx2x_set_rx_mode(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
-	int port = BP_PORT(bp);
 
 	if (bp->state != BNX2X_STATE_OPEN) {
 		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
@@ -8906,47 +9179,16 @@ void bnx2x_set_rx_mode(struct net_device *dev)
 
 	if (dev->flags & IFF_PROMISC)
 		rx_mode = BNX2X_RX_MODE_PROMISC;
-	else if ((dev->flags & IFF_ALLMULTI) ||
-		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
-		  CHIP_IS_E1(bp)))
+	else if (dev->flags & IFF_ALLMULTI)
 		rx_mode = BNX2X_RX_MODE_ALLMULTI;
-	else { /* some multicasts */
-		if (CHIP_IS_E1(bp)) {
-			/*
-			 * set mc list, do not wait as wait implies sleep
-			 * and set_rx_mode can be invoked from non-sleepable
-			 * context
-			 */
-			u8 offset = (CHIP_REV_IS_SLOW(bp) ?
-				BNX2X_MAX_EMUL_MULTI*(1 + port) :
-				BNX2X_MAX_MULTICAST*(1 + port));
-
-			bnx2x_set_e1_mc_list(bp, offset);
-		} else { /* E1H */
-			/* Accept one or more multicasts */
-			struct netdev_hw_addr *ha;
-			u32 mc_filter[MC_HASH_SIZE];
-			u32 crc, bit, regidx;
-			int i;
-
-			memset(mc_filter, 0, 4 * MC_HASH_SIZE);
-
-			netdev_for_each_mc_addr(ha, dev) {
-				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
-				   bnx2x_mc_addr(ha));
-
-				crc = crc32c_le(0, bnx2x_mc_addr(ha),
-						ETH_ALEN);
-				bit = (crc >> 24) & 0xff;
-				regidx = bit >> 5;
-				bit &= 0x1f;
-				mc_filter[regidx] |= (1 << bit);
-			}
+	else {
+		/* some multicasts */
+		if (bnx2x_set_mc_list(bp))
+			rx_mode = BNX2X_RX_MODE_ALLMULTI;
 
-			for (i = 0; i < MC_HASH_SIZE; i++)
-				REG_WR(bp, MC_HASH_OFFSET(bp, i),
-					mc_filter[i]);
-		}
+		/* some unicasts */
+		if (bnx2x_set_uc_list(bp))
+			rx_mode = BNX2X_RX_MODE_PROMISC;
 	}
 
 	bp->rx_mode = rx_mode;
@@ -9027,7 +9269,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
 	.ndo_stop		= bnx2x_close,
 	.ndo_start_xmit		= bnx2x_start_xmit,
 	.ndo_select_queue	= bnx2x_select_queue,
-	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
+	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
 	.ndo_set_mac_address	= bnx2x_change_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_do_ioctl		= bnx2x_ioctl,
@@ -9853,15 +10095,21 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
 			   HW_CID(bp, BNX2X_ISCSI_ETH_CID));
 	}
 
-		/* There may be not more than 8 L2 and COMMON SPEs and not more
-		 * than 8 L5 SPEs in the air.
+		/* There may be not more than 8 L2 and not more than 8 L5 SPEs
+		 * We also check that the number of outstanding
+		 * COMMON ramrods is not more than the EQ and SPQ can
+		 * accommodate.
 		 */
-		if ((type == NONE_CONNECTION_TYPE) ||
-		    (type == ETH_CONNECTION_TYPE)) {
-			if (!atomic_read(&bp->spq_left))
+		if (type == ETH_CONNECTION_TYPE) {
+			if (!atomic_read(&bp->cq_spq_left))
+				break;
+			else
+				atomic_dec(&bp->cq_spq_left);
+		} else if (type == NONE_CONNECTION_TYPE) {
+			if (!atomic_read(&bp->eq_spq_left))
 				break;
 			else
-				atomic_dec(&bp->spq_left);
+				atomic_dec(&bp->eq_spq_left);
 		} else if ((type == ISCSI_CONNECTION_TYPE) ||
 			   (type == FCOE_CONNECTION_TYPE)) {
 			if (bp->cnic_spq_pending >=
@@ -10054,7 +10302,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
 		int count = ctl->data.credit.credit_count;
 
 		smp_mb__before_atomic_inc();
-		atomic_add(count, &bp->spq_left);
+		atomic_add(count, &bp->cq_spq_left);
 		smp_mb__after_atomic_inc();
 		break;
 	}
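For reference, the multicast filter programmed by the new
bnx2x_set_e1h_mc_list() can be reproduced standalone: the top byte of the
MAC's CRC32C picks one of 256 filter bits, spread across the 8 MC_HASH
registers. A minimal userspace sketch (the bitwise CRC32C below is a
stand-in for the kernel's crc32c_le(); its exact seed/XOR convention is an
assumption here, the bucket arithmetic is the point):

	/* Build: cc -o mchash mchash.c */
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MC_HASH_SIZE	8	/* 8 x 32-bit registers = 256 filter bits */
	#define ETH_ALEN	6

	/* Bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B78 --
	 * a stand-in for the kernel's crc32c_le(). */
	static uint32_t crc32c_le(uint32_t crc, const uint8_t *p, size_t len)
	{
		while (len--) {
			crc ^= *p++;
			for (int i = 0; i < 8; i++)
				crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78u : 0);
		}
		return crc;
	}

	int main(void)
	{
		uint32_t mc_filter[MC_HASH_SIZE] = { 0 };
		const uint8_t mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

		uint32_t crc = crc32c_le(0, mac, ETH_ALEN);
		uint32_t bit = (crc >> 24) & 0xff; /* top CRC byte: bucket 0..255 */
		uint32_t regidx = bit >> 5;        /* which 32-bit register */
		bit &= 0x1f;                       /* which bit inside it */

		mc_filter[regidx] |= 1u << bit;    /* what REG_WR() then programs */
		printf("crc=0x%08x -> mc_filter[%u] |= 1 << %u\n",
		       (unsigned)crc, (unsigned)regidx, (unsigned)bit);
		return 0;
	}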