author	Dmitry Kravkov <dmitry@broadcom.com>	2010-10-05 23:23:26 -0400
committer	David S. Miller <davem@davemloft.net>	2010-10-06 17:10:35 -0400
commit	523224a3b3cd407ce4e6731a087194e13a90db18 (patch)
tree	bb0fda289682e4259c401b8a5763ba4cc4d41659 /drivers/net/bnx2x/bnx2x_main.c
parent	0c5b77152e736d23a23eb2546eab323e27a37f52 (diff)
bnx2x, cnic, bnx2i: use new FW/HSI
This is the new FW HSI blob and the relevant definitions without logic changes.
It also includes code adaptation for the new HSI. New features are not enabled.
New FW/HSI includes:
- Support for 57712 HW
- Future support for VF (not used)
- Improvements in FW interrupts scheme
- FW FCoE hooks (stubs for future usage)
Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
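Most of the "code adaptation" below boils down to one idiom: host-side structures are copied into the chip's per-storm internal memory one 32-bit word at a time through the register window, as the __storm_memset_*() helpers in the first hunk do. A minimal, self-contained sketch of that idiom follows; it is not part of the patch, and reg_wr() plus the intmem[] array are stand-ins for the driver's REG_WR() macro and the real BAR window.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t intmem[16];			/* fake storm internal memory */

static void reg_wr(uint32_t addr, uint32_t val)	/* stand-in for REG_WR() */
{
	intmem[addr / 4] = val;
}

/* copy an arbitrary struct into "internal memory", dword by dword */
static void storm_memset_struct(uint32_t addr, size_t size,
				const uint32_t *data)
{
	size_t i;

	for (i = 0; i < size / 4; i++)
		reg_wr(addr + i * 4, data[i]);
}

struct stats_flags {		/* mock of stats_indication_flags */
	uint32_t collect_eth;
	uint32_t collect_toe;
};

int main(void)
{
	struct stats_flags flags = { .collect_eth = 1 };

	storm_memset_struct(8, sizeof(flags), (const uint32_t *)&flags);
	printf("intmem[2]=%u intmem[3]=%u\n", intmem[2], intmem[3]);
	return 0;
}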
Diffstat (limited to 'drivers/net/bnx2x/bnx2x_main.c')
-rw-r--r--	drivers/net/bnx2x/bnx2x_main.c | 3208
1 file changed, 2001 insertions(+), 1207 deletions(-)

diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 3696a4b6547..119ca871f01 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -149,6 +149,242 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
  * General service functions
  ****************************************************************************/
 
+static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
+				       u32 addr, dma_addr_t mapping)
+{
+	REG_WR(bp, addr, U64_LO(mapping));
+	REG_WR(bp, addr + 4, U64_HI(mapping));
+}
+
+static inline void __storm_memset_fill(struct bnx2x *bp,
+				       u32 addr, size_t size, u32 val)
+{
+	int i;
+	for (i = 0; i < size/4; i++)
+		REG_WR(bp, addr + (i * 4), val);
+}
+
+static inline void storm_memset_ustats_zero(struct bnx2x *bp,
+					    u8 port, u16 stat_id)
+{
+	size_t size = sizeof(struct ustorm_per_client_stats);
+
+	u32 addr = BAR_USTRORM_INTMEM +
+			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
+
+	__storm_memset_fill(bp, addr, size, 0);
+}
+
+static inline void storm_memset_tstats_zero(struct bnx2x *bp,
+					    u8 port, u16 stat_id)
+{
+	size_t size = sizeof(struct tstorm_per_client_stats);
+
+	u32 addr = BAR_TSTRORM_INTMEM +
+			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
+
+	__storm_memset_fill(bp, addr, size, 0);
+}
+
+static inline void storm_memset_xstats_zero(struct bnx2x *bp,
+					    u8 port, u16 stat_id)
+{
+	size_t size = sizeof(struct xstorm_per_client_stats);
+
+	u32 addr = BAR_XSTRORM_INTMEM +
+			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
+
+	__storm_memset_fill(bp, addr, size, 0);
+}
+
+
+static inline void storm_memset_spq_addr(struct bnx2x *bp,
+					 dma_addr_t mapping, u16 abs_fid)
+{
+	u32 addr = XSEM_REG_FAST_MEMORY +
+			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
+
+	__storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
+{
+	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
+}
+
+static inline void storm_memset_func_cfg(struct bnx2x *bp,
+				struct tstorm_eth_function_common_config *tcfg,
+				u16 abs_fid)
+{
+	size_t size = sizeof(struct tstorm_eth_function_common_config);
+
+	u32 addr = BAR_TSTRORM_INTMEM +
+			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
+}
+
+static inline void storm_memset_xstats_flags(struct bnx2x *bp,
+				struct stats_indication_flags *flags,
+				u16 abs_fid)
+{
+	size_t size = sizeof(struct stats_indication_flags);
+
+	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)flags);
+}
+
+static inline void storm_memset_tstats_flags(struct bnx2x *bp,
+				struct stats_indication_flags *flags,
+				u16 abs_fid)
+{
+	size_t size = sizeof(struct stats_indication_flags);
+
+	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)flags);
+}
+
+static inline void storm_memset_ustats_flags(struct bnx2x *bp,
+				struct stats_indication_flags *flags,
+				u16 abs_fid)
+{
+	size_t size = sizeof(struct stats_indication_flags);
+
+	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)flags);
+}
+
+static inline void storm_memset_cstats_flags(struct bnx2x *bp,
+				struct stats_indication_flags *flags,
+				u16 abs_fid)
+{
+	size_t size = sizeof(struct stats_indication_flags);
+
+	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)flags);
+}
+
+static inline void storm_memset_xstats_addr(struct bnx2x *bp,
+					    dma_addr_t mapping, u16 abs_fid)
+{
+	u32 addr = BAR_XSTRORM_INTMEM +
+		XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+
+	__storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_tstats_addr(struct bnx2x *bp,
+					    dma_addr_t mapping, u16 abs_fid)
+{
+	u32 addr = BAR_TSTRORM_INTMEM +
+		TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+
+	__storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_ustats_addr(struct bnx2x *bp,
+					    dma_addr_t mapping, u16 abs_fid)
+{
+	u32 addr = BAR_USTRORM_INTMEM +
+		USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+
+	__storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_cstats_addr(struct bnx2x *bp,
+					    dma_addr_t mapping, u16 abs_fid)
+{
+	u32 addr = BAR_CSTRORM_INTMEM +
+		CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+
+	__storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+					 u16 pf_id)
+{
+	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+}
+
+static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+					u8 enable)
+{
+	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+}
+
+static inline void storm_memset_eq_data(struct bnx2x *bp,
+				struct event_ring_data *eq_data,
+				u16 pfid)
+{
+	size_t size = sizeof(struct event_ring_data);
+
+	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
+}
+
+static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
+					u16 pfid)
+{
+	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
+	REG_WR16(bp, addr, eq_prod);
+}
+
+static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
+					   u16 fw_sb_id, u8 sb_index,
+					   u8 ticks)
+{
+
+	int index_offset =
+		offsetof(struct hc_status_block_data_e1x, index_data);
+	u32 addr = BAR_CSTRORM_INTMEM +
+			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
+			index_offset +
+			sizeof(struct hc_index_data)*sb_index +
+			offsetof(struct hc_index_data, timeout);
+	REG_WR8(bp, addr, ticks);
+	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
+			  port, fw_sb_id, sb_index, ticks);
+}
+static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
+					   u16 fw_sb_id, u8 sb_index,
+					   u8 disable)
+{
+	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
+	int index_offset =
+		offsetof(struct hc_status_block_data_e1x, index_data);
+	u32 addr = BAR_CSTRORM_INTMEM +
+			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
+			index_offset +
+			sizeof(struct hc_index_data)*sb_index +
+			offsetof(struct hc_index_data, flags);
+	u16 flags = REG_RD16(bp, addr);
+	/* clear and set */
+	flags &= ~HC_INDEX_DATA_HC_ENABLED;
+	flags |= enable_flag;
+	REG_WR16(bp, addr, flags);
+	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
+			  port, fw_sb_id, sb_index, disable);
+}
+
 /* used only at init
  * locking is done by mcp
  */
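A minimal sketch (not driver code) of the offset arithmetic used by storm_memset_hc_timeout() and storm_memset_hc_disable() above: the byte address of index_data[sb_index].timeout is built purely from offsetof() and sizeof() on the status-block layout. The struct shapes here are mock stand-ins for the real HSI definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hc_index_data {			/* mock of the HSI struct */
	uint8_t flags;
	uint8_t timeout;
};

struct hc_status_block_data_e1x {	/* mock of the HSI struct */
	uint32_t common;
	struct hc_index_data index_data[8];
};

int main(void)
{
	unsigned int sb_index = 3;
	size_t addr = offsetof(struct hc_status_block_data_e1x, index_data) +
		      sizeof(struct hc_index_data) * sb_index +
		      offsetof(struct hc_index_data, timeout);

	/* same offset the REG_WR8() in the patch targets (plus the BAR base) */
	printf("timeout byte offset for index %u: %zu\n", sb_index, addr);
	return 0;
}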
@@ -538,7 +774,12 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
 void bnx2x_panic_dump(struct bnx2x *bp)
 {
 	int i;
-	u16 j, start, end;
+	u16 j;
+	struct hc_sp_status_block_data sp_sb_data;
+	int func = BP_FUNC(bp);
+#ifdef BNX2X_STOP_ON_ERROR
+	u16 start = 0, end = 0;
+#endif
 
 	bp->stats_state = STATS_STATE_DISABLED;
 	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
@@ -547,44 +788,124 @@ void bnx2x_panic_dump(struct bnx2x *bp)
 
 	/* Indices */
 	/* Common */
-	BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
-		  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
+	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
 		  "  spq_prod_idx(0x%x)\n",
-		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
-		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
+		  bp->def_idx, bp->def_att_idx,
+		  bp->attn_state, bp->spq_prod_idx);
+	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
+		  bp->def_status_blk->atten_status_block.attn_bits,
+		  bp->def_status_blk->atten_status_block.attn_bits_ack,
+		  bp->def_status_blk->atten_status_block.status_block_id,
+		  bp->def_status_blk->atten_status_block.attn_bits_index);
+	BNX2X_ERR("     def (");
+	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+		pr_cont("0x%x%s",
+			bp->def_status_blk->sp_sb.index_values[i],
+			(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");
+
+	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
+		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
+			i*sizeof(u32));
+
+	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
+		"pf_id(0x%x)  vnic_id(0x%x)  "
+		"vf_id(0x%x)  vf_valid (0x%x)\n",
+		sp_sb_data.igu_sb_id,
+		sp_sb_data.igu_seg_id,
+		sp_sb_data.p_func.pf_id,
+		sp_sb_data.p_func.vnic_id,
+		sp_sb_data.p_func.vf_id,
+		sp_sb_data.p_func.vf_valid);
+
 
-	/* Rx */
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
-
+		int loop;
+		struct hc_status_block_data_e1x sb_data_e1x;
+		struct hc_status_block_sm  *hc_sm_p =
+			sb_data_e1x.common.state_machine;
+		struct hc_index_data *hc_index_p =
+			sb_data_e1x.index_data;
+		int data_size;
+		u32 *sb_data_p;
+
+		/* Rx */
 		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
-			  "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
+			  "  rx_comp_prod(0x%x)"
 			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
 			  i, fp->rx_bd_prod, fp->rx_bd_cons,
-			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
+			  fp->rx_comp_prod,
 			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
 		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
-			  "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
+			  "  fp_hc_idx(0x%x)\n",
 			  fp->rx_sge_prod, fp->last_max_sge,
-			  le16_to_cpu(fp->fp_u_idx),
-			  fp->status_blk->u_status_block.status_block_index);
-	}
-
-	/* Tx */
-	for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
+			  le16_to_cpu(fp->fp_hc_idx));
 
+		/* Tx */
 		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
 			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
 			  "  *tx_cons_sb(0x%x)\n",
 			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
 			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
-		BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
-			  "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
-			  fp->status_blk->c_status_block.status_block_index,
-			  fp->tx_db.data.prod);
+
+		loop = HC_SB_MAX_INDICES_E1X;
+
+		/* host sb data */
+
+		BNX2X_ERR("     run indexes (");
+		for (j = 0; j < HC_SB_MAX_SM; j++)
+			pr_cont("0x%x%s",
+				fp->sb_running_index[j],
+				(j == HC_SB_MAX_SM - 1) ? ")" : " ");
+
+		BNX2X_ERR("     indexes (");
+		for (j = 0; j < loop; j++)
+			pr_cont("0x%x%s",
+				fp->sb_index_values[j],
+				(j == loop - 1) ? ")" : " ");
+		/* fw sb data */
+		data_size =
+			sizeof(struct hc_status_block_data_e1x);
+		data_size /= sizeof(u32);
+		sb_data_p = (u32 *)&sb_data_e1x;
+		/* copy sb data in here */
+		for (j = 0; j < data_size; j++)
+			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
+				j * sizeof(u32));
+
+		pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
+			"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
+			sb_data_e1x.common.p_func.pf_id,
+			sb_data_e1x.common.p_func.vf_id,
+			sb_data_e1x.common.p_func.vf_valid,
+			sb_data_e1x.common.p_func.vnic_id,
+			sb_data_e1x.common.same_igu_sb_1b);
+
+		/* SB_SMs data */
+		for (j = 0; j < HC_SB_MAX_SM; j++) {
+			pr_cont("SM[%d] __flags (0x%x) "
+				"igu_sb_id (0x%x)  igu_seg_id(0x%x) "
+				"time_to_expire (0x%x) "
+				"timer_value(0x%x)\n", j,
+				hc_sm_p[j].__flags,
+				hc_sm_p[j].igu_sb_id,
+				hc_sm_p[j].igu_seg_id,
+				hc_sm_p[j].time_to_expire,
+				hc_sm_p[j].timer_value);
+		}
+
+		/* Indices data */
+		for (j = 0; j < loop; j++) {
+			pr_cont("INDEX[%d] flags (0x%x) "
+				"timeout (0x%x)\n", j,
+				hc_index_p[j].flags,
+				hc_index_p[j].timeout);
+		}
 	}
 
+#ifdef BNX2X_STOP_ON_ERROR
 	/* Rings */
 	/* Rx */
 	for_each_queue(bp, i) {
@@ -642,7 +963,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
 			  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
 		}
 	}
-
+#endif
 	bnx2x_fw_dump(bp);
 	bnx2x_mc_assert(bp);
 	BNX2X_ERR("end crash dump -----------------\n");
@@ -708,7 +1029,7 @@ void bnx2x_int_enable(struct bnx2x *bp)
 	mmiowb();
 }
 
-static void bnx2x_int_disable(struct bnx2x *bp)
+void bnx2x_int_disable(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
 	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -817,76 +1138,35 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 	   fp->index, cid, command, bp->state,
 	   rr_cqe->ramrod_cqe.ramrod_type);
 
-	bp->spq_left++;
-
-	if (fp->index) {
-		switch (command | fp->state) {
-		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
-						BNX2X_FP_STATE_OPENING):
-			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
-			   cid);
-			fp->state = BNX2X_FP_STATE_OPEN;
-			break;
-
-		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
-			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
-			   cid);
-			fp->state = BNX2X_FP_STATE_HALTED;
-			break;
-
-		default:
-			BNX2X_ERR("unexpected MC reply (%d)  "
-				  "fp[%d] state is %x\n",
-				  command, fp->index, fp->state);
-			break;
-		}
-		mb(); /* force bnx2x_wait_ramrod() to see the change */
-		return;
-	}
-
-	switch (command | bp->state) {
-	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
-		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
-		bp->state = BNX2X_STATE_OPEN;
-		break;
-
-	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
-		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
-		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
-		fp->state = BNX2X_FP_STATE_HALTED;
-		break;
-
-	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
-		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
-		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
-		break;
-
-#ifdef BCM_CNIC
-	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
-		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
-		bnx2x_cnic_cfc_comp(bp, cid);
-		break;
-#endif
-
-	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
-	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
-		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
-		bp->set_mac_pending--;
-		smp_wmb();
-		break;
-
-	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
-		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
-		bp->set_mac_pending--;
-		smp_wmb();
-		break;
-
-	default:
-		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
-			  command, bp->state);
-		break;
-	}
-	mb(); /* force bnx2x_wait_ramrod() to see the change */
+	switch (command | fp->state) {
+	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
+		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
+		fp->state = BNX2X_FP_STATE_OPEN;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
+		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
+		fp->state = BNX2X_FP_STATE_HALTED;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
+		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
+		fp->state = BNX2X_FP_STATE_TERMINATED;
+		break;
+
+	default:
+		BNX2X_ERR("unexpected MC reply (%d)  "
+			  "fp[%d] state is %x\n",
+			  command, fp->index, fp->state);
+		break;
+	}
+
+	bp->spq_left++;
+
+	/* push the change in fp->state and towards the memory */
+	smp_wmb();
+
+	return;
 }
 
 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
@@ -917,22 +1197,19 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
-		mask = 0x2 << fp->sb_id;
+		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
 		if (status & mask) {
 			/* Handle Rx and Tx according to SB id */
 			prefetch(fp->rx_cons_sb);
-			prefetch(&fp->status_blk->u_status_block.
-						status_block_index);
 			prefetch(fp->tx_cons_sb);
-			prefetch(&fp->status_blk->c_status_block.
-						status_block_index);
+			prefetch(&fp->sb_running_index[SM_RX_ID]);
 			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 			status &= ~mask;
 		}
 	}
 
 #ifdef BCM_CNIC
-	mask = 0x2 << CNIC_SB_ID(bp);
+	mask = 0x2;
 	if (status & (mask | 0x1)) {
 		struct cnic_ops *c_ops = NULL;
 
@@ -1422,7 +1699,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
 	bp->vn_weight_sum = 0;
 	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
 		int func = 2*vn + port;
-		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+		u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
 		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
 				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
 
@@ -1454,7 +1731,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
 {
 	struct rate_shaping_vars_per_vn m_rs_vn;
 	struct fairness_vars_per_vn m_fair_vn;
-	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+	u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
 	u16 vn_min_rate, vn_max_rate;
 	int i;
 
@@ -1511,7 +1788,83 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
 		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
 		       ((u32 *)(&m_fair_vn))[i]);
 }
+static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
+{
+	if (CHIP_REV_IS_SLOW(bp))
+		return CMNG_FNS_NONE;
+	if (IS_E1HMF(bp))
+		return CMNG_FNS_MINMAX;
+
+	return CMNG_FNS_NONE;
+}
+
+static void bnx2x_read_mf_cfg(struct bnx2x *bp)
+{
+	int vn;
+
+	if (BP_NOMCP(bp))
+		return; /* what should be the default value in this case */
+
+	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+		int /*abs*/func = 2*vn + BP_PORT(bp);
+		bp->mf_config =
+			MF_CFG_RD(bp, func_mf_config[func].config);
+	}
+}
+
+static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
+{
+
+	if (cmng_type == CMNG_FNS_MINMAX) {
+		int vn;
+
+		/* clear cmng_enables */
+		bp->cmng.flags.cmng_enables = 0;
+
+		/* read mf conf from shmem */
+		if (read_cfg)
+			bnx2x_read_mf_cfg(bp);
+
+		/* Init rate shaping and fairness contexts */
+		bnx2x_init_port_minmax(bp);
+
+		/* vn_weight_sum and enable fairness if not 0 */
+		bnx2x_calc_vn_weight_sum(bp);
+
+		/* calculate and set min-max rate for each vn */
+		for (vn = VN_0; vn < E1HVN_MAX; vn++)
+			bnx2x_init_vn_minmax(bp, vn);
+
+		/* always enable rate shaping and fairness */
+		bp->cmng.flags.cmng_enables |=
+					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
+		if (!bp->vn_weight_sum)
+			DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
+				   " fairness will be disabled\n");
+		return;
+	}
+
+	/* rate shaping and fairness are disabled */
+	DP(NETIF_MSG_IFUP,
+	   "rate shaping and fairness are disabled\n");
+}
+
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int func;
+	int vn;
 
+	/* Set the attention towards other drivers on the same port */
+	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+		if (vn == BP_E1HVN(bp))
+			continue;
+
+		func = ((vn << 1) | port);
+		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+	}
+}
 
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
@@ -1669,6 +2022,308 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
 	return rc;
 }
 
+/* must be called under rtnl_lock */
+void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
+{
+	u32 mask = (1 << cl_id);
+
+	/* initial setting is BNX2X_ACCEPT_NONE */
+	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
+	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
+	u8 unmatched_unicast = 0;
+
+	if (filters & BNX2X_PROMISCUOUS_MODE) {
+		/* promiscuous - accept all, drop none */
+		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
+		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
+	}
+	if (filters & BNX2X_ACCEPT_UNICAST) {
+		/* accept matched ucast */
+		drop_all_ucast = 0;
+	}
+	if (filters & BNX2X_ACCEPT_MULTICAST) {
+		/* accept matched mcast */
+		drop_all_mcast = 0;
+	}
+	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
+		/* accept all ucast */
+		drop_all_ucast = 0;
+		accp_all_ucast = 1;
+	}
+	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
+		/* accept all mcast */
+		drop_all_mcast = 0;
+		accp_all_mcast = 1;
+	}
+	if (filters & BNX2X_ACCEPT_BROADCAST) {
+		/* accept (all) bcast */
+		drop_all_bcast = 0;
+		accp_all_bcast = 1;
+	}
+
+	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
+		bp->mac_filters.ucast_drop_all | mask :
+		bp->mac_filters.ucast_drop_all & ~mask;
+
+	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
+		bp->mac_filters.mcast_drop_all | mask :
+		bp->mac_filters.mcast_drop_all & ~mask;
+
+	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
+		bp->mac_filters.bcast_drop_all | mask :
+		bp->mac_filters.bcast_drop_all & ~mask;
+
+	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
+		bp->mac_filters.ucast_accept_all | mask :
+		bp->mac_filters.ucast_accept_all & ~mask;
+
+	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
+		bp->mac_filters.mcast_accept_all | mask :
+		bp->mac_filters.mcast_accept_all & ~mask;
+
+	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
+		bp->mac_filters.bcast_accept_all | mask :
+		bp->mac_filters.bcast_accept_all & ~mask;
+
+	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
+		bp->mac_filters.unmatched_unicast | mask :
+		bp->mac_filters.unmatched_unicast & ~mask;
+}
+
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+{
+	if (FUNC_CONFIG(p->func_flgs)) {
+		struct tstorm_eth_function_common_config tcfg = {0};
+
+		/* tpa */
+		if (p->func_flgs & FUNC_FLG_TPA)
+			tcfg.config_flags |=
+			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
+
+		/* set rss flags */
+		if (p->func_flgs & FUNC_FLG_RSS) {
+			u16 rss_flgs = (p->rss->mode <<
+			TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
+
+			if (p->rss->cap & RSS_IPV4_CAP)
+				rss_flgs |= RSS_IPV4_CAP_MASK;
+			if (p->rss->cap & RSS_IPV4_TCP_CAP)
+				rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
+			if (p->rss->cap & RSS_IPV6_CAP)
+				rss_flgs |= RSS_IPV6_CAP_MASK;
+			if (p->rss->cap & RSS_IPV6_TCP_CAP)
+				rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
+
+			tcfg.config_flags |= rss_flgs;
+			tcfg.rss_result_mask = p->rss->result_mask;
+
+		}
+
+		storm_memset_func_cfg(bp, &tcfg, p->func_id);
+	}
+
+	/* Enable the function in the FW */
+	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
+	storm_memset_func_en(bp, p->func_id, 1);
+
+	/* statistics */
+	if (p->func_flgs & FUNC_FLG_STATS) {
+		struct stats_indication_flags stats_flags = {0};
+		stats_flags.collect_eth = 1;
+
+		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
+		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
+
+		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
+		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
+
+		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
+		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
+
+		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
+		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
+	}
+
+	/* spq */
+	if (p->func_flgs & FUNC_FLG_SPQ) {
+		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
+		REG_WR(bp, XSEM_REG_FAST_MEMORY +
+		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
+	}
+}
+
+static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
+				     struct bnx2x_fastpath *fp)
+{
+	u16 flags = 0;
+
+	/* calculate queue flags */
+	flags |= QUEUE_FLG_CACHE_ALIGN;
+	flags |= QUEUE_FLG_HC;
+	flags |= IS_E1HMF(bp) ? QUEUE_FLG_OV : 0;
+
+#ifdef BCM_VLAN
+	flags |= QUEUE_FLG_VLAN;
+	DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
+#endif
+
+	if (!fp->disable_tpa)
+		flags |= QUEUE_FLG_TPA;
+
+	flags |= QUEUE_FLG_STATS;
+
+	return flags;
+}
+
+static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
+	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
+	struct bnx2x_rxq_init_params *rxq_init)
+{
+	u16 max_sge = 0;
+	u16 sge_sz = 0;
+	u16 tpa_agg_size = 0;
+
+	/* calculate queue flags */
+	u16 flags = bnx2x_get_cl_flags(bp, fp);
+
+	if (!fp->disable_tpa) {
+		pause->sge_th_hi = 250;
+		pause->sge_th_lo = 150;
+		tpa_agg_size = min_t(u32,
+			(min_t(u32, 8, MAX_SKB_FRAGS) *
+			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
+		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
+			SGE_PAGE_SHIFT;
+		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
+			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
+				    0xffff);
+	}
+
+	/* pause - not for e1 */
+	if (!CHIP_IS_E1(bp)) {
+		pause->bd_th_hi = 350;
+		pause->bd_th_lo = 250;
+		pause->rcq_th_hi = 350;
+		pause->rcq_th_lo = 250;
+		pause->sge_th_hi = 0;
+		pause->sge_th_lo = 0;
+		pause->pri_map = 1;
+	}
+
+	/* rxq setup */
+	rxq_init->flags = flags;
+	rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
+	rxq_init->dscr_map = fp->rx_desc_mapping;
+	rxq_init->sge_map = fp->rx_sge_mapping;
+	rxq_init->rcq_map = fp->rx_comp_mapping;
+	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+	rxq_init->mtu = bp->dev->mtu;
+	rxq_init->buf_sz = bp->rx_buf_size;
+	rxq_init->cl_qzone_id = fp->cl_qzone_id;
+	rxq_init->cl_id = fp->cl_id;
+	rxq_init->spcl_id = fp->cl_id;
+	rxq_init->stat_id = fp->cl_id;
+	rxq_init->tpa_agg_sz = tpa_agg_size;
+	rxq_init->sge_buf_sz = sge_sz;
+	rxq_init->max_sges_pkt = max_sge;
+	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+	rxq_init->fw_sb_id = fp->fw_sb_id;
+
+	rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
+
+	rxq_init->cid = HW_CID(bp, fp->cid);
+
+	rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
+}
+
+static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
+	struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
+{
+	u16 flags = bnx2x_get_cl_flags(bp, fp);
+
+	txq_init->flags = flags;
+	txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
+	txq_init->dscr_map = fp->tx_desc_mapping;
+	txq_init->stat_id = fp->cl_id;
+	txq_init->cid = HW_CID(bp, fp->cid);
+	txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
+	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
+	txq_init->fw_sb_id = fp->fw_sb_id;
+	txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
+}
+
+void bnx2x_pf_init(struct bnx2x *bp)
+{
+	struct bnx2x_func_init_params func_init = {0};
+	struct bnx2x_rss_params rss = {0};
+	struct event_ring_data eq_data = { {0} };
+	u16 flags;
+
+	/* pf specific setups */
+	if (!CHIP_IS_E1(bp))
+		storm_memset_ov(bp, bp->e1hov, BP_FUNC(bp));
+
+	/* function setup flags */
+	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
+
+	flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
+
+	/**
+	 * Although RSS is meaningless when there is a single HW queue we
+	 * still need it enabled in order to have HW Rx hash generated.
+	 *
+	 * if (is_eth_multi(bp))
+	 *	flags |= FUNC_FLG_RSS;
+	 */
+
+	/* function setup */
+	if (flags & FUNC_FLG_RSS) {
+		rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
+			   RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
+		rss.mode = bp->multi_mode;
+		rss.result_mask = MULTI_MASK;
+		func_init.rss = &rss;
+	}
+
+	func_init.func_flgs = flags;
+	func_init.pf_id = BP_FUNC(bp);
+	func_init.func_id = BP_FUNC(bp);
+	func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
+	func_init.spq_map = bp->spq_mapping;
+	func_init.spq_prod = bp->spq_prod_idx;
+
+	bnx2x_func_init(bp, &func_init);
+
+	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
+
+	/*
+	 * Congestion management values depend on the link rate.
+	 * There is no active link, so the initial link rate is set to 10 Gbps.
+	 * When the link comes up, the congestion management values are
+	 * re-calculated according to the actual link rate.
+	 */
+	bp->link_vars.line_speed = SPEED_10000;
+	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
+
+	/* Only the PMF sets the HW */
+	if (bp->port.pmf)
+		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+
+	/* no rx until link is up */
+	bp->rx_mode = BNX2X_RX_MODE_NONE;
+	bnx2x_set_storm_rx_mode(bp);
+
+	/* init Event Queue */
+	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
+	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
+	eq_data.producer = bp->eq_prod;
+	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
+	eq_data.sb_id = DEF_SB_ID;
+	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
+}
+
+
 static void bnx2x_e1h_disable(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
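Every field update in bnx2x_rxq_set_mac_filters() above follows the same pattern: each u32 holds one bit per client, and a rule either sets or clears the client's bit depending on a condition. A tiny sketch of that pattern in plain C follows; it uses no driver types, and the names are shortened stand-ins for the real mac_filters fields.

#include <stdint.h>
#include <stdio.h>

/* set the bit if the condition holds, clear it otherwise */
static inline uint32_t set_or_clear(uint32_t field, uint32_t mask, int set)
{
	return set ? (field | mask) : (field & ~mask);
}

int main(void)
{
	uint32_t ucast_drop_all = 0xff;	/* all 8 clients start out dropping */
	uint32_t mask = 1u << 3;	/* client id 3 */

	/* client 3 now accepts matched unicast: clear its drop-all bit */
	ucast_drop_all = set_or_clear(ucast_drop_all, mask, 0);
	printf("ucast_drop_all = 0x%x\n", ucast_drop_all);
	return 0;
}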
@@ -1695,40 +2350,6 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
 	 */
 }
 
-static void bnx2x_update_min_max(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-	int vn, i;
-
-	/* Init rate shaping and fairness contexts */
-	bnx2x_init_port_minmax(bp);
-
-	bnx2x_calc_vn_weight_sum(bp);
-
-	for (vn = VN_0; vn < E1HVN_MAX; vn++)
-		bnx2x_init_vn_minmax(bp, 2*vn + port);
-
-	if (bp->port.pmf) {
-		int func;
-
-		/* Set the attention towards other drivers on the same port */
-		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
-			if (vn == BP_E1HVN(bp))
-				continue;
-
-			func = ((vn << 1) | port);
-			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
-			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
-		}
-
-		/* Store it to internal memory */
-		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
-			REG_WR(bp, BAR_XSTRORM_INTMEM +
-			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
-			       ((u32 *)(&bp->cmng))[i]);
-	}
-}
-
 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 {
 	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -1755,7 +2376,9 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 	}
 	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
 
-		bnx2x_update_min_max(bp);
+		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
+		bnx2x_link_sync_notify(bp);
+		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
 		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
 	}
 
@@ -1790,7 +2413,7 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
 	/* Make sure that BD data is updated before writing the producer */
 	wmb();
 
-	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
 	       bp->spq_prod_idx);
 	mmiowb();
 }
@@ -1800,6 +2423,7 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 		  u32 data_hi, u32 data_lo, int common)
 {
 	struct eth_spe *spe;
+	u16 type;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -1821,22 +2445,42 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 	spe->hdr.conn_and_cmd_data =
 			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
 				    HW_CID(bp, cid));
-	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
+
 	if (common)
-		spe->hdr.type |=
-			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
+		/* Common ramrods:
+		 *	FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
+		 *	TRAFFIC_STOP, TRAFFIC_START
+		 */
+		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
+			& SPE_HDR_CONN_TYPE;
+	else
+		/* ETH ramrods: SETUP, HALT */
+		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
+			& SPE_HDR_CONN_TYPE;
+
+	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+		 SPE_HDR_FUNCTION_ID);
 
-	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
-	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
+	spe->hdr.type = cpu_to_le16(type);
 
-	bp->spq_left--;
+	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
+	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
+
+	/* stats ramrod has its own slot on the spq */
+	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
+		/* It's ok if the actual decrement is issued towards the memory
+		 * somewhere between the spin_lock and spin_unlock. Thus no
+		 * more explicit memory barrier is needed.
+		 */
+		bp->spq_left--;
 
 	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
-	   "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
+	   "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x) "
+	   "type(0x%x) left %x\n",
 	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
 	   (u32)(U64_LO(bp->spq_mapping) +
 	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
-	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
+	   HW_CID(bp, cid), data_hi, data_lo, type, bp->spq_left);
 
 	bnx2x_sp_prod_update(bp);
 	spin_unlock_bh(&bp->spq_lock);
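The hunk above replaces the old single-bit "common ramrod" flag with a composed 16-bit SPE header type: a connection-type field plus the issuing function id, each shifted and masked into place. A sketch of that composition follows; the shift and mask values below are invented for illustration, since the real ones come from the FW HSI headers.

#include <stdint.h>
#include <stdio.h>

#define CONN_TYPE_SHIFT	0		/* assumed layout, not the HSI one */
#define CONN_TYPE_MASK	0x00ff
#define FUNC_ID_SHIFT	8
#define FUNC_ID_MASK	0xff00

static uint16_t spe_type(uint8_t conn_type, uint8_t func_id)
{
	uint16_t type;

	/* shift each field into position and mask off overflow bits */
	type  = ((uint16_t)conn_type << CONN_TYPE_SHIFT) & CONN_TYPE_MASK;
	type |= ((uint16_t)func_id << FUNC_ID_SHIFT) & FUNC_ID_MASK;
	return type;
}

int main(void)
{
	/* e.g. an ETH connection (1) posted by PCI function 2 */
	printf("type = 0x%04x\n", spe_type(1, 2));
	return 0;
}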
@@ -1873,32 +2517,27 @@ static void bnx2x_release_alr(struct bnx2x *bp)
 	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
 }
 
+#define BNX2X_DEF_SB_ATT_IDX	0x0001
+#define BNX2X_DEF_SB_IDX	0x0002
+
 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
 {
-	struct host_def_status_block *def_sb = bp->def_status_blk;
+	struct host_sp_status_block *def_sb = bp->def_status_blk;
 	u16 rc = 0;
 
 	barrier(); /* status block is written to by the chip */
 	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
 		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
-		rc |= 1;
-	}
-	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
-		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
-		rc |= 2;
+		rc |= BNX2X_DEF_SB_ATT_IDX;
 	}
-	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
-		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
-		rc |= 4;
-	}
-	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
-		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
-		rc |= 8;
-	}
-	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
-		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
-		rc |= 16;
+
+	if (bp->def_idx != def_sb->sp_sb.running_index) {
+		bp->def_idx = def_sb->sp_sb.running_index;
+		rc |= BNX2X_DEF_SB_IDX;
 	}
+
+	/* Do not reorder: indices reading should complete before handling */
+	barrier();
 	return rc;
 }
 
@@ -2144,8 +2783,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
 			int func = BP_FUNC(bp);
 
 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
-			bp->mf_config = SHMEM_RD(bp,
-					   mf_cfg.func_mf_config[func].config);
+			bp->mf_config =
+				MF_CFG_RD(bp, func_mf_config[func].config);
 			val = SHMEM_RD(bp, func_mb[func].drv_status);
 			if (val & DRV_STATUS_DCC_EVENT_MASK)
 				bnx2x_dcc_event(bp,
@@ -2598,6 +3237,140 @@ static void bnx2x_attn_int(struct bnx2x *bp)
 	bnx2x_attn_int_deasserted(bp, deasserted);
 }
 
+static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
+{
+	/* No memory barriers */
+	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
+	mmiowb(); /* keep prod updates ordered */
+}
+
+#ifdef BCM_CNIC
+static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
+				     union event_ring_elem *elem)
+{
+	if (!bp->cnic_eth_dev.starting_cid ||
+	    cid < bp->cnic_eth_dev.starting_cid)
+		return 1;
+
+	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
+
+	if (unlikely(elem->message.data.cfc_del_event.error)) {
+		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
+			  cid);
+		bnx2x_panic_dump(bp);
+	}
+	bnx2x_cnic_cfc_comp(bp, cid);
+	return 0;
+}
+#endif
+
+static void bnx2x_eq_int(struct bnx2x *bp)
+{
+	u16 hw_cons, sw_cons, sw_prod;
+	union event_ring_elem *elem;
+	u32 cid;
+	u8 opcode;
+	int spqe_cnt = 0;
+
+	hw_cons = le16_to_cpu(*bp->eq_cons_sb);
+
+	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
+	 * When we get to the next page we need to adjust so the loop
+	 * condition below will be met. The next element is the size of a
+	 * regular element and hence incrementing by 1
+	 */
+	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
+		hw_cons++;
+
+	/* This function may never run in parallel with itself for a
+	 * specific bp, thus there is no need in "paired" read memory
+	 * barrier here.
+	 */
+	sw_cons = bp->eq_cons;
+	sw_prod = bp->eq_prod;
+
+	DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u  bp->spq_left %u\n",
+			hw_cons, sw_cons, bp->spq_left);
+
+	for (; sw_cons != hw_cons;
+	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
+
+		elem = &bp->eq_ring[EQ_DESC(sw_cons)];
+
+		cid = SW_CID(elem->message.data.cfc_del_event.cid);
+		opcode = elem->message.opcode;
+
+		/* handle eq element */
+		switch (opcode) {
+		case EVENT_RING_OPCODE_STAT_QUERY:
+			DP(NETIF_MSG_TIMER, "got statistics comp event\n");
+			/* nothing to do with stats comp */
+			continue;
+
+		case EVENT_RING_OPCODE_CFC_DEL:
+			/* handle according to cid range */
+			/*
+			 * we may want to verify here that the bp state is
+			 * HALTING
+			 */
+			DP(NETIF_MSG_IFDOWN,
+			   "got delete ramrod for MULTI[%d]\n", cid);
+#ifdef BCM_CNIC
+			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
+				goto next_spqe;
+#endif
+			bnx2x_fp(bp, cid, state) =
+						BNX2X_FP_STATE_CLOSED;
+
+			goto next_spqe;
+		}
+
+		switch (opcode | bp->state) {
+		case (EVENT_RING_OPCODE_FUNCTION_START |
+		      BNX2X_STATE_OPENING_WAIT4_PORT):
+			DP(NETIF_MSG_IFUP, "got setup ramrod\n");
+			bp->state = BNX2X_STATE_FUNC_STARTED;
+			break;
+
+		case (EVENT_RING_OPCODE_FUNCTION_STOP |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
+			DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
+			bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
+			break;
+
+		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
+		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
+			DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
+			bp->set_mac_pending = 0;
+			break;
+
+		case (EVENT_RING_OPCODE_SET_MAC |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
+			DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
+			bp->set_mac_pending = 0;
+			break;
+		default:
+			/* unknown event, log error and continue */
+			BNX2X_ERR("Unknown EQ event %d\n",
+				  elem->message.opcode);
+		}
+next_spqe:
+		spqe_cnt++;
+	} /* for */
+
+	bp->spq_left++;
+
+	bp->eq_cons = sw_cons;
+	bp->eq_prod = sw_prod;
+	/* Make sure that above mem writes were issued towards the memory */
+	smp_wmb();
+
+	/* update producer */
+	bnx2x_update_eq_prod(bp, bp->eq_prod);
+}
+
 static void bnx2x_sp_task(struct work_struct *work)
 {
 	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
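The comment in bnx2x_eq_int() above describes two index quirks: the last descriptor of each event-ring page is a next-page pointer the software index must skip, and a hardware consumer that lands on a page boundary is bumped by one so the "sw_cons != hw_cons" loop terminates. The sketch below demonstrates only the skip-on-advance part; the page geometry is invented for the demo and is much smaller than the real EQ.

#include <stdint.h>
#include <stdio.h>

#define EQ_DESC_PER_PAGE	8	/* assumed; the real value is larger */
#define EQ_USABLE_PER_PAGE	(EQ_DESC_PER_PAGE - 1)
#define EQ_PAGES		2
#define EQ_DESC_TOTAL		(EQ_DESC_PER_PAGE * EQ_PAGES)

/* advance one element, skipping the next-page descriptor */
static uint16_t next_eq_idx(uint16_t idx)
{
	idx++;
	if ((idx % EQ_DESC_PER_PAGE) == EQ_USABLE_PER_PAGE)
		idx++;
	return idx % EQ_DESC_TOTAL;
}

int main(void)
{
	uint16_t idx = 5;
	int i;

	for (i = 0; i < 6; i++) {
		printf("%u ", idx);
		idx = next_eq_idx(idx);
	}
	printf("\n");	/* index 7 (the page boundary) never appears */
	return 0;
}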
@@ -2616,31 +3389,29 @@ static void bnx2x_sp_task(struct work_struct *work)
 	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
 
 	/* HW attentions */
-	if (status & 0x1) {
+	if (status & BNX2X_DEF_SB_ATT_IDX) {
 		bnx2x_attn_int(bp);
-		status &= ~0x1;
+		status &= ~BNX2X_DEF_SB_ATT_IDX;
 	}
 
-	/* CStorm events: STAT_QUERY */
-	if (status & 0x2) {
-		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
-		status &= ~0x2;
+	/* SP events: STAT_QUERY and others */
+	if (status & BNX2X_DEF_SB_IDX) {
+
+		/* Handle EQ completions */
+		bnx2x_eq_int(bp);
+
+		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
+			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+
+		status &= ~BNX2X_DEF_SB_IDX;
 	}
 
 	if (unlikely(status))
 		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
 		   status);
 
-	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
-		     IGU_INT_NOP, 1);
-	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
-		     IGU_INT_NOP, 1);
-	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
-		     IGU_INT_NOP, 1);
-	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
-		     IGU_INT_NOP, 1);
-	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
-		     IGU_INT_ENABLE, 1);
+	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
+	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
 }
 
 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -2654,7 +3425,8 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 		return IRQ_HANDLED;
 	}
 
-	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
+	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
+		     IGU_INT_DISABLE, 0);
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -2736,232 +3508,234 @@ timer_restart: | |||
2736 | * nic init service functions | 3508 | * nic init service functions |
2737 | */ | 3509 | */ |
2738 | 3510 | ||
2739 | static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id) | 3511 | static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) |
2740 | { | 3512 | { |
2741 | int port = BP_PORT(bp); | 3513 | u32 i; |
3514 | if (!(len%4) && !(addr%4)) | ||
3515 | for (i = 0; i < len; i += 4) | ||
3516 | REG_WR(bp, addr + i, fill); | ||
3517 | else | ||
3518 | for (i = 0; i < len; i++) | ||
3519 | REG_WR8(bp, addr + i, fill); | ||
2742 | 3520 | ||
2743 | /* "CSTORM" */ | ||
2744 | bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + | ||
2745 | CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0, | ||
2746 | CSTORM_SB_STATUS_BLOCK_U_SIZE / 4); | ||
2747 | bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + | ||
2748 | CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0, | ||
2749 | CSTORM_SB_STATUS_BLOCK_C_SIZE / 4); | ||
2750 | } | 3521 | } |
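
bnx2x_fill() chooses dword writes only when both the target address and the length are 4-byte aligned, and falls back to byte writes otherwise. A standalone sketch of that decision, with reg_wr32()/reg_wr8() as hypothetical stand-ins for the driver's REG_WR()/REG_WR8() accessors:

```c
#include <stdio.h>

static void reg_wr32(unsigned addr, unsigned val)
{
	printf("wr32 0x%x <- 0x%x\n", addr, val);
}

static void reg_wr8(unsigned addr, unsigned char val)
{
	printf("wr8  0x%x <- 0x%x\n", addr, (unsigned)val);
}

static void fill(unsigned addr, unsigned val, unsigned len)
{
	unsigned i;

	if (!(len % 4) && !(addr % 4))		/* dword-aligned fast path */
		for (i = 0; i < len; i += 4)
			reg_wr32(addr + i, val);
	else					/* unaligned: byte-wide writes */
		for (i = 0; i < len; i++)
			reg_wr8(addr + i, (unsigned char)val);
}

int main(void)
{
	fill(0x1000, 0, 16);	/* four dword writes */
	fill(0x1001, 0, 3);	/* three byte writes */
	return 0;
}
```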
2751 | 3522 | ||
2752 | void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, | 3523 | /* helper: writes FP SP data to FW - data_size in dwords */ |
2753 | dma_addr_t mapping, int sb_id) | 3524 | static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp, |
3525 | int fw_sb_id, | ||
3526 | u32 *sb_data_p, | ||
3527 | u32 data_size) | ||
2754 | { | 3528 | { |
2755 | int port = BP_PORT(bp); | ||
2756 | int func = BP_FUNC(bp); | ||
2757 | int index; | 3529 | int index; |
2758 | u64 section; | 3530 | for (index = 0; index < data_size; index++) |
3531 | REG_WR(bp, BAR_CSTRORM_INTMEM + | ||
3532 | CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + | ||
3533 | sizeof(u32)*index, | ||
3534 | *(sb_data_p + index)); | ||
3535 | } | ||
2759 | 3536 | ||
2760 | /* USTORM */ | 3537 | static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) |
2761 | section = ((u64)mapping) + offsetof(struct host_status_block, | 3538 | { |
2762 | u_status_block); | 3539 | u32 *sb_data_p; |
2763 | sb->u_status_block.status_block_id = sb_id; | 3540 | u32 data_size = 0; |
2764 | 3541 | struct hc_status_block_data_e1x sb_data_e1x; | |
2765 | REG_WR(bp, BAR_CSTRORM_INTMEM + | ||
2766 | CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section)); | ||
2767 | REG_WR(bp, BAR_CSTRORM_INTMEM + | ||
2768 | ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4), | ||
2769 | U64_HI(section)); | ||
2770 | REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF + | ||
2771 | CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func); | ||
2772 | |||
2773 | for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++) | ||
2774 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | ||
2775 | CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1); | ||
2776 | 3542 | ||
2777 | /* CSTORM */ | 3543 | /* disable the function first */ |
2778 | section = ((u64)mapping) + offsetof(struct host_status_block, | 3544 | memset(&sb_data_e1x, 0, |
2779 | c_status_block); | 3545 | sizeof(struct hc_status_block_data_e1x)); |
2780 | sb->c_status_block.status_block_id = sb_id; | 3546 | sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED; |
3547 | sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED; | ||
3548 | sb_data_e1x.common.p_func.vf_valid = false; | ||
3549 | sb_data_p = (u32 *)&sb_data_e1x; | ||
3550 | data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); | ||
2781 | 3551 | ||
2782 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 3552 | bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); |
2783 | CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section)); | ||
2784 | REG_WR(bp, BAR_CSTRORM_INTMEM + | ||
2785 | ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4), | ||
2786 | U64_HI(section)); | ||
2787 | REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF + | ||
2788 | CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func); | ||
2789 | 3553 | ||
2790 | for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++) | 3554 | bnx2x_fill(bp, BAR_CSTRORM_INTMEM + |
2791 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | 3555 | CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0, |
2792 | CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1); | 3556 | CSTORM_STATUS_BLOCK_SIZE); |
3557 | bnx2x_fill(bp, BAR_CSTRORM_INTMEM + | ||
3558 | CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0, | ||
3559 | CSTORM_SYNC_BLOCK_SIZE); | ||
3560 | } | ||
2793 | 3561 | ||
2794 | bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | 3562 | /* helper: writes SP SB data to FW */ |
3563 | static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp, | ||
3564 | struct hc_sp_status_block_data *sp_sb_data) | ||
3565 | { | ||
3566 | int func = BP_FUNC(bp); | ||
3567 | int i; | ||
3568 | for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) | ||
3569 | REG_WR(bp, BAR_CSTRORM_INTMEM + | ||
3570 | CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + | ||
3571 | i*sizeof(u32), | ||
3572 | *((u32 *)sp_sb_data + i)); | ||
2795 | } | 3573 | } |
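
Both bnx2x_wr_fp_sb_data() and bnx2x_wr_sp_sb_data() copy a host structure into chip memory one 32-bit word at a time. A sketch of the pattern, assuming (as with the HSI status-block structures) that the structure size is a whole number of dwords; sb_data here is a made-up stand-in:

```c
#include <stdio.h>

struct sb_data {			/* hypothetical HSI-like structure */
	unsigned host_addr_lo;
	unsigned host_addr_hi;
	unsigned flags;
};

static void reg_wr32(unsigned addr, unsigned val)
{
	printf("wr32 0x%x <- 0x%08x\n", addr, val);
}

static void wr_sb_data(unsigned base, const struct sb_data *sb)
{
	const unsigned *p = (const unsigned *)sb;
	unsigned i;

	for (i = 0; i < sizeof(*sb) / sizeof(unsigned); i++)
		reg_wr32(base + i * (unsigned)sizeof(unsigned), p[i]);
}

int main(void)
{
	struct sb_data sb = { 0x12340000, 0x0, 0x1 };

	wr_sb_data(0x6000, &sb);
	return 0;
}
```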
2796 | 3574 | ||
2797 | static void bnx2x_zero_def_sb(struct bnx2x *bp) | 3575 | static inline void bnx2x_zero_sp_sb(struct bnx2x *bp) |
2798 | { | 3576 | { |
2799 | int func = BP_FUNC(bp); | 3577 | int func = BP_FUNC(bp); |
3578 | struct hc_sp_status_block_data sp_sb_data; | ||
3579 | memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); | ||
3580 | |||
3581 | sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED; | ||
3582 | sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED; | ||
3583 | sp_sb_data.p_func.vf_valid = false; | ||
3584 | |||
3585 | bnx2x_wr_sp_sb_data(bp, &sp_sb_data); | ||
3586 | |||
3587 | bnx2x_fill(bp, BAR_CSTRORM_INTMEM + | ||
3588 | CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0, | ||
3589 | CSTORM_SP_STATUS_BLOCK_SIZE); | ||
3590 | bnx2x_fill(bp, BAR_CSTRORM_INTMEM + | ||
3591 | CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, | ||
3592 | CSTORM_SP_SYNC_BLOCK_SIZE); | ||
3593 | |||
3594 | } | ||
3595 | |||
3596 | |||
3597 | static inline | ||
3598 | void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, | ||
3599 | int igu_sb_id, int igu_seg_id) | ||
3600 | { | ||
3601 | hc_sm->igu_sb_id = igu_sb_id; | ||
3602 | hc_sm->igu_seg_id = igu_seg_id; | ||
3603 | hc_sm->timer_value = 0xFF; | ||
3604 | hc_sm->time_to_expire = 0xFFFFFFFF; | ||
3605 | } | ||
3606 | |||
3607 | void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | ||
3608 | u8 vf_valid, int fw_sb_id, int igu_sb_id) | ||
3609 | { | ||
3610 | int igu_seg_id; | ||
3611 | |||
3612 | struct hc_status_block_data_e1x sb_data_e1x; | ||
3613 | struct hc_status_block_sm *hc_sm_p; | ||
3614 | struct hc_index_data *hc_index_p; | ||
3615 | int data_size; | ||
3616 | u32 *sb_data_p; | ||
3617 | |||
3618 | igu_seg_id = HC_SEG_ACCESS_NORM; | ||
3619 | |||
3620 | bnx2x_zero_fp_sb(bp, fw_sb_id); | ||
3621 | |||
3622 | memset(&sb_data_e1x, 0, | ||
3623 | sizeof(struct hc_status_block_data_e1x)); | ||
3624 | sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp); | ||
3625 | sb_data_e1x.common.p_func.vf_id = 0xff; | ||
3626 | sb_data_e1x.common.p_func.vf_valid = false; | ||
3627 | sb_data_e1x.common.p_func.vnic_id = BP_E1HVN(bp); | ||
3628 | sb_data_e1x.common.same_igu_sb_1b = true; | ||
3629 | sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping); | ||
3630 | sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping); | ||
3631 | hc_sm_p = sb_data_e1x.common.state_machine; | ||
3632 | hc_index_p = sb_data_e1x.index_data; | ||
3633 | sb_data_p = (u32 *)&sb_data_e1x; | ||
3634 | data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); | ||
3635 | |||
3636 | |||
3637 | bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], | ||
3638 | igu_sb_id, igu_seg_id); | ||
3639 | bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], | ||
3640 | igu_sb_id, igu_seg_id); | ||
3641 | |||
3642 | DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id); | ||
3643 | |||
3644 | /* write indices to HW */ | ||
3645 | bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); | ||
3646 | } | ||
3647 | |||
3648 | static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id, | ||
3649 | u8 sb_index, u8 disable, u16 usec) | ||
3650 | { | ||
3651 | int port = BP_PORT(bp); | ||
3652 | u8 ticks = usec / BNX2X_BTR; | ||
2800 | 3653 | ||
2801 | bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY + | 3654 | storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); |
2802 | TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, | 3655 | |
2803 | sizeof(struct tstorm_def_status_block)/4); | 3656 | disable = disable ? 1 : (usec ? 0 : 1); |
2804 | bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + | 3657 | storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); |
2805 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0, | ||
2806 | sizeof(struct cstorm_def_status_block_u)/4); | ||
2807 | bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + | ||
2808 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0, | ||
2809 | sizeof(struct cstorm_def_status_block_c)/4); | ||
2810 | bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY + | ||
2811 | XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, | ||
2812 | sizeof(struct xstorm_def_status_block)/4); | ||
2813 | } | 3658 | } |
2814 | 3659 | ||
2815 | static void bnx2x_init_def_sb(struct bnx2x *bp, | 3660 | static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id, |
2816 | struct host_def_status_block *def_sb, | 3661 | u16 tx_usec, u16 rx_usec) |
2817 | dma_addr_t mapping, int sb_id) | ||
2818 | { | 3662 | { |
3663 | bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX, | ||
3664 | false, rx_usec); | ||
3665 | bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX, | ||
3666 | false, tx_usec); | ||
3667 | } | ||
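
bnx2x_update_coalesce_sb_index() converts the requested interval from microseconds into HC timer ticks and forces the disable bit whenever the interval is zero. A worked example; the 4-usec tick below is an assumption standing in for BNX2X_BTR:

```c
#include <stdio.h>

#define BTR 4	/* hypothetical tick resolution (the driver's BNX2X_BTR) */

static void show(unsigned usec, unsigned disable)
{
	unsigned ticks = usec / BTR;

	/* a zero interval means "no coalescing": force the disable bit */
	disable = disable ? 1 : (usec ? 0 : 1);
	printf("usec=%u -> ticks=%u disable=%u\n", usec, ticks, disable);
}

int main(void)
{
	show(25, 0);	/* 6 ticks, index enabled */
	show(0, 0);	/* 0 ticks, index disabled */
	return 0;
}
```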
3668 | static void bnx2x_init_def_sb(struct bnx2x *bp) | ||
3669 | { | ||
3670 | struct host_sp_status_block *def_sb = bp->def_status_blk; | ||
3671 | dma_addr_t mapping = bp->def_status_blk_mapping; | ||
3672 | int igu_sp_sb_index; | ||
3673 | int igu_seg_id; | ||
2819 | int port = BP_PORT(bp); | 3674 | int port = BP_PORT(bp); |
2820 | int func = BP_FUNC(bp); | 3675 | int func = BP_FUNC(bp); |
2821 | int index, val, reg_offset; | 3676 | int reg_offset; |
2822 | u64 section; | 3677 | u64 section; |
3678 | int index; | ||
3679 | struct hc_sp_status_block_data sp_sb_data; | ||
3680 | memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); | ||
3681 | |||
3682 | igu_sp_sb_index = DEF_SB_IGU_ID; | ||
3683 | igu_seg_id = HC_SEG_ACCESS_DEF; | ||
2823 | 3684 | ||
2824 | /* ATTN */ | 3685 | /* ATTN */ |
2825 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | 3686 | section = ((u64)mapping) + offsetof(struct host_sp_status_block, |
2826 | atten_status_block); | 3687 | atten_status_block); |
2827 | def_sb->atten_status_block.status_block_id = sb_id; | 3688 | def_sb->atten_status_block.status_block_id = igu_sp_sb_index; |
2828 | 3689 | ||
2829 | bp->attn_state = 0; | 3690 | bp->attn_state = 0; |
2830 | 3691 | ||
2831 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 3692 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
2832 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | 3693 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); |
2833 | |||
2834 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { | 3694 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { |
2835 | bp->attn_group[index].sig[0] = REG_RD(bp, | 3695 | int sindex; |
2836 | reg_offset + 0x10*index); | 3696 | /* take care of sig[0]..sig[3] */ |
2837 | bp->attn_group[index].sig[1] = REG_RD(bp, | 3697 | for (sindex = 0; sindex < 4; sindex++) |
2838 | reg_offset + 0x4 + 0x10*index); | 3698 | bp->attn_group[index].sig[sindex] = |
2839 | bp->attn_group[index].sig[2] = REG_RD(bp, | 3699 | REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); |
2840 | reg_offset + 0x8 + 0x10*index); | ||
2841 | bp->attn_group[index].sig[3] = REG_RD(bp, | ||
2842 | reg_offset + 0xc + 0x10*index); | ||
2843 | } | 3700 | } |
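
The consolidated loop above assumes the AEU layout keeps four 32-bit signal words per attention group, 0x4 bytes apart, with 0x10 bytes between groups. A worked example of the offsets it generates (the base register value is hypothetical):

```c
#include <stdio.h>

int main(void)
{
	unsigned base = 0xa06c;		/* hypothetical AEU enable register */
	int index, sindex;

	for (index = 0; index < 2; index++)
		for (sindex = 0; sindex < 4; sindex++)
			printf("attn_group[%d].sig[%d] <- reg 0x%x\n",
			       index, sindex,
			       base + sindex * 0x4 + 0x10 * index);
	return 0;
}
```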
2844 | 3701 | ||
2845 | reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : | 3702 | reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : |
2846 | HC_REG_ATTN_MSG0_ADDR_L); | 3703 | HC_REG_ATTN_MSG0_ADDR_L); |
2847 | |||
2848 | REG_WR(bp, reg_offset, U64_LO(section)); | 3704 | REG_WR(bp, reg_offset, U64_LO(section)); |
2849 | REG_WR(bp, reg_offset + 4, U64_HI(section)); | 3705 | REG_WR(bp, reg_offset + 4, U64_HI(section)); |
2850 | 3706 | ||
2851 | reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0); | 3707 | section = ((u64)mapping) + offsetof(struct host_sp_status_block, |
2852 | 3708 | sp_sb); | |
2853 | val = REG_RD(bp, reg_offset); | ||
2854 | val |= sb_id; | ||
2855 | REG_WR(bp, reg_offset, val); | ||
2856 | |||
2857 | /* USTORM */ | ||
2858 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | ||
2859 | u_def_status_block); | ||
2860 | def_sb->u_def_status_block.status_block_id = sb_id; | ||
2861 | |||
2862 | REG_WR(bp, BAR_CSTRORM_INTMEM + | ||
2863 | CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section)); | ||
2864 | REG_WR(bp, BAR_CSTRORM_INTMEM + | ||
2865 | ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4), | ||
2866 | U64_HI(section)); | ||
2867 | REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF + | ||
2868 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func); | ||
2869 | |||
2870 | for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) | ||
2871 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | ||
2872 | CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1); | ||
2873 | 3709 | ||
2874 | /* CSTORM */ | 3710 | bnx2x_zero_sp_sb(bp); |
2875 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | ||
2876 | c_def_status_block); | ||
2877 | def_sb->c_def_status_block.status_block_id = sb_id; | ||
2878 | |||
2879 | REG_WR(bp, BAR_CSTRORM_INTMEM + | ||
2880 | CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section)); | ||
2881 | REG_WR(bp, BAR_CSTRORM_INTMEM + | ||
2882 | ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4), | ||
2883 | U64_HI(section)); | ||
2884 | REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF + | ||
2885 | CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func); | ||
2886 | |||
2887 | for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) | ||
2888 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | ||
2889 | CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1); | ||
2890 | 3711 | ||
2891 | /* TSTORM */ | 3712 | sp_sb_data.host_sb_addr.lo = U64_LO(section); |
2892 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | 3713 | sp_sb_data.host_sb_addr.hi = U64_HI(section); |
2893 | t_def_status_block); | 3714 | sp_sb_data.igu_sb_id = igu_sp_sb_index; |
2894 | def_sb->t_def_status_block.status_block_id = sb_id; | 3715 | sp_sb_data.igu_seg_id = igu_seg_id; |
2895 | 3716 | sp_sb_data.p_func.pf_id = func; | |
2896 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3717 | sp_sb_data.p_func.vnic_id = BP_E1HVN(bp); |
2897 | TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); | 3718 | sp_sb_data.p_func.vf_id = 0xff; |
2898 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
2899 | ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), | ||
2900 | U64_HI(section)); | ||
2901 | REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF + | ||
2902 | TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | ||
2903 | |||
2904 | for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++) | ||
2905 | REG_WR16(bp, BAR_TSTRORM_INTMEM + | ||
2906 | TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); | ||
2907 | 3719 | ||
2908 | /* XSTORM */ | 3720 | bnx2x_wr_sp_sb_data(bp, &sp_sb_data); |
2909 | section = ((u64)mapping) + offsetof(struct host_def_status_block, | ||
2910 | x_def_status_block); | ||
2911 | def_sb->x_def_status_block.status_block_id = sb_id; | ||
2912 | |||
2913 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
2914 | XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); | ||
2915 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
2916 | ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), | ||
2917 | U64_HI(section)); | ||
2918 | REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF + | ||
2919 | XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); | ||
2920 | |||
2921 | for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++) | ||
2922 | REG_WR16(bp, BAR_XSTRORM_INTMEM + | ||
2923 | XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); | ||
2924 | 3721 | ||
2925 | bp->stats_pending = 0; | 3722 | bp->stats_pending = 0; |
2926 | bp->set_mac_pending = 0; | 3723 | bp->set_mac_pending = 0; |
2927 | 3724 | ||
2928 | bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | 3725 | bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); |
2929 | } | 3726 | } |
2930 | 3727 | ||
2931 | void bnx2x_update_coalesce(struct bnx2x *bp) | 3728 | void bnx2x_update_coalesce(struct bnx2x *bp) |
2932 | { | 3729 | { |
2933 | int port = BP_PORT(bp); | ||
2934 | int i; | 3730 | int i; |
2935 | 3731 | ||
2936 | for_each_queue(bp, i) { | 3732 | for_each_queue(bp, i) |
2937 | int sb_id = bp->fp[i].sb_id; | 3733 | bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, |
2938 | 3734 | bp->rx_ticks, bp->tx_ticks); | |
2939 | /* HC_INDEX_U_ETH_RX_CQ_CONS */ | ||
2940 | REG_WR8(bp, BAR_CSTRORM_INTMEM + | ||
2941 | CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id, | ||
2942 | U_SB_ETH_RX_CQ_INDEX), | ||
2943 | bp->rx_ticks/(4 * BNX2X_BTR)); | ||
2944 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | ||
2945 | CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, | ||
2946 | U_SB_ETH_RX_CQ_INDEX), | ||
2947 | (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1); | ||
2948 | |||
2949 | /* HC_INDEX_C_ETH_TX_CQ_CONS */ | ||
2950 | REG_WR8(bp, BAR_CSTRORM_INTMEM + | ||
2951 | CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, | ||
2952 | C_SB_ETH_TX_CQ_INDEX), | ||
2953 | bp->tx_ticks/(4 * BNX2X_BTR)); | ||
2954 | REG_WR16(bp, BAR_CSTRORM_INTMEM + | ||
2955 | CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, | ||
2956 | C_SB_ETH_TX_CQ_INDEX), | ||
2957 | (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1); | ||
2958 | } | ||
2959 | } | 3735 | } |
2960 | 3736 | ||
2961 | static void bnx2x_init_sp_ring(struct bnx2x *bp) | 3737 | static void bnx2x_init_sp_ring(struct bnx2x *bp) |
2962 | { | 3738 | { |
2963 | int func = BP_FUNC(bp); | ||
2964 | |||
2965 | spin_lock_init(&bp->spq_lock); | 3739 | spin_lock_init(&bp->spq_lock); |
2966 | 3740 | ||
2967 | bp->spq_left = MAX_SPQ_PENDING; | 3741 | bp->spq_left = MAX_SPQ_PENDING; |
@@ -2969,91 +3743,25 @@ static void bnx2x_init_sp_ring(struct bnx2x *bp) | |||
2969 | bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; | 3743 | bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; |
2970 | bp->spq_prod_bd = bp->spq; | 3744 | bp->spq_prod_bd = bp->spq; |
2971 | bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; | 3745 | bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; |
2972 | |||
2973 | REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func), | ||
2974 | U64_LO(bp->spq_mapping)); | ||
2975 | REG_WR(bp, | ||
2976 | XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4, | ||
2977 | U64_HI(bp->spq_mapping)); | ||
2978 | |||
2979 | REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func), | ||
2980 | bp->spq_prod_idx); | ||
2981 | } | 3746 | } |
2982 | 3747 | ||
2983 | static void bnx2x_init_context(struct bnx2x *bp) | 3748 | static void bnx2x_init_eq_ring(struct bnx2x *bp) |
2984 | { | 3749 | { |
2985 | int i; | 3750 | int i; |
3751 | for (i = 1; i <= NUM_EQ_PAGES; i++) { | ||
3752 | union event_ring_elem *elem = | ||
3753 | &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1]; | ||
2986 | 3754 | ||
2987 | /* Rx */ | 3755 | elem->next_page.addr.hi = |
2988 | for_each_queue(bp, i) { | 3756 | cpu_to_le32(U64_HI(bp->eq_mapping + |
2989 | struct eth_context *context = bnx2x_sp(bp, context[i].eth); | 3757 | BCM_PAGE_SIZE * (i % NUM_EQ_PAGES))); |
2990 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 3758 | elem->next_page.addr.lo = |
2991 | u8 cl_id = fp->cl_id; | 3759 | cpu_to_le32(U64_LO(bp->eq_mapping + |
2992 | 3760 | BCM_PAGE_SIZE*(i % NUM_EQ_PAGES))); | |
2993 | context->ustorm_st_context.common.sb_index_numbers = | ||
2994 | BNX2X_RX_SB_INDEX_NUM; | ||
2995 | context->ustorm_st_context.common.clientId = cl_id; | ||
2996 | context->ustorm_st_context.common.status_block_id = fp->sb_id; | ||
2997 | context->ustorm_st_context.common.flags = | ||
2998 | (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT | | ||
2999 | USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS); | ||
3000 | context->ustorm_st_context.common.statistics_counter_id = | ||
3001 | cl_id; | ||
3002 | context->ustorm_st_context.common.mc_alignment_log_size = | ||
3003 | BNX2X_RX_ALIGN_SHIFT; | ||
3004 | context->ustorm_st_context.common.bd_buff_size = | ||
3005 | bp->rx_buf_size; | ||
3006 | context->ustorm_st_context.common.bd_page_base_hi = | ||
3007 | U64_HI(fp->rx_desc_mapping); | ||
3008 | context->ustorm_st_context.common.bd_page_base_lo = | ||
3009 | U64_LO(fp->rx_desc_mapping); | ||
3010 | if (!fp->disable_tpa) { | ||
3011 | context->ustorm_st_context.common.flags |= | ||
3012 | USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA; | ||
3013 | context->ustorm_st_context.common.sge_buff_size = | ||
3014 | (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE, | ||
3015 | 0xffff); | ||
3016 | context->ustorm_st_context.common.sge_page_base_hi = | ||
3017 | U64_HI(fp->rx_sge_mapping); | ||
3018 | context->ustorm_st_context.common.sge_page_base_lo = | ||
3019 | U64_LO(fp->rx_sge_mapping); | ||
3020 | |||
3021 | context->ustorm_st_context.common.max_sges_for_packet = | ||
3022 | SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT; | ||
3023 | context->ustorm_st_context.common.max_sges_for_packet = | ||
3024 | ((context->ustorm_st_context.common. | ||
3025 | max_sges_for_packet + PAGES_PER_SGE - 1) & | ||
3026 | (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT; | ||
3027 | } | ||
3028 | |||
3029 | context->ustorm_ag_context.cdu_usage = | ||
3030 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i), | ||
3031 | CDU_REGION_NUMBER_UCM_AG, | ||
3032 | ETH_CONNECTION_TYPE); | ||
3033 | |||
3034 | context->xstorm_ag_context.cdu_reserved = | ||
3035 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i), | ||
3036 | CDU_REGION_NUMBER_XCM_AG, | ||
3037 | ETH_CONNECTION_TYPE); | ||
3038 | } | ||
3039 | |||
3040 | /* Tx */ | ||
3041 | for_each_queue(bp, i) { | ||
3042 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
3043 | struct eth_context *context = | ||
3044 | bnx2x_sp(bp, context[i].eth); | ||
3045 | |||
3046 | context->cstorm_st_context.sb_index_number = | ||
3047 | C_SB_ETH_TX_CQ_INDEX; | ||
3048 | context->cstorm_st_context.status_block_id = fp->sb_id; | ||
3049 | |||
3050 | context->xstorm_st_context.tx_bd_page_base_hi = | ||
3051 | U64_HI(fp->tx_desc_mapping); | ||
3052 | context->xstorm_st_context.tx_bd_page_base_lo = | ||
3053 | U64_LO(fp->tx_desc_mapping); | ||
3054 | context->xstorm_st_context.statistics_data = (fp->cl_id | | ||
3055 | XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE); | ||
3056 | } | 3761 | } |
3762 | bp->eq_cons = 0; | ||
3763 | bp->eq_prod = NUM_EQ_DESC; | ||
3764 | bp->eq_cons_sb = BNX2X_EQ_INDEX; | ||
3057 | } | 3765 | } |
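
bnx2x_init_eq_ring() chains the event ring's pages through the last element of each page; the (i % NUM_EQ_PAGES) term makes the final page point back at page 0, closing the ring. A standalone sketch with made-up page geometry:

```c
#include <stdio.h>

#define NUM_PAGES	2
#define PAGE_SIZE_B	4096
#define DESC_SZ		8			/* hypothetical element size */
#define DESC_PER_PAGE	(PAGE_SIZE_B / DESC_SZ)

int main(void)
{
	unsigned long long ring_base = 0x100000;	/* hypothetical DMA base */
	int i;

	for (i = 1; i <= NUM_PAGES; i++) {
		int last = DESC_PER_PAGE * i - 1;	/* last elem of page i-1 */
		unsigned long long next =
			ring_base + (unsigned long long)PAGE_SIZE_B * (i % NUM_PAGES);

		printf("elem[%4d].next_page -> 0x%llx\n", last, next);
	}
	return 0;
}
```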
3058 | 3766 | ||
3059 | static void bnx2x_init_ind_table(struct bnx2x *bp) | 3767 | static void bnx2x_init_ind_table(struct bnx2x *bp) |
@@ -3072,47 +3780,11 @@ static void bnx2x_init_ind_table(struct bnx2x *bp) | |||
3072 | bp->fp->cl_id + (i % bp->num_queues)); | 3780 | bp->fp->cl_id + (i % bp->num_queues)); |
3073 | } | 3781 | } |
3074 | 3782 | ||
3075 | void bnx2x_set_client_config(struct bnx2x *bp) | ||
3076 | { | ||
3077 | struct tstorm_eth_client_config tstorm_client = {0}; | ||
3078 | int port = BP_PORT(bp); | ||
3079 | int i; | ||
3080 | |||
3081 | tstorm_client.mtu = bp->dev->mtu; | ||
3082 | tstorm_client.config_flags = | ||
3083 | (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE | | ||
3084 | TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE); | ||
3085 | #ifdef BCM_VLAN | ||
3086 | if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) { | ||
3087 | tstorm_client.config_flags |= | ||
3088 | TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE; | ||
3089 | DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); | ||
3090 | } | ||
3091 | #endif | ||
3092 | |||
3093 | for_each_queue(bp, i) { | ||
3094 | tstorm_client.statistics_counter_id = bp->fp[i].cl_id; | ||
3095 | |||
3096 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
3097 | TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id), | ||
3098 | ((u32 *)&tstorm_client)[0]); | ||
3099 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
3100 | TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4, | ||
3101 | ((u32 *)&tstorm_client)[1]); | ||
3102 | } | ||
3103 | |||
3104 | DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n", | ||
3105 | ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); | ||
3106 | } | ||
3107 | |||
3108 | void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | 3783 | void bnx2x_set_storm_rx_mode(struct bnx2x *bp) |
3109 | { | 3784 | { |
3110 | struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; | ||
3111 | int mode = bp->rx_mode; | 3785 | int mode = bp->rx_mode; |
3112 | int mask = bp->rx_mode_cl_mask; | 3786 | u16 cl_id; |
3113 | int func = BP_FUNC(bp); | 3787 | |
3114 | int port = BP_PORT(bp); | ||
3115 | int i; | ||
3116 | /* All but management unicast packets should pass to the host as well */ | 3788 | /* All but management unicast packets should pass to the host as well */ |
3117 | u32 llh_mask = | 3789 | u32 llh_mask = |
3118 | NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST | | 3790 | NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST | |
@@ -3120,28 +3792,32 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
3120 | NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN | | 3792 | NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN | |
3121 | NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN; | 3793 | NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN; |
3122 | 3794 | ||
3123 | DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask); | ||
3124 | |||
3125 | switch (mode) { | 3795 | switch (mode) { |
3126 | case BNX2X_RX_MODE_NONE: /* no Rx */ | 3796 | case BNX2X_RX_MODE_NONE: /* no Rx */ |
3127 | tstorm_mac_filter.ucast_drop_all = mask; | 3797 | cl_id = BP_L_ID(bp); |
3128 | tstorm_mac_filter.mcast_drop_all = mask; | 3798 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE); |
3129 | tstorm_mac_filter.bcast_drop_all = mask; | ||
3130 | break; | 3799 | break; |
3131 | 3800 | ||
3132 | case BNX2X_RX_MODE_NORMAL: | 3801 | case BNX2X_RX_MODE_NORMAL: |
3133 | tstorm_mac_filter.bcast_accept_all = mask; | 3802 | cl_id = BP_L_ID(bp); |
3803 | bnx2x_rxq_set_mac_filters(bp, cl_id, | ||
3804 | BNX2X_ACCEPT_UNICAST | | ||
3805 | BNX2X_ACCEPT_BROADCAST | | ||
3806 | BNX2X_ACCEPT_MULTICAST); | ||
3134 | break; | 3807 | break; |
3135 | 3808 | ||
3136 | case BNX2X_RX_MODE_ALLMULTI: | 3809 | case BNX2X_RX_MODE_ALLMULTI: |
3137 | tstorm_mac_filter.mcast_accept_all = mask; | 3810 | cl_id = BP_L_ID(bp); |
3138 | tstorm_mac_filter.bcast_accept_all = mask; | 3811 | bnx2x_rxq_set_mac_filters(bp, cl_id, |
3812 | BNX2X_ACCEPT_UNICAST | | ||
3813 | BNX2X_ACCEPT_BROADCAST | | ||
3814 | BNX2X_ACCEPT_ALL_MULTICAST); | ||
3139 | break; | 3815 | break; |
3140 | 3816 | ||
3141 | case BNX2X_RX_MODE_PROMISC: | 3817 | case BNX2X_RX_MODE_PROMISC: |
3142 | tstorm_mac_filter.ucast_accept_all = mask; | 3818 | cl_id = BP_L_ID(bp); |
3143 | tstorm_mac_filter.mcast_accept_all = mask; | 3819 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE); |
3144 | tstorm_mac_filter.bcast_accept_all = mask; | 3820 | |
3145 | /* pass management unicast packets as well */ | 3821 | /* pass management unicast packets as well */ |
3146 | llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; | 3822 | llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; |
3147 | break; | 3823 | break; |
@@ -3152,256 +3828,52 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
3152 | } | 3828 | } |
3153 | 3829 | ||
3154 | REG_WR(bp, | 3830 | REG_WR(bp, |
3155 | (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK), | 3831 | BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK : |
3832 | NIG_REG_LLH0_BRB1_DRV_MASK, | ||
3156 | llh_mask); | 3833 | llh_mask); |
3157 | 3834 | ||
3158 | for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) { | 3835 | DP(NETIF_MSG_IFUP, "rx mode %d\n" |
3159 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 3836 | "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n" |
3160 | TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4, | 3837 | "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode, |
3161 | ((u32 *)&tstorm_mac_filter)[i]); | 3838 | bp->mac_filters.ucast_drop_all, |
3839 | bp->mac_filters.mcast_drop_all, | ||
3840 | bp->mac_filters.bcast_drop_all, | ||
3841 | bp->mac_filters.ucast_accept_all, | ||
3842 | bp->mac_filters.mcast_accept_all, | ||
3843 | bp->mac_filters.bcast_accept_all | ||
3844 | ); | ||
3162 | 3845 | ||
3163 | /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i, | 3846 | storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp)); |
3164 | ((u32 *)&tstorm_mac_filter)[i]); */ | ||
3165 | } | ||
3166 | |||
3167 | if (mode != BNX2X_RX_MODE_NONE) | ||
3168 | bnx2x_set_client_config(bp); | ||
3169 | } | 3847 | } |
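
The rewritten rx-mode handler no longer fills a tstorm filter structure directly; each mode is expressed as a set of accept flags handed to bnx2x_rxq_set_mac_filters(). A sketch of the flag composition; the bit values below are illustrative, not the driver's real BNX2X_ACCEPT_* definitions:

```c
#include <stdio.h>

#define ACCEPT_NONE		0x0	/* illustrative bit assignments */
#define ACCEPT_UNICAST		0x1
#define ACCEPT_MULTICAST	0x2
#define ACCEPT_ALL_MULTICAST	0x4
#define ACCEPT_BROADCAST	0x8

static unsigned rx_mode_flags(int allmulti)
{
	unsigned flags = ACCEPT_UNICAST | ACCEPT_BROADCAST;

	flags |= allmulti ? ACCEPT_ALL_MULTICAST : ACCEPT_MULTICAST;
	return flags;
}

int main(void)
{
	printf("normal:   0x%x\n", rx_mode_flags(0));
	printf("allmulti: 0x%x\n", rx_mode_flags(1));
	return 0;
}
```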
3170 | 3848 | ||
3171 | static void bnx2x_init_internal_common(struct bnx2x *bp) | 3849 | static void bnx2x_init_internal_common(struct bnx2x *bp) |
3172 | { | 3850 | { |
3173 | int i; | 3851 | int i; |
3174 | 3852 | ||
3175 | /* Zero this manually as its initialization is | 3853 | if (!CHIP_IS_E1(bp)) { |
3176 | currently missing in the initTool */ | ||
3177 | for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) | ||
3178 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
3179 | USTORM_AGG_DATA_OFFSET + i * 4, 0); | ||
3180 | } | ||
3181 | |||
3182 | static void bnx2x_init_internal_port(struct bnx2x *bp) | ||
3183 | { | ||
3184 | int port = BP_PORT(bp); | ||
3185 | |||
3186 | REG_WR(bp, | ||
3187 | BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR); | ||
3188 | REG_WR(bp, | ||
3189 | BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR); | ||
3190 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); | ||
3191 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR); | ||
3192 | } | ||
3193 | |||
3194 | static void bnx2x_init_internal_func(struct bnx2x *bp) | ||
3195 | { | ||
3196 | struct tstorm_eth_function_common_config tstorm_config = {0}; | ||
3197 | struct stats_indication_flags stats_flags = {0}; | ||
3198 | int port = BP_PORT(bp); | ||
3199 | int func = BP_FUNC(bp); | ||
3200 | int i, j; | ||
3201 | u32 offset; | ||
3202 | u16 max_agg_size; | ||
3203 | |||
3204 | tstorm_config.config_flags = RSS_FLAGS(bp); | ||
3205 | |||
3206 | if (is_multi(bp)) | ||
3207 | tstorm_config.rss_result_mask = MULTI_MASK; | ||
3208 | |||
3209 | /* Enable TPA if needed */ | ||
3210 | if (bp->flags & TPA_ENABLE_FLAG) | ||
3211 | tstorm_config.config_flags |= | ||
3212 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA; | ||
3213 | |||
3214 | if (IS_E1HMF(bp)) | ||
3215 | tstorm_config.config_flags |= | ||
3216 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM; | ||
3217 | |||
3218 | tstorm_config.leading_client_id = BP_L_ID(bp); | ||
3219 | |||
3220 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
3221 | TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func), | ||
3222 | (*(u32 *)&tstorm_config)); | ||
3223 | |||
3224 | bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ | ||
3225 | bp->rx_mode_cl_mask = (1 << BP_L_ID(bp)); | ||
3226 | bnx2x_set_storm_rx_mode(bp); | ||
3227 | |||
3228 | for_each_queue(bp, i) { | ||
3229 | u8 cl_id = bp->fp[i].cl_id; | ||
3230 | |||
3231 | /* reset xstorm per client statistics */ | ||
3232 | offset = BAR_XSTRORM_INTMEM + | ||
3233 | XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id); | ||
3234 | for (j = 0; | ||
3235 | j < sizeof(struct xstorm_per_client_stats) / 4; j++) | ||
3236 | REG_WR(bp, offset + j*4, 0); | ||
3237 | |||
3238 | /* reset tstorm per client statistics */ | ||
3239 | offset = BAR_TSTRORM_INTMEM + | ||
3240 | TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id); | ||
3241 | for (j = 0; | ||
3242 | j < sizeof(struct tstorm_per_client_stats) / 4; j++) | ||
3243 | REG_WR(bp, offset + j*4, 0); | ||
3244 | |||
3245 | /* reset ustorm per client statistics */ | ||
3246 | offset = BAR_USTRORM_INTMEM + | ||
3247 | USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id); | ||
3248 | for (j = 0; | ||
3249 | j < sizeof(struct ustorm_per_client_stats) / 4; j++) | ||
3250 | REG_WR(bp, offset + j*4, 0); | ||
3251 | } | ||
3252 | |||
3253 | /* Init statistics related context */ | ||
3254 | stats_flags.collect_eth = 1; | ||
3255 | |||
3256 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), | ||
3257 | ((u32 *)&stats_flags)[0]); | ||
3258 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4, | ||
3259 | ((u32 *)&stats_flags)[1]); | ||
3260 | |||
3261 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), | ||
3262 | ((u32 *)&stats_flags)[0]); | ||
3263 | REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4, | ||
3264 | ((u32 *)&stats_flags)[1]); | ||
3265 | |||
3266 | REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func), | ||
3267 | ((u32 *)&stats_flags)[0]); | ||
3268 | REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4, | ||
3269 | ((u32 *)&stats_flags)[1]); | ||
3270 | |||
3271 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), | ||
3272 | ((u32 *)&stats_flags)[0]); | ||
3273 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4, | ||
3274 | ((u32 *)&stats_flags)[1]); | ||
3275 | |||
3276 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
3277 | XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), | ||
3278 | U64_LO(bnx2x_sp_mapping(bp, fw_stats))); | ||
3279 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
3280 | XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, | ||
3281 | U64_HI(bnx2x_sp_mapping(bp, fw_stats))); | ||
3282 | |||
3283 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
3284 | TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), | ||
3285 | U64_LO(bnx2x_sp_mapping(bp, fw_stats))); | ||
3286 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
3287 | TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, | ||
3288 | U64_HI(bnx2x_sp_mapping(bp, fw_stats))); | ||
3289 | |||
3290 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
3291 | USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func), | ||
3292 | U64_LO(bnx2x_sp_mapping(bp, fw_stats))); | ||
3293 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
3294 | USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4, | ||
3295 | U64_HI(bnx2x_sp_mapping(bp, fw_stats))); | ||
3296 | 3854 | ||
3297 | if (CHIP_IS_E1H(bp)) { | 3855 | /* xstorm needs to know whether to add ovlan to packets or not, |
3856 | * in switch-independent mode we'll write 0 here... */ | ||
3298 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, | 3857 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, |
3299 | IS_E1HMF(bp)); | 3858 | bp->e1hmf); |
3300 | REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET, | 3859 | REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET, |
3301 | IS_E1HMF(bp)); | 3860 | bp->e1hmf); |
3302 | REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET, | 3861 | REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET, |
3303 | IS_E1HMF(bp)); | 3862 | bp->e1hmf); |
3304 | REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET, | 3863 | REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET, |
3305 | IS_E1HMF(bp)); | 3864 | bp->e1hmf); |
3306 | |||
3307 | REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func), | ||
3308 | bp->e1hov); | ||
3309 | } | 3865 | } |
3310 | 3866 | ||
3311 | /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */ | 3867 | /* Zero this manually as its initialization is |
3312 | max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * | 3868 | currently missing in the initTool */ |
3313 | SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); | 3869 | for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) |
3314 | for_each_queue(bp, i) { | ||
3315 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
3316 | |||
3317 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
3318 | USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id), | ||
3319 | U64_LO(fp->rx_comp_mapping)); | ||
3320 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
3321 | USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4, | ||
3322 | U64_HI(fp->rx_comp_mapping)); | ||
3323 | |||
3324 | /* Next page */ | ||
3325 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
3326 | USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id), | ||
3327 | U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE)); | ||
3328 | REG_WR(bp, BAR_USTRORM_INTMEM + | 3870 | REG_WR(bp, BAR_USTRORM_INTMEM + |
3329 | USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4, | 3871 | USTORM_AGG_DATA_OFFSET + i * 4, 0); |
3330 | U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE)); | 3872 | } |
3331 | |||
3332 | REG_WR16(bp, BAR_USTRORM_INTMEM + | ||
3333 | USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id), | ||
3334 | max_agg_size); | ||
3335 | } | ||
3336 | |||
3337 | /* dropless flow control */ | ||
3338 | if (CHIP_IS_E1H(bp)) { | ||
3339 | struct ustorm_eth_rx_pause_data_e1h rx_pause = {0}; | ||
3340 | |||
3341 | rx_pause.bd_thr_low = 250; | ||
3342 | rx_pause.cqe_thr_low = 250; | ||
3343 | rx_pause.cos = 1; | ||
3344 | rx_pause.sge_thr_low = 0; | ||
3345 | rx_pause.bd_thr_high = 350; | ||
3346 | rx_pause.cqe_thr_high = 350; | ||
3347 | rx_pause.sge_thr_high = 0; | ||
3348 | |||
3349 | for_each_queue(bp, i) { | ||
3350 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
3351 | |||
3352 | if (!fp->disable_tpa) { | ||
3353 | rx_pause.sge_thr_low = 150; | ||
3354 | rx_pause.sge_thr_high = 250; | ||
3355 | } | ||
3356 | |||
3357 | |||
3358 | offset = BAR_USTRORM_INTMEM + | ||
3359 | USTORM_ETH_RING_PAUSE_DATA_OFFSET(port, | ||
3360 | fp->cl_id); | ||
3361 | for (j = 0; | ||
3362 | j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4; | ||
3363 | j++) | ||
3364 | REG_WR(bp, offset + j*4, | ||
3365 | ((u32 *)&rx_pause)[j]); | ||
3366 | } | ||
3367 | } | ||
3368 | |||
3369 | memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); | ||
3370 | |||
3371 | /* Init rate shaping and fairness contexts */ | ||
3372 | if (IS_E1HMF(bp)) { | ||
3373 | int vn; | ||
3374 | |||
3375 | /* During init there is no active link | ||
3376 | Until link is up, set link rate to 10Gbps */ | ||
3377 | bp->link_vars.line_speed = SPEED_10000; | ||
3378 | bnx2x_init_port_minmax(bp); | ||
3379 | |||
3380 | if (!BP_NOMCP(bp)) | ||
3381 | bp->mf_config = | ||
3382 | SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); | ||
3383 | bnx2x_calc_vn_weight_sum(bp); | ||
3384 | |||
3385 | for (vn = VN_0; vn < E1HVN_MAX; vn++) | ||
3386 | bnx2x_init_vn_minmax(bp, 2*vn + port); | ||
3387 | |||
3388 | /* Enable rate shaping and fairness */ | ||
3389 | bp->cmng.flags.cmng_enables |= | ||
3390 | CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; | ||
3391 | |||
3392 | } else { | ||
3393 | /* rate shaping and fairness are disabled */ | ||
3394 | DP(NETIF_MSG_IFUP, | ||
3395 | "single function mode minmax will be disabled\n"); | ||
3396 | } | ||
3397 | |||
3398 | 3873 | ||
3399 | /* Store cmng structures to internal memory */ | 3874 | static void bnx2x_init_internal_port(struct bnx2x *bp) |
3400 | if (bp->port.pmf) | 3875 | { |
3401 | for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++) | 3876 | /* port */ |
3402 | REG_WR(bp, BAR_XSTRORM_INTMEM + | ||
3403 | XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4, | ||
3404 | ((u32 *)(&bp->cmng))[i]); | ||
3405 | } | 3877 | } |
3406 | 3878 | ||
3407 | static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) | 3879 | static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) |
@@ -3416,7 +3888,8 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) | |||
3416 | /* no break */ | 3888 | /* no break */ |
3417 | 3889 | ||
3418 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: | 3890 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: |
3419 | bnx2x_init_internal_func(bp); | 3891 | /* internal memory per function is |
3892 | initialized inside bnx2x_pf_init */ | ||
3420 | break; | 3893 | break; |
3421 | 3894 | ||
3422 | default: | 3895 | default: |
@@ -3425,43 +3898,61 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) | |||
3425 | } | 3898 | } |
3426 | } | 3899 | } |
3427 | 3900 | ||
3901 | static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx) | ||
3902 | { | ||
3903 | struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; | ||
3904 | |||
3905 | fp->state = BNX2X_FP_STATE_CLOSED; | ||
3906 | |||
3907 | fp->index = fp->cid = fp_idx; | ||
3908 | fp->cl_id = BP_L_ID(bp) + fp_idx; | ||
3909 | fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE; | ||
3910 | fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE; | ||
3911 | /* qZone id equals the FW (per path) client id */ | ||
3912 | fp->cl_qzone_id = fp->cl_id + | ||
3913 | BP_PORT(bp)*(ETH_MAX_RX_CLIENTS_E1H); | ||
3914 | /* init shortcut */ | ||
3915 | fp->ustorm_rx_prods_offset = | ||
3916 | USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); | ||
3917 | /* Set up SB indices */ | ||
3918 | fp->rx_cons_sb = BNX2X_RX_SB_INDEX; | ||
3919 | fp->tx_cons_sb = BNX2X_TX_SB_INDEX; | ||
3920 | |||
3921 | DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) " | ||
3922 | "cl_id %d fw_sb %d igu_sb %d\n", | ||
3923 | fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id, | ||
3924 | fp->igu_sb_id); | ||
3925 | bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, | ||
3926 | fp->fw_sb_id, fp->igu_sb_id); | ||
3927 | |||
3928 | bnx2x_update_fpsb_idx(fp); | ||
3929 | } | ||
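
Every per-queue ID in bnx2x_init_fp_sb() is derived from the queue index plus a per-function base. A worked example with hypothetical bases (leading client ID 0, FW/IGU SB bases 0, one slot reserved for CNIC, port 0; the client count stands in for ETH_MAX_RX_CLIENTS_E1H):

```c
#include <stdio.h>

#define CNIC_CTX_USE	1	/* assumed: one SB slot kept for CNIC */
#define MAX_RX_CLIENTS	26	/* hypothetical per-port client count */

int main(void)
{
	int base_l_id = 0, base_fw_ndsb = 0, igu_base_sb = 0, port = 0;
	int idx;

	for (idx = 0; idx < 3; idx++) {
		int cl_id = base_l_id + idx;
		int fw_sb_id = base_fw_ndsb + cl_id + CNIC_CTX_USE;
		int igu_sb_id = igu_base_sb + idx + CNIC_CTX_USE;
		int cl_qzone_id = cl_id + port * MAX_RX_CLIENTS;

		printf("q%d: cl_id=%d fw_sb=%d igu_sb=%d qzone=%d\n",
		       idx, cl_id, fw_sb_id, igu_sb_id, cl_qzone_id);
	}
	return 0;
}
```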
3930 | |||
3428 | void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) | 3931 | void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) |
3429 | { | 3932 | { |
3430 | int i; | 3933 | int i; |
3431 | 3934 | ||
3432 | for_each_queue(bp, i) { | 3935 | for_each_queue(bp, i) |
3433 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 3936 | bnx2x_init_fp_sb(bp, i); |
3434 | |||
3435 | fp->bp = bp; | ||
3436 | fp->state = BNX2X_FP_STATE_CLOSED; | ||
3437 | fp->index = i; | ||
3438 | fp->cl_id = BP_L_ID(bp) + i; | ||
3439 | #ifdef BCM_CNIC | 3937 | #ifdef BCM_CNIC |
3440 | fp->sb_id = fp->cl_id + 1; | 3938 | |
3441 | #else | 3939 | bnx2x_init_sb(bp, bp->cnic_sb_mapping, |
3442 | fp->sb_id = fp->cl_id; | 3940 | BNX2X_VF_ID_INVALID, false, |
3941 | CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp)); | ||
3942 | |||
3443 | #endif | 3943 | #endif |
3444 | DP(NETIF_MSG_IFUP, | ||
3445 | "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n", | ||
3446 | i, bp, fp->status_blk, fp->cl_id, fp->sb_id); | ||
3447 | bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, | ||
3448 | fp->sb_id); | ||
3449 | bnx2x_update_fpsb_idx(fp); | ||
3450 | } | ||
3451 | 3944 | ||
3452 | /* ensure status block indices were read */ | 3945 | /* ensure status block indices were read */ |
3453 | rmb(); | 3946 | rmb(); |
3454 | 3947 | ||
3455 | 3948 | bnx2x_init_def_sb(bp); | |
3456 | bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping, | ||
3457 | DEF_SB_ID); | ||
3458 | bnx2x_update_dsb_idx(bp); | 3949 | bnx2x_update_dsb_idx(bp); |
3459 | bnx2x_update_coalesce(bp); | ||
3460 | bnx2x_init_rx_rings(bp); | 3950 | bnx2x_init_rx_rings(bp); |
3461 | bnx2x_init_tx_ring(bp); | 3951 | bnx2x_init_tx_rings(bp); |
3462 | bnx2x_init_sp_ring(bp); | 3952 | bnx2x_init_sp_ring(bp); |
3463 | bnx2x_init_context(bp); | 3953 | bnx2x_init_eq_ring(bp); |
3464 | bnx2x_init_internal(bp, load_code); | 3954 | bnx2x_init_internal(bp, load_code); |
3955 | bnx2x_pf_init(bp); | ||
3465 | bnx2x_init_ind_table(bp); | 3956 | bnx2x_init_ind_table(bp); |
3466 | bnx2x_stats_init(bp); | 3957 | bnx2x_stats_init(bp); |
3467 | 3958 | ||
@@ -3620,8 +4111,6 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
3620 | else | 4111 | else |
3621 | factor = 1; | 4112 | factor = 1; |
3622 | 4113 | ||
3623 | DP(NETIF_MSG_HW, "start part1\n"); | ||
3624 | |||
3625 | /* Disable inputs of parser neighbor blocks */ | 4114 | /* Disable inputs of parser neighbor blocks */ |
3626 | REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); | 4115 | REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); |
3627 | REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); | 4116 | REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); |
@@ -3917,12 +4406,9 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) | |||
3917 | REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); | 4406 | REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); |
3918 | } | 4407 | } |
3919 | 4408 | ||
3920 | static int bnx2x_init_common(struct bnx2x *bp) | 4409 | static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) |
3921 | { | 4410 | { |
3922 | u32 val, i; | 4411 | u32 val, i; |
3923 | #ifdef BCM_CNIC | ||
3924 | u32 wb_write[2]; | ||
3925 | #endif | ||
3926 | 4412 | ||
3927 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); | 4413 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); |
3928 | 4414 | ||
@@ -3964,12 +4450,8 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
3964 | REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); | 4450 | REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); |
3965 | #endif | 4451 | #endif |
3966 | 4452 | ||
3967 | REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); | 4453 | bnx2x_ilt_init_page_size(bp, INITOP_SET); |
3968 | #ifdef BCM_CNIC | 4454 | |
3969 | REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); | ||
3970 | REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5); | ||
3971 | REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5); | ||
3972 | #endif | ||
3973 | 4455 | ||
3974 | if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) | 4456 | if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) |
3975 | REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); | 4457 | REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); |
@@ -4009,20 +4491,9 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
4009 | 4491 | ||
4010 | bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); | 4492 | bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); |
4011 | 4493 | ||
4012 | #ifdef BCM_CNIC | 4494 | /* QM queues pointers table */ |
4013 | wb_write[0] = 0; | 4495 | bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); |
4014 | wb_write[1] = 0; | 4496 | |
4015 | for (i = 0; i < 64; i++) { | ||
4016 | REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16)); | ||
4017 | bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2); | ||
4018 | |||
4019 | if (CHIP_IS_E1H(bp)) { | ||
4020 | REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16)); | ||
4021 | bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8, | ||
4022 | wb_write, 2); | ||
4023 | } | ||
4024 | } | ||
4025 | #endif | ||
4026 | /* soft reset pulse */ | 4497 | /* soft reset pulse */ |
4027 | REG_WR(bp, QM_REG_SOFT_RESET, 1); | 4498 | REG_WR(bp, QM_REG_SOFT_RESET, 1); |
4028 | REG_WR(bp, QM_REG_SOFT_RESET, 0); | 4499 | REG_WR(bp, QM_REG_SOFT_RESET, 0); |
@@ -4032,7 +4503,8 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
4032 | #endif | 4503 | #endif |
4033 | 4504 | ||
4034 | bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE); | 4505 | bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE); |
4035 | REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT); | 4506 | REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); |
4507 | |||
4036 | if (!CHIP_REV_IS_SLOW(bp)) { | 4508 | if (!CHIP_REV_IS_SLOW(bp)) { |
4037 | /* enable hw interrupt from doorbell Q */ | 4509 | /* enable hw interrupt from doorbell Q */ |
4038 | REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); | 4510 | REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); |
@@ -4184,7 +4656,7 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
4184 | return 0; | 4656 | return 0; |
4185 | } | 4657 | } |
4186 | 4658 | ||
4187 | static int bnx2x_init_port(struct bnx2x *bp) | 4659 | static int bnx2x_init_hw_port(struct bnx2x *bp) |
4188 | { | 4660 | { |
4189 | int port = BP_PORT(bp); | 4661 | int port = BP_PORT(bp); |
4190 | int init_stage = port ? PORT1_STAGE : PORT0_STAGE; | 4662 | int init_stage = port ? PORT1_STAGE : PORT0_STAGE; |
@@ -4203,9 +4675,10 @@ static int bnx2x_init_port(struct bnx2x *bp) | |||
4203 | bnx2x_init_block(bp, CCM_BLOCK, init_stage); | 4675 | bnx2x_init_block(bp, CCM_BLOCK, init_stage); |
4204 | bnx2x_init_block(bp, XCM_BLOCK, init_stage); | 4676 | bnx2x_init_block(bp, XCM_BLOCK, init_stage); |
4205 | 4677 | ||
4206 | #ifdef BCM_CNIC | 4678 | /* QM cid (connection) count */ |
4207 | REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1); | 4679 | bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); |
4208 | 4680 | ||
4681 | #ifdef BCM_CNIC | ||
4209 | bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); | 4682 | bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); |
4210 | REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); | 4683 | REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); |
4211 | REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); | 4684 | REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); |
@@ -4327,25 +4800,6 @@ static int bnx2x_init_port(struct bnx2x *bp) | |||
4327 | return 0; | 4800 | return 0; |
4328 | } | 4801 | } |
4329 | 4802 | ||
4330 | #define ILT_PER_FUNC (768/2) | ||
4331 | #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC) | ||
4332 | /* the phys address is shifted right 12 bits and has an added | ||
4333 | 1=valid bit added to the 53rd bit | ||
4334 | then since this is a wide register(TM) | ||
4335 | we split it into two 32 bit writes | ||
4336 | */ | ||
4337 | #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) | ||
4338 | #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) | ||
4339 | #define PXP_ONE_ILT(x) (((x) << 10) | x) | ||
4340 | #define PXP_ILT_RANGE(f, l) (((l) << 10) | f) | ||
4341 | |||
4342 | #ifdef BCM_CNIC | ||
4343 | #define CNIC_ILT_LINES 127 | ||
4344 | #define CNIC_CTX_PER_ILT 16 | ||
4345 | #else | ||
4346 | #define CNIC_ILT_LINES 0 | ||
4347 | #endif | ||
4348 | |||
4349 | static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) | 4803 | static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) |
4350 | { | 4804 | { |
4351 | int reg; | 4805 | int reg; |
@@ -4358,10 +4812,12 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) | |||
4358 | bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); | 4812 | bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); |
4359 | } | 4813 | } |
4360 | 4814 | ||
4361 | static int bnx2x_init_func(struct bnx2x *bp) | 4815 | static int bnx2x_init_hw_func(struct bnx2x *bp) |
4362 | { | 4816 | { |
4363 | int port = BP_PORT(bp); | 4817 | int port = BP_PORT(bp); |
4364 | int func = BP_FUNC(bp); | 4818 | int func = BP_FUNC(bp); |
4819 | struct bnx2x_ilt *ilt = BP_ILT(bp); | ||
4820 | u16 cdu_ilt_start; | ||
4365 | u32 addr, val; | 4821 | u32 addr, val; |
4366 | int i; | 4822 | int i; |
4367 | 4823 | ||
@@ -4373,72 +4829,67 @@ static int bnx2x_init_func(struct bnx2x *bp) | |||
4373 | val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; | 4829 | val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; |
4374 | REG_WR(bp, addr, val); | 4830 | REG_WR(bp, addr, val); |
4375 | 4831 | ||
4376 | i = FUNC_ILT_BASE(func); | 4832 | ilt = BP_ILT(bp); |
4377 | 4833 | cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; | |
4378 | bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context)); | ||
4379 | if (CHIP_IS_E1H(bp)) { | ||
4380 | REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i); | ||
4381 | REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES); | ||
4382 | } else /* E1 */ | ||
4383 | REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, | ||
4384 | PXP_ILT_RANGE(i, i + CNIC_ILT_LINES)); | ||
4385 | |||
4386 | #ifdef BCM_CNIC | ||
4387 | i += 1 + CNIC_ILT_LINES; | ||
4388 | bnx2x_ilt_wr(bp, i, bp->timers_mapping); | ||
4389 | if (CHIP_IS_E1(bp)) | ||
4390 | REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i)); | ||
4391 | else { | ||
4392 | REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i); | ||
4393 | REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i); | ||
4394 | } | ||
4395 | |||
4396 | i++; | ||
4397 | bnx2x_ilt_wr(bp, i, bp->qm_mapping); | ||
4398 | if (CHIP_IS_E1(bp)) | ||
4399 | REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i)); | ||
4400 | else { | ||
4401 | REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i); | ||
4402 | REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i); | ||
4403 | } | ||
4404 | 4834 | ||
4405 | i++; | 4835 | for (i = 0; i < L2_ILT_LINES(bp); i++) { |
4406 | bnx2x_ilt_wr(bp, i, bp->t1_mapping); | 4836 | ilt->lines[cdu_ilt_start + i].page = |
4407 | if (CHIP_IS_E1(bp)) | 4837 | bp->context.vcxt + (ILT_PAGE_CIDS * i); |
4408 | REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i)); | 4838 | ilt->lines[cdu_ilt_start + i].page_mapping = |
4409 | else { | 4839 | bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i); |
4410 | REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i); | 4840 | /* cdu ilt pages are allocated manually so there's no need to |
4411 | REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i); | 4841 | set the size */ |
4412 | } | 4842 | } |
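
The loop above gives CDU ILT line i one page of connection contexts: the host virtual side advances by ILT_PAGE_CIDS contexts per line and the DMA side by CDU_ILT_PAGE_SZ bytes. A worked example with hypothetical sizes:

```c
#include <stdio.h>

#define CDU_ILT_PAGE_SZ	4096	/* hypothetical ILT page size in bytes */
#define CTX_SZ		1024	/* hypothetical context size in bytes */
#define ILT_PAGE_CIDS	(CDU_ILT_PAGE_SZ / CTX_SZ)

int main(void)
{
	unsigned long long cxt_dma = 0x10000000ull;	/* hypothetical DMA base */
	int i;

	for (i = 0; i < 3; i++)
		printf("ilt line %d: virt offset = %d contexts, dma = 0x%llx\n",
		       i, ILT_PAGE_CIDS * i,
		       cxt_dma + (unsigned long long)CDU_ILT_PAGE_SZ * i);
	return 0;
}
```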
4843 | bnx2x_ilt_init_op(bp, INITOP_SET); | ||
4844 | #ifdef BCM_CNIC | ||
4845 | bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); | ||
4413 | 4846 | ||
4414 | /* tell the searcher where the T2 table is */ | 4847 | /* T1 hash bits value determines the T1 number of entries */ |
4415 | REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64); | 4848 | REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); |
4416 | 4849 | #endif | |
4417 | bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16, | ||
4418 | U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping)); | ||
4419 | 4850 | ||
4420 | bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16, | 4851 | #ifndef BCM_CNIC |
4421 | U64_LO((u64)bp->t2_mapping + 16*1024 - 64), | 4852 | /* set NIC mode */ |
4422 | U64_HI((u64)bp->t2_mapping + 16*1024 - 64)); | 4853 | REG_WR(bp, PRS_REG_NIC_MODE, 1); |
4854 | #endif /* BCM_CNIC */ | ||
4423 | 4855 | ||
4424 | REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10); | 4856 | bp->dmae_ready = 1; |
4425 | #endif | ||
4426 | 4857 | ||
4427 | if (CHIP_IS_E1H(bp)) { | 4858 | bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func); |
4428 | bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func); | 4859 | |
4429 | bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func); | 4860 | bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func); |
4430 | bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func); | 4861 | bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func); |
4431 | bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func); | 4862 | bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func); |
4432 | bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func); | 4863 | bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func); |
4433 | bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func); | 4864 | bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func); |
4434 | bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func); | 4865 | bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func); |
4435 | bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func); | 4866 | bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func); |
4436 | bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func); | 4867 | bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func); |
4868 | bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func); | ||
4869 | |||
4870 | bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func); | ||
4871 | bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func); | ||
4872 | bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func); | ||
4873 | bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func); | ||
4874 | bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func); | ||
4875 | bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func); | ||
4876 | bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func); | ||
4877 | bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func); | ||
4878 | bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func); | ||
4879 | bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func); | ||
4880 | bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func); | ||
4881 | bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func); | ||
4882 | bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func); | ||
4883 | |||
4884 | bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func); | ||
4437 | 4885 | ||
4886 | if (IS_E1HMF(bp)) { | ||
4438 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); | 4887 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); |
4439 | REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov); | 4888 | REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov); |
4440 | } | 4889 | } |
4441 | 4890 | ||
4891 | bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func); | ||
4892 | |||
4442 | /* HC init per function */ | 4893 | /* HC init per function */ |
4443 | if (CHIP_IS_E1H(bp)) { | 4894 | if (CHIP_IS_E1H(bp)) { |
4444 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); | 4895 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); |
@@ -4451,13 +4902,21 @@ static int bnx2x_init_func(struct bnx2x *bp) | |||
4451 | /* Reset PCIE errors for debug */ | 4902 | /* Reset PCIE errors for debug */ |
4452 | REG_WR(bp, 0x2114, 0xffffffff); | 4903 | REG_WR(bp, 0x2114, 0xffffffff); |
4453 | REG_WR(bp, 0x2120, 0xffffffff); | 4904 | REG_WR(bp, 0x2120, 0xffffffff); |
4905 | |||
4906 | bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func); | ||
4907 | bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func); | ||
4908 | bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func); | ||
4909 | bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func); | ||
4910 | bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func); | ||
4911 | bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func); | ||
4912 | |||
4454 | bnx2x_phy_probe(&bp->link_params); | 4913 | bnx2x_phy_probe(&bp->link_params); |
4455 | return 0; | 4914 | return 0; |
4456 | } | 4915 | } |
4457 | 4916 | ||
4458 | int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) | 4917 | int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) |
4459 | { | 4918 | { |
4460 | int i, rc = 0; | 4919 | int rc = 0; |
4461 | 4920 | ||
4462 | DP(BNX2X_MSG_MCP, "function %d load_code %x\n", | 4921 | DP(BNX2X_MSG_MCP, "function %d load_code %x\n", |
4463 | BP_FUNC(bp), load_code); | 4922 | BP_FUNC(bp), load_code); |
@@ -4470,21 +4929,19 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) | |||
4470 | 4929 | ||
4471 | switch (load_code) { | 4930 | switch (load_code) { |
4472 | case FW_MSG_CODE_DRV_LOAD_COMMON: | 4931 | case FW_MSG_CODE_DRV_LOAD_COMMON: |
4473 | rc = bnx2x_init_common(bp); | 4932 | rc = bnx2x_init_hw_common(bp, load_code); |
4474 | if (rc) | 4933 | if (rc) |
4475 | goto init_hw_err; | 4934 | goto init_hw_err; |
4476 | /* no break */ | 4935 | /* no break */ |
4477 | 4936 | ||
4478 | case FW_MSG_CODE_DRV_LOAD_PORT: | 4937 | case FW_MSG_CODE_DRV_LOAD_PORT: |
4479 | bp->dmae_ready = 1; | 4938 | rc = bnx2x_init_hw_port(bp); |
4480 | rc = bnx2x_init_port(bp); | ||
4481 | if (rc) | 4939 | if (rc) |
4482 | goto init_hw_err; | 4940 | goto init_hw_err; |
4483 | /* no break */ | 4941 | /* no break */ |
4484 | 4942 | ||
4485 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: | 4943 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: |
4486 | bp->dmae_ready = 1; | 4944 | rc = bnx2x_init_hw_func(bp); |
4487 | rc = bnx2x_init_func(bp); | ||
4488 | if (rc) | 4945 | if (rc) |
4489 | goto init_hw_err; | 4946 | goto init_hw_err; |
4490 | break; | 4947 | break; |
@@ -4503,14 +4960,6 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) | |||
4503 | DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); | 4960 | DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); |
4504 | } | 4961 | } |
4505 | 4962 | ||
4506 | /* this needs to be done before gunzip end */ | ||
4507 | bnx2x_zero_def_sb(bp); | ||
4508 | for_each_queue(bp, i) | ||
4509 | bnx2x_zero_sb(bp, BP_L_ID(bp) + i); | ||
4510 | #ifdef BCM_CNIC | ||
4511 | bnx2x_zero_sb(bp, BP_L_ID(bp) + i); | ||
4512 | #endif | ||
4513 | |||
4514 | init_hw_err: | 4963 | init_hw_err: |
4515 | bnx2x_gunzip_end(bp); | 4964 | bnx2x_gunzip_end(bp); |
4516 | 4965 | ||
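The load_code switch in bnx2x_init_hw() relies on deliberate fall-through (the /* no break */ comments): a COMMON load runs all three init stages, a PORT load runs port and function init, and a FUNCTION load runs only its own stage. A minimal sketch of the same cascade, with hypothetical stub names:

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:	/* first function on the chip */
		rc = init_common_stage();
		if (rc)
			break;
		/* fall through: COMMON also runs PORT and FUNCTION init */
	case FW_MSG_CODE_DRV_LOAD_PORT:		/* first function on this port */
		rc = init_port_stage();
		if (rc)
			break;
		/* fall through */
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:	/* every other function */
		rc = init_func_stage();
		break;
	}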
@@ -4523,7 +4972,7 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
4523 | #define BNX2X_PCI_FREE(x, y, size) \ | 4972 | #define BNX2X_PCI_FREE(x, y, size) \ |
4524 | do { \ | 4973 | do { \ |
4525 | if (x) { \ | 4974 | if (x) { \ |
4526 | dma_free_coherent(&bp->pdev->dev, size, x, y); \ | 4975 | dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \ |
4527 | x = NULL; \ | 4976 | x = NULL; \ |
4528 | y = 0; \ | 4977 | y = 0; \ |
4529 | } \ | 4978 | } \ |
@@ -4532,7 +4981,7 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
4532 | #define BNX2X_FREE(x) \ | 4981 | #define BNX2X_FREE(x) \ |
4533 | do { \ | 4982 | do { \ |
4534 | if (x) { \ | 4983 | if (x) { \ |
4535 | vfree(x); \ | 4984 | kfree((void *)x); \ |
4536 | x = NULL; \ | 4985 | x = NULL; \ |
4537 | } \ | 4986 | } \ |
4538 | } while (0) | 4987 | } while (0) |
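Both helper macros wrap their bodies in do { ... } while (0) so a multi-statement expansion still behaves as one C statement. Note also that BNX2X_FREE can switch from vfree() to kfree() only because the matching BNX2X_ALLOC below moves from vmalloc() to kzalloc() — allocator and deallocator must always pair. A stripped-down illustration of the pattern (not the driver's macro):

	#include <linux/slab.h>

	/* The do { } while (0) wrapper keeps the macro safe inside an
	 * unbraced if/else; kfree(NULL) is a no-op, so no NULL check
	 * is strictly required. */
	#define SAFE_KFREE(p)			\
		do {				\
			kfree(p);		\
			(p) = NULL;		\
		} while (0)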
@@ -4542,11 +4991,10 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
4542 | /* fastpath */ | 4991 | /* fastpath */ |
4543 | /* Common */ | 4992 | /* Common */ |
4544 | for_each_queue(bp, i) { | 4993 | for_each_queue(bp, i) { |
4545 | |||
4546 | /* status blocks */ | 4994 | /* status blocks */ |
4547 | BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk), | 4995 | BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb), |
4548 | bnx2x_fp(bp, i, status_blk_mapping), | 4996 | bnx2x_fp(bp, i, status_blk_mapping), |
4549 | sizeof(struct host_status_block)); | 4997 | sizeof(struct host_hc_status_block_e1x)); |
4550 | } | 4998 | } |
4551 | /* Rx */ | 4999 | /* Rx */ |
4552 | for_each_queue(bp, i) { | 5000 | for_each_queue(bp, i) { |
@@ -4580,21 +5028,28 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
4580 | /* end of fastpath */ | 5028 | /* end of fastpath */ |
4581 | 5029 | ||
4582 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, | 5030 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, |
4583 | sizeof(struct host_def_status_block)); | 5031 | sizeof(struct host_sp_status_block)); |
4584 | 5032 | ||
4585 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, | 5033 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, |
4586 | sizeof(struct bnx2x_slowpath)); | 5034 | sizeof(struct bnx2x_slowpath)); |
4587 | 5035 | ||
5036 | BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping, | ||
5037 | bp->context.size); | ||
5038 | |||
5039 | bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); | ||
5040 | |||
5041 | BNX2X_FREE(bp->ilt->lines); | ||
4588 | #ifdef BCM_CNIC | 5042 | #ifdef BCM_CNIC |
4589 | BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024); | 5043 | |
4590 | BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024); | 5044 | BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, |
4591 | BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024); | 5045 | sizeof(struct host_hc_status_block_e1x)); |
4592 | BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024); | 5046 | BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); |
4593 | BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping, | ||
4594 | sizeof(struct host_status_block)); | ||
4595 | #endif | 5047 | #endif |
4596 | BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); | 5048 | BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); |
4597 | 5049 | ||
5050 | BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, | ||
5051 | BCM_PAGE_SIZE * NUM_EQ_PAGES); | ||
5052 | |||
4598 | #undef BNX2X_PCI_FREE | 5053 | #undef BNX2X_PCI_FREE |
4599 | #undef BNX2X_KFREE | 5054 | #undef BNX2X_KFREE |
4600 | } | 5055 | } |
@@ -4612,13 +5067,13 @@ int bnx2x_alloc_mem(struct bnx2x *bp) | |||
4612 | 5067 | ||
4613 | #define BNX2X_ALLOC(x, size) \ | 5068 | #define BNX2X_ALLOC(x, size) \ |
4614 | do { \ | 5069 | do { \ |
4615 | x = vmalloc(size); \ | 5070 | x = kzalloc(size, GFP_KERNEL); \ |
4616 | if (x == NULL) \ | 5071 | if (x == NULL) \ |
4617 | goto alloc_mem_err; \ | 5072 | goto alloc_mem_err; \ |
4618 | memset(x, 0, size); \ | ||
4619 | } while (0) | 5073 | } while (0) |
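kzalloc() returns memory that is already zero-filled, so the old vmalloc()-then-memset() pair collapses into one call; the trade-off is that kzalloc() needs physically contiguous pages, which suits the small per-queue structures allocated here. A minimal equivalent of the new macro body:

	/* kzalloc = kmalloc + zero fill; GFP_KERNEL may sleep, which is
	 * fine in probe/setup context. Replaces vmalloc() + memset(). */
	void *buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;	/* zeroed on success, no memset needed */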
4620 | 5074 | ||
4621 | int i; | 5075 | int i; |
5076 | void *p; | ||
4622 | 5077 | ||
4623 | /* fastpath */ | 5078 | /* fastpath */ |
4624 | /* Common */ | 5079 | /* Common */ |
@@ -4626,9 +5081,17 @@ int bnx2x_alloc_mem(struct bnx2x *bp) | |||
4626 | bnx2x_fp(bp, i, bp) = bp; | 5081 | bnx2x_fp(bp, i, bp) = bp; |
4627 | 5082 | ||
4628 | /* status blocks */ | 5083 | /* status blocks */ |
4629 | BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk), | 5084 | BNX2X_PCI_ALLOC(p, |
4630 | &bnx2x_fp(bp, i, status_blk_mapping), | 5085 | &bnx2x_fp(bp, i, status_blk_mapping), |
4631 | sizeof(struct host_status_block)); | 5086 | sizeof(struct host_hc_status_block_e1x)); |
5087 | |||
5088 | bnx2x_fp(bp, i, status_blk.e1x_sb) = | ||
5089 | (struct host_hc_status_block_e1x *)p; | ||
5090 | |||
5091 | bnx2x_fp(bp, i, sb_index_values) = (__le16 *) | ||
5092 | (bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.index_values); | ||
5093 | bnx2x_fp(bp, i, sb_running_index) = (__le16 *) | ||
5094 | (bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.running_index); | ||
4632 | } | 5095 | } |
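The per-queue status block is now allocated through a temporary void pointer and then carved up: the union member and the sb_index_values/sb_running_index shortcuts all alias into the same DMA-coherent buffer. A hedged sketch of the idiom — the struct layout is illustrative only (the real one comes from the FW HSI headers), and pdev/mapping are assumed to be in scope:

	struct demo_sb {
		__le16 index_values[8];
		__le16 running_index[2];
	};

	void *p = dma_alloc_coherent(&pdev->dev, sizeof(struct demo_sb),
				     &mapping, GFP_KERNEL);
	struct demo_sb *sb = p;		/* typed view of the buffer    */
	__le16 *idx = sb->index_values;	/* shortcuts into the same RAM */
	__le16 *run = sb->running_index;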
4633 | /* Rx */ | 5096 | /* Rx */ |
4634 | for_each_queue(bp, i) { | 5097 | for_each_queue(bp, i) { |
@@ -4664,37 +5127,36 @@ int bnx2x_alloc_mem(struct bnx2x *bp) | |||
4664 | } | 5127 | } |
4665 | /* end of fastpath */ | 5128 | /* end of fastpath */ |
4666 | 5129 | ||
4667 | BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, | 5130 | #ifdef BCM_CNIC |
4668 | sizeof(struct host_def_status_block)); | 5131 | BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping, |
5132 | sizeof(struct host_hc_status_block_e1x)); | ||
4669 | 5133 | ||
4670 | BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, | 5134 | /* allocate searcher T2 table */ |
4671 | sizeof(struct bnx2x_slowpath)); | 5135 | BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); |
5136 | #endif | ||
4672 | 5137 | ||
4673 | #ifdef BCM_CNIC | ||
4674 | BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024); | ||
4675 | 5138 | ||
4676 | /* allocate searcher T2 table | 5139 | BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, |
4677 | we allocate 1/4 of alloc num for T2 | 5140 | sizeof(struct host_sp_status_block)); |
4678 | (which is not entered into the ILT) */ | ||
4679 | BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024); | ||
4680 | 5141 | ||
4681 | /* Initialize T2 (for 1024 connections) */ | 5142 | BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, |
4682 | for (i = 0; i < 16*1024; i += 64) | 5143 | sizeof(struct bnx2x_slowpath)); |
4683 | *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64; | ||
4684 | 5144 | ||
4685 | /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */ | 5145 | bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count; |
4686 | BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024); | 5146 | BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, |
5147 | bp->context.size); | ||
4687 | 5148 | ||
4688 | /* QM queues (128*MAX_CONN) */ | 5149 | BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); |
4689 | BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024); | ||
4690 | 5150 | ||
4691 | BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping, | 5151 | if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) |
4692 | sizeof(struct host_status_block)); | 5152 | goto alloc_mem_err; |
4693 | #endif | ||
4694 | 5153 | ||
4695 | /* Slow path ring */ | 5154 | /* Slow path ring */ |
4696 | BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); | 5155 | BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); |
4697 | 5156 | ||
5157 | /* EQ */ | ||
5158 | BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, | ||
5159 | BCM_PAGE_SIZE * NUM_EQ_PAGES); | ||
4698 | return 0; | 5160 | return 0; |
4699 | 5161 | ||
4700 | alloc_mem_err: | 5162 | alloc_mem_err: |
@@ -4705,97 +5167,52 @@ alloc_mem_err: | |||
4705 | #undef BNX2X_ALLOC | 5167 | #undef BNX2X_ALLOC |
4706 | } | 5168 | } |
4707 | 5169 | ||
4708 | |||
4709 | /* | 5170 | /* |
4710 | * Init service functions | 5171 | * Init service functions |
4711 | */ | 5172 | */ |
4712 | 5173 | int bnx2x_func_start(struct bnx2x *bp) | |
4713 | /** | ||
4714 | * Sets a MAC in a CAM for a few L2 Clients for E1 chip | ||
4715 | * | ||
4716 | * @param bp driver descriptor | ||
4717 | * @param set set or clear an entry (1 or 0) | ||
4718 | * @param mac pointer to a buffer containing a MAC | ||
4719 | * @param cl_bit_vec bit vector of clients to register a MAC for | ||
4720 | * @param cam_offset offset in a CAM to use | ||
4721 | * @param with_bcast set broadcast MAC as well | ||
4722 | */ | ||
4723 | static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac, | ||
4724 | u32 cl_bit_vec, u8 cam_offset, | ||
4725 | u8 with_bcast) | ||
4726 | { | 5174 | { |
4727 | struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); | 5175 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1); |
4728 | int port = BP_PORT(bp); | ||
4729 | 5176 | ||
4730 | /* CAM allocation | 5177 | /* Wait for completion */ |
4731 | * unicasts 0-31:port0 32-63:port1 | 5178 | return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state), |
4732 | * multicast 64-127:port0 128-191:port1 | 5179 | WAIT_RAMROD_COMMON); |
4733 | */ | 5180 | } |
4734 | config->hdr.length = 1 + (with_bcast ? 1 : 0); | ||
4735 | config->hdr.offset = cam_offset; | ||
4736 | config->hdr.client_id = 0xff; | ||
4737 | config->hdr.reserved1 = 0; | ||
4738 | |||
4739 | /* primary MAC */ | ||
4740 | config->config_table[0].cam_entry.msb_mac_addr = | ||
4741 | swab16(*(u16 *)&mac[0]); | ||
4742 | config->config_table[0].cam_entry.middle_mac_addr = | ||
4743 | swab16(*(u16 *)&mac[2]); | ||
4744 | config->config_table[0].cam_entry.lsb_mac_addr = | ||
4745 | swab16(*(u16 *)&mac[4]); | ||
4746 | config->config_table[0].cam_entry.flags = cpu_to_le16(port); | ||
4747 | if (set) | ||
4748 | config->config_table[0].target_table_entry.flags = 0; | ||
4749 | else | ||
4750 | CAM_INVALIDATE(config->config_table[0]); | ||
4751 | config->config_table[0].target_table_entry.clients_bit_vector = | ||
4752 | cpu_to_le32(cl_bit_vec); | ||
4753 | config->config_table[0].target_table_entry.vlan_id = 0; | ||
4754 | 5181 | ||
4755 | DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", | 5182 | int bnx2x_func_stop(struct bnx2x *bp) |
4756 | (set ? "setting" : "clearing"), | 5183 | { |
4757 | config->config_table[0].cam_entry.msb_mac_addr, | 5184 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1); |
4758 | config->config_table[0].cam_entry.middle_mac_addr, | ||
4759 | config->config_table[0].cam_entry.lsb_mac_addr); | ||
4760 | |||
4761 | /* broadcast */ | ||
4762 | if (with_bcast) { | ||
4763 | config->config_table[1].cam_entry.msb_mac_addr = | ||
4764 | cpu_to_le16(0xffff); | ||
4765 | config->config_table[1].cam_entry.middle_mac_addr = | ||
4766 | cpu_to_le16(0xffff); | ||
4767 | config->config_table[1].cam_entry.lsb_mac_addr = | ||
4768 | cpu_to_le16(0xffff); | ||
4769 | config->config_table[1].cam_entry.flags = cpu_to_le16(port); | ||
4770 | if (set) | ||
4771 | config->config_table[1].target_table_entry.flags = | ||
4772 | TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; | ||
4773 | else | ||
4774 | CAM_INVALIDATE(config->config_table[1]); | ||
4775 | config->config_table[1].target_table_entry.clients_bit_vector = | ||
4776 | cpu_to_le32(cl_bit_vec); | ||
4777 | config->config_table[1].target_table_entry.vlan_id = 0; | ||
4778 | } | ||
4779 | 5185 | ||
4780 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | 5186 | /* Wait for completion */ |
4781 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), | 5187 | return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD, |
4782 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); | 5188 | 0, &(bp->state), WAIT_RAMROD_COMMON); |
4783 | } | 5189 | } |
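bnx2x_func_start()/bnx2x_func_stop() introduce the common ramrod pattern used throughout the new HSI: post a slow-path command, then block until the completion handler flips the watched state word. A hypothetical wrapper (not in the driver) makes the shape explicit:

	/* The last argument of bnx2x_sp_post() (1) marks this as a
	 * common ramrod whose completion arrives on the event queue. */
	static int post_and_wait(struct bnx2x *bp, int cmd, int target_state)
	{
		bnx2x_sp_post(bp, cmd, 0, 0, 0, 1);
		return bnx2x_wait_ramrod(bp, target_state, 0, &bp->state,
					 WAIT_RAMROD_COMMON);
	}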
4784 | 5190 | ||
4785 | /** | 5191 | /** |
4786 | * Sets a MAC in a CAM for a few L2 Clients for E1H chip | 5192 | * Sets a MAC in a CAM for a few L2 Clients for E1x chip |
4787 | * | 5193 | * |
4788 | * @param bp driver descriptor | 5194 | * @param bp driver descriptor |
4789 | * @param set set or clear an entry (1 or 0) | 5195 | * @param set set or clear an entry (1 or 0) |
4790 | * @param mac pointer to a buffer containing a MAC | 5196 | * @param mac pointer to a buffer containing a MAC |
4791 | * @param cl_bit_vec bit vector of clients to register a MAC for | 5197 | * @param cl_bit_vec bit vector of clients to register a MAC for |
4792 | * @param cam_offset offset in a CAM to use | 5198 | * @param cam_offset offset in a CAM to use |
5199 | * @param is_bcast whether the MAC being set is a broadcast address (E1 only) | ||
4793 | */ | 5200 | */ |
4794 | static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac, | 5201 | static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac, |
4795 | u32 cl_bit_vec, u8 cam_offset) | 5202 | u32 cl_bit_vec, u8 cam_offset, |
5203 | u8 is_bcast) | ||
4796 | { | 5204 | { |
4797 | struct mac_configuration_cmd_e1h *config = | 5205 | struct mac_configuration_cmd *config = |
4798 | (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); | 5206 | (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config); |
5207 | int ramrod_flags = WAIT_RAMROD_COMMON; | ||
5208 | |||
5209 | bp->set_mac_pending = 1; | ||
5210 | smp_wmb(); | ||
5211 | |||
5212 | config->hdr.length = 1 + (is_bcast ? 1 : 0); | ||
5213 | config->hdr.offset = cam_offset; | ||
5214 | config->hdr.client_id = 0xff; | ||
5215 | config->hdr.reserved1 = 0; | ||
4799 | 5216 | ||
4800 | config->hdr.length = 1; | 5217 | config->hdr.length = 1; |
4801 | config->hdr.offset = cam_offset; | 5218 | config->hdr.offset = cam_offset; |
@@ -4812,29 +5229,42 @@ static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac, | |||
4812 | config->config_table[0].clients_bit_vector = | 5229 | config->config_table[0].clients_bit_vector = |
4813 | cpu_to_le32(cl_bit_vec); | 5230 | cpu_to_le32(cl_bit_vec); |
4814 | config->config_table[0].vlan_id = 0; | 5231 | config->config_table[0].vlan_id = 0; |
4815 | config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); | 5232 | config->config_table[0].pf_id = BP_FUNC(bp); |
4816 | if (set) | 5233 | if (set) |
4817 | config->config_table[0].flags = BP_PORT(bp); | 5234 | SET_FLAG(config->config_table[0].flags, |
5235 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
5236 | T_ETH_MAC_COMMAND_SET); | ||
4818 | else | 5237 | else |
4819 | config->config_table[0].flags = | 5238 | SET_FLAG(config->config_table[0].flags, |
4820 | MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE; | 5239 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, |
5240 | T_ETH_MAC_COMMAND_INVALIDATE); | ||
5241 | |||
5242 | if (is_bcast) | ||
5243 | SET_FLAG(config->config_table[0].flags, | ||
5244 | MAC_CONFIGURATION_ENTRY_BROADCAST, 1); | ||
4821 | 5245 | ||
4822 | DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n", | 5246 | DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n", |
4823 | (set ? "setting" : "clearing"), | 5247 | (set ? "setting" : "clearing"), |
4824 | config->config_table[0].msb_mac_addr, | 5248 | config->config_table[0].msb_mac_addr, |
4825 | config->config_table[0].middle_mac_addr, | 5249 | config->config_table[0].middle_mac_addr, |
4826 | config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec); | 5250 | config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec); |
4827 | 5251 | ||
4828 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | 5252 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, |
4829 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), | 5253 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), |
4830 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); | 5254 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1); |
5255 | |||
5256 | /* Wait for a completion */ | ||
5257 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags); | ||
4831 | } | 5258 | } |
4832 | 5259 | ||
4833 | static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | 5260 | |
4834 | int *state_p, int poll) | 5261 | int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, |
5262 | int *state_p, int flags) | ||
4835 | { | 5263 | { |
4836 | /* can take a while if any port is running */ | 5264 | /* can take a while if any port is running */ |
4837 | int cnt = 5000; | 5265 | int cnt = 5000; |
5266 | u8 poll = flags & WAIT_RAMROD_POLL; | ||
5267 | u8 common = flags & WAIT_RAMROD_COMMON; | ||
4838 | 5268 | ||
4839 | DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n", | 5269 | DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n", |
4840 | poll ? "polling" : "waiting", state, idx); | 5270 | poll ? "polling" : "waiting", state, idx); |
@@ -4842,13 +5272,17 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | |||
4842 | might_sleep(); | 5272 | might_sleep(); |
4843 | while (cnt--) { | 5273 | while (cnt--) { |
4844 | if (poll) { | 5274 | if (poll) { |
4845 | bnx2x_rx_int(bp->fp, 10); | 5275 | if (common) |
4846 | /* if index is different from 0 | 5276 | bnx2x_eq_int(bp); |
4847 | * the reply for some commands will | 5277 | else { |
4848 | * be on the non default queue | 5278 | bnx2x_rx_int(bp->fp, 10); |
4849 | */ | 5279 | /* if index is different from 0 |
4850 | if (idx) | 5280 | * the reply for some commands will |
4851 | bnx2x_rx_int(&bp->fp[idx], 10); | 5281 | * be on the non default queue |
5282 | */ | ||
5283 | if (idx) | ||
5284 | bnx2x_rx_int(&bp->fp[idx], 10); | ||
5285 | } | ||
4852 | } | 5286 | } |
4853 | 5287 | ||
4854 | mb(); /* state is changed by bnx2x_sp_event() */ | 5288 | mb(); /* state is changed by bnx2x_sp_event() */ |
@@ -4875,31 +5309,110 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | |||
4875 | return -EBUSY; | 5309 | return -EBUSY; |
4876 | } | 5310 | } |
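The old boolean poll parameter becomes a flags word so callers can say both how to wait and where the completion lands: WAIT_RAMROD_POLL drives completions by hand, and WAIT_RAMROD_COMMON routes that polling to the new event queue instead of an Rx ring. The decision logic, distilled:

	u8 poll   = flags & WAIT_RAMROD_POLL;	/* drive completions manually */
	u8 common = flags & WAIT_RAMROD_COMMON;	/* completion on event queue  */

	if (poll) {
		if (common)
			bnx2x_eq_int(bp);		/* common ramrods -> EQ  */
		else
			bnx2x_rx_int(&bp->fp[idx], 10);	/* client ramrods -> RCQ */
	}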
4877 | 5311 | ||
4878 | void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set) | 5312 | u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset) |
4879 | { | 5313 | { |
4880 | bp->set_mac_pending++; | 5314 | return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp); |
4881 | smp_wmb(); | 5315 | } |
5316 | |||
5317 | void bnx2x_set_eth_mac(struct bnx2x *bp, int set) | ||
5318 | { | ||
5319 | u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) : | ||
5320 | bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE)); | ||
4882 | 5321 | ||
4883 | bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr, | 5322 | /* networking MAC */ |
4884 | (1 << bp->fp->cl_id), BP_FUNC(bp)); | 5323 | bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr, |
5324 | (1 << bp->fp->cl_id), cam_offset , 0); | ||
4885 | 5325 | ||
4886 | /* Wait for a completion */ | 5326 | if (CHIP_IS_E1(bp)) { |
4887 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); | 5327 | /* broadcast MAC */ |
5328 | u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; | ||
5329 | bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1); | ||
5330 | } | ||
4888 | } | 5331 | } |
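The unified helper computes CAM offsets differently per chip: E1 keeps fixed per-port windows (unicast entries 0-31 for port 0, 32-63 for port 1), while E1H stripes by function — entry = E1H_FUNC_MAX * line + func. A worked example, assuming E1H_FUNC_MAX == 8, CAM_ETH_LINE == 0 and CAM_ISCSI_ETH_LINE == 1:

	/* For PCI function 3 under the assumed constants:
	 *   ETH MAC   -> 8 * 0 + 3 = CAM entry 3
	 *   iSCSI MAC -> 8 * 1 + 3 = CAM entry 11
	 * i.e. each "line" is a stripe of 8 consecutive entries. */
	u8 eth   = bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE);
	u8 iscsi = bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE);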
5332 | static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset) | ||
5333 | { | ||
5334 | int i = 0, old; | ||
5335 | struct net_device *dev = bp->dev; | ||
5336 | struct netdev_hw_addr *ha; | ||
5337 | struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); | ||
5338 | dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); | ||
5339 | |||
5340 | netdev_for_each_mc_addr(ha, dev) { | ||
5341 | /* copy mac */ | ||
5342 | config_cmd->config_table[i].msb_mac_addr = | ||
5343 | swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]); | ||
5344 | config_cmd->config_table[i].middle_mac_addr = | ||
5345 | swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]); | ||
5346 | config_cmd->config_table[i].lsb_mac_addr = | ||
5347 | swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]); | ||
5348 | |||
5349 | config_cmd->config_table[i].vlan_id = 0; | ||
5350 | config_cmd->config_table[i].pf_id = BP_FUNC(bp); | ||
5351 | config_cmd->config_table[i].clients_bit_vector = | ||
5352 | cpu_to_le32(1 << BP_L_ID(bp)); | ||
5353 | |||
5354 | SET_FLAG(config_cmd->config_table[i].flags, | ||
5355 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
5356 | T_ETH_MAC_COMMAND_SET); | ||
5357 | |||
5358 | DP(NETIF_MSG_IFUP, | ||
5359 | "setting MCAST[%d] (%04x:%04x:%04x)\n", i, | ||
5360 | config_cmd->config_table[i].msb_mac_addr, | ||
5361 | config_cmd->config_table[i].middle_mac_addr, | ||
5362 | config_cmd->config_table[i].lsb_mac_addr); | ||
5363 | i++; | ||
5364 | } | ||
5365 | old = config_cmd->hdr.length; | ||
5366 | if (old > i) { | ||
5367 | for (; i < old; i++) { | ||
5368 | if (CAM_IS_INVALID(config_cmd-> | ||
5369 | config_table[i])) { | ||
5370 | /* already invalidated */ | ||
5371 | break; | ||
5372 | } | ||
5373 | /* invalidate */ | ||
5374 | SET_FLAG(config_cmd->config_table[i].flags, | ||
5375 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
5376 | T_ETH_MAC_COMMAND_INVALIDATE); | ||
5377 | } | ||
5378 | } | ||
5379 | |||
5380 | config_cmd->hdr.length = i; | ||
5381 | config_cmd->hdr.offset = offset; | ||
5382 | config_cmd->hdr.client_id = 0xff; | ||
5383 | config_cmd->hdr.reserved1 = 0; | ||
5384 | |||
5385 | bp->set_mac_pending = 1; | ||
5386 | smp_wmb(); | ||
4889 | 5387 | ||
4890 | void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set) | 5388 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, |
5389 | U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); | ||
5390 | } | ||
5391 | static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp) | ||
4891 | { | 5392 | { |
4892 | bp->set_mac_pending++; | 5393 | int i; |
5394 | struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); | ||
5395 | dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); | ||
5396 | int ramrod_flags = WAIT_RAMROD_COMMON; | ||
5397 | |||
5398 | bp->set_mac_pending = 1; | ||
4893 | smp_wmb(); | 5399 | smp_wmb(); |
4894 | 5400 | ||
4895 | bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr, | 5401 | for (i = 0; i < config_cmd->hdr.length; i++) |
4896 | (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0), | 5402 | SET_FLAG(config_cmd->config_table[i].flags, |
4897 | 1); | 5403 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, |
5404 | T_ETH_MAC_COMMAND_INVALIDATE); | ||
5405 | |||
5406 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, | ||
5407 | U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); | ||
4898 | 5408 | ||
4899 | /* Wait for a completion */ | 5409 | /* Wait for a completion */ |
4900 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); | 5410 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, |
5411 | ramrod_flags); | ||
5412 | |||
4901 | } | 5413 | } |
4902 | 5414 | ||
5415 | |||
4903 | #ifdef BCM_CNIC | 5416 | #ifdef BCM_CNIC |
4904 | /** | 5417 | /** |
4905 | * Set iSCSI MAC(s) at the next entries in the CAM after the ETH | 5418 |
@@ -4913,65 +5426,181 @@ void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set) | |||
4913 | */ | 5426 | */ |
4914 | int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) | 5427 | int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) |
4915 | { | 5428 | { |
4916 | u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID); | 5429 | u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) : |
4917 | 5430 | bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE)); | |
4918 | bp->set_mac_pending++; | 5431 | u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID; |
4919 | smp_wmb(); | 5432 | u32 cl_bit_vec = (1 << iscsi_l2_cl_id); |
4920 | 5433 | ||
4921 | /* Send a SET_MAC ramrod */ | 5434 | /* Send a SET_MAC ramrod */ |
4922 | if (CHIP_IS_E1(bp)) | 5435 | bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec, |
4923 | bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac, | 5436 | cam_offset, 0); |
4924 | cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2, | ||
4925 | 1); | ||
4926 | else | ||
4927 | /* CAM allocation for E1H | ||
4928 | * unicasts: by func number | ||
4929 | * multicast: 20+FUNC*20, 20 each | ||
4930 | */ | ||
4931 | bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac, | ||
4932 | cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp)); | ||
4933 | |||
4934 | /* Wait for a completion when setting */ | ||
4935 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); | ||
4936 | |||
4937 | return 0; | 5437 | return 0; |
4938 | } | 5438 | } |
4939 | #endif | 5439 | #endif |
4940 | 5440 | ||
4941 | int bnx2x_setup_leading(struct bnx2x *bp) | 5441 | static void bnx2x_fill_cl_init_data(struct bnx2x *bp, |
4942 | { | 5442 | struct bnx2x_client_init_params *params, |
4943 | int rc; | 5443 | u8 activate, |
5444 | struct client_init_ramrod_data *data) | ||
5445 | { | ||
5446 | /* Clear the buffer */ | ||
5447 | memset(data, 0, sizeof(*data)); | ||
5448 | |||
5449 | /* general */ | ||
5450 | data->general.client_id = params->rxq_params.cl_id; | ||
5451 | data->general.statistics_counter_id = params->rxq_params.stat_id; | ||
5452 | data->general.statistics_en_flg = | ||
5453 | (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0; | ||
5454 | data->general.activate_flg = activate; | ||
5455 | data->general.sp_client_id = params->rxq_params.spcl_id; | ||
5456 | |||
5457 | /* Rx data */ | ||
5458 | data->rx.tpa_en_flg = | ||
5459 | (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0; | ||
5460 | data->rx.vmqueue_mode_en_flg = 0; | ||
5461 | data->rx.cache_line_alignment_log_size = | ||
5462 | params->rxq_params.cache_line_log; | ||
5463 | data->rx.enable_dynamic_hc = | ||
5464 | (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0; | ||
5465 | data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt; | ||
5466 | data->rx.client_qzone_id = params->rxq_params.cl_qzone_id; | ||
5467 | data->rx.max_agg_size = params->rxq_params.tpa_agg_sz; | ||
5468 | |||
5469 | /* We don't set drop flags */ | ||
5470 | data->rx.drop_ip_cs_err_flg = 0; | ||
5471 | data->rx.drop_tcp_cs_err_flg = 0; | ||
5472 | data->rx.drop_ttl0_flg = 0; | ||
5473 | data->rx.drop_udp_cs_err_flg = 0; | ||
5474 | |||
5475 | data->rx.inner_vlan_removal_enable_flg = | ||
5476 | (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0; | ||
5477 | data->rx.outer_vlan_removal_enable_flg = | ||
5478 | (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0; | ||
5479 | data->rx.status_block_id = params->rxq_params.fw_sb_id; | ||
5480 | data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index; | ||
5481 | data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz); | ||
5482 | data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz); | ||
5483 | data->rx.mtu = cpu_to_le16(params->rxq_params.mtu); | ||
5484 | data->rx.bd_page_base.lo = | ||
5485 | cpu_to_le32(U64_LO(params->rxq_params.dscr_map)); | ||
5486 | data->rx.bd_page_base.hi = | ||
5487 | cpu_to_le32(U64_HI(params->rxq_params.dscr_map)); | ||
5488 | data->rx.sge_page_base.lo = | ||
5489 | cpu_to_le32(U64_LO(params->rxq_params.sge_map)); | ||
5490 | data->rx.sge_page_base.hi = | ||
5491 | cpu_to_le32(U64_HI(params->rxq_params.sge_map)); | ||
5492 | data->rx.cqe_page_base.lo = | ||
5493 | cpu_to_le32(U64_LO(params->rxq_params.rcq_map)); | ||
5494 | data->rx.cqe_page_base.hi = | ||
5495 | cpu_to_le32(U64_HI(params->rxq_params.rcq_map)); | ||
5496 | data->rx.is_leading_rss = | ||
5497 | (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0; | ||
5498 | data->rx.is_approx_mcast = data->rx.is_leading_rss; | ||
5499 | |||
5500 | /* Tx data */ | ||
5501 | data->tx.enforce_security_flg = 0; /* VF specific */ | ||
5502 | data->tx.tx_status_block_id = params->txq_params.fw_sb_id; | ||
5503 | data->tx.tx_sb_index_number = params->txq_params.sb_cq_index; | ||
5504 | data->tx.mtu = 0; /* VF specific */ | ||
5505 | data->tx.tx_bd_page_base.lo = | ||
5506 | cpu_to_le32(U64_LO(params->txq_params.dscr_map)); | ||
5507 | data->tx.tx_bd_page_base.hi = | ||
5508 | cpu_to_le32(U64_HI(params->txq_params.dscr_map)); | ||
5509 | |||
5510 | /* flow control data */ | ||
5511 | data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo); | ||
5512 | data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi); | ||
5513 | data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo); | ||
5514 | data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi); | ||
5515 | data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo); | ||
5516 | data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi); | ||
5517 | data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map); | ||
5518 | |||
5519 | data->fc.safc_group_num = params->txq_params.cos; | ||
5520 | data->fc.safc_group_en_flg = | ||
5521 | (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0; | ||
5522 | data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW; | ||
5523 | } | ||
5524 | |||
5525 | static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid) | ||
5526 | { | ||
5527 | /* ustorm cxt validation */ | ||
5528 | cxt->ustorm_ag_context.cdu_usage = | ||
5529 | CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG, | ||
5530 | ETH_CONNECTION_TYPE); | ||
5531 | /* xcontext validation */ | ||
5532 | cxt->xstorm_ag_context.cdu_reserved = | ||
5533 | CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG, | ||
5534 | ETH_CONNECTION_TYPE); | ||
5535 | } | ||
5536 | |||
5537 | int bnx2x_setup_fw_client(struct bnx2x *bp, | ||
5538 | struct bnx2x_client_init_params *params, | ||
5539 | u8 activate, | ||
5540 | struct client_init_ramrod_data *data, | ||
5541 | dma_addr_t data_mapping) | ||
5542 | { | ||
5543 | u16 hc_usec; | ||
5544 | int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; | ||
5545 | int ramrod_flags = 0, rc; | ||
5546 | |||
5547 | /* HC and context validation values */ | ||
5548 | hc_usec = params->txq_params.hc_rate ? | ||
5549 | 1000000 / params->txq_params.hc_rate : 0; | ||
5550 | bnx2x_update_coalesce_sb_index(bp, | ||
5551 | params->txq_params.fw_sb_id, | ||
5552 | params->txq_params.sb_cq_index, | ||
5553 | !(params->txq_params.flags & QUEUE_FLG_HC), | ||
5554 | hc_usec); | ||
5555 | |||
5556 | *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING; | ||
5557 | |||
5558 | hc_usec = params->rxq_params.hc_rate ? | ||
5559 | 1000000 / params->rxq_params.hc_rate : 0; | ||
5560 | bnx2x_update_coalesce_sb_index(bp, | ||
5561 | params->rxq_params.fw_sb_id, | ||
5562 | params->rxq_params.sb_cq_index, | ||
5563 | !(params->rxq_params.flags & QUEUE_FLG_HC), | ||
5564 | hc_usec); | ||
5565 | |||
5566 | bnx2x_set_ctx_validation(params->rxq_params.cxt, | ||
5567 | params->rxq_params.cid); | ||
5568 | |||
5569 | /* zero stats */ | ||
5570 | if (params->txq_params.flags & QUEUE_FLG_STATS) | ||
5571 | storm_memset_xstats_zero(bp, BP_PORT(bp), | ||
5572 | params->txq_params.stat_id); | ||
5573 | |||
5574 | if (params->rxq_params.flags & QUEUE_FLG_STATS) { | ||
5575 | storm_memset_ustats_zero(bp, BP_PORT(bp), | ||
5576 | params->rxq_params.stat_id); | ||
5577 | storm_memset_tstats_zero(bp, BP_PORT(bp), | ||
5578 | params->rxq_params.stat_id); | ||
5579 | } | ||
5580 | |||
5581 | /* Fill the ramrod data */ | ||
5582 | bnx2x_fill_cl_init_data(bp, params, activate, data); | ||
5583 | |||
5584 | /* SETUP ramrod. | ||
5585 | * | ||
5586 | * bnx2x_sp_post() takes a spin_lock thus no other explicit memory | ||
5587 | * barrier except for mmiowb() is needed to impose a | ||
5588 | * proper ordering of memory operations. | ||
5589 | */ | ||
5590 | mmiowb(); | ||
4944 | 5591 | ||
4945 | /* reset IGU state */ | ||
4946 | bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | ||
4947 | 5592 | ||
4948 | /* SETUP ramrod */ | 5593 | bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid, |
4949 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0); | 5594 | U64_HI(data_mapping), U64_LO(data_mapping), 0); |
4950 | 5595 | ||
4951 | /* Wait for completion */ | 5596 | /* Wait for completion */ |
4952 | rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0); | 5597 | rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state, |
4953 | 5598 | params->ramrod_params.index, | |
5599 | params->ramrod_params.pstate, | ||
5600 | ramrod_flags); | ||
4954 | return rc; | 5601 | return rc; |
4955 | } | 5602 | } |
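The hc_rate fields express host coalescing as interrupts per second; bnx2x_setup_fw_client() converts that into the microsecond period the status-block timer expects, with 0 meaning the timer is off. For instance:

	/* 1,000,000 us per second divided by the allowed interrupt rate:
	 * hc_rate = 5000 int/s -> 200 us; hc_rate = 0 disables coalescing. */
	u16 hc_usec = hc_rate ? 1000000 / hc_rate : 0;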
4956 | 5603 | ||
4957 | int bnx2x_setup_multi(struct bnx2x *bp, int index) | ||
4958 | { | ||
4959 | struct bnx2x_fastpath *fp = &bp->fp[index]; | ||
4960 | |||
4961 | /* reset IGU state */ | ||
4962 | bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | ||
4963 | |||
4964 | /* SETUP ramrod */ | ||
4965 | fp->state = BNX2X_FP_STATE_OPENING; | ||
4966 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, | ||
4967 | fp->cl_id, 0); | ||
4968 | |||
4969 | /* Wait for completion */ | ||
4970 | return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index, | ||
4971 | &(fp->state), 0); | ||
4972 | } | ||
4973 | |||
4974 | |||
4975 | void bnx2x_set_num_queues_msix(struct bnx2x *bp) | 5604 | void bnx2x_set_num_queues_msix(struct bnx2x *bp) |
4976 | { | 5605 | { |
4977 | 5606 | ||
@@ -4996,87 +5625,217 @@ void bnx2x_set_num_queues_msix(struct bnx2x *bp) | |||
4996 | } | 5625 | } |
4997 | } | 5626 | } |
4998 | 5627 | ||
5628 | void bnx2x_ilt_set_info(struct bnx2x *bp) | ||
5629 | { | ||
5630 | struct ilt_client_info *ilt_client; | ||
5631 | struct bnx2x_ilt *ilt = BP_ILT(bp); | ||
5632 | u16 line = 0; | ||
5633 | |||
5634 | ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); | ||
5635 | DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line); | ||
5636 | |||
5637 | /* CDU */ | ||
5638 | ilt_client = &ilt->clients[ILT_CLIENT_CDU]; | ||
5639 | ilt_client->client_num = ILT_CLIENT_CDU; | ||
5640 | ilt_client->page_size = CDU_ILT_PAGE_SZ; | ||
5641 | ilt_client->flags = ILT_CLIENT_SKIP_MEM; | ||
5642 | ilt_client->start = line; | ||
5643 | line += L2_ILT_LINES(bp); | ||
5644 | #ifdef BCM_CNIC | ||
5645 | line += CNIC_ILT_LINES; | ||
5646 | #endif | ||
5647 | ilt_client->end = line - 1; | ||
5648 | |||
5649 | DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, " | ||
5650 | "flags 0x%x, hw psz %d\n", | ||
5651 | ilt_client->start, | ||
5652 | ilt_client->end, | ||
5653 | ilt_client->page_size, | ||
5654 | ilt_client->flags, | ||
5655 | ilog2(ilt_client->page_size >> 12)); | ||
5656 | |||
5657 | /* QM */ | ||
5658 | if (QM_INIT(bp->qm_cid_count)) { | ||
5659 | ilt_client = &ilt->clients[ILT_CLIENT_QM]; | ||
5660 | ilt_client->client_num = ILT_CLIENT_QM; | ||
5661 | ilt_client->page_size = QM_ILT_PAGE_SZ; | ||
5662 | ilt_client->flags = 0; | ||
5663 | ilt_client->start = line; | ||
5664 | |||
5665 | /* 4 bytes for each cid */ | ||
5666 | line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, | ||
5667 | QM_ILT_PAGE_SZ); | ||
5668 | |||
5669 | ilt_client->end = line - 1; | ||
5670 | |||
5671 | DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, " | ||
5672 | "flags 0x%x, hw psz %d\n", | ||
5673 | ilt_client->start, | ||
5674 | ilt_client->end, | ||
5675 | ilt_client->page_size, | ||
5676 | ilt_client->flags, | ||
5677 | ilog2(ilt_client->page_size >> 12)); | ||
5678 | |||
5679 | } | ||
5680 | /* SRC */ | ||
5681 | ilt_client = &ilt->clients[ILT_CLIENT_SRC]; | ||
5682 | #ifdef BCM_CNIC | ||
5683 | ilt_client->client_num = ILT_CLIENT_SRC; | ||
5684 | ilt_client->page_size = SRC_ILT_PAGE_SZ; | ||
5685 | ilt_client->flags = 0; | ||
5686 | ilt_client->start = line; | ||
5687 | line += SRC_ILT_LINES; | ||
5688 | ilt_client->end = line - 1; | ||
5689 | |||
5690 | DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, " | ||
5691 | "flags 0x%x, hw psz %d\n", | ||
5692 | ilt_client->start, | ||
5693 | ilt_client->end, | ||
5694 | ilt_client->page_size, | ||
5695 | ilt_client->flags, | ||
5696 | ilog2(ilt_client->page_size >> 12)); | ||
5697 | |||
5698 | #else | ||
5699 | ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); | ||
5700 | #endif | ||
4999 | 5701 | ||
5702 | /* TM */ | ||
5703 | ilt_client = &ilt->clients[ILT_CLIENT_TM]; | ||
5704 | #ifdef BCM_CNIC | ||
5705 | ilt_client->client_num = ILT_CLIENT_TM; | ||
5706 | ilt_client->page_size = TM_ILT_PAGE_SZ; | ||
5707 | ilt_client->flags = 0; | ||
5708 | ilt_client->start = line; | ||
5709 | line += TM_ILT_LINES; | ||
5710 | ilt_client->end = line - 1; | ||
5711 | |||
5712 | DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, " | ||
5713 | "flags 0x%x, hw psz %d\n", | ||
5714 | ilt_client->start, | ||
5715 | ilt_client->end, | ||
5716 | ilt_client->page_size, | ||
5717 | ilt_client->flags, | ||
5718 | ilog2(ilt_client->page_size >> 12)); | ||
5000 | 5719 | ||
5001 | static int bnx2x_stop_multi(struct bnx2x *bp, int index) | 5720 | #else |
5721 | ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); | ||
5722 | #endif | ||
5723 | } | ||
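The QM client sizing above allots 4 bytes per connection per queue and rounds up to whole ILT pages. A worked example with assumed values (qm_cid_count = 64, QM_QUEUES_PER_FUNC = 16, QM_ILT_PAGE_SZ = 4096):

	/* 64 cids * 16 queues * 4 bytes = 4096 bytes exactly,
	 * so DIV_ROUND_UP(4096, 4096) = 1 ILT line for this client. */
	u16 qm_lines = DIV_ROUND_UP(64 * 16 * 4, 4096);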
5724 | int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
5725 | int is_leading) | ||
5002 | { | 5726 | { |
5003 | struct bnx2x_fastpath *fp = &bp->fp[index]; | 5727 | struct bnx2x_client_init_params params = { {0} }; |
5004 | int rc; | 5728 | int rc; |
5005 | 5729 | ||
5006 | /* halt the connection */ | 5730 | bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, |
5007 | fp->state = BNX2X_FP_STATE_HALTING; | 5731 | IGU_INT_ENABLE, 0); |
5008 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0); | ||
5009 | 5732 | ||
5010 | /* Wait for completion */ | 5733 | params.ramrod_params.pstate = &fp->state; |
5011 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, | 5734 | params.ramrod_params.state = BNX2X_FP_STATE_OPEN; |
5012 | &(fp->state), 1); | 5735 | params.ramrod_params.index = fp->index; |
5013 | if (rc) /* timeout */ | 5736 | params.ramrod_params.cid = fp->cid; |
5014 | return rc; | ||
5015 | 5737 | ||
5016 | /* delete cfc entry */ | 5738 | if (is_leading) |
5017 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1); | 5739 | params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS; |
5018 | 5740 | ||
5019 | /* Wait for completion */ | 5741 | bnx2x_pf_rx_cl_prep(bp, fp, ¶ms.pause, ¶ms.rxq_params); |
5020 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index, | 5742 | |
5021 | &(fp->state), 1); | 5743 | bnx2x_pf_tx_cl_prep(bp, fp, ¶ms.txq_params); |
5744 | |||
5745 | rc = bnx2x_setup_fw_client(bp, ¶ms, 1, | ||
5746 | bnx2x_sp(bp, client_init_data), | ||
5747 | bnx2x_sp_mapping(bp, client_init_data)); | ||
5022 | return rc; | 5748 | return rc; |
5023 | } | 5749 | } |
5024 | 5750 | ||
5025 | static int bnx2x_stop_leading(struct bnx2x *bp) | 5751 | int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p) |
5026 | { | 5752 | { |
5027 | __le16 dsb_sp_prod_idx; | ||
5028 | /* if the other port is handling traffic, | ||
5029 | this can take a lot of time */ | ||
5030 | int cnt = 500; | ||
5031 | int rc; | 5753 | int rc; |
5032 | 5754 | ||
5033 | might_sleep(); | 5755 | int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0; |
5034 | 5756 | ||
5035 | /* Send HALT ramrod */ | 5757 | /* halt the connection */ |
5036 | bp->fp[0].state = BNX2X_FP_STATE_HALTING; | 5758 | *p->pstate = BNX2X_FP_STATE_HALTING; |
5037 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0); | 5759 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0, |
5760 | p->cl_id, 0); | ||
5038 | 5761 | ||
5039 | /* Wait for completion */ | 5762 | /* Wait for completion */ |
5040 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, | 5763 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index, |
5041 | &(bp->fp[0].state), 1); | 5764 | p->pstate, poll_flag); |
5042 | if (rc) /* timeout */ | 5765 | if (rc) /* timeout */ |
5043 | return rc; | 5766 | return rc; |
5044 | 5767 | ||
5045 | dsb_sp_prod_idx = *bp->dsb_sp_prod; | 5768 | *p->pstate = BNX2X_FP_STATE_TERMINATING; |
5769 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0, | ||
5770 | p->cl_id, 0); | ||
5771 | /* Wait for completion */ | ||
5772 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index, | ||
5773 | p->pstate, poll_flag); | ||
5774 | if (rc) /* timeout */ | ||
5775 | return rc; | ||
5046 | 5776 | ||
5047 | /* Send PORT_DELETE ramrod */ | ||
5048 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1); | ||
5049 | 5777 | ||
5050 | /* Wait for completion to arrive on default status block | 5778 | /* delete cfc entry */ |
5051 | we are going to reset the chip anyway | 5779 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1); |
5052 | so there is not much to do if this times out | ||
5053 | */ | ||
5054 | while (dsb_sp_prod_idx == *bp->dsb_sp_prod) { | ||
5055 | if (!cnt) { | ||
5056 | DP(NETIF_MSG_IFDOWN, "timeout waiting for port del " | ||
5057 | "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", | ||
5058 | *bp->dsb_sp_prod, dsb_sp_prod_idx); | ||
5059 | #ifdef BNX2X_STOP_ON_ERROR | ||
5060 | bnx2x_panic(); | ||
5061 | #endif | ||
5062 | rc = -EBUSY; | ||
5063 | break; | ||
5064 | } | ||
5065 | cnt--; | ||
5066 | msleep(1); | ||
5067 | rmb(); /* Refresh the dsb_sp_prod */ | ||
5068 | } | ||
5069 | bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; | ||
5070 | bp->fp[0].state = BNX2X_FP_STATE_CLOSED; | ||
5071 | 5780 | ||
5781 | /* Wait for completion */ | ||
5782 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index, | ||
5783 | p->pstate, WAIT_RAMROD_COMMON); | ||
5072 | return rc; | 5784 | return rc; |
5073 | } | 5785 | } |
5074 | 5786 | ||
5787 | static int bnx2x_stop_client(struct bnx2x *bp, int index) | ||
5788 | { | ||
5789 | struct bnx2x_client_ramrod_params client_stop = {0}; | ||
5790 | struct bnx2x_fastpath *fp = &bp->fp[index]; | ||
5791 | |||
5792 | client_stop.index = index; | ||
5793 | client_stop.cid = fp->cid; | ||
5794 | client_stop.cl_id = fp->cl_id; | ||
5795 | client_stop.pstate = &(fp->state); | ||
5796 | client_stop.poll = 0; | ||
5797 | |||
5798 | return bnx2x_stop_fw_client(bp, &client_stop); | ||
5799 | } | ||
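bnx2x_stop_client() packs the per-queue identifiers into a ramrod-params struct and hands them to bnx2x_stop_fw_client(), which now performs a three-stage teardown — HALT, then the new TERMINATE command, then CFC_DEL — waiting for the fastpath state to advance after each step. The call shape, mirroring the code above:

	struct bnx2x_client_ramrod_params p = {
		.index	= fp->index,
		.cid	= fp->cid,
		.cl_id	= fp->cl_id,
		.pstate	= &fp->state,
		.poll	= 0,	/* interrupts enabled: sleep, don't poll */
	};
	/* HALTING -> HALTED -> TERMINATING -> TERMINATED -> CLOSED */
	rc = bnx2x_stop_fw_client(bp, &p);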
5800 | |||
5801 | |||
5075 | static void bnx2x_reset_func(struct bnx2x *bp) | 5802 | static void bnx2x_reset_func(struct bnx2x *bp) |
5076 | { | 5803 | { |
5077 | int port = BP_PORT(bp); | 5804 | int port = BP_PORT(bp); |
5078 | int func = BP_FUNC(bp); | 5805 | int func = BP_FUNC(bp); |
5079 | int base, i; | 5806 | int base, i; |
5807 | int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) + | ||
5808 | offsetof(struct hc_status_block_data_e1x, common); | ||
5809 | int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func); | ||
5810 | int pfid_offset = offsetof(struct pci_entity, pf_id); | ||
5811 | |||
5812 | /* Disable the function in the FW */ | ||
5813 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); | ||
5814 | REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); | ||
5815 | REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); | ||
5816 | REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); | ||
5817 | |||
5818 | /* FP SBs */ | ||
5819 | for_each_queue(bp, i) { | ||
5820 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
5821 | REG_WR8(bp, | ||
5822 | BAR_CSTRORM_INTMEM + | ||
5823 | CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) | ||
5824 | + pfunc_offset_fp + pfid_offset, | ||
5825 | HC_FUNCTION_DISABLED); | ||
5826 | } | ||
5827 | |||
5828 | /* SP SB */ | ||
5829 | REG_WR8(bp, | ||
5830 | BAR_CSTRORM_INTMEM + | ||
5831 | CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + | ||
5832 | pfunc_offset_sp + pfid_offset, | ||
5833 | HC_FUNCTION_DISABLED); | ||
5834 | |||
5835 | |||
5836 | for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) | ||
5837 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), | ||
5838 | 0); | ||
5080 | 5839 | ||
5081 | /* Configure IGU */ | 5840 | /* Configure IGU */ |
5082 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); | 5841 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); |
@@ -5099,6 +5858,8 @@ static void bnx2x_reset_func(struct bnx2x *bp) | |||
5099 | base = FUNC_ILT_BASE(func); | 5858 | base = FUNC_ILT_BASE(func); |
5100 | for (i = base; i < base + ILT_PER_FUNC; i++) | 5859 | for (i = base; i < base + ILT_PER_FUNC; i++) |
5101 | bnx2x_ilt_wr(bp, i, 0); | 5860 | bnx2x_ilt_wr(bp, i, 0); |
5861 | |||
5862 | bp->dmae_ready = 0; | ||
5102 | } | 5863 | } |
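bnx2x_reset_func() now disables the function directly in storm memory by computing byte offsets with offsetof() and poking single bytes via REG_WR8. A self-contained illustration of the technique, with illustrative struct layouts (the real ones come from the HSI headers):

	#include <stddef.h>

	struct pci_entity { u8 pf_id; u8 vf_id; u8 vf_valid; };
	struct sb_data    { u32 host_addr; struct pci_entity p_func; };

	/* Byte offset of p_func.pf_id inside sb_data -- the same idea
	 * the driver uses to locate pf_id inside FW status-block data
	 * that lives in adapter memory. */
	size_t pfid_off = offsetof(struct sb_data, p_func) +
			  offsetof(struct pci_entity, pf_id);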
5103 | 5864 | ||
5104 | static void bnx2x_reset_port(struct bnx2x *bp) | 5865 | static void bnx2x_reset_port(struct bnx2x *bp) |
@@ -5167,7 +5928,6 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) | |||
5167 | cnt = 1000; | 5928 | cnt = 1000; |
5168 | while (bnx2x_has_tx_work_unload(fp)) { | 5929 | while (bnx2x_has_tx_work_unload(fp)) { |
5169 | 5930 | ||
5170 | bnx2x_tx_int(fp); | ||
5171 | if (!cnt) { | 5931 | if (!cnt) { |
5172 | BNX2X_ERR("timeout waiting for queue[%d]\n", | 5932 | BNX2X_ERR("timeout waiting for queue[%d]\n", |
5173 | i); | 5933 | i); |
@@ -5186,39 +5946,21 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) | |||
5186 | msleep(1); | 5946 | msleep(1); |
5187 | 5947 | ||
5188 | if (CHIP_IS_E1(bp)) { | 5948 | if (CHIP_IS_E1(bp)) { |
5189 | struct mac_configuration_cmd *config = | 5949 | /* invalidate mc list, |
5190 | bnx2x_sp(bp, mcast_config); | 5950 | * wait and poll (interrupts are off) |
5191 | 5951 | */ | |
5192 | bnx2x_set_eth_mac_addr_e1(bp, 0); | 5952 | bnx2x_invlidate_e1_mc_list(bp); |
5193 | 5953 | bnx2x_set_eth_mac(bp, 0); | |
5194 | for (i = 0; i < config->hdr.length; i++) | ||
5195 | CAM_INVALIDATE(config->config_table[i]); | ||
5196 | |||
5197 | config->hdr.length = i; | ||
5198 | if (CHIP_REV_IS_SLOW(bp)) | ||
5199 | config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port); | ||
5200 | else | ||
5201 | config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port); | ||
5202 | config->hdr.client_id = bp->fp->cl_id; | ||
5203 | config->hdr.reserved1 = 0; | ||
5204 | |||
5205 | bp->set_mac_pending++; | ||
5206 | smp_wmb(); | ||
5207 | |||
5208 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | ||
5209 | U64_HI(bnx2x_sp_mapping(bp, mcast_config)), | ||
5210 | U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0); | ||
5211 | 5954 | ||
5212 | } else { /* E1H */ | 5955 | } else { |
5213 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); | 5956 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); |
5214 | 5957 | ||
5215 | bnx2x_set_eth_mac_addr_e1h(bp, 0); | 5958 | bnx2x_set_eth_mac(bp, 0); |
5216 | 5959 | ||
5217 | for (i = 0; i < MC_HASH_SIZE; i++) | 5960 | for (i = 0; i < MC_HASH_SIZE; i++) |
5218 | REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); | 5961 | REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); |
5219 | |||
5220 | REG_WR(bp, MISC_REG_E1HMF_MODE, 0); | ||
5221 | } | 5962 | } |
5963 | |||
5222 | #ifdef BCM_CNIC | 5964 | #ifdef BCM_CNIC |
5223 | /* Clear iSCSI L2 MAC */ | 5965 | /* Clear iSCSI L2 MAC */ |
5224 | mutex_lock(&bp->cnic_mutex); | 5966 | mutex_lock(&bp->cnic_mutex); |
@@ -5257,21 +5999,27 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) | |||
5257 | 5999 | ||
5258 | /* Close multi and leading connections | 6000 | /* Close multi and leading connections |
5259 | Completions for ramrods are collected in a synchronous way */ | 6001 | Completions for ramrods are collected in a synchronous way */ |
5260 | for_each_nondefault_queue(bp, i) | 6002 | for_each_queue(bp, i) |
5261 | if (bnx2x_stop_multi(bp, i)) | 6003 | |
6004 | if (bnx2x_stop_client(bp, i)) | ||
6005 | #ifdef BNX2X_STOP_ON_ERROR | ||
6006 | return; | ||
6007 | #else | ||
5262 | goto unload_error; | 6008 | goto unload_error; |
6009 | #endif | ||
5263 | 6010 | ||
5264 | rc = bnx2x_stop_leading(bp); | 6011 | rc = bnx2x_func_stop(bp); |
5265 | if (rc) { | 6012 | if (rc) { |
5266 | BNX2X_ERR("Stop leading failed!\n"); | 6013 | BNX2X_ERR("Function stop failed!\n"); |
5267 | #ifdef BNX2X_STOP_ON_ERROR | 6014 | #ifdef BNX2X_STOP_ON_ERROR |
5268 | return -EBUSY; | 6015 | return; |
5269 | #else | 6016 | #else |
5270 | goto unload_error; | 6017 | goto unload_error; |
5271 | #endif | 6018 | #endif |
5272 | } | 6019 | } |
5273 | 6020 | #ifndef BNX2X_STOP_ON_ERROR | |
5274 | unload_error: | 6021 | unload_error: |
6022 | #endif | ||
5275 | if (!BP_NOMCP(bp)) | 6023 | if (!BP_NOMCP(bp)) |
5276 | reset_code = bnx2x_fw_command(bp, reset_code, 0); | 6024 | reset_code = bnx2x_fw_command(bp, reset_code, 0); |
5277 | else { | 6025 | else { |
@@ -5293,6 +6041,12 @@ unload_error: | |||
5293 | (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT)) | 6041 | (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT)) |
5294 | bnx2x__link_reset(bp); | 6042 | bnx2x__link_reset(bp); |
5295 | 6043 | ||
6044 | /* Disable HW interrupts, NAPI */ | ||
6045 | bnx2x_netif_stop(bp, 1); | ||
6046 | |||
6047 | /* Release IRQs */ | ||
6048 | bnx2x_free_irq(bp, false); | ||
6049 | |||
5296 | /* Reset the chip */ | 6050 | /* Reset the chip */ |
5297 | bnx2x_reset_chip(bp, reset_code); | 6051 | bnx2x_reset_chip(bp, reset_code); |
5298 | 6052 | ||
@@ -5953,6 +6707,18 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
5953 | bp->link_params.chip_id = bp->common.chip_id; | 6707 | bp->link_params.chip_id = bp->common.chip_id; |
5954 | BNX2X_DEV_INFO("chip ID is 0x%x\n", id); | 6708 | BNX2X_DEV_INFO("chip ID is 0x%x\n", id); |
5955 | 6709 | ||
6710 | bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ | ||
6711 | |||
6712 | /* Set doorbell size */ | ||
6713 | bp->db_size = (1 << BNX2X_DB_SHIFT); | ||
6714 | |||
6715 | /* | ||
6716 | * set base FW non-default (fast path) status block id; this value is | ||
6717 | * used to initialize the fw_sb_id saved on the fp/queue structure to | ||
6718 | * determine the id used by the FW. | ||
6719 | */ | ||
6720 | bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x; | ||
6721 | |||
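With FW status-block ids now fixed per port, each port claims a contiguous stripe of FP_SB_MAX_E1x ids. Assuming FP_SB_MAX_E1x == 16, queue n on port 1 would use fw_sb_id = 16 + n (queue_index is a hypothetical name):

	/* Assumed FP_SB_MAX_E1x = 16: port 0 owns ids 0..15,
	 * port 1 owns ids 16..31; queue n maps to base + n. */
	u8 fw_sb_id = bp->base_fw_ndsb + queue_index;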
5956 | val = (REG_RD(bp, 0x2874) & 0x55); | 6722 | val = (REG_RD(bp, 0x2874) & 0x55); |
5957 | if ((bp->common.chip_id & 0x1) || | 6723 | if ((bp->common.chip_id & 0x1) || |
5958 | (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { | 6724 | (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { |
@@ -6417,13 +7183,23 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
6417 | 7183 | ||
6418 | bnx2x_get_common_hwinfo(bp); | 7184 | bnx2x_get_common_hwinfo(bp); |
6419 | 7185 | ||
7186 | bp->common.int_block = INT_BLOCK_HC; | ||
7187 | |||
7188 | bp->igu_dsb_id = DEF_SB_IGU_ID; | ||
7189 | bp->igu_base_sb = 0; | ||
7190 | bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count); | ||
7191 | |||
6420 | bp->e1hov = 0; | 7192 | bp->e1hov = 0; |
6421 | bp->e1hmf = 0; | 7193 | bp->e1hmf = 0; |
6422 | if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) { | 7194 | if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) { |
7195 | |||
7196 | bp->common.mf_cfg_base = bp->common.shmem_base + | ||
7197 | offsetof(struct shmem_region, func_mb) + | ||
7198 | E1H_FUNC_MAX * sizeof(struct drv_func_mb); | ||
6423 | bp->mf_config = | 7199 | bp->mf_config = |
6424 | SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); | 7200 | MF_CFG_RD(bp, func_mf_config[func].config); |
6425 | 7201 | ||
6426 | val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) & | 7202 | val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) & |
6427 | FUNC_MF_CFG_E1HOV_TAG_MASK); | 7203 | FUNC_MF_CFG_E1HOV_TAG_MASK); |
6428 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) | 7204 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) |
6429 | bp->e1hmf = 1; | 7205 | bp->e1hmf = 1; |
@@ -6431,7 +7207,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
6431 | IS_E1HMF(bp) ? "multi" : "single"); | 7207 | IS_E1HMF(bp) ? "multi" : "single"); |
6432 | 7208 | ||
6433 | if (IS_E1HMF(bp)) { | 7209 | if (IS_E1HMF(bp)) { |
6434 | val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func]. | 7210 | val = (MF_CFG_RD(bp, func_mf_config[func]. |
6435 | e1hov_tag) & | 7211 | e1hov_tag) & |
6436 | FUNC_MF_CFG_E1HOV_TAG_MASK); | 7212 | FUNC_MF_CFG_E1HOV_TAG_MASK); |
6437 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { | 7213 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { |
@@ -6453,6 +7229,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
6453 | } | 7229 | } |
6454 | } | 7230 | } |
6455 | 7231 | ||
7232 | /* adjust igu_sb_cnt to MF */ | ||
7233 | if (IS_E1HMF(bp)) | ||
7234 | bp->igu_sb_cnt /= E1HVN_MAX; | ||
7235 | |||
6456 | if (!BP_NOMCP(bp)) { | 7236 | if (!BP_NOMCP(bp)) { |
6457 | bnx2x_get_port_hwinfo(bp); | 7237 | bnx2x_get_port_hwinfo(bp); |
6458 | 7238 | ||
@@ -6462,8 +7242,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
6462 | } | 7242 | } |
6463 | 7243 | ||
6464 | if (IS_E1HMF(bp)) { | 7244 | if (IS_E1HMF(bp)) { |
6465 | val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper); | 7245 | val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); |
6466 | val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower); | 7246 | val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); |
6467 | if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && | 7247 | if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && |
6468 | (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) { | 7248 | (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) { |
6469 | bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff); | 7249 | bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff); |
@@ -6577,6 +7357,9 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
6577 | 7357 | ||
6578 | rc = bnx2x_get_hwinfo(bp); | 7358 | rc = bnx2x_get_hwinfo(bp); |
6579 | 7359 | ||
7360 | if (!rc) | ||
7361 | rc = bnx2x_alloc_mem_bp(bp); | ||
7362 | |||
6580 | bnx2x_read_fwinfo(bp); | 7363 | bnx2x_read_fwinfo(bp); |
6581 | /* need to reset chip if undi was active */ | 7364 | /* need to reset chip if undi was active */ |
6582 | if (!BP_NOMCP(bp)) | 7365 | if (!BP_NOMCP(bp)) |
@@ -6623,8 +7406,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
6623 | bp->rx_csum = 1; | 7406 | bp->rx_csum = 1; |
6624 | 7407 | ||
6625 | /* make sure that the numbers are in the right granularity */ | 7408 | /* make sure that the numbers are in the right granularity */ |
6626 | bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); | 7409 | bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; |
6627 | bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); | 7410 | bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; |
6628 | 7411 | ||
6629 | timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); | 7412 | timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); |
6630 | bp->current_interval = (poll ? poll : timer_interval); | 7413 | bp->current_interval = (poll ? poll : timer_interval); |
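The coalescing defaults are snapped down to the timer resolution with the integer floor-to-multiple idiom (n / step) * step; the change from 4 * BNX2X_BTR to BNX2X_BTR reflects the new FW's finer timer granularity. Assuming BNX2X_BTR == 4:

	/* (50 / 4) * 4 = 48 us and (25 / 4) * 4 = 24 us -- both snapped
	 * down to exact multiples of the assumed 4 us base resolution. */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;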
@@ -6724,73 +7507,16 @@ void bnx2x_set_rx_mode(struct net_device *dev) | |||
6724 | 7507 | ||
6725 | else { /* some multicasts */ | 7508 | else { /* some multicasts */ |
6726 | if (CHIP_IS_E1(bp)) { | 7509 | if (CHIP_IS_E1(bp)) { |
6727 | int i, old, offset; | 7510 | /* |
6728 | struct netdev_hw_addr *ha; | 7511 | * set mc list, do not wait as wait implies sleep |
6729 | struct mac_configuration_cmd *config = | 7512 | * and set_rx_mode can be invoked from non-sleepable |
6730 | bnx2x_sp(bp, mcast_config); | 7513 | * context |
6731 | 7514 | */ | |
6732 | i = 0; | 7515 | u8 offset = (CHIP_REV_IS_SLOW(bp) ? |
6733 | netdev_for_each_mc_addr(ha, dev) { | 7516 | BNX2X_MAX_EMUL_MULTI*(1 + port) : |
6734 | config->config_table[i]. | 7517 | BNX2X_MAX_MULTICAST*(1 + port)); |
6735 | cam_entry.msb_mac_addr = | ||
6736 | swab16(*(u16 *)&ha->addr[0]); | ||
6737 | config->config_table[i]. | ||
6738 | cam_entry.middle_mac_addr = | ||
6739 | swab16(*(u16 *)&ha->addr[2]); | ||
6740 | config->config_table[i]. | ||
6741 | cam_entry.lsb_mac_addr = | ||
6742 | swab16(*(u16 *)&ha->addr[4]); | ||
6743 | config->config_table[i].cam_entry.flags = | ||
6744 | cpu_to_le16(port); | ||
6745 | config->config_table[i]. | ||
6746 | target_table_entry.flags = 0; | ||
6747 | config->config_table[i].target_table_entry. | ||
6748 | clients_bit_vector = | ||
6749 | cpu_to_le32(1 << BP_L_ID(bp)); | ||
6750 | config->config_table[i]. | ||
6751 | target_table_entry.vlan_id = 0; | ||
6752 | |||
6753 | DP(NETIF_MSG_IFUP, | ||
6754 | "setting MCAST[%d] (%04x:%04x:%04x)\n", i, | ||
6755 | config->config_table[i]. | ||
6756 | cam_entry.msb_mac_addr, | ||
6757 | config->config_table[i]. | ||
6758 | cam_entry.middle_mac_addr, | ||
6759 | config->config_table[i]. | ||
6760 | cam_entry.lsb_mac_addr); | ||
6761 | i++; | ||
6762 | } | ||
6763 | old = config->hdr.length; | ||
6764 | if (old > i) { | ||
6765 | for (; i < old; i++) { | ||
6766 | if (CAM_IS_INVALID(config-> | ||
6767 | config_table[i])) { | ||
6768 | /* already invalidated */ | ||
6769 | break; | ||
6770 | } | ||
6771 | /* invalidate */ | ||
6772 | CAM_INVALIDATE(config-> | ||
6773 | config_table[i]); | ||
6774 | } | ||
6775 | } | ||
6776 | |||
6777 | if (CHIP_REV_IS_SLOW(bp)) | ||
6778 | offset = BNX2X_MAX_EMUL_MULTI*(1 + port); | ||
6779 | else | ||
6780 | offset = BNX2X_MAX_MULTICAST*(1 + port); | ||
6781 | |||
6782 | config->hdr.length = i; | ||
6783 | config->hdr.offset = offset; | ||
6784 | config->hdr.client_id = bp->fp->cl_id; | ||
6785 | config->hdr.reserved1 = 0; | ||
6786 | |||
6787 | bp->set_mac_pending++; | ||
6788 | smp_wmb(); | ||
6789 | 7518 | ||
6790 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, | 7519 | bnx2x_set_e1_mc_list(bp, offset); |
6791 | U64_HI(bnx2x_sp_mapping(bp, mcast_config)), | ||
6792 | U64_LO(bnx2x_sp_mapping(bp, mcast_config)), | ||
6793 | 0); | ||
6794 | } else { /* E1H */ | 7520 | } else { /* E1H */ |
6795 | /* Accept one or more multicasts */ | 7521 | /* Accept one or more multicasts */ |
6796 | struct netdev_hw_addr *ha; | 7522 | struct netdev_hw_addr *ha; |
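The rewritten E1 path computes only the per-port CAM window and defers the table programming to bnx2x_set_e1_mc_list(), which must not sleep since set_rx_mode runs in atomic context. A sketch of the offset arithmetic with stand-in table sizes (the real BNX2X_MAX_* values are not shown in this hunk):

/* Hypothetical sizes standing in for BNX2X_MAX_MULTICAST and
 * BNX2X_MAX_EMUL_MULTI. */
#define MAX_MULTI       64
#define MAX_EMUL_MULTI  16

static unsigned e1_mc_cam_offset(int port, int slow_rev)
{
        unsigned size = slow_rev ? MAX_EMUL_MULTI : MAX_MULTI;
        return size * (1 + port);  /* disjoint windows for port 0 and 1 */
}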
@@ -6802,9 +7528,10 @@ void bnx2x_set_rx_mode(struct net_device *dev) | |||
6802 | 7528 | ||
6803 | netdev_for_each_mc_addr(ha, dev) { | 7529 | netdev_for_each_mc_addr(ha, dev) { |
6804 | DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", | 7530 | DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", |
6805 | ha->addr); | 7531 | bnx2x_mc_addr(ha)); |
6806 | 7532 | ||
6807 | crc = crc32c_le(0, ha->addr, ETH_ALEN); | 7533 | crc = crc32c_le(0, bnx2x_mc_addr(ha), |
7534 | ETH_ALEN); | ||
6808 | bit = (crc >> 24) & 0xff; | 7535 | bit = (crc >> 24) & 0xff; |
6809 | regidx = bit >> 5; | 7536 | regidx = bit >> 5; |
6810 | bit &= 0x1f; | 7537 | bit &= 0x1f; |
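Each multicast MAC is folded into a 256-bit hash filter here: the top byte of the CRC32C selects one of 256 bits, which is split into a register index (eight 32-bit words) and a bit position. A minimal sketch of that mapping, taking the CRC value as input:

#include <stdint.h>

/* Mirror of the shifts above; crc is the 32-bit CRC32C of the MAC. */
static void e1h_mc_hash_position(uint32_t crc, unsigned *regidx,
                                 unsigned *bitpos)
{
        unsigned bit = (crc >> 24) & 0xff;  /* 1 of 256 filter bits */
        *regidx = bit >> 5;                 /* which 32-bit word */
        *bitpos = bit & 0x1f;               /* bit within that word */
}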
@@ -6817,6 +7544,7 @@ void bnx2x_set_rx_mode(struct net_device *dev) | |||
6817 | } | 7544 | } |
6818 | } | 7545 | } |
6819 | 7546 | ||
7547 | |||
6820 | bp->rx_mode = rx_mode; | 7548 | bp->rx_mode = rx_mode; |
6821 | bnx2x_set_storm_rx_mode(bp); | 7549 | bnx2x_set_storm_rx_mode(bp); |
6822 | } | 7550 | } |
@@ -7003,7 +7731,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, | |||
7003 | } | 7731 | } |
7004 | 7732 | ||
7005 | bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), | 7733 | bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), |
7006 | min_t(u64, BNX2X_DB_SIZE, | 7734 | min_t(u64, BNX2X_DB_SIZE(bp), |
7007 | pci_resource_len(pdev, 2))); | 7735 | pci_resource_len(pdev, 2))); |
7008 | if (!bp->doorbells) { | 7736 | if (!bp->doorbells) { |
7009 | dev_err(&bp->pdev->dev, | 7737 | dev_err(&bp->pdev->dev, |
@@ -7179,6 +7907,30 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) | |||
7179 | } | 7907 | } |
7180 | } | 7908 | } |
7181 | 7909 | ||
7910 | /** | ||
7911 | * IRO array is stored in the following format: | ||
7912 | * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } | ||
7913 | */ | ||
7914 | static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) | ||
7915 | { | ||
7916 | const __be32 *source = (const __be32 *)_source; | ||
7917 | struct iro *target = (struct iro *)_target; | ||
7918 | u32 i, j, tmp; | ||
7919 | |||
7920 | for (i = 0, j = 0; i < n/sizeof(struct iro); i++) { | ||
7921 | target[i].base = be32_to_cpu(source[j]); | ||
7922 | j++; | ||
7923 | tmp = be32_to_cpu(source[j]); | ||
7924 | target[i].m1 = (tmp >> 16) & 0xffff; | ||
7925 | target[i].m2 = tmp & 0xffff; | ||
7926 | j++; | ||
7927 | tmp = be32_to_cpu(source[j]); | ||
7928 | target[i].m3 = (tmp >> 16) & 0xffff; | ||
7929 | target[i].size = tmp & 0xffff; | ||
7930 | j++; | ||
7931 | } | ||
7932 | } | ||
7933 | |||
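So each IRO entry consumes three big-endian 32-bit words: word 0 is the base, word 1 packs m1 and m2, word 2 packs m3 and size. A self-contained decode of one made-up entry, after byteswapping:

#include <stdint.h>
#include <stdio.h>

struct iro_entry { uint32_t base; uint16_t m1, m2, m3, size; };

int main(void)
{
        /* Sample words, already converted from big endian. */
        uint32_t w[3] = { 0x00123456, 0x00010002, 0x00030010 };
        struct iro_entry e = {
                .base = w[0],
                .m1 = (w[1] >> 16) & 0xffff, .m2 = w[1] & 0xffff,
                .m3 = (w[2] >> 16) & 0xffff, .size = w[2] & 0xffff,
        };
        printf("base=0x%x m1=%u m2=%u m3=%u size=0x%x\n",
               (unsigned)e.base, (unsigned)e.m1, (unsigned)e.m2,
               (unsigned)e.m3, (unsigned)e.size);
        return 0;
}

This prints base=0x123456 m1=1 m2=2 m3=3 size=0x10.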
7182 | static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) | 7934 | static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) |
7183 | { | 7935 | { |
7184 | const __be16 *source = (const __be16 *)_source; | 7936 | const __be16 *source = (const __be16 *)_source; |
@@ -7260,9 +8012,13 @@ int bnx2x_init_firmware(struct bnx2x *bp) | |||
7260 | be32_to_cpu(fw_hdr->csem_int_table_data.offset); | 8012 | be32_to_cpu(fw_hdr->csem_int_table_data.offset); |
7261 | INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + | 8013 | INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + |
7262 | be32_to_cpu(fw_hdr->csem_pram_data.offset); | 8014 | be32_to_cpu(fw_hdr->csem_pram_data.offset); |
8015 | /* IRO */ | ||
8016 | BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro); | ||
7263 | 8017 | ||
7264 | return 0; | 8018 | return 0; |
7265 | 8019 | ||
8020 | iro_alloc_err: | ||
8021 | kfree(bp->init_ops_offsets); | ||
7266 | init_offsets_alloc_err: | 8022 | init_offsets_alloc_err: |
7267 | kfree(bp->init_ops); | 8023 | kfree(bp->init_ops); |
7268 | init_ops_alloc_err: | 8024 | init_ops_alloc_err: |
@@ -7273,17 +8029,27 @@ request_firmware_exit: | |||
7273 | return rc; | 8029 | return rc; |
7274 | } | 8030 | } |
7275 | 8031 | ||
8032 | static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count) | ||
8033 | { | ||
8034 | int cid_count = L2_FP_COUNT(l2_cid_count); | ||
7276 | 8035 | ||
8036 | #ifdef BCM_CNIC | ||
8037 | cid_count += CNIC_CID_MAX; | ||
8038 | #endif | ||
8039 | return roundup(cid_count, QM_CID_ROUND); | ||
8040 | } | ||
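bnx2x_set_qm_cid_count() pads the connection-ID count up to the queue manager's rounding unit. The roundup() idiom, with a hypothetical QM_CID_ROUND of 1024:

/* Fragment: round x up to the next multiple of y (y > 0). */
#define QM_CID_ROUND 1024
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

static const int qm_cids = ROUNDUP(18, QM_CID_ROUND);  /* == 1024 */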
7277 | static int __devinit bnx2x_init_one(struct pci_dev *pdev, | 8041 | static int __devinit bnx2x_init_one(struct pci_dev *pdev, |
7278 | const struct pci_device_id *ent) | 8042 | const struct pci_device_id *ent) |
7279 | { | 8043 | { |
7280 | struct net_device *dev = NULL; | 8044 | struct net_device *dev = NULL; |
7281 | struct bnx2x *bp; | 8045 | struct bnx2x *bp; |
7282 | int pcie_width, pcie_speed; | 8046 | int pcie_width, pcie_speed; |
7283 | int rc; | 8047 | int rc, cid_count; |
8048 | |||
8049 | cid_count = FP_SB_MAX_E1x + CNIC_CONTEXT_USE; | ||
7284 | 8050 | ||
7285 | /* dev zeroed in init_etherdev */ | 8051 | /* dev zeroed in init_etherdev */ |
7286 | dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT); | 8052 | dev = alloc_etherdev_mq(sizeof(*bp), cid_count); |
7287 | if (!dev) { | 8053 | if (!dev) { |
7288 | dev_err(&pdev->dev, "Cannot allocate net device\n"); | 8054 | dev_err(&pdev->dev, "Cannot allocate net device\n"); |
7289 | return -ENOMEM; | 8055 | return -ENOMEM; |
@@ -7294,6 +8060,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
7294 | 8060 | ||
7295 | pci_set_drvdata(pdev, dev); | 8061 | pci_set_drvdata(pdev, dev); |
7296 | 8062 | ||
8063 | bp->l2_cid_count = cid_count; | ||
8064 | |||
7297 | rc = bnx2x_init_dev(pdev, dev); | 8065 | rc = bnx2x_init_dev(pdev, dev); |
7298 | if (rc < 0) { | 8066 | if (rc < 0) { |
7299 | free_netdev(dev); | 8067 | free_netdev(dev); |
@@ -7304,6 +8072,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
7304 | if (rc) | 8072 | if (rc) |
7305 | goto init_one_exit; | 8073 | goto init_one_exit; |
7306 | 8074 | ||
8075 | /* calc qm_cid_count */ | ||
8076 | bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count); | ||
8077 | |||
7307 | rc = register_netdev(dev); | 8078 | rc = register_netdev(dev); |
7308 | if (rc) { | 8079 | if (rc) { |
7309 | dev_err(&pdev->dev, "Cannot register net device\n"); | 8080 | dev_err(&pdev->dev, "Cannot register net device\n"); |
@@ -7360,6 +8131,8 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) | |||
7360 | if (bp->doorbells) | 8131 | if (bp->doorbells) |
7361 | iounmap(bp->doorbells); | 8132 | iounmap(bp->doorbells); |
7362 | 8133 | ||
8134 | bnx2x_free_mem_bp(bp); | ||
8135 | |||
7363 | free_netdev(dev); | 8136 | free_netdev(dev); |
7364 | 8137 | ||
7365 | if (atomic_read(&pdev->enable_cnt) == 1) | 8138 | if (atomic_read(&pdev->enable_cnt) == 1) |
@@ -7387,16 +8160,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) | |||
7387 | /* Release IRQs */ | 8160 | /* Release IRQs */ |
7388 | bnx2x_free_irq(bp, false); | 8161 | bnx2x_free_irq(bp, false); |
7389 | 8162 | ||
7390 | if (CHIP_IS_E1(bp)) { | ||
7391 | struct mac_configuration_cmd *config = | ||
7392 | bnx2x_sp(bp, mcast_config); | ||
7393 | |||
7394 | for (i = 0; i < config->hdr.length; i++) | ||
7395 | CAM_INVALIDATE(config->config_table[i]); | ||
7396 | } | ||
7397 | |||
7398 | /* Free SKBs, SGEs, TPA pool and driver internals */ | 8163 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
7399 | bnx2x_free_skbs(bp); | 8164 | bnx2x_free_skbs(bp); |
8165 | |||
7400 | for_each_queue(bp, i) | 8166 | for_each_queue(bp, i) |
7401 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | 8167 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); |
7402 | for_each_queue(bp, i) | 8168 | for_each_queue(bp, i) |
@@ -7641,8 +8407,8 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev, | |||
7641 | 8407 | ||
7642 | DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n", | 8408 | DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n", |
7643 | spe->hdr.conn_and_cmd_data, spe->hdr.type, | 8409 | spe->hdr.conn_and_cmd_data, spe->hdr.type, |
7644 | spe->data.mac_config_addr.hi, | 8410 | spe->data.update_data_addr.hi, |
7645 | spe->data.mac_config_addr.lo, | 8411 | spe->data.update_data_addr.lo, |
7646 | bp->cnic_kwq_pending); | 8412 | bp->cnic_kwq_pending); |
7647 | 8413 | ||
7648 | if (bp->cnic_kwq_prod == bp->cnic_kwq_last) | 8414 | if (bp->cnic_kwq_prod == bp->cnic_kwq_last) |
@@ -7736,8 +8502,24 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) | |||
7736 | case DRV_CTL_START_L2_CMD: { | 8502 | case DRV_CTL_START_L2_CMD: { |
7737 | u32 cli = ctl->data.ring.client_id; | 8503 | u32 cli = ctl->data.ring.client_id; |
7738 | 8504 | ||
7739 | bp->rx_mode_cl_mask |= (1 << cli); | 8505 | /* Set iSCSI MAC address */ |
7740 | bnx2x_set_storm_rx_mode(bp); | 8506 | bnx2x_set_iscsi_eth_mac_addr(bp, 1); |
8507 | |||
8508 | mmiowb(); | ||
8509 | barrier(); | ||
8510 | |||
8511 | /* Start accepting on iSCSI L2 ring. Accept all multicasts | ||
8512 | * because it's the only way for UIO Client to accept | ||
8513 | * multicasts (in non-promiscuous mode only one Client per | ||
8514 | * function will receive multicast packets (leading in our | ||
8515 | * case)). | ||
8516 | */ | ||
8517 | bnx2x_rxq_set_mac_filters(bp, cli, | ||
8518 | BNX2X_ACCEPT_UNICAST | | ||
8519 | BNX2X_ACCEPT_BROADCAST | | ||
8520 | BNX2X_ACCEPT_ALL_MULTICAST); | ||
8521 | storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp)); | ||
8522 | |||
7741 | break; | 8523 | break; |
7742 | } | 8524 | } |
7743 | 8525 | ||
@@ -7745,8 +8527,15 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) | |||
7745 | case DRV_CTL_STOP_L2_CMD: { | 8527 | case DRV_CTL_STOP_L2_CMD: { |
7746 | u32 cli = ctl->data.ring.client_id; | 8528 | u32 cli = ctl->data.ring.client_id; |
7747 | 8529 | ||
7748 | bp->rx_mode_cl_mask &= ~(1 << cli); | 8530 | /* Stop accepting on iSCSI L2 ring */ |
7749 | bnx2x_set_storm_rx_mode(bp); | 8531 | bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE); |
8532 | storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp)); | ||
8533 | |||
8534 | mmiowb(); | ||
8535 | barrier(); | ||
8536 | |||
8537 | /* Unset iSCSI L2 MAC */ | ||
8538 | bnx2x_set_iscsi_eth_mac_addr(bp, 0); | ||
7750 | break; | 8539 | break; |
7751 | } | 8540 | } |
7752 | 8541 | ||
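The START/STOP pair above is symmetric: start programs unicast + broadcast + all-multicast acceptance for the iSCSI client (all-multicast because only the leading client of a function sees multicasts outside promiscuous mode), while stop programs BNX2X_ACCEPT_NONE; both flush the filter write with mmiowb()/barrier() around the MAC change. A sketch of such an accept mask, with hypothetical flag encodings:

/* Illustrative flag values only; the driver's encodings may differ. */
#define ACCEPT_NONE           0x0
#define ACCEPT_UNICAST        0x1
#define ACCEPT_BROADCAST      0x4
#define ACCEPT_ALL_MULTICAST  0x8

static unsigned iscsi_l2_start_mask(void)
{
        return ACCEPT_UNICAST | ACCEPT_BROADCAST | ACCEPT_ALL_MULTICAST;
}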
@@ -7770,10 +8559,12 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) | |||
7770 | cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; | 8559 | cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; |
7771 | cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; | 8560 | cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; |
7772 | } | 8561 | } |
7773 | cp->irq_arr[0].status_blk = bp->cnic_sb; | 8562 | cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; |
7774 | cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp); | 8563 | cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp); |
8564 | cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp); | ||
7775 | cp->irq_arr[1].status_blk = bp->def_status_blk; | 8565 | cp->irq_arr[1].status_blk = bp->def_status_blk; |
7776 | cp->irq_arr[1].status_blk_num = DEF_SB_ID; | 8566 | cp->irq_arr[1].status_blk_num = DEF_SB_ID; |
8567 | cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; | ||
7777 | 8568 | ||
7778 | cp->num_irq = 2; | 8569 | cp->num_irq = 2; |
7779 | } | 8570 | } |
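With the new FW interrupt scheme each status block is addressed both by its HC index and by its IGU index, hence the added status_blk_num2 assignments. A rough sketch of the descriptor shape this implies (names beyond the fields used above are assumptions, not the cnic API):

#include <stdint.h>

/* Assumed shape of the per-vector info handed to cnic. */
struct cnic_irq_sketch {
        void *status_blk;
        uint32_t status_blk_num;   /* HC status block id */
        uint32_t status_blk_num2;  /* IGU status block id (new HSI) */
};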
@@ -7805,8 +8596,11 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, | |||
7805 | 8596 | ||
7806 | cp->num_irq = 0; | 8597 | cp->num_irq = 0; |
7807 | cp->drv_state = CNIC_DRV_STATE_REGD; | 8598 | cp->drv_state = CNIC_DRV_STATE_REGD; |
8599 | cp->iro_arr = bp->iro_arr; | ||
7808 | 8600 | ||
7809 | bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp)); | 8601 | bnx2x_init_sb(bp, bp->cnic_sb_mapping, |
8602 | BNX2X_VF_ID_INVALID, false, | ||
8603 | CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp)); | ||
7810 | 8604 | ||
7811 | bnx2x_setup_cnic_irq_info(bp); | 8605 | bnx2x_setup_cnic_irq_info(bp); |
7812 | bnx2x_set_iscsi_eth_mac_addr(bp, 1); | 8606 | bnx2x_set_iscsi_eth_mac_addr(bp, 1); |
@@ -7847,7 +8641,7 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) | |||
7847 | cp->io_base = bp->regview; | 8641 | cp->io_base = bp->regview; |
7848 | cp->io_base2 = bp->doorbells; | 8642 | cp->io_base2 = bp->doorbells; |
7849 | cp->max_kwqe_pending = 8; | 8643 | cp->max_kwqe_pending = 8; |
7850 | cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context); | 8644 | cp->ctx_blk_size = CDU_ILT_PAGE_SZ; |
7851 | cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1; | 8645 | cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1; |
7852 | cp->ctx_tbl_len = CNIC_ILT_LINES; | 8646 | cp->ctx_tbl_len = CNIC_ILT_LINES; |
7853 | cp->starting_cid = BCM_CNIC_CID_START; | 8647 | cp->starting_cid = BCM_CNIC_CID_START; |