Diffstat (limited to 'drivers/net/bnx2x')
-rw-r--r--  drivers/net/bnx2x/Makefile         |    2
-rw-r--r--  drivers/net/bnx2x/bnx2x.h          |  921
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c      | 1105
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h      |  568
-rw-r--r--  drivers/net/bnx2x/bnx2x_dcb.c      |  671
-rw-r--r--  drivers/net/bnx2x/bnx2x_dcb.h      |   30
-rw-r--r--  drivers/net/bnx2x/bnx2x_dump.h     |  913
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c  |  456
-rw-r--r--  drivers/net/bnx2x/bnx2x_fw_defs.h  |  519
-rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h      | 5398
-rw-r--r--  drivers/net/bnx2x/bnx2x_init.h     |  327
-rw-r--r--  drivers/net/bnx2x/bnx2x_init_ops.h |  194
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c     | 6355
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.h     |  183
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c     | 5454
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h      |  935
-rw-r--r--  drivers/net/bnx2x/bnx2x_sp.c       | 5333
-rw-r--r--  drivers/net/bnx2x/bnx2x_sp.h       | 1235
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c    |  868
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.h    |  228
20 files changed, 23542 insertions(+), 8153 deletions(-)
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index bb83a2961273..48fbdd48f88f 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_BNX2X) += bnx2x.o
 
-bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o
+bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 668a578c49e9..0d4b98126241 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -14,6 +14,7 @@ | |||
14 | #ifndef BNX2X_H | 14 | #ifndef BNX2X_H |
15 | #define BNX2X_H | 15 | #define BNX2X_H |
16 | #include <linux/netdevice.h> | 16 | #include <linux/netdevice.h> |
17 | #include <linux/dma-mapping.h> | ||
17 | #include <linux/types.h> | 18 | #include <linux/types.h> |
18 | 19 | ||
19 | /* compilation time flags */ | 20 | /* compilation time flags */ |
@@ -22,14 +23,10 @@ | |||
22 | * (you will need to reboot afterwards) */ | 23 | * (you will need to reboot afterwards) */ |
23 | /* #define BNX2X_STOP_ON_ERROR */ | 24 | /* #define BNX2X_STOP_ON_ERROR */ |
24 | 25 | ||
25 | #define DRV_MODULE_VERSION "1.62.12-0" | 26 | #define DRV_MODULE_VERSION "1.70.00-0" |
26 | #define DRV_MODULE_RELDATE "2011/03/20" | 27 | #define DRV_MODULE_RELDATE "2011/06/13" |
27 | #define BNX2X_BC_VER 0x040200 | 28 | #define BNX2X_BC_VER 0x040200 |
28 | 29 | ||
29 | #define BNX2X_MULTI_QUEUE | ||
30 | |||
31 | #define BNX2X_NEW_NAPI | ||
32 | |||
33 | #if defined(CONFIG_DCB) | 30 | #if defined(CONFIG_DCB) |
34 | #define BCM_DCBNL | 31 | #define BCM_DCBNL |
35 | #endif | 32 | #endif |
@@ -47,11 +44,12 @@ | |||
47 | #endif | 44 | #endif |
48 | 45 | ||
49 | #include <linux/mdio.h> | 46 | #include <linux/mdio.h> |
50 | #include <linux/pci.h> | 47 | |
51 | #include "bnx2x_reg.h" | 48 | #include "bnx2x_reg.h" |
52 | #include "bnx2x_fw_defs.h" | 49 | #include "bnx2x_fw_defs.h" |
53 | #include "bnx2x_hsi.h" | 50 | #include "bnx2x_hsi.h" |
54 | #include "bnx2x_link.h" | 51 | #include "bnx2x_link.h" |
52 | #include "bnx2x_sp.h" | ||
55 | #include "bnx2x_dcb.h" | 53 | #include "bnx2x_dcb.h" |
56 | #include "bnx2x_stats.h" | 54 | #include "bnx2x_stats.h" |
57 | 55 | ||
@@ -80,6 +78,12 @@ do { \ | |||
80 | ##__args); \ | 78 | ##__args); \ |
81 | } while (0) | 79 | } while (0) |
82 | 80 | ||
81 | #define DP_CONT(__mask, __fmt, __args...) \ | ||
82 | do { \ | ||
83 | if (bp->msg_enable & (__mask)) \ | ||
84 | pr_cont(__fmt, ##__args); \ | ||
85 | } while (0) | ||
86 | |||
83 | /* errors debug print */ | 87 | /* errors debug print */ |
84 | #define BNX2X_DBG_ERR(__fmt, __args...) \ | 88 | #define BNX2X_DBG_ERR(__fmt, __args...) \ |
85 | do { \ | 89 | do { \ |
@@ -111,7 +115,9 @@ do { \ | |||
111 | dev_info(&bp->pdev->dev, __fmt, ##__args); \ | 115 | dev_info(&bp->pdev->dev, __fmt, ##__args); \ |
112 | } while (0) | 116 | } while (0) |
113 | 117 | ||
114 | void bnx2x_panic_dump(struct bnx2x *bp); | 118 | #define BNX2X_MAC_FMT "%pM" |
119 | #define BNX2X_MAC_PRN_LIST(mac) (mac) | ||
120 | |||
115 | 121 | ||
116 | #ifdef BNX2X_STOP_ON_ERROR | 122 | #ifdef BNX2X_STOP_ON_ERROR |
117 | #define bnx2x_panic() do { \ | 123 | #define bnx2x_panic() do { \ |
@@ -233,11 +239,11 @@ void bnx2x_panic_dump(struct bnx2x *bp); | |||
233 | * | 239 | * |
234 | */ | 240 | */ |
235 | /* iSCSI L2 */ | 241 | /* iSCSI L2 */ |
236 | #define BNX2X_ISCSI_ETH_CL_ID 17 | 242 | #define BNX2X_ISCSI_ETH_CL_ID_IDX 1 |
237 | #define BNX2X_ISCSI_ETH_CID 17 | 243 | #define BNX2X_ISCSI_ETH_CID 17 |
238 | 244 | ||
239 | /* FCoE L2 */ | 245 | /* FCoE L2 */ |
240 | #define BNX2X_FCOE_ETH_CL_ID 18 | 246 | #define BNX2X_FCOE_ETH_CL_ID_IDX 2 |
241 | #define BNX2X_FCOE_ETH_CID 18 | 247 | #define BNX2X_FCOE_ETH_CID 18 |
242 | 248 | ||
243 | /** Additional rings budgeting */ | 249 | /** Additional rings budgeting */ |
@@ -283,44 +289,73 @@ union db_prod { | |||
283 | 289 | ||
284 | 290 | ||
285 | /* MC hsi */ | 291 | /* MC hsi */ |
286 | #define BCM_PAGE_SHIFT 12 | 292 | #define BCM_PAGE_SHIFT 12 |
287 | #define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT) | 293 | #define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT) |
288 | #define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1)) | 294 | #define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1)) |
289 | #define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK) | 295 | #define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK) |
290 | 296 | ||
291 | #define PAGES_PER_SGE_SHIFT 0 | 297 | #define PAGES_PER_SGE_SHIFT 0 |
292 | #define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) | 298 | #define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) |
293 | #define SGE_PAGE_SIZE PAGE_SIZE | 299 | #define SGE_PAGE_SIZE PAGE_SIZE |
294 | #define SGE_PAGE_SHIFT PAGE_SHIFT | 300 | #define SGE_PAGE_SHIFT PAGE_SHIFT |
295 | #define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) | 301 | #define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) |
296 | 302 | ||
297 | /* SGE ring related macros */ | 303 | /* SGE ring related macros */ |
298 | #define NUM_RX_SGE_PAGES 2 | 304 | #define NUM_RX_SGE_PAGES 2 |
299 | #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) | 305 | #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) |
300 | #define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) | 306 | #define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) |
301 | /* RX_SGE_CNT is promised to be a power of 2 */ | 307 | /* RX_SGE_CNT is promised to be a power of 2 */ |
302 | #define RX_SGE_MASK (RX_SGE_CNT - 1) | 308 | #define RX_SGE_MASK (RX_SGE_CNT - 1) |
303 | #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) | 309 | #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) |
304 | #define MAX_RX_SGE (NUM_RX_SGE - 1) | 310 | #define MAX_RX_SGE (NUM_RX_SGE - 1) |
305 | #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ | 311 | #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ |
306 | (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) | 312 | (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) |
307 | #define RX_SGE(x) ((x) & MAX_RX_SGE) | 313 | #define RX_SGE(x) ((x) & MAX_RX_SGE) |
314 | |||
315 | /* Manipulate a bit vector defined as an array of u64 */ | ||
308 | 316 | ||
309 | /* SGE producer mask related macros */ | ||
310 | /* Number of bits in one sge_mask array element */ | 317 | /* Number of bits in one sge_mask array element */ |
311 | #define RX_SGE_MASK_ELEM_SZ 64 | 318 | #define BIT_VEC64_ELEM_SZ 64 |
312 | #define RX_SGE_MASK_ELEM_SHIFT 6 | 319 | #define BIT_VEC64_ELEM_SHIFT 6 |
313 | #define RX_SGE_MASK_ELEM_MASK ((u64)RX_SGE_MASK_ELEM_SZ - 1) | 320 | #define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1) |
321 | |||
322 | |||
323 | #define __BIT_VEC64_SET_BIT(el, bit) \ | ||
324 | do { \ | ||
325 | el = ((el) | ((u64)0x1 << (bit))); \ | ||
326 | } while (0) | ||
327 | |||
328 | #define __BIT_VEC64_CLEAR_BIT(el, bit) \ | ||
329 | do { \ | ||
330 | el = ((el) & (~((u64)0x1 << (bit)))); \ | ||
331 | } while (0) | ||
332 | |||
333 | |||
334 | #define BIT_VEC64_SET_BIT(vec64, idx) \ | ||
335 | __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ | ||
336 | (idx) & BIT_VEC64_ELEM_MASK) | ||
337 | |||
338 | #define BIT_VEC64_CLEAR_BIT(vec64, idx) \ | ||
339 | __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ | ||
340 | (idx) & BIT_VEC64_ELEM_MASK) | ||
341 | |||
342 | #define BIT_VEC64_TEST_BIT(vec64, idx) \ | ||
343 | (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \ | ||
344 | ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1) | ||
314 | 345 | ||
315 | /* Creates a bitmask of all ones in less significant bits. | 346 | /* Creates a bitmask of all ones in less significant bits. |
316 | idx - index of the most significant bit in the created mask */ | 347 | idx - index of the most significant bit in the created mask */ |
317 | #define RX_SGE_ONES_MASK(idx) \ | 348 | #define BIT_VEC64_ONES_MASK(idx) \ |
318 | (((u64)0x1 << (((idx) & RX_SGE_MASK_ELEM_MASK) + 1)) - 1) | 349 | (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1) |
319 | #define RX_SGE_MASK_ELEM_ONE_MASK ((u64)(~0)) | 350 | #define BIT_VEC64_ELEM_ONE_MASK ((u64)(~0)) |
351 | |||
352 | /*******************************************************/ | ||
353 | |||
354 | |||
320 | 355 | ||
321 | /* Number of u64 elements in SGE mask array */ | 356 | /* Number of u64 elements in SGE mask array */ |
322 | #define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \ | 357 | #define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \ |
323 | RX_SGE_MASK_ELEM_SZ) | 358 | BIT_VEC64_ELEM_SZ) |
324 | #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) | 359 | #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) |
325 | #define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) | 360 | #define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) |
326 | 361 | ||
@@ -331,7 +366,30 @@ union host_hc_status_block { | |||
331 | struct host_hc_status_block_e2 *e2_sb; | 366 | struct host_hc_status_block_e2 *e2_sb; |
332 | }; | 367 | }; |
333 | 368 | ||
369 | struct bnx2x_agg_info { | ||
370 | /* | ||
371 | * First aggregation buffer is an skb, the following - are pages. | ||
372 | * We will preallocate the skbs for each aggregation when | ||
373 | * we open the interface and will replace the BD at the consumer | ||
374 | * with this one when we receive the TPA_START CQE in order to | ||
375 | * keep the Rx BD ring consistent. | ||
376 | */ | ||
377 | struct sw_rx_bd first_buf; | ||
378 | u8 tpa_state; | ||
379 | #define BNX2X_TPA_START 1 | ||
380 | #define BNX2X_TPA_STOP 2 | ||
381 | #define BNX2X_TPA_ERROR 3 | ||
382 | u8 placement_offset; | ||
383 | u16 parsing_flags; | ||
384 | u16 vlan_tag; | ||
385 | u16 len_on_bd; | ||
386 | }; | ||
387 | |||
388 | #define Q_STATS_OFFSET32(stat_name) \ | ||
389 | (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) | ||
390 | |||
334 | struct bnx2x_fastpath { | 391 | struct bnx2x_fastpath { |
392 | struct bnx2x *bp; /* parent */ | ||
335 | 393 | ||
336 | #define BNX2X_NAPI_WEIGHT 128 | 394 | #define BNX2X_NAPI_WEIGHT 128 |
337 | struct napi_struct napi; | 395 | struct napi_struct napi; |
@@ -366,23 +424,13 @@ struct bnx2x_fastpath { | |||
366 | 424 | ||
367 | u64 sge_mask[RX_SGE_MASK_LEN]; | 425 | u64 sge_mask[RX_SGE_MASK_LEN]; |
368 | 426 | ||
369 | int state; | 427 | u32 cid; |
370 | #define BNX2X_FP_STATE_CLOSED 0 | ||
371 | #define BNX2X_FP_STATE_IRQ 0x80000 | ||
372 | #define BNX2X_FP_STATE_OPENING 0x90000 | ||
373 | #define BNX2X_FP_STATE_OPEN 0xa0000 | ||
374 | #define BNX2X_FP_STATE_HALTING 0xb0000 | ||
375 | #define BNX2X_FP_STATE_HALTED 0xc0000 | ||
376 | #define BNX2X_FP_STATE_TERMINATING 0xd0000 | ||
377 | #define BNX2X_FP_STATE_TERMINATED 0xe0000 | ||
378 | 428 | ||
379 | u8 index; /* number in fp array */ | 429 | u8 index; /* number in fp array */ |
380 | u8 cl_id; /* eth client id */ | 430 | u8 cl_id; /* eth client id */ |
381 | u8 cl_qzone_id; | 431 | u8 cl_qzone_id; |
382 | u8 fw_sb_id; /* status block number in FW */ | 432 | u8 fw_sb_id; /* status block number in FW */ |
383 | u8 igu_sb_id; /* status block number in HW */ | 433 | u8 igu_sb_id; /* status block number in HW */ |
384 | u32 cid; | ||
385 | |||
386 | union db_prod tx_db; | 434 | union db_prod tx_db; |
387 | 435 | ||
388 | u16 tx_pkt_prod; | 436 | u16 tx_pkt_prod; |
@@ -401,24 +449,20 @@ struct bnx2x_fastpath { | |||
401 | /* The last maximal completed SGE */ | 449 | /* The last maximal completed SGE */ |
402 | u16 last_max_sge; | 450 | u16 last_max_sge; |
403 | __le16 *rx_cons_sb; | 451 | __le16 *rx_cons_sb; |
404 | |||
405 | unsigned long tx_pkt, | 452 | unsigned long tx_pkt, |
406 | rx_pkt, | 453 | rx_pkt, |
407 | rx_calls; | 454 | rx_calls; |
408 | 455 | ||
409 | /* TPA related */ | 456 | /* TPA related */ |
410 | struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H]; | 457 | struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; |
411 | u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H]; | ||
412 | #define BNX2X_TPA_START 1 | ||
413 | #define BNX2X_TPA_STOP 2 | ||
414 | u8 disable_tpa; | 458 | u8 disable_tpa; |
415 | #ifdef BNX2X_STOP_ON_ERROR | 459 | #ifdef BNX2X_STOP_ON_ERROR |
416 | u64 tpa_queue_used; | 460 | u64 tpa_queue_used; |
417 | #endif | 461 | #endif |
418 | 462 | ||
419 | struct tstorm_per_client_stats old_tclient; | 463 | struct tstorm_per_queue_stats old_tclient; |
420 | struct ustorm_per_client_stats old_uclient; | 464 | struct ustorm_per_queue_stats old_uclient; |
421 | struct xstorm_per_client_stats old_xclient; | 465 | struct xstorm_per_queue_stats old_xclient; |
422 | struct bnx2x_eth_q_stats eth_q_stats; | 466 | struct bnx2x_eth_q_stats eth_q_stats; |
423 | 467 | ||
424 | /* The size is calculated using the following: | 468 | /* The size is calculated using the following: |
@@ -427,7 +471,13 @@ struct bnx2x_fastpath { | |||
427 | 4 (for the digits and to make it DWORD aligned) */ | 471 | 4 (for the digits and to make it DWORD aligned) */ |
428 | #define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) | 472 | #define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) |
429 | char name[FP_NAME_SIZE]; | 473 | char name[FP_NAME_SIZE]; |
430 | struct bnx2x *bp; /* parent */ | 474 | |
475 | /* MACs object */ | ||
476 | struct bnx2x_vlan_mac_obj mac_obj; | ||
477 | |||
478 | /* Queue State object */ | ||
479 | struct bnx2x_queue_sp_obj q_obj; | ||
480 | |||
431 | }; | 481 | }; |
432 | 482 | ||
433 | #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) | 483 | #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) |
@@ -435,11 +485,13 @@ struct bnx2x_fastpath { | |||
435 | /* Use 2500 as a mini-jumbo MTU for FCoE */ | 485 | /* Use 2500 as a mini-jumbo MTU for FCoE */ |
436 | #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 | 486 | #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 |
437 | 487 | ||
438 | #ifdef BCM_CNIC | 488 | /* FCoE L2 `fastpath' entry is right after the eth entries */ |
439 | /* FCoE L2 `fastpath' is right after the eth entries */ | ||
440 | #define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) | 489 | #define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) |
441 | #define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) | 490 | #define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) |
442 | #define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) | 491 | #define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) |
492 | |||
493 | |||
494 | #ifdef BCM_CNIC | ||
443 | #define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) | 495 | #define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) |
444 | #define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) | 496 | #define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) |
445 | #else | 497 | #else |
@@ -449,77 +501,68 @@ struct bnx2x_fastpath { | |||
449 | 501 | ||
450 | 502 | ||
451 | /* MC hsi */ | 503 | /* MC hsi */ |
452 | #define MAX_FETCH_BD 13 /* HW max BDs per packet */ | 504 | #define MAX_FETCH_BD 13 /* HW max BDs per packet */ |
453 | #define RX_COPY_THRESH 92 | 505 | #define RX_COPY_THRESH 92 |
454 | 506 | ||
455 | #define NUM_TX_RINGS 16 | 507 | #define NUM_TX_RINGS 16 |
456 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) | 508 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) |
457 | #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) | 509 | #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) |
458 | #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) | 510 | #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) |
459 | #define MAX_TX_BD (NUM_TX_BD - 1) | 511 | #define MAX_TX_BD (NUM_TX_BD - 1) |
460 | #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) | 512 | #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) |
461 | #define INIT_JUMBO_TX_RING_SIZE MAX_TX_AVAIL | ||
462 | #define INIT_TX_RING_SIZE MAX_TX_AVAIL | ||
463 | #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ | 513 | #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ |
464 | (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) | 514 | (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) |
465 | #define TX_BD(x) ((x) & MAX_TX_BD) | 515 | #define TX_BD(x) ((x) & MAX_TX_BD) |
466 | #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) | 516 | #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) |
467 | 517 | ||
468 | /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ | 518 | /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ |
469 | #define NUM_RX_RINGS 8 | 519 | #define NUM_RX_RINGS 8 |
470 | #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) | 520 | #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) |
471 | #define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) | 521 | #define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) |
472 | #define RX_DESC_MASK (RX_DESC_CNT - 1) | 522 | #define RX_DESC_MASK (RX_DESC_CNT - 1) |
473 | #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) | 523 | #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) |
474 | #define MAX_RX_BD (NUM_RX_BD - 1) | 524 | #define MAX_RX_BD (NUM_RX_BD - 1) |
475 | #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) | 525 | #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) |
476 | #define MIN_RX_SIZE_TPA 72 | 526 | #define MIN_RX_AVAIL 128 |
477 | #define MIN_RX_SIZE_NONTPA 10 | 527 | |
478 | #define INIT_JUMBO_RX_RING_SIZE MAX_RX_AVAIL | 528 | #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ |
479 | #define INIT_RX_RING_SIZE MAX_RX_AVAIL | 529 | ETH_MIN_RX_CQES_WITH_TPA_E1 : \ |
530 | ETH_MIN_RX_CQES_WITH_TPA_E1H_E2) | ||
531 | #define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA | ||
532 | #define MIN_RX_SIZE_TPA (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL)) | ||
533 | #define MIN_RX_SIZE_NONTPA (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\ | ||
534 | MIN_RX_AVAIL)) | ||
535 | |||
480 | #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ | 536 | #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ |
481 | (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) | 537 | (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) |
482 | #define RX_BD(x) ((x) & MAX_RX_BD) | 538 | #define RX_BD(x) ((x) & MAX_RX_BD) |
483 | 539 | ||
484 | /* As long as CQE is 4 times bigger than BD entry we have to allocate | 540 | /* |
485 | 4 times more pages for CQ ring in order to keep it balanced with | 541 | * As long as CQE is X times bigger than BD entry we have to allocate X times |
486 | BD ring */ | 542 | * more pages for CQ ring in order to keep it balanced with BD ring |
487 | #define NUM_RCQ_RINGS (NUM_RX_RINGS * 4) | 543 | */ |
544 | #define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) | ||
545 | #define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) | ||
488 | #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) | 546 | #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) |
489 | #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) | 547 | #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) |
490 | #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) | 548 | #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) |
491 | #define MAX_RCQ_BD (NUM_RCQ_BD - 1) | 549 | #define MAX_RCQ_BD (NUM_RCQ_BD - 1) |
492 | #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) | 550 | #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) |
493 | #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ | 551 | #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ |
494 | (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) | 552 | (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) |
495 | #define RCQ_BD(x) ((x) & MAX_RCQ_BD) | 553 | #define RCQ_BD(x) ((x) & MAX_RCQ_BD) |
496 | 554 | ||
497 | 555 | ||
498 | /* This is needed for determining of last_max */ | 556 | /* This is needed for determining of last_max */ |
499 | #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) | 557 | #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) |
558 | #define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b)) | ||
500 | 559 | ||
501 | #define __SGE_MASK_SET_BIT(el, bit) \ | ||
502 | do { \ | ||
503 | el = ((el) | ((u64)0x1 << (bit))); \ | ||
504 | } while (0) | ||
505 | |||
506 | #define __SGE_MASK_CLEAR_BIT(el, bit) \ | ||
507 | do { \ | ||
508 | el = ((el) & (~((u64)0x1 << (bit)))); \ | ||
509 | } while (0) | ||
510 | |||
511 | #define SGE_MASK_SET_BIT(fp, idx) \ | ||
512 | __SGE_MASK_SET_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \ | ||
513 | ((idx) & RX_SGE_MASK_ELEM_MASK)) | ||
514 | |||
515 | #define SGE_MASK_CLEAR_BIT(fp, idx) \ | ||
516 | __SGE_MASK_CLEAR_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \ | ||
517 | ((idx) & RX_SGE_MASK_ELEM_MASK)) | ||
518 | 560 | ||
561 | #define BNX2X_SWCID_SHIFT 17 | ||
562 | #define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) | ||
519 | 563 | ||
520 | /* used on a CID received from the HW */ | 564 | /* used on a CID received from the HW */ |
521 | #define SW_CID(x) (le32_to_cpu(x) & \ | 565 | #define SW_CID(x) (le32_to_cpu(x) & BNX2X_SWCID_MASK) |
522 | (COMMON_RAMROD_ETH_RX_CQE_CID >> 7)) | ||
523 | #define CQE_CMD(x) (le32_to_cpu(x) >> \ | 566 | #define CQE_CMD(x) (le32_to_cpu(x) >> \ |
524 | COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT) | 567 | COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT) |
525 | 568 | ||
@@ -529,6 +572,9 @@ struct bnx2x_fastpath { | |||
529 | 572 | ||
530 | #define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ | 573 | #define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ |
531 | #define BNX2X_DB_SHIFT 7 /* 128 bytes*/ | 574 | #define BNX2X_DB_SHIFT 7 /* 128 bytes*/ |
575 | #if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) | ||
576 | #error "Min DB doorbell stride is 8" | ||
577 | #endif | ||
532 | #define DPM_TRIGER_TYPE 0x40 | 578 | #define DPM_TRIGER_TYPE 0x40 |
533 | #define DOORBELL(bp, cid, val) \ | 579 | #define DOORBELL(bp, cid, val) \ |
534 | do { \ | 580 | do { \ |
@@ -557,13 +603,11 @@ struct bnx2x_fastpath { | |||
557 | 603 | ||
558 | 604 | ||
559 | /* stuff added to make the code fit 80Col */ | 605 | /* stuff added to make the code fit 80Col */ |
560 | 606 | #define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) | |
561 | #define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) | 607 | #define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG) |
562 | 608 | #define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG) | |
563 | #define TPA_TYPE_START ETH_FAST_PATH_RX_CQE_START_FLG | 609 | #define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD) |
564 | #define TPA_TYPE_END ETH_FAST_PATH_RX_CQE_END_FLG | 610 | #define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH) |
565 | #define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \ | ||
566 | (TPA_TYPE_START | TPA_TYPE_END)) | ||
567 | 611 | ||
568 | #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | 612 | #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG |
569 | 613 | ||
@@ -590,12 +634,30 @@ struct bnx2x_fastpath { | |||
590 | #define BNX2X_RX_SUM_FIX(cqe) \ | 634 | #define BNX2X_RX_SUM_FIX(cqe) \ |
591 | BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) | 635 | BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) |
592 | 636 | ||
593 | #define U_SB_ETH_RX_CQ_INDEX 1 | 637 | |
594 | #define U_SB_ETH_RX_BD_INDEX 2 | 638 | #define FP_USB_FUNC_OFF \ |
595 | #define C_SB_ETH_TX_CQ_INDEX 5 | 639 | offsetof(struct cstorm_status_block_u, func) |
640 | #define FP_CSB_FUNC_OFF \ | ||
641 | offsetof(struct cstorm_status_block_c, func) | ||
642 | |||
643 | #define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */ | ||
644 | /* (HC_INDEX_U_TOE_RX_CQ_CONS) */ | ||
645 | #define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */ | ||
646 | /* (HC_INDEX_U_ETH_RX_CQ_CONS) */ | ||
647 | #define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */ | ||
648 | /* (HC_INDEX_U_ETH_RX_BD_CONS) */ | ||
649 | |||
650 | #define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */ | ||
651 | /* (HC_INDEX_C_TOE_TX_CQ_CONS) */ | ||
652 | #define HC_INDEX_ETH_TX_CQ_CONS 5 /* Formerly Cstorm ETH CQ index */ | ||
653 | /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ | ||
654 | |||
655 | #define U_SB_ETH_RX_CQ_INDEX HC_INDEX_ETH_RX_CQ_CONS | ||
656 | #define U_SB_ETH_RX_BD_INDEX HC_INDEX_ETH_RX_BD_CONS | ||
657 | #define C_SB_ETH_TX_CQ_INDEX HC_INDEX_ETH_TX_CQ_CONS | ||
596 | 658 | ||
597 | #define BNX2X_RX_SB_INDEX \ | 659 | #define BNX2X_RX_SB_INDEX \ |
598 | (&fp->sb_index_values[U_SB_ETH_RX_CQ_INDEX]) | 660 | (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) |
599 | 661 | ||
600 | #define BNX2X_TX_SB_INDEX \ | 662 | #define BNX2X_TX_SB_INDEX \ |
601 | (&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX]) | 663 | (&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX]) |
@@ -615,41 +677,74 @@ struct bnx2x_common { | |||
615 | #define CHIP_NUM_57711 0x164f | 677 | #define CHIP_NUM_57711 0x164f |
616 | #define CHIP_NUM_57711E 0x1650 | 678 | #define CHIP_NUM_57711E 0x1650 |
617 | #define CHIP_NUM_57712 0x1662 | 679 | #define CHIP_NUM_57712 0x1662 |
618 | #define CHIP_NUM_57712E 0x1663 | 680 | #define CHIP_NUM_57712_MF 0x1663 |
681 | #define CHIP_NUM_57713 0x1651 | ||
682 | #define CHIP_NUM_57713E 0x1652 | ||
683 | #define CHIP_NUM_57800 0x168a | ||
684 | #define CHIP_NUM_57800_MF 0x16a5 | ||
685 | #define CHIP_NUM_57810 0x168e | ||
686 | #define CHIP_NUM_57810_MF 0x16ae | ||
687 | #define CHIP_NUM_57840 0x168d | ||
688 | #define CHIP_NUM_57840_MF 0x16ab | ||
619 | #define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) | 689 | #define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) |
620 | #define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) | 690 | #define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) |
621 | #define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) | 691 | #define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) |
622 | #define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712) | 692 | #define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712) |
623 | #define CHIP_IS_57712E(bp) (CHIP_NUM(bp) == CHIP_NUM_57712E) | 693 | #define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF) |
694 | #define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800) | ||
695 | #define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) | ||
696 | #define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) | ||
697 | #define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) | ||
698 | #define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840) | ||
699 | #define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF) | ||
624 | #define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ | 700 | #define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ |
625 | CHIP_IS_57711E(bp)) | 701 | CHIP_IS_57711E(bp)) |
626 | #define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ | 702 | #define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ |
627 | CHIP_IS_57712E(bp)) | 703 | CHIP_IS_57712_MF(bp)) |
704 | #define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \ | ||
705 | CHIP_IS_57800_MF(bp) || \ | ||
706 | CHIP_IS_57810(bp) || \ | ||
707 | CHIP_IS_57810_MF(bp) || \ | ||
708 | CHIP_IS_57840(bp) || \ | ||
709 | CHIP_IS_57840_MF(bp)) | ||
628 | #define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) | 710 | #define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) |
629 | #define IS_E1H_OFFSET (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) | 711 | #define USES_WARPCORE(bp) (CHIP_IS_E3(bp)) |
630 | 712 | #define IS_E1H_OFFSET (!CHIP_IS_E1(bp)) | |
631 | #define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000) | 713 | |
632 | #define CHIP_REV_Ax 0x00000000 | 714 | #define CHIP_REV_SHIFT 12 |
715 | #define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT) | ||
716 | #define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK) | ||
717 | #define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT) | ||
718 | #define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT) | ||
633 | /* assume maximum 5 revisions */ | 719 | /* assume maximum 5 revisions */ |
634 | #define CHIP_REV_IS_SLOW(bp) (CHIP_REV(bp) > 0x00005000) | 720 | #define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000) |
635 | /* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */ | 721 | /* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */ |
636 | #define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \ | 722 | #define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \ |
637 | !(CHIP_REV(bp) & 0x00001000)) | 723 | !(CHIP_REV_VAL(bp) & 0x00001000)) |
638 | /* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */ | 724 | /* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */ |
639 | #define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \ | 725 | #define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \ |
640 | (CHIP_REV(bp) & 0x00001000)) | 726 | (CHIP_REV_VAL(bp) & 0x00001000)) |
641 | 727 | ||
642 | #define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \ | 728 | #define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \ |
643 | ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1)) | 729 | ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1)) |
644 | 730 | ||
645 | #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) | 731 | #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) |
646 | #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) | 732 | #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) |
647 | #define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) | 733 | #define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\ |
734 | (CHIP_REV_SHIFT + 1)) \ | ||
735 | << CHIP_REV_SHIFT) | ||
736 | #define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? \ | ||
737 | CHIP_REV_SIM(bp) :\ | ||
738 | CHIP_REV_VAL(bp)) | ||
739 | #define CHIP_IS_E3B0(bp) (CHIP_IS_E3(bp) && \ | ||
740 | (CHIP_REV(bp) == CHIP_REV_Bx)) | ||
741 | #define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ | ||
742 | (CHIP_REV(bp) == CHIP_REV_Ax)) | ||
648 | 743 | ||
649 | int flash_size; | 744 | int flash_size; |
650 | #define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ | 745 | #define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ |
651 | #define NVRAM_TIMEOUT_COUNT 30000 | 746 | #define BNX2X_NVRAM_TIMEOUT_COUNT 30000 |
652 | #define NVRAM_PAGE_SIZE 256 | 747 | #define BNX2X_NVRAM_PAGE_SIZE 256 |
653 | 748 | ||
654 | u32 shmem_base; | 749 | u32 shmem_base; |
655 | u32 shmem2_base; | 750 | u32 shmem2_base; |
@@ -666,7 +761,7 @@ struct bnx2x_common { | |||
666 | #define INT_BLOCK_MODE_NORMAL 0 | 761 | #define INT_BLOCK_MODE_NORMAL 0 |
667 | #define INT_BLOCK_MODE_BW_COMP 2 | 762 | #define INT_BLOCK_MODE_BW_COMP 2 |
668 | #define CHIP_INT_MODE_IS_NBC(bp) \ | 763 | #define CHIP_INT_MODE_IS_NBC(bp) \ |
669 | (CHIP_IS_E2(bp) && \ | 764 | (!CHIP_IS_E1x(bp) && \ |
670 | !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP)) | 765 | !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP)) |
671 | #define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp)) | 766 | #define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp)) |
672 | 767 | ||
@@ -712,19 +807,15 @@ struct bnx2x_port { | |||
712 | 807 | ||
713 | /* end of port */ | 808 | /* end of port */ |
714 | 809 | ||
715 | /* e1h Classification CAM line allocations */ | 810 | #define STATS_OFFSET32(stat_name) \ |
716 | enum { | 811 | (offsetof(struct bnx2x_eth_stats, stat_name) / 4) |
717 | CAM_ETH_LINE = 0, | ||
718 | CAM_ISCSI_ETH_LINE, | ||
719 | CAM_FIP_ETH_LINE, | ||
720 | CAM_FIP_MCAST_LINE, | ||
721 | CAM_MAX_PF_LINE = CAM_FIP_MCAST_LINE | ||
722 | }; | ||
723 | /* number of MACs per function in NIG memory - used for SI mode */ | ||
724 | #define NIG_LLH_FUNC_MEM_SIZE 16 | ||
725 | /* number of entries in NIG_REG_LLHX_FUNC_MEM */ | ||
726 | #define NIG_LLH_FUNC_MEM_MAX_OFFSET 8 | ||
727 | 812 | ||
813 | /* slow path */ | ||
814 | |||
815 | /* slow path work-queue */ | ||
816 | extern struct workqueue_struct *bnx2x_wq; | ||
817 | |||
818 | #define BNX2X_MAX_NUM_OF_VFS 64 | ||
728 | #define BNX2X_VF_ID_INVALID 0xFF | 819 | #define BNX2X_VF_ID_INVALID 0xFF |
729 | 820 | ||
730 | /* | 821 | /* |
@@ -749,8 +840,10 @@ enum { | |||
749 | * L2 queue is supported. the cid for the FCoE L2 queue is always X. | 840 | * L2 queue is supported. the cid for the FCoE L2 queue is always X. |
750 | */ | 841 | */ |
751 | 842 | ||
752 | #define FP_SB_MAX_E1x 16 /* fast-path interrupt contexts E1x */ | 843 | /* fast-path interrupt contexts E1x */ |
753 | #define FP_SB_MAX_E2 16 /* fast-path interrupt contexts E2 */ | 844 | #define FP_SB_MAX_E1x 16 |
845 | /* fast-path interrupt contexts E2 */ | ||
846 | #define FP_SB_MAX_E2 HC_SB_MAX_SB_E2 | ||
754 | 847 | ||
755 | /* | 848 | /* |
756 | * cid_cnt paramter below refers to the value returned by | 849 | * cid_cnt paramter below refers to the value returned by |
@@ -761,13 +854,13 @@ enum { | |||
761 | * The number of FP context allocated by the driver == max number of regular | 854 | * The number of FP context allocated by the driver == max number of regular |
762 | * L2 queues + 1 for the FCoE L2 queue | 855 | * L2 queues + 1 for the FCoE L2 queue |
763 | */ | 856 | */ |
764 | #define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE) | 857 | #define L2_FP_COUNT(cid_cnt) ((cid_cnt) - FCOE_CONTEXT_USE) |
765 | 858 | ||
766 | /* | 859 | /* |
767 | * The number of FP-SB allocated by the driver == max number of regular L2 | 860 | * The number of FP-SB allocated by the driver == max number of regular L2 |
768 | * queues + 1 for the CNIC which also consumes an FP-SB | 861 | * queues + 1 for the CNIC which also consumes an FP-SB |
769 | */ | 862 | */ |
770 | #define FP_SB_COUNT(cid_cnt) ((cid_cnt) - FCOE_CONTEXT_USE) | 863 | #define FP_SB_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE) |
771 | #define NUM_IGU_SB_REQUIRED(cid_cnt) \ | 864 | #define NUM_IGU_SB_REQUIRED(cid_cnt) \ |
772 | (FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE) | 865 | (FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE) |
773 | 866 | ||
@@ -788,38 +881,61 @@ union cdu_context { | |||
788 | #define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) | 881 | #define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) |
789 | #endif | 882 | #endif |
790 | 883 | ||
791 | #define QM_ILT_PAGE_SZ_HW 3 | 884 | #define QM_ILT_PAGE_SZ_HW 0 |
792 | #define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 32K */ | 885 | #define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */ |
793 | #define QM_CID_ROUND 1024 | 886 | #define QM_CID_ROUND 1024 |
794 | 887 | ||
795 | #ifdef BCM_CNIC | 888 | #ifdef BCM_CNIC |
796 | /* TM (timers) host DB constants */ | 889 | /* TM (timers) host DB constants */ |
797 | #define TM_ILT_PAGE_SZ_HW 2 | 890 | #define TM_ILT_PAGE_SZ_HW 0 |
798 | #define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 16K */ | 891 | #define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ |
799 | /* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ | 892 | /* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ |
800 | #define TM_CONN_NUM 1024 | 893 | #define TM_CONN_NUM 1024 |
801 | #define TM_ILT_SZ (8 * TM_CONN_NUM) | 894 | #define TM_ILT_SZ (8 * TM_CONN_NUM) |
802 | #define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) | 895 | #define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) |
803 | 896 | ||
804 | /* SRC (Searcher) host DB constants */ | 897 | /* SRC (Searcher) host DB constants */ |
805 | #define SRC_ILT_PAGE_SZ_HW 3 | 898 | #define SRC_ILT_PAGE_SZ_HW 0 |
806 | #define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 32K */ | 899 | #define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */ |
807 | #define SRC_HASH_BITS 10 | 900 | #define SRC_HASH_BITS 10 |
808 | #define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */ | 901 | #define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */ |
809 | #define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM) | 902 | #define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM) |
810 | #define SRC_T2_SZ SRC_ILT_SZ | 903 | #define SRC_T2_SZ SRC_ILT_SZ |
811 | #define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) | 904 | #define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) |
905 | |||
812 | #endif | 906 | #endif |
813 | 907 | ||
814 | #define MAX_DMAE_C 8 | 908 | #define MAX_DMAE_C 8 |
815 | 909 | ||
816 | /* DMA memory not used in fastpath */ | 910 | /* DMA memory not used in fastpath */ |
817 | struct bnx2x_slowpath { | 911 | struct bnx2x_slowpath { |
818 | struct eth_stats_query fw_stats; | 912 | union { |
819 | struct mac_configuration_cmd mac_config; | 913 | struct mac_configuration_cmd e1x; |
820 | struct mac_configuration_cmd mcast_config; | 914 | struct eth_classify_rules_ramrod_data e2; |
821 | struct mac_configuration_cmd uc_mac_config; | 915 | } mac_rdata; |
822 | struct client_init_ramrod_data client_init_data; | 916 | |
917 | |||
918 | union { | ||
919 | struct tstorm_eth_mac_filter_config e1x; | ||
920 | struct eth_filter_rules_ramrod_data e2; | ||
921 | } rx_mode_rdata; | ||
922 | |||
923 | union { | ||
924 | struct mac_configuration_cmd e1; | ||
925 | struct eth_multicast_rules_ramrod_data e2; | ||
926 | } mcast_rdata; | ||
927 | |||
928 | struct eth_rss_update_ramrod_data rss_rdata; | ||
929 | |||
930 | /* Queue State related ramrods are always sent under rtnl_lock */ | ||
931 | union { | ||
932 | struct client_init_ramrod_data init_data; | ||
933 | struct client_update_ramrod_data update_data; | ||
934 | } q_rdata; | ||
935 | |||
936 | union { | ||
937 | struct function_start_data func_start; | ||
938 | } func_rdata; | ||
823 | 939 | ||
824 | /* used by dmae command executer */ | 940 | /* used by dmae command executer */ |
825 | struct dmae_command dmae[MAX_DMAE_C]; | 941 | struct dmae_command dmae[MAX_DMAE_C]; |
@@ -846,7 +962,7 @@ struct bnx2x_slowpath { | |||
846 | #define MAX_DYNAMIC_ATTN_GRPS 8 | 962 | #define MAX_DYNAMIC_ATTN_GRPS 8 |
847 | 963 | ||
848 | struct attn_route { | 964 | struct attn_route { |
849 | u32 sig[5]; | 965 | u32 sig[5]; |
850 | }; | 966 | }; |
851 | 967 | ||
852 | struct iro { | 968 | struct iro { |
@@ -866,13 +982,15 @@ struct hw_context { | |||
866 | /* forward */ | 982 | /* forward */ |
867 | struct bnx2x_ilt; | 983 | struct bnx2x_ilt; |
868 | 984 | ||
869 | typedef enum { | 985 | |
986 | enum bnx2x_recovery_state { | ||
870 | BNX2X_RECOVERY_DONE, | 987 | BNX2X_RECOVERY_DONE, |
871 | BNX2X_RECOVERY_INIT, | 988 | BNX2X_RECOVERY_INIT, |
872 | BNX2X_RECOVERY_WAIT, | 989 | BNX2X_RECOVERY_WAIT, |
873 | } bnx2x_recovery_state_t; | 990 | BNX2X_RECOVERY_FAILED |
991 | }; | ||
874 | 992 | ||
875 | /** | 993 | /* |
876 | * Event queue (EQ or event ring) MC hsi | 994 | * Event queue (EQ or event ring) MC hsi |
877 | * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2 | 995 | * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2 |
878 | */ | 996 | */ |
@@ -910,6 +1028,24 @@ enum { | |||
910 | BNX2X_LINK_REPORT_TX_FC_ON, | 1028 | BNX2X_LINK_REPORT_TX_FC_ON, |
911 | }; | 1029 | }; |
912 | 1030 | ||
1031 | enum { | ||
1032 | BNX2X_PORT_QUERY_IDX, | ||
1033 | BNX2X_PF_QUERY_IDX, | ||
1034 | BNX2X_FIRST_QUEUE_QUERY_IDX, | ||
1035 | }; | ||
1036 | |||
1037 | struct bnx2x_fw_stats_req { | ||
1038 | struct stats_query_header hdr; | ||
1039 | struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; | ||
1040 | }; | ||
1041 | |||
1042 | struct bnx2x_fw_stats_data { | ||
1043 | struct stats_counter storm_counters; | ||
1044 | struct per_port_stats port; | ||
1045 | struct per_pf_stats pf; | ||
1046 | struct per_queue_stats queue_stats[1]; | ||
1047 | }; | ||
1048 | |||
913 | struct bnx2x { | 1049 | struct bnx2x { |
914 | /* Fields used in the tx and intr/napi performance paths | 1050 | /* Fields used in the tx and intr/napi performance paths |
915 | * are grouped together in the beginning of the structure | 1051 | * are grouped together in the beginning of the structure |
@@ -919,19 +1055,28 @@ struct bnx2x { | |||
919 | void __iomem *doorbells; | 1055 | void __iomem *doorbells; |
920 | u16 db_size; | 1056 | u16 db_size; |
921 | 1057 | ||
1058 | u8 pf_num; /* absolute PF number */ | ||
1059 | u8 pfid; /* per-path PF number */ | ||
1060 | int base_fw_ndsb; /**/ | ||
1061 | #define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1)) | ||
1062 | #define BP_PORT(bp) (bp->pfid & 1) | ||
1063 | #define BP_FUNC(bp) (bp->pfid) | ||
1064 | #define BP_ABS_FUNC(bp) (bp->pf_num) | ||
1065 | #define BP_E1HVN(bp) (bp->pfid >> 1) | ||
1066 | #define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/ | ||
1067 | #define BP_L_ID(bp) (BP_E1HVN(bp) << 2) | ||
1068 | #define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ | ||
1069 | BP_VN(bp) * (CHIP_IS_E1x(bp) ? 2 : 1)) | ||
1070 | |||
922 | struct net_device *dev; | 1071 | struct net_device *dev; |
923 | struct pci_dev *pdev; | 1072 | struct pci_dev *pdev; |
924 | 1073 | ||
925 | struct iro *iro_arr; | 1074 | const struct iro *iro_arr; |
926 | #define IRO (bp->iro_arr) | 1075 | #define IRO (bp->iro_arr) |
927 | 1076 | ||
928 | atomic_t intr_sem; | 1077 | enum bnx2x_recovery_state recovery_state; |
929 | |||
930 | bnx2x_recovery_state_t recovery_state; | ||
931 | int is_leader; | 1078 | int is_leader; |
932 | struct msix_entry *msix_table; | 1079 | struct msix_entry *msix_table; |
933 | #define INT_MODE_INTx 1 | ||
934 | #define INT_MODE_MSI 2 | ||
935 | 1080 | ||
936 | int tx_ring_size; | 1081 | int tx_ring_size; |
937 | 1082 | ||
@@ -944,7 +1089,8 @@ struct bnx2x { | |||
944 | /* Max supported alignment is 256 (8 shift) */ | 1089 | /* Max supported alignment is 256 (8 shift) */ |
945 | #define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \ | 1090 | #define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \ |
946 | L1_CACHE_SHIFT : 8) | 1091 | L1_CACHE_SHIFT : 8) |
947 | #define BNX2X_RX_ALIGN (1 << BNX2X_RX_ALIGN_SHIFT) | 1092 | /* FW use 2 Cache lines Alignment for start packet and size */ |
1093 | #define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT) | ||
948 | #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) | 1094 | #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) |
949 | 1095 | ||
950 | struct host_sp_status_block *def_status_blk; | 1096 | struct host_sp_status_block *def_status_blk; |
@@ -974,10 +1120,12 @@ struct bnx2x { | |||
974 | __le16 *eq_cons_sb; | 1120 | __le16 *eq_cons_sb; |
975 | atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */ | 1121 | atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */ |
976 | 1122 | ||
977 | /* Flags for marking that there is a STAT_QUERY or | 1123 | |
978 | SET_MAC ramrod pending */ | 1124 | |
979 | int stats_pending; | 1125 | /* Counter for marking that there is a STAT_QUERY ramrod pending */ |
980 | int set_mac_pending; | 1126 | u16 stats_pending; |
1127 | /* Counter for completed statistics ramrods */ | ||
1128 | u16 stats_comp; | ||
981 | 1129 | ||
982 | /* End of fields used in the performance code paths */ | 1130 | /* End of fields used in the performance code paths */ |
983 | 1131 | ||
@@ -985,54 +1133,35 @@ struct bnx2x { | |||
985 | int msg_enable; | 1133 | int msg_enable; |
986 | 1134 | ||
987 | u32 flags; | 1135 | u32 flags; |
988 | #define PCIX_FLAG 1 | 1136 | #define PCIX_FLAG (1 << 0) |
989 | #define PCI_32BIT_FLAG 2 | 1137 | #define PCI_32BIT_FLAG (1 << 1) |
990 | #define ONE_PORT_FLAG 4 | 1138 | #define ONE_PORT_FLAG (1 << 2) |
991 | #define NO_WOL_FLAG 8 | 1139 | #define NO_WOL_FLAG (1 << 3) |
992 | #define USING_DAC_FLAG 0x10 | 1140 | #define USING_DAC_FLAG (1 << 4) |
993 | #define USING_MSIX_FLAG 0x20 | 1141 | #define USING_MSIX_FLAG (1 << 5) |
994 | #define USING_MSI_FLAG 0x40 | 1142 | #define USING_MSI_FLAG (1 << 6) |
995 | 1143 | #define DISABLE_MSI_FLAG (1 << 7) | |
996 | #define TPA_ENABLE_FLAG 0x80 | 1144 | #define TPA_ENABLE_FLAG (1 << 8) |
997 | #define NO_MCP_FLAG 0x100 | 1145 | #define NO_MCP_FLAG (1 << 9) |
998 | #define DISABLE_MSI_FLAG 0x200 | 1146 | |
999 | #define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) | 1147 | #define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) |
1000 | #define MF_FUNC_DIS 0x1000 | 1148 | #define MF_FUNC_DIS (1 << 11) |
1001 | #define FCOE_MACS_SET 0x2000 | 1149 | #define OWN_CNIC_IRQ (1 << 12) |
1002 | #define NO_FCOE_FLAG 0x4000 | 1150 | #define NO_ISCSI_OOO_FLAG (1 << 13) |
1003 | #define NO_ISCSI_OOO_FLAG 0x8000 | 1151 | #define NO_ISCSI_FLAG (1 << 14) |
1004 | #define NO_ISCSI_FLAG 0x10000 | 1152 | #define NO_FCOE_FLAG (1 << 15) |
1005 | 1153 | ||
1006 | #define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) | ||
1007 | #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) | 1154 | #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) |
1008 | #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) | 1155 | #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) |
1009 | 1156 | #define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) | |
1010 | int pf_num; /* absolute PF number */ | ||
1011 | int pfid; /* per-path PF number */ | ||
1012 | int base_fw_ndsb; | ||
1013 | #define BP_PATH(bp) (!CHIP_IS_E2(bp) ? \ | ||
1014 | 0 : (bp->pf_num & 1)) | ||
1015 | #define BP_PORT(bp) (bp->pfid & 1) | ||
1016 | #define BP_FUNC(bp) (bp->pfid) | ||
1017 | #define BP_ABS_FUNC(bp) (bp->pf_num) | ||
1018 | #define BP_E1HVN(bp) (bp->pfid >> 1) | ||
1019 | #define BP_VN(bp) (CHIP_MODE_IS_4_PORT(bp) ? \ | ||
1020 | 0 : BP_E1HVN(bp)) | ||
1021 | #define BP_L_ID(bp) (BP_E1HVN(bp) << 2) | ||
1022 | #define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ | ||
1023 | BP_VN(bp) * (CHIP_IS_E1x(bp) ? 2 : 1)) | ||
1024 | |||
1025 | #ifdef BCM_CNIC | ||
1026 | #define BCM_CNIC_CID_START 16 | ||
1027 | #define BCM_ISCSI_ETH_CL_ID 17 | ||
1028 | #endif | ||
1029 | 1157 | ||
1030 | int pm_cap; | 1158 | int pm_cap; |
1031 | int pcie_cap; | ||
1032 | int mrrs; | 1159 | int mrrs; |
1033 | 1160 | ||
1034 | struct delayed_work sp_task; | 1161 | struct delayed_work sp_task; |
1035 | struct delayed_work reset_task; | 1162 | struct delayed_work reset_task; |
1163 | |||
1164 | struct delayed_work period_task; | ||
1036 | struct timer_list timer; | 1165 | struct timer_list timer; |
1037 | int current_interval; | 1166 | int current_interval; |
1038 | 1167 | ||
@@ -1052,9 +1181,9 @@ struct bnx2x { | |||
1052 | 1181 | ||
1053 | struct cmng_struct_per_port cmng; | 1182 | struct cmng_struct_per_port cmng; |
1054 | u32 vn_weight_sum; | 1183 | u32 vn_weight_sum; |
1055 | |||
1056 | u32 mf_config[E1HVN_MAX]; | 1184 | u32 mf_config[E1HVN_MAX]; |
1057 | u32 mf2_config[E2_FUNC_MAX]; | 1185 | u32 mf2_config[E2_FUNC_MAX]; |
1186 | u32 path_has_ovlan; /* E3 */ | ||
1058 | u16 mf_ov; | 1187 | u16 mf_ov; |
1059 | u8 mf_mode; | 1188 | u8 mf_mode; |
1060 | #define IS_MF(bp) (bp->mf_mode != 0) | 1189 | #define IS_MF(bp) (bp->mf_mode != 0) |
@@ -1079,33 +1208,20 @@ struct bnx2x { | |||
1079 | 1208 | ||
1080 | u32 lin_cnt; | 1209 | u32 lin_cnt; |
1081 | 1210 | ||
1082 | int state; | 1211 | u16 state; |
1083 | #define BNX2X_STATE_CLOSED 0 | 1212 | #define BNX2X_STATE_CLOSED 0 |
1084 | #define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000 | 1213 | #define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000 |
1085 | #define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000 | 1214 | #define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000 |
1086 | #define BNX2X_STATE_OPEN 0x3000 | 1215 | #define BNX2X_STATE_OPEN 0x3000 |
1087 | #define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 | 1216 | #define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 |
1088 | #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 | 1217 | #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 |
1089 | #define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000 | 1218 | |
1090 | #define BNX2X_STATE_FUNC_STARTED 0x7000 | ||
1091 | #define BNX2X_STATE_DIAG 0xe000 | 1219 | #define BNX2X_STATE_DIAG 0xe000 |
1092 | #define BNX2X_STATE_ERROR 0xf000 | 1220 | #define BNX2X_STATE_ERROR 0xf000 |
1093 | 1221 | ||
1094 | int multi_mode; | 1222 | int multi_mode; |
1095 | int num_queues; | 1223 | int num_queues; |
1096 | int disable_tpa; | 1224 | int disable_tpa; |
1097 | int int_mode; | ||
1098 | u32 *rx_indir_table; | ||
1099 | |||
1100 | struct tstorm_eth_mac_filter_config mac_filters; | ||
1101 | #define BNX2X_ACCEPT_NONE 0x0000 | ||
1102 | #define BNX2X_ACCEPT_UNICAST 0x0001 | ||
1103 | #define BNX2X_ACCEPT_MULTICAST 0x0002 | ||
1104 | #define BNX2X_ACCEPT_ALL_UNICAST 0x0004 | ||
1105 | #define BNX2X_ACCEPT_ALL_MULTICAST 0x0008 | ||
1106 | #define BNX2X_ACCEPT_BROADCAST 0x0010 | ||
1107 | #define BNX2X_ACCEPT_UNMATCHED_UCAST 0x0020 | ||
1108 | #define BNX2X_PROMISCUOUS_MODE 0x10000 | ||
1109 | 1225 | ||
1110 | u32 rx_mode; | 1226 | u32 rx_mode; |
1111 | #define BNX2X_RX_MODE_NONE 0 | 1227 | #define BNX2X_RX_MODE_NONE 0 |
@@ -1113,7 +1229,6 @@ struct bnx2x { | |||
1113 | #define BNX2X_RX_MODE_ALLMULTI 2 | 1229 | #define BNX2X_RX_MODE_ALLMULTI 2 |
1114 | #define BNX2X_RX_MODE_PROMISC 3 | 1230 | #define BNX2X_RX_MODE_PROMISC 3 |
1115 | #define BNX2X_MAX_MULTICAST 64 | 1231 | #define BNX2X_MAX_MULTICAST 64 |
1116 | #define BNX2X_MAX_EMUL_MULTI 16 | ||
1117 | 1232 | ||
1118 | u8 igu_dsb_id; | 1233 | u8 igu_dsb_id; |
1119 | u8 igu_base_sb; | 1234 | u8 igu_base_sb; |
@@ -1122,11 +1237,38 @@ struct bnx2x { | |||
1122 | 1237 | ||
1123 | struct bnx2x_slowpath *slowpath; | 1238 | struct bnx2x_slowpath *slowpath; |
1124 | dma_addr_t slowpath_mapping; | 1239 | dma_addr_t slowpath_mapping; |
1240 | |||
1241 | /* Total number of FW statistics requests */ | ||
1242 | u8 fw_stats_num; | ||
1243 | |||
1244 | /* | ||
1245 | * This is a memory buffer that will contain both statistics | ||
1246 | * ramrod request and data. | ||
1247 | */ | ||
1248 | void *fw_stats; | ||
1249 | dma_addr_t fw_stats_mapping; | ||
1250 | |||
1251 | /* | ||
1252 | * FW statistics request shortcut (points at the | ||
1253 | * beginning of fw_stats buffer). | ||
1254 | */ | ||
1255 | struct bnx2x_fw_stats_req *fw_stats_req; | ||
1256 | dma_addr_t fw_stats_req_mapping; | ||
1257 | int fw_stats_req_sz; | ||
1258 | |||
1259 | /* | ||
1260 | * FW statistics data shortcut (points at the begining of | ||
1261 | * fw_stats buffer + fw_stats_req_sz). | ||
1262 | */ | ||
1263 | struct bnx2x_fw_stats_data *fw_stats_data; | ||
1264 | dma_addr_t fw_stats_data_mapping; | ||
1265 | int fw_stats_data_sz; | ||
1266 | |||
1125 | struct hw_context context; | 1267 | struct hw_context context; |
1126 | 1268 | ||
1127 | struct bnx2x_ilt *ilt; | 1269 | struct bnx2x_ilt *ilt; |
1128 | #define BP_ILT(bp) ((bp)->ilt) | 1270 | #define BP_ILT(bp) ((bp)->ilt) |
1129 | #define ILT_MAX_LINES 128 | 1271 | #define ILT_MAX_LINES 256 |
1130 | 1272 | ||
1131 | int l2_cid_count; | 1273 | int l2_cid_count; |
1132 | #define L2_ILT_LINES(bp) (DIV_ROUND_UP((bp)->l2_cid_count, \ | 1274 | #define L2_ILT_LINES(bp) (DIV_ROUND_UP((bp)->l2_cid_count, \ |
@@ -1148,16 +1290,18 @@ struct bnx2x { | |||
1148 | struct cnic_eth_dev cnic_eth_dev; | 1290 | struct cnic_eth_dev cnic_eth_dev; |
1149 | union host_hc_status_block cnic_sb; | 1291 | union host_hc_status_block cnic_sb; |
1150 | dma_addr_t cnic_sb_mapping; | 1292 | dma_addr_t cnic_sb_mapping; |
1151 | #define CNIC_SB_ID(bp) ((bp)->base_fw_ndsb + BP_L_ID(bp)) | ||
1152 | #define CNIC_IGU_SB_ID(bp) ((bp)->igu_base_sb) | ||
1153 | struct eth_spe *cnic_kwq; | 1293 | struct eth_spe *cnic_kwq; |
1154 | struct eth_spe *cnic_kwq_prod; | 1294 | struct eth_spe *cnic_kwq_prod; |
1155 | struct eth_spe *cnic_kwq_cons; | 1295 | struct eth_spe *cnic_kwq_cons; |
1156 | struct eth_spe *cnic_kwq_last; | 1296 | struct eth_spe *cnic_kwq_last; |
1157 | u16 cnic_kwq_pending; | 1297 | u16 cnic_kwq_pending; |
1158 | u16 cnic_spq_pending; | 1298 | u16 cnic_spq_pending; |
1159 | struct mutex cnic_mutex; | ||
1160 | u8 fip_mac[ETH_ALEN]; | 1299 | u8 fip_mac[ETH_ALEN]; |
1300 | struct mutex cnic_mutex; | ||
1301 | struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; | ||
1302 | |||
1303 | /* Start index of the "special" (CNIC related) L2 cleints */ | ||
1304 | u8 cnic_base_cl_id; | ||
1161 | #endif | 1305 | #endif |
1162 | 1306 | ||
1163 | int dmae_ready; | 1307 | int dmae_ready; |
@@ -1194,6 +1338,8 @@ struct bnx2x { | |||
1194 | u16 *init_ops_offsets; | 1338 | u16 *init_ops_offsets; |
1195 | /* Data blob - has 32 bit granularity */ | 1339 | /* Data blob - has 32 bit granularity */ |
1196 | u32 *init_data; | 1340 | u32 *init_data; |
1341 | u32 init_mode_flags; | ||
1342 | #define INIT_MODE_FLAGS(bp) (bp->init_mode_flags) | ||
1197 | /* Zipped PRAM blobs - raw data */ | 1343 | /* Zipped PRAM blobs - raw data */ |
1198 | const u8 *tsem_int_table_data; | 1344 | const u8 *tsem_int_table_data; |
1199 | const u8 *tsem_pram_data; | 1345 | const u8 *tsem_pram_data; |
@@ -1215,8 +1361,10 @@ struct bnx2x { | |||
1215 | #define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data) | 1361 | #define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data) |
1216 | #define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data) | 1362 | #define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data) |
1217 | 1363 | ||
1364 | #define PHY_FW_VER_LEN 20 | ||
1218 | char fw_ver[32]; | 1365 | char fw_ver[32]; |
1219 | const struct firmware *firmware; | 1366 | const struct firmware *firmware; |
1367 | |||
1220 | /* LLDP params */ | 1368 | /* LLDP params */ |
1221 | struct bnx2x_config_lldp_params lldp_config_params; | 1369 | struct bnx2x_config_lldp_params lldp_config_params; |
1222 | 1370 | ||
@@ -1235,13 +1383,30 @@ struct bnx2x { | |||
1235 | bool dcbx_mode_uset; | 1383 | bool dcbx_mode_uset; |
1236 | 1384 | ||
1237 | struct bnx2x_config_dcbx_params dcbx_config_params; | 1385 | struct bnx2x_config_dcbx_params dcbx_config_params; |
1238 | |||
1239 | struct bnx2x_dcbx_port_params dcbx_port_params; | 1386 | struct bnx2x_dcbx_port_params dcbx_port_params; |
1240 | int dcb_version; | 1387 | int dcb_version; |
1241 | 1388 | ||
1242 | /* DCBX Negotiation results */ | 1389 | /* CAM credit pools */ |
1390 | struct bnx2x_credit_pool_obj macs_pool; | ||
1391 | |||
1392 | /* RX_MODE object */ | ||
1393 | struct bnx2x_rx_mode_obj rx_mode_obj; | ||
1394 | |||
1395 | /* MCAST object */ | ||
1396 | struct bnx2x_mcast_obj mcast_obj; | ||
1397 | |||
1398 | /* RSS configuration object */ | ||
1399 | struct bnx2x_rss_config_obj rss_conf_obj; | ||
1400 | |||
1401 | /* Function State controlling object */ | ||
1402 | struct bnx2x_func_sp_obj func_obj; | ||
1403 | |||
1404 | unsigned long sp_state; | ||
1405 | |||
1406 | /* DCBX Negotation results */ | ||
1243 | struct dcbx_features dcbx_local_feat; | 1407 | struct dcbx_features dcbx_local_feat; |
1244 | u32 dcbx_error; | 1408 | u32 dcbx_error; |
1409 | |||
1245 | #ifdef BCM_DCBNL | 1410 | #ifdef BCM_DCBNL |
1246 | struct dcbx_features dcbx_remote_feat; | 1411 | struct dcbx_features dcbx_remote_feat; |
1247 | u32 dcbx_remote_flags; | 1412 | u32 dcbx_remote_flags; |
@@ -1249,42 +1414,11 @@ struct bnx2x { | |||
1249 | u32 pending_max; | 1414 | u32 pending_max; |
1250 | }; | 1415 | }; |
1251 | 1416 | ||
1252 | /** | 1417 | /* Tx queues may be less or equal to Rx queues */ |
1253 | * Init queue/func interface | 1418 | extern int num_queues; |
1254 | */ | ||
1255 | /* queue init flags */ | ||
1256 | #define QUEUE_FLG_TPA 0x0001 | ||
1257 | #define QUEUE_FLG_CACHE_ALIGN 0x0002 | ||
1258 | #define QUEUE_FLG_STATS 0x0004 | ||
1259 | #define QUEUE_FLG_OV 0x0008 | ||
1260 | #define QUEUE_FLG_VLAN 0x0010 | ||
1261 | #define QUEUE_FLG_COS 0x0020 | ||
1262 | #define QUEUE_FLG_HC 0x0040 | ||
1263 | #define QUEUE_FLG_DHC 0x0080 | ||
1264 | #define QUEUE_FLG_OOO 0x0100 | ||
1265 | |||
1266 | #define QUEUE_DROP_IP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR | ||
1267 | #define QUEUE_DROP_TCP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR | ||
1268 | #define QUEUE_DROP_TTL0 TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 | ||
1269 | #define QUEUE_DROP_UDP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR | ||
1270 | |||
1271 | |||
1272 | |||
1273 | /* rss capabilities */ | ||
1274 | #define RSS_IPV4_CAP 0x0001 | ||
1275 | #define RSS_IPV4_TCP_CAP 0x0002 | ||
1276 | #define RSS_IPV6_CAP 0x0004 | ||
1277 | #define RSS_IPV6_TCP_CAP 0x0008 | ||
1278 | |||
1279 | #define BNX2X_NUM_QUEUES(bp) (bp->num_queues) | 1419 | #define BNX2X_NUM_QUEUES(bp) (bp->num_queues) |
1280 | #define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE) | 1420 | #define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE) |
1281 | 1421 | ||
1282 | /* ethtool statistics are displayed for all regular ethernet queues and the | ||
1283 | * fcoe L2 queue if not disabled | ||
1284 | */ | ||
1285 | #define BNX2X_NUM_STAT_QUEUES(bp) (NO_FCOE(bp) ? BNX2X_NUM_ETH_QUEUES(bp) : \ | ||
1286 | (BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE)) | ||
1287 | |||
1288 | #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) | 1422 | #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) |
1289 | 1423 | ||
1290 | #define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE) | 1424 | #define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE) |
@@ -1302,107 +1436,15 @@ struct bnx2x { | |||
1302 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | 1436 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY |
1303 | 1437 | ||
1304 | /* func init flags */ | 1438 | /* func init flags */ |
1305 | #define FUNC_FLG_STATS 0x0001 | 1439 | #define FUNC_FLG_RSS 0x0001 |
1306 | #define FUNC_FLG_TPA 0x0002 | 1440 | #define FUNC_FLG_STATS 0x0002 |
1307 | #define FUNC_FLG_SPQ 0x0004 | 1441 | /* removed FUNC_FLG_UNMATCHED 0x0004 */ |
1308 | #define FUNC_FLG_LEADING 0x0008 /* PF only */ | 1442 | #define FUNC_FLG_TPA 0x0008 |
1309 | 1443 | #define FUNC_FLG_SPQ 0x0010 | |
1310 | struct rxq_pause_params { | 1444 | #define FUNC_FLG_LEADING 0x0020 /* PF only */ |
1311 | u16 bd_th_lo; | ||
1312 | u16 bd_th_hi; | ||
1313 | u16 rcq_th_lo; | ||
1314 | u16 rcq_th_hi; | ||
1315 | u16 sge_th_lo; /* valid iff QUEUE_FLG_TPA */ | ||
1316 | u16 sge_th_hi; /* valid iff QUEUE_FLG_TPA */ | ||
1317 | u16 pri_map; | ||
1318 | }; | ||
1319 | |||
1320 | struct bnx2x_rxq_init_params { | ||
1321 | /* cxt*/ | ||
1322 | struct eth_context *cxt; | ||
1323 | |||
1324 | /* dma */ | ||
1325 | dma_addr_t dscr_map; | ||
1326 | dma_addr_t sge_map; | ||
1327 | dma_addr_t rcq_map; | ||
1328 | dma_addr_t rcq_np_map; | ||
1329 | |||
1330 | u16 flags; | ||
1331 | u16 drop_flags; | ||
1332 | u16 mtu; | ||
1333 | u16 buf_sz; | ||
1334 | u16 fw_sb_id; | ||
1335 | u16 cl_id; | ||
1336 | u16 spcl_id; | ||
1337 | u16 cl_qzone_id; | ||
1338 | |||
1339 | /* valid iff QUEUE_FLG_STATS */ | ||
1340 | u16 stat_id; | ||
1341 | |||
1342 | /* valid iff QUEUE_FLG_TPA */ | ||
1343 | u16 tpa_agg_sz; | ||
1344 | u16 sge_buf_sz; | ||
1345 | u16 max_sges_pkt; | ||
1346 | |||
1347 | /* valid iff QUEUE_FLG_CACHE_ALIGN */ | ||
1348 | u8 cache_line_log; | ||
1349 | |||
1350 | u8 sb_cq_index; | ||
1351 | u32 cid; | ||
1352 | |||
1353 | /* desired interrupts per sec. valid iff QUEUE_FLG_HC */ | ||
1354 | u32 hc_rate; | ||
1355 | }; | ||
1356 | |||
1357 | struct bnx2x_txq_init_params { | ||
1358 | /* cxt*/ | ||
1359 | struct eth_context *cxt; | ||
1360 | 1445 | ||
1361 | /* dma */ | ||
1362 | dma_addr_t dscr_map; | ||
1363 | |||
1364 | u16 flags; | ||
1365 | u16 fw_sb_id; | ||
1366 | u8 sb_cq_index; | ||
1367 | u8 cos; /* valid iff QUEUE_FLG_COS */ | ||
1368 | u16 stat_id; /* valid iff QUEUE_FLG_STATS */ | ||
1369 | u16 traffic_type; | ||
1370 | u32 cid; | ||
1371 | u16 hc_rate; /* desired interrupts per sec.*/ | ||
1372 | /* valid iff QUEUE_FLG_HC */ | ||
1373 | |||
1374 | }; | ||
1375 | |||
1376 | struct bnx2x_client_ramrod_params { | ||
1377 | int *pstate; | ||
1378 | int state; | ||
1379 | u16 index; | ||
1380 | u16 cl_id; | ||
1381 | u32 cid; | ||
1382 | u8 poll; | ||
1383 | #define CLIENT_IS_FCOE 0x01 | ||
1384 | #define CLIENT_IS_LEADING_RSS 0x02 | ||
1385 | u8 flags; | ||
1386 | }; | ||
1387 | |||
1388 | struct bnx2x_client_init_params { | ||
1389 | struct rxq_pause_params pause; | ||
1390 | struct bnx2x_rxq_init_params rxq_params; | ||
1391 | struct bnx2x_txq_init_params txq_params; | ||
1392 | struct bnx2x_client_ramrod_params ramrod_params; | ||
1393 | }; | ||
1394 | |||
1395 | struct bnx2x_rss_params { | ||
1396 | int mode; | ||
1397 | u16 cap; | ||
1398 | u16 result_mask; | ||
1399 | }; | ||
1400 | 1446 | ||
1401 | struct bnx2x_func_init_params { | 1447 | struct bnx2x_func_init_params { |
1402 | |||
1403 | /* rss */ | ||
1404 | struct bnx2x_rss_params *rss; /* valid iff FUNC_FLG_RSS */ | ||
1405 | |||
1406 | /* dma */ | 1448 | /* dma */ |
1407 | dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ | 1449 | dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ |
1408 | dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */ | 1450 | dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */ |
@@ -1414,17 +1456,10 @@ struct bnx2x_func_init_params { | |||
1414 | }; | 1456 | }; |
1415 | 1457 | ||
1416 | #define for_each_eth_queue(bp, var) \ | 1458 | #define for_each_eth_queue(bp, var) \ |
1417 | for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++) | 1459 | for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++) |
1418 | 1460 | ||
1419 | #define for_each_nondefault_eth_queue(bp, var) \ | 1461 | #define for_each_nondefault_eth_queue(bp, var) \ |
1420 | for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++) | 1462 | for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++) |
1421 | |||
1422 | #define for_each_napi_queue(bp, var) \ | ||
1423 | for (var = 0; \ | ||
1424 | var < BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE; var++) \ | ||
1425 | if (skip_queue(bp, var)) \ | ||
1426 | continue; \ | ||
1427 | else | ||
1428 | 1463 | ||
1429 | #define for_each_queue(bp, var) \ | 1464 | #define for_each_queue(bp, var) \ |
1430 | for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \ | 1465 | for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \ |
@@ -1462,11 +1497,66 @@ struct bnx2x_func_init_params { | |||
1462 | 1497 | ||
1463 | #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) | 1498 | #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) |
1464 | 1499 | ||
1465 | #define WAIT_RAMROD_POLL 0x01 | ||
1466 | #define WAIT_RAMROD_COMMON 0x02 | ||
1467 | 1500 | ||
1501 | |||
1502 | |||
1503 | /** | ||
1504 | * bnx2x_set_mac_one - configure a single MAC address | ||
1505 | * | ||
1506 | * @bp: driver handle | ||
1507 | * @mac: MAC to configure | ||
1508 | * @obj: MAC object handle | ||
1509 | * @set: if 'true' add a new MAC, otherwise - delete | ||
1510 | * @mac_type: the type of the MAC to configure (e.g. ETH, UC list) | ||
1511 | * @ramrod_flags: RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT) | ||
1512 | * | ||
1513 | * Configures one MAC according to provided parameters or continues the | ||
1514 | * execution of previously scheduled commands if RAMROD_CONT is set in | ||
1515 | * ramrod_flags. | ||
1516 | * | ||
1517 | * Returns zero if the operation has completed successfully, a positive value if | ||
1518 | * it has been successfully scheduled and a negative value if the requested | ||
1519 | * operation has failed. | ||
1520 | */ | ||
1521 | int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, | ||
1522 | struct bnx2x_vlan_mac_obj *obj, bool set, | ||
1523 | int mac_type, unsigned long *ramrod_flags); | ||
1524 | /** | ||
1525 | * Deletes all MACs configured for the specific MAC object. | ||
1526 | * | ||
1527 | * @param bp Function driver instance | ||
1528 | * @param mac_obj MAC object to cleanup | ||
1529 | * | ||
1530 | * @return zero if all MACs were cleaned | ||
1531 | */ | ||
1532 | |||
1533 | /** | ||
1534 | * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object | ||
1535 | * | ||
1536 | * @bp: driver handle | ||
1537 | * @mac_obj: MAC object handle | ||
1538 | * @mac_type: type of the MACs to clear (BNX2X_XXX_MAC) | ||
1539 | * @wait_for_comp: if 'true' block until completion | ||
1540 | * | ||
1541 | * Deletes all MACs of the specific type (e.g. ETH, UC list). | ||
1542 | * | ||
1543 | * Returns zero if the operation has completed successfully, a positive value if | ||
1544 | * it has been successfully scheduled and a negative value if the requested | ||
1545 | * operation has failed. | ||
1546 | */ | ||
1547 | int bnx2x_del_all_macs(struct bnx2x *bp, | ||
1548 | struct bnx2x_vlan_mac_obj *mac_obj, | ||
1549 | int mac_type, bool wait_for_comp); | ||
1550 | |||
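
The tri-state return convention documented above (zero for a completed operation, positive for one that was only scheduled, negative for a failure) is easy to collapse into a plain "non-zero is an error" check. Below is a minimal, self-contained sketch of the intended caller pattern; the stub stands in for bnx2x_set_mac_one() purely for illustration and does not model the real ramrod machinery.

#include <stdio.h>

/*
 * Illustrative stub only: mimics the documented return convention
 * (0 = completed, >0 = scheduled, <0 = error) without any hardware access.
 */
static int set_mac_one_stub(int add)
{
	return add ? 1 : 0;	/* pretend an "add" is only scheduled */
}

int main(void)
{
	int rc = set_mac_one_stub(1);

	if (rc < 0)
		printf("MAC configuration failed: %d\n", rc);
	else if (rc > 0)
		printf("MAC configuration scheduled, wait for completion\n");
	else
		printf("MAC configuration completed synchronously\n");

	return rc < 0 ? 1 : 0;
}
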
1551 | /* Init Function API */ | ||
1552 | void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p); | ||
1553 | int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); | ||
1554 | int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); | ||
1555 | int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode); | ||
1556 | int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); | ||
1468 | void bnx2x_read_mf_cfg(struct bnx2x *bp); | 1557 | void bnx2x_read_mf_cfg(struct bnx2x *bp); |
1469 | 1558 | ||
1559 | |||
1470 | /* dmae */ | 1560 | /* dmae */ |
1471 | void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); | 1561 | void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); |
1472 | void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, | 1562 | void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, |
@@ -1477,22 +1567,12 @@ u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode); | |||
1477 | u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, | 1567 | u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, |
1478 | bool with_comp, u8 comp_type); | 1568 | bool with_comp, u8 comp_type); |
1479 | 1569 | ||
1480 | int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); | ||
1481 | int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); | ||
1482 | int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); | ||
1483 | u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); | ||
1484 | 1570 | ||
1485 | void bnx2x_calc_fc_adv(struct bnx2x *bp); | 1571 | void bnx2x_calc_fc_adv(struct bnx2x *bp); |
1486 | int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | 1572 | int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, |
1487 | u32 data_hi, u32 data_lo, int common); | 1573 | u32 data_hi, u32 data_lo, int cmd_type); |
1488 | |||
1489 | /* Clears multicast and unicast list configuration in the chip. */ | ||
1490 | void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp); | ||
1491 | void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp); | ||
1492 | void bnx2x_invalidate_uc_list(struct bnx2x *bp); | ||
1493 | |||
1494 | void bnx2x_update_coalesce(struct bnx2x *bp); | 1574 | void bnx2x_update_coalesce(struct bnx2x *bp); |
1495 | int bnx2x_get_link_cfg_idx(struct bnx2x *bp); | 1575 | int bnx2x_get_cur_phy_idx(struct bnx2x *bp); |
1496 | 1576 | ||
1497 | static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | 1577 | static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, |
1498 | int wait) | 1578 | int wait) |
@@ -1648,7 +1728,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1648 | 1728 | ||
1649 | /* must be used on a CID before placing it on a HW ring */ | 1729 | /* must be used on a CID before placing it on a HW ring */ |
1650 | #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ | 1730 | #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ |
1651 | (BP_E1HVN(bp) << 17) | (x)) | 1731 | (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ |
1732 | (x)) | ||
1652 | 1733 | ||
1653 | #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) | 1734 | #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) |
1654 | #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) | 1735 | #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) |
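
The HW_CID() change above replaces the hard-coded VN shift with BNX2X_SWCID_SHIFT. A small stand-alone sketch of the resulting CID layout follows; the shift value and the sample port/VN/CID numbers are assumptions taken from the old open-coded constant, not from the new header.

#include <assert.h>
#include <stdint.h>

#define BNX2X_SWCID_SHIFT	17	/* assumed: same value the macro used to hard-code */

/* Port in bit 23, E1H VN above the software CID, CID in the low bits. */
#define HW_CID(port, vn, x)	(((uint32_t)(port) << 23) | \
				 ((uint32_t)(vn) << BNX2X_SWCID_SHIFT) | \
				 (uint32_t)(x))

int main(void)
{
	/* port 1, VN 2, software CID 5 */
	assert(HW_CID(1, 2, 5) == ((1u << 23) | (2u << 17) | 5u));
	return 0;
}
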
@@ -1718,12 +1799,14 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1718 | (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ | 1799 | (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ |
1719 | AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ | 1800 | AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ |
1720 | AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ | 1801 | AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ |
1721 | AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT) | 1802 | AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT) |
1722 | #define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \ | 1803 | #define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \ |
1723 | AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \ | 1804 | AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \ |
1724 | AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \ | 1805 | AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \ |
1725 | AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\ | 1806 | AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\ |
1726 | AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR) | 1807 | AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\ |
1808 | AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\ | ||
1809 | AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR) | ||
1727 | #define HW_INTERRUT_ASSERT_SET_1 \ | 1810 | #define HW_INTERRUT_ASSERT_SET_1 \ |
1728 | (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \ | 1811 | (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \ |
1729 | AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \ | 1812 | AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \ |
@@ -1736,17 +1819,22 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1736 | AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \ | 1819 | AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \ |
1737 | AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \ | 1820 | AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \ |
1738 | AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT) | 1821 | AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT) |
1739 | #define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR |\ | 1822 | #define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\ |
1740 | AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \ | 1823 | AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \ |
1824 | AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\ | ||
1741 | AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \ | 1825 | AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \ |
1826 | AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\ | ||
1742 | AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \ | 1827 | AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \ |
1743 | AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\ | 1828 | AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\ |
1829 | AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\ | ||
1744 | AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\ | 1830 | AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\ |
1745 | AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \ | 1831 | AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \ |
1746 | AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \ | 1832 | AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \ |
1833 | AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\ | ||
1747 | AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \ | 1834 | AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \ |
1748 | AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \ | 1835 | AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \ |
1749 | AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR) | 1836 | AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\ |
1837 | AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR) | ||
1750 | #define HW_INTERRUT_ASSERT_SET_2 \ | 1838 | #define HW_INTERRUT_ASSERT_SET_2 \ |
1751 | (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \ | 1839 | (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \ |
1752 | AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \ | 1840 | AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \ |
@@ -1758,6 +1846,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1758 | AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\ | 1846 | AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\ |
1759 | AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \ | 1847 | AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \ |
1760 | AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \ | 1848 | AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \ |
1849 | AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\ | ||
1761 | AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ | 1850 | AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ |
1762 | AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) | 1851 | AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) |
1763 | 1852 | ||
@@ -1775,6 +1864,30 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1775 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) | 1864 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) |
1776 | #define MULTI_MASK 0x7f | 1865 | #define MULTI_MASK 0x7f |
1777 | 1866 | ||
1867 | |||
1868 | #define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func) | ||
1869 | #define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func) | ||
1870 | #define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func) | ||
1871 | #define DEF_TSB_FUNC_OFF offsetof(struct tstorm_def_status_block, func) | ||
1872 | |||
1873 | #define DEF_USB_IGU_INDEX_OFF \ | ||
1874 | offsetof(struct cstorm_def_status_block_u, igu_index) | ||
1875 | #define DEF_CSB_IGU_INDEX_OFF \ | ||
1876 | offsetof(struct cstorm_def_status_block_c, igu_index) | ||
1877 | #define DEF_XSB_IGU_INDEX_OFF \ | ||
1878 | offsetof(struct xstorm_def_status_block, igu_index) | ||
1879 | #define DEF_TSB_IGU_INDEX_OFF \ | ||
1880 | offsetof(struct tstorm_def_status_block, igu_index) | ||
1881 | |||
1882 | #define DEF_USB_SEGMENT_OFF \ | ||
1883 | offsetof(struct cstorm_def_status_block_u, segment) | ||
1884 | #define DEF_CSB_SEGMENT_OFF \ | ||
1885 | offsetof(struct cstorm_def_status_block_c, segment) | ||
1886 | #define DEF_XSB_SEGMENT_OFF \ | ||
1887 | offsetof(struct xstorm_def_status_block, segment) | ||
1888 | #define DEF_TSB_SEGMENT_OFF \ | ||
1889 | offsetof(struct tstorm_def_status_block, segment) | ||
1890 | |||
1778 | #define BNX2X_SP_DSB_INDEX \ | 1891 | #define BNX2X_SP_DSB_INDEX \ |
1779 | (&bp->def_status_blk->sp_sb.\ | 1892 | (&bp->def_status_blk->sp_sb.\ |
1780 | index_values[HC_SP_INDEX_ETH_DEF_CONS]) | 1893 | index_values[HC_SP_INDEX_ETH_DEF_CONS]) |
@@ -1786,7 +1899,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1786 | } while (0) | 1899 | } while (0) |
1787 | 1900 | ||
1788 | #define GET_FLAG(value, mask) \ | 1901 | #define GET_FLAG(value, mask) \ |
1789 | (((value) &= (mask)) >> (mask##_SHIFT)) | 1902 | (((value) & (mask)) >> (mask##_SHIFT)) |
1790 | 1903 | ||
1791 | #define GET_FIELD(value, fname) \ | 1904 | #define GET_FIELD(value, fname) \ |
1792 | (((value) & (fname##_MASK)) >> (fname##_SHIFT)) | 1905 | (((value) & (fname##_MASK)) >> (fname##_SHIFT)) |
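
The GET_FLAG() change above is a genuine bug fix: the old macro used "&=", so merely reading a flag silently modified the value it was applied to. A stand-alone sketch (with a made-up FOO mask) that demonstrates the difference:

#include <assert.h>

#define FOO		0x00f0	/* made-up mask, named so that FOO_SHIFT pairs with it */
#define FOO_SHIFT	4

/* Old (buggy) form: the "&=" clobbers 'value' as a side effect. */
#define GET_FLAG_OLD(value, mask)	(((value) &= (mask)) >> (mask##_SHIFT))
/* Fixed form, as in this patch: a plain read. */
#define GET_FLAG_NEW(value, mask)	(((value) & (mask)) >> (mask##_SHIFT))

int main(void)
{
	unsigned int v = 0x12a5;
	unsigned int flag;

	flag = GET_FLAG_NEW(v, FOO);
	assert(flag == 0xa);
	assert(v == 0x12a5);		/* operand left untouched */

	flag = GET_FLAG_OLD(v, FOO);
	assert(flag == 0xa);
	assert(v == 0x00a0);		/* operand clobbered by the old macro */
	return 0;
}
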
@@ -1821,15 +1934,13 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1821 | #define HC_SEG_ACCESS_ATTN 4 | 1934 | #define HC_SEG_ACCESS_ATTN 4 |
1822 | #define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/ | 1935 | #define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/ |
1823 | 1936 | ||
1824 | #ifdef BNX2X_MAIN | 1937 | static const u32 dmae_reg_go_c[] = { |
1825 | #define BNX2X_EXTERN | 1938 | DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, |
1826 | #else | 1939 | DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, |
1827 | #define BNX2X_EXTERN extern | 1940 | DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, |
1828 | #endif | 1941 | DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 |
1829 | 1942 | }; | |
1830 | BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */ | ||
1831 | |||
1832 | extern void bnx2x_set_ethtool_ops(struct net_device *netdev); | ||
1833 | void bnx2x_push_indir_table(struct bnx2x *bp); | ||
1834 | 1943 | ||
1944 | void bnx2x_set_ethtool_ops(struct net_device *netdev); | ||
1945 | void bnx2x_notify_link_changed(struct bnx2x *bp); | ||
1835 | #endif /* bnx2x.h */ | 1946 | #endif /* bnx2x.h */ |
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index 289044332ed8..bb7556016f41 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c | |||
@@ -17,16 +17,17 @@ | |||
17 | 17 | ||
18 | #include <linux/etherdevice.h> | 18 | #include <linux/etherdevice.h> |
19 | #include <linux/if_vlan.h> | 19 | #include <linux/if_vlan.h> |
20 | #include <linux/interrupt.h> | ||
20 | #include <linux/ip.h> | 21 | #include <linux/ip.h> |
21 | #include <net/ipv6.h> | 22 | #include <net/ipv6.h> |
22 | #include <net/ip6_checksum.h> | 23 | #include <net/ip6_checksum.h> |
23 | #include <linux/firmware.h> | 24 | #include <linux/firmware.h> |
24 | #include <linux/prefetch.h> | 25 | #include <linux/prefetch.h> |
25 | #include "bnx2x_cmn.h" | 26 | #include "bnx2x_cmn.h" |
26 | |||
27 | #include "bnx2x_init.h" | 27 | #include "bnx2x_init.h" |
28 | #include "bnx2x_sp.h" | ||
29 | |||
28 | 30 | ||
29 | static int bnx2x_setup_irqs(struct bnx2x *bp); | ||
30 | 31 | ||
31 | /** | 32 | /** |
32 | * bnx2x_bz_fp - zero content of the fastpath structure. | 33 | * bnx2x_bz_fp - zero content of the fastpath structure. |
@@ -71,6 +72,8 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) | |||
71 | to_fp->napi = orig_napi; | 72 | to_fp->napi = orig_napi; |
72 | } | 73 | } |
73 | 74 | ||
75 | int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ | ||
76 | |||
74 | /* free skb in the packet ring at pos idx | 77 | /* free skb in the packet ring at pos idx |
75 | * return idx of last bd freed | 78 | * return idx of last bd freed |
76 | */ | 79 | */ |
@@ -87,8 +90,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
87 | /* prefetch skb end pointer to speedup dev_kfree_skb() */ | 90 | /* prefetch skb end pointer to speedup dev_kfree_skb() */ |
88 | prefetch(&skb->end); | 91 | prefetch(&skb->end); |
89 | 92 | ||
90 | DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", | 93 | DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", |
91 | idx, tx_buf, skb); | 94 | fp->index, idx, tx_buf, skb); |
92 | 95 | ||
93 | /* unmap first bd */ | 96 | /* unmap first bd */ |
94 | DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); | 97 | DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); |
@@ -96,6 +99,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
96 | dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), | 99 | dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), |
97 | BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); | 100 | BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); |
98 | 101 | ||
102 | |||
99 | nbd = le16_to_cpu(tx_start_bd->nbd) - 1; | 103 | nbd = le16_to_cpu(tx_start_bd->nbd) - 1; |
100 | #ifdef BNX2X_STOP_ON_ERROR | 104 | #ifdef BNX2X_STOP_ON_ERROR |
101 | if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { | 105 | if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { |
@@ -174,6 +178,9 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp) | |||
174 | * memory barrier, there is a small possibility that | 178 | * memory barrier, there is a small possibility that |
175 | * start_xmit() will miss it and cause the queue to be stopped | 179 | * start_xmit() will miss it and cause the queue to be stopped |
176 | * forever. | 180 | * forever. |
181 | * On the other hand we need an rmb() here to ensure the proper | ||
182 | * ordering of bit testing in the following | ||
183 | * netif_tx_queue_stopped(txq) call. | ||
177 | */ | 184 | */ |
178 | smp_mb(); | 185 | smp_mb(); |
179 | 186 | ||
@@ -225,7 +232,7 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, | |||
225 | 232 | ||
226 | /* First mark all used pages */ | 233 | /* First mark all used pages */ |
227 | for (i = 0; i < sge_len; i++) | 234 | for (i = 0; i < sge_len; i++) |
228 | SGE_MASK_CLEAR_BIT(fp, | 235 | BIT_VEC64_CLEAR_BIT(fp->sge_mask, |
229 | RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i]))); | 236 | RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i]))); |
230 | 237 | ||
231 | DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", | 238 | DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", |
@@ -237,8 +244,8 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, | |||
237 | le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1])); | 244 | le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1])); |
238 | 245 | ||
239 | last_max = RX_SGE(fp->last_max_sge); | 246 | last_max = RX_SGE(fp->last_max_sge); |
240 | last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT; | 247 | last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; |
241 | first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT; | 248 | first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; |
242 | 249 | ||
243 | /* If ring is not full */ | 250 | /* If ring is not full */ |
244 | if (last_elem + 1 != first_elem) | 251 | if (last_elem + 1 != first_elem) |
@@ -249,8 +256,8 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, | |||
249 | if (likely(fp->sge_mask[i])) | 256 | if (likely(fp->sge_mask[i])) |
250 | break; | 257 | break; |
251 | 258 | ||
252 | fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK; | 259 | fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; |
253 | delta += RX_SGE_MASK_ELEM_SZ; | 260 | delta += BIT_VEC64_ELEM_SZ; |
254 | } | 261 | } |
255 | 262 | ||
256 | if (delta > 0) { | 263 | if (delta > 0) { |
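
The SGE bookkeeping above now uses generic BIT_VEC64_* helpers instead of the old RX_SGE_MASK_* macros, i.e. a bit vector built from 64-bit words indexed by shift and mask. A minimal user-space sketch of such helpers; the macro names follow the patch, but these definitions are an assumption, not the driver's:

#include <assert.h>
#include <stdint.h>

#define BIT_VEC64_ELEM_SZ	64
#define BIT_VEC64_ELEM_SHIFT	6
#define BIT_VEC64_ELEM_MASK	(BIT_VEC64_ELEM_SZ - 1)

/* Assumed semantics: 'vec' is an array of 64-bit words, 'idx' a bit index. */
#define BIT_VEC64_CLEAR_BIT(vec, idx) \
	((vec)[(idx) >> BIT_VEC64_ELEM_SHIFT] &= \
	 ~((uint64_t)1 << ((idx) & BIT_VEC64_ELEM_MASK)))
#define BIT_VEC64_TEST_BIT(vec, idx) \
	(((vec)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
	  ((idx) & BIT_VEC64_ELEM_MASK)) & 1)

int main(void)
{
	uint64_t sge_mask[2] = { ~0ULL, ~0ULL };	/* 128 SGE slots, all set */

	BIT_VEC64_CLEAR_BIT(sge_mask, 70);		/* element 1, bit 6 */
	assert(!BIT_VEC64_TEST_BIT(sge_mask, 70));
	assert(BIT_VEC64_TEST_BIT(sge_mask, 69));
	return 0;
}
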
@@ -265,33 +272,56 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, | |||
265 | } | 272 | } |
266 | 273 | ||
267 | static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, | 274 | static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, |
268 | struct sk_buff *skb, u16 cons, u16 prod) | 275 | struct sk_buff *skb, u16 cons, u16 prod, |
276 | struct eth_fast_path_rx_cqe *cqe) | ||
269 | { | 277 | { |
270 | struct bnx2x *bp = fp->bp; | 278 | struct bnx2x *bp = fp->bp; |
271 | struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; | 279 | struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; |
272 | struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; | 280 | struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; |
273 | struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; | 281 | struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; |
274 | dma_addr_t mapping; | 282 | dma_addr_t mapping; |
283 | struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; | ||
284 | struct sw_rx_bd *first_buf = &tpa_info->first_buf; | ||
275 | 285 | ||
276 | /* move empty skb from pool to prod and map it */ | 286 | /* print error if current state != stop */ |
277 | prod_rx_buf->skb = fp->tpa_pool[queue].skb; | 287 | if (tpa_info->tpa_state != BNX2X_TPA_STOP) |
278 | mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data, | ||
279 | fp->rx_buf_size, DMA_FROM_DEVICE); | ||
280 | dma_unmap_addr_set(prod_rx_buf, mapping, mapping); | ||
281 | |||
282 | /* move partial skb from cons to pool (don't unmap yet) */ | ||
283 | fp->tpa_pool[queue] = *cons_rx_buf; | ||
284 | |||
285 | /* mark bin state as start - print error if current state != stop */ | ||
286 | if (fp->tpa_state[queue] != BNX2X_TPA_STOP) | ||
287 | BNX2X_ERR("start of bin not in stop [%d]\n", queue); | 288 | BNX2X_ERR("start of bin not in stop [%d]\n", queue); |
288 | 289 | ||
289 | fp->tpa_state[queue] = BNX2X_TPA_START; | 290 | /* Try to map an empty skb from the aggregation info */ |
291 | mapping = dma_map_single(&bp->pdev->dev, | ||
292 | first_buf->skb->data, | ||
293 | fp->rx_buf_size, DMA_FROM_DEVICE); | ||
294 | /* | ||
295 | * ...if it fails - move the skb from the consumer to the producer | ||
296 | * and set the current aggregation state as ERROR to drop it | ||
297 | * when TPA_STOP arrives. | ||
298 | */ | ||
299 | |||
300 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | ||
301 | /* Move the BD from the consumer to the producer */ | ||
302 | bnx2x_reuse_rx_skb(fp, cons, prod); | ||
303 | tpa_info->tpa_state = BNX2X_TPA_ERROR; | ||
304 | return; | ||
305 | } | ||
290 | 306 | ||
307 | /* move empty skb from pool to prod */ | ||
308 | prod_rx_buf->skb = first_buf->skb; | ||
309 | dma_unmap_addr_set(prod_rx_buf, mapping, mapping); | ||
291 | /* point prod_bd to new skb */ | 310 | /* point prod_bd to new skb */ |
292 | prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | 311 | prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
293 | prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | 312 | prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
294 | 313 | ||
314 | /* move partial skb from cons to pool (don't unmap yet) */ | ||
315 | *first_buf = *cons_rx_buf; | ||
316 | |||
317 | /* mark bin state as START */ | ||
318 | tpa_info->parsing_flags = | ||
319 | le16_to_cpu(cqe->pars_flags.flags); | ||
320 | tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); | ||
321 | tpa_info->tpa_state = BNX2X_TPA_START; | ||
322 | tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); | ||
323 | tpa_info->placement_offset = cqe->placement_offset; | ||
324 | |||
295 | #ifdef BNX2X_STOP_ON_ERROR | 325 | #ifdef BNX2X_STOP_ON_ERROR |
296 | fp->tpa_queue_used |= (1 << queue); | 326 | fp->tpa_queue_used |= (1 << queue); |
297 | #ifdef _ASM_GENERIC_INT_L64_H | 327 | #ifdef _ASM_GENERIC_INT_L64_H |
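
The reworked TPA start path above adds a dma_mapping_error() check: if the replacement buffer cannot be mapped, the old buffer is reused and the aggregation is marked BNX2X_TPA_ERROR so that the later TPA stop simply drops it. A simplified, stand-alone sketch of that control flow; the helpers here are hypothetical stand-ins, not the kernel DMA API:

#include <stdbool.h>
#include <stdio.h>

enum tpa_state { TPA_STOP, TPA_START, TPA_ERROR };

struct agg_info {
	enum tpa_state	state;
	void		*first_buf;
};

/* Hypothetical stand-in for dma_map_single()/dma_mapping_error(). */
static bool map_buffer(void *buf)
{
	return buf != NULL;	/* NULL simulates a mapping failure */
}

/*
 * Mirrors the control flow above: on mapping failure keep the old buffer in
 * place and mark the aggregation, so the later TPA stop just drops it.
 */
static void tpa_start(struct agg_info *agg, void *new_buf)
{
	if (agg->state != TPA_STOP)
		fprintf(stderr, "start of bin not in stop state\n");

	if (!map_buffer(new_buf)) {
		agg->state = TPA_ERROR;		/* dropped later, at TPA stop */
		return;
	}

	agg->first_buf = new_buf;
	agg->state = TPA_START;
}

int main(void)
{
	struct agg_info agg = { .state = TPA_STOP, .first_buf = NULL };

	tpa_start(&agg, NULL);			/* simulated mapping failure */
	printf("state after failed start: %d\n", agg.state);
	return 0;
}
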
@@ -322,10 +352,17 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, | |||
322 | static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, | 352 | static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, |
323 | u16 len_on_bd) | 353 | u16 len_on_bd) |
324 | { | 354 | { |
325 | /* TPA arrgregation won't have an IP options and TCP options | 355 | /* |
326 | * other than timestamp. | 356 | * TPA aggregation won't have either IP options or TCP options |
357 | * other than timestamp or IPv6 extension headers. | ||
327 | */ | 358 | */ |
328 | u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr); | 359 | u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr); |
360 | |||
361 | if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == | ||
362 | PRS_FLAG_OVERETH_IPV6) | ||
363 | hdrs_len += sizeof(struct ipv6hdr); | ||
364 | else /* IPv4 */ | ||
365 | hdrs_len += sizeof(struct iphdr); | ||
329 | 366 | ||
330 | 367 | ||
331 | /* Check if there was a TCP timestamp, if there is, it will | 368 | /* Check if there was a TCP timestamp, if there is, it will |
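
With the change above, bnx2x_set_lro_mss() sizes the fixed headers per L3 protocol before deriving the gso_size from the bytes placed on the first BD. A self-contained sketch of that arithmetic; the flag names and the timestamp handling are illustrative assumptions rather than the driver's real parsing flags:

#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN	14
#define IPV4_HLEN	20	/* sizeof(struct iphdr), no options expected in TPA */
#define IPV6_HLEN	40	/* sizeof(struct ipv6hdr), no extension headers expected */
#define TCP_HLEN	20	/* sizeof(struct tcphdr) */
#define TCP_TS_OPT	12	/* aligned TCP timestamp option, if present */

/* Hypothetical flag bits standing in for the firmware parsing flags. */
#define FLAG_IPV6	0x1
#define FLAG_TCP_TS	0x2

static uint16_t lro_mss(unsigned int parsing_flags, uint16_t len_on_bd)
{
	uint16_t hdrs_len = ETH_HLEN + TCP_HLEN;

	hdrs_len += (parsing_flags & FLAG_IPV6) ? IPV6_HLEN : IPV4_HLEN;
	if (parsing_flags & FLAG_TCP_TS)
		hdrs_len += TCP_TS_OPT;

	return len_on_bd - hdrs_len;	/* TCP payload per segment */
}

int main(void)
{
	printf("IPv4 MSS: %u\n", lro_mss(0, 1514));
	printf("IPv6+timestamp MSS: %u\n", lro_mss(FLAG_IPV6 | FLAG_TCP_TS, 1514));
	return 0;
}
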
@@ -340,30 +377,30 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, | |||
340 | } | 377 | } |
341 | 378 | ||
342 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | 379 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, |
343 | struct sk_buff *skb, | 380 | u16 queue, struct sk_buff *skb, |
344 | struct eth_fast_path_rx_cqe *fp_cqe, | 381 | struct eth_end_agg_rx_cqe *cqe, |
345 | u16 cqe_idx, u16 parsing_flags) | 382 | u16 cqe_idx) |
346 | { | 383 | { |
347 | struct sw_rx_page *rx_pg, old_rx_pg; | 384 | struct sw_rx_page *rx_pg, old_rx_pg; |
348 | u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); | ||
349 | u32 i, frag_len, frag_size, pages; | 385 | u32 i, frag_len, frag_size, pages; |
350 | int err; | 386 | int err; |
351 | int j; | 387 | int j; |
388 | struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; | ||
389 | u16 len_on_bd = tpa_info->len_on_bd; | ||
352 | 390 | ||
353 | frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd; | 391 | frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd; |
354 | pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; | 392 | pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; |
355 | 393 | ||
356 | /* This is needed in order to enable forwarding support */ | 394 | /* This is needed in order to enable forwarding support */ |
357 | if (frag_size) | 395 | if (frag_size) |
358 | skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags, | 396 | skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, |
359 | len_on_bd); | 397 | tpa_info->parsing_flags, len_on_bd); |
360 | 398 | ||
361 | #ifdef BNX2X_STOP_ON_ERROR | 399 | #ifdef BNX2X_STOP_ON_ERROR |
362 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { | 400 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { |
363 | BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n", | 401 | BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n", |
364 | pages, cqe_idx); | 402 | pages, cqe_idx); |
365 | BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n", | 403 | BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len); |
366 | fp_cqe->pkt_len, len_on_bd); | ||
367 | bnx2x_panic(); | 404 | bnx2x_panic(); |
368 | return -EINVAL; | 405 | return -EINVAL; |
369 | } | 406 | } |
@@ -371,8 +408,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
371 | 408 | ||
372 | /* Run through the SGL and compose the fragmented skb */ | 409 | /* Run through the SGL and compose the fragmented skb */ |
373 | for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { | 410 | for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { |
374 | u16 sge_idx = | 411 | u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j])); |
375 | RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j])); | ||
376 | 412 | ||
377 | /* FW gives the indices of the SGE as if the ring is an array | 413 | /* FW gives the indices of the SGE as if the ring is an array |
378 | (meaning that "next" element will consume 2 indices) */ | 414 | (meaning that "next" element will consume 2 indices) */ |
@@ -407,13 +443,28 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
407 | } | 443 | } |
408 | 444 | ||
409 | static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | 445 | static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, |
410 | u16 queue, int pad, int len, union eth_rx_cqe *cqe, | 446 | u16 queue, struct eth_end_agg_rx_cqe *cqe, |
411 | u16 cqe_idx) | 447 | u16 cqe_idx) |
412 | { | 448 | { |
413 | struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue]; | 449 | struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; |
450 | struct sw_rx_bd *rx_buf = &tpa_info->first_buf; | ||
451 | u8 pad = tpa_info->placement_offset; | ||
452 | u16 len = tpa_info->len_on_bd; | ||
414 | struct sk_buff *skb = rx_buf->skb; | 453 | struct sk_buff *skb = rx_buf->skb; |
415 | /* alloc new skb */ | 454 | /* alloc new skb */ |
416 | struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); | 455 | struct sk_buff *new_skb; |
456 | u8 old_tpa_state = tpa_info->tpa_state; | ||
457 | |||
458 | tpa_info->tpa_state = BNX2X_TPA_STOP; | ||
459 | |||
460 | /* If there was an error during the handling of the TPA_START - | ||
461 | * drop this aggregation. | ||
462 | */ | ||
463 | if (old_tpa_state == BNX2X_TPA_ERROR) | ||
464 | goto drop; | ||
465 | |||
466 | /* Try to allocate the new skb */ | ||
467 | new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); | ||
417 | 468 | ||
418 | /* Unmap skb in the pool anyway, as we are going to change | 469 | /* Unmap skb in the pool anyway, as we are going to change |
419 | pool entry status to BNX2X_TPA_STOP even if new skb allocation | 470 | pool entry status to BNX2X_TPA_STOP even if new skb allocation |
@@ -422,11 +473,6 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
422 | fp->rx_buf_size, DMA_FROM_DEVICE); | 473 | fp->rx_buf_size, DMA_FROM_DEVICE); |
423 | 474 | ||
424 | if (likely(new_skb)) { | 475 | if (likely(new_skb)) { |
425 | /* fix ip xsum and give it to the stack */ | ||
426 | /* (no need to map the new skb) */ | ||
427 | u16 parsing_flags = | ||
428 | le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags); | ||
429 | |||
430 | prefetch(skb); | 476 | prefetch(skb); |
431 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); | 477 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); |
432 | 478 | ||
@@ -446,21 +492,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
446 | skb->protocol = eth_type_trans(skb, bp->dev); | 492 | skb->protocol = eth_type_trans(skb, bp->dev); |
447 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 493 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
448 | 494 | ||
449 | { | 495 | if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) { |
450 | struct iphdr *iph; | 496 | if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) |
451 | 497 | __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag); | |
452 | iph = (struct iphdr *)skb->data; | ||
453 | iph->check = 0; | ||
454 | iph->check = ip_fast_csum((u8 *)iph, iph->ihl); | ||
455 | } | ||
456 | |||
457 | if (!bnx2x_fill_frag_skb(bp, fp, skb, | ||
458 | &cqe->fast_path_cqe, cqe_idx, | ||
459 | parsing_flags)) { | ||
460 | if (parsing_flags & PARSING_FLAGS_VLAN) | ||
461 | __vlan_hwaccel_put_tag(skb, | ||
462 | le16_to_cpu(cqe->fast_path_cqe. | ||
463 | vlan_tag)); | ||
464 | napi_gro_receive(&fp->napi, skb); | 498 | napi_gro_receive(&fp->napi, skb); |
465 | } else { | 499 | } else { |
466 | DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" | 500 | DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" |
@@ -470,16 +504,16 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
470 | 504 | ||
471 | 505 | ||
472 | /* put new skb in bin */ | 506 | /* put new skb in bin */ |
473 | fp->tpa_pool[queue].skb = new_skb; | 507 | rx_buf->skb = new_skb; |
474 | 508 | ||
475 | } else { | 509 | return; |
476 | /* else drop the packet and keep the buffer in the bin */ | ||
477 | DP(NETIF_MSG_RX_STATUS, | ||
478 | "Failed to allocate new skb - dropping packet!\n"); | ||
479 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
480 | } | 510 | } |
481 | 511 | ||
482 | fp->tpa_state[queue] = BNX2X_TPA_STOP; | 512 | drop: |
513 | /* drop the packet and keep the buffer in the bin */ | ||
514 | DP(NETIF_MSG_RX_STATUS, | ||
515 | "Failed to allocate or map a new skb - dropping packet!\n"); | ||
516 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
483 | } | 517 | } |
484 | 518 | ||
485 | /* Set Toeplitz hash value in the skb using the value from the | 519 | /* Set Toeplitz hash value in the skb using the value from the |
@@ -533,9 +567,16 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
533 | struct sw_rx_bd *rx_buf = NULL; | 567 | struct sw_rx_bd *rx_buf = NULL; |
534 | struct sk_buff *skb; | 568 | struct sk_buff *skb; |
535 | union eth_rx_cqe *cqe; | 569 | union eth_rx_cqe *cqe; |
570 | struct eth_fast_path_rx_cqe *cqe_fp; | ||
536 | u8 cqe_fp_flags; | 571 | u8 cqe_fp_flags; |
572 | enum eth_rx_cqe_type cqe_fp_type; | ||
537 | u16 len, pad; | 573 | u16 len, pad; |
538 | 574 | ||
575 | #ifdef BNX2X_STOP_ON_ERROR | ||
576 | if (unlikely(bp->panic)) | ||
577 | return 0; | ||
578 | #endif | ||
579 | |||
539 | comp_ring_cons = RCQ_BD(sw_comp_cons); | 580 | comp_ring_cons = RCQ_BD(sw_comp_cons); |
540 | bd_prod = RX_BD(bd_prod); | 581 | bd_prod = RX_BD(bd_prod); |
541 | bd_cons = RX_BD(bd_cons); | 582 | bd_cons = RX_BD(bd_cons); |
@@ -548,17 +589,18 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
548 | PAGE_SIZE + 1)); | 589 | PAGE_SIZE + 1)); |
549 | 590 | ||
550 | cqe = &fp->rx_comp_ring[comp_ring_cons]; | 591 | cqe = &fp->rx_comp_ring[comp_ring_cons]; |
551 | cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; | 592 | cqe_fp = &cqe->fast_path_cqe; |
593 | cqe_fp_flags = cqe_fp->type_error_flags; | ||
594 | cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; | ||
552 | 595 | ||
553 | DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" | 596 | DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" |
554 | " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), | 597 | " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), |
555 | cqe_fp_flags, cqe->fast_path_cqe.status_flags, | 598 | cqe_fp_flags, cqe_fp->status_flags, |
556 | le32_to_cpu(cqe->fast_path_cqe.rss_hash_result), | 599 | le32_to_cpu(cqe_fp->rss_hash_result), |
557 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag), | 600 | le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len)); |
558 | le16_to_cpu(cqe->fast_path_cqe.pkt_len)); | ||
559 | 601 | ||
560 | /* is this a slowpath msg? */ | 602 | /* is this a slowpath msg? */ |
561 | if (unlikely(CQE_TYPE(cqe_fp_flags))) { | 603 | if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) { |
562 | bnx2x_sp_event(fp, cqe); | 604 | bnx2x_sp_event(fp, cqe); |
563 | goto next_cqe; | 605 | goto next_cqe; |
564 | 606 | ||
@@ -567,61 +609,59 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
567 | rx_buf = &fp->rx_buf_ring[bd_cons]; | 609 | rx_buf = &fp->rx_buf_ring[bd_cons]; |
568 | skb = rx_buf->skb; | 610 | skb = rx_buf->skb; |
569 | prefetch(skb); | 611 | prefetch(skb); |
570 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); | ||
571 | pad = cqe->fast_path_cqe.placement_offset; | ||
572 | 612 | ||
573 | /* - If CQE is marked both TPA_START and TPA_END it is | 613 | if (!CQE_TYPE_FAST(cqe_fp_type)) { |
574 | * a non-TPA CQE. | 614 | #ifdef BNX2X_STOP_ON_ERROR |
575 | * - FP CQE will always have either TPA_START or/and | 615 | /* sanity check */ |
576 | * TPA_STOP flags set. | 616 | if (fp->disable_tpa && |
577 | */ | 617 | (CQE_TYPE_START(cqe_fp_type) || |
578 | if ((!fp->disable_tpa) && | 618 | CQE_TYPE_STOP(cqe_fp_type))) |
579 | (TPA_TYPE(cqe_fp_flags) != | 619 | BNX2X_ERR("START/STOP packet while " |
580 | (TPA_TYPE_START | TPA_TYPE_END))) { | 620 | "disable_tpa type %x\n", |
581 | u16 queue = cqe->fast_path_cqe.queue_index; | 621 | CQE_TYPE(cqe_fp_type)); |
622 | #endif | ||
582 | 623 | ||
583 | if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) { | 624 | if (CQE_TYPE_START(cqe_fp_type)) { |
625 | u16 queue = cqe_fp->queue_index; | ||
584 | DP(NETIF_MSG_RX_STATUS, | 626 | DP(NETIF_MSG_RX_STATUS, |
585 | "calling tpa_start on queue %d\n", | 627 | "calling tpa_start on queue %d\n", |
586 | queue); | 628 | queue); |
587 | 629 | ||
588 | bnx2x_tpa_start(fp, queue, skb, | 630 | bnx2x_tpa_start(fp, queue, skb, |
589 | bd_cons, bd_prod); | 631 | bd_cons, bd_prod, |
632 | cqe_fp); | ||
590 | 633 | ||
591 | /* Set Toeplitz hash for an LRO skb */ | 634 | /* Set Toeplitz hash for LRO skb */ |
592 | bnx2x_set_skb_rxhash(bp, cqe, skb); | 635 | bnx2x_set_skb_rxhash(bp, cqe, skb); |
593 | 636 | ||
594 | goto next_rx; | 637 | goto next_rx; |
595 | } else { /* TPA_STOP */ | 638 | |
639 | } else { | ||
640 | u16 queue = | ||
641 | cqe->end_agg_cqe.queue_index; | ||
596 | DP(NETIF_MSG_RX_STATUS, | 642 | DP(NETIF_MSG_RX_STATUS, |
597 | "calling tpa_stop on queue %d\n", | 643 | "calling tpa_stop on queue %d\n", |
598 | queue); | 644 | queue); |
599 | 645 | ||
600 | if (!BNX2X_RX_SUM_FIX(cqe)) | 646 | bnx2x_tpa_stop(bp, fp, queue, |
601 | BNX2X_ERR("STOP on none TCP " | 647 | &cqe->end_agg_cqe, |
602 | "data\n"); | 648 | comp_ring_cons); |
603 | |||
604 | /* This is a size of the linear data | ||
605 | on this skb */ | ||
606 | len = le16_to_cpu(cqe->fast_path_cqe. | ||
607 | len_on_bd); | ||
608 | bnx2x_tpa_stop(bp, fp, queue, pad, | ||
609 | len, cqe, comp_ring_cons); | ||
610 | #ifdef BNX2X_STOP_ON_ERROR | 649 | #ifdef BNX2X_STOP_ON_ERROR |
611 | if (bp->panic) | 650 | if (bp->panic) |
612 | return 0; | 651 | return 0; |
613 | #endif | 652 | #endif |
614 | 653 | ||
615 | bnx2x_update_sge_prod(fp, | 654 | bnx2x_update_sge_prod(fp, cqe_fp); |
616 | &cqe->fast_path_cqe); | ||
617 | goto next_cqe; | 655 | goto next_cqe; |
618 | } | 656 | } |
619 | } | 657 | } |
620 | 658 | /* non TPA */ | |
659 | len = le16_to_cpu(cqe_fp->pkt_len); | ||
660 | pad = cqe_fp->placement_offset; | ||
621 | dma_sync_single_for_device(&bp->pdev->dev, | 661 | dma_sync_single_for_device(&bp->pdev->dev, |
622 | dma_unmap_addr(rx_buf, mapping), | 662 | dma_unmap_addr(rx_buf, mapping), |
623 | pad + RX_COPY_THRESH, | 663 | pad + RX_COPY_THRESH, |
624 | DMA_FROM_DEVICE); | 664 | DMA_FROM_DEVICE); |
625 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); | 665 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); |
626 | 666 | ||
627 | /* is this an error packet? */ | 667 | /* is this an error packet? */ |
@@ -640,8 +680,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
640 | (len <= RX_COPY_THRESH)) { | 680 | (len <= RX_COPY_THRESH)) { |
641 | struct sk_buff *new_skb; | 681 | struct sk_buff *new_skb; |
642 | 682 | ||
643 | new_skb = netdev_alloc_skb(bp->dev, | 683 | new_skb = netdev_alloc_skb(bp->dev, len + pad); |
644 | len + pad); | ||
645 | if (new_skb == NULL) { | 684 | if (new_skb == NULL) { |
646 | DP(NETIF_MSG_RX_ERR, | 685 | DP(NETIF_MSG_RX_ERR, |
647 | "ERROR packet dropped " | 686 | "ERROR packet dropped " |
@@ -687,6 +726,7 @@ reuse_rx: | |||
687 | skb_checksum_none_assert(skb); | 726 | skb_checksum_none_assert(skb); |
688 | 727 | ||
689 | if (bp->dev->features & NETIF_F_RXCSUM) { | 728 | if (bp->dev->features & NETIF_F_RXCSUM) { |
729 | |||
690 | if (likely(BNX2X_RX_CSUM_OK(cqe))) | 730 | if (likely(BNX2X_RX_CSUM_OK(cqe))) |
691 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 731 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
692 | else | 732 | else |
@@ -696,10 +736,10 @@ reuse_rx: | |||
696 | 736 | ||
697 | skb_record_rx_queue(skb, fp->index); | 737 | skb_record_rx_queue(skb, fp->index); |
698 | 738 | ||
699 | if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & | 739 | if (le16_to_cpu(cqe_fp->pars_flags.flags) & |
700 | PARSING_FLAGS_VLAN) | 740 | PARSING_FLAGS_VLAN) |
701 | __vlan_hwaccel_put_tag(skb, | 741 | __vlan_hwaccel_put_tag(skb, |
702 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag)); | 742 | le16_to_cpu(cqe_fp->vlan_tag)); |
703 | napi_gro_receive(&fp->napi, skb); | 743 | napi_gro_receive(&fp->napi, skb); |
704 | 744 | ||
705 | 745 | ||
@@ -738,12 +778,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) | |||
738 | struct bnx2x_fastpath *fp = fp_cookie; | 778 | struct bnx2x_fastpath *fp = fp_cookie; |
739 | struct bnx2x *bp = fp->bp; | 779 | struct bnx2x *bp = fp->bp; |
740 | 780 | ||
741 | /* Return here if interrupt is disabled */ | ||
742 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | ||
743 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); | ||
744 | return IRQ_HANDLED; | ||
745 | } | ||
746 | |||
747 | DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB " | 781 | DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB " |
748 | "[fp %d fw_sd %d igusb %d]\n", | 782 | "[fp %d fw_sd %d igusb %d]\n", |
749 | fp->index, fp->fw_sb_id, fp->igu_sb_id); | 783 | fp->index, fp->fw_sb_id, fp->igu_sb_id); |
@@ -931,7 +965,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
931 | { | 965 | { |
932 | int func = BP_FUNC(bp); | 966 | int func = BP_FUNC(bp); |
933 | int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : | 967 | int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : |
934 | ETH_MAX_AGGREGATION_QUEUES_E1H; | 968 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2; |
935 | u16 ring_prod; | 969 | u16 ring_prod; |
936 | int i, j; | 970 | int i, j; |
937 | 971 | ||
@@ -943,11 +977,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
943 | "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); | 977 | "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); |
944 | 978 | ||
945 | if (!fp->disable_tpa) { | 979 | if (!fp->disable_tpa) { |
946 | /* Fill the per-aggregation pool */ | 980 | /* Fill the per-aggregation pool */ |
947 | for (i = 0; i < max_agg_queues; i++) { | 981 | for (i = 0; i < max_agg_queues; i++) { |
948 | fp->tpa_pool[i].skb = | 982 | struct bnx2x_agg_info *tpa_info = |
949 | netdev_alloc_skb(bp->dev, fp->rx_buf_size); | 983 | &fp->tpa_info[i]; |
950 | if (!fp->tpa_pool[i].skb) { | 984 | struct sw_rx_bd *first_buf = |
985 | &tpa_info->first_buf; | ||
986 | |||
987 | first_buf->skb = netdev_alloc_skb(bp->dev, | ||
988 | fp->rx_buf_size); | ||
989 | if (!first_buf->skb) { | ||
951 | BNX2X_ERR("Failed to allocate TPA " | 990 | BNX2X_ERR("Failed to allocate TPA " |
952 | "skb pool for queue[%d] - " | 991 | "skb pool for queue[%d] - " |
953 | "disabling TPA on this " | 992 | "disabling TPA on this " |
@@ -956,10 +995,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
956 | fp->disable_tpa = 1; | 995 | fp->disable_tpa = 1; |
957 | break; | 996 | break; |
958 | } | 997 | } |
959 | dma_unmap_addr_set((struct sw_rx_bd *) | 998 | dma_unmap_addr_set(first_buf, mapping, 0); |
960 | &bp->fp->tpa_pool[i], | 999 | tpa_info->tpa_state = BNX2X_TPA_STOP; |
961 | mapping, 0); | ||
962 | fp->tpa_state[i] = BNX2X_TPA_STOP; | ||
963 | } | 1000 | } |
964 | 1001 | ||
965 | /* "next page" elements initialization */ | 1002 | /* "next page" elements initialization */ |
@@ -975,13 +1012,13 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
975 | if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { | 1012 | if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { |
976 | BNX2X_ERR("was only able to allocate " | 1013 | BNX2X_ERR("was only able to allocate " |
977 | "%d rx sges\n", i); | 1014 | "%d rx sges\n", i); |
978 | BNX2X_ERR("disabling TPA for" | 1015 | BNX2X_ERR("disabling TPA for " |
979 | " queue[%d]\n", j); | 1016 | "queue[%d]\n", j); |
980 | /* Cleanup already allocated elements */ | 1017 | /* Cleanup already allocated elements */ |
981 | bnx2x_free_rx_sge_range(bp, | 1018 | bnx2x_free_rx_sge_range(bp, fp, |
982 | fp, ring_prod); | 1019 | ring_prod); |
983 | bnx2x_free_tpa_pool(bp, | 1020 | bnx2x_free_tpa_pool(bp, fp, |
984 | fp, max_agg_queues); | 1021 | max_agg_queues); |
985 | fp->disable_tpa = 1; | 1022 | fp->disable_tpa = 1; |
986 | ring_prod = 0; | 1023 | ring_prod = 0; |
987 | break; | 1024 | break; |
@@ -1009,7 +1046,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
1009 | if (j != 0) | 1046 | if (j != 0) |
1010 | continue; | 1047 | continue; |
1011 | 1048 | ||
1012 | if (!CHIP_IS_E2(bp)) { | 1049 | if (CHIP_IS_E1(bp)) { |
1013 | REG_WR(bp, BAR_USTRORM_INTMEM + | 1050 | REG_WR(bp, BAR_USTRORM_INTMEM + |
1014 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), | 1051 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), |
1015 | U64_LO(fp->rx_comp_mapping)); | 1052 | U64_LO(fp->rx_comp_mapping)); |
@@ -1053,7 +1090,6 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) | |||
1053 | 1090 | ||
1054 | if (skb == NULL) | 1091 | if (skb == NULL) |
1055 | continue; | 1092 | continue; |
1056 | |||
1057 | dma_unmap_single(&bp->pdev->dev, | 1093 | dma_unmap_single(&bp->pdev->dev, |
1058 | dma_unmap_addr(rx_buf, mapping), | 1094 | dma_unmap_addr(rx_buf, mapping), |
1059 | fp->rx_buf_size, DMA_FROM_DEVICE); | 1095 | fp->rx_buf_size, DMA_FROM_DEVICE); |
@@ -1075,7 +1111,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) | |||
1075 | if (!fp->disable_tpa) | 1111 | if (!fp->disable_tpa) |
1076 | bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? | 1112 | bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? |
1077 | ETH_MAX_AGGREGATION_QUEUES_E1 : | 1113 | ETH_MAX_AGGREGATION_QUEUES_E1 : |
1078 | ETH_MAX_AGGREGATION_QUEUES_E1H); | 1114 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2); |
1079 | } | 1115 | } |
1080 | } | 1116 | } |
1081 | 1117 | ||
@@ -1102,30 +1138,43 @@ void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) | |||
1102 | } | 1138 | } |
1103 | } | 1139 | } |
1104 | 1140 | ||
1105 | static void bnx2x_free_msix_irqs(struct bnx2x *bp) | 1141 | /** |
1142 | * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors | ||
1143 | * | ||
1144 | * @bp: driver handle | ||
1145 | * @nvecs: number of vectors to be released | ||
1146 | */ | ||
1147 | static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) | ||
1106 | { | 1148 | { |
1107 | int i, offset = 1; | 1149 | int i, offset = 0; |
1108 | 1150 | ||
1109 | free_irq(bp->msix_table[0].vector, bp->dev); | 1151 | if (nvecs == offset) |
1152 | return; | ||
1153 | free_irq(bp->msix_table[offset].vector, bp->dev); | ||
1110 | DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", | 1154 | DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", |
1111 | bp->msix_table[0].vector); | 1155 | bp->msix_table[offset].vector); |
1112 | 1156 | offset++; | |
1113 | #ifdef BCM_CNIC | 1157 | #ifdef BCM_CNIC |
1158 | if (nvecs == offset) | ||
1159 | return; | ||
1114 | offset++; | 1160 | offset++; |
1115 | #endif | 1161 | #endif |
1162 | |||
1116 | for_each_eth_queue(bp, i) { | 1163 | for_each_eth_queue(bp, i) { |
1117 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " | 1164 | if (nvecs == offset) |
1118 | "state %x\n", i, bp->msix_table[i + offset].vector, | 1165 | return; |
1119 | bnx2x_fp(bp, i, state)); | 1166 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d " |
1167 | "irq\n", i, bp->msix_table[offset].vector); | ||
1120 | 1168 | ||
1121 | free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]); | 1169 | free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); |
1122 | } | 1170 | } |
1123 | } | 1171 | } |
1124 | 1172 | ||
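
bnx2x_free_msix_irqs() now takes the number of vectors that were actually requested, which lets the request path roll back exactly what it has obtained so far instead of freeing a fixed set. A stand-alone sketch of that request/rollback pattern; request_vec() and free_vec() are hypothetical stand-ins for request_irq()/free_irq():

#include <stdbool.h>
#include <stdio.h>

#define NVECS	4

static bool request_vec(int i)
{
	return i < 2;		/* simulate a failure at vector 2 */
}

static void free_vec(int i)
{
	printf("freed vector %d\n", i);
}

/* Free only the first 'nvecs' vectors, i.e. the ones actually requested. */
static void free_vecs(int nvecs)
{
	int i;

	for (i = 0; i < nvecs; i++)
		free_vec(i);
}

static int request_all(void)
{
	int offset;

	for (offset = 0; offset < NVECS; offset++) {
		if (!request_vec(offset)) {
			free_vecs(offset);	/* roll back what is already held */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return request_all() ? 1 : 0;
}
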
1125 | void bnx2x_free_irq(struct bnx2x *bp) | 1173 | void bnx2x_free_irq(struct bnx2x *bp) |
1126 | { | 1174 | { |
1127 | if (bp->flags & USING_MSIX_FLAG) | 1175 | if (bp->flags & USING_MSIX_FLAG) |
1128 | bnx2x_free_msix_irqs(bp); | 1176 | bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + |
1177 | CNIC_CONTEXT_USE + 1); | ||
1129 | else if (bp->flags & USING_MSI_FLAG) | 1178 | else if (bp->flags & USING_MSI_FLAG) |
1130 | free_irq(bp->pdev->irq, bp->dev); | 1179 | free_irq(bp->pdev->irq, bp->dev); |
1131 | else | 1180 | else |
@@ -1198,9 +1247,10 @@ int bnx2x_enable_msix(struct bnx2x *bp) | |||
1198 | 1247 | ||
1199 | static int bnx2x_req_msix_irqs(struct bnx2x *bp) | 1248 | static int bnx2x_req_msix_irqs(struct bnx2x *bp) |
1200 | { | 1249 | { |
1201 | int i, rc, offset = 1; | 1250 | int i, rc, offset = 0; |
1202 | 1251 | ||
1203 | rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0, | 1252 | rc = request_irq(bp->msix_table[offset++].vector, |
1253 | bnx2x_msix_sp_int, 0, | ||
1204 | bp->dev->name, bp->dev); | 1254 | bp->dev->name, bp->dev); |
1205 | if (rc) { | 1255 | if (rc) { |
1206 | BNX2X_ERR("request sp irq failed\n"); | 1256 | BNX2X_ERR("request sp irq failed\n"); |
@@ -1218,13 +1268,13 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp) | |||
1218 | rc = request_irq(bp->msix_table[offset].vector, | 1268 | rc = request_irq(bp->msix_table[offset].vector, |
1219 | bnx2x_msix_fp_int, 0, fp->name, fp); | 1269 | bnx2x_msix_fp_int, 0, fp->name, fp); |
1220 | if (rc) { | 1270 | if (rc) { |
1221 | BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc); | 1271 | BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i, |
1222 | bnx2x_free_msix_irqs(bp); | 1272 | bp->msix_table[offset].vector, rc); |
1273 | bnx2x_free_msix_irqs(bp, offset); | ||
1223 | return -EBUSY; | 1274 | return -EBUSY; |
1224 | } | 1275 | } |
1225 | 1276 | ||
1226 | offset++; | 1277 | offset++; |
1227 | fp->state = BNX2X_FP_STATE_IRQ; | ||
1228 | } | 1278 | } |
1229 | 1279 | ||
1230 | i = BNX2X_NUM_ETH_QUEUES(bp); | 1280 | i = BNX2X_NUM_ETH_QUEUES(bp); |
@@ -1264,42 +1314,56 @@ static int bnx2x_req_irq(struct bnx2x *bp) | |||
1264 | 1314 | ||
1265 | rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, | 1315 | rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, |
1266 | bp->dev->name, bp->dev); | 1316 | bp->dev->name, bp->dev); |
1267 | if (!rc) | ||
1268 | bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ; | ||
1269 | |||
1270 | return rc; | 1317 | return rc; |
1271 | } | 1318 | } |
1272 | 1319 | ||
1273 | static void bnx2x_napi_enable(struct bnx2x *bp) | 1320 | static inline int bnx2x_setup_irqs(struct bnx2x *bp) |
1321 | { | ||
1322 | int rc = 0; | ||
1323 | if (bp->flags & USING_MSIX_FLAG) { | ||
1324 | rc = bnx2x_req_msix_irqs(bp); | ||
1325 | if (rc) | ||
1326 | return rc; | ||
1327 | } else { | ||
1328 | bnx2x_ack_int(bp); | ||
1329 | rc = bnx2x_req_irq(bp); | ||
1330 | if (rc) { | ||
1331 | BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); | ||
1332 | return rc; | ||
1333 | } | ||
1334 | if (bp->flags & USING_MSI_FLAG) { | ||
1335 | bp->dev->irq = bp->pdev->irq; | ||
1336 | netdev_info(bp->dev, "using MSI IRQ %d\n", | ||
1337 | bp->pdev->irq); | ||
1338 | } | ||
1339 | } | ||
1340 | |||
1341 | return 0; | ||
1342 | } | ||
1343 | |||
1344 | static inline void bnx2x_napi_enable(struct bnx2x *bp) | ||
1274 | { | 1345 | { |
1275 | int i; | 1346 | int i; |
1276 | 1347 | ||
1277 | for_each_napi_queue(bp, i) | 1348 | for_each_rx_queue(bp, i) |
1278 | napi_enable(&bnx2x_fp(bp, i, napi)); | 1349 | napi_enable(&bnx2x_fp(bp, i, napi)); |
1279 | } | 1350 | } |
1280 | 1351 | ||
1281 | static void bnx2x_napi_disable(struct bnx2x *bp) | 1352 | static inline void bnx2x_napi_disable(struct bnx2x *bp) |
1282 | { | 1353 | { |
1283 | int i; | 1354 | int i; |
1284 | 1355 | ||
1285 | for_each_napi_queue(bp, i) | 1356 | for_each_rx_queue(bp, i) |
1286 | napi_disable(&bnx2x_fp(bp, i, napi)); | 1357 | napi_disable(&bnx2x_fp(bp, i, napi)); |
1287 | } | 1358 | } |
1288 | 1359 | ||
1289 | void bnx2x_netif_start(struct bnx2x *bp) | 1360 | void bnx2x_netif_start(struct bnx2x *bp) |
1290 | { | 1361 | { |
1291 | int intr_sem; | 1362 | if (netif_running(bp->dev)) { |
1292 | 1363 | bnx2x_napi_enable(bp); | |
1293 | intr_sem = atomic_dec_and_test(&bp->intr_sem); | 1364 | bnx2x_int_enable(bp); |
1294 | smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ | 1365 | if (bp->state == BNX2X_STATE_OPEN) |
1295 | 1366 | netif_tx_wake_all_queues(bp->dev); | |
1296 | if (intr_sem) { | ||
1297 | if (netif_running(bp->dev)) { | ||
1298 | bnx2x_napi_enable(bp); | ||
1299 | bnx2x_int_enable(bp); | ||
1300 | if (bp->state == BNX2X_STATE_OPEN) | ||
1301 | netif_tx_wake_all_queues(bp->dev); | ||
1302 | } | ||
1303 | } | 1367 | } |
1304 | } | 1368 | } |
1305 | 1369 | ||
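
The new bnx2x_setup_irqs() helper in this hunk concentrates the interrupt-mode decision in one place: try MSI-X first, otherwise fall back to a single MSI or legacy INTx vector shared by the slow and fast paths. A simplified sketch of that selection, with hypothetical request helpers standing in for the real IRQ setup calls:

#include <stdbool.h>
#include <stdio.h>

enum irq_mode { IRQ_MSIX, IRQ_MSI, IRQ_INTX };

static int req_msix(void)   { return 0; }
static int req_single(void) { return 0; }

static int setup_irqs(bool have_msix, bool have_msi, enum irq_mode *mode)
{
	int rc;

	if (have_msix) {
		rc = req_msix();
		if (rc)
			return rc;
		*mode = IRQ_MSIX;
		return 0;
	}

	rc = req_single();	/* one vector shared by slow path and fast path */
	if (rc)
		return rc;
	*mode = have_msi ? IRQ_MSI : IRQ_INTX;
	return 0;
}

int main(void)
{
	enum irq_mode mode;

	if (!setup_irqs(true, false, &mode))
		printf("selected irq mode: %d\n", mode);
	return 0;
}
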
@@ -1307,7 +1371,6 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) | |||
1307 | { | 1371 | { |
1308 | bnx2x_int_disable_sync(bp, disable_hw); | 1372 | bnx2x_int_disable_sync(bp, disable_hw); |
1309 | bnx2x_napi_disable(bp); | 1373 | bnx2x_napi_disable(bp); |
1310 | netif_tx_disable(bp->dev); | ||
1311 | } | 1374 | } |
1312 | 1375 | ||
1313 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) | 1376 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) |
@@ -1358,26 +1421,6 @@ void bnx2x_set_num_queues(struct bnx2x *bp) | |||
1358 | bp->num_queues += NONE_ETH_CONTEXT_USE; | 1421 | bp->num_queues += NONE_ETH_CONTEXT_USE; |
1359 | } | 1422 | } |
1360 | 1423 | ||
1361 | #ifdef BCM_CNIC | ||
1362 | static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp) | ||
1363 | { | ||
1364 | if (!NO_FCOE(bp)) { | ||
1365 | if (!IS_MF_SD(bp)) | ||
1366 | bnx2x_set_fip_eth_mac_addr(bp, 1); | ||
1367 | bnx2x_set_all_enode_macs(bp, 1); | ||
1368 | bp->flags |= FCOE_MACS_SET; | ||
1369 | } | ||
1370 | } | ||
1371 | #endif | ||
1372 | |||
1373 | static void bnx2x_release_firmware(struct bnx2x *bp) | ||
1374 | { | ||
1375 | kfree(bp->init_ops_offsets); | ||
1376 | kfree(bp->init_ops); | ||
1377 | kfree(bp->init_data); | ||
1378 | release_firmware(bp->firmware); | ||
1379 | } | ||
1380 | |||
1381 | static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) | 1424 | static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) |
1382 | { | 1425 | { |
1383 | int rc, num = bp->num_queues; | 1426 | int rc, num = bp->num_queues; |
@@ -1409,27 +1452,198 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) | |||
1409 | */ | 1452 | */ |
1410 | fp->rx_buf_size = | 1453 | fp->rx_buf_size = |
1411 | BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD + | 1454 | BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD + |
1412 | BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; | 1455 | BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; |
1413 | else | 1456 | else |
1414 | fp->rx_buf_size = | 1457 | fp->rx_buf_size = |
1415 | bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN + | 1458 | bp->dev->mtu + ETH_OVREHEAD + |
1416 | IP_HEADER_ALIGNMENT_PADDING; | 1459 | BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; |
1460 | } | ||
1461 | } | ||
1462 | |||
1463 | static inline int bnx2x_init_rss_pf(struct bnx2x *bp) | ||
1464 | { | ||
1465 | int i; | ||
1466 | u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; | ||
1467 | u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); | ||
1468 | |||
1469 | /* | ||
1470 | * Prepare the initial contents of the indirection table if RSS is | ||
1471 | * enabled | ||
1472 | */ | ||
1473 | if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { | ||
1474 | for (i = 0; i < sizeof(ind_table); i++) | ||
1475 | ind_table[i] = | ||
1476 | bp->fp->cl_id + (i % num_eth_queues); | ||
1477 | } | ||
1478 | |||
1479 | /* | ||
1480 | * For 57710 and 57711 SEARCHER configuration (rss_keys) is | ||
1481 | * per-port, so if explicit configuration is needed, do it only | ||
1482 | * for a PMF. | ||
1483 | * | ||
1484 | * For 57712 and newer on the other hand it's a per-function | ||
1485 | * configuration. | ||
1486 | */ | ||
1487 | return bnx2x_config_rss_pf(bp, ind_table, | ||
1488 | bp->port.pmf || !CHIP_IS_E1x(bp)); | ||
1489 | } | ||
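
The indirection table fill above spreads the RSS hash buckets round-robin over the ethernet clients, starting from the leading client id. A minimal standalone sketch of that arithmetic (the table size, queue count and client id below are illustrative stand-ins, not values taken from the driver):

#include <stdio.h>

/* Illustrative stand-ins; the real values come from the HSI and the HW config. */
#define TABLE_SIZE     128   /* plays the role of T_ETH_INDIRECTION_TABLE_SIZE */
#define NUM_ETH_QUEUES 4     /* plays the role of BNX2X_NUM_ETH_QUEUES(bp) */
#define LEADING_CL_ID  17    /* plays the role of bp->fp->cl_id */

int main(void)
{
	unsigned char ind_table[TABLE_SIZE];
	int i;

	/* Same round-robin fill as bnx2x_init_rss_pf(): every RSS hash
	 * bucket maps to one of the ethernet clients, starting from the
	 * leading client id. */
	for (i = 0; i < TABLE_SIZE; i++)
		ind_table[i] = LEADING_CL_ID + (i % NUM_ETH_QUEUES);

	for (i = 0; i < 8; i++)
		printf("bucket %d -> client %d\n", i, ind_table[i]);
	return 0;
}
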
1490 | |||
1491 | int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) | ||
1492 | { | ||
1493 | struct bnx2x_config_rss_params params = {0}; | ||
1494 | int i; | ||
1495 | |||
1496 | /* Although RSS is meaningless when there is a single HW queue, we | ||
1497 | * still need it enabled in order to have HW Rx hash generated. | ||
1498 | * | ||
1499 | * if (!is_eth_multi(bp)) | ||
1500 | * bp->multi_mode = ETH_RSS_MODE_DISABLED; | ||
1501 | */ | ||
1502 | |||
1503 | params.rss_obj = &bp->rss_conf_obj; | ||
1504 | |||
1505 | __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); | ||
1506 | |||
1507 | /* RSS mode */ | ||
1508 | switch (bp->multi_mode) { | ||
1509 | case ETH_RSS_MODE_DISABLED: | ||
1510 | __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); | ||
1511 | break; | ||
1512 | case ETH_RSS_MODE_REGULAR: | ||
1513 | __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); | ||
1514 | break; | ||
1515 | case ETH_RSS_MODE_VLAN_PRI: | ||
1516 | __set_bit(BNX2X_RSS_MODE_VLAN_PRI, ¶ms.rss_flags); | ||
1517 | break; | ||
1518 | case ETH_RSS_MODE_E1HOV_PRI: | ||
1519 | __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, ¶ms.rss_flags); | ||
1520 | break; | ||
1521 | case ETH_RSS_MODE_IP_DSCP: | ||
1522 | __set_bit(BNX2X_RSS_MODE_IP_DSCP, ¶ms.rss_flags); | ||
1523 | break; | ||
1524 | default: | ||
1525 | BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode); | ||
1526 | return -EINVAL; | ||
1527 | } | ||
1528 | |||
1529 | /* If RSS is enabled */ | ||
1530 | if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { | ||
1531 | /* RSS configuration */ | ||
1532 | __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); | ||
1533 | __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); | ||
1534 | __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); | ||
1535 | __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); | ||
1536 | |||
1537 | /* Hash bits */ | ||
1538 | params.rss_result_mask = MULTI_MASK; | ||
1539 | |||
1540 | memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); | ||
1541 | |||
1542 | if (config_hash) { | ||
1543 | /* RSS keys */ | ||
1544 | for (i = 0; i < sizeof(params.rss_key) / 4; i++) | ||
1545 | params.rss_key[i] = random32(); | ||
1546 | |||
1547 | __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); | ||
1548 | } | ||
1417 | } | 1549 | } |
1550 | |||
1551 | return bnx2x_config_rss(bp, ¶ms); | ||
1552 | } | ||
1553 | |||
1554 | static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) | ||
1555 | { | ||
1556 | struct bnx2x_func_state_params func_params = {0}; | ||
1557 | |||
1558 | /* Prepare parameters for function state transitions */ | ||
1559 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); | ||
1560 | |||
1561 | func_params.f_obj = &bp->func_obj; | ||
1562 | func_params.cmd = BNX2X_F_CMD_HW_INIT; | ||
1563 | |||
1564 | func_params.params.hw_init.load_phase = load_code; | ||
1565 | |||
1566 | return bnx2x_func_state_change(bp, &func_params); | ||
1418 | } | 1567 | } |
1419 | 1568 | ||
1569 | /* | ||
1570 | * Cleans the objects that have internal lists without sending | ||
1571 | * ramrods. Should be run when interrupts are disabled. | ||
1572 | */ | ||
1573 | static void bnx2x_squeeze_objects(struct bnx2x *bp) | ||
1574 | { | ||
1575 | int rc; | ||
1576 | unsigned long ramrod_flags = 0, vlan_mac_flags = 0; | ||
1577 | struct bnx2x_mcast_ramrod_params rparam = {0}; | ||
1578 | struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; | ||
1579 | |||
1580 | /***************** Cleanup MACs' object first *************************/ | ||
1581 | |||
1582 | /* Wait for completion of the requested commands */ | ||
1583 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); | ||
1584 | /* Perform a dry cleanup */ | ||
1585 | __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); | ||
1586 | |||
1587 | /* Clean ETH primary MAC */ | ||
1588 | __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); | ||
1589 | rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags, | ||
1590 | &ramrod_flags); | ||
1591 | if (rc != 0) | ||
1592 | BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc); | ||
1593 | |||
1594 | /* Cleanup UC list */ | ||
1595 | vlan_mac_flags = 0; | ||
1596 | __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags); | ||
1597 | rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, | ||
1598 | &ramrod_flags); | ||
1599 | if (rc != 0) | ||
1600 | BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc); | ||
1601 | |||
1602 | /***************** Now clean mcast object *****************************/ | ||
1603 | rparam.mcast_obj = &bp->mcast_obj; | ||
1604 | __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); | ||
1605 | |||
1606 | /* Add a DEL command... */ | ||
1607 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); | ||
1608 | if (rc < 0) | ||
1609 | BNX2X_ERR("Failed to add a new DEL command to a multi-cast " | ||
1610 | "object: %d\n", rc); | ||
1611 | |||
1612 | /* ...and wait until all pending commands are cleared */ | ||
1613 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); | ||
1614 | while (rc != 0) { | ||
1615 | if (rc < 0) { | ||
1616 | BNX2X_ERR("Failed to clean multi-cast object: %d\n", | ||
1617 | rc); | ||
1618 | return; | ||
1619 | } | ||
1620 | |||
1621 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); | ||
1622 | } | ||
1623 | } | ||
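
The drain loop above relies on the return convention of bnx2x_config_mcast(): a positive value means pending commands remain, zero means the object is clean, and a negative value is an error. A tiny standalone sketch of the same polling pattern, with a made-up fake_cont() helper standing in for the CONT command:

#include <stdio.h>

/* Models the return convention used by bnx2x_config_mcast():
 *  >0 - more pending commands remain, 0 - done, <0 - error. */
static int fake_cont(void)
{
	static int pending = 3;	/* pretend three commands are queued */

	if (pending > 0)
		return pending--;
	return 0;
}

int main(void)
{
	int rc = fake_cont();

	while (rc != 0) {
		if (rc < 0) {
			printf("cleanup failed: %d\n", rc);
			return 1;
		}
		printf("still pending (%d), continue draining\n", rc);
		rc = fake_cont();
	}
	printf("multicast object clean\n");
	return 0;
}
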
1624 | |||
1625 | #ifndef BNX2X_STOP_ON_ERROR | ||
1626 | #define LOAD_ERROR_EXIT(bp, label) \ | ||
1627 | do { \ | ||
1628 | (bp)->state = BNX2X_STATE_ERROR; \ | ||
1629 | goto label; \ | ||
1630 | } while (0) | ||
1631 | #else | ||
1632 | #define LOAD_ERROR_EXIT(bp, label) \ | ||
1633 | do { \ | ||
1634 | (bp)->state = BNX2X_STATE_ERROR; \ | ||
1635 | (bp)->panic = 1; \ | ||
1636 | return -EBUSY; \ | ||
1637 | } while (0) | ||
1638 | #endif | ||
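
LOAD_ERROR_EXIT centralizes the load-time error policy: a normal build records the ERROR state and jumps to the matching unwind label, while a BNX2X_STOP_ON_ERROR build latches the panic flag and returns immediately so the failing state is preserved for debugging. A standalone sketch of the same pattern, using invented names (fake_load, fake_step) rather than the driver's:

#include <stdio.h>

/* Toggle to model a BNX2X_STOP_ON_ERROR build. */
/* #define STOP_ON_ERROR */

struct dev_state { int state; int panic; };
enum { STATE_OPEN, STATE_ERROR };

#ifndef STOP_ON_ERROR
#define LOAD_ERROR_EXIT(dev, label)  do { (dev)->state = STATE_ERROR; goto label; } while (0)
#else
#define LOAD_ERROR_EXIT(dev, label)  do { (dev)->state = STATE_ERROR; (dev)->panic = 1; return -1; } while (0)
#endif

static int fake_step(int fail) { return fail ? -1 : 0; }

static int fake_load(struct dev_state *dev)
{
	if (fake_step(0))
		LOAD_ERROR_EXIT(dev, err0);
	if (fake_step(1))		/* simulate a failing second step */
		LOAD_ERROR_EXIT(dev, err1);
	dev->state = STATE_OPEN;
	return 0;
err1:
	printf("unwinding step 1\n");
err0:
	printf("unwinding step 0\n");
	return -1;
}

int main(void)
{
	struct dev_state dev = { STATE_OPEN, 0 };
	int rc = fake_load(&dev);

	printf("load rc %d, state %d\n", rc, dev.state);
	return 0;
}
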
1639 | |||
1420 | /* must be called with rtnl_lock */ | 1640 | /* must be called with rtnl_lock */ |
1421 | int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | 1641 | int bnx2x_nic_load(struct bnx2x *bp, int load_mode) |
1422 | { | 1642 | { |
1643 | int port = BP_PORT(bp); | ||
1423 | u32 load_code; | 1644 | u32 load_code; |
1424 | int i, rc; | 1645 | int i, rc; |
1425 | 1646 | ||
1426 | /* Set init arrays */ | ||
1427 | rc = bnx2x_init_firmware(bp); | ||
1428 | if (rc) { | ||
1429 | BNX2X_ERR("Error loading firmware\n"); | ||
1430 | return rc; | ||
1431 | } | ||
1432 | |||
1433 | #ifdef BNX2X_STOP_ON_ERROR | 1647 | #ifdef BNX2X_STOP_ON_ERROR |
1434 | if (unlikely(bp->panic)) | 1648 | if (unlikely(bp->panic)) |
1435 | return -EPERM; | 1649 | return -EPERM; |
@@ -1456,6 +1670,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1456 | /* Set the receive queues buffer size */ | 1670 | /* Set the receive queues buffer size */ |
1457 | bnx2x_set_rx_buf_size(bp); | 1671 | bnx2x_set_rx_buf_size(bp); |
1458 | 1672 | ||
1673 | /* | ||
1674 | * Set the TPA flag for each queue. The TPA flag determines the queue's | ||
1675 | * minimal size, so it must be set prior to queue memory allocation. | ||
1676 | */ | ||
1459 | for_each_queue(bp, i) | 1677 | for_each_queue(bp, i) |
1460 | bnx2x_fp(bp, i, disable_tpa) = | 1678 | bnx2x_fp(bp, i, disable_tpa) = |
1461 | ((bp->flags & TPA_ENABLE_FLAG) == 0); | 1679 | ((bp->flags & TPA_ENABLE_FLAG) == 0); |
@@ -1475,31 +1693,30 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1475 | rc = bnx2x_set_real_num_queues(bp); | 1693 | rc = bnx2x_set_real_num_queues(bp); |
1476 | if (rc) { | 1694 | if (rc) { |
1477 | BNX2X_ERR("Unable to set real_num_queues\n"); | 1695 | BNX2X_ERR("Unable to set real_num_queues\n"); |
1478 | goto load_error0; | 1696 | LOAD_ERROR_EXIT(bp, load_error0); |
1479 | } | 1697 | } |
1480 | 1698 | ||
1481 | bnx2x_napi_enable(bp); | 1699 | bnx2x_napi_enable(bp); |
1482 | 1700 | ||
1483 | /* Send LOAD_REQUEST command to MCP | 1701 | /* Send LOAD_REQUEST command to MCP |
1484 | Returns the type of LOAD command: | 1702 | * Returns the type of LOAD command: |
1485 | if it is the first port to be initialized | 1703 | * if it is the first port to be initialized |
1486 | common blocks should be initialized, otherwise - not | 1704 | * common blocks should be initialized, otherwise - not |
1487 | */ | 1705 | */ |
1488 | if (!BP_NOMCP(bp)) { | 1706 | if (!BP_NOMCP(bp)) { |
1489 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); | 1707 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); |
1490 | if (!load_code) { | 1708 | if (!load_code) { |
1491 | BNX2X_ERR("MCP response failure, aborting\n"); | 1709 | BNX2X_ERR("MCP response failure, aborting\n"); |
1492 | rc = -EBUSY; | 1710 | rc = -EBUSY; |
1493 | goto load_error1; | 1711 | LOAD_ERROR_EXIT(bp, load_error1); |
1494 | } | 1712 | } |
1495 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { | 1713 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { |
1496 | rc = -EBUSY; /* other port in diagnostic mode */ | 1714 | rc = -EBUSY; /* other port in diagnostic mode */ |
1497 | goto load_error1; | 1715 | LOAD_ERROR_EXIT(bp, load_error1); |
1498 | } | 1716 | } |
1499 | 1717 | ||
1500 | } else { | 1718 | } else { |
1501 | int path = BP_PATH(bp); | 1719 | int path = BP_PATH(bp); |
1502 | int port = BP_PORT(bp); | ||
1503 | 1720 | ||
1504 | DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", | 1721 | DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", |
1505 | path, load_count[path][0], load_count[path][1], | 1722 | path, load_count[path][0], load_count[path][1], |
@@ -1519,36 +1736,58 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1519 | 1736 | ||
1520 | if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || | 1737 | if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || |
1521 | (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || | 1738 | (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || |
1522 | (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) | 1739 | (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { |
1523 | bp->port.pmf = 1; | 1740 | bp->port.pmf = 1; |
1524 | else | 1741 | /* |
1742 | * We need the barrier to ensure the ordering between the | ||
1743 | * writing to bp->port.pmf here and reading it from the | ||
1744 | * bnx2x_periodic_task(). | ||
1745 | */ | ||
1746 | smp_mb(); | ||
1747 | queue_delayed_work(bnx2x_wq, &bp->period_task, 0); | ||
1748 | } else | ||
1525 | bp->port.pmf = 0; | 1749 | bp->port.pmf = 0; |
1526 | DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); | 1750 | DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); |
1527 | 1751 | ||
1752 | /* Init Function state controlling object */ | ||
1753 | bnx2x__init_func_obj(bp); | ||
1754 | |||
1528 | /* Initialize HW */ | 1755 | /* Initialize HW */ |
1529 | rc = bnx2x_init_hw(bp, load_code); | 1756 | rc = bnx2x_init_hw(bp, load_code); |
1530 | if (rc) { | 1757 | if (rc) { |
1531 | BNX2X_ERR("HW init failed, aborting\n"); | 1758 | BNX2X_ERR("HW init failed, aborting\n"); |
1532 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); | 1759 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); |
1533 | goto load_error2; | 1760 | LOAD_ERROR_EXIT(bp, load_error2); |
1534 | } | 1761 | } |
1535 | 1762 | ||
1536 | /* Connect to IRQs */ | 1763 | /* Connect to IRQs */ |
1537 | rc = bnx2x_setup_irqs(bp); | 1764 | rc = bnx2x_setup_irqs(bp); |
1538 | if (rc) { | 1765 | if (rc) { |
1539 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); | 1766 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); |
1540 | goto load_error2; | 1767 | LOAD_ERROR_EXIT(bp, load_error2); |
1541 | } | 1768 | } |
1542 | 1769 | ||
1543 | /* Setup NIC internals and enable interrupts */ | 1770 | /* Setup NIC internals and enable interrupts */ |
1544 | bnx2x_nic_init(bp, load_code); | 1771 | bnx2x_nic_init(bp, load_code); |
1545 | 1772 | ||
1773 | /* Init per-function objects */ | ||
1774 | bnx2x_init_bp_objs(bp); | ||
1775 | |||
1546 | if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || | 1776 | if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || |
1547 | (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && | 1777 | (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && |
1548 | (bp->common.shmem2_base)) | 1778 | (bp->common.shmem2_base)) { |
1549 | SHMEM2_WR(bp, dcc_support, | 1779 | if (SHMEM2_HAS(bp, dcc_support)) |
1550 | (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | | 1780 | SHMEM2_WR(bp, dcc_support, |
1551 | SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); | 1781 | (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | |
1782 | SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); | ||
1783 | } | ||
1784 | |||
1785 | bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; | ||
1786 | rc = bnx2x_func_start(bp); | ||
1787 | if (rc) { | ||
1788 | BNX2X_ERR("Function start failed!\n"); | ||
1789 | LOAD_ERROR_EXIT(bp, load_error3); | ||
1790 | } | ||
1552 | 1791 | ||
1553 | /* Send LOAD_DONE command to MCP */ | 1792 | /* Send LOAD_DONE command to MCP */ |
1554 | if (!BP_NOMCP(bp)) { | 1793 | if (!BP_NOMCP(bp)) { |
@@ -1556,74 +1795,38 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1556 | if (!load_code) { | 1795 | if (!load_code) { |
1557 | BNX2X_ERR("MCP response failure, aborting\n"); | 1796 | BNX2X_ERR("MCP response failure, aborting\n"); |
1558 | rc = -EBUSY; | 1797 | rc = -EBUSY; |
1559 | goto load_error3; | 1798 | LOAD_ERROR_EXIT(bp, load_error3); |
1560 | } | 1799 | } |
1561 | } | 1800 | } |
1562 | 1801 | ||
1563 | bnx2x_dcbx_init(bp); | 1802 | rc = bnx2x_setup_leading(bp); |
1564 | |||
1565 | bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; | ||
1566 | |||
1567 | rc = bnx2x_func_start(bp); | ||
1568 | if (rc) { | ||
1569 | BNX2X_ERR("Function start failed!\n"); | ||
1570 | #ifndef BNX2X_STOP_ON_ERROR | ||
1571 | goto load_error3; | ||
1572 | #else | ||
1573 | bp->panic = 1; | ||
1574 | return -EBUSY; | ||
1575 | #endif | ||
1576 | } | ||
1577 | |||
1578 | rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */); | ||
1579 | if (rc) { | 1803 | if (rc) { |
1580 | BNX2X_ERR("Setup leading failed!\n"); | 1804 | BNX2X_ERR("Setup leading failed!\n"); |
1581 | #ifndef BNX2X_STOP_ON_ERROR | 1805 | LOAD_ERROR_EXIT(bp, load_error3); |
1582 | goto load_error3; | ||
1583 | #else | ||
1584 | bp->panic = 1; | ||
1585 | return -EBUSY; | ||
1586 | #endif | ||
1587 | } | ||
1588 | |||
1589 | if (!CHIP_IS_E1(bp) && | ||
1590 | (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) { | ||
1591 | DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); | ||
1592 | bp->flags |= MF_FUNC_DIS; | ||
1593 | } | 1806 | } |
1594 | 1807 | ||
1595 | #ifdef BCM_CNIC | 1808 | #ifdef BCM_CNIC |
1596 | /* Enable Timer scan */ | 1809 | /* Enable Timer scan */ |
1597 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1); | 1810 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); |
1598 | #endif | 1811 | #endif |
1599 | 1812 | ||
1600 | for_each_nondefault_queue(bp, i) { | 1813 | for_each_nondefault_queue(bp, i) { |
1601 | rc = bnx2x_setup_client(bp, &bp->fp[i], 0); | 1814 | rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); |
1602 | if (rc) | 1815 | if (rc) |
1603 | #ifdef BCM_CNIC | 1816 | LOAD_ERROR_EXIT(bp, load_error4); |
1604 | goto load_error4; | ||
1605 | #else | ||
1606 | goto load_error3; | ||
1607 | #endif | ||
1608 | } | 1817 | } |
1609 | 1818 | ||
1819 | rc = bnx2x_init_rss_pf(bp); | ||
1820 | if (rc) | ||
1821 | LOAD_ERROR_EXIT(bp, load_error4); | ||
1822 | |||
1610 | /* Now when Clients are configured we are ready to work */ | 1823 | /* Now when Clients are configured we are ready to work */ |
1611 | bp->state = BNX2X_STATE_OPEN; | 1824 | bp->state = BNX2X_STATE_OPEN; |
1612 | 1825 | ||
1613 | #ifdef BCM_CNIC | 1826 | /* Configure a ucast MAC */ |
1614 | bnx2x_set_fcoe_eth_macs(bp); | 1827 | rc = bnx2x_set_eth_mac(bp, true); |
1615 | #endif | 1828 | if (rc) |
1616 | 1829 | LOAD_ERROR_EXIT(bp, load_error4); | |
1617 | bnx2x_set_eth_mac(bp, 1); | ||
1618 | |||
1619 | /* Clear MC configuration */ | ||
1620 | if (CHIP_IS_E1(bp)) | ||
1621 | bnx2x_invalidate_e1_mc_list(bp); | ||
1622 | else | ||
1623 | bnx2x_invalidate_e1h_mc_list(bp); | ||
1624 | |||
1625 | /* Clear UC lists configuration */ | ||
1626 | bnx2x_invalidate_uc_list(bp); | ||
1627 | 1830 | ||
1628 | if (bp->pending_max) { | 1831 | if (bp->pending_max) { |
1629 | bnx2x_update_max_mf_config(bp, bp->pending_max); | 1832 | bnx2x_update_max_mf_config(bp, bp->pending_max); |
@@ -1633,15 +1836,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1633 | if (bp->port.pmf) | 1836 | if (bp->port.pmf) |
1634 | bnx2x_initial_phy_init(bp, load_mode); | 1837 | bnx2x_initial_phy_init(bp, load_mode); |
1635 | 1838 | ||
1636 | /* Initialize Rx filtering */ | 1839 | /* Start fast path */ |
1840 | |||
1841 | /* Initialize Rx filter. */ | ||
1842 | netif_addr_lock_bh(bp->dev); | ||
1637 | bnx2x_set_rx_mode(bp->dev); | 1843 | bnx2x_set_rx_mode(bp->dev); |
1844 | netif_addr_unlock_bh(bp->dev); | ||
1638 | 1845 | ||
1639 | /* Start fast path */ | 1846 | /* Start the Tx */ |
1640 | switch (load_mode) { | 1847 | switch (load_mode) { |
1641 | case LOAD_NORMAL: | 1848 | case LOAD_NORMAL: |
1642 | /* Tx queue should be only reenabled */ | 1849 | /* Tx queue should be only reenabled */ |
1643 | netif_tx_wake_all_queues(bp->dev); | 1850 | netif_tx_wake_all_queues(bp->dev); |
1644 | /* Initialize the receive filter. */ | ||
1645 | break; | 1851 | break; |
1646 | 1852 | ||
1647 | case LOAD_OPEN: | 1853 | case LOAD_OPEN: |
@@ -1670,18 +1876,28 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1670 | #endif | 1876 | #endif |
1671 | bnx2x_inc_load_cnt(bp); | 1877 | bnx2x_inc_load_cnt(bp); |
1672 | 1878 | ||
1673 | bnx2x_release_firmware(bp); | 1879 | /* Wait for all pending SP commands to complete */ |
1880 | if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { | ||
1881 | BNX2X_ERR("Timeout waiting for SP elements to complete\n"); | ||
1882 | bnx2x_nic_unload(bp, UNLOAD_CLOSE); | ||
1883 | return -EBUSY; | ||
1884 | } | ||
1674 | 1885 | ||
1886 | bnx2x_dcbx_init(bp); | ||
1675 | return 0; | 1887 | return 0; |
1676 | 1888 | ||
1677 | #ifdef BCM_CNIC | 1889 | #ifndef BNX2X_STOP_ON_ERROR |
1678 | load_error4: | 1890 | load_error4: |
1891 | #ifdef BCM_CNIC | ||
1679 | /* Disable Timer scan */ | 1892 | /* Disable Timer scan */ |
1680 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0); | 1893 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); |
1681 | #endif | 1894 | #endif |
1682 | load_error3: | 1895 | load_error3: |
1683 | bnx2x_int_disable_sync(bp, 1); | 1896 | bnx2x_int_disable_sync(bp, 1); |
1684 | 1897 | ||
1898 | /* Clean queueable objects */ | ||
1899 | bnx2x_squeeze_objects(bp); | ||
1900 | |||
1685 | /* Free SKBs, SGEs, TPA pool and driver internals */ | 1901 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
1686 | bnx2x_free_skbs(bp); | 1902 | bnx2x_free_skbs(bp); |
1687 | for_each_rx_queue(bp, i) | 1903 | for_each_rx_queue(bp, i) |
@@ -1701,22 +1917,31 @@ load_error1: | |||
1701 | load_error0: | 1917 | load_error0: |
1702 | bnx2x_free_mem(bp); | 1918 | bnx2x_free_mem(bp); |
1703 | 1919 | ||
1704 | bnx2x_release_firmware(bp); | ||
1705 | |||
1706 | return rc; | 1920 | return rc; |
1921 | #endif /* ! BNX2X_STOP_ON_ERROR */ | ||
1707 | } | 1922 | } |
1708 | 1923 | ||
1709 | /* must be called with rtnl_lock */ | 1924 | /* must be called with rtnl_lock */ |
1710 | int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | 1925 | int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) |
1711 | { | 1926 | { |
1712 | int i; | 1927 | int i; |
1713 | 1928 | bool global = false; | |
1714 | if (bp->state == BNX2X_STATE_CLOSED) { | 1929 | |
1715 | /* Interface has been removed - nothing to recover */ | 1930 | if ((bp->state == BNX2X_STATE_CLOSED) || |
1931 | (bp->state == BNX2X_STATE_ERROR)) { | ||
1932 | /* We can get here if the driver has been unloaded | ||
1933 | * during parity error recovery and is either waiting for a | ||
1934 | * leader to complete or for other functions to unload and | ||
1935 | * then ifdown has been issued. In this case we want to | ||
1936 | * unload and let other functions complete a recovery | ||
1937 | * process. | ||
1938 | */ | ||
1716 | bp->recovery_state = BNX2X_RECOVERY_DONE; | 1939 | bp->recovery_state = BNX2X_RECOVERY_DONE; |
1717 | bp->is_leader = 0; | 1940 | bp->is_leader = 0; |
1718 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08); | 1941 | bnx2x_release_leader_lock(bp); |
1719 | smp_wmb(); | 1942 | smp_mb(); |
1943 | |||
1944 | DP(NETIF_MSG_HW, "Releasing a leadership...\n"); | ||
1720 | 1945 | ||
1721 | return -EINVAL; | 1946 | return -EINVAL; |
1722 | } | 1947 | } |
@@ -1725,18 +1950,19 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
1725 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); | 1950 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); |
1726 | #endif | 1951 | #endif |
1727 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; | 1952 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; |
1953 | smp_mb(); | ||
1728 | 1954 | ||
1729 | /* Set "drop all" */ | ||
1730 | bp->rx_mode = BNX2X_RX_MODE_NONE; | 1955 | bp->rx_mode = BNX2X_RX_MODE_NONE; |
1731 | bnx2x_set_storm_rx_mode(bp); | ||
1732 | 1956 | ||
1733 | /* Stop Tx */ | 1957 | /* Stop Tx */ |
1734 | bnx2x_tx_disable(bp); | 1958 | bnx2x_tx_disable(bp); |
1735 | 1959 | ||
1736 | del_timer_sync(&bp->timer); | 1960 | del_timer_sync(&bp->timer); |
1737 | 1961 | ||
1738 | SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, | 1962 | /* Set ALWAYS_ALIVE bit in shmem */ |
1739 | (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); | 1963 | bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; |
1964 | |||
1965 | bnx2x_drv_pulse(bp); | ||
1740 | 1966 | ||
1741 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | 1967 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); |
1742 | 1968 | ||
@@ -1744,13 +1970,35 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
1744 | if (unload_mode != UNLOAD_RECOVERY) | 1970 | if (unload_mode != UNLOAD_RECOVERY) |
1745 | bnx2x_chip_cleanup(bp, unload_mode); | 1971 | bnx2x_chip_cleanup(bp, unload_mode); |
1746 | else { | 1972 | else { |
1747 | /* Disable HW interrupts, NAPI and Tx */ | 1973 | /* Send the UNLOAD_REQUEST to the MCP */ |
1974 | bnx2x_send_unload_req(bp, unload_mode); | ||
1975 | |||
1976 | /* | ||
1977 | * Prevent transactions to host from the functions on the | ||
1978 | * engine that doesn't reset global blocks in case of global | ||
1979 | * attention once global blocks are reset and gates are opened | ||
1980 | * (the engine whose leader will perform the recovery | ||
1981 | * last). | ||
1982 | */ | ||
1983 | if (!CHIP_IS_E1x(bp)) | ||
1984 | bnx2x_pf_disable(bp); | ||
1985 | |||
1986 | /* Disable HW interrupts, NAPI */ | ||
1748 | bnx2x_netif_stop(bp, 1); | 1987 | bnx2x_netif_stop(bp, 1); |
1749 | 1988 | ||
1750 | /* Release IRQs */ | 1989 | /* Release IRQs */ |
1751 | bnx2x_free_irq(bp); | 1990 | bnx2x_free_irq(bp); |
1991 | |||
1992 | /* Report UNLOAD_DONE to MCP */ | ||
1993 | bnx2x_send_unload_done(bp); | ||
1752 | } | 1994 | } |
1753 | 1995 | ||
1996 | /* | ||
1997 | * At this stage no more interrupts will arrive, so we may safely clean | ||
1998 | * the queueable objects here in case they failed to get cleaned so far. | ||
1999 | */ | ||
2000 | bnx2x_squeeze_objects(bp); | ||
2001 | |||
1754 | bp->port.pmf = 0; | 2002 | bp->port.pmf = 0; |
1755 | 2003 | ||
1756 | /* Free SKBs, SGEs, TPA pool and driver internals */ | 2004 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
@@ -1762,17 +2010,24 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
1762 | 2010 | ||
1763 | bp->state = BNX2X_STATE_CLOSED; | 2011 | bp->state = BNX2X_STATE_CLOSED; |
1764 | 2012 | ||
2013 | /* Check if there are pending parity attentions. If there are - set | ||
2014 | * RECOVERY_IN_PROGRESS. | ||
2015 | */ | ||
2016 | if (bnx2x_chk_parity_attn(bp, &global, false)) { | ||
2017 | bnx2x_set_reset_in_progress(bp); | ||
2018 | |||
2019 | /* Set RESET_IS_GLOBAL if needed */ | ||
2020 | if (global) | ||
2021 | bnx2x_set_reset_global(bp); | ||
2022 | } | ||
2023 | |||
2024 | |||
1765 | /* The last driver must disable a "close the gate" if there is no | 2025 | /* The last driver must disable a "close the gate" if there is no |
1766 | * parity attention or "process kill" pending. | 2026 | * parity attention or "process kill" pending. |
1767 | */ | 2027 | */ |
1768 | if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) && | 2028 | if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp))) |
1769 | bnx2x_reset_is_done(bp)) | ||
1770 | bnx2x_disable_close_the_gate(bp); | 2029 | bnx2x_disable_close_the_gate(bp); |
1771 | 2030 | ||
1772 | /* Reset MCP mail box sequence if there is on going recovery */ | ||
1773 | if (unload_mode == UNLOAD_RECOVERY) | ||
1774 | bp->fw_seq = 0; | ||
1775 | |||
1776 | return 0; | 2031 | return 0; |
1777 | } | 2032 | } |
1778 | 2033 | ||
@@ -2148,6 +2403,22 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, | |||
2148 | sizeof(struct udphdr) - skb->data; | 2403 | sizeof(struct udphdr) - skb->data; |
2149 | } | 2404 | } |
2150 | 2405 | ||
2406 | static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, | ||
2407 | struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) | ||
2408 | { | ||
2409 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; | ||
2410 | |||
2411 | if (xmit_type & XMIT_CSUM_V4) | ||
2412 | tx_start_bd->bd_flags.as_bitfield |= | ||
2413 | ETH_TX_BD_FLAGS_IP_CSUM; | ||
2414 | else | ||
2415 | tx_start_bd->bd_flags.as_bitfield |= | ||
2416 | ETH_TX_BD_FLAGS_IPV6; | ||
2417 | |||
2418 | if (!(xmit_type & XMIT_CSUM_TCP)) | ||
2419 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; | ||
2420 | } | ||
2421 | |||
2151 | /** | 2422 | /** |
2152 | * bnx2x_set_pbd_csum - update PBD with checksum and return header length | 2423 | * bnx2x_set_pbd_csum - update PBD with checksum and return header length |
2153 | * | 2424 | * |
@@ -2213,7 +2484,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2213 | struct bnx2x_fastpath *fp; | 2484 | struct bnx2x_fastpath *fp; |
2214 | struct netdev_queue *txq; | 2485 | struct netdev_queue *txq; |
2215 | struct sw_tx_bd *tx_buf; | 2486 | struct sw_tx_bd *tx_buf; |
2216 | struct eth_tx_start_bd *tx_start_bd; | 2487 | struct eth_tx_start_bd *tx_start_bd, *first_bd; |
2217 | struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; | 2488 | struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; |
2218 | struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; | 2489 | struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; |
2219 | struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; | 2490 | struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; |
@@ -2275,7 +2546,15 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2275 | } | 2546 | } |
2276 | } | 2547 | } |
2277 | #endif | 2548 | #endif |
2278 | 2549 | /* Map skb linear data for DMA */ | |
2550 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | ||
2551 | skb_headlen(skb), DMA_TO_DEVICE); | ||
2552 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | ||
2553 | DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - " | ||
2554 | "silently dropping this SKB\n"); | ||
2555 | dev_kfree_skb_any(skb); | ||
2556 | return NETDEV_TX_OK; | ||
2557 | } | ||
2279 | /* | 2558 | /* |
2280 | Please read carefully. First we use one BD which we mark as start, | 2559 | Please read carefully. First we use one BD which we mark as start, |
2281 | then we have a parsing info BD (used for TSO or xsum), | 2560 | then we have a parsing info BD (used for TSO or xsum), |
@@ -2285,12 +2564,19 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2285 | And above all, all pbd sizes are in words - NOT DWORDS! | 2564 | ||
2286 | */ | 2565 | */ |
2287 | 2566 | ||
2288 | pkt_prod = fp->tx_pkt_prod++; | 2567 | /* get the current pkt producer now - advance it just before sending the | ||
2568 | * packet, since mapping of pages may fail and cause the packet to be dropped | ||
2569 | */ | ||
2570 | pkt_prod = fp->tx_pkt_prod; | ||
2289 | bd_prod = TX_BD(fp->tx_bd_prod); | 2571 | bd_prod = TX_BD(fp->tx_bd_prod); |
2290 | 2572 | ||
2291 | /* get a tx_buf and first BD */ | 2573 | /* get a tx_buf and first BD |
2574 | * tx_start_bd may be changed during SPLIT, | ||
2575 | * but first_bd will always stay first | ||
2576 | */ | ||
2292 | tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; | 2577 | tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; |
2293 | tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; | 2578 | tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; |
2579 | first_bd = tx_start_bd; | ||
2294 | 2580 | ||
2295 | tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; | 2581 | tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; |
2296 | SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE, | 2582 | SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE, |
@@ -2319,22 +2605,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2319 | /* turn on parsing and get a BD */ | 2605 | /* turn on parsing and get a BD */ |
2320 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | 2606 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
2321 | 2607 | ||
2322 | if (xmit_type & XMIT_CSUM) { | 2608 | if (xmit_type & XMIT_CSUM) |
2323 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; | 2609 | bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); |
2324 | 2610 | ||
2325 | if (xmit_type & XMIT_CSUM_V4) | 2611 | if (!CHIP_IS_E1x(bp)) { |
2326 | tx_start_bd->bd_flags.as_bitfield |= | ||
2327 | ETH_TX_BD_FLAGS_IP_CSUM; | ||
2328 | else | ||
2329 | tx_start_bd->bd_flags.as_bitfield |= | ||
2330 | ETH_TX_BD_FLAGS_IPV6; | ||
2331 | |||
2332 | if (!(xmit_type & XMIT_CSUM_TCP)) | ||
2333 | tx_start_bd->bd_flags.as_bitfield |= | ||
2334 | ETH_TX_BD_FLAGS_IS_UDP; | ||
2335 | } | ||
2336 | |||
2337 | if (CHIP_IS_E2(bp)) { | ||
2338 | pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2; | 2612 | pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2; |
2339 | memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); | 2613 | memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); |
2340 | /* Set PBD in checksum offload case */ | 2614 | /* Set PBD in checksum offload case */ |
@@ -2342,6 +2616,20 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2342 | hlen = bnx2x_set_pbd_csum_e2(bp, skb, | 2616 | hlen = bnx2x_set_pbd_csum_e2(bp, skb, |
2343 | &pbd_e2_parsing_data, | 2617 | &pbd_e2_parsing_data, |
2344 | xmit_type); | 2618 | xmit_type); |
2619 | if (IS_MF_SI(bp)) { | ||
2620 | /* | ||
2621 | * fill in the MAC addresses in the PBD - for local | ||
2622 | * switching | ||
2623 | */ | ||
2624 | bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi, | ||
2625 | &pbd_e2->src_mac_addr_mid, | ||
2626 | &pbd_e2->src_mac_addr_lo, | ||
2627 | eth->h_source); | ||
2628 | bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi, | ||
2629 | &pbd_e2->dst_mac_addr_mid, | ||
2630 | &pbd_e2->dst_mac_addr_lo, | ||
2631 | eth->h_dest); | ||
2632 | } | ||
2345 | } else { | 2633 | } else { |
2346 | pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x; | 2634 | pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x; |
2347 | memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); | 2635 | memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); |
@@ -2351,15 +2639,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2351 | 2639 | ||
2352 | } | 2640 | } |
2353 | 2641 | ||
2354 | /* Map skb linear data for DMA */ | ||
2355 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | ||
2356 | skb_headlen(skb), DMA_TO_DEVICE); | ||
2357 | |||
2358 | /* Setup the data pointer of the first BD of the packet */ | 2642 | /* Setup the data pointer of the first BD of the packet */ |
2359 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | 2643 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
2360 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | 2644 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
2361 | nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */ | 2645 | nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ |
2362 | tx_start_bd->nbd = cpu_to_le16(nbd); | ||
2363 | tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); | 2646 | tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); |
2364 | pkt_size = tx_start_bd->nbytes; | 2647 | pkt_size = tx_start_bd->nbytes; |
2365 | 2648 | ||
@@ -2382,7 +2665,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2382 | if (unlikely(skb_headlen(skb) > hlen)) | 2665 | if (unlikely(skb_headlen(skb) > hlen)) |
2383 | bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, | 2666 | bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, |
2384 | hlen, bd_prod, ++nbd); | 2667 | hlen, bd_prod, ++nbd); |
2385 | if (CHIP_IS_E2(bp)) | 2668 | if (!CHIP_IS_E1x(bp)) |
2386 | bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, | 2669 | bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, |
2387 | xmit_type); | 2670 | xmit_type); |
2388 | else | 2671 | else |
@@ -2401,19 +2684,34 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2401 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 2684 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
2402 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 2685 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2403 | 2686 | ||
2687 | mapping = dma_map_page(&bp->pdev->dev, frag->page, | ||
2688 | frag->page_offset, frag->size, | ||
2689 | DMA_TO_DEVICE); | ||
2690 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | ||
2691 | |||
2692 | DP(NETIF_MSG_TX_QUEUED, "Unable to map page - " | ||
2693 | "dropping packet...\n"); | ||
2694 | |||
2695 | /* we need to unmap all buffers already mapped | ||
2696 | * for this SKB; | ||
2697 | * first_bd->nbd needs to be properly updated | ||
2698 | * before the call to bnx2x_free_tx_pkt | ||
2699 | */ | ||
2700 | first_bd->nbd = cpu_to_le16(nbd); | ||
2701 | bnx2x_free_tx_pkt(bp, fp, TX_BD(fp->tx_pkt_prod)); | ||
2702 | return NETDEV_TX_OK; | ||
2703 | } | ||
2704 | |||
2404 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | 2705 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
2405 | tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd; | 2706 | tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd; |
2406 | if (total_pkt_bd == NULL) | 2707 | if (total_pkt_bd == NULL) |
2407 | total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; | 2708 | total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; |
2408 | 2709 | ||
2409 | mapping = dma_map_page(&bp->pdev->dev, frag->page, | ||
2410 | frag->page_offset, | ||
2411 | frag->size, DMA_TO_DEVICE); | ||
2412 | |||
2413 | tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | 2710 | tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
2414 | tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | 2711 | tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
2415 | tx_data_bd->nbytes = cpu_to_le16(frag->size); | 2712 | tx_data_bd->nbytes = cpu_to_le16(frag->size); |
2416 | le16_add_cpu(&pkt_size, frag->size); | 2713 | le16_add_cpu(&pkt_size, frag->size); |
2714 | nbd++; | ||
2417 | 2715 | ||
2418 | DP(NETIF_MSG_TX_QUEUED, | 2716 | DP(NETIF_MSG_TX_QUEUED, |
2419 | "frag %d bd @%p addr (%x:%x) nbytes %d\n", | 2717 | "frag %d bd @%p addr (%x:%x) nbytes %d\n", |
@@ -2423,6 +2721,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2423 | 2721 | ||
2424 | DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); | 2722 | DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); |
2425 | 2723 | ||
2724 | /* update with actual num BDs */ | ||
2725 | first_bd->nbd = cpu_to_le16(nbd); | ||
2726 | |||
2426 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | 2727 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
2427 | 2728 | ||
2428 | /* now send a tx doorbell, counting the next BD | 2729 | /* now send a tx doorbell, counting the next BD |
@@ -2431,6 +2732,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2431 | if (TX_BD_POFF(bd_prod) < nbd) | 2732 | if (TX_BD_POFF(bd_prod) < nbd) |
2432 | nbd++; | 2733 | nbd++; |
2433 | 2734 | ||
2735 | /* total_pkt_bytes should be set on the first data BD if | ||
2736 | * it's not an LSO packet and there is more than one | ||
2737 | * data BD. In this case pkt_size is limited by an MTU value. | ||
2738 | * However, we prefer to set it for an LSO packet (while we don't | ||
2739 | * have to) in order to save some CPU cycles in a non-LSO | ||
2740 | * case, when we care much more about them. | ||
2741 | */ | ||
2434 | if (total_pkt_bd != NULL) | 2742 | if (total_pkt_bd != NULL) |
2435 | total_pkt_bd->total_pkt_bytes = pkt_size; | 2743 | total_pkt_bd->total_pkt_bytes = pkt_size; |
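
The nbd bookkeeping in the transmit path starts from two BDs (the start BD plus the parsing BD), adds one per successfully mapped page fragment and one more if bnx2x_tx_split() carved the headers into their own BD, and only then commits first_bd->nbd; a final increment covers the case where the producer lands on a BD-page boundary. A simplified standalone sketch of that count (it ignores the page-boundary case, which depends on the ring geometry):

#include <stdio.h>

/* Simplified BD count for one transmitted skb: start BD + parsing BD,
 * plus one data BD per mapped page fragment, plus one extra data BD
 * when the linear part is split into header + payload (TSO case).
 * The real driver also adds one when the producer crosses a BD page. */
static int count_tx_bds(int nr_frags, int header_split)
{
	int nbd = 2;			/* start_bd + pbd */

	if (header_split)
		nbd++;			/* extra BD from bnx2x_tx_split() */
	nbd += nr_frags;		/* one BD per page fragment */
	return nbd;
}

int main(void)
{
	printf("linear only:        %d BDs\n", count_tx_bds(0, 0));
	printf("3 frags, no split:  %d BDs\n", count_tx_bds(3, 0));
	printf("3 frags, TSO split: %d BDs\n", count_tx_bds(3, 1));
	return 0;
}
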
2436 | 2744 | ||
@@ -2451,6 +2759,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2451 | pbd_e2->parsing_data); | 2759 | pbd_e2->parsing_data); |
2452 | DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); | 2760 | DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); |
2453 | 2761 | ||
2762 | fp->tx_pkt_prod++; | ||
2454 | /* | 2763 | /* |
2455 | * Make sure that the BD data is updated before updating the producer | 2764 | * Make sure that the BD data is updated before updating the producer |
2456 | * since FW might read the BD right after the producer is updated. | 2765 | * since FW might read the BD right after the producer is updated. |
@@ -2491,15 +2800,23 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p) | |||
2491 | { | 2800 | { |
2492 | struct sockaddr *addr = p; | 2801 | struct sockaddr *addr = p; |
2493 | struct bnx2x *bp = netdev_priv(dev); | 2802 | struct bnx2x *bp = netdev_priv(dev); |
2803 | int rc = 0; | ||
2494 | 2804 | ||
2495 | if (!is_valid_ether_addr((u8 *)(addr->sa_data))) | 2805 | if (!is_valid_ether_addr((u8 *)(addr->sa_data))) |
2496 | return -EINVAL; | 2806 | return -EINVAL; |
2497 | 2807 | ||
2808 | if (netif_running(dev)) { | ||
2809 | rc = bnx2x_set_eth_mac(bp, false); | ||
2810 | if (rc) | ||
2811 | return rc; | ||
2812 | } | ||
2813 | |||
2498 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 2814 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
2815 | |||
2499 | if (netif_running(dev)) | 2816 | if (netif_running(dev)) |
2500 | bnx2x_set_eth_mac(bp, 1); | 2817 | rc = bnx2x_set_eth_mac(bp, true); |
2501 | 2818 | ||
2502 | return 0; | 2819 | return rc; |
2503 | } | 2820 | } |
2504 | 2821 | ||
2505 | static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) | 2822 | static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) |
@@ -2516,7 +2833,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) | |||
2516 | } else { | 2833 | } else { |
2517 | #endif | 2834 | #endif |
2518 | /* status blocks */ | 2835 | /* status blocks */ |
2519 | if (CHIP_IS_E2(bp)) | 2836 | if (!CHIP_IS_E1x(bp)) |
2520 | BNX2X_PCI_FREE(sb->e2_sb, | 2837 | BNX2X_PCI_FREE(sb->e2_sb, |
2521 | bnx2x_fp(bp, fp_index, | 2838 | bnx2x_fp(bp, fp_index, |
2522 | status_blk_mapping), | 2839 | status_blk_mapping), |
@@ -2572,7 +2889,7 @@ void bnx2x_free_fp_mem(struct bnx2x *bp) | |||
2572 | static inline void set_sb_shortcuts(struct bnx2x *bp, int index) | 2889 | static inline void set_sb_shortcuts(struct bnx2x *bp, int index) |
2573 | { | 2890 | { |
2574 | union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); | 2891 | union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); |
2575 | if (CHIP_IS_E2(bp)) { | 2892 | if (!CHIP_IS_E1x(bp)) { |
2576 | bnx2x_fp(bp, index, sb_index_values) = | 2893 | bnx2x_fp(bp, index, sb_index_values) = |
2577 | (__le16 *)status_blk.e2_sb->sb.index_values; | 2894 | (__le16 *)status_blk.e2_sb->sb.index_values; |
2578 | bnx2x_fp(bp, index, sb_running_index) = | 2895 | bnx2x_fp(bp, index, sb_running_index) = |
@@ -2609,7 +2926,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) | |||
2609 | if (!IS_FCOE_IDX(index)) { | 2926 | if (!IS_FCOE_IDX(index)) { |
2610 | #endif | 2927 | #endif |
2611 | /* status blocks */ | 2928 | /* status blocks */ |
2612 | if (CHIP_IS_E2(bp)) | 2929 | if (!CHIP_IS_E1x(bp)) |
2613 | BNX2X_PCI_ALLOC(sb->e2_sb, | 2930 | BNX2X_PCI_ALLOC(sb->e2_sb, |
2614 | &bnx2x_fp(bp, index, status_blk_mapping), | 2931 | &bnx2x_fp(bp, index, status_blk_mapping), |
2615 | sizeof(struct host_hc_status_block_e2)); | 2932 | sizeof(struct host_hc_status_block_e2)); |
@@ -2620,7 +2937,12 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) | |||
2620 | #ifdef BCM_CNIC | 2937 | #ifdef BCM_CNIC |
2621 | } | 2938 | } |
2622 | #endif | 2939 | #endif |
2623 | set_sb_shortcuts(bp, index); | 2940 | |
2941 | /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to | ||
2942 | * set shortcuts for it. | ||
2943 | */ | ||
2944 | if (!IS_FCOE_IDX(index)) | ||
2945 | set_sb_shortcuts(bp, index); | ||
2624 | 2946 | ||
2625 | /* Tx */ | 2947 | /* Tx */ |
2626 | if (!skip_tx_queue(bp, index)) { | 2948 | if (!skip_tx_queue(bp, index)) { |
@@ -2697,9 +3019,13 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp) | |||
2697 | if (bnx2x_alloc_fp_mem_at(bp, 0)) | 3019 | if (bnx2x_alloc_fp_mem_at(bp, 0)) |
2698 | return -ENOMEM; | 3020 | return -ENOMEM; |
2699 | #ifdef BCM_CNIC | 3021 | #ifdef BCM_CNIC |
2700 | /* FCoE */ | 3022 | if (!NO_FCOE(bp)) |
2701 | if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX)) | 3023 | /* FCoE */ |
2702 | return -ENOMEM; | 3024 | if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX)) |
3025 | /* we will fail the load process instead of marking | ||
3026 | * NO_FCOE_FLAG | ||
3027 | */ | ||
3028 | return -ENOMEM; | ||
2703 | #endif | 3029 | #endif |
2704 | /* RSS */ | 3030 | /* RSS */ |
2705 | for_each_nondefault_eth_queue(bp, i) | 3031 | for_each_nondefault_eth_queue(bp, i) |
@@ -2729,30 +3055,6 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp) | |||
2729 | return 0; | 3055 | return 0; |
2730 | } | 3056 | } |
2731 | 3057 | ||
2732 | static int bnx2x_setup_irqs(struct bnx2x *bp) | ||
2733 | { | ||
2734 | int rc = 0; | ||
2735 | if (bp->flags & USING_MSIX_FLAG) { | ||
2736 | rc = bnx2x_req_msix_irqs(bp); | ||
2737 | if (rc) | ||
2738 | return rc; | ||
2739 | } else { | ||
2740 | bnx2x_ack_int(bp); | ||
2741 | rc = bnx2x_req_irq(bp); | ||
2742 | if (rc) { | ||
2743 | BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); | ||
2744 | return rc; | ||
2745 | } | ||
2746 | if (bp->flags & USING_MSI_FLAG) { | ||
2747 | bp->dev->irq = bp->pdev->irq; | ||
2748 | netdev_info(bp->dev, "using MSI IRQ %d\n", | ||
2749 | bp->pdev->irq); | ||
2750 | } | ||
2751 | } | ||
2752 | |||
2753 | return 0; | ||
2754 | } | ||
2755 | |||
2756 | void bnx2x_free_mem_bp(struct bnx2x *bp) | 3058 | void bnx2x_free_mem_bp(struct bnx2x *bp) |
2757 | { | 3059 | { |
2758 | kfree(bp->fp); | 3060 | kfree(bp->fp); |
@@ -2792,7 +3094,7 @@ alloc_err: | |||
2792 | 3094 | ||
2793 | } | 3095 | } |
2794 | 3096 | ||
2795 | static int bnx2x_reload_if_running(struct net_device *dev) | 3097 | int bnx2x_reload_if_running(struct net_device *dev) |
2796 | { | 3098 | { |
2797 | struct bnx2x *bp = netdev_priv(dev); | 3099 | struct bnx2x *bp = netdev_priv(dev); |
2798 | 3100 | ||
@@ -2803,6 +3105,55 @@ static int bnx2x_reload_if_running(struct net_device *dev) | |||
2803 | return bnx2x_nic_load(bp, LOAD_NORMAL); | 3105 | return bnx2x_nic_load(bp, LOAD_NORMAL); |
2804 | } | 3106 | } |
2805 | 3107 | ||
3108 | int bnx2x_get_cur_phy_idx(struct bnx2x *bp) | ||
3109 | { | ||
3110 | u32 sel_phy_idx = 0; | ||
3111 | if (bp->link_params.num_phys <= 1) | ||
3112 | return INT_PHY; | ||
3113 | |||
3114 | if (bp->link_vars.link_up) { | ||
3115 | sel_phy_idx = EXT_PHY1; | ||
3116 | /* In case link is SERDES, check if the EXT_PHY2 is the one */ | ||
3117 | if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && | ||
3118 | (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) | ||
3119 | sel_phy_idx = EXT_PHY2; | ||
3120 | } else { | ||
3121 | |||
3122 | switch (bnx2x_phy_selection(&bp->link_params)) { | ||
3123 | case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: | ||
3124 | case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: | ||
3125 | case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: | ||
3126 | sel_phy_idx = EXT_PHY1; | ||
3127 | break; | ||
3128 | case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: | ||
3129 | case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: | ||
3130 | sel_phy_idx = EXT_PHY2; | ||
3131 | break; | ||
3132 | } | ||
3133 | } | ||
3134 | |||
3135 | return sel_phy_idx; | ||
3136 | |||
3137 | } | ||
3138 | int bnx2x_get_link_cfg_idx(struct bnx2x *bp) | ||
3139 | { | ||
3140 | u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); | ||
3141 | /* | ||
3142 | * The selected active PHY is always the one after swapping (in case PHY | ||
3143 | * swapping is enabled). So when swapping is enabled, we need to reverse | ||
3144 | * the configuration. | ||
3145 | */ | ||
3146 | |||
3147 | if (bp->link_params.multi_phy_config & | ||
3148 | PORT_HW_CFG_PHY_SWAPPED_ENABLED) { | ||
3149 | if (sel_phy_idx == EXT_PHY1) | ||
3150 | sel_phy_idx = EXT_PHY2; | ||
3151 | else if (sel_phy_idx == EXT_PHY2) | ||
3152 | sel_phy_idx = EXT_PHY1; | ||
3153 | } | ||
3154 | return LINK_CONFIG_IDX(sel_phy_idx); | ||
3155 | } | ||
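
bnx2x_get_link_cfg_idx() has to undo the PHY swap: the active PHY index is reported after swapping, while the per-PHY link configuration is stored in pre-swap order, so the two external PHY indices are mirrored whenever PORT_HW_CFG_PHY_SWAPPED_ENABLED is set. A small standalone sketch of that mirroring (the enum ordering below is illustrative only):

#include <stdio.h>

enum { INT_PHY, EXT_PHY1, EXT_PHY2 };	/* illustrative ordering only */

/* Mirror the active PHY index when PHY swapping is enabled, as in
 * bnx2x_get_link_cfg_idx(): the active PHY is reported after swapping,
 * while the configuration is stored in pre-swap order. */
static int link_cfg_phy_idx(int sel_phy_idx, int swapped)
{
	if (swapped) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return sel_phy_idx;
}

int main(void)
{
	printf("EXT_PHY1, no swap: %d\n", link_cfg_phy_idx(EXT_PHY1, 0));
	printf("EXT_PHY1, swapped: %d\n", link_cfg_phy_idx(EXT_PHY1, 1));
	return 0;
}
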
3156 | |||
2806 | /* called with rtnl_lock */ | 3157 | /* called with rtnl_lock */ |
2807 | int bnx2x_change_mtu(struct net_device *dev, int new_mtu) | 3158 | int bnx2x_change_mtu(struct net_device *dev, int new_mtu) |
2808 | { | 3159 | { |
@@ -2954,3 +3305,57 @@ int bnx2x_resume(struct pci_dev *pdev) | |||
2954 | 3305 | ||
2955 | return rc; | 3306 | return rc; |
2956 | } | 3307 | } |
3308 | |||
3309 | |||
3310 | void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, | ||
3311 | u32 cid) | ||
3312 | { | ||
3313 | /* ustorm cxt validation */ | ||
3314 | cxt->ustorm_ag_context.cdu_usage = | ||
3315 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), | ||
3316 | CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); | ||
3317 | /* xcontext validation */ | ||
3318 | cxt->xstorm_ag_context.cdu_reserved = | ||
3319 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), | ||
3320 | CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); | ||
3321 | } | ||
3322 | |||
3323 | static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, | ||
3324 | u8 fw_sb_id, u8 sb_index, | ||
3325 | u8 ticks) | ||
3326 | { | ||
3327 | |||
3328 | u32 addr = BAR_CSTRORM_INTMEM + | ||
3329 | CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); | ||
3330 | REG_WR8(bp, addr, ticks); | ||
3331 | DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n", | ||
3332 | port, fw_sb_id, sb_index, ticks); | ||
3333 | } | ||
3334 | |||
3335 | static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, | ||
3336 | u16 fw_sb_id, u8 sb_index, | ||
3337 | u8 disable) | ||
3338 | { | ||
3339 | u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); | ||
3340 | u32 addr = BAR_CSTRORM_INTMEM + | ||
3341 | CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); | ||
3342 | u16 flags = REG_RD16(bp, addr); | ||
3343 | /* clear and set */ | ||
3344 | flags &= ~HC_INDEX_DATA_HC_ENABLED; | ||
3345 | flags |= enable_flag; | ||
3346 | REG_WR16(bp, addr, flags); | ||
3347 | DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n", | ||
3348 | port, fw_sb_id, sb_index, disable); | ||
3349 | } | ||
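
storm_memset_hc_disable() performs a 16-bit read-modify-write on the status block flags word: the HC_ENABLED bit is always cleared first and then set again only when the index is not being disabled, which preserves the unrelated bits in the same word. A standalone sketch of that clear-and-set step (the bit position and the initial register value are made up for illustration):

#include <stdio.h>

#define HC_ENABLED_SHIFT 0			/* illustrative bit position */
#define HC_ENABLED       (1u << HC_ENABLED_SHIFT)

/* Clear-and-set update of a flags word, preserving unrelated bits,
 * mirroring the REG_RD16/REG_WR16 sequence in storm_memset_hc_disable(). */
static unsigned short update_hc_flags(unsigned short flags, int disable)
{
	unsigned short enable_flag = disable ? 0 : HC_ENABLED;

	flags &= ~HC_ENABLED;	/* clear */
	flags |= enable_flag;	/* set only if still enabled */
	return flags;
}

int main(void)
{
	unsigned short flags = 0x00f2;	/* made-up initial register value */

	printf("enable:  0x%04x\n", update_hc_flags(flags, 0));
	printf("disable: 0x%04x\n", update_hc_flags(flags, 1));
	return 0;
}
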
3350 | |||
3351 | void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, | ||
3352 | u8 sb_index, u8 disable, u16 usec) | ||
3353 | { | ||
3354 | int port = BP_PORT(bp); | ||
3355 | u8 ticks = usec / BNX2X_BTR; | ||
3356 | |||
3357 | storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); | ||
3358 | |||
3359 | disable = disable ? 1 : (usec ? 0 : 1); | ||
3360 | storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); | ||
3361 | } | ||
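
bnx2x_update_coalesce_sb_index() converts the requested coalescing interval from microseconds to firmware ticks by dividing by the base tick resolution BNX2X_BTR, and then derives the disable flag: the index stays enabled only if it was not explicitly disabled and the interval is non-zero. A standalone sketch of both conversions (the 4 microsecond base tick here is an assumed stand-in for BNX2X_BTR, not a value confirmed by this patch):

#include <stdio.h>

#define BTR_USEC 4	/* assumed stand-in for BNX2X_BTR, illustration only */

struct hc_index_cfg { unsigned char ticks; int disable; };

/* usec -> ticks plus the "disable if zero interval" rule from
 * bnx2x_update_coalesce_sb_index(). */
static struct hc_index_cfg coalesce_cfg(unsigned int usec, int disable)
{
	struct hc_index_cfg cfg;

	cfg.ticks = usec / BTR_USEC;
	cfg.disable = disable ? 1 : (usec ? 0 : 1);
	return cfg;
}

int main(void)
{
	struct hc_index_cfg a = coalesce_cfg(48, 0);	/* 48us -> 12 ticks, enabled */
	struct hc_index_cfg b = coalesce_cfg(0, 0);	/* zero interval -> disabled */

	printf("48us: ticks %d disable %d\n", a.ticks, a.disable);
	printf(" 0us: ticks %d disable %d\n", b.ticks, b.disable);
	return 0;
}
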
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h index 1a3545bd8a92..c016e20c5c2b 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.h +++ b/drivers/net/bnx2x/bnx2x_cmn.h | |||
@@ -18,11 +18,15 @@ | |||
18 | #define BNX2X_CMN_H | 18 | #define BNX2X_CMN_H |
19 | 19 | ||
20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
21 | #include <linux/pci.h> | ||
21 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
22 | 23 | ||
23 | 24 | ||
24 | #include "bnx2x.h" | 25 | #include "bnx2x.h" |
25 | 26 | ||
27 | /* This is used as a replacement for an MCP if it's not present */ | ||
28 | extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ | ||
29 | |||
26 | extern int num_queues; | 30 | extern int num_queues; |
27 | 31 | ||
28 | /************************ Macros ********************************/ | 32 | /************************ Macros ********************************/ |
@@ -61,6 +65,73 @@ extern int num_queues; | |||
61 | /*********************** Interfaces **************************** | 65 | /*********************** Interfaces **************************** |
62 | * Functions that need to be implemented by each driver version | 66 | * Functions that need to be implemented by each driver version |
63 | */ | 67 | */ |
68 | /* Init */ | ||
69 | |||
70 | /** | ||
71 | * bnx2x_send_unload_req - request unload mode from the MCP. | ||
72 | * | ||
73 | * @bp: driver handle | ||
74 | * @unload_mode: requested function's unload mode | ||
75 | * | ||
76 | * Return unload mode returned by the MCP: COMMON, PORT or FUNC. | ||
77 | */ | ||
78 | u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode); | ||
79 | |||
80 | /** | ||
81 | * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. | ||
82 | * | ||
83 | * @bp: driver handle | ||
84 | */ | ||
85 | void bnx2x_send_unload_done(struct bnx2x *bp); | ||
86 | |||
87 | /** | ||
88 | * bnx2x_config_rss_pf - configure RSS parameters. | ||
89 | * | ||
90 | * @bp: driver handle | ||
91 | * @ind_table: indirection table to configure | ||
92 | * @config_hash: re-configure RSS hash keys configuration | ||
93 | */ | ||
94 | int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash); | ||
95 | |||
96 | /** | ||
97 | * bnx2x__init_func_obj - init function object | ||
98 | * | ||
99 | * @bp: driver handle | ||
100 | * | ||
101 | * Initializes the Function Object with the appropriate | ||
102 | * parameters which include a function slow path driver | ||
103 | * interface. | ||
104 | */ | ||
105 | void bnx2x__init_func_obj(struct bnx2x *bp); | ||
106 | |||
107 | /** | ||
108 | * bnx2x_setup_queue - setup eth queue. | ||
109 | * | ||
110 | * @bp: driver handle | ||
111 | * @fp: pointer to the fastpath structure | ||
112 | * @leading: boolean | ||
113 | * | ||
114 | */ | ||
115 | int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
116 | bool leading); | ||
117 | |||
118 | /** | ||
119 | * bnx2x_setup_leading - bring up a leading eth queue. | ||
120 | * | ||
121 | * @bp: driver handle | ||
122 | */ | ||
123 | int bnx2x_setup_leading(struct bnx2x *bp); | ||
124 | |||
125 | /** | ||
126 | * bnx2x_fw_command - send the MCP a request | ||
127 | * | ||
128 | * @bp: driver handle | ||
129 | * @command: request | ||
130 | * @param: request's parameter | ||
131 | * | ||
132 | * block until there is a reply | ||
133 | */ | ||
134 | u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); | ||
64 | 135 | ||
65 | /** | 136 | /** |
66 | * bnx2x_initial_phy_init - initialize link parameters structure variables. | 137 | * bnx2x_initial_phy_init - initialize link parameters structure variables. |
@@ -88,6 +159,32 @@ void bnx2x_link_set(struct bnx2x *bp); | |||
88 | u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes); | 159 | u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes); |
89 | 160 | ||
90 | /** | 161 | /** |
162 | * bnx2x_drv_pulse - write driver pulse to shmem | ||
163 | * | ||
164 | * @bp: driver handle | ||
165 | * | ||
166 | * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox | ||
167 | * in the shmem. | ||
168 | */ | ||
169 | void bnx2x_drv_pulse(struct bnx2x *bp); | ||
170 | |||
171 | /** | ||
172 | * bnx2x_igu_ack_sb - update IGU with current SB value | ||
173 | * | ||
174 | * @bp: driver handle | ||
175 | * @igu_sb_id: SB id | ||
176 | * @segment: SB segment | ||
177 | * @index: SB index | ||
178 | * @op: SB operation | ||
179 | * @update: is HW update required | ||
180 | */ | ||
181 | void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, | ||
182 | u16 index, u8 op, u8 update); | ||
183 | |||
184 | /* Disable transactions from chip to host */ | ||
185 | void bnx2x_pf_disable(struct bnx2x *bp); | ||
186 | |||
187 | /** | ||
91 | * bnx2x__link_status_update - handles link status change. | 188 | * bnx2x__link_status_update - handles link status change. |
92 | * | 189 | * |
93 | * @bp: driver handle | 190 | * @bp: driver handle |
@@ -165,21 +262,6 @@ void bnx2x_int_enable(struct bnx2x *bp); | |||
165 | void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); | 262 | void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); |
166 | 263 | ||
167 | /** | 264 | /** |
168 | * bnx2x_init_firmware - loads device firmware | ||
169 | * | ||
170 | * @bp: driver handle | ||
171 | */ | ||
172 | int bnx2x_init_firmware(struct bnx2x *bp); | ||
173 | |||
174 | /** | ||
175 | * bnx2x_init_hw - init HW blocks according to current initialization stage. | ||
176 | * | ||
177 | * @bp: driver handle | ||
178 | * @load_code: COMMON, PORT or FUNCTION | ||
179 | */ | ||
180 | int bnx2x_init_hw(struct bnx2x *bp, u32 load_code); | ||
181 | |||
182 | /** | ||
183 | * bnx2x_nic_init - init driver internals. | 265 | * bnx2x_nic_init - init driver internals. |
184 | * | 266 | * |
185 | * @bp: driver handle | 267 | * @bp: driver handle |
@@ -207,16 +289,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp); | |||
207 | void bnx2x_free_mem(struct bnx2x *bp); | 289 | void bnx2x_free_mem(struct bnx2x *bp); |
208 | 290 | ||
209 | /** | 291 | /** |
210 | * bnx2x_setup_client - setup eth client. | ||
211 | * | ||
212 | * @bp: driver handle | ||
213 | * @fp: pointer to fastpath structure | ||
214 | * @is_leading: boolean | ||
215 | */ | ||
216 | int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
217 | int is_leading); | ||
218 | |||
219 | /** | ||
220 | * bnx2x_set_num_queues - set number of queues according to mode. | 292 | * bnx2x_set_num_queues - set number of queues according to mode. |
221 | * | 293 | * |
222 | * @bp: driver handle | 294 | * @bp: driver handle |
@@ -252,36 +324,21 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource); | |||
252 | int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); | 324 | int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); |
253 | 325 | ||
254 | /** | 326 | /** |
255 | * bnx2x_set_eth_mac - configure eth MAC address in the HW | 327 | * bnx2x_release_leader_lock - release recovery leader lock |
256 | * | 328 | * |
257 | * @bp: driver handle | 329 | * @bp: driver handle |
258 | * @set: set or clear | ||
259 | * | ||
260 | * Configures according to the value in netdev->dev_addr. | ||
261 | */ | 330 | */ |
262 | void bnx2x_set_eth_mac(struct bnx2x *bp, int set); | 331 | int bnx2x_release_leader_lock(struct bnx2x *bp); |
263 | 332 | ||
264 | #ifdef BCM_CNIC | ||
265 | /** | 333 | /** |
266 | * bnx2x_set_fip_eth_mac_addr - Set/Clear FIP MAC(s) | 334 | * bnx2x_set_eth_mac - configure eth MAC address in the HW |
267 | * | ||
268 | * @bp: driver handle | ||
269 | * @set: set or clear the CAM entry | ||
270 | * | ||
271 | * Uses the next entries in the CAM after the ETH MAC(s). | ||
272 | * This function will wait until the ramrod completion returns. | ||
273 | * Return 0 on success, -ENODEV if the ramrod doesn't return. | ||
274 | */ | ||
275 | int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set); | ||
276 | |||
277 | /** | ||
278 | * bnx2x_set_all_enode_macs - Set/Clear ALL_ENODE mcast MAC. | ||
279 | * | 335 | * |
280 | * @bp: driver handle | 336 | * @bp: driver handle |
281 | * @set: set or clear | 337 | * @set: set or clear |
338 | * | ||
339 | * Configures according to the value in netdev->dev_addr. | ||
282 | */ | 340 | */ |
283 | int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set); | 341 | int bnx2x_set_eth_mac(struct bnx2x *bp, bool set); |
284 | #endif | ||
285 | 342 | ||
286 | /** | 343 | /** |
287 | * bnx2x_set_rx_mode - set MAC filtering configurations. | 344 | * bnx2x_set_rx_mode - set MAC filtering configurations. |
@@ -289,6 +346,8 @@ int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set); | |||
289 | * @dev: netdevice | 346 | * @dev: netdevice |
290 | * | 347 | * |
291 | * called with netif_tx_lock from dev_mcast.c | 348 | * called with netif_tx_lock from dev_mcast.c |
349 | * If bp->state is OPEN, should be called with | ||
350 | * netif_addr_lock_bh() | ||
292 | */ | 351 | */ |
293 | void bnx2x_set_rx_mode(struct net_device *dev); | 352 | void bnx2x_set_rx_mode(struct net_device *dev); |
294 | 353 | ||
@@ -296,25 +355,38 @@ void bnx2x_set_rx_mode(struct net_device *dev); | |||
296 | * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. | 355 | * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. |
297 | * | 356 | * |
298 | * @bp: driver handle | 357 | * @bp: driver handle |
358 | * | ||
359 | * If bp->state is OPEN, should be called with | ||
360 | * netif_addr_lock_bh(). | ||
299 | */ | 361 | */ |
300 | void bnx2x_set_storm_rx_mode(struct bnx2x *bp); | 362 | void bnx2x_set_storm_rx_mode(struct bnx2x *bp); |
301 | 363 | ||
364 | /** | ||
365 | * bnx2x_set_q_rx_mode - configures rx_mode for a single queue. | ||
366 | * | ||
367 | * @bp: driver handle | ||
368 | * @cl_id: client id | ||
369 | * @rx_mode_flags: rx mode configuration | ||
370 | * @rx_accept_flags: rx accept configuration | ||
371 | * @tx_accept_flags: tx accept configuration (tx switch) | ||
372 | * @ramrod_flags: ramrod configuration | ||
373 | */ | ||
374 | void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, | ||
375 | unsigned long rx_mode_flags, | ||
376 | unsigned long rx_accept_flags, | ||
377 | unsigned long tx_accept_flags, | ||
378 | unsigned long ramrod_flags); | ||
379 | |||
302 | /* Parity errors related */ | 380 | /* Parity errors related */ |
303 | void bnx2x_inc_load_cnt(struct bnx2x *bp); | 381 | void bnx2x_inc_load_cnt(struct bnx2x *bp); |
304 | u32 bnx2x_dec_load_cnt(struct bnx2x *bp); | 382 | u32 bnx2x_dec_load_cnt(struct bnx2x *bp); |
305 | bool bnx2x_chk_parity_attn(struct bnx2x *bp); | 383 | bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print); |
306 | bool bnx2x_reset_is_done(struct bnx2x *bp); | 384 | bool bnx2x_reset_is_done(struct bnx2x *bp, int engine); |
385 | void bnx2x_set_reset_in_progress(struct bnx2x *bp); | ||
386 | void bnx2x_set_reset_global(struct bnx2x *bp); | ||
307 | void bnx2x_disable_close_the_gate(struct bnx2x *bp); | 387 | void bnx2x_disable_close_the_gate(struct bnx2x *bp); |
308 | 388 | ||
309 | /** | 389 | /** |
310 | * bnx2x_stats_handle - perform statistics handling according to event. | ||
311 | * | ||
312 | * @bp: driver handle | ||
313 | * @event: bnx2x_stats_event | ||
314 | */ | ||
315 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); | ||
316 | |||
317 | /** | ||
318 | * bnx2x_sp_event - handle ramrods completion. | 390 | * bnx2x_sp_event - handle ramrods completion. |
319 | * | 391 | * |
320 | * @fp: fastpath handle for the event | 392 | * @fp: fastpath handle for the event |
@@ -323,15 +395,6 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); | |||
323 | void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); | 395 | void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); |
324 | 396 | ||
325 | /** | 397 | /** |
326 | * bnx2x_func_start - init function | ||
327 | * | ||
328 | * @bp: driver handle | ||
329 | * | ||
330 | * Must be called before sending CLIENT_SETUP for the first client. | ||
331 | */ | ||
332 | int bnx2x_func_start(struct bnx2x *bp); | ||
333 | |||
334 | /** | ||
335 | * bnx2x_ilt_set_info - prepare ILT configurations. | 398 | * bnx2x_ilt_set_info - prepare ILT configurations. |
336 | * | 399 | * |
337 | * @bp: driver handle | 400 | * @bp: driver handle |
@@ -362,6 +425,10 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); | |||
362 | * @value: new value | 425 | * @value: new value |
363 | */ | 426 | */ |
364 | void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); | 427 | void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); |
428 | /* Error handling */ | ||
429 | void bnx2x_panic_dump(struct bnx2x *bp); | ||
430 | |||
431 | void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); | ||
365 | 432 | ||
366 | /* dev_close main block */ | 433 | /* dev_close main block */ |
367 | int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); | 434 | int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); |
@@ -375,11 +442,17 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); | |||
375 | /* select_queue callback */ | 442 | /* select_queue callback */ |
376 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); | 443 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); |
377 | 444 | ||
445 | /* reload helper */ | ||
446 | int bnx2x_reload_if_running(struct net_device *dev); | ||
447 | |||
378 | int bnx2x_change_mac_addr(struct net_device *dev, void *p); | 448 | int bnx2x_change_mac_addr(struct net_device *dev, void *p); |
379 | 449 | ||
380 | /* NAPI poll Rx part */ | 450 | /* NAPI poll Rx part */ |
381 | int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); | 451 | int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); |
382 | 452 | ||
453 | void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
454 | u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod); | ||
455 | |||
383 | /* NAPI poll Tx part */ | 456 | /* NAPI poll Tx part */ |
384 | int bnx2x_tx_int(struct bnx2x_fastpath *fp); | 457 | int bnx2x_tx_int(struct bnx2x_fastpath *fp); |
385 | 458 | ||
@@ -392,7 +465,6 @@ void bnx2x_free_irq(struct bnx2x *bp); | |||
392 | 465 | ||
393 | void bnx2x_free_fp_mem(struct bnx2x *bp); | 466 | void bnx2x_free_fp_mem(struct bnx2x *bp); |
394 | int bnx2x_alloc_fp_mem(struct bnx2x *bp); | 467 | int bnx2x_alloc_fp_mem(struct bnx2x *bp); |
395 | |||
396 | void bnx2x_init_rx_rings(struct bnx2x *bp); | 468 | void bnx2x_init_rx_rings(struct bnx2x *bp); |
397 | void bnx2x_free_skbs(struct bnx2x *bp); | 469 | void bnx2x_free_skbs(struct bnx2x *bp); |
398 | void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); | 470 | void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); |
@@ -457,19 +529,20 @@ int bnx2x_set_features(struct net_device *dev, u32 features); | |||
457 | */ | 529 | */ |
458 | void bnx2x_tx_timeout(struct net_device *dev); | 530 | void bnx2x_tx_timeout(struct net_device *dev); |
459 | 531 | ||
532 | /*********************** Inlines **********************************/ | ||
533 | /*********************** Fast path ********************************/ | ||
460 | static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) | 534 | static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) |
461 | { | 535 | { |
462 | barrier(); /* status block is written to by the chip */ | 536 | barrier(); /* status block is written to by the chip */ |
463 | fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; | 537 | fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; |
464 | } | 538 | } |
465 | 539 | ||
466 | static inline void bnx2x_update_rx_prod(struct bnx2x *bp, | 540 | static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp, |
467 | struct bnx2x_fastpath *fp, | 541 | struct bnx2x_fastpath *fp, u16 bd_prod, |
468 | u16 bd_prod, u16 rx_comp_prod, | 542 | u16 rx_comp_prod, u16 rx_sge_prod, u32 start) |
469 | u16 rx_sge_prod) | ||
470 | { | 543 | { |
471 | struct ustorm_eth_rx_producers rx_prods = {0}; | 544 | struct ustorm_eth_rx_producers rx_prods = {0}; |
472 | int i; | 545 | u32 i; |
473 | 546 | ||
474 | /* Update producers */ | 547 | /* Update producers */ |
475 | rx_prods.bd_prod = bd_prod; | 548 | rx_prods.bd_prod = bd_prod; |
@@ -486,10 +559,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp, | |||
486 | */ | 559 | */ |
487 | wmb(); | 560 | wmb(); |
488 | 561 | ||
489 | for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++) | 562 | for (i = 0; i < sizeof(rx_prods)/4; i++) |
490 | REG_WR(bp, | 563 | REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]); |
491 | BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4, | ||
492 | ((u32 *)&rx_prods)[i]); | ||
493 | 564 | ||
494 | mmiowb(); /* keep prod updates ordered */ | 565 | mmiowb(); /* keep prod updates ordered */ |
495 | 566 | ||
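
The loop in this hunk pushes the producers structure to the chip as a series of 32-bit register writes starting at the offset passed in `start`. Below is a minimal user-space sketch of that word-by-word copy; the struct layout, reg_wr() and the register array are stand-ins for illustration, not driver code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rx_producers {			/* stand-in for ustorm_eth_rx_producers */
	uint16_t bd_prod;
	uint16_t cqe_prod;
	uint16_t sge_prod;
	uint16_t reserved;
};

static uint32_t regs[64];		/* fake register window */

static void reg_wr(uint32_t addr, uint32_t val)	/* stand-in for REG_WR */
{
	regs[addr / 4] = val;
}

int main(void)
{
	struct rx_producers prods = { .bd_prod = 100, .cqe_prod = 200, .sge_prod = 50 };
	uint32_t words[sizeof(prods) / 4];
	uint32_t start = 0x40;		/* arbitrary offset into the window */
	size_t i;

	/* copy the struct out as 32-bit words, one register write per word */
	memcpy(words, &prods, sizeof(prods));
	for (i = 0; i < sizeof(prods) / 4; i++)
		reg_wr(start + i * 4, words[i]);

	printf("wrote %zu words at offset 0x%x\n", sizeof(prods) / 4, (unsigned)start);
	return 0;
}
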
@@ -519,7 +590,7 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, | |||
519 | barrier(); | 590 | barrier(); |
520 | } | 591 | } |
521 | 592 | ||
522 | static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, | 593 | static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, |
523 | u8 idu_sb_id, bool is_Pf) | 594 | u8 idu_sb_id, bool is_Pf) |
524 | { | 595 | { |
525 | u32 data, ctl, cnt = 100; | 596 | u32 data, ctl, cnt = 100; |
@@ -527,7 +598,7 @@ static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, | |||
527 | u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; | 598 | u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; |
528 | u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; | 599 | u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; |
529 | u32 sb_bit = 1 << (idu_sb_id%32); | 600 | u32 sb_bit = 1 << (idu_sb_id%32); |
530 | u32 func_encode = BP_FUNC(bp) | | 601 | u32 func_encode = func | |
531 | ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT); | 602 | ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT); |
532 | u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; | 603 | u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; |
533 | 604 | ||
@@ -590,15 +661,6 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, | |||
590 | barrier(); | 661 | barrier(); |
591 | } | 662 | } |
592 | 663 | ||
593 | static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, | ||
594 | u16 index, u8 op, u8 update) | ||
595 | { | ||
596 | u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; | ||
597 | |||
598 | bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, | ||
599 | igu_addr); | ||
600 | } | ||
601 | |||
602 | static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm, | 664 | static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm, |
603 | u16 index, u8 op, u8 update) | 665 | u16 index, u8 op, u8 update) |
604 | { | 666 | { |
@@ -705,7 +767,7 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) | |||
705 | } | 767 | } |
706 | 768 | ||
707 | /** | 769 | /** |
708 | * disables tx from stack point of view | 770 | * bnx2x_tx_disable - disables tx from stack point of view |
709 | * | 771 | * |
710 | * @bp: driver handle | 772 | * @bp: driver handle |
711 | */ | 773 | */ |
@@ -740,7 +802,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp) | |||
740 | int i; | 802 | int i; |
741 | 803 | ||
742 | /* Add NAPI objects */ | 804 | /* Add NAPI objects */ |
743 | for_each_napi_queue(bp, i) | 805 | for_each_rx_queue(bp, i) |
744 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), | 806 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), |
745 | bnx2x_poll, BNX2X_NAPI_WEIGHT); | 807 | bnx2x_poll, BNX2X_NAPI_WEIGHT); |
746 | } | 808 | } |
@@ -749,7 +811,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp) | |||
749 | { | 811 | { |
750 | int i; | 812 | int i; |
751 | 813 | ||
752 | for_each_napi_queue(bp, i) | 814 | for_each_rx_queue(bp, i) |
753 | netif_napi_del(&bnx2x_fp(bp, i, napi)); | 815 | netif_napi_del(&bnx2x_fp(bp, i, napi)); |
754 | } | 816 | } |
755 | 817 | ||
@@ -779,7 +841,7 @@ static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) | |||
779 | int idx = RX_SGE_CNT * i - 1; | 841 | int idx = RX_SGE_CNT * i - 1; |
780 | 842 | ||
781 | for (j = 0; j < 2; j++) { | 843 | for (j = 0; j < 2; j++) { |
782 | SGE_MASK_CLEAR_BIT(fp, idx); | 844 | BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); |
783 | idx--; | 845 | idx--; |
784 | } | 846 | } |
785 | } | 847 | } |
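
These two hunks switch the SGE mask over to the generic BIT_VEC64_* helpers, which treat an array of 64-bit words as one long bit vector. A rough user-space sketch of that indexing follows; the names and vector size here are illustrative, not the driver's macros.

#include <stdint.h>
#include <stdio.h>

#define ELEM_SHIFT 6			/* 64 bits per vector element */
#define ELEM_SZ    (1ULL << ELEM_SHIFT)

static void vec64_set_bit(uint64_t *vec, unsigned int idx)
{
	vec[idx >> ELEM_SHIFT] |= 1ULL << (idx & (ELEM_SZ - 1));
}

static void vec64_clear_bit(uint64_t *vec, unsigned int idx)
{
	vec[idx >> ELEM_SHIFT] &= ~(1ULL << (idx & (ELEM_SZ - 1)));
}

static int vec64_test_bit(const uint64_t *vec, unsigned int idx)
{
	return (vec[idx >> ELEM_SHIFT] >> (idx & (ELEM_SZ - 1))) & 1;
}

int main(void)
{
	uint64_t mask[4] = { 0 };	/* 256-bit vector, e.g. an SGE mask */

	vec64_set_bit(mask, 130);
	printf("bit 130: %d\n", vec64_test_bit(mask, 130));	/* prints 1 */
	vec64_clear_bit(mask, 130);
	printf("bit 130: %d\n", vec64_test_bit(mask, 130));	/* prints 0 */
	return 0;
}
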
@@ -789,7 +851,7 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) | |||
789 | { | 851 | { |
790 | /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */ | 852 | /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */ |
791 | memset(fp->sge_mask, 0xff, | 853 | memset(fp->sge_mask, 0xff, |
792 | (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64)); | 854 | (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64)); |
793 | 855 | ||
794 | /* Clear the two last indices in the page to 1: | 856 | /* Clear the two last indices in the page to 1: |
795 | these are the indices that correspond to the "next" element, | 857 | these are the indices that correspond to the "next" element, |
@@ -871,12 +933,61 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, | |||
871 | dma_unmap_addr(cons_rx_buf, mapping), | 933 | dma_unmap_addr(cons_rx_buf, mapping), |
872 | RX_COPY_THRESH, DMA_FROM_DEVICE); | 934 | RX_COPY_THRESH, DMA_FROM_DEVICE); |
873 | 935 | ||
874 | prod_rx_buf->skb = cons_rx_buf->skb; | ||
875 | dma_unmap_addr_set(prod_rx_buf, mapping, | 936 | dma_unmap_addr_set(prod_rx_buf, mapping, |
876 | dma_unmap_addr(cons_rx_buf, mapping)); | 937 | dma_unmap_addr(cons_rx_buf, mapping)); |
938 | prod_rx_buf->skb = cons_rx_buf->skb; | ||
877 | *prod_bd = *cons_bd; | 939 | *prod_bd = *cons_bd; |
878 | } | 940 | } |
879 | 941 | ||
942 | /************************* Init ******************************************/ | ||
943 | |||
944 | /** | ||
945 | * bnx2x_func_start - init function | ||
946 | * | ||
947 | * @bp: driver handle | ||
948 | * | ||
949 | * Must be called before sending CLIENT_SETUP for the first client. | ||
950 | */ | ||
951 | static inline int bnx2x_func_start(struct bnx2x *bp) | ||
952 | { | ||
953 | struct bnx2x_func_state_params func_params = {0}; | ||
954 | struct bnx2x_func_start_params *start_params = | ||
955 | &func_params.params.start; | ||
956 | |||
957 | /* Prepare parameters for function state transitions */ | ||
958 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); | ||
959 | |||
960 | func_params.f_obj = &bp->func_obj; | ||
961 | func_params.cmd = BNX2X_F_CMD_START; | ||
962 | |||
963 | /* Function parameters */ | ||
964 | start_params->mf_mode = bp->mf_mode; | ||
965 | start_params->sd_vlan_tag = bp->mf_ov; | ||
966 | start_params->network_cos_mode = OVERRIDE_COS; | ||
967 | |||
968 | return bnx2x_func_state_change(bp, &func_params); | ||
969 | } | ||
970 | |||
971 | |||
972 | /** | ||
973 | * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format | ||
974 | * | ||
975 | * @fw_hi: pointer to upper part | ||
976 | * @fw_mid: pointer to middle part | ||
977 | * @fw_lo: pointer to lower part | ||
978 | * @mac: pointer to MAC address | ||
979 | */ | ||
980 | static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo, | ||
981 | u8 *mac) | ||
982 | { | ||
983 | ((u8 *)fw_hi)[0] = mac[1]; | ||
984 | ((u8 *)fw_hi)[1] = mac[0]; | ||
985 | ((u8 *)fw_mid)[0] = mac[3]; | ||
986 | ((u8 *)fw_mid)[1] = mac[2]; | ||
987 | ((u8 *)fw_lo)[0] = mac[5]; | ||
988 | ((u8 *)fw_lo)[1] = mac[4]; | ||
989 | } | ||
990 | |||
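
The helper above swaps each pair of MAC bytes into a 16-bit word for the firmware. A standalone user-space sketch of the same swizzle, showing what lands in each word for a sample (arbitrary) address:

#include <stdint.h>
#include <stdio.h>

static void set_fw_mac_addr(uint16_t *fw_hi, uint16_t *fw_mid,
			    uint16_t *fw_lo, const uint8_t *mac)
{
	((uint8_t *)fw_hi)[0]  = mac[1];
	((uint8_t *)fw_hi)[1]  = mac[0];
	((uint8_t *)fw_mid)[0] = mac[3];
	((uint8_t *)fw_mid)[1] = mac[2];
	((uint8_t *)fw_lo)[0]  = mac[5];
	((uint8_t *)fw_lo)[1]  = mac[4];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	uint16_t hi, mid, lo;

	set_fw_mac_addr(&hi, &mid, &lo, mac);
	/* on a little-endian host this prints: 0010 18aa bbcc */
	printf("%04x %04x %04x\n", hi, mid, lo);
	return 0;
}
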
880 | static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, | 991 | static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, |
881 | struct bnx2x_fastpath *fp, int last) | 992 | struct bnx2x_fastpath *fp, int last) |
882 | { | 993 | { |
@@ -895,21 +1006,20 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, | |||
895 | int i; | 1006 | int i; |
896 | 1007 | ||
897 | for (i = 0; i < last; i++) { | 1008 | for (i = 0; i < last; i++) { |
898 | struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]); | 1009 | struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; |
899 | struct sk_buff *skb = rx_buf->skb; | 1010 | struct sw_rx_bd *first_buf = &tpa_info->first_buf; |
1011 | struct sk_buff *skb = first_buf->skb; | ||
900 | 1012 | ||
901 | if (skb == NULL) { | 1013 | if (skb == NULL) { |
902 | DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); | 1014 | DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); |
903 | continue; | 1015 | continue; |
904 | } | 1016 | } |
905 | 1017 | if (tpa_info->tpa_state == BNX2X_TPA_START) | |
906 | if (fp->tpa_state[i] == BNX2X_TPA_START) | ||
907 | dma_unmap_single(&bp->pdev->dev, | 1018 | dma_unmap_single(&bp->pdev->dev, |
908 | dma_unmap_addr(rx_buf, mapping), | 1019 | dma_unmap_addr(first_buf, mapping), |
909 | fp->rx_buf_size, DMA_FROM_DEVICE); | 1020 | fp->rx_buf_size, DMA_FROM_DEVICE); |
910 | |||
911 | dev_kfree_skb(skb); | 1021 | dev_kfree_skb(skb); |
912 | rx_buf->skb = NULL; | 1022 | first_buf->skb = NULL; |
913 | } | 1023 | } |
914 | } | 1024 | } |
915 | 1025 | ||
@@ -1038,31 +1148,201 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, | |||
1038 | return i - fp->eth_q_stats.rx_skb_alloc_failed; | 1148 | return i - fp->eth_q_stats.rx_skb_alloc_failed; |
1039 | } | 1149 | } |
1040 | 1150 | ||
1151 | /* Statistics IDs are global per chip/path, while Client IDs for E1x are per | ||
1152 | * port. | ||
1153 | */ | ||
1154 | static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp) | ||
1155 | { | ||
1156 | if (!CHIP_IS_E1x(fp->bp)) | ||
1157 | return fp->cl_id; | ||
1158 | else | ||
1159 | return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x; | ||
1160 | } | ||
1161 | |||
1162 | static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, | ||
1163 | bnx2x_obj_type obj_type) | ||
1164 | { | ||
1165 | struct bnx2x *bp = fp->bp; | ||
1166 | |||
1167 | /* Configure classification DBs */ | ||
1168 | bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid, | ||
1169 | BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), | ||
1170 | bnx2x_sp_mapping(bp, mac_rdata), | ||
1171 | BNX2X_FILTER_MAC_PENDING, | ||
1172 | &bp->sp_state, obj_type, | ||
1173 | &bp->macs_pool); | ||
1174 | } | ||
1175 | |||
1176 | /** | ||
1177 | * bnx2x_get_path_func_num - get number of active functions | ||
1178 | * | ||
1179 | * @bp: driver handle | ||
1180 | * | ||
1181 | * Calculates the number of active (not hidden) functions on the | ||
1182 | * current path. | ||
1183 | */ | ||
1184 | static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) | ||
1185 | { | ||
1186 | u8 func_num = 0, i; | ||
1187 | |||
1188 | /* 57710 has only one function per-port */ | ||
1189 | if (CHIP_IS_E1(bp)) | ||
1190 | return 1; | ||
1191 | |||
1192 | /* Calculate the number of functions enabled on the current | ||
1193 | * PATH/PORT. | ||
1194 | */ | ||
1195 | if (CHIP_REV_IS_SLOW(bp)) { | ||
1196 | if (IS_MF(bp)) | ||
1197 | func_num = 4; | ||
1198 | else | ||
1199 | func_num = 2; | ||
1200 | } else { | ||
1201 | for (i = 0; i < E1H_FUNC_MAX / 2; i++) { | ||
1202 | u32 func_config = | ||
1203 | MF_CFG_RD(bp, | ||
1204 | func_mf_config[BP_PORT(bp) + 2 * i]. | ||
1205 | config); | ||
1206 | func_num += | ||
1207 | ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1); | ||
1208 | } | ||
1209 | } | ||
1210 | |||
1211 | WARN_ON(!func_num); | ||
1212 | |||
1213 | return func_num; | ||
1214 | } | ||
1215 | |||
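
bnx2x_get_path_func_num() above walks the per-function configuration for the port and counts every entry whose FUNC_MF_CFG_FUNC_HIDE bit is clear. A stripped-down user-space sketch of that counting step; the config array and the HIDE flag value are placeholders, not the real shmem layout.

#include <stdint.h>
#include <stdio.h>

#define CFG_FUNC_HIDE 0x1	/* placeholder for FUNC_MF_CFG_FUNC_HIDE */

int main(void)
{
	/* placeholder per-function config words as read from shmem */
	uint32_t func_config[4] = { 0x0, CFG_FUNC_HIDE, 0x0, CFG_FUNC_HIDE };
	unsigned int i, func_num = 0;

	for (i = 0; i < sizeof(func_config) / sizeof(func_config[0]); i++)
		func_num += (func_config[i] & CFG_FUNC_HIDE) ? 0 : 1;

	printf("%u active functions\n", func_num);	/* prints 2 */
	return 0;
}
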
1216 | static inline void bnx2x_init_bp_objs(struct bnx2x *bp) | ||
1217 | { | ||
1218 | /* RX_MODE controlling object */ | ||
1219 | bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj); | ||
1220 | |||
1221 | /* multicast configuration controlling object */ | ||
1222 | bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid, | ||
1223 | BP_FUNC(bp), BP_FUNC(bp), | ||
1224 | bnx2x_sp(bp, mcast_rdata), | ||
1225 | bnx2x_sp_mapping(bp, mcast_rdata), | ||
1226 | BNX2X_FILTER_MCAST_PENDING, &bp->sp_state, | ||
1227 | BNX2X_OBJ_TYPE_RX); | ||
1228 | |||
1229 | /* Setup CAM credit pools */ | ||
1230 | bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp), | ||
1231 | bnx2x_get_path_func_num(bp)); | ||
1232 | |||
1233 | /* RSS configuration object */ | ||
1234 | bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id, | ||
1235 | bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp), | ||
1236 | bnx2x_sp(bp, rss_rdata), | ||
1237 | bnx2x_sp_mapping(bp, rss_rdata), | ||
1238 | BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state, | ||
1239 | BNX2X_OBJ_TYPE_RX); | ||
1240 | } | ||
1241 | |||
1242 | static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) | ||
1243 | { | ||
1244 | if (CHIP_IS_E1x(fp->bp)) | ||
1245 | return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H; | ||
1246 | else | ||
1247 | return fp->cl_id; | ||
1248 | } | ||
1249 | |||
1250 | static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) | ||
1251 | { | ||
1252 | struct bnx2x *bp = fp->bp; | ||
1253 | |||
1254 | if (!CHIP_IS_E1x(bp)) | ||
1255 | return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); | ||
1256 | else | ||
1257 | return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); | ||
1258 | } | ||
1259 | |||
1260 | |||
1041 | #ifdef BCM_CNIC | 1261 | #ifdef BCM_CNIC |
1262 | static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) | ||
1263 | { | ||
1264 | return bp->cnic_base_cl_id + cl_idx + | ||
1265 | (bp->pf_num >> 1) * NONE_ETH_CONTEXT_USE; | ||
1266 | } | ||
1267 | |||
1268 | static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) | ||
1269 | { | ||
1270 | |||
1271 | /* the 'first' id is allocated for the cnic */ | ||
1272 | return bp->base_fw_ndsb; | ||
1273 | } | ||
1274 | |||
1275 | static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp) | ||
1276 | { | ||
1277 | return bp->igu_base_sb; | ||
1278 | } | ||
1279 | |||
1280 | |||
1042 | static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) | 1281 | static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) |
1043 | { | 1282 | { |
1044 | bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID + | 1283 | struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); |
1045 | BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; | 1284 | unsigned long q_type = 0; |
1285 | |||
1286 | bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, | ||
1287 | BNX2X_FCOE_ETH_CL_ID_IDX); | ||
1288 | /** Current BNX2X_FCOE_ETH_CID definition implies not more than | ||
1289 | * 16 ETH clients per function when CNIC is enabled! | ||
1290 | * | ||
1291 | * Fix it ASAP!!! | ||
1292 | */ | ||
1046 | bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID; | 1293 | bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID; |
1047 | bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; | 1294 | bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; |
1048 | bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; | 1295 | bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; |
1049 | bnx2x_fcoe(bp, bp) = bp; | 1296 | bnx2x_fcoe(bp, bp) = bp; |
1050 | bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED; | ||
1051 | bnx2x_fcoe(bp, index) = FCOE_IDX; | 1297 | bnx2x_fcoe(bp, index) = FCOE_IDX; |
1052 | bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; | 1298 | bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; |
1053 | bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX; | 1299 | bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX; |
1054 | /* qZone id equals to FW (per path) client id */ | 1300 | /* qZone id equals to FW (per path) client id */ |
1055 | bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) + | 1301 | bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); |
1056 | BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 : | ||
1057 | ETH_MAX_RX_CLIENTS_E1H); | ||
1058 | /* init shortcut */ | 1302 | /* init shortcut */ |
1059 | bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ? | 1303 | bnx2x_fcoe(bp, ustorm_rx_prods_offset) = |
1060 | USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) : | 1304 | bnx2x_rx_ustorm_prods_offset(fp); |
1061 | USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), bnx2x_fcoe_fp(bp)->cl_id); | 1305 | |
1062 | 1306 | /* Configure Queue State object */ | |
1307 | __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); | ||
1308 | __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); | ||
1309 | bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, fp->cid, BP_FUNC(bp), | ||
1310 | bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata), | ||
1311 | q_type); | ||
1312 | |||
1313 | DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d " | ||
1314 | "igu_sb %d\n", | ||
1315 | fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, | ||
1316 | fp->igu_sb_id); | ||
1063 | } | 1317 | } |
1064 | #endif | 1318 | #endif |
1065 | 1319 | ||
1320 | static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, | ||
1321 | struct bnx2x_fastpath *fp) | ||
1322 | { | ||
1323 | int cnt = 1000; | ||
1324 | |||
1325 | while (bnx2x_has_tx_work_unload(fp)) { | ||
1326 | if (!cnt) { | ||
1327 | BNX2X_ERR("timeout waiting for queue[%d]: " | ||
1328 | "fp->tx_pkt_prod(%d) != fp->tx_pkt_cons(%d)\n", | ||
1329 | fp->index, fp->tx_pkt_prod, fp->tx_pkt_cons); | ||
1330 | #ifdef BNX2X_STOP_ON_ERROR | ||
1331 | bnx2x_panic(); | ||
1332 | return -EBUSY; | ||
1333 | #else | ||
1334 | break; | ||
1335 | #endif | ||
1336 | } | ||
1337 | cnt--; | ||
1338 | usleep_range(1000, 1000); | ||
1339 | } | ||
1340 | |||
1341 | return 0; | ||
1342 | } | ||
1343 | |||
1344 | int bnx2x_get_link_cfg_idx(struct bnx2x *bp); | ||
1345 | |||
1066 | static inline void __storm_memset_struct(struct bnx2x *bp, | 1346 | static inline void __storm_memset_struct(struct bnx2x *bp, |
1067 | u32 addr, size_t size, u32 *data) | 1347 | u32 addr, size_t size, u32 *data) |
1068 | { | 1348 | { |
@@ -1071,42 +1351,78 @@ static inline void __storm_memset_struct(struct bnx2x *bp, | |||
1071 | REG_WR(bp, addr + (i * 4), data[i]); | 1351 | REG_WR(bp, addr + (i * 4), data[i]); |
1072 | } | 1352 | } |
1073 | 1353 | ||
1074 | static inline void storm_memset_mac_filters(struct bnx2x *bp, | 1354 | static inline void storm_memset_func_cfg(struct bnx2x *bp, |
1075 | struct tstorm_eth_mac_filter_config *mac_filters, | 1355 | struct tstorm_eth_function_common_config *tcfg, |
1076 | u16 abs_fid) | 1356 | u16 abs_fid) |
1077 | { | 1357 | { |
1078 | size_t size = sizeof(struct tstorm_eth_mac_filter_config); | 1358 | size_t size = sizeof(struct tstorm_eth_function_common_config); |
1079 | 1359 | ||
1080 | u32 addr = BAR_TSTRORM_INTMEM + | 1360 | u32 addr = BAR_TSTRORM_INTMEM + |
1081 | TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid); | 1361 | TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); |
1082 | 1362 | ||
1083 | __storm_memset_struct(bp, addr, size, (u32 *)mac_filters); | 1363 | __storm_memset_struct(bp, addr, size, (u32 *)tcfg); |
1084 | } | 1364 | } |
1085 | 1365 | ||
1086 | static inline void storm_memset_cmng(struct bnx2x *bp, | 1366 | static inline void storm_memset_cmng(struct bnx2x *bp, |
1087 | struct cmng_struct_per_port *cmng, | 1367 | struct cmng_struct_per_port *cmng, |
1088 | u8 port) | 1368 | u8 port) |
1089 | { | 1369 | { |
1090 | size_t size = | 1370 | size_t size = sizeof(struct cmng_struct_per_port); |
1091 | sizeof(struct rate_shaping_vars_per_port) + | ||
1092 | sizeof(struct fairness_vars_per_port) + | ||
1093 | sizeof(struct safc_struct_per_port) + | ||
1094 | sizeof(struct pfc_struct_per_port); | ||
1095 | 1371 | ||
1096 | u32 addr = BAR_XSTRORM_INTMEM + | 1372 | u32 addr = BAR_XSTRORM_INTMEM + |
1097 | XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); | 1373 | XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); |
1098 | 1374 | ||
1099 | __storm_memset_struct(bp, addr, size, (u32 *)cmng); | 1375 | __storm_memset_struct(bp, addr, size, (u32 *)cmng); |
1376 | } | ||
1100 | 1377 | ||
1101 | addr += size + 4 /* SKIP DCB+LLFC */; | 1378 | /** |
1102 | size = sizeof(struct cmng_struct_per_port) - | 1379 | * bnx2x_wait_sp_comp - wait for the outstanding SP commands. |
1103 | size /* written */ - 4 /*skipped*/; | 1380 | * |
1381 | * @bp: driver handle | ||
1382 | * @mask: bits that need to be cleared | ||
1383 | */ | ||
1384 | static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask) | ||
1385 | { | ||
1386 | int tout = 5000; /* Wait for 5 secs tops */ | ||
1387 | |||
1388 | while (tout--) { | ||
1389 | smp_mb(); | ||
1390 | netif_addr_lock_bh(bp->dev); | ||
1391 | if (!(bp->sp_state & mask)) { | ||
1392 | netif_addr_unlock_bh(bp->dev); | ||
1393 | return true; | ||
1394 | } | ||
1395 | netif_addr_unlock_bh(bp->dev); | ||
1104 | 1396 | ||
1105 | __storm_memset_struct(bp, addr, size, | 1397 | usleep_range(1000, 1000); |
1106 | (u32 *)(cmng->traffic_type_to_priority_cos)); | 1398 | } |
1399 | |||
1400 | smp_mb(); | ||
1401 | |||
1402 | netif_addr_lock_bh(bp->dev); | ||
1403 | if (bp->sp_state & mask) { | ||
1404 | BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, " | ||
1405 | "mask 0x%lx\n", bp->sp_state, mask); | ||
1406 | netif_addr_unlock_bh(bp->dev); | ||
1407 | return false; | ||
1408 | } | ||
1409 | netif_addr_unlock_bh(bp->dev); | ||
1410 | |||
1411 | return true; | ||
1107 | } | 1412 | } |
1108 | 1413 | ||
1109 | /* HW Lock for shared dual port PHYs */ | 1414 | /** |
1415 | * bnx2x_set_ctx_validation - set CDU context validation values | ||
1416 | * | ||
1417 | * @bp: driver handle | ||
1418 | * @cxt: context of the connection on the host memory | ||
1419 | * @cid: SW CID of the connection to be configured | ||
1420 | */ | ||
1421 | void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, | ||
1422 | u32 cid); | ||
1423 | |||
1424 | void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, | ||
1425 | u8 sb_index, u8 disable, u16 usec); | ||
1110 | void bnx2x_acquire_phy_lock(struct bnx2x *bp); | 1426 | void bnx2x_acquire_phy_lock(struct bnx2x *bp); |
1111 | void bnx2x_release_phy_lock(struct bnx2x *bp); | 1427 | void bnx2x_release_phy_lock(struct bnx2x *bp); |
1112 | 1428 | ||
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c index 410a49e571ac..b51a759c1036 100644 --- a/drivers/net/bnx2x/bnx2x_dcb.c +++ b/drivers/net/bnx2x/bnx2x_dcb.c | |||
@@ -47,34 +47,39 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, | |||
47 | struct cos_help_data *cos_data, | 47 | struct cos_help_data *cos_data, |
48 | u32 *pg_pri_orginal_spread, | 48 | u32 *pg_pri_orginal_spread, |
49 | struct dcbx_ets_feature *ets); | 49 | struct dcbx_ets_feature *ets); |
50 | static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp); | 50 | static void bnx2x_dcbx_fw_struct(struct bnx2x *bp); |
51 | 51 | ||
52 | 52 | ||
53 | static void bnx2x_pfc_set(struct bnx2x *bp) | 53 | static void bnx2x_pfc_set(struct bnx2x *bp) |
54 | { | 54 | { |
55 | struct bnx2x_nig_brb_pfc_port_params pfc_params = {0}; | 55 | struct bnx2x_nig_brb_pfc_port_params pfc_params = {0}; |
56 | u32 pri_bit, val = 0; | 56 | u32 pri_bit, val = 0; |
57 | u8 pri; | 57 | int i; |
58 | 58 | ||
59 | /* Tx COS configuration */ | 59 | pfc_params.num_of_rx_cos_priority_mask = |
60 | if (bp->dcbx_port_params.ets.cos_params[0].pauseable) | 60 | bp->dcbx_port_params.ets.num_of_cos; |
61 | pfc_params.rx_cos0_priority_mask = | ||
62 | bp->dcbx_port_params.ets.cos_params[0].pri_bitmask; | ||
63 | if (bp->dcbx_port_params.ets.cos_params[1].pauseable) | ||
64 | pfc_params.rx_cos1_priority_mask = | ||
65 | bp->dcbx_port_params.ets.cos_params[1].pri_bitmask; | ||
66 | 61 | ||
62 | /* Tx COS configuration */ | ||
63 | for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++) | ||
64 | /* | ||
65 | * We configure only the pauseable bits (non-pauseable bits aren't | ||
66 | * configured at all); this is done to avoid false pauses from | ||
67 | * the network | ||
68 | */ | ||
69 | pfc_params.rx_cos_priority_mask[i] = | ||
70 | bp->dcbx_port_params.ets.cos_params[i].pri_bitmask | ||
71 | & DCBX_PFC_PRI_PAUSE_MASK(bp); | ||
67 | 72 | ||
68 | /** | 73 | /* |
69 | * Rx COS configuration | 74 | * Rx COS configuration |
70 | * Changing PFC RX configuration . | 75 | * Changing PFC RX configuration . |
71 | * In RX COS0 will always be configured to lossy and COS1 to lossless | 76 | * In RX COS0 will always be configured to lossy and COS1 to lossless |
72 | */ | 77 | */ |
73 | for (pri = 0 ; pri < MAX_PFC_PRIORITIES ; pri++) { | 78 | for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) { |
74 | pri_bit = 1 << pri; | 79 | pri_bit = 1 << i; |
75 | 80 | ||
76 | if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)) | 81 | if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)) |
77 | val |= 1 << (pri * 4); | 82 | val |= 1 << (i * 4); |
78 | } | 83 | } |
79 | 84 | ||
80 | pfc_params.pkt_priority_to_cos = val; | 85 | pfc_params.pkt_priority_to_cos = val; |
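
The loop above packs a per-priority class-of-service value into pkt_priority_to_cos, four bits per priority: pauseable priorities get nibble value 1, the rest stay 0. A small sketch of that packing for an example pause mask (the mask value is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define MAX_PFC_PRIORITIES 8

int main(void)
{
	uint32_t pause_mask = 0x28;	/* example: priorities 3 and 5 are pauseable */
	uint32_t val = 0;
	int i;

	for (i = 0; i < MAX_PFC_PRIORITIES; i++) {
		uint32_t pri_bit = 1u << i;

		if (pri_bit & pause_mask)
			val |= 1u << (i * 4);	/* nibble i gets COS 1 */
	}

	/* priorities 3 and 5 -> nibbles 3 and 5 set: 0x00101000 */
	printf("pkt_priority_to_cos = 0x%08x\n", val);
	return 0;
}
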
@@ -253,12 +258,11 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, | |||
253 | 258 | ||
254 | 259 | ||
255 | /* Clean up old settings of ets on COS */ | 260 | /* Clean up old settings of ets on COS */ |
256 | for (i = 0; i < E2_NUM_OF_COS ; i++) { | 261 | for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) { |
257 | |||
258 | cos_params[i].pauseable = false; | 262 | cos_params[i].pauseable = false; |
259 | cos_params[i].strict = BNX2X_DCBX_COS_NOT_STRICT; | 263 | cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID; |
260 | cos_params[i].bw_tbl = DCBX_INVALID_COS_BW; | 264 | cos_params[i].bw_tbl = DCBX_INVALID_COS_BW; |
261 | cos_params[i].pri_bitmask = DCBX_PFC_PRI_GET_NON_PAUSE(bp, 0); | 265 | cos_params[i].pri_bitmask = 0; |
262 | } | 266 | } |
263 | 267 | ||
264 | if (bp->dcbx_port_params.app.enabled && | 268 | if (bp->dcbx_port_params.app.enabled && |
@@ -378,25 +382,19 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp, | |||
378 | 382 | ||
379 | static void bnx2x_pfc_set_pfc(struct bnx2x *bp) | 383 | static void bnx2x_pfc_set_pfc(struct bnx2x *bp) |
380 | { | 384 | { |
381 | if (CHIP_IS_E2(bp)) { | 385 | if (BP_PORT(bp)) { |
382 | if (BP_PORT(bp)) { | 386 | BNX2X_ERR("4 port mode is not supported"); |
383 | BNX2X_ERR("4 port mode is not supported"); | 387 | return; |
384 | return; | ||
385 | } | ||
386 | |||
387 | if (bp->dcbx_port_params.pfc.enabled) | ||
388 | |||
389 | /* 1. Fills up common PFC structures if required.*/ | ||
390 | /* 2. Configure NIG, MAC and BRB via the elink: | ||
391 | * elink must first check if BMAC is not in reset | ||
392 | * and only then configures the BMAC | ||
393 | * Or, configure EMAC. | ||
394 | */ | ||
395 | bnx2x_pfc_set(bp); | ||
396 | |||
397 | else | ||
398 | bnx2x_pfc_clear(bp); | ||
399 | } | 388 | } |
389 | |||
390 | if (bp->dcbx_port_params.pfc.enabled) | ||
391 | /* | ||
392 | * 1. Fills up common PFC structures if required | ||
393 | * 2. Configure NIG, MAC and BRB via the elink | ||
394 | */ | ||
395 | bnx2x_pfc_set(bp); | ||
396 | else | ||
397 | bnx2x_pfc_clear(bp); | ||
400 | } | 398 | } |
401 | 399 | ||
402 | static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) | 400 | static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) |
@@ -406,32 +404,27 @@ static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) | |||
406 | 0 /* connectionless */, | 404 | 0 /* connectionless */, |
407 | 0 /* dataHi is zero */, | 405 | 0 /* dataHi is zero */, |
408 | 0 /* dataLo is zero */, | 406 | 0 /* dataLo is zero */, |
409 | 1 /* common */); | 407 | NONE_CONNECTION_TYPE); |
410 | } | 408 | } |
411 | 409 | ||
412 | static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) | 410 | static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) |
413 | { | 411 | { |
414 | bnx2x_pfc_fw_struct_e2(bp); | 412 | bnx2x_dcbx_fw_struct(bp); |
415 | DP(NETIF_MSG_LINK, "sending START TRAFFIC\n"); | 413 | DP(NETIF_MSG_LINK, "sending START TRAFFIC\n"); |
416 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, | 414 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, |
417 | 0, /* connectionless */ | 415 | 0, /* connectionless */ |
418 | U64_HI(bnx2x_sp_mapping(bp, pfc_config)), | 416 | U64_HI(bnx2x_sp_mapping(bp, pfc_config)), |
419 | U64_LO(bnx2x_sp_mapping(bp, pfc_config)), | 417 | U64_LO(bnx2x_sp_mapping(bp, pfc_config)), |
420 | 1 /* commmon */); | 418 | NONE_CONNECTION_TYPE); |
421 | } | 419 | } |
422 | 420 | ||
423 | static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp) | 421 | static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) |
424 | { | 422 | { |
425 | struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); | 423 | struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); |
426 | u8 status = 0; | 424 | int rc = 0; |
427 | |||
428 | bnx2x_ets_disabled(&bp->link_params); | ||
429 | |||
430 | if (!ets->enabled) | ||
431 | return; | ||
432 | 425 | ||
433 | if ((ets->num_of_cos == 0) || (ets->num_of_cos > E2_NUM_OF_COS)) { | 426 | if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) { |
434 | BNX2X_ERR("illegal num of cos= %x", ets->num_of_cos); | 427 | BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos); |
435 | return; | 428 | return; |
436 | } | 429 | } |
437 | 430 | ||
@@ -440,9 +433,9 @@ static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp) | |||
440 | return; | 433 | return; |
441 | 434 | ||
442 | /* sanity */ | 435 | /* sanity */ |
443 | if (((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[0].strict) && | 436 | if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) && |
444 | (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) || | 437 | (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) || |
445 | ((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[1].strict) && | 438 | ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) && |
446 | (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) { | 439 | (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) { |
447 | BNX2X_ERR("all COS should have at least bw_limit or strict" | 440 | BNX2X_ERR("all COS should have at least bw_limit or strict" |
448 | "ets->cos_params[0].strict= %x" | 441 | "ets->cos_params[0].strict= %x" |
@@ -474,17 +467,70 @@ static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp) | |||
474 | 467 | ||
475 | bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1); | 468 | bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1); |
476 | } else { | 469 | } else { |
477 | if (ets->cos_params[0].strict == BNX2X_DCBX_COS_HIGH_STRICT) | 470 | if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST) |
478 | status = bnx2x_ets_strict(&bp->link_params, 0); | 471 | rc = bnx2x_ets_strict(&bp->link_params, 0); |
479 | else if (ets->cos_params[1].strict | 472 | else if (ets->cos_params[1].strict |
480 | == BNX2X_DCBX_COS_HIGH_STRICT) | 473 | == BNX2X_DCBX_STRICT_COS_HIGHEST) |
481 | status = bnx2x_ets_strict(&bp->link_params, 1); | 474 | rc = bnx2x_ets_strict(&bp->link_params, 1); |
482 | 475 | if (rc) | |
483 | if (status) | ||
484 | BNX2X_ERR("update_ets_params failed\n"); | 476 | BNX2X_ERR("update_ets_params failed\n"); |
485 | } | 477 | } |
486 | } | 478 | } |
487 | 479 | ||
480 | /* | ||
481 | * In E3B0 the configuration may have more than 2 COS. | ||
482 | */ | ||
483 | void bnx2x_dcbx_update_ets_config(struct bnx2x *bp) | ||
484 | { | ||
485 | struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); | ||
486 | struct bnx2x_ets_params ets_params = { 0 }; | ||
487 | u8 i; | ||
488 | |||
489 | ets_params.num_of_cos = ets->num_of_cos; | ||
490 | |||
491 | for (i = 0; i < ets->num_of_cos; i++) { | ||
492 | /* COS is SP */ | ||
493 | if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) { | ||
494 | if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) { | ||
495 | BNX2X_ERR("COS can't be not BW and not SP\n"); | ||
496 | return; | ||
497 | } | ||
498 | |||
499 | ets_params.cos[i].state = bnx2x_cos_state_strict; | ||
500 | ets_params.cos[i].params.sp_params.pri = | ||
501 | ets->cos_params[i].strict; | ||
502 | } else { /* COS is BW */ | ||
503 | if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) { | ||
504 | BNX2X_ERR("COS can't be not BW and not SP\n"); | ||
505 | return; | ||
506 | } | ||
507 | ets_params.cos[i].state = bnx2x_cos_state_bw; | ||
508 | ets_params.cos[i].params.bw_params.bw = | ||
509 | (u8)ets->cos_params[i].bw_tbl; | ||
510 | } | ||
511 | } | ||
512 | |||
513 | /* Configure the ETS in HW */ | ||
514 | if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars, | ||
515 | &ets_params)) { | ||
516 | BNX2X_ERR("bnx2x_ets_e3b0_config failed\n"); | ||
517 | bnx2x_ets_disabled(&bp->link_params, &bp->link_vars); | ||
518 | } | ||
519 | } | ||
520 | |||
521 | static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp) | ||
522 | { | ||
523 | bnx2x_ets_disabled(&bp->link_params, &bp->link_vars); | ||
524 | |||
525 | if (!bp->dcbx_port_params.ets.enabled) | ||
526 | return; | ||
527 | |||
528 | if (CHIP_IS_E3B0(bp)) | ||
529 | bnx2x_dcbx_update_ets_config(bp); | ||
530 | else | ||
531 | bnx2x_dcbx_2cos_limit_update_ets_config(bp); | ||
532 | } | ||
533 | |||
488 | #ifdef BCM_DCBNL | 534 | #ifdef BCM_DCBNL |
489 | static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp) | 535 | static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp) |
490 | { | 536 | { |
@@ -527,6 +573,7 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp) | |||
527 | BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n"); | 573 | BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n"); |
528 | return -EINVAL; | 574 | return -EINVAL; |
529 | } | 575 | } |
576 | |||
530 | rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset, | 577 | rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset, |
531 | DCBX_READ_LOCAL_MIB); | 578 | DCBX_READ_LOCAL_MIB); |
532 | 579 | ||
@@ -563,15 +610,6 @@ u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent) | |||
563 | DCB_APP_IDTYPE_ETHTYPE; | 610 | DCB_APP_IDTYPE_ETHTYPE; |
564 | } | 611 | } |
565 | 612 | ||
566 | static inline | ||
567 | void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp) | ||
568 | { | ||
569 | int i; | ||
570 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) | ||
571 | bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &= | ||
572 | ~DCBX_APP_ENTRY_VALID; | ||
573 | } | ||
574 | |||
575 | int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall) | 613 | int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall) |
576 | { | 614 | { |
577 | int i, err = 0; | 615 | int i, err = 0; |
@@ -597,32 +635,28 @@ int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall) | |||
597 | } | 635 | } |
598 | #endif | 636 | #endif |
599 | 637 | ||
638 | static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) | ||
639 | { | ||
640 | if (SHMEM2_HAS(bp, drv_flags)) { | ||
641 | u32 drv_flags; | ||
642 | bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS); | ||
643 | drv_flags = SHMEM2_RD(bp, drv_flags); | ||
644 | |||
645 | if (set) | ||
646 | SET_FLAGS(drv_flags, flags); | ||
647 | else | ||
648 | RESET_FLAGS(drv_flags, flags); | ||
649 | |||
650 | SHMEM2_WR(bp, drv_flags, drv_flags); | ||
651 | DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags); | ||
652 | bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS); | ||
653 | } | ||
654 | } | ||
655 | |||
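
bnx2x_update_drv_flags() above is a read-modify-write of a word shared through shmem, bracketed by the HW_LOCK_DRV_FLAGS hardware lock. A user-space analogue of the same set/clear-under-lock pattern, with a pthread mutex standing in for the hardware lock:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t drv_flags_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t drv_flags;		/* stand-in for the shmem word */

static void update_drv_flags(uint32_t flags, int set)
{
	pthread_mutex_lock(&drv_flags_lock);	/* HW lock stand-in */
	if (set)
		drv_flags |= flags;
	else
		drv_flags &= ~flags;
	pthread_mutex_unlock(&drv_flags_lock);
}

int main(void)
{
	update_drv_flags(0x2, 1);	/* e.g. mark DCB as configured */
	printf("drv_flags 0x%08x\n", drv_flags);	/* 0x00000002 */
	update_drv_flags(0x2, 0);
	printf("drv_flags 0x%08x\n", drv_flags);	/* 0x00000000 */
	return 0;
}
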
600 | void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | 656 | void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) |
601 | { | 657 | { |
602 | switch (state) { | 658 | switch (state) { |
603 | case BNX2X_DCBX_STATE_NEG_RECEIVED: | 659 | case BNX2X_DCBX_STATE_NEG_RECEIVED: |
604 | #ifdef BCM_CNIC | ||
605 | if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) { | ||
606 | struct cnic_ops *c_ops; | ||
607 | struct cnic_eth_dev *cp = &bp->cnic_eth_dev; | ||
608 | bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; | ||
609 | cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; | ||
610 | cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI; | ||
611 | |||
612 | rcu_read_lock(); | ||
613 | c_ops = rcu_dereference(bp->cnic_ops); | ||
614 | if (c_ops) { | ||
615 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_ISCSI_CMD); | ||
616 | rcu_read_unlock(); | ||
617 | return; | ||
618 | } | ||
619 | rcu_read_unlock(); | ||
620 | } | ||
621 | |||
622 | /* fall through if no CNIC initialized */ | ||
623 | case BNX2X_DCBX_STATE_ISCSI_STOPPED: | ||
624 | #endif | ||
625 | |||
626 | { | 660 | { |
627 | DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); | 661 | DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); |
628 | #ifdef BCM_DCBNL | 662 | #ifdef BCM_DCBNL |
@@ -646,41 +680,28 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | |||
646 | bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, | 680 | bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, |
647 | bp->dcbx_error); | 681 | bp->dcbx_error); |
648 | 682 | ||
649 | if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) { | 683 | /* mark DCBX result for PMF migration */ |
650 | #ifdef BCM_DCBNL | 684 | bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1); |
651 | /** | ||
652 | * Add new app tlvs to dcbnl | ||
653 | */ | ||
654 | bnx2x_dcbnl_update_applist(bp, false); | ||
655 | #endif | ||
656 | bnx2x_dcbx_stop_hw_tx(bp); | ||
657 | return; | ||
658 | } | ||
659 | /* fall through */ | ||
660 | #ifdef BCM_DCBNL | 685 | #ifdef BCM_DCBNL |
661 | /** | 686 | /** |
662 | * Invalidate the local app tlvs if they are not added | 687 | * Add new app tlvs to dcbnl |
663 | * to the dcbnl app list to avoid deleting them from | ||
664 | * the list later on | ||
665 | */ | 688 | */ |
666 | bnx2x_dcbx_invalidate_local_apps(bp); | 689 | bnx2x_dcbnl_update_applist(bp, false); |
667 | #endif | 690 | #endif |
691 | bnx2x_dcbx_stop_hw_tx(bp); | ||
692 | |||
693 | return; | ||
668 | } | 694 | } |
669 | case BNX2X_DCBX_STATE_TX_PAUSED: | 695 | case BNX2X_DCBX_STATE_TX_PAUSED: |
670 | DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n"); | 696 | DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n"); |
671 | bnx2x_pfc_set_pfc(bp); | 697 | bnx2x_pfc_set_pfc(bp); |
672 | 698 | ||
673 | bnx2x_dcbx_update_ets_params(bp); | 699 | bnx2x_dcbx_update_ets_params(bp); |
674 | if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) { | 700 | bnx2x_dcbx_resume_hw_tx(bp); |
675 | bnx2x_dcbx_resume_hw_tx(bp); | 701 | return; |
676 | return; | ||
677 | } | ||
678 | /* fall through */ | ||
679 | case BNX2X_DCBX_STATE_TX_RELEASED: | 702 | case BNX2X_DCBX_STATE_TX_RELEASED: |
680 | DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n"); | 703 | DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n"); |
681 | if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) | 704 | bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0); |
682 | bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0); | ||
683 | |||
684 | return; | 705 | return; |
685 | default: | 706 | default: |
686 | BNX2X_ERR("Unknown DCBX_STATE\n"); | 707 | BNX2X_ERR("Unknown DCBX_STATE\n"); |
@@ -868,7 +889,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, | |||
868 | 889 | ||
869 | void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) | 890 | void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) |
870 | { | 891 | { |
871 | if (CHIP_IS_E2(bp) && !CHIP_MODE_IS_4_PORT(bp)) { | 892 | if (!CHIP_IS_E1x(bp) && !CHIP_MODE_IS_4_PORT(bp)) { |
872 | bp->dcb_state = dcb_on; | 893 | bp->dcb_state = dcb_on; |
873 | bp->dcbx_enabled = dcbx_enabled; | 894 | bp->dcbx_enabled = dcbx_enabled; |
874 | } else { | 895 | } else { |
@@ -966,7 +987,7 @@ void bnx2x_dcbx_init(struct bnx2x *bp) | |||
966 | DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n", | 987 | DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n", |
967 | bp->dcb_state, bp->port.pmf); | 988 | bp->dcb_state, bp->port.pmf); |
968 | 989 | ||
969 | if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf && | 990 | if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf && |
970 | SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { | 991 | SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { |
971 | dcbx_lldp_params_offset = | 992 | dcbx_lldp_params_offset = |
972 | SHMEM2_RD(bp, dcbx_lldp_params_offset); | 993 | SHMEM2_RD(bp, dcbx_lldp_params_offset); |
@@ -974,6 +995,8 @@ void bnx2x_dcbx_init(struct bnx2x *bp) | |||
974 | DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n", | 995 | DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n", |
975 | dcbx_lldp_params_offset); | 996 | dcbx_lldp_params_offset); |
976 | 997 | ||
998 | bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0); | ||
999 | |||
977 | if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) { | 1000 | if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) { |
978 | bnx2x_dcbx_lldp_updated_params(bp, | 1001 | bnx2x_dcbx_lldp_updated_params(bp, |
979 | dcbx_lldp_params_offset); | 1002 | dcbx_lldp_params_offset); |
@@ -981,46 +1004,12 @@ void bnx2x_dcbx_init(struct bnx2x *bp) | |||
981 | bnx2x_dcbx_admin_mib_updated_params(bp, | 1004 | bnx2x_dcbx_admin_mib_updated_params(bp, |
982 | dcbx_lldp_params_offset); | 1005 | dcbx_lldp_params_offset); |
983 | 1006 | ||
984 | /* set default configuration BC has */ | 1007 | /* Let HW start negotiation */ |
985 | bnx2x_dcbx_set_params(bp, | ||
986 | BNX2X_DCBX_STATE_NEG_RECEIVED); | ||
987 | |||
988 | bnx2x_fw_command(bp, | 1008 | bnx2x_fw_command(bp, |
989 | DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0); | 1009 | DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0); |
990 | } | 1010 | } |
991 | } | 1011 | } |
992 | } | 1012 | } |
993 | |||
994 | void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp) | ||
995 | { | ||
996 | struct priority_cos pricos[MAX_PFC_TRAFFIC_TYPES]; | ||
997 | u32 i = 0, addr; | ||
998 | memset(pricos, 0, sizeof(pricos)); | ||
999 | /* Default initialization */ | ||
1000 | for (i = 0; i < MAX_PFC_TRAFFIC_TYPES; i++) | ||
1001 | pricos[i].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED; | ||
1002 | |||
1003 | /* Store per port struct to internal memory */ | ||
1004 | addr = BAR_XSTRORM_INTMEM + | ||
1005 | XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) + | ||
1006 | offsetof(struct cmng_struct_per_port, | ||
1007 | traffic_type_to_priority_cos); | ||
1008 | __storm_memset_struct(bp, addr, sizeof(pricos), (u32 *)pricos); | ||
1009 | |||
1010 | |||
1011 | /* LLFC disabled.*/ | ||
1012 | REG_WR8(bp , BAR_XSTRORM_INTMEM + | ||
1013 | XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) + | ||
1014 | offsetof(struct cmng_struct_per_port, llfc_mode), | ||
1015 | LLFC_MODE_NONE); | ||
1016 | |||
1017 | /* DCBX disabled.*/ | ||
1018 | REG_WR8(bp , BAR_XSTRORM_INTMEM + | ||
1019 | XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) + | ||
1020 | offsetof(struct cmng_struct_per_port, dcb_enabled), | ||
1021 | DCB_DISABLED); | ||
1022 | } | ||
1023 | |||
1024 | static void | 1013 | static void |
1025 | bnx2x_dcbx_print_cos_params(struct bnx2x *bp, | 1014 | bnx2x_dcbx_print_cos_params(struct bnx2x *bp, |
1026 | struct flow_control_configuration *pfc_fw_cfg) | 1015 | struct flow_control_configuration *pfc_fw_cfg) |
@@ -1171,7 +1160,7 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, | |||
1171 | /* If we join a group and one is strict | 1160 | /* If we join a group and one is strict |
1172 | * then strict overrides the bw rules */ | 1161 | * then strict overrides the bw rules */ |
1173 | cos_data->data[entry].strict = | 1162 | cos_data->data[entry].strict = |
1174 | BNX2X_DCBX_COS_HIGH_STRICT; | 1163 | BNX2X_DCBX_STRICT_COS_HIGHEST; |
1175 | } | 1164 | } |
1176 | if ((0 == cos_data->data[0].pri_join_mask) && | 1165 | if ((0 == cos_data->data[0].pri_join_mask) && |
1177 | (0 == cos_data->data[1].pri_join_mask)) | 1166 | (0 == cos_data->data[1].pri_join_mask)) |
@@ -1183,7 +1172,7 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, | |||
1183 | #define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1)))) | 1172 | #define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1)))) |
1184 | #endif | 1173 | #endif |
1185 | 1174 | ||
1186 | static void bxn2x_dcbx_single_pg_to_cos_params(struct bnx2x *bp, | 1175 | static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp, |
1187 | struct pg_help_data *pg_help_data, | 1176 | struct pg_help_data *pg_help_data, |
1188 | struct cos_help_data *cos_data, | 1177 | struct cos_help_data *cos_data, |
1189 | u32 pri_join_mask, | 1178 | u32 pri_join_mask, |
@@ -1263,14 +1252,16 @@ static void bxn2x_dcbx_single_pg_to_cos_params(struct bnx2x *bp, | |||
1263 | if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) > | 1252 | if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) > |
1264 | DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) { | 1253 | DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) { |
1265 | cos_data->data[0].strict = | 1254 | cos_data->data[0].strict = |
1266 | BNX2X_DCBX_COS_HIGH_STRICT; | 1255 | BNX2X_DCBX_STRICT_COS_HIGHEST; |
1267 | cos_data->data[1].strict = | 1256 | cos_data->data[1].strict = |
1268 | BNX2X_DCBX_COS_LOW_STRICT; | 1257 | BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( |
1258 | BNX2X_DCBX_STRICT_COS_HIGHEST); | ||
1269 | } else { | 1259 | } else { |
1270 | cos_data->data[0].strict = | 1260 | cos_data->data[0].strict = |
1271 | BNX2X_DCBX_COS_LOW_STRICT; | 1261 | BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( |
1262 | BNX2X_DCBX_STRICT_COS_HIGHEST); | ||
1272 | cos_data->data[1].strict = | 1263 | cos_data->data[1].strict = |
1273 | BNX2X_DCBX_COS_HIGH_STRICT; | 1264 | BNX2X_DCBX_STRICT_COS_HIGHEST; |
1274 | } | 1265 | } |
1275 | /* Pauseable */ | 1266 | /* Pauseable */ |
1276 | cos_data->data[0].pausable = true; | 1267 | cos_data->data[0].pausable = true; |
@@ -1306,13 +1297,16 @@ static void bxn2x_dcbx_single_pg_to_cos_params(struct bnx2x *bp, | |||
1306 | * and that with the highest priority | 1297 | * and that with the highest priority |
1307 | * gets the highest strict priority in the arbiter. | 1298 | * gets the highest strict priority in the arbiter. |
1308 | */ | 1299 | */ |
1309 | cos_data->data[0].strict = BNX2X_DCBX_COS_LOW_STRICT; | 1300 | cos_data->data[0].strict = |
1310 | cos_data->data[1].strict = BNX2X_DCBX_COS_HIGH_STRICT; | 1301 | BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( |
1302 | BNX2X_DCBX_STRICT_COS_HIGHEST); | ||
1303 | cos_data->data[1].strict = | ||
1304 | BNX2X_DCBX_STRICT_COS_HIGHEST; | ||
1311 | } | 1305 | } |
1312 | } | 1306 | } |
1313 | } | 1307 | } |
1314 | 1308 | ||
1315 | static void bnx2x_dcbx_two_pg_to_cos_params( | 1309 | static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params( |
1316 | struct bnx2x *bp, | 1310 | struct bnx2x *bp, |
1317 | struct pg_help_data *pg_help_data, | 1311 | struct pg_help_data *pg_help_data, |
1318 | struct dcbx_ets_feature *ets, | 1312 | struct dcbx_ets_feature *ets, |
@@ -1322,7 +1316,7 @@ static void bnx2x_dcbx_two_pg_to_cos_params( | |||
1322 | u8 num_of_dif_pri) | 1316 | u8 num_of_dif_pri) |
1323 | { | 1317 | { |
1324 | u8 i = 0; | 1318 | u8 i = 0; |
1325 | u8 pg[E2_NUM_OF_COS] = {0}; | 1319 | u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 }; |
1326 | 1320 | ||
1327 | /* If there are both pauseable and non-pauseable priorities, | 1321 | /* If there are both pauseable and non-pauseable priorities, |
1328 | * the pauseable priorities go to the first queue and | 1322 | * the pauseable priorities go to the first queue and |
@@ -1378,16 +1372,68 @@ static void bnx2x_dcbx_two_pg_to_cos_params( | |||
1378 | } | 1372 | } |
1379 | 1373 | ||
1380 | /* There can be only one strict pg */ | 1374 | /* There can be only one strict pg */ |
1381 | for (i = 0 ; i < E2_NUM_OF_COS; i++) { | 1375 | for (i = 0 ; i < ARRAY_SIZE(pg); i++) { |
1382 | if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES) | 1376 | if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES) |
1383 | cos_data->data[i].cos_bw = | 1377 | cos_data->data[i].cos_bw = |
1384 | DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]); | 1378 | DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]); |
1385 | else | 1379 | else |
1386 | cos_data->data[i].strict = BNX2X_DCBX_COS_HIGH_STRICT; | 1380 | cos_data->data[i].strict = |
1381 | BNX2X_DCBX_STRICT_COS_HIGHEST; | ||
1382 | } | ||
1383 | } | ||
1384 | |||
1385 | static int bnx2x_dcbx_join_pgs( | ||
1386 | struct bnx2x *bp, | ||
1387 | struct dcbx_ets_feature *ets, | ||
1388 | struct pg_help_data *pg_help_data, | ||
1389 | u8 required_num_of_pg) | ||
1390 | { | ||
1391 | u8 entry_joined = pg_help_data->num_of_pg - 1; | ||
1392 | u8 entry_removed = entry_joined + 1; | ||
1393 | u8 pg_joined = 0; | ||
1394 | |||
1395 | if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data) | ||
1396 | <= pg_help_data->num_of_pg) { | ||
1397 | |||
1398 | BNX2X_ERR("required_num_of_pg can't be zero\n"); | ||
1399 | return -EINVAL; | ||
1400 | } | ||
1401 | |||
1402 | while (required_num_of_pg < pg_help_data->num_of_pg) { | ||
1403 | entry_joined = pg_help_data->num_of_pg - 2; | ||
1404 | entry_removed = entry_joined + 1; | ||
1405 | /* protect index */ | ||
1406 | entry_removed %= ARRAY_SIZE(pg_help_data->data); | ||
1407 | |||
1408 | pg_help_data->data[entry_joined].pg_priority |= | ||
1409 | pg_help_data->data[entry_removed].pg_priority; | ||
1410 | |||
1411 | pg_help_data->data[entry_joined].num_of_dif_pri += | ||
1412 | pg_help_data->data[entry_removed].num_of_dif_pri; | ||
1413 | |||
1414 | if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG || | ||
1415 | pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG) | ||
1416 | /* Entries joined strict priority rules */ | ||
1417 | pg_help_data->data[entry_joined].pg = | ||
1418 | DCBX_STRICT_PRI_PG; | ||
1419 | else { | ||
1420 | /* Entries can be joined - join their BW */ | ||
1421 | pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl, | ||
1422 | pg_help_data->data[entry_joined].pg) + | ||
1423 | DCBX_PG_BW_GET(ets->pg_bw_tbl, | ||
1424 | pg_help_data->data[entry_removed].pg); | ||
1425 | |||
1426 | DCBX_PG_BW_SET(ets->pg_bw_tbl, | ||
1427 | pg_help_data->data[entry_joined].pg, pg_joined); | ||
1428 | } | ||
1429 | /* Joined the entries */ | ||
1430 | pg_help_data->num_of_pg--; | ||
1387 | } | 1431 | } |
1432 | |||
1433 | return 0; | ||
1388 | } | 1434 | } |
1389 | 1435 | ||
1390 | static void bnx2x_dcbx_three_pg_to_cos_params( | 1436 | static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( |
1391 | struct bnx2x *bp, | 1437 | struct bnx2x *bp, |
1392 | struct pg_help_data *pg_help_data, | 1438 | struct pg_help_data *pg_help_data, |
1393 | struct dcbx_ets_feature *ets, | 1439 | struct dcbx_ets_feature *ets, |
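(For readers following the new bnx2x_dcbx_join_pgs() helper added in the hunk above: it repeatedly folds the last priority group into the one before it - ORing the priority masks, summing the bandwidth shares, and letting a strict group dominate - until the requested number of groups remains. Below is a rough, self-contained sketch of that merge step; all type and field names here are illustrative assumptions, not the driver's own structures.)

/* Simplified sketch of the PG-join step (hypothetical types/names). */
#include <stdio.h>

#define STRICT_PG 15          /* marker for a strict-priority group */
#define MAX_PG    8

struct pg_entry {
	unsigned char pg;         /* PG index or STRICT_PG            */
	unsigned char pri_mask;   /* priorities mapped to this group  */
	unsigned char bw;         /* relative BW share, if not strict */
};

/* Merge the last two entries until only 'wanted' groups remain. */
static void join_pgs(struct pg_entry *e, int *num, int wanted)
{
	while (*num > wanted && *num >= 2) {
		struct pg_entry *keep = &e[*num - 2];
		struct pg_entry *drop = &e[*num - 1];

		keep->pri_mask |= drop->pri_mask;
		if (keep->pg == STRICT_PG || drop->pg == STRICT_PG)
			keep->pg = STRICT_PG;   /* strict wins over BW     */
		else
			keep->bw += drop->bw;   /* otherwise BW shares add */
		(*num)--;
	}
}

int main(void)
{
	struct pg_entry e[MAX_PG] = {
		{ 0, 0x03, 50 }, { 1, 0x0c, 30 }, { 2, 0x30, 20 }, { 3, 0xc0, 0 },
	};
	int num = 4;

	join_pgs(e, &num, 3);
	printf("groups left: %d, last mask 0x%x bw %u\n",
	       num, (unsigned)e[num - 1].pri_mask, (unsigned)e[num - 1].bw);
	return 0;
}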
@@ -1459,102 +1505,272 @@ static void bnx2x_dcbx_three_pg_to_cos_params( | |||
1459 | /* If we join a group and one is strict | 1505 | /* If we join a group and one is strict |
1460 | * then strict rules over the bw */ | 1506 | * then strict rules over the bw */ |
1461 | cos_data->data[1].strict = | 1507 | cos_data->data[1].strict = |
1462 | BNX2X_DCBX_COS_HIGH_STRICT; | 1508 | BNX2X_DCBX_STRICT_COS_HIGHEST; |
1463 | } | 1509 | } |
1464 | } | 1510 | } |
1465 | } | 1511 | } |
1466 | } | 1512 | } |
1467 | 1513 | ||
1468 | 1514 | ||
1469 | static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, | 1515 | static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp, |
1470 | struct pg_help_data *help_data, | 1516 | struct pg_help_data *help_data, |
1471 | struct dcbx_ets_feature *ets, | 1517 | struct dcbx_ets_feature *ets, |
1472 | u32 *pg_pri_orginal_spread) | 1518 | struct cos_help_data *cos_data, |
1519 | u32 *pg_pri_orginal_spread, | ||
1520 | u32 pri_join_mask, | ||
1521 | u8 num_of_dif_pri) | ||
1473 | { | 1522 | { |
1474 | struct cos_help_data cos_data ; | ||
1475 | u8 i = 0; | ||
1476 | u32 pri_join_mask = 0; | ||
1477 | u8 num_of_dif_pri = 0; | ||
1478 | 1523 | ||
1479 | memset(&cos_data, 0, sizeof(cos_data)); | 1524 | /* default E2 settings */ |
1480 | /* Validate the pg value */ | 1525 | cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2; |
1481 | for (i = 0; i < help_data->num_of_pg ; i++) { | ||
1482 | if (DCBX_STRICT_PRIORITY != help_data->data[i].pg && | ||
1483 | DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg) | ||
1484 | BNX2X_ERR("Invalid pg[%d] data %x\n", i, | ||
1485 | help_data->data[i].pg); | ||
1486 | pri_join_mask |= help_data->data[i].pg_priority; | ||
1487 | num_of_dif_pri += help_data->data[i].num_of_dif_pri; | ||
1488 | } | ||
1489 | |||
1490 | /* default settings */ | ||
1491 | cos_data.num_of_cos = 2; | ||
1492 | for (i = 0; i < E2_NUM_OF_COS ; i++) { | ||
1493 | cos_data.data[i].pri_join_mask = pri_join_mask; | ||
1494 | cos_data.data[i].pausable = false; | ||
1495 | cos_data.data[i].strict = BNX2X_DCBX_COS_NOT_STRICT; | ||
1496 | cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW; | ||
1497 | } | ||
1498 | 1526 | ||
1499 | switch (help_data->num_of_pg) { | 1527 | switch (help_data->num_of_pg) { |
1500 | case 1: | 1528 | case 1: |
1501 | 1529 | bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params( | |
1502 | bxn2x_dcbx_single_pg_to_cos_params( | ||
1503 | bp, | 1530 | bp, |
1504 | help_data, | 1531 | help_data, |
1505 | &cos_data, | 1532 | cos_data, |
1506 | pri_join_mask, | 1533 | pri_join_mask, |
1507 | num_of_dif_pri); | 1534 | num_of_dif_pri); |
1508 | break; | 1535 | break; |
1509 | case 2: | 1536 | case 2: |
1510 | bnx2x_dcbx_two_pg_to_cos_params( | 1537 | bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params( |
1511 | bp, | 1538 | bp, |
1512 | help_data, | 1539 | help_data, |
1513 | ets, | 1540 | ets, |
1514 | &cos_data, | 1541 | cos_data, |
1515 | pg_pri_orginal_spread, | 1542 | pg_pri_orginal_spread, |
1516 | pri_join_mask, | 1543 | pri_join_mask, |
1517 | num_of_dif_pri); | 1544 | num_of_dif_pri); |
1518 | break; | 1545 | break; |
1519 | 1546 | ||
1520 | case 3: | 1547 | case 3: |
1521 | bnx2x_dcbx_three_pg_to_cos_params( | 1548 | bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( |
1522 | bp, | 1549 | bp, |
1523 | help_data, | 1550 | help_data, |
1524 | ets, | 1551 | ets, |
1525 | &cos_data, | 1552 | cos_data, |
1526 | pg_pri_orginal_spread, | 1553 | pg_pri_orginal_spread, |
1527 | pri_join_mask, | 1554 | pri_join_mask, |
1528 | num_of_dif_pri); | 1555 | num_of_dif_pri); |
1529 | |||
1530 | break; | 1556 | break; |
1531 | default: | 1557 | default: |
1532 | BNX2X_ERR("Wrong pg_help_data.num_of_pg\n"); | 1558 | BNX2X_ERR("Wrong pg_help_data.num_of_pg\n"); |
1533 | bnx2x_dcbx_ets_disabled_entry_data(bp, | 1559 | bnx2x_dcbx_ets_disabled_entry_data(bp, |
1534 | &cos_data, pri_join_mask); | 1560 | cos_data, pri_join_mask); |
1561 | } | ||
1562 | } | ||
1563 | |||
1564 | static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp, | ||
1565 | struct cos_help_data *cos_data, | ||
1566 | u8 entry, | ||
1567 | u8 num_spread_of_entries, | ||
1568 | u8 strict_app_pris) | ||
1569 | { | ||
1570 | u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST; | ||
1571 | u8 num_of_app_pri = MAX_PFC_PRIORITIES; | ||
1572 | u8 app_pri_bit = 0; | ||
1573 | |||
1574 | while (num_spread_of_entries && num_of_app_pri > 0) { | ||
1575 | app_pri_bit = 1 << (num_of_app_pri - 1); | ||
1576 | if (app_pri_bit & strict_app_pris) { | ||
1577 | struct cos_entry_help_data *data = &cos_data-> | ||
1578 | data[entry]; | ||
1579 | num_spread_of_entries--; | ||
1580 | if (num_spread_of_entries == 0) { | ||
1581 | /* last entry needed - put all remaining priorities in it */ | ||
1582 | data->cos_bw = DCBX_INVALID_COS_BW; | ||
1583 | data->strict = strict_pri; | ||
1584 | data->pri_join_mask = strict_app_pris; | ||
1585 | data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, | ||
1586 | data->pri_join_mask); | ||
1587 | } else { | ||
1588 | strict_app_pris &= ~app_pri_bit; | ||
1589 | |||
1590 | data->cos_bw = DCBX_INVALID_COS_BW; | ||
1591 | data->strict = strict_pri; | ||
1592 | data->pri_join_mask = app_pri_bit; | ||
1593 | data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, | ||
1594 | data->pri_join_mask); | ||
1595 | } | ||
1596 | |||
1597 | strict_pri = | ||
1598 | BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri); | ||
1599 | entry++; | ||
1600 | } | ||
1601 | |||
1602 | num_of_app_pri--; | ||
1603 | } | ||
1604 | |||
1605 | if (num_spread_of_entries) | ||
1606 | return -EINVAL; | ||
1607 | |||
1608 | return 0; | ||
1609 | } | ||
1610 | |||
1611 | static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp, | ||
1612 | struct cos_help_data *cos_data, | ||
1613 | u8 entry, | ||
1614 | u8 num_spread_of_entries, | ||
1615 | u8 strict_app_pris) | ||
1616 | { | ||
1617 | |||
1618 | if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry, | ||
1619 | num_spread_of_entries, | ||
1620 | strict_app_pris)) { | ||
1621 | struct cos_entry_help_data *data = &cos_data-> | ||
1622 | data[entry]; | ||
1623 | /* Fill BW entry */ | ||
1624 | data->cos_bw = DCBX_INVALID_COS_BW; | ||
1625 | data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST; | ||
1626 | data->pri_join_mask = strict_app_pris; | ||
1627 | data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, | ||
1628 | data->pri_join_mask); | ||
1629 | return 1; | ||
1630 | } | ||
1631 | |||
1632 | return num_spread_of_entries; | ||
1633 | } | ||
1634 | |||
1635 | static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp, | ||
1636 | struct pg_help_data *help_data, | ||
1637 | struct dcbx_ets_feature *ets, | ||
1638 | struct cos_help_data *cos_data, | ||
1639 | u32 pri_join_mask) | ||
1640 | |||
1641 | { | ||
1642 | u8 need_num_of_entries = 0; | ||
1643 | u8 i = 0; | ||
1644 | u8 entry = 0; | ||
1645 | |||
1646 | /* | ||
1647 | * if the number of requested PG-s in CEE is greater than 3 | ||
1648 | * then the results are not determined since this is a violation | ||
1649 | * of the standard. | ||
1650 | */ | ||
1651 | if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) { | ||
1652 | if (bnx2x_dcbx_join_pgs(bp, ets, help_data, | ||
1653 | DCBX_COS_MAX_NUM_E3B0)) { | ||
1654 | BNX2X_ERR("Unable to reduce the number of PGs - " | ||
1655 | "we will disable ETS\n"); | ||
1656 | bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, | ||
1657 | pri_join_mask); | ||
1658 | return; | ||
1659 | } | ||
1535 | } | 1660 | } |
1536 | 1661 | ||
1662 | for (i = 0 ; i < help_data->num_of_pg; i++) { | ||
1663 | struct pg_entry_help_data *pg = &help_data->data[i]; | ||
1664 | if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) { | ||
1665 | struct cos_entry_help_data *data = &cos_data-> | ||
1666 | data[entry]; | ||
1667 | /* Fill BW entry */ | ||
1668 | data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg); | ||
1669 | data->strict = BNX2X_DCBX_STRICT_INVALID; | ||
1670 | data->pri_join_mask = pg->pg_priority; | ||
1671 | data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, | ||
1672 | data->pri_join_mask); | ||
1673 | |||
1674 | entry++; | ||
1675 | } else { | ||
1676 | need_num_of_entries = min_t(u8, | ||
1677 | (u8)pg->num_of_dif_pri, | ||
1678 | (u8)DCBX_COS_MAX_NUM_E3B0 - | ||
1679 | help_data->num_of_pg + 1); | ||
1680 | /* | ||
1681 | * If there are still VOQ-s which have no associated PG, | ||
1682 | * then associate these VOQ-s to PG15. These PG-s will | ||
1683 | * be used for SP between priorities on PG15. | ||
1684 | */ | ||
1685 | entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data, | ||
1686 | entry, need_num_of_entries, pg->pg_priority); | ||
1687 | } | ||
1688 | } | ||
1689 | |||
1690 | /* the entry will represent the number of COSes used */ | ||
1691 | cos_data->num_of_cos = entry; | ||
1692 | } | ||
1693 | static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, | ||
1694 | struct pg_help_data *help_data, | ||
1695 | struct dcbx_ets_feature *ets, | ||
1696 | u32 *pg_pri_orginal_spread) | ||
1697 | { | ||
1698 | struct cos_help_data cos_data; | ||
1699 | u8 i = 0; | ||
1700 | u32 pri_join_mask = 0; | ||
1701 | u8 num_of_dif_pri = 0; | ||
1702 | |||
1703 | memset(&cos_data, 0, sizeof(cos_data)); | ||
1704 | |||
1705 | /* Validate the pg value */ | ||
1706 | for (i = 0; i < help_data->num_of_pg ; i++) { | ||
1707 | if (DCBX_STRICT_PRIORITY != help_data->data[i].pg && | ||
1708 | DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg) | ||
1709 | BNX2X_ERR("Invalid pg[%d] data %x\n", i, | ||
1710 | help_data->data[i].pg); | ||
1711 | pri_join_mask |= help_data->data[i].pg_priority; | ||
1712 | num_of_dif_pri += help_data->data[i].num_of_dif_pri; | ||
1713 | } | ||
1714 | |||
1715 | /* defaults */ | ||
1716 | cos_data.num_of_cos = 1; | ||
1717 | for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) { | ||
1718 | cos_data.data[i].pri_join_mask = 0; | ||
1719 | cos_data.data[i].pausable = false; | ||
1720 | cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID; | ||
1721 | cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW; | ||
1722 | } | ||
1723 | |||
1724 | if (CHIP_IS_E3B0(bp)) | ||
1725 | bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets, | ||
1726 | &cos_data, pri_join_mask); | ||
1727 | else /* E2 + E3A0 */ | ||
1728 | bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp, | ||
1729 | help_data, ets, | ||
1730 | &cos_data, | ||
1731 | pg_pri_orginal_spread, | ||
1732 | pri_join_mask, | ||
1733 | num_of_dif_pri); | ||
1734 | |||
1735 | |||
1537 | for (i = 0; i < cos_data.num_of_cos ; i++) { | 1736 | for (i = 0; i < cos_data.num_of_cos ; i++) { |
1538 | struct bnx2x_dcbx_cos_params *params = | 1737 | struct bnx2x_dcbx_cos_params *p = |
1539 | &bp->dcbx_port_params.ets.cos_params[i]; | 1738 | &bp->dcbx_port_params.ets.cos_params[i]; |
1540 | 1739 | ||
1541 | params->pauseable = cos_data.data[i].pausable; | 1740 | p->strict = cos_data.data[i].strict; |
1542 | params->strict = cos_data.data[i].strict; | 1741 | p->bw_tbl = cos_data.data[i].cos_bw; |
1543 | params->bw_tbl = cos_data.data[i].cos_bw; | 1742 | p->pri_bitmask = cos_data.data[i].pri_join_mask; |
1544 | if (params->pauseable) { | 1743 | p->pauseable = cos_data.data[i].pausable; |
1545 | params->pri_bitmask = | 1744 | |
1546 | DCBX_PFC_PRI_GET_PAUSE(bp, | 1745 | /* sanity */ |
1547 | cos_data.data[i].pri_join_mask); | 1746 | if (p->bw_tbl != DCBX_INVALID_COS_BW || |
1747 | p->strict != BNX2X_DCBX_STRICT_INVALID) { | ||
1748 | if (p->pri_bitmask == 0) | ||
1749 | BNX2X_ERR("Invalid pri_bitmask for %d\n", i); | ||
1750 | |||
1751 | if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) { | ||
1752 | |||
1753 | if (p->pauseable && | ||
1754 | DCBX_PFC_PRI_GET_NON_PAUSE(bp, | ||
1755 | p->pri_bitmask) != 0) | ||
1756 | BNX2X_ERR("Inconsistent config for " | ||
1757 | "pausable COS %d\n", i); | ||
1758 | |||
1759 | if (!p->pauseable && | ||
1760 | DCBX_PFC_PRI_GET_PAUSE(bp, | ||
1761 | p->pri_bitmask) != 0) | ||
1762 | BNX2X_ERR("Inconsistent config for " | ||
1763 | "nonpausable COS %d\n", i); | ||
1764 | } | ||
1765 | } | ||
1766 | |||
1767 | if (p->pauseable) | ||
1548 | DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n", | 1768 | DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n", |
1549 | i, cos_data.data[i].pri_join_mask); | 1769 | i, cos_data.data[i].pri_join_mask); |
1550 | } else { | 1770 | else |
1551 | params->pri_bitmask = | ||
1552 | DCBX_PFC_PRI_GET_NON_PAUSE(bp, | ||
1553 | cos_data.data[i].pri_join_mask); | ||
1554 | DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask " | 1771 | DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask " |
1555 | "0x%x\n", | 1772 | "0x%x\n", |
1556 | i, cos_data.data[i].pri_join_mask); | 1773 | i, cos_data.data[i].pri_join_mask); |
1557 | } | ||
1558 | } | 1774 | } |
1559 | 1775 | ||
1560 | bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos ; | 1776 | bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos ; |
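(The sanity block added above enforces that, on E2/E3A0, a pauseable COS carries only PFC-enabled priorities and a non-pauseable COS only PFC-disabled ones. A minimal sketch of that rule follows; the function and parameter names are assumptions for illustration, not the driver's DCBX_PFC_PRI_* macros.)

/* Illustrative "no mixed pause classes" check (hypothetical names). */
#include <stdbool.h>
#include <stdio.h>

static bool cos_mask_consistent(unsigned char cos_pri_mask,
				unsigned char pfc_enabled_mask,
				bool cos_is_pauseable)
{
	unsigned char pause     = cos_pri_mask & pfc_enabled_mask;
	unsigned char non_pause = cos_pri_mask & (unsigned char)~pfc_enabled_mask;

	/* a pauseable COS may only carry PFC-enabled priorities,
	 * a non-pauseable COS only PFC-disabled ones */
	return cos_is_pauseable ? (non_pause == 0) : (pause == 0);
}

int main(void)
{
	/* assume priorities 3 and 4 have PFC enabled */
	unsigned char pfc = (1 << 3) | (1 << 4);

	printf("%d\n", cos_mask_consistent(1 << 3, pfc, true));        /* 1: consistent   */
	printf("%d\n", cos_mask_consistent((1 << 3) | 1, pfc, true));  /* 0: mixed classes */
	return 0;
}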
@@ -1574,7 +1790,7 @@ static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, | |||
1574 | } | 1790 | } |
1575 | } | 1791 | } |
1576 | 1792 | ||
1577 | static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp) | 1793 | static void bnx2x_dcbx_fw_struct(struct bnx2x *bp) |
1578 | { | 1794 | { |
1579 | struct flow_control_configuration *pfc_fw_cfg = NULL; | 1795 | struct flow_control_configuration *pfc_fw_cfg = NULL; |
1580 | u16 pri_bit = 0; | 1796 | u16 pri_bit = 0; |
@@ -1591,13 +1807,7 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp) | |||
1591 | 1807 | ||
1592 | /* Fw version should be incremented each update */ | 1808 | /* Fw version should be incremented each update */ |
1593 | pfc_fw_cfg->dcb_version = ++bp->dcb_version; | 1809 | pfc_fw_cfg->dcb_version = ++bp->dcb_version; |
1594 | pfc_fw_cfg->dcb_enabled = DCB_ENABLED; | 1810 | pfc_fw_cfg->dcb_enabled = 1; |
1595 | |||
1596 | /* Default initialization */ | ||
1597 | for (pri = 0; pri < MAX_PFC_TRAFFIC_TYPES ; pri++) { | ||
1598 | tt2cos[pri].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED; | ||
1599 | tt2cos[pri].cos = 0; | ||
1600 | } | ||
1601 | 1811 | ||
1602 | /* Fill priority parameters */ | 1812 | /* Fill priority parameters */ |
1603 | for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) { | 1813 | for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) { |
@@ -1605,14 +1815,37 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp) | |||
1605 | pri_bit = 1 << tt2cos[pri].priority; | 1815 | pri_bit = 1 << tt2cos[pri].priority; |
1606 | 1816 | ||
1607 | /* Fill COS parameters based on COS calculated to | 1817 | /* Fill COS parameters based on COS calculated to |
1608 | * make it more generally for future use */ | 1818 | * make it more general for future use */ |
1609 | for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) | 1819 | for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) |
1610 | if (bp->dcbx_port_params.ets.cos_params[cos]. | 1820 | if (bp->dcbx_port_params.ets.cos_params[cos]. |
1611 | pri_bitmask & pri_bit) | 1821 | pri_bitmask & pri_bit) |
1612 | tt2cos[pri].cos = cos; | 1822 | tt2cos[pri].cos = cos; |
1613 | } | 1823 | } |
1824 | |||
1825 | /* we never want the FW to add a 0 vlan tag */ | ||
1826 | pfc_fw_cfg->dont_add_pri_0_en = 1; | ||
1827 | |||
1614 | bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg); | 1828 | bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg); |
1615 | } | 1829 | } |
1830 | |||
1831 | void bnx2x_dcbx_pmf_update(struct bnx2x *bp) | ||
1832 | { | ||
1833 | /* if we need to synchronize the DCBX result from the previous PMF | ||
1834 | * read it from shmem and update bp accordingly | ||
1835 | */ | ||
1836 | if (SHMEM2_HAS(bp, drv_flags) && | ||
1837 | GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) { | ||
1838 | /* Read neg results if dcbx is in the FW */ | ||
1839 | if (bnx2x_dcbx_read_shmem_neg_results(bp)) | ||
1840 | return; | ||
1841 | |||
1842 | bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat, | ||
1843 | bp->dcbx_error); | ||
1844 | bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, | ||
1845 | bp->dcbx_error); | ||
1846 | } | ||
1847 | } | ||
1848 | |||
1616 | /* DCB netlink */ | 1849 | /* DCB netlink */ |
1617 | #ifdef BCM_DCBNL | 1850 | #ifdef BCM_DCBNL |
1618 | 1851 | ||
@@ -1879,10 +2112,12 @@ static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num) | |||
1879 | if (bp->dcb_state) { | 2112 | if (bp->dcb_state) { |
1880 | switch (tcid) { | 2113 | switch (tcid) { |
1881 | case DCB_NUMTCS_ATTR_PG: | 2114 | case DCB_NUMTCS_ATTR_PG: |
1882 | *num = E2_NUM_OF_COS; | 2115 | *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 : |
2116 | DCBX_COS_MAX_NUM_E2; | ||
1883 | break; | 2117 | break; |
1884 | case DCB_NUMTCS_ATTR_PFC: | 2118 | case DCB_NUMTCS_ATTR_PFC: |
1885 | *num = E2_NUM_OF_COS; | 2119 | *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 : |
2120 | DCBX_COS_MAX_NUM_E2; | ||
1886 | break; | 2121 | break; |
1887 | default: | 2122 | default: |
1888 | rval = -EINVAL; | 2123 | rval = -EINVAL; |
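(The renamed bnx2x_dcbx_fw_struct() above fills the firmware tt2cos table by picking, for each traffic type, the COS whose priority bitmask contains that traffic type's priority. A rough standalone model of that lookup is sketched below; the array names and sample masks are made up for illustration.)

/* Hedged model of the traffic-type -> COS lookup (hypothetical data). */
#include <stdio.h>

#define NUM_COS 3
#define NUM_TT  4

int main(void)
{
	unsigned char cos_pri_mask[NUM_COS] = { 0x03, 0x0c, 0xf0 };
	unsigned char tt_priority[NUM_TT]   = { 0, 3, 5, 7 };
	unsigned char tt2cos[NUM_TT]        = { 0 };

	for (int tt = 0; tt < NUM_TT; tt++) {
		unsigned char pri_bit = 1 << tt_priority[tt];

		/* last matching COS wins, mirroring a simple linear scan */
		for (int cos = 0; cos < NUM_COS; cos++)
			if (cos_pri_mask[cos] & pri_bit)
				tt2cos[tt] = (unsigned char)cos;

		printf("traffic type %d (pri %u) -> COS %u\n",
		       tt, (unsigned)tt_priority[tt], (unsigned)tt2cos[tt]);
	}
	return 0;
}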
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h index bed369d67e02..2c6a3bca6f28 100644 --- a/drivers/net/bnx2x/bnx2x_dcb.h +++ b/drivers/net/bnx2x/bnx2x_dcb.h | |||
@@ -27,22 +27,30 @@ struct bnx2x_dcbx_app_params { | |||
27 | u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX]; | 27 | u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX]; |
28 | }; | 28 | }; |
29 | 29 | ||
30 | #define E2_NUM_OF_COS 2 | 30 | #define DCBX_COS_MAX_NUM_E2 DCBX_E2E3_MAX_NUM_COS |
31 | #define BNX2X_DCBX_COS_NOT_STRICT 0 | 31 | /* bnx2x currently limits numbers of supported COSes to 3 to be extended to 6 */ |
32 | #define BNX2X_DCBX_COS_LOW_STRICT 1 | 32 | #define BNX2X_MAX_COS_SUPPORT 3 |
33 | #define BNX2X_DCBX_COS_HIGH_STRICT 2 | 33 | #define DCBX_COS_MAX_NUM_E3B0 BNX2X_MAX_COS_SUPPORT |
34 | #define DCBX_COS_MAX_NUM BNX2X_MAX_COS_SUPPORT | ||
34 | 35 | ||
35 | struct bnx2x_dcbx_cos_params { | 36 | struct bnx2x_dcbx_cos_params { |
36 | u32 bw_tbl; | 37 | u32 bw_tbl; |
37 | u32 pri_bitmask; | 38 | u32 pri_bitmask; |
39 | /* | ||
40 | * strict priority: valid values are 0..5; 0 is highest priority. | ||
41 | * There can't be two COSes with the same priority. | ||
42 | */ | ||
38 | u8 strict; | 43 | u8 strict; |
44 | #define BNX2X_DCBX_STRICT_INVALID DCBX_COS_MAX_NUM | ||
45 | #define BNX2X_DCBX_STRICT_COS_HIGHEST 0 | ||
46 | #define BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(sp) ((sp) + 1) | ||
39 | u8 pauseable; | 47 | u8 pauseable; |
40 | }; | 48 | }; |
41 | 49 | ||
42 | struct bnx2x_dcbx_pg_params { | 50 | struct bnx2x_dcbx_pg_params { |
43 | u32 enabled; | 51 | u32 enabled; |
44 | u8 num_of_cos; /* valid COS entries */ | 52 | u8 num_of_cos; /* valid COS entries */ |
45 | struct bnx2x_dcbx_cos_params cos_params[E2_NUM_OF_COS]; | 53 | struct bnx2x_dcbx_cos_params cos_params[DCBX_COS_MAX_NUM]; |
46 | }; | 54 | }; |
47 | 55 | ||
48 | struct bnx2x_dcbx_pfc_params { | 56 | struct bnx2x_dcbx_pfc_params { |
@@ -60,6 +68,8 @@ struct bnx2x_dcbx_port_params { | |||
60 | #define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE 0 | 68 | #define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE 0 |
61 | #define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1 | 69 | #define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1 |
62 | #define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE) | 70 | #define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE) |
71 | #define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\ | ||
72 | (bp)->dcbx_port_params.ets.enabled) | ||
63 | 73 | ||
64 | struct bnx2x_config_lldp_params { | 74 | struct bnx2x_config_lldp_params { |
65 | u32 overwrite_settings; | 75 | u32 overwrite_settings; |
@@ -132,7 +142,7 @@ struct cos_entry_help_data { | |||
132 | }; | 142 | }; |
133 | 143 | ||
134 | struct cos_help_data { | 144 | struct cos_help_data { |
135 | struct cos_entry_help_data data[E2_NUM_OF_COS]; | 145 | struct cos_entry_help_data data[DCBX_COS_MAX_NUM]; |
136 | u8 num_of_cos; | 146 | u8 num_of_cos; |
137 | }; | 147 | }; |
138 | 148 | ||
@@ -148,6 +158,8 @@ struct cos_help_data { | |||
148 | ((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp))) | 158 | ((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp))) |
149 | #define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \ | 159 | #define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \ |
150 | (DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri)) | 160 | (DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri)) |
161 | #define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri) \ | ||
162 | (0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri)) | ||
151 | #define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \ | 163 | #define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \ |
152 | (pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri))) | 164 | (pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri))) |
153 | #define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\ | 165 | #define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\ |
@@ -170,22 +182,18 @@ struct pg_help_data { | |||
170 | 182 | ||
171 | /* forward DCB/PFC related declarations */ | 183 | /* forward DCB/PFC related declarations */ |
172 | struct bnx2x; | 184 | struct bnx2x; |
173 | void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp); | ||
174 | void bnx2x_dcbx_update(struct work_struct *work); | 185 | void bnx2x_dcbx_update(struct work_struct *work); |
175 | void bnx2x_dcbx_init_params(struct bnx2x *bp); | 186 | void bnx2x_dcbx_init_params(struct bnx2x *bp); |
176 | void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled); | 187 | void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled); |
177 | 188 | ||
178 | enum { | 189 | enum { |
179 | BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1, | 190 | BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1, |
180 | #ifdef BCM_CNIC | ||
181 | BNX2X_DCBX_STATE_ISCSI_STOPPED, | ||
182 | #endif | ||
183 | BNX2X_DCBX_STATE_TX_PAUSED, | 191 | BNX2X_DCBX_STATE_TX_PAUSED, |
184 | BNX2X_DCBX_STATE_TX_RELEASED | 192 | BNX2X_DCBX_STATE_TX_RELEASED |
185 | }; | 193 | }; |
186 | 194 | ||
187 | void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state); | 195 | void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state); |
188 | 196 | void bnx2x_dcbx_pmf_update(struct bnx2x *bp); | |
189 | /* DCB netlink */ | 197 | /* DCB netlink */ |
190 | #ifdef BCM_DCBNL | 198 | #ifdef BCM_DCBNL |
191 | extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops; | 199 | extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops; |
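(The header changes above replace the old two-value strict flags with an encoding where 0 is the highest arbiter priority, each additional strict COS takes the next lower value, and DCBX_COS_MAX_NUM means "not strict". A minimal sketch of assigning priorities in that scheme follows; the macro names are local stand-ins for the new BNX2X_DCBX_STRICT_* definitions.)

/* Minimal illustration of the descending strict-priority encoding. */
#include <stdio.h>

#define COS_MAX_NUM          3
#define STRICT_INVALID       COS_MAX_NUM
#define STRICT_HIGHEST       0
#define STRICT_NEXT_LOWER(s) ((s) + 1)

int main(void)
{
	unsigned char strict[COS_MAX_NUM] = {
		STRICT_INVALID, STRICT_INVALID, STRICT_INVALID };
	unsigned char sp = STRICT_HIGHEST;

	/* give the first two COSes strict priorities, highest first */
	for (int i = 0; i < 2; i++) {
		strict[i] = sp;
		sp = STRICT_NEXT_LOWER(sp);
	}

	for (int i = 0; i < COS_MAX_NUM; i++)
		printf("COS %d strict=%u%s\n", i, (unsigned)strict[i],
		       strict[i] == STRICT_INVALID ? " (not strict)" : "");
	return 0;
}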
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h index fb3ff7c4d7ca..407531a9ab13 100644 --- a/drivers/net/bnx2x/bnx2x_dump.h +++ b/drivers/net/bnx2x/bnx2x_dump.h | |||
@@ -25,34 +25,55 @@ | |||
25 | 25 | ||
26 | 26 | ||
27 | /*definitions */ | 27 | /*definitions */ |
28 | #define XSTORM_WAITP_ADDR 0x2b8a80 | 28 | #define XSTORM_WAITP_ADDR 0x2b8a80 |
29 | #define TSTORM_WAITP_ADDR 0x1b8a80 | 29 | #define TSTORM_WAITP_ADDR 0x1b8a80 |
30 | #define USTORM_WAITP_ADDR 0x338a80 | 30 | #define USTORM_WAITP_ADDR 0x338a80 |
31 | #define CSTORM_WAITP_ADDR 0x238a80 | 31 | #define CSTORM_WAITP_ADDR 0x238a80 |
32 | #define TSTORM_CAM_MODE 0x1B1440 | 32 | #define TSTORM_CAM_MODE 0x1B1440 |
33 | 33 | ||
34 | #define MAX_TIMER_PENDING 200 | 34 | #define MAX_TIMER_PENDING 200 |
35 | #define TIMER_SCAN_DONT_CARE 0xFF | 35 | #define TIMER_SCAN_DONT_CARE 0xFF |
36 | #define RI_E1 0x1 | 36 | #define RI_E1 0x1 |
37 | #define RI_E1H 0x2 | 37 | #define RI_E1H 0x2 |
38 | #define RI_E2 0x4 | 38 | #define RI_E2 0x4 |
39 | #define RI_ONLINE 0x100 | 39 | #define RI_E3 0x8 |
40 | #define RI_PATH0_DUMP 0x200 | 40 | #define RI_ONLINE 0x100 |
41 | #define RI_PATH1_DUMP 0x400 | 41 | #define RI_PATH0_DUMP 0x200 |
42 | #define RI_E1_OFFLINE (RI_E1) | 42 | #define RI_PATH1_DUMP 0x400 |
43 | #define RI_E1_ONLINE (RI_E1 | RI_ONLINE) | 43 | #define RI_E1_OFFLINE (RI_E1) |
44 | #define RI_E1H_OFFLINE (RI_E1H) | 44 | #define RI_E1_ONLINE (RI_E1 | RI_ONLINE) |
45 | #define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) | 45 | #define RI_E1H_OFFLINE (RI_E1H) |
46 | #define RI_E2_OFFLINE (RI_E2) | 46 | #define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) |
47 | #define RI_E2_ONLINE (RI_E2 | RI_ONLINE) | 47 | #define RI_E2_OFFLINE (RI_E2) |
48 | #define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H) | 48 | #define RI_E2_ONLINE (RI_E2 | RI_ONLINE) |
49 | #define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) | 49 | #define RI_E3_OFFLINE (RI_E3) |
50 | #define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H) | 50 | #define RI_E3_ONLINE (RI_E3 | RI_ONLINE) |
51 | #define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE) | 51 | #define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H) |
52 | #define RI_E1E2_OFFLINE (RI_E2 | RI_E1) | 52 | #define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) |
53 | #define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE) | 53 | #define RI_E1E1HE2_OFFLINE (RI_E1 | RI_E1H | RI_E2) |
54 | #define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2) | 54 | #define RI_E1E1HE2_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) |
55 | #define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) | 55 | #define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H) |
56 | #define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE) | ||
57 | #define RI_E1E2_OFFLINE (RI_E2 | RI_E1) | ||
58 | #define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE) | ||
59 | #define RI_E1E3_OFFLINE (RI_E1 | RI_E3) | ||
60 | #define RI_E1E3_ONLINE (RI_E1 | RI_E3 | RI_ONLINE) | ||
61 | #define RI_E1HE3_OFFLINE (RI_E1H | RI_E3) | ||
62 | #define RI_E1HE3_ONLINE (RI_E1H | RI_E3 | RI_ONLINE) | ||
63 | #define RI_E2E3_OFFLINE (RI_E2 | RI_E3) | ||
64 | #define RI_E2E3_ONLINE (RI_E2 | RI_E3 | RI_ONLINE) | ||
65 | #define RI_E1E1HE3_OFFLINE (RI_E1 | RI_E1H | RI_E3) | ||
66 | #define RI_E1E1HE3_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_ONLINE) | ||
67 | #define RI_E1HE2E3_OFFLINE (RI_E2 | RI_E1H | RI_E3) | ||
68 | #define RI_E1HE2E3_ONLINE (RI_E2 | RI_E1H | RI_E3 | RI_ONLINE) | ||
69 | #define RI_E1E2E3_OFFLINE (RI_E2 | RI_E1 | RI_E3) | ||
70 | #define RI_E1E2E3_ONLINE (RI_E2 | RI_E1 | RI_E3 | RI_ONLINE) | ||
71 | #define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3) | ||
72 | #define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE) | ||
73 | |||
74 | #define DBG_DMP_TRACE_BUFFER_SIZE 0x800 | ||
75 | #define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \ | ||
76 | ((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE) | ||
56 | 77 | ||
57 | struct dump_sign { | 78 | struct dump_sign { |
58 | u32 time_stamp; | 79 | u32 time_stamp; |
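(The extended RI_* masks above tag each register block with the chips it exists on plus an RI_ONLINE bit for blocks safe to read while the device is live. A hedged sketch of how a dump routine might filter reg_addrs[] entries with these masks is shown below; the struct field names and helper are assumptions, not the driver's actual dump code.)

/* Hedged sketch of chip/online filtering over the register table. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RI_E1     0x1
#define RI_E1H    0x2
#define RI_E2     0x4
#define RI_E3     0x8
#define RI_ONLINE 0x100

struct reg_block { uint32_t addr; uint32_t size; uint16_t info; };

/* dump a block only if it matches the running chip and, for a live
 * (online) dump, is marked safe to read while the device is up */
static bool should_dump(const struct reg_block *r, uint16_t chip_mask,
			bool online_dump)
{
	if (!(r->info & chip_mask))
		return false;
	return !online_dump || (r->info & RI_ONLINE);
}

int main(void)
{
	const struct reg_block blocks[] = {
		{ 0x2000, 341, RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE },
		{ 0x28004, 8191, RI_E1 | RI_E1H | RI_E2 | RI_E3 }, /* offline only */
	};

	for (unsigned i = 0; i < 2; i++)
		printf("block 0x%x: %s\n", (unsigned)blocks[i].addr,
		       should_dump(&blocks[i], RI_E3, true) ? "dump" : "skip");
	return 0;
}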
@@ -86,185 +107,255 @@ struct wreg_addr { | |||
86 | u16 info; | 107 | u16 info; |
87 | }; | 108 | }; |
88 | 109 | ||
89 | #define REGS_COUNT 834 | 110 | static const struct reg_addr reg_addrs[] = { |
90 | static const struct reg_addr reg_addrs[REGS_COUNT] = { | ||
91 | { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE }, | 111 | { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE }, |
92 | { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE }, | 112 | { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE }, |
93 | { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE }, | 113 | { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2E3_ONLINE }, |
94 | { 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE }, | 114 | { 0x9000, 147, RI_E2E3_ONLINE }, { 0x924c, 1, RI_E2_ONLINE }, |
95 | { 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE }, | 115 | { 0x9250, 16, RI_E2E3_ONLINE }, { 0x9400, 33, RI_E2E3_ONLINE }, |
96 | { 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE }, | 116 | { 0x9484, 5, RI_E3_ONLINE }, { 0xa000, 27, RI_ALL_ONLINE }, |
97 | { 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE }, | 117 | { 0xa06c, 1, RI_E1E1H_ONLINE }, { 0xa070, 71, RI_ALL_ONLINE }, |
98 | { 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE }, | 118 | { 0xa18c, 4, RI_E1E1H_ONLINE }, { 0xa19c, 62, RI_ALL_ONLINE }, |
99 | { 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE }, | 119 | { 0xa294, 2, RI_E1E1H_ONLINE }, { 0xa29c, 2, RI_ALL_ONLINE }, |
100 | { 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE }, | 120 | { 0xa2a4, 2, RI_E1E1HE2_ONLINE }, { 0xa2ac, 52, RI_ALL_ONLINE }, |
101 | { 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE }, | 121 | { 0xa39c, 7, RI_E1HE2E3_ONLINE }, { 0xa3b8, 2, RI_E3_ONLINE }, |
102 | { 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE }, | 122 | { 0xa3c0, 3, RI_E1HE2E3_ONLINE }, { 0xa3d0, 1, RI_E1HE2E3_ONLINE }, |
103 | { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE }, | 123 | { 0xa3d8, 1, RI_E1HE2E3_ONLINE }, { 0xa3e0, 1, RI_E1HE2E3_ONLINE }, |
124 | { 0xa3e8, 1, RI_E1HE2E3_ONLINE }, { 0xa3f0, 1, RI_E1HE2E3_ONLINE }, | ||
125 | { 0xa3f8, 1, RI_E1HE2E3_ONLINE }, { 0xa400, 40, RI_ALL_ONLINE }, | ||
126 | { 0xa4a0, 1, RI_E1E1HE2_ONLINE }, { 0xa4a4, 2, RI_ALL_ONLINE }, | ||
127 | { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_E1E1HE2_ONLINE }, | ||
104 | { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE }, | 128 | { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE }, |
105 | { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE }, | 129 | { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 3, RI_ALL_ONLINE }, |
106 | { 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE }, | 130 | { 0xa4fc, 2, RI_ALL_ONLINE }, { 0xa504, 1, RI_E1E1H_ONLINE }, |
107 | { 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE }, | 131 | { 0xa508, 3, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE }, |
108 | { 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE }, | 132 | { 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE }, |
109 | { 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE }, | 133 | { 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE }, |
110 | { 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE }, | 134 | { 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_E1E1H_ONLINE }, |
111 | { 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE }, | 135 | { 0xa550, 1, RI_E1E1H_ONLINE }, { 0xa558, 1, RI_E1E1H_ONLINE }, |
112 | { 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE }, | 136 | { 0xa560, 1, RI_E1E1H_ONLINE }, { 0xa568, 1, RI_E1E1H_ONLINE }, |
113 | { 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE }, | 137 | { 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE }, |
114 | { 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE }, | 138 | { 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_E1E1HE2_ONLINE }, |
115 | { 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE }, | 139 | { 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1HE2E3_ONLINE }, |
116 | { 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE }, | 140 | { 0xa5e8, 1, RI_E1HE2E3_ONLINE }, { 0xa5f0, 1, RI_E1HE2E3_ONLINE }, |
117 | { 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE }, | 141 | { 0xa5f8, 1, RI_E1HE2_ONLINE }, { 0xa5fc, 9, RI_E1HE2E3_ONLINE }, |
118 | { 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE }, | 142 | { 0xa620, 6, RI_E2E3_ONLINE }, { 0xa638, 20, RI_E2_ONLINE }, |
119 | { 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE }, | 143 | { 0xa688, 42, RI_E2E3_ONLINE }, { 0xa730, 1, RI_E2_ONLINE }, |
120 | { 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE }, | 144 | { 0xa734, 2, RI_E2E3_ONLINE }, { 0xa73c, 4, RI_E2_ONLINE }, |
145 | { 0xa74c, 5, RI_E2E3_ONLINE }, { 0xa760, 5, RI_E2_ONLINE }, | ||
146 | { 0xa774, 7, RI_E2E3_ONLINE }, { 0xa790, 15, RI_E2_ONLINE }, | ||
147 | { 0xa7cc, 4, RI_E2E3_ONLINE }, { 0xa7e0, 6, RI_E3_ONLINE }, | ||
148 | { 0xa800, 18, RI_E2_ONLINE }, { 0xa848, 33, RI_E2E3_ONLINE }, | ||
149 | { 0xa8cc, 2, RI_E3_ONLINE }, { 0xa8d4, 4, RI_E2E3_ONLINE }, | ||
150 | { 0xa8e4, 1, RI_E3_ONLINE }, { 0xa8e8, 1, RI_E2E3_ONLINE }, | ||
151 | { 0xa8f0, 1, RI_E2E3_ONLINE }, { 0xa8f8, 30, RI_E3_ONLINE }, | ||
152 | { 0xa974, 73, RI_E3_ONLINE }, { 0xac30, 1, RI_E3_ONLINE }, | ||
153 | { 0xac40, 1, RI_E3_ONLINE }, { 0xac50, 1, RI_E3_ONLINE }, | ||
154 | { 0x10000, 9, RI_ALL_ONLINE }, { 0x10024, 1, RI_E1E1HE2_ONLINE }, | ||
155 | { 0x10028, 5, RI_ALL_ONLINE }, { 0x1003c, 6, RI_E1E1HE2_ONLINE }, | ||
156 | { 0x10054, 20, RI_ALL_ONLINE }, { 0x100a4, 4, RI_E1E1HE2_ONLINE }, | ||
157 | { 0x100b4, 11, RI_ALL_ONLINE }, { 0x100e0, 4, RI_E1E1HE2_ONLINE }, | ||
158 | { 0x100f0, 8, RI_ALL_ONLINE }, { 0x10110, 6, RI_E1E1HE2_ONLINE }, | ||
159 | { 0x10128, 110, RI_ALL_ONLINE }, { 0x102e0, 4, RI_E1E1HE2_ONLINE }, | ||
160 | { 0x102f0, 18, RI_ALL_ONLINE }, { 0x10338, 20, RI_E1E1HE2_ONLINE }, | ||
161 | { 0x10388, 10, RI_ALL_ONLINE }, { 0x10400, 6, RI_E1E1HE2_ONLINE }, | ||
162 | { 0x10418, 6, RI_ALL_ONLINE }, { 0x10430, 10, RI_E1E1HE2_ONLINE }, | ||
163 | { 0x10458, 22, RI_ALL_ONLINE }, { 0x104b0, 12, RI_E1E1HE2_ONLINE }, | ||
164 | { 0x104e0, 1, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE }, | ||
121 | { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE }, | 165 | { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE }, |
122 | { 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE }, | 166 | { 0x10750, 2, RI_E1E1HE2_ONLINE }, { 0x10760, 2, RI_E1E1HE2_ONLINE }, |
123 | { 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE }, | 167 | { 0x10770, 2, RI_E1E1HE2_ONLINE }, { 0x10780, 2, RI_E1E1HE2_ONLINE }, |
124 | { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE }, | 168 | { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_E1E1HE2_ONLINE }, |
125 | { 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE }, | 169 | { 0x107b0, 2, RI_E1E1HE2_ONLINE }, { 0x107c0, 2, RI_E1E1HE2_ONLINE }, |
126 | { 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE }, | 170 | { 0x107d0, 2, RI_E1E1HE2_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE }, |
127 | { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE }, | 171 | { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE }, |
128 | { 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE }, | 172 | { 0x16000, 1, RI_E1HE2_ONLINE }, { 0x16004, 25, RI_E1HE2E3_ONLINE }, |
129 | { 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE }, | 173 | { 0x16070, 18, RI_E1HE2E3_ONLINE }, { 0x160c0, 7, RI_E1HE2E3_ONLINE }, |
130 | { 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE }, | 174 | { 0x160dc, 2, RI_E1HE2_ONLINE }, { 0x160e4, 10, RI_E1HE2E3_ONLINE }, |
131 | { 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE }, | 175 | { 0x1610c, 2, RI_E1HE2_ONLINE }, { 0x16114, 6, RI_E1HE2E3_ONLINE }, |
132 | { 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE }, | 176 | { 0x16140, 48, RI_E1HE2E3_ONLINE }, { 0x16204, 5, RI_E1HE2E3_ONLINE }, |
133 | { 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE }, | 177 | { 0x18000, 1, RI_E1HE2E3_ONLINE }, { 0x18008, 1, RI_E1HE2E3_ONLINE }, |
134 | { 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE }, | 178 | { 0x18010, 35, RI_E2E3_ONLINE }, { 0x180a4, 2, RI_E2E3_ONLINE }, |
135 | { 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE }, | 179 | { 0x180c0, 109, RI_E2E3_ONLINE }, { 0x18274, 1, RI_E2_ONLINE }, |
136 | { 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE }, | 180 | { 0x18278, 81, RI_E2E3_ONLINE }, { 0x18440, 63, RI_E2E3_ONLINE }, |
137 | { 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE }, | 181 | { 0x18570, 42, RI_E3_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE }, |
138 | { 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE }, | 182 | { 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 94, RI_ALL_ONLINE }, |
139 | { 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE }, | 183 | { 0x201f8, 1, RI_E1E1H_ONLINE }, { 0x201fc, 1, RI_ALL_ONLINE }, |
140 | { 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE }, | 184 | { 0x20200, 1, RI_E1E1H_ONLINE }, { 0x20204, 1, RI_ALL_ONLINE }, |
141 | { 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE }, | 185 | { 0x20208, 1, RI_E1E1H_ONLINE }, { 0x2020c, 39, RI_ALL_ONLINE }, |
142 | { 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE }, | 186 | { 0x202c8, 1, RI_E2E3_ONLINE }, { 0x202d8, 4, RI_E2E3_ONLINE }, |
143 | { 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE }, | 187 | { 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE }, |
144 | { 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE }, | 188 | { 0x2042c, 18, RI_E1HE2E3_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE }, |
145 | { 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE }, | 189 | { 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE }, |
146 | { 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE }, | 190 | { 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE }, |
147 | { 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE }, | 191 | { 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE }, |
148 | { 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE }, | 192 | { 0x40000, 98, RI_ALL_ONLINE }, { 0x401a8, 8, RI_E1HE2E3_ONLINE }, |
149 | { 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE }, | 193 | { 0x401c8, 1, RI_E1H_ONLINE }, { 0x401cc, 2, RI_E1HE2E3_ONLINE }, |
150 | { 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE }, | 194 | { 0x401d4, 2, RI_E2E3_ONLINE }, { 0x40200, 4, RI_ALL_ONLINE }, |
151 | { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE }, | 195 | { 0x40220, 18, RI_E2E3_ONLINE }, { 0x40268, 2, RI_E3_ONLINE }, |
152 | { 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE }, | 196 | { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2E3_ONLINE }, |
197 | { 0x404e0, 1, RI_E2E3_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE }, | ||
153 | { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE }, | 198 | { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE }, |
154 | { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE }, | 199 | { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE }, |
155 | { 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE }, | 200 | { 0x40550, 10, RI_E2E3_ONLINE }, { 0x40610, 2, RI_E2E3_ONLINE }, |
156 | { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE }, | 201 | { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2E3_ONLINE }, |
157 | { 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE }, | 202 | { 0x422d4, 5, RI_E1HE2E3_ONLINE }, { 0x422e8, 1, RI_E2E3_ONLINE }, |
158 | { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE }, | 203 | { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE }, |
159 | { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE }, | 204 | { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2E3_ONLINE }, |
160 | { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE }, | 205 | { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE }, |
161 | { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE }, | 206 | { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE }, |
162 | { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE }, | 207 | { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2E3_ONLINE }, |
163 | { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE }, | 208 | { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE }, |
164 | { 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE }, | 209 | { 0x50228, 6, RI_E1HE2E3_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE }, |
165 | { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE }, | 210 | { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2E3_ONLINE }, |
166 | { 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE }, | 211 | { 0x5030c, 1, RI_E2E3_ONLINE }, { 0x50318, 1, RI_E2E3_ONLINE }, |
167 | { 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE }, | 212 | { 0x5031c, 1, RI_E2E3_ONLINE }, { 0x50320, 2, RI_E2E3_ONLINE }, |
168 | { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE }, | 213 | { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE }, |
169 | { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE }, | 214 | { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE }, |
170 | { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE }, | 215 | { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE }, |
171 | { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE }, | 216 | { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE }, |
172 | { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE }, | 217 | { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE }, |
173 | { 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE }, | 218 | { 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_E1E1HE2_ONLINE }, |
174 | { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE }, | 219 | { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE }, |
175 | { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE }, | 220 | { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2E3_ONLINE }, |
176 | { 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE }, | 221 | { 0x601ac, 18, RI_E2E3_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE }, |
177 | { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE }, | 222 | { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2E3_ONLINE }, |
178 | { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE }, | 223 | { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE }, |
179 | { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE }, | 224 | { 0x61800, 512, RI_E3_OFFLINE }, { 0x70000, 8, RI_ALL_ONLINE }, |
225 | { 0x70020, 8184, RI_ALL_OFFLINE }, { 0x78000, 8192, RI_E3_OFFLINE }, | ||
180 | { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE }, | 226 | { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE }, |
181 | { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE }, | 227 | { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE }, |
182 | { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE }, | 228 | { 0xb0000, 16384, RI_E1H_ONLINE }, |
229 | { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2E3_ONLINE }, | ||
183 | { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE }, | 230 | { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE }, |
184 | { 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE }, | 231 | { 0xc22c0, 5, RI_E2E3_ONLINE }, { 0xc22d8, 4, RI_E2E3_ONLINE }, |
185 | { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE }, | 232 | { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE }, |
186 | { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE }, | 233 | { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE }, |
187 | { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE }, | 234 | { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2E3_ONLINE }, |
188 | { 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE }, | 235 | { 0xc42e0, 7, RI_E1HE2E3_ONLINE }, { 0xc42fc, 1, RI_E2E3_ONLINE }, |
189 | { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE }, | 236 | { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE }, |
190 | { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE }, | 237 | { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2E3_ONLINE }, |
191 | { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE }, | 238 | { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE }, |
192 | { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE }, | 239 | { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE }, |
193 | { 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE }, | 240 | { 0xd01fc, 1, RI_E2E3_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE }, |
194 | { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE }, | 241 | { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2E3_ONLINE }, |
195 | { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE }, | 242 | { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE }, |
196 | { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE }, | 243 | { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE }, |
197 | { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE }, | 244 | { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE }, |
198 | { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE }, | 245 | { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE }, |
199 | { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE }, | 246 | { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE }, |
200 | { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE }, | 247 | { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE }, |
201 | { 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE }, | 248 | { 0xe01f4, 1, RI_E2_ONLINE }, { 0xe01f8, 1, RI_E2E3_ONLINE }, |
202 | { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE }, | 249 | { 0xe0200, 2, RI_ALL_ONLINE }, { 0xe020c, 8, RI_ALL_ONLINE }, |
203 | { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE }, | 250 | { 0xe022c, 18, RI_E1HE2E3_ONLINE }, { 0xe0280, 1, RI_ALL_ONLINE }, |
204 | { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE }, | 251 | { 0xe0300, 1, RI_ALL_ONLINE }, { 0xe1000, 1, RI_ALL_ONLINE }, |
205 | { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE }, | 252 | { 0xe2000, 1, RI_ALL_ONLINE }, { 0xe2004, 2047, RI_ALL_OFFLINE }, |
206 | { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE }, | 253 | { 0xf0000, 1, RI_ALL_ONLINE }, { 0xf0004, 16383, RI_ALL_OFFLINE }, |
207 | { 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE }, | 254 | { 0x101000, 12, RI_ALL_ONLINE }, { 0x101050, 1, RI_E1HE2E3_ONLINE }, |
208 | { 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE }, | 255 | { 0x101054, 3, RI_E2E3_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE }, |
209 | { 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE }, | 256 | { 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE }, |
210 | { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE }, | 257 | { 0x102068, 6, RI_E2E3_ONLINE }, { 0x102080, 17, RI_ALL_ONLINE }, |
211 | { 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE }, | 258 | { 0x1020c8, 8, RI_E1H_ONLINE }, { 0x1020e8, 9, RI_E2E3_ONLINE }, |
212 | { 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE }, | 259 | { 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE }, |
213 | { 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE }, | 260 | { 0x103098, 5, RI_E1HE2E3_ONLINE }, { 0x1030ac, 2, RI_E2E3_ONLINE }, |
214 | { 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE }, | 261 | { 0x1030b4, 1, RI_E2_ONLINE }, { 0x1030b8, 7, RI_E2E3_ONLINE }, |
215 | { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE }, | 262 | { 0x1030d8, 8, RI_E2E3_ONLINE }, { 0x103400, 1, RI_E2E3_ONLINE }, |
216 | { 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE }, | 263 | { 0x103404, 135, RI_E2E3_OFFLINE }, { 0x103800, 8, RI_ALL_ONLINE }, |
217 | { 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE }, | 264 | { 0x104000, 63, RI_ALL_ONLINE }, { 0x10411c, 16, RI_E2E3_ONLINE }, |
218 | { 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE }, | 265 | { 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE }, |
219 | { 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE }, | 266 | { 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE }, |
220 | { 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE }, | 267 | { 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 256, RI_ALL_ONLINE }, |
268 | { 0x105400, 768, RI_ALL_OFFLINE }, { 0x107000, 7, RI_E2E3_ONLINE }, | ||
269 | { 0x10701c, 1, RI_E3_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE }, | ||
221 | { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE }, | 270 | { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE }, |
222 | { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE }, | 271 | { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE }, |
223 | { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE }, | 272 | { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE }, |
224 | { 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE }, | 273 | { 0x110000, 111, RI_E2E3_ONLINE }, { 0x1101dc, 1, RI_E3_ONLINE }, |
225 | { 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE }, | 274 | { 0x110200, 4, RI_E2E3_ONLINE }, { 0x120000, 2, RI_ALL_ONLINE }, |
226 | { 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE }, | 275 | { 0x120008, 4, RI_ALL_ONLINE }, { 0x120018, 3, RI_ALL_ONLINE }, |
227 | { 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE }, | 276 | { 0x120024, 4, RI_ALL_ONLINE }, { 0x120034, 3, RI_ALL_ONLINE }, |
228 | { 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE }, | 277 | { 0x120040, 4, RI_ALL_ONLINE }, { 0x120050, 3, RI_ALL_ONLINE }, |
229 | { 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE }, | 278 | { 0x12005c, 4, RI_ALL_ONLINE }, { 0x12006c, 3, RI_ALL_ONLINE }, |
230 | { 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE }, | 279 | { 0x120078, 4, RI_ALL_ONLINE }, { 0x120088, 3, RI_ALL_ONLINE }, |
231 | { 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE }, | 280 | { 0x120094, 4, RI_ALL_ONLINE }, { 0x1200a4, 3, RI_ALL_ONLINE }, |
232 | { 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE }, | 281 | { 0x1200b0, 4, RI_ALL_ONLINE }, { 0x1200c0, 3, RI_ALL_ONLINE }, |
233 | { 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE }, | 282 | { 0x1200cc, 4, RI_ALL_ONLINE }, { 0x1200dc, 3, RI_ALL_ONLINE }, |
234 | { 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE }, | 283 | { 0x1200e8, 4, RI_ALL_ONLINE }, { 0x1200f8, 3, RI_ALL_ONLINE }, |
235 | { 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE }, | 284 | { 0x120104, 4, RI_ALL_ONLINE }, { 0x120114, 1, RI_ALL_ONLINE }, |
236 | { 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE }, | 285 | { 0x120118, 22, RI_ALL_ONLINE }, { 0x120170, 2, RI_E1E1H_ONLINE }, |
237 | { 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE }, | 286 | { 0x120178, 243, RI_ALL_ONLINE }, { 0x120544, 4, RI_E1E1H_ONLINE }, |
238 | { 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE }, | 287 | { 0x120554, 6, RI_ALL_ONLINE }, { 0x12059c, 6, RI_E1HE2E3_ONLINE }, |
239 | { 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE }, | 288 | { 0x1205b4, 1, RI_E1HE2E3_ONLINE }, { 0x1205b8, 15, RI_E1HE2E3_ONLINE }, |
240 | { 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE }, | 289 | { 0x1205f4, 1, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2E3_ONLINE }, |
241 | { 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE }, | 290 | { 0x120618, 1, RI_E2E3_ONLINE }, { 0x12061c, 20, RI_E1HE2E3_ONLINE }, |
242 | { 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE }, | 291 | { 0x12066c, 11, RI_E1HE2E3_ONLINE }, { 0x120698, 3, RI_E2E3_ONLINE }, |
243 | { 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE }, | 292 | { 0x1206a4, 1, RI_E2_ONLINE }, { 0x1206a8, 1, RI_E2E3_ONLINE }, |
244 | { 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE }, | 293 | { 0x1206b0, 75, RI_E2E3_ONLINE }, { 0x1207dc, 1, RI_E2_ONLINE }, |
245 | { 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE }, | 294 | { 0x1207fc, 1, RI_E2E3_ONLINE }, { 0x12080c, 65, RI_ALL_ONLINE }, |
246 | { 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE }, | 295 | { 0x120910, 7, RI_E2E3_ONLINE }, { 0x120930, 9, RI_E2E3_ONLINE }, |
247 | { 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE }, | 296 | { 0x12095c, 37, RI_E3_ONLINE }, { 0x120a00, 2, RI_E1E1HE2_ONLINE }, |
248 | { 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE }, | 297 | { 0x120b00, 1, RI_E3_ONLINE }, { 0x122000, 2, RI_ALL_ONLINE }, |
249 | { 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE }, | 298 | { 0x122008, 2046, RI_E1_OFFLINE }, { 0x128000, 2, RI_E1HE2E3_ONLINE }, |
250 | { 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE }, | 299 | { 0x128008, 6142, RI_E1HE2E3_OFFLINE }, |
251 | { 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE }, | 300 | { 0x130000, 35, RI_E2E3_ONLINE }, |
252 | { 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE }, | 301 | { 0x130100, 29, RI_E2E3_ONLINE }, { 0x130180, 1, RI_E2E3_ONLINE }, |
253 | { 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE }, | 302 | { 0x130200, 1, RI_E2E3_ONLINE }, { 0x130280, 1, RI_E2E3_ONLINE }, |
254 | { 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE }, | 303 | { 0x130300, 5, RI_E2E3_ONLINE }, { 0x130380, 1, RI_E2E3_ONLINE }, |
255 | { 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE }, | 304 | { 0x130400, 1, RI_E2E3_ONLINE }, { 0x130480, 5, RI_E2E3_ONLINE }, |
256 | { 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE }, | 305 | { 0x130800, 72, RI_E2E3_ONLINE }, { 0x131000, 136, RI_E2E3_ONLINE }, |
257 | { 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE }, | 306 | { 0x132000, 148, RI_E2E3_ONLINE }, { 0x134000, 544, RI_E2E3_ONLINE }, |
258 | { 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE }, | 307 | { 0x140000, 64, RI_ALL_ONLINE }, { 0x140100, 5, RI_E1E1H_ONLINE }, |
259 | { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE }, | 308 | { 0x140114, 45, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE }, |
260 | { 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE }, | 309 | { 0x140220, 4, RI_E2E3_ONLINE }, { 0x140240, 4, RI_E2E3_ONLINE }, |
261 | { 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE }, | 310 | { 0x140260, 4, RI_E2E3_ONLINE }, { 0x140280, 4, RI_E2E3_ONLINE }, |
262 | { 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE }, | 311 | { 0x1402a0, 4, RI_E2E3_ONLINE }, { 0x1402c0, 4, RI_E2E3_ONLINE }, |
263 | { 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE }, | 312 | { 0x1402e0, 13, RI_E2E3_ONLINE }, { 0x144000, 4, RI_E1E1H_ONLINE }, |
264 | { 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE }, | 313 | { 0x148000, 4, RI_E1E1H_ONLINE }, { 0x14c000, 4, RI_E1E1H_ONLINE }, |
265 | { 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE }, | 314 | { 0x150000, 4, RI_E1E1H_ONLINE }, { 0x154000, 4, RI_E1E1H_ONLINE }, |
266 | { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE }, | 315 | { 0x158000, 4, RI_E1E1H_ONLINE }, { 0x15c000, 2, RI_E1HE2E3_ONLINE }, |
267 | { 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE }, | 316 | { 0x15c008, 5, RI_E1H_ONLINE }, { 0x15c020, 27, RI_E2E3_ONLINE }, |
317 | { 0x15c090, 13, RI_E2E3_ONLINE }, { 0x15c0c8, 34, RI_E2E3_ONLINE }, | ||
318 | { 0x15c150, 4, RI_E3_ONLINE }, { 0x160004, 6, RI_E3_ONLINE }, | ||
319 | { 0x160040, 6, RI_E3_ONLINE }, { 0x16005c, 6, RI_E3_ONLINE }, | ||
320 | { 0x160078, 2, RI_E3_ONLINE }, { 0x160300, 8, RI_E3_ONLINE }, | ||
321 | { 0x160330, 6, RI_E3_ONLINE }, { 0x160404, 6, RI_E3_ONLINE }, | ||
322 | { 0x160440, 6, RI_E3_ONLINE }, { 0x16045c, 6, RI_E3_ONLINE }, | ||
323 | { 0x160478, 2, RI_E3_ONLINE }, { 0x160700, 8, RI_E3_ONLINE }, | ||
324 | { 0x160730, 6, RI_E3_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE }, | ||
325 | { 0x16103c, 2, RI_E2E3_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE }, | ||
326 | { 0x162000, 54, RI_E3_ONLINE }, { 0x162200, 60, RI_E3_ONLINE }, | ||
327 | { 0x162400, 54, RI_E3_ONLINE }, { 0x162600, 60, RI_E3_ONLINE }, | ||
328 | { 0x162800, 54, RI_E3_ONLINE }, { 0x162a00, 60, RI_E3_ONLINE }, | ||
329 | { 0x162c00, 54, RI_E3_ONLINE }, { 0x162e00, 60, RI_E3_ONLINE }, | ||
330 | { 0x163000, 1, RI_E3_ONLINE }, { 0x163008, 1, RI_E3_ONLINE }, | ||
331 | { 0x163010, 1, RI_E3_ONLINE }, { 0x163018, 1, RI_E3_ONLINE }, | ||
332 | { 0x163020, 5, RI_E3_ONLINE }, { 0x163038, 3, RI_E3_ONLINE }, | ||
333 | { 0x163048, 3, RI_E3_ONLINE }, { 0x163058, 1, RI_E3_ONLINE }, | ||
334 | { 0x163060, 1, RI_E3_ONLINE }, { 0x163068, 1, RI_E3_ONLINE }, | ||
335 | { 0x163070, 3, RI_E3_ONLINE }, { 0x163080, 1, RI_E3_ONLINE }, | ||
336 | { 0x163088, 3, RI_E3_ONLINE }, { 0x163098, 1, RI_E3_ONLINE }, | ||
337 | { 0x1630a0, 1, RI_E3_ONLINE }, { 0x1630a8, 1, RI_E3_ONLINE }, | ||
338 | { 0x1630c0, 1, RI_E3_ONLINE }, { 0x1630c8, 1, RI_E3_ONLINE }, | ||
339 | { 0x1630d0, 1, RI_E3_ONLINE }, { 0x1630d8, 1, RI_E3_ONLINE }, | ||
340 | { 0x1630e0, 2, RI_E3_ONLINE }, { 0x163110, 1, RI_E3_ONLINE }, | ||
341 | { 0x163120, 2, RI_E3_ONLINE }, { 0x163420, 4, RI_E3_ONLINE }, | ||
342 | { 0x163438, 2, RI_E3_ONLINE }, { 0x163488, 2, RI_E3_ONLINE }, | ||
343 | { 0x163520, 2, RI_E3_ONLINE }, { 0x163800, 1, RI_E3_ONLINE }, | ||
344 | { 0x163808, 1, RI_E3_ONLINE }, { 0x163810, 1, RI_E3_ONLINE }, | ||
345 | { 0x163818, 1, RI_E3_ONLINE }, { 0x163820, 5, RI_E3_ONLINE }, | ||
346 | { 0x163838, 3, RI_E3_ONLINE }, { 0x163848, 3, RI_E3_ONLINE }, | ||
347 | { 0x163858, 1, RI_E3_ONLINE }, { 0x163860, 1, RI_E3_ONLINE }, | ||
348 | { 0x163868, 1, RI_E3_ONLINE }, { 0x163870, 3, RI_E3_ONLINE }, | ||
349 | { 0x163880, 1, RI_E3_ONLINE }, { 0x163888, 3, RI_E3_ONLINE }, | ||
350 | { 0x163898, 1, RI_E3_ONLINE }, { 0x1638a0, 1, RI_E3_ONLINE }, | ||
351 | { 0x1638a8, 1, RI_E3_ONLINE }, { 0x1638c0, 1, RI_E3_ONLINE }, | ||
352 | { 0x1638c8, 1, RI_E3_ONLINE }, { 0x1638d0, 1, RI_E3_ONLINE }, | ||
353 | { 0x1638d8, 1, RI_E3_ONLINE }, { 0x1638e0, 2, RI_E3_ONLINE }, | ||
354 | { 0x163910, 1, RI_E3_ONLINE }, { 0x163920, 2, RI_E3_ONLINE }, | ||
355 | { 0x163c20, 4, RI_E3_ONLINE }, { 0x163c38, 2, RI_E3_ONLINE }, | ||
356 | { 0x163c88, 2, RI_E3_ONLINE }, { 0x163d20, 2, RI_E3_ONLINE }, | ||
357 | { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2E3_ONLINE }, | ||
358 | { 0x164118, 15, RI_E2E3_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE }, | ||
268 | { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE }, | 359 | { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE }, |
269 | { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE }, | 360 | { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE }, |
270 | { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE }, | 361 | { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE }, |
@@ -273,9 +364,9 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = { | |||
273 | { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE }, | 364 | { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE }, |
274 | { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE }, | 365 | { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE }, |
275 | { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE }, | 366 | { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE }, |
276 | { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE }, | 367 | { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2E3_ONLINE }, |
277 | { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE }, | 368 | { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE }, |
278 | { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE }, | 369 | { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2E3_ONLINE }, |
279 | { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE }, | 370 | { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE }, |
280 | { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE }, | 371 | { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE }, |
281 | { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE }, | 372 | { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE }, |
@@ -285,89 +376,94 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = { | |||
285 | { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE }, | 376 | { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE }, |
286 | { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE }, | 377 | { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE }, |
287 | { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE }, | 378 | { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE }, |
288 | { 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE }, | 379 | { 0x16e040, 8, RI_E2E3_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE }, |
289 | { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE }, | 380 | { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE }, |
290 | { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE }, | 381 | { 0x16e684, 2, RI_E1HE2E3_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE }, |
291 | { 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE }, | 382 | { 0x16e6bc, 4, RI_E1HE2E3_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE }, |
292 | { 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE }, | 383 | { 0x16e6e0, 12, RI_E2E3_ONLINE }, { 0x16e768, 17, RI_E2E3_ONLINE }, |
293 | { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE }, | 384 | { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE }, |
294 | { 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE }, | 385 | { 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2E3_ONLINE }, |
295 | { 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE }, | 386 | { 0x1701c4, 1, RI_E2E3_ONLINE }, { 0x1701cc, 7, RI_E2E3_ONLINE }, |
296 | { 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE }, | 387 | { 0x1701e8, 1, RI_E3_ONLINE }, { 0x1701ec, 1, RI_E2E3_ONLINE }, |
297 | { 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE }, | 388 | { 0x1701f4, 1, RI_E2E3_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE }, |
298 | { 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE }, | 389 | { 0x170214, 1, RI_ALL_ONLINE }, { 0x170218, 77, RI_E2E3_ONLINE }, |
299 | { 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE }, | 390 | { 0x170400, 64, RI_E2E3_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE }, |
300 | { 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE }, | 391 | { 0x180000, 61, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1HE2E3_ONLINE }, |
301 | { 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE }, | 392 | { 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE }, |
302 | { 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE }, | 393 | { 0x180380, 1, RI_E2E3_ONLINE }, { 0x180388, 1, RI_E2E3_ONLINE }, |
303 | { 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE }, | 394 | { 0x180390, 1, RI_E2E3_ONLINE }, { 0x180398, 1, RI_E2E3_ONLINE }, |
395 | { 0x1803a0, 5, RI_E2E3_ONLINE }, { 0x1803b4, 2, RI_E3_ONLINE }, | ||
304 | { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE }, | 396 | { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE }, |
305 | { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, | 397 | { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, |
306 | { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE }, | 398 | { 0x182000, 4, RI_E3_ONLINE }, { 0x1a0000, 1, RI_ALL_ONLINE }, |
307 | { 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE }, | 399 | { 0x1a0004, 5631, RI_ALL_OFFLINE }, |
308 | { 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE }, | 400 | { 0x1a5800, 2560, RI_E1HE2E3_OFFLINE }, |
309 | { 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE }, | 401 | { 0x1a8000, 1, RI_ALL_ONLINE }, { 0x1a8004, 8191, RI_E1HE2E3_OFFLINE }, |
310 | { 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE }, | 402 | { 0x1b0000, 1, RI_ALL_ONLINE }, { 0x1b0004, 15, RI_E1H_OFFLINE }, |
311 | { 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE }, | 403 | { 0x1b0040, 1, RI_E1HE2E3_ONLINE }, { 0x1b0044, 239, RI_E1H_OFFLINE }, |
312 | { 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE }, | 404 | { 0x1b0400, 1, RI_ALL_ONLINE }, { 0x1b0404, 255, RI_E1H_OFFLINE }, |
313 | { 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE }, | 405 | { 0x1b0800, 1, RI_ALL_ONLINE }, { 0x1b0840, 1, RI_E1HE2E3_ONLINE }, |
314 | { 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE }, | 406 | { 0x1b0c00, 1, RI_ALL_ONLINE }, { 0x1b1000, 1, RI_ALL_ONLINE }, |
315 | { 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE }, | 407 | { 0x1b1040, 1, RI_E1HE2E3_ONLINE }, { 0x1b1400, 1, RI_ALL_ONLINE }, |
316 | { 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE }, | 408 | { 0x1b1440, 1, RI_E1HE2E3_ONLINE }, { 0x1b1480, 1, RI_E1HE2E3_ONLINE }, |
317 | { 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE }, | 409 | { 0x1b14c0, 1, RI_E1HE2E3_ONLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE }, |
318 | { 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE }, | 410 | { 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_ONLINE }, |
319 | { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE }, | 411 | { 0x1b2400, 1, RI_E1HE2E3_ONLINE }, { 0x1b2404, 5631, RI_E2E3_OFFLINE }, |
320 | { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE }, | 412 | { 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE }, |
321 | { 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE }, | 413 | { 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE }, |
322 | { 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE }, | 414 | { 0x1b8100, 1, RI_ALL_ONLINE }, { 0x1b8140, 1, RI_ALL_ONLINE }, |
323 | { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE }, | 415 | { 0x1b8180, 1, RI_ALL_ONLINE }, { 0x1b81c0, 1, RI_ALL_ONLINE }, |
324 | { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE }, | 416 | { 0x1b8200, 1, RI_ALL_ONLINE }, { 0x1b8240, 1, RI_ALL_ONLINE }, |
325 | { 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE }, | 417 | { 0x1b8280, 1, RI_ALL_ONLINE }, { 0x1b82c0, 1, RI_ALL_ONLINE }, |
326 | { 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE }, | 418 | { 0x1b8300, 1, RI_ALL_ONLINE }, { 0x1b8340, 1, RI_ALL_ONLINE }, |
327 | { 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE }, | 419 | { 0x1b8380, 1, RI_ALL_ONLINE }, { 0x1b83c0, 1, RI_ALL_ONLINE }, |
328 | { 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE }, | 420 | { 0x1b8400, 1, RI_ALL_ONLINE }, { 0x1b8440, 1, RI_ALL_ONLINE }, |
329 | { 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE }, | 421 | { 0x1b8480, 1, RI_ALL_ONLINE }, { 0x1b84c0, 1, RI_ALL_ONLINE }, |
330 | { 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE }, | 422 | { 0x1b8500, 1, RI_ALL_ONLINE }, { 0x1b8540, 1, RI_ALL_ONLINE }, |
331 | { 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE }, | 423 | { 0x1b8580, 1, RI_ALL_ONLINE }, { 0x1b85c0, 19, RI_E2E3_ONLINE }, |
332 | { 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE }, | 424 | { 0x1b8800, 1, RI_ALL_ONLINE }, { 0x1b8840, 1, RI_ALL_ONLINE }, |
333 | { 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE }, | 425 | { 0x1b8880, 1, RI_ALL_ONLINE }, { 0x1b88c0, 1, RI_ALL_ONLINE }, |
334 | { 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE }, | 426 | { 0x1b8900, 1, RI_ALL_ONLINE }, { 0x1b8940, 1, RI_ALL_ONLINE }, |
335 | { 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE }, | 427 | { 0x1b8980, 1, RI_ALL_ONLINE }, { 0x1b89c0, 1, RI_ALL_ONLINE }, |
336 | { 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE }, | 428 | { 0x1b8a00, 1, RI_ALL_ONLINE }, { 0x1b8a40, 1, RI_ALL_ONLINE }, |
337 | { 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE }, | 429 | { 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1b8ac0, 1, RI_ALL_ONLINE }, |
338 | { 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE }, | 430 | { 0x1b8b00, 1, RI_ALL_ONLINE }, { 0x1b8b40, 1, RI_ALL_ONLINE }, |
339 | { 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE }, | 431 | { 0x1b8b80, 1, RI_ALL_ONLINE }, { 0x1b8bc0, 1, RI_ALL_ONLINE }, |
340 | { 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE }, | 432 | { 0x1b8c00, 1, RI_ALL_ONLINE }, { 0x1b8c40, 1, RI_ALL_ONLINE }, |
341 | { 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE }, | 433 | { 0x1b8c80, 1, RI_ALL_ONLINE }, { 0x1b8cc0, 1, RI_ALL_ONLINE }, |
342 | { 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE }, | 434 | { 0x1b8cc4, 1, RI_E2E3_ONLINE }, { 0x1b8d00, 1, RI_ALL_ONLINE }, |
343 | { 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE }, | 435 | { 0x1b8d40, 1, RI_ALL_ONLINE }, { 0x1b8d80, 1, RI_ALL_ONLINE }, |
344 | { 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE }, | 436 | { 0x1b8dc0, 1, RI_ALL_ONLINE }, { 0x1b8e00, 1, RI_ALL_ONLINE }, |
345 | { 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE }, | 437 | { 0x1b8e40, 1, RI_ALL_ONLINE }, { 0x1b8e80, 1, RI_ALL_ONLINE }, |
346 | { 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE }, | 438 | { 0x1b8e84, 1, RI_E2E3_ONLINE }, { 0x1b8ec0, 1, RI_E1HE2E3_ONLINE }, |
347 | { 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE }, | 439 | { 0x1b8f00, 1, RI_E1HE2E3_ONLINE }, { 0x1b8f40, 1, RI_E1HE2E3_ONLINE }, |
348 | { 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE }, | 440 | { 0x1b8f80, 1, RI_E1HE2E3_ONLINE }, { 0x1b8fc0, 1, RI_E1HE2E3_ONLINE }, |
349 | { 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE }, | 441 | { 0x1b8fc4, 2, RI_E2E3_ONLINE }, { 0x1b8fd0, 6, RI_E2E3_ONLINE }, |
350 | { 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE }, | 442 | { 0x1b8fe8, 2, RI_E3_ONLINE }, { 0x1b9000, 1, RI_E2E3_ONLINE }, |
351 | { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE }, | 443 | { 0x1b9040, 3, RI_E2E3_ONLINE }, { 0x1b905c, 1, RI_E3_ONLINE }, |
352 | { 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE }, | 444 | { 0x1b9400, 14, RI_E2E3_ONLINE }, { 0x1b943c, 19, RI_E2E3_ONLINE }, |
353 | { 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE }, | 445 | { 0x1b9490, 10, RI_E2E3_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE }, |
354 | { 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE }, | 446 | { 0x200000, 65, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1HE2E3_ONLINE }, |
355 | { 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE }, | 447 | { 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE }, |
356 | { 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE }, | 448 | { 0x200380, 1, RI_E2E3_ONLINE }, { 0x200388, 1, RI_E2E3_ONLINE }, |
357 | { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE }, | 449 | { 0x200390, 1, RI_E2E3_ONLINE }, { 0x200398, 1, RI_E2E3_ONLINE }, |
358 | { 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE }, | 450 | { 0x2003a0, 1, RI_E2E3_ONLINE }, { 0x2003a8, 2, RI_E2E3_ONLINE }, |
359 | { 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE}, | 451 | { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_E1E1H_OFFLINE }, |
360 | { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE }, | 452 | { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE }, |
453 | { 0x204000, 4, RI_E3_ONLINE }, { 0x220000, 1, RI_ALL_ONLINE }, | ||
454 | { 0x220004, 5631, RI_ALL_OFFLINE }, | ||
455 | { 0x225800, 2560, RI_E1HE2E3_OFFLINE }, | ||
456 | { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2E3_OFFLINE }, | ||
361 | { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE }, | 457 | { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE }, |
362 | { 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE }, | 458 | { 0x230040, 1, RI_E1HE2E3_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE }, |
363 | { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE }, | 459 | { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE }, |
364 | { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE }, | 460 | { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2E3_ONLINE }, |
365 | { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE }, | 461 | { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE }, |
366 | { 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE }, | 462 | { 0x231040, 1, RI_E1HE2E3_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE }, |
367 | { 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE }, | 463 | { 0x231440, 1, RI_E1HE2E3_ONLINE }, { 0x231480, 1, RI_E1HE2E3_ONLINE }, |
368 | { 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE }, | 464 | { 0x2314c0, 1, RI_E1HE2E3_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE }, |
369 | { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE }, | 465 | { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE }, |
370 | { 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE }, | 466 | { 0x232400, 1, RI_E1HE2E3_ONLINE }, { 0x232404, 5631, RI_E2E3_OFFLINE }, |
371 | { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE }, | 467 | { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE }, |
372 | { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE }, | 468 | { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE }, |
373 | { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE }, | 469 | { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE }, |
@@ -379,7 +475,7 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = { | |||
379 | { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE }, | 475 | { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE }, |
380 | { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE }, | 476 | { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE }, |
381 | { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE }, | 477 | { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE }, |
382 | { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE }, | 478 | { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2E3_ONLINE }, |
383 | { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE }, | 479 | { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE }, |
384 | { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE }, | 480 | { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE }, |
385 | { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE }, | 481 | { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE }, |
@@ -390,88 +486,91 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = { | |||
390 | { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE }, | 486 | { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE }, |
391 | { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE }, | 487 | { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE }, |
392 | { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE }, | 488 | { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE }, |
393 | { 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE }, | 489 | { 0x238cc4, 1, RI_E2E3_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE }, |
394 | { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE }, | 490 | { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE }, |
395 | { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE }, | 491 | { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE }, |
396 | { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE }, | 492 | { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE }, |
397 | { 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE }, | 493 | { 0x238e84, 1, RI_E2E3_ONLINE }, { 0x238ec0, 1, RI_E1HE2E3_ONLINE }, |
398 | { 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE }, | 494 | { 0x238f00, 1, RI_E1HE2E3_ONLINE }, { 0x238f40, 1, RI_E1HE2E3_ONLINE }, |
399 | { 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE }, | 495 | { 0x238f80, 1, RI_E1HE2E3_ONLINE }, { 0x238fc0, 1, RI_E1HE2E3_ONLINE }, |
400 | { 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE }, | 496 | { 0x238fc4, 2, RI_E2E3_ONLINE }, { 0x238fd0, 6, RI_E2E3_ONLINE }, |
401 | { 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE }, | 497 | { 0x238fe8, 2, RI_E3_ONLINE }, { 0x239000, 1, RI_E2E3_ONLINE }, |
498 | { 0x239040, 3, RI_E2E3_ONLINE }, { 0x23905c, 1, RI_E3_ONLINE }, | ||
402 | { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE }, | 499 | { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE }, |
403 | { 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE }, | 500 | { 0x28014c, 2, RI_E1HE2E3_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE }, |
404 | { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE }, | 501 | { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2E3_ONLINE }, |
405 | { 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE }, | 502 | { 0x280388, 1, RI_E2E3_ONLINE }, { 0x280390, 1, RI_E2E3_ONLINE }, |
406 | { 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE }, | 503 | { 0x280398, 1, RI_E2E3_ONLINE }, { 0x2803a0, 1, RI_E2E3_ONLINE }, |
407 | { 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE }, | 504 | { 0x2803a8, 2, RI_E2E3_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE }, |
408 | { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE }, | 505 | { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE }, |
409 | { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE }, | 506 | { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x284000, 4, RI_E3_ONLINE }, |
410 | { 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE}, | 507 | { 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 5631, RI_ALL_OFFLINE }, |
411 | { 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE }, | 508 | { 0x2a5800, 2560, RI_E1HE2E3_OFFLINE }, { 0x2a8000, 1, RI_ALL_ONLINE }, |
412 | { 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE }, | 509 | { 0x2a8004, 8191, RI_E1HE2E3_OFFLINE }, { 0x2b0000, 1, RI_ALL_ONLINE }, |
413 | { 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE }, | 510 | { 0x2b0004, 15, RI_E1H_OFFLINE }, { 0x2b0040, 1, RI_E1HE2E3_ONLINE }, |
414 | { 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE }, | 511 | { 0x2b0044, 239, RI_E1H_OFFLINE }, { 0x2b0400, 1, RI_ALL_ONLINE }, |
415 | { 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE }, | 512 | { 0x2b0404, 255, RI_E1H_OFFLINE }, { 0x2b0800, 1, RI_ALL_ONLINE }, |
416 | { 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE }, | 513 | { 0x2b0840, 1, RI_E1HE2E3_ONLINE }, { 0x2b0c00, 1, RI_ALL_ONLINE }, |
417 | { 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE }, | 514 | { 0x2b1000, 1, RI_ALL_ONLINE }, { 0x2b1040, 1, RI_E1HE2E3_ONLINE }, |
418 | { 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE }, | 515 | { 0x2b1400, 1, RI_ALL_ONLINE }, { 0x2b1440, 1, RI_E1HE2E3_ONLINE }, |
419 | { 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE }, | 516 | { 0x2b1480, 1, RI_E1HE2E3_ONLINE }, { 0x2b14c0, 1, RI_E1HE2E3_ONLINE }, |
420 | { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE }, | 517 | { 0x2b1800, 128, RI_ALL_OFFLINE }, { 0x2b1c00, 128, RI_ALL_OFFLINE }, |
421 | { 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE }, | 518 | { 0x2b2000, 1, RI_ALL_ONLINE }, { 0x2b2400, 1, RI_E1HE2E3_ONLINE }, |
422 | { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE }, | 519 | { 0x2b2404, 5631, RI_E2E3_OFFLINE }, { 0x2b8000, 1, RI_ALL_ONLINE }, |
423 | { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE }, | 520 | { 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE }, |
424 | { 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE }, | 521 | { 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x2b8100, 1, RI_ALL_ONLINE }, |
425 | { 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE }, | 522 | { 0x2b8140, 1, RI_ALL_ONLINE }, { 0x2b8180, 1, RI_ALL_ONLINE }, |
426 | { 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE }, | 523 | { 0x2b81c0, 1, RI_ALL_ONLINE }, { 0x2b8200, 1, RI_ALL_ONLINE }, |
427 | { 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE }, | 524 | { 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE }, |
428 | { 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE }, | 525 | { 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8300, 1, RI_ALL_ONLINE }, |
429 | { 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE }, | 526 | { 0x2b8340, 1, RI_ALL_ONLINE }, { 0x2b8380, 1, RI_ALL_ONLINE }, |
430 | { 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE }, | 527 | { 0x2b83c0, 1, RI_ALL_ONLINE }, { 0x2b8400, 1, RI_ALL_ONLINE }, |
431 | { 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE }, | 528 | { 0x2b8440, 1, RI_ALL_ONLINE }, { 0x2b8480, 1, RI_ALL_ONLINE }, |
432 | { 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE }, | 529 | { 0x2b84c0, 1, RI_ALL_ONLINE }, { 0x2b8500, 1, RI_ALL_ONLINE }, |
433 | { 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE }, | 530 | { 0x2b8540, 1, RI_ALL_ONLINE }, { 0x2b8580, 1, RI_ALL_ONLINE }, |
434 | { 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE }, | 531 | { 0x2b85c0, 19, RI_E2E3_ONLINE }, { 0x2b8800, 1, RI_ALL_ONLINE }, |
435 | { 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE }, | 532 | { 0x2b8840, 1, RI_ALL_ONLINE }, { 0x2b8880, 1, RI_ALL_ONLINE }, |
436 | { 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE }, | 533 | { 0x2b88c0, 1, RI_ALL_ONLINE }, { 0x2b8900, 1, RI_ALL_ONLINE }, |
437 | { 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE }, | 534 | { 0x2b8940, 1, RI_ALL_ONLINE }, { 0x2b8980, 1, RI_ALL_ONLINE }, |
438 | { 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE }, | 535 | { 0x2b89c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE }, |
439 | { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE }, | 536 | { 0x2b8a40, 1, RI_ALL_ONLINE }, { 0x2b8a80, 1, RI_ALL_ONLINE }, |
440 | { 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE }, | 537 | { 0x2b8ac0, 1, RI_ALL_ONLINE }, { 0x2b8b00, 1, RI_ALL_ONLINE }, |
441 | { 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE }, | 538 | { 0x2b8b40, 1, RI_ALL_ONLINE }, { 0x2b8b80, 1, RI_ALL_ONLINE }, |
442 | { 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE }, | 539 | { 0x2b8bc0, 1, RI_ALL_ONLINE }, { 0x2b8c00, 1, RI_ALL_ONLINE }, |
443 | { 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE }, | 540 | { 0x2b8c40, 1, RI_ALL_ONLINE }, { 0x2b8c80, 1, RI_ALL_ONLINE }, |
444 | { 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE }, | 541 | { 0x2b8cc0, 1, RI_ALL_ONLINE }, { 0x2b8cc4, 1, RI_E2E3_ONLINE }, |
445 | { 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE }, | 542 | { 0x2b8d00, 1, RI_ALL_ONLINE }, { 0x2b8d40, 1, RI_ALL_ONLINE }, |
446 | { 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE }, | 543 | { 0x2b8d80, 1, RI_ALL_ONLINE }, { 0x2b8dc0, 1, RI_ALL_ONLINE }, |
447 | { 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE }, | 544 | { 0x2b8e00, 1, RI_ALL_ONLINE }, { 0x2b8e40, 1, RI_ALL_ONLINE }, |
448 | { 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE }, | 545 | { 0x2b8e80, 1, RI_ALL_ONLINE }, { 0x2b8e84, 1, RI_E2E3_ONLINE }, |
449 | { 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE }, | 546 | { 0x2b8ec0, 1, RI_E1HE2E3_ONLINE }, { 0x2b8f00, 1, RI_E1HE2E3_ONLINE }, |
450 | { 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE }, | 547 | { 0x2b8f40, 1, RI_E1HE2E3_ONLINE }, { 0x2b8f80, 1, RI_E1HE2E3_ONLINE }, |
451 | { 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE }, | 548 | { 0x2b8fc0, 1, RI_E1HE2E3_ONLINE }, { 0x2b8fc4, 2, RI_E2E3_ONLINE }, |
452 | { 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE }, | 549 | { 0x2b8fd0, 6, RI_E2E3_ONLINE }, { 0x2b8fe8, 2, RI_E3_ONLINE }, |
453 | { 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE }, | 550 | { 0x2b9000, 1, RI_E2E3_ONLINE }, { 0x2b9040, 3, RI_E2E3_ONLINE }, |
454 | { 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE }, | 551 | { 0x2b905c, 1, RI_E3_ONLINE }, { 0x2b9400, 14, RI_E2E3_ONLINE }, |
455 | { 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE }, | 552 | { 0x2b943c, 19, RI_E2E3_ONLINE }, { 0x2b9490, 10, RI_E2E3_ONLINE }, |
456 | { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE }, | 553 | { 0x2c0000, 2, RI_ALL_ONLINE }, { 0x300000, 65, RI_ALL_ONLINE }, |
457 | { 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE }, | 554 | { 0x30014c, 2, RI_E1HE2E3_ONLINE }, { 0x300200, 58, RI_ALL_ONLINE }, |
458 | { 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE }, | 555 | { 0x300340, 4, RI_ALL_ONLINE }, { 0x300380, 1, RI_E2E3_ONLINE }, |
459 | { 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE }, | 556 | { 0x300388, 1, RI_E2E3_ONLINE }, { 0x300390, 1, RI_E2E3_ONLINE }, |
460 | { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE }, | 557 | { 0x300398, 1, RI_E2E3_ONLINE }, { 0x3003a0, 1, RI_E2E3_ONLINE }, |
461 | { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE }, | 558 | { 0x3003a8, 2, RI_E2E3_ONLINE }, { 0x300400, 1, RI_ALL_ONLINE }, |
559 | { 0x300404, 255, RI_E1E1H_OFFLINE }, { 0x302000, 4, RI_ALL_ONLINE }, | ||
560 | { 0x302010, 2044, RI_ALL_OFFLINE }, { 0x304000, 4, RI_E3_ONLINE }, | ||
462 | { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE }, | 561 | { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE }, |
463 | { 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE }, | 562 | { 0x325800, 2560, RI_E1HE2E3_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE }, |
464 | { 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE }, | 563 | { 0x328004, 8191, RI_E1HE2E3_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE }, |
465 | { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE }, | 564 | { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2E3_ONLINE }, |
466 | { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE }, | 565 | { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE }, |
467 | { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE }, | 566 | { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE }, |
468 | { 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE }, | 567 | { 0x330840, 1, RI_E1HE2E3_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE }, |
469 | { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE }, | 568 | { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2E3_ONLINE }, |
470 | { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE }, | 569 | { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2E3_ONLINE }, |
471 | { 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE }, | 570 | { 0x331480, 1, RI_E1HE2E3_ONLINE }, { 0x3314c0, 1, RI_E1HE2E3_ONLINE }, |
472 | { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE }, | 571 | { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE }, |
473 | { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE }, | 572 | { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2E3_ONLINE }, |
474 | { 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE }, | 573 | { 0x332404, 5631, RI_E2E3_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE }, |
475 | { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, | 574 | { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, |
476 | { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE }, | 575 | { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE }, |
477 | { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE }, | 576 | { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE }, |
@@ -483,7 +582,7 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = { | |||
483 | { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE }, | 582 | { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE }, |
484 | { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE }, | 583 | { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE }, |
485 | { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE }, | 584 | { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE }, |
486 | { 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE }, | 585 | { 0x3385c0, 19, RI_E2E3_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE }, |
487 | { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE }, | 586 | { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE }, |
488 | { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE }, | 587 | { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE }, |
489 | { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE }, | 588 | { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE }, |
@@ -493,35 +592,48 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = { | |||
493 | { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE }, | 592 | { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE }, |
494 | { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE }, | 593 | { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE }, |
495 | { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE }, | 594 | { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE }, |
496 | { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE }, | 595 | { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2E3_ONLINE }, |
497 | { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE }, | 596 | { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE }, |
498 | { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE }, | 597 | { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE }, |
499 | { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE }, | 598 | { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE }, |
500 | { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE }, | 599 | { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2E3_ONLINE }, |
501 | { 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE }, | 600 | { 0x338ec0, 1, RI_E1HE2E3_ONLINE }, { 0x338f00, 1, RI_E1HE2E3_ONLINE }, |
502 | { 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE }, | 601 | { 0x338f40, 1, RI_E1HE2E3_ONLINE }, { 0x338f80, 1, RI_E1HE2E3_ONLINE }, |
503 | { 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE }, | 602 | { 0x338fc0, 1, RI_E1HE2E3_ONLINE }, { 0x338fc4, 2, RI_E2E3_ONLINE }, |
504 | { 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE }, | 603 | { 0x338fd0, 6, RI_E2E3_ONLINE }, { 0x338fe8, 2, RI_E3_ONLINE }, |
505 | { 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE }, | 604 | { 0x339000, 1, RI_E2E3_ONLINE }, { 0x339040, 3, RI_E2E3_ONLINE }, |
605 | { 0x33905c, 1, RI_E3_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE }, | ||
506 | }; | 606 | }; |
607 | #define REGS_COUNT ARRAY_SIZE(reg_addrs) | ||
507 | 608 | ||
508 | #define IDLE_REGS_COUNT 237 | 609 | static const struct reg_addr idle_addrs[] = { |
509 | static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { | ||
510 | { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE }, | 610 | { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE }, |
511 | { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE }, | 611 | { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE }, |
512 | { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE }, | 612 | { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE }, |
513 | { 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE }, | 613 | { 0x285c, 1, RI_ALL_ONLINE }, { 0x3040, 1, RI_ALL_ONLINE }, |
514 | { 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE }, | 614 | { 0x9010, 7, RI_E2E3_ONLINE }, { 0x9030, 1, RI_E2E3_ONLINE }, |
515 | { 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE }, | 615 | { 0x9068, 16, RI_E2E3_ONLINE }, { 0x9230, 2, RI_E2E3_ONLINE }, |
516 | { 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE }, | 616 | { 0x9244, 1, RI_E2E3_ONLINE }, { 0x9298, 1, RI_E2E3_ONLINE }, |
517 | { 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE }, | 617 | { 0x92a8, 1, RI_E2E3_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE }, |
518 | { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE }, | 618 | { 0xa3c4, 1, RI_E1HE2E3_ONLINE }, { 0xa404, 3, RI_ALL_ONLINE }, |
519 | { 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE }, | 619 | { 0xa42c, 12, RI_ALL_ONLINE }, { 0xa600, 5, RI_E1HE2E3_ONLINE }, |
520 | { 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE }, | 620 | { 0xa618, 1, RI_E1HE2E3_ONLINE }, { 0xa714, 1, RI_E2E3_ONLINE }, |
521 | { 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE }, | 621 | { 0xa720, 1, RI_E2E3_ONLINE }, { 0xa750, 1, RI_E2E3_ONLINE }, |
522 | { 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE }, | 622 | { 0xc09c, 1, RI_E1E1H_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE }, |
523 | { 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE }, | 623 | { 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE }, |
524 | { 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE }, | 624 | { 0x10418, 1, RI_ALL_ONLINE }, { 0x10420, 1, RI_ALL_ONLINE }, |
625 | { 0x10428, 1, RI_ALL_ONLINE }, { 0x10460, 1, RI_ALL_ONLINE }, | ||
626 | { 0x10474, 1, RI_ALL_ONLINE }, { 0x104e0, 1, RI_ALL_ONLINE }, | ||
627 | { 0x104ec, 1, RI_ALL_ONLINE }, { 0x104f8, 1, RI_ALL_ONLINE }, | ||
628 | { 0x10508, 1, RI_ALL_ONLINE }, { 0x10530, 1, RI_ALL_ONLINE }, | ||
629 | { 0x10538, 1, RI_ALL_ONLINE }, { 0x10548, 1, RI_ALL_ONLINE }, | ||
630 | { 0x10558, 1, RI_ALL_ONLINE }, { 0x182a8, 1, RI_E2E3_ONLINE }, | ||
631 | { 0x182b8, 1, RI_E2E3_ONLINE }, { 0x18308, 1, RI_E2E3_ONLINE }, | ||
632 | { 0x18318, 1, RI_E2E3_ONLINE }, { 0x18338, 1, RI_E2E3_ONLINE }, | ||
633 | { 0x18348, 1, RI_E2E3_ONLINE }, { 0x183bc, 1, RI_E2E3_ONLINE }, | ||
634 | { 0x183cc, 1, RI_E2E3_ONLINE }, { 0x18570, 1, RI_E3_ONLINE }, | ||
635 | { 0x18578, 1, RI_E3_ONLINE }, { 0x1858c, 1, RI_E3_ONLINE }, | ||
636 | { 0x18594, 1, RI_E3_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE }, | ||
525 | { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE }, | 637 | { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE }, |
526 | { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE }, | 638 | { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE }, |
527 | { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE }, | 639 | { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE }, |
@@ -551,8 +663,8 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { | |||
551 | { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE }, | 663 | { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE }, |
552 | { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE }, | 664 | { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE }, |
553 | { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE }, | 665 | { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE }, |
554 | { 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE }, | 666 | { 0x10309c, 2, RI_E1HE2E3_ONLINE }, { 0x1030b8, 2, RI_E2E3_ONLINE }, |
555 | { 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE }, | 667 | { 0x1030cc, 1, RI_E2E3_ONLINE }, { 0x1030e0, 1, RI_E2E3_ONLINE }, |
556 | { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE }, | 668 | { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE }, |
557 | { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE }, | 669 | { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE }, |
558 | { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE }, | 670 | { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE }, |
@@ -563,28 +675,27 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { | |||
563 | { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE }, | 675 | { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE }, |
564 | { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE }, | 676 | { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE }, |
565 | { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE }, | 677 | { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE }, |
566 | { 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE }, | 678 | { 0x120608, 1, RI_E1HE2E3_ONLINE }, { 0x120778, 2, RI_E2E3_ONLINE }, |
567 | { 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE }, | 679 | { 0x120808, 3, RI_ALL_ONLINE }, { 0x120818, 1, RI_ALL_ONLINE }, |
568 | { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE }, | 680 | { 0x120820, 1, RI_ALL_ONLINE }, { 0x120828, 1, RI_ALL_ONLINE }, |
569 | { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE }, | 681 | { 0x120830, 1, RI_ALL_ONLINE }, { 0x120838, 1, RI_ALL_ONLINE }, |
570 | { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE }, | 682 | { 0x120840, 1, RI_ALL_ONLINE }, { 0x120848, 1, RI_ALL_ONLINE }, |
571 | { 0x120848, 1, RI_ALL_ONLINE }, { 0x120850, 1, RI_ALL_ONLINE }, | 683 | { 0x120850, 1, RI_ALL_ONLINE }, { 0x120858, 1, RI_ALL_ONLINE }, |
572 | { 0x120858, 1, RI_ALL_ONLINE }, { 0x120860, 1, RI_ALL_ONLINE }, | 684 | { 0x120860, 1, RI_ALL_ONLINE }, { 0x120868, 1, RI_ALL_ONLINE }, |
573 | { 0x120868, 1, RI_ALL_ONLINE }, { 0x120870, 1, RI_ALL_ONLINE }, | 685 | { 0x120870, 1, RI_ALL_ONLINE }, { 0x120878, 1, RI_ALL_ONLINE }, |
574 | { 0x120878, 1, RI_ALL_ONLINE }, { 0x120880, 1, RI_ALL_ONLINE }, | 686 | { 0x120880, 1, RI_ALL_ONLINE }, { 0x120888, 1, RI_ALL_ONLINE }, |
575 | { 0x120888, 1, RI_ALL_ONLINE }, { 0x120890, 1, RI_ALL_ONLINE }, | 687 | { 0x120890, 1, RI_ALL_ONLINE }, { 0x120898, 1, RI_ALL_ONLINE }, |
576 | { 0x120898, 1, RI_ALL_ONLINE }, { 0x1208a0, 1, RI_ALL_ONLINE }, | 688 | { 0x1208a0, 1, RI_ALL_ONLINE }, { 0x1208a8, 1, RI_ALL_ONLINE }, |
577 | { 0x1208a8, 1, RI_ALL_ONLINE }, { 0x1208b0, 1, RI_ALL_ONLINE }, | 689 | { 0x1208b0, 1, RI_ALL_ONLINE }, { 0x1208b8, 1, RI_ALL_ONLINE }, |
578 | { 0x1208b8, 1, RI_ALL_ONLINE }, { 0x1208c0, 1, RI_ALL_ONLINE }, | 690 | { 0x1208c0, 1, RI_ALL_ONLINE }, { 0x1208c8, 1, RI_ALL_ONLINE }, |
579 | { 0x1208c8, 1, RI_ALL_ONLINE }, { 0x1208d0, 1, RI_ALL_ONLINE }, | 691 | { 0x1208d0, 1, RI_ALL_ONLINE }, { 0x1208d8, 1, RI_ALL_ONLINE }, |
580 | { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE }, | 692 | { 0x1208e0, 1, RI_ALL_ONLINE }, { 0x1208e8, 1, RI_ALL_ONLINE }, |
581 | { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE }, | 693 | { 0x1208f0, 1, RI_ALL_ONLINE }, { 0x1208f8, 1, RI_ALL_ONLINE }, |
582 | { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE }, | 694 | { 0x120900, 1, RI_ALL_ONLINE }, { 0x120908, 1, RI_ALL_ONLINE }, |
583 | { 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE }, | 695 | { 0x130030, 1, RI_E2E3_ONLINE }, { 0x13004c, 3, RI_E2E3_ONLINE }, |
584 | { 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE }, | 696 | { 0x130064, 2, RI_E2E3_ONLINE }, { 0x13009c, 1, RI_E2E3_ONLINE }, |
585 | { 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE }, | 697 | { 0x130130, 1, RI_E2E3_ONLINE }, { 0x13016c, 1, RI_E2E3_ONLINE }, |
586 | { 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE }, | 698 | { 0x130300, 1, RI_E2E3_ONLINE }, { 0x130480, 1, RI_E2E3_ONLINE }, |
587 | { 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE }, | ||
588 | { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE }, | 699 | { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE }, |
589 | { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE }, | 700 | { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE }, |
590 | { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE }, | 701 | { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE }, |
@@ -602,8 +713,8 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { | |||
602 | { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE }, | 713 | { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE }, |
603 | { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE }, | 714 | { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE }, |
604 | { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE }, | 715 | { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE }, |
605 | { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE }, | 716 | { 0x16e684, 2, RI_E1HE2E3_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE }, |
606 | { 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE }, | 717 | { 0x16e6fc, 4, RI_E2E3_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE }, |
607 | { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE }, | 718 | { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE }, |
608 | { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE }, | 719 | { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE }, |
609 | { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE }, | 720 | { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE }, |
@@ -627,51 +738,61 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { | |||
627 | { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, | 738 | { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, |
628 | { 0x3380c0, 1, RI_ALL_ONLINE } | 739 | { 0x3380c0, 1, RI_ALL_ONLINE } |
629 | }; | 740 | }; |
741 | #define IDLE_REGS_COUNT ARRAY_SIZE(idle_addrs) | ||
630 | 742 | ||
631 | #define WREGS_COUNT_E1 1 | ||
632 | static const u32 read_reg_e1_0[] = { 0x1b1000 }; | 743 | static const u32 read_reg_e1_0[] = { 0x1b1000 }; |
744 | #define WREGS_COUNT_E1 ARRAY_SIZE(read_reg_e1_0) | ||
633 | 745 | ||
634 | static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = { | 746 | static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = { |
635 | { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE } | 747 | { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE } |
636 | }; | 748 | }; |
637 | 749 | ||
638 | #define WREGS_COUNT_E1H 1 | ||
639 | static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 }; | 750 | static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 }; |
751 | #define WREGS_COUNT_E1H ARRAY_SIZE(read_reg_e1h_0) | ||
640 | 752 | ||
641 | static const struct wreg_addr wreg_addrs_e1h[WREGS_COUNT_E1H] = { | 753 | static const struct wreg_addr wreg_addrs_e1h[WREGS_COUNT_E1H] = { |
642 | { 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE } | 754 | { 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE } |
643 | }; | 755 | }; |
644 | 756 | ||
645 | #define WREGS_COUNT_E2 1 | ||
646 | static const u32 read_reg_e2_0[] = { 0x1b1040, 0x1b1000 }; | 757 | static const u32 read_reg_e2_0[] = { 0x1b1040, 0x1b1000 }; |
758 | #define WREGS_COUNT_E2 ARRAY_SIZE(read_reg_e2_0) | ||
647 | 759 | ||
648 | static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = { | 760 | static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = { |
649 | { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE } | 761 | { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE } |
650 | }; | 762 | }; |
651 | 763 | ||
652 | static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a }; | 764 | static const u32 read_reg_e3_0[] = { 0x1b1040, 0x1b1000 }; |
765 | #define WREGS_COUNT_E3 ARRAY_SIZE(read_reg_e3_0) | ||
766 | |||
767 | static const struct wreg_addr wreg_addrs_e3[WREGS_COUNT_E3] = { | ||
768 | { 0x1b0c00, 128, 2, read_reg_e3_0, RI_E3_OFFLINE } }; | ||
769 | |||
770 | static const struct dump_sign dump_sign_all = { 0x4dbe9fca, 0x60011, 0x3a }; | ||
653 | 771 | ||
654 | #define TIMER_REGS_COUNT_E1 2 | 772 | static const u32 timer_status_regs_e1[] = { 0x164014, 0x164018 }; |
773 | #define TIMER_REGS_COUNT_E1 ARRAY_SIZE(timer_status_regs_e1) | ||
655 | 774 | ||
656 | static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = { | ||
657 | 0x164014, 0x164018 }; | ||
658 | static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = { | 775 | static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = { |
659 | 0x1640d0, 0x1640d4 }; | 776 | 0x1640d0, 0x1640d4 }; |
660 | 777 | ||
661 | #define TIMER_REGS_COUNT_E1H 2 | 778 | static const u32 timer_status_regs_e1h[] = { 0x164014, 0x164018 }; |
779 | #define TIMER_REGS_COUNT_E1H ARRAY_SIZE(timer_status_regs_e1h) | ||
662 | 780 | ||
663 | static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = { | ||
664 | 0x164014, 0x164018 }; | ||
665 | static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = { | 781 | static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = { |
666 | 0x1640d0, 0x1640d4 }; | 782 | 0x1640d0, 0x1640d4 }; |
667 | 783 | ||
668 | #define TIMER_REGS_COUNT_E2 2 | 784 | static const u32 timer_status_regs_e2[] = { 0x164014, 0x164018 }; |
785 | #define TIMER_REGS_COUNT_E2 ARRAY_SIZE(timer_status_regs_e2) | ||
669 | 786 | ||
670 | static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = { | ||
671 | 0x164014, 0x164018 }; | ||
672 | static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = { | 787 | static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = { |
673 | 0x1640d0, 0x1640d4 }; | 788 | 0x1640d0, 0x1640d4 }; |
674 | 789 | ||
790 | static const u32 timer_status_regs_e3[] = { 0x164014, 0x164018 }; | ||
791 | #define TIMER_REGS_COUNT_E3 ARRAY_SIZE(timer_status_regs_e3) | ||
792 | |||
793 | static const u32 timer_scan_regs_e3[TIMER_REGS_COUNT_E3] = { | ||
794 | 0x1640d0, 0x1640d4 }; | ||
795 | |||
675 | #define PAGE_MODE_VALUES_E1 0 | 796 | #define PAGE_MODE_VALUES_E1 0 |
676 | 797 | ||
677 | #define PAGE_READ_REGS_E1 0 | 798 | #define PAGE_READ_REGS_E1 0 |
@@ -682,7 +803,8 @@ static const u32 page_vals_e1[] = { 0 }; | |||
682 | 803 | ||
683 | static const u32 page_write_regs_e1[] = { 0 }; | 804 | static const u32 page_write_regs_e1[] = { 0 }; |
684 | 805 | ||
685 | static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } }; | 806 | static const struct reg_addr page_read_regs_e1[] = { |
807 | { 0x0, 0, RI_E1_ONLINE } }; | ||
686 | 808 | ||
687 | #define PAGE_MODE_VALUES_E1H 0 | 809 | #define PAGE_MODE_VALUES_E1H 0 |
688 | 810 | ||
@@ -697,17 +819,24 @@ static const u32 page_write_regs_e1h[] = { 0 }; | |||
697 | static const struct reg_addr page_read_regs_e1h[] = { | 819 | static const struct reg_addr page_read_regs_e1h[] = { |
698 | { 0x0, 0, RI_E1H_ONLINE } }; | 820 | { 0x0, 0, RI_E1H_ONLINE } }; |
699 | 821 | ||
700 | #define PAGE_MODE_VALUES_E2 2 | 822 | static const u32 page_vals_e2[] = { 0, 128 }; |
823 | #define PAGE_MODE_VALUES_E2 ARRAY_SIZE(page_vals_e2) | ||
701 | 824 | ||
702 | #define PAGE_READ_REGS_E2 1 | 825 | static const u32 page_write_regs_e2[] = { 328476 }; |
826 | #define PAGE_WRITE_REGS_E2 ARRAY_SIZE(page_write_regs_e2) | ||
703 | 827 | ||
704 | #define PAGE_WRITE_REGS_E2 1 | 828 | static const struct reg_addr page_read_regs_e2[] = { |
829 | { 0x58000, 4608, RI_E2_ONLINE } }; | ||
830 | #define PAGE_READ_REGS_E2 ARRAY_SIZE(page_read_regs_e2) | ||
705 | 831 | ||
706 | static const u32 page_vals_e2[PAGE_MODE_VALUES_E2] = { 0, 128 }; | 832 | static const u32 page_vals_e3[] = { 0, 128 }; |
833 | #define PAGE_MODE_VALUES_E3 ARRAY_SIZE(page_vals_e3) | ||
707 | 834 | ||
708 | static const u32 page_write_regs_e2[PAGE_WRITE_REGS_E2] = { 328476 }; | 835 | static const u32 page_write_regs_e3[] = { 328476 }; |
836 | #define PAGE_WRITE_REGS_E3 ARRAY_SIZE(page_write_regs_e3) | ||
709 | 837 | ||
710 | static const struct reg_addr page_read_regs_e2[PAGE_READ_REGS_E2] = { | 838 | static const struct reg_addr page_read_regs_e3[] = { |
711 | { 0x58000, 4608, RI_E2_ONLINE } }; | 839 | { 0x58000, 4608, RI_E3_ONLINE } }; |
840 | #define PAGE_READ_REGS_E3 ARRAY_SIZE(page_read_regs_e3) | ||
712 | 841 | ||
713 | #endif /* BNX2X_DUMP_H */ | 842 | #endif /* BNX2X_DUMP_H */ |
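The bnx2x_dump.h hunks above do two things: they replace hand-maintained count macros (REGS_COUNT, IDLE_REGS_COUNT, WREGS_COUNT_*, TIMER_REGS_COUNT_*, PAGE_*_REGS_*) with ARRAY_SIZE over the tables themselves, and they widen the per-entry info flags from E2-only (RI_E2_*) to combined E2/E3 masks (RI_E2E3_*) so the new E3 chips reuse the same dump entries. A minimal sketch of that pattern is below; the flag values and field types are illustrative stand-ins, not the driver's actual definitions, and the kernel's own ARRAY_SIZE additionally carries a compile-time array check.

```c
#include <stddef.h>

/* sizeof-based element count; the kernel macro also adds a
 * compile-time check that the argument really is an array. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/* Illustrative chip/presence bits -- not the driver's real values. */
#define RI_E1		0x1
#define RI_E1H		0x2
#define RI_E2		0x4
#define RI_E3		0x8
#define RI_ONLINE	0x100
#define RI_E2E3_ONLINE	(RI_E2 | RI_E3 | RI_ONLINE)
#define RI_ALL_ONLINE	(RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)

struct reg_addr {
	unsigned int addr;	/* first register offset */
	unsigned int size;	/* number of 32-bit words to dump */
	unsigned int info;	/* chip mask plus online/offline flag */
};

static const struct reg_addr example_regs[] = {
	{ 0x130000, 35, RI_E2E3_ONLINE },	/* E2/E3-only block */
	{ 0x140000, 64, RI_ALL_ONLINE },	/* present on all chips */
};
/* Count derived from the table itself, so adding or removing an entry
 * can no longer drift out of sync with a hand-maintained #define. */
#define EXAMPLE_REGS_COUNT ARRAY_SIZE(example_regs)
```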
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c index 727fe89ff37f..1a3ed418946d 100644 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/bnx2x/bnx2x_ethtool.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include "bnx2x_cmn.h" | 25 | #include "bnx2x_cmn.h" |
26 | #include "bnx2x_dump.h" | 26 | #include "bnx2x_dump.h" |
27 | #include "bnx2x_init.h" | 27 | #include "bnx2x_init.h" |
28 | #include "bnx2x_sp.h" | ||
28 | 29 | ||
29 | /* Note: in the format strings below %s is replaced by the queue-name which is | 30 | /* Note: in the format strings below %s is replaced by the queue-name which is |
30 | * either its index or 'fcoe' for the fcoe queue. Make sure the format string | 31 | * either its index or 'fcoe' for the fcoe queue. Make sure the format string |
@@ -37,8 +38,6 @@ static const struct { | |||
37 | char string[ETH_GSTRING_LEN]; | 38 | char string[ETH_GSTRING_LEN]; |
38 | } bnx2x_q_stats_arr[] = { | 39 | } bnx2x_q_stats_arr[] = { |
39 | /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" }, | 40 | /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" }, |
40 | { Q_STATS_OFFSET32(error_bytes_received_hi), | ||
41 | 8, "[%s]: rx_error_bytes" }, | ||
42 | { Q_STATS_OFFSET32(total_unicast_packets_received_hi), | 41 | { Q_STATS_OFFSET32(total_unicast_packets_received_hi), |
43 | 8, "[%s]: rx_ucast_packets" }, | 42 | 8, "[%s]: rx_ucast_packets" }, |
44 | { Q_STATS_OFFSET32(total_multicast_packets_received_hi), | 43 | { Q_STATS_OFFSET32(total_multicast_packets_received_hi), |
@@ -52,13 +51,18 @@ static const struct { | |||
52 | 4, "[%s]: rx_skb_alloc_discard" }, | 51 | 4, "[%s]: rx_skb_alloc_discard" }, |
53 | { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" }, | 52 | { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" }, |
54 | 53 | ||
55 | /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" }, | 54 | { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" }, |
56 | { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), | 55 | /* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), |
57 | 8, "[%s]: tx_ucast_packets" }, | 56 | 8, "[%s]: tx_ucast_packets" }, |
58 | { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), | 57 | { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), |
59 | 8, "[%s]: tx_mcast_packets" }, | 58 | 8, "[%s]: tx_mcast_packets" }, |
60 | { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), | 59 | { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), |
61 | 8, "[%s]: tx_bcast_packets" } | 60 | 8, "[%s]: tx_bcast_packets" }, |
61 | { Q_STATS_OFFSET32(total_tpa_aggregations_hi), | ||
62 | 8, "[%s]: tpa_aggregations" }, | ||
63 | { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), | ||
64 | 8, "[%s]: tpa_aggregated_frames"}, | ||
65 | { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"} | ||
62 | }; | 66 | }; |
63 | 67 | ||
64 | #define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr) | 68 | #define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr) |
@@ -98,8 +102,8 @@ static const struct { | |||
98 | 8, STATS_FLAGS_BOTH, "rx_discards" }, | 102 | 8, STATS_FLAGS_BOTH, "rx_discards" }, |
99 | { STATS_OFFSET32(mac_filter_discard), | 103 | { STATS_OFFSET32(mac_filter_discard), |
100 | 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, | 104 | 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, |
101 | { STATS_OFFSET32(xxoverflow_discard), | 105 | { STATS_OFFSET32(mf_tag_discard), |
102 | 4, STATS_FLAGS_PORT, "rx_fw_discards" }, | 106 | 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, |
103 | { STATS_OFFSET32(brb_drop_hi), | 107 | { STATS_OFFSET32(brb_drop_hi), |
104 | 8, STATS_FLAGS_PORT, "rx_brb_discard" }, | 108 | 8, STATS_FLAGS_PORT, "rx_brb_discard" }, |
105 | { STATS_OFFSET32(brb_truncate_hi), | 109 | { STATS_OFFSET32(brb_truncate_hi), |
@@ -158,10 +162,43 @@ static const struct { | |||
158 | { STATS_OFFSET32(etherstatspktsover1522octets_hi), | 162 | { STATS_OFFSET32(etherstatspktsover1522octets_hi), |
159 | 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, | 163 | 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, |
160 | { STATS_OFFSET32(pause_frames_sent_hi), | 164 | { STATS_OFFSET32(pause_frames_sent_hi), |
161 | 8, STATS_FLAGS_PORT, "tx_pause_frames" } | 165 | 8, STATS_FLAGS_PORT, "tx_pause_frames" }, |
166 | { STATS_OFFSET32(total_tpa_aggregations_hi), | ||
167 | 8, STATS_FLAGS_FUNC, "tpa_aggregations" }, | ||
168 | { STATS_OFFSET32(total_tpa_aggregated_frames_hi), | ||
169 | 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"}, | ||
170 | { STATS_OFFSET32(total_tpa_bytes_hi), | ||
171 | 8, STATS_FLAGS_FUNC, "tpa_bytes"} | ||
162 | }; | 172 | }; |
163 | 173 | ||
164 | #define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) | 174 | #define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) |
175 | static int bnx2x_get_port_type(struct bnx2x *bp) | ||
176 | { | ||
177 | int port_type; | ||
178 | u32 phy_idx = bnx2x_get_cur_phy_idx(bp); | ||
179 | switch (bp->link_params.phy[phy_idx].media_type) { | ||
180 | case ETH_PHY_SFP_FIBER: | ||
181 | case ETH_PHY_XFP_FIBER: | ||
182 | case ETH_PHY_KR: | ||
183 | case ETH_PHY_CX4: | ||
184 | port_type = PORT_FIBRE; | ||
185 | break; | ||
186 | case ETH_PHY_DA_TWINAX: | ||
187 | port_type = PORT_DA; | ||
188 | break; | ||
189 | case ETH_PHY_BASE_T: | ||
190 | port_type = PORT_TP; | ||
191 | break; | ||
192 | case ETH_PHY_NOT_PRESENT: | ||
193 | port_type = PORT_NONE; | ||
194 | break; | ||
195 | case ETH_PHY_UNSPECIFIED: | ||
196 | default: | ||
197 | port_type = PORT_OTHER; | ||
198 | break; | ||
199 | } | ||
200 | return port_type; | ||
201 | } | ||
165 | 202 | ||
166 | static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 203 | static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
167 | { | 204 | { |
@@ -188,12 +225,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
188 | if (IS_MF(bp)) | 225 | if (IS_MF(bp)) |
189 | ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp)); | 226 | ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp)); |
190 | 227 | ||
191 | if (bp->port.supported[cfg_idx] & SUPPORTED_TP) | 228 | cmd->port = bnx2x_get_port_type(bp); |
192 | cmd->port = PORT_TP; | ||
193 | else if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE) | ||
194 | cmd->port = PORT_FIBRE; | ||
195 | else | ||
196 | BNX2X_ERR("XGXS PHY Failure detected\n"); | ||
197 | 229 | ||
198 | cmd->phy_address = bp->mdio.prtad; | 230 | cmd->phy_address = bp->mdio.prtad; |
199 | cmd->transceiver = XCVR_INTERNAL; | 231 | cmd->transceiver = XCVR_INTERNAL; |
@@ -494,7 +526,7 @@ static int bnx2x_get_regs_len(struct net_device *dev) | |||
494 | if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info)) | 526 | if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info)) |
495 | regdump_len += wreg_addrs_e1h[i].size * | 527 | regdump_len += wreg_addrs_e1h[i].size * |
496 | (1 + wreg_addrs_e1h[i].read_regs_count); | 528 | (1 + wreg_addrs_e1h[i].read_regs_count); |
497 | } else if (CHIP_IS_E2(bp)) { | 529 | } else if (!CHIP_IS_E1x(bp)) { |
498 | for (i = 0; i < REGS_COUNT; i++) | 530 | for (i = 0; i < REGS_COUNT; i++) |
499 | if (IS_E2_ONLINE(reg_addrs[i].info)) | 531 | if (IS_E2_ONLINE(reg_addrs[i].info)) |
500 | regdump_len += reg_addrs[i].size; | 532 | regdump_len += reg_addrs[i].size; |
@@ -566,7 +598,7 @@ static void bnx2x_get_regs(struct net_device *dev, | |||
566 | dump_hdr.info = RI_E1_ONLINE; | 598 | dump_hdr.info = RI_E1_ONLINE; |
567 | else if (CHIP_IS_E1H(bp)) | 599 | else if (CHIP_IS_E1H(bp)) |
568 | dump_hdr.info = RI_E1H_ONLINE; | 600 | dump_hdr.info = RI_E1H_ONLINE; |
569 | else if (CHIP_IS_E2(bp)) | 601 | else if (!CHIP_IS_E1x(bp)) |
570 | dump_hdr.info = RI_E2_ONLINE | | 602 | dump_hdr.info = RI_E2_ONLINE | |
571 | (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP); | 603 | (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP); |
572 | 604 | ||
@@ -587,23 +619,24 @@ static void bnx2x_get_regs(struct net_device *dev, | |||
587 | *p++ = REG_RD(bp, | 619 | *p++ = REG_RD(bp, |
588 | reg_addrs[i].addr + j*4); | 620 | reg_addrs[i].addr + j*4); |
589 | 621 | ||
590 | } else if (CHIP_IS_E2(bp)) { | 622 | } else if (!CHIP_IS_E1x(bp)) { |
591 | for (i = 0; i < REGS_COUNT; i++) | 623 | for (i = 0; i < REGS_COUNT; i++) |
592 | if (IS_E2_ONLINE(reg_addrs[i].info)) | 624 | if (IS_E2_ONLINE(reg_addrs[i].info)) |
593 | for (j = 0; j < reg_addrs[i].size; j++) | 625 | for (j = 0; j < reg_addrs[i].size; j++) |
594 | *p++ = REG_RD(bp, | 626 | *p++ = REG_RD(bp, |
595 | reg_addrs[i].addr + j*4); | 627 | reg_addrs[i].addr + j*4); |
596 | 628 | ||
597 | bnx2x_read_pages_regs_e2(bp, p); | 629 | if (CHIP_IS_E2(bp)) |
630 | bnx2x_read_pages_regs_e2(bp, p); | ||
631 | else | ||
632 | /* E3 paged registers read is unimplemented yet */ | ||
633 | WARN_ON(1); | ||
598 | } | 634 | } |
599 | /* Re-enable parity attentions */ | 635 | /* Re-enable parity attentions */ |
600 | bnx2x_clear_blocks_parity(bp); | 636 | bnx2x_clear_blocks_parity(bp); |
601 | if (CHIP_PARITY_ENABLED(bp)) | 637 | bnx2x_enable_blocks_parity(bp); |
602 | bnx2x_enable_blocks_parity(bp); | ||
603 | } | 638 | } |
604 | 639 | ||
605 | #define PHY_FW_VER_LEN 20 | ||
606 | |||
607 | static void bnx2x_get_drvinfo(struct net_device *dev, | 640 | static void bnx2x_get_drvinfo(struct net_device *dev, |
608 | struct ethtool_drvinfo *info) | 641 | struct ethtool_drvinfo *info) |
609 | { | 642 | { |
@@ -682,8 +715,12 @@ static void bnx2x_set_msglevel(struct net_device *dev, u32 level) | |||
682 | { | 715 | { |
683 | struct bnx2x *bp = netdev_priv(dev); | 716 | struct bnx2x *bp = netdev_priv(dev); |
684 | 717 | ||
685 | if (capable(CAP_NET_ADMIN)) | 718 | if (capable(CAP_NET_ADMIN)) { |
719 | /* dump MCP trace */ | ||
720 | if (level & BNX2X_MSG_MCP) | ||
721 | bnx2x_fw_dump_lvl(bp, KERN_INFO); | ||
686 | bp->msg_enable = level; | 722 | bp->msg_enable = level; |
723 | } | ||
687 | } | 724 | } |
688 | 725 | ||
689 | static int bnx2x_nway_reset(struct net_device *dev) | 726 | static int bnx2x_nway_reset(struct net_device *dev) |
@@ -725,7 +762,7 @@ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) | |||
725 | u32 val = 0; | 762 | u32 val = 0; |
726 | 763 | ||
727 | /* adjust timeout for emulation/FPGA */ | 764 | /* adjust timeout for emulation/FPGA */ |
728 | count = NVRAM_TIMEOUT_COUNT; | 765 | count = BNX2X_NVRAM_TIMEOUT_COUNT; |
729 | if (CHIP_REV_IS_SLOW(bp)) | 766 | if (CHIP_REV_IS_SLOW(bp)) |
730 | count *= 100; | 767 | count *= 100; |
731 | 768 | ||
@@ -756,7 +793,7 @@ static int bnx2x_release_nvram_lock(struct bnx2x *bp) | |||
756 | u32 val = 0; | 793 | u32 val = 0; |
757 | 794 | ||
758 | /* adjust timeout for emulation/FPGA */ | 795 | /* adjust timeout for emulation/FPGA */ |
759 | count = NVRAM_TIMEOUT_COUNT; | 796 | count = BNX2X_NVRAM_TIMEOUT_COUNT; |
760 | if (CHIP_REV_IS_SLOW(bp)) | 797 | if (CHIP_REV_IS_SLOW(bp)) |
761 | count *= 100; | 798 | count *= 100; |
762 | 799 | ||
@@ -824,7 +861,7 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val, | |||
824 | REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); | 861 | REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); |
825 | 862 | ||
826 | /* adjust timeout for emulation/FPGA */ | 863 | /* adjust timeout for emulation/FPGA */ |
827 | count = NVRAM_TIMEOUT_COUNT; | 864 | count = BNX2X_NVRAM_TIMEOUT_COUNT; |
828 | if (CHIP_REV_IS_SLOW(bp)) | 865 | if (CHIP_REV_IS_SLOW(bp)) |
829 | count *= 100; | 866 | count *= 100; |
830 | 867 | ||
@@ -947,7 +984,7 @@ static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, | |||
947 | REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); | 984 | REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); |
948 | 985 | ||
949 | /* adjust timeout for emulation/FPGA */ | 986 | /* adjust timeout for emulation/FPGA */ |
950 | count = NVRAM_TIMEOUT_COUNT; | 987 | count = BNX2X_NVRAM_TIMEOUT_COUNT; |
951 | if (CHIP_REV_IS_SLOW(bp)) | 988 | if (CHIP_REV_IS_SLOW(bp)) |
952 | count *= 100; | 989 | count *= 100; |
953 | 990 | ||
@@ -1051,9 +1088,9 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, | |||
1051 | while ((written_so_far < buf_size) && (rc == 0)) { | 1088 | while ((written_so_far < buf_size) && (rc == 0)) { |
1052 | if (written_so_far == (buf_size - sizeof(u32))) | 1089 | if (written_so_far == (buf_size - sizeof(u32))) |
1053 | cmd_flags |= MCPR_NVM_COMMAND_LAST; | 1090 | cmd_flags |= MCPR_NVM_COMMAND_LAST; |
1054 | else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) | 1091 | else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0) |
1055 | cmd_flags |= MCPR_NVM_COMMAND_LAST; | 1092 | cmd_flags |= MCPR_NVM_COMMAND_LAST; |
1056 | else if ((offset % NVRAM_PAGE_SIZE) == 0) | 1093 | else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0) |
1057 | cmd_flags |= MCPR_NVM_COMMAND_FIRST; | 1094 | cmd_flags |= MCPR_NVM_COMMAND_FIRST; |
1058 | 1095 | ||
1059 | memcpy(&val, data_buf, 4); | 1096 | memcpy(&val, data_buf, 4); |
@@ -1212,7 +1249,6 @@ static int bnx2x_set_ringparam(struct net_device *dev, | |||
1212 | struct ethtool_ringparam *ering) | 1249 | struct ethtool_ringparam *ering) |
1213 | { | 1250 | { |
1214 | struct bnx2x *bp = netdev_priv(dev); | 1251 | struct bnx2x *bp = netdev_priv(dev); |
1215 | int rc = 0; | ||
1216 | 1252 | ||
1217 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | 1253 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { |
1218 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | 1254 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); |
@@ -1229,12 +1265,7 @@ static int bnx2x_set_ringparam(struct net_device *dev, | |||
1229 | bp->rx_ring_size = ering->rx_pending; | 1265 | bp->rx_ring_size = ering->rx_pending; |
1230 | bp->tx_ring_size = ering->tx_pending; | 1266 | bp->tx_ring_size = ering->tx_pending; |
1231 | 1267 | ||
1232 | if (netif_running(dev)) { | 1268 | return bnx2x_reload_if_running(dev); |
1233 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
1234 | rc = bnx2x_nic_load(bp, LOAD_NORMAL); | ||
1235 | } | ||
1236 | |||
1237 | return rc; | ||
1238 | } | 1269 | } |
1239 | 1270 | ||
1240 | static void bnx2x_get_pauseparam(struct net_device *dev, | 1271 | static void bnx2x_get_pauseparam(struct net_device *dev, |
@@ -1313,60 +1344,129 @@ static const struct { | |||
1313 | { "idle check (online)" } | 1344 | { "idle check (online)" } |
1314 | }; | 1345 | }; |
1315 | 1346 | ||
1347 | enum { | ||
1348 | BNX2X_CHIP_E1_OFST = 0, | ||
1349 | BNX2X_CHIP_E1H_OFST, | ||
1350 | BNX2X_CHIP_E2_OFST, | ||
1351 | BNX2X_CHIP_E3_OFST, | ||
1352 | BNX2X_CHIP_E3B0_OFST, | ||
1353 | BNX2X_CHIP_MAX_OFST | ||
1354 | }; | ||
1355 | |||
1356 | #define BNX2X_CHIP_MASK_E1 (1 << BNX2X_CHIP_E1_OFST) | ||
1357 | #define BNX2X_CHIP_MASK_E1H (1 << BNX2X_CHIP_E1H_OFST) | ||
1358 | #define BNX2X_CHIP_MASK_E2 (1 << BNX2X_CHIP_E2_OFST) | ||
1359 | #define BNX2X_CHIP_MASK_E3 (1 << BNX2X_CHIP_E3_OFST) | ||
1360 | #define BNX2X_CHIP_MASK_E3B0 (1 << BNX2X_CHIP_E3B0_OFST) | ||
1361 | |||
1362 | #define BNX2X_CHIP_MASK_ALL ((1 << BNX2X_CHIP_MAX_OFST) - 1) | ||
1363 | #define BNX2X_CHIP_MASK_E1X (BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H) | ||
1364 | |||
1316 | static int bnx2x_test_registers(struct bnx2x *bp) | 1365 | static int bnx2x_test_registers(struct bnx2x *bp) |
1317 | { | 1366 | { |
1318 | int idx, i, rc = -ENODEV; | 1367 | int idx, i, rc = -ENODEV; |
1319 | u32 wr_val = 0; | 1368 | u32 wr_val = 0, hw; |
1320 | int port = BP_PORT(bp); | 1369 | int port = BP_PORT(bp); |
1321 | static const struct { | 1370 | static const struct { |
1371 | u32 hw; | ||
1322 | u32 offset0; | 1372 | u32 offset0; |
1323 | u32 offset1; | 1373 | u32 offset1; |
1324 | u32 mask; | 1374 | u32 mask; |
1325 | } reg_tbl[] = { | 1375 | } reg_tbl[] = { |
1326 | /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, | 1376 | /* 0 */ { BNX2X_CHIP_MASK_ALL, |
1327 | { DORQ_REG_DB_ADDR0, 4, 0xffffffff }, | 1377 | BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, |
1328 | { HC_REG_AGG_INT_0, 4, 0x000003ff }, | 1378 | { BNX2X_CHIP_MASK_ALL, |
1329 | { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 }, | 1379 | DORQ_REG_DB_ADDR0, 4, 0xffffffff }, |
1330 | { PBF_REG_P0_INIT_CRD, 4, 0x000007ff }, | 1380 | { BNX2X_CHIP_MASK_E1X, |
1331 | { PRS_REG_CID_PORT_0, 4, 0x00ffffff }, | 1381 | HC_REG_AGG_INT_0, 4, 0x000003ff }, |
1332 | { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff }, | 1382 | { BNX2X_CHIP_MASK_ALL, |
1333 | { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, | 1383 | PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 }, |
1334 | { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff }, | 1384 | { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3, |
1335 | { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, | 1385 | PBF_REG_P0_INIT_CRD, 4, 0x000007ff }, |
1336 | /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff }, | 1386 | { BNX2X_CHIP_MASK_E3B0, |
1337 | { QM_REG_CONNNUM_0, 4, 0x000fffff }, | 1387 | PBF_REG_INIT_CRD_Q0, 4, 0x000007ff }, |
1338 | { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff }, | 1388 | { BNX2X_CHIP_MASK_ALL, |
1339 | { SRC_REG_KEYRSS0_0, 40, 0xffffffff }, | 1389 | PRS_REG_CID_PORT_0, 4, 0x00ffffff }, |
1340 | { SRC_REG_KEYRSS0_7, 40, 0xffffffff }, | 1390 | { BNX2X_CHIP_MASK_ALL, |
1341 | { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 }, | 1391 | PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff }, |
1342 | { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 }, | 1392 | { BNX2X_CHIP_MASK_ALL, |
1343 | { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff }, | 1393 | PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, |
1344 | { NIG_REG_LLH0_T_BIT, 4, 0x00000001 }, | 1394 | { BNX2X_CHIP_MASK_ALL, |
1345 | { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 }, | 1395 | PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff }, |
1346 | /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 }, | 1396 | /* 10 */ { BNX2X_CHIP_MASK_ALL, |
1347 | { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 }, | 1397 | PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, |
1348 | { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 }, | 1398 | { BNX2X_CHIP_MASK_ALL, |
1349 | { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 }, | 1399 | PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff }, |
1350 | { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff }, | 1400 | { BNX2X_CHIP_MASK_ALL, |
1351 | { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff }, | 1401 | QM_REG_CONNNUM_0, 4, 0x000fffff }, |
1352 | { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff }, | 1402 | { BNX2X_CHIP_MASK_ALL, |
1353 | { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff }, | 1403 | TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff }, |
1354 | { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 }, | 1404 | { BNX2X_CHIP_MASK_ALL, |
1355 | { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff }, | 1405 | SRC_REG_KEYRSS0_0, 40, 0xffffffff }, |
1356 | /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff }, | 1406 | { BNX2X_CHIP_MASK_ALL, |
1357 | { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff }, | 1407 | SRC_REG_KEYRSS0_7, 40, 0xffffffff }, |
1358 | { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 }, | 1408 | { BNX2X_CHIP_MASK_ALL, |
1359 | { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 }, | 1409 | XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 }, |
1360 | { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff }, | 1410 | { BNX2X_CHIP_MASK_ALL, |
1361 | { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 }, | 1411 | XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 }, |
1362 | { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f }, | 1412 | { BNX2X_CHIP_MASK_ALL, |
1363 | 1413 | XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff }, | |
1364 | { 0xffffffff, 0, 0x00000000 } | 1414 | { BNX2X_CHIP_MASK_ALL, |
1415 | NIG_REG_LLH0_T_BIT, 4, 0x00000001 }, | ||
1416 | /* 20 */ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, | ||
1417 | NIG_REG_EMAC0_IN_EN, 4, 0x00000001 }, | ||
1418 | { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, | ||
1419 | NIG_REG_BMAC0_IN_EN, 4, 0x00000001 }, | ||
1420 | { BNX2X_CHIP_MASK_ALL, | ||
1421 | NIG_REG_XCM0_OUT_EN, 4, 0x00000001 }, | ||
1422 | { BNX2X_CHIP_MASK_ALL, | ||
1423 | NIG_REG_BRB0_OUT_EN, 4, 0x00000001 }, | ||
1424 | { BNX2X_CHIP_MASK_ALL, | ||
1425 | NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 }, | ||
1426 | { BNX2X_CHIP_MASK_ALL, | ||
1427 | NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff }, | ||
1428 | { BNX2X_CHIP_MASK_ALL, | ||
1429 | NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff }, | ||
1430 | { BNX2X_CHIP_MASK_ALL, | ||
1431 | NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff }, | ||
1432 | { BNX2X_CHIP_MASK_ALL, | ||
1433 | NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff }, | ||
1434 | { BNX2X_CHIP_MASK_ALL, | ||
1435 | NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 }, | ||
1436 | /* 30 */ { BNX2X_CHIP_MASK_ALL, | ||
1437 | NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff }, | ||
1438 | { BNX2X_CHIP_MASK_ALL, | ||
1439 | NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff }, | ||
1440 | { BNX2X_CHIP_MASK_ALL, | ||
1441 | NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff }, | ||
1442 | { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, | ||
1443 | NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 }, | ||
1444 | { BNX2X_CHIP_MASK_ALL, | ||
1445 | NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001}, | ||
1446 | { BNX2X_CHIP_MASK_ALL, | ||
1447 | NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff }, | ||
1448 | { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, | ||
1449 | NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 }, | ||
1450 | { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, | ||
1451 | NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f }, | ||
1452 | |||
1453 | { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 } | ||
1365 | }; | 1454 | }; |
1366 | 1455 | ||
1367 | if (!netif_running(bp->dev)) | 1456 | if (!netif_running(bp->dev)) |
1368 | return rc; | 1457 | return rc; |
1369 | 1458 | ||
1459 | if (CHIP_IS_E1(bp)) | ||
1460 | hw = BNX2X_CHIP_MASK_E1; | ||
1461 | else if (CHIP_IS_E1H(bp)) | ||
1462 | hw = BNX2X_CHIP_MASK_E1H; | ||
1463 | else if (CHIP_IS_E2(bp)) | ||
1464 | hw = BNX2X_CHIP_MASK_E2; | ||
1465 | else if (CHIP_IS_E3B0(bp)) | ||
1466 | hw = BNX2X_CHIP_MASK_E3B0; | ||
1467 | else /* e3 A0 */ | ||
1468 | hw = BNX2X_CHIP_MASK_E3; | ||
1469 | |||
1370 | /* Repeat the test twice: | 1470 | /* Repeat the test twice: |
1371 | First by writing 0x00000000, second by writing 0xffffffff */ | 1471 | First by writing 0x00000000, second by writing 0xffffffff */ |
1372 | for (idx = 0; idx < 2; idx++) { | 1472 | for (idx = 0; idx < 2; idx++) { |
@@ -1382,8 +1482,7 @@ static int bnx2x_test_registers(struct bnx2x *bp) | |||
1382 | 1482 | ||
1383 | for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { | 1483 | for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { |
1384 | u32 offset, mask, save_val, val; | 1484 | u32 offset, mask, save_val, val; |
1385 | if (CHIP_IS_E2(bp) && | 1485 | if (!(hw & reg_tbl[i].hw)) |
1386 | reg_tbl[i].offset0 == HC_REG_AGG_INT_0) | ||
1387 | continue; | 1486 | continue; |
1388 | 1487 | ||
1389 | offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; | 1488 | offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; |
@@ -1400,7 +1499,7 @@ static int bnx2x_test_registers(struct bnx2x *bp) | |||
1400 | 1499 | ||
1401 | /* verify value is as expected */ | 1500 | /* verify value is as expected */ |
1402 | if ((val & mask) != (wr_val & mask)) { | 1501 | if ((val & mask) != (wr_val & mask)) { |
1403 | DP(NETIF_MSG_PROBE, | 1502 | DP(NETIF_MSG_HW, |
1404 | "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n", | 1503 | "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n", |
1405 | offset, val, wr_val, mask); | 1504 | offset, val, wr_val, mask); |
1406 | goto test_reg_exit; | 1505 | goto test_reg_exit; |
@@ -1417,7 +1516,7 @@ test_reg_exit: | |||
1417 | static int bnx2x_test_memory(struct bnx2x *bp) | 1516 | static int bnx2x_test_memory(struct bnx2x *bp) |
1418 | { | 1517 | { |
1419 | int i, j, rc = -ENODEV; | 1518 | int i, j, rc = -ENODEV; |
1420 | u32 val; | 1519 | u32 val, index; |
1421 | static const struct { | 1520 | static const struct { |
1422 | u32 offset; | 1521 | u32 offset; |
1423 | int size; | 1522 | int size; |
@@ -1432,32 +1531,44 @@ static int bnx2x_test_memory(struct bnx2x *bp) | |||
1432 | 1531 | ||
1433 | { 0xffffffff, 0 } | 1532 | { 0xffffffff, 0 } |
1434 | }; | 1533 | }; |
1534 | |||
1435 | static const struct { | 1535 | static const struct { |
1436 | char *name; | 1536 | char *name; |
1437 | u32 offset; | 1537 | u32 offset; |
1438 | u32 e1_mask; | 1538 | u32 hw_mask[BNX2X_CHIP_MAX_OFST]; |
1439 | u32 e1h_mask; | ||
1440 | u32 e2_mask; | ||
1441 | } prty_tbl[] = { | 1539 | } prty_tbl[] = { |
1442 | { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0, 0 }, | 1540 | { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, |
1443 | { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2, 0 }, | 1541 | {0x3ffc0, 0, 0, 0} }, |
1444 | { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0, 0 }, | 1542 | { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, |
1445 | { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0, 0 }, | 1543 | {0x2, 0x2, 0, 0} }, |
1446 | { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0, 0 }, | 1544 | { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, |
1447 | { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0, 0 }, | 1545 | {0, 0, 0, 0} }, |
1448 | 1546 | { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, | |
1449 | { NULL, 0xffffffff, 0, 0, 0 } | 1547 | {0x3ffc0, 0, 0, 0} }, |
1548 | { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, | ||
1549 | {0x3ffc0, 0, 0, 0} }, | ||
1550 | { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, | ||
1551 | {0x3ffc1, 0, 0, 0} }, | ||
1552 | |||
1553 | { NULL, 0xffffffff, {0, 0, 0, 0} } | ||
1450 | }; | 1554 | }; |
1451 | 1555 | ||
1452 | if (!netif_running(bp->dev)) | 1556 | if (!netif_running(bp->dev)) |
1453 | return rc; | 1557 | return rc; |
1454 | 1558 | ||
1559 | if (CHIP_IS_E1(bp)) | ||
1560 | index = BNX2X_CHIP_E1_OFST; | ||
1561 | else if (CHIP_IS_E1H(bp)) | ||
1562 | index = BNX2X_CHIP_E1H_OFST; | ||
1563 | else if (CHIP_IS_E2(bp)) | ||
1564 | index = BNX2X_CHIP_E2_OFST; | ||
1565 | else /* e3 */ | ||
1566 | index = BNX2X_CHIP_E3_OFST; | ||
1567 | |||
1455 | /* pre-Check the parity status */ | 1568 | /* pre-Check the parity status */ |
1456 | for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { | 1569 | for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { |
1457 | val = REG_RD(bp, prty_tbl[i].offset); | 1570 | val = REG_RD(bp, prty_tbl[i].offset); |
1458 | if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) || | 1571 | if (val & ~(prty_tbl[i].hw_mask[index])) { |
1459 | (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) || | ||
1460 | (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) { | ||
1461 | DP(NETIF_MSG_HW, | 1572 | DP(NETIF_MSG_HW, |
1462 | "%s is 0x%x\n", prty_tbl[i].name, val); | 1573 | "%s is 0x%x\n", prty_tbl[i].name, val); |
1463 | goto test_mem_exit; | 1574 | goto test_mem_exit; |
@@ -1472,9 +1583,7 @@ static int bnx2x_test_memory(struct bnx2x *bp) | |||
1472 | /* Check the parity status */ | 1583 | /* Check the parity status */ |
1473 | for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { | 1584 | for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { |
1474 | val = REG_RD(bp, prty_tbl[i].offset); | 1585 | val = REG_RD(bp, prty_tbl[i].offset); |
1475 | if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) || | 1586 | if (val & ~(prty_tbl[i].hw_mask[index])) { |
1476 | (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) || | ||
1477 | (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) { | ||
1478 | DP(NETIF_MSG_HW, | 1587 | DP(NETIF_MSG_HW, |
1479 | "%s is 0x%x\n", prty_tbl[i].name, val); | 1588 | "%s is 0x%x\n", prty_tbl[i].name, val); |
1480 | goto test_mem_exit; | 1589 | goto test_mem_exit; |
@@ -1491,12 +1600,16 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes) | |||
1491 | { | 1600 | { |
1492 | int cnt = 1400; | 1601 | int cnt = 1400; |
1493 | 1602 | ||
1494 | if (link_up) | 1603 | if (link_up) { |
1495 | while (bnx2x_link_test(bp, is_serdes) && cnt--) | 1604 | while (bnx2x_link_test(bp, is_serdes) && cnt--) |
1496 | msleep(10); | 1605 | msleep(20); |
1606 | |||
1607 | if (cnt <= 0 && bnx2x_link_test(bp, is_serdes)) | ||
1608 | DP(NETIF_MSG_LINK, "Timeout waiting for link up\n"); | ||
1609 | } | ||
1497 | } | 1610 | } |
1498 | 1611 | ||
1499 | static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | 1612 | static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) |
1500 | { | 1613 | { |
1501 | unsigned int pkt_size, num_pkts, i; | 1614 | unsigned int pkt_size, num_pkts, i; |
1502 | struct sk_buff *skb; | 1615 | struct sk_buff *skb; |
@@ -1505,14 +1618,14 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
1505 | struct bnx2x_fastpath *fp_tx = &bp->fp[0]; | 1618 | struct bnx2x_fastpath *fp_tx = &bp->fp[0]; |
1506 | u16 tx_start_idx, tx_idx; | 1619 | u16 tx_start_idx, tx_idx; |
1507 | u16 rx_start_idx, rx_idx; | 1620 | u16 rx_start_idx, rx_idx; |
1508 | u16 pkt_prod, bd_prod; | 1621 | u16 pkt_prod, bd_prod, rx_comp_cons; |
1509 | struct sw_tx_bd *tx_buf; | 1622 | struct sw_tx_bd *tx_buf; |
1510 | struct eth_tx_start_bd *tx_start_bd; | 1623 | struct eth_tx_start_bd *tx_start_bd; |
1511 | struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; | 1624 | struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; |
1512 | struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; | 1625 | struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; |
1513 | dma_addr_t mapping; | 1626 | dma_addr_t mapping; |
1514 | union eth_rx_cqe *cqe; | 1627 | union eth_rx_cqe *cqe; |
1515 | u8 cqe_fp_flags; | 1628 | u8 cqe_fp_flags, cqe_fp_type; |
1516 | struct sw_rx_bd *rx_buf; | 1629 | struct sw_rx_bd *rx_buf; |
1517 | u16 len; | 1630 | u16 len; |
1518 | int rc = -ENODEV; | 1631 | int rc = -ENODEV; |
@@ -1524,7 +1637,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
1524 | return -EINVAL; | 1637 | return -EINVAL; |
1525 | break; | 1638 | break; |
1526 | case BNX2X_MAC_LOOPBACK: | 1639 | case BNX2X_MAC_LOOPBACK: |
1527 | bp->link_params.loopback_mode = LOOPBACK_BMAC; | 1640 | bp->link_params.loopback_mode = CHIP_IS_E3(bp) ? |
1641 | LOOPBACK_XMAC : LOOPBACK_BMAC; | ||
1528 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); | 1642 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); |
1529 | break; | 1643 | break; |
1530 | default: | 1644 | default: |
@@ -1545,6 +1659,14 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
1545 | memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); | 1659 | memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); |
1546 | for (i = ETH_HLEN; i < pkt_size; i++) | 1660 | for (i = ETH_HLEN; i < pkt_size; i++) |
1547 | packet[i] = (unsigned char) (i & 0xff); | 1661 | packet[i] = (unsigned char) (i & 0xff); |
1662 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | ||
1663 | skb_headlen(skb), DMA_TO_DEVICE); | ||
1664 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | ||
1665 | rc = -ENOMEM; | ||
1666 | dev_kfree_skb(skb); | ||
1667 | BNX2X_ERR("Unable to map SKB\n"); | ||
1668 | goto test_loopback_exit; | ||
1669 | } | ||
1548 | 1670 | ||
1549 | /* send the loopback packet */ | 1671 | /* send the loopback packet */ |
1550 | num_pkts = 0; | 1672 | num_pkts = 0; |
@@ -1559,8 +1681,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
1559 | 1681 | ||
1560 | bd_prod = TX_BD(fp_tx->tx_bd_prod); | 1682 | bd_prod = TX_BD(fp_tx->tx_bd_prod); |
1561 | tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; | 1683 | tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; |
1562 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | ||
1563 | skb_headlen(skb), DMA_TO_DEVICE); | ||
1564 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | 1684 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
1565 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | 1685 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
1566 | tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ | 1686 | tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ |
@@ -1590,6 +1710,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
1590 | DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw); | 1710 | DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw); |
1591 | 1711 | ||
1592 | mmiowb(); | 1712 | mmiowb(); |
1713 | barrier(); | ||
1593 | 1714 | ||
1594 | num_pkts++; | 1715 | num_pkts++; |
1595 | fp_tx->tx_bd_prod += 2; /* start + pbd */ | 1716 | fp_tx->tx_bd_prod += 2; /* start + pbd */ |
@@ -1618,9 +1739,11 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
1618 | if (rx_idx != rx_start_idx + num_pkts) | 1739 | if (rx_idx != rx_start_idx + num_pkts) |
1619 | goto test_loopback_exit; | 1740 | goto test_loopback_exit; |
1620 | 1741 | ||
1621 | cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)]; | 1742 | rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons); |
1743 | cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)]; | ||
1622 | cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; | 1744 | cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; |
1623 | if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) | 1745 | cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; |
1746 | if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) | ||
1624 | goto test_loopback_rx_exit; | 1747 | goto test_loopback_rx_exit; |
1625 | 1748 | ||
1626 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); | 1749 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); |
@@ -1628,6 +1751,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
1628 | goto test_loopback_rx_exit; | 1751 | goto test_loopback_rx_exit; |
1629 | 1752 | ||
1630 | rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)]; | 1753 | rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)]; |
1754 | dma_sync_single_for_device(&bp->pdev->dev, | ||
1755 | dma_unmap_addr(rx_buf, mapping), | ||
1756 | fp_rx->rx_buf_size, DMA_FROM_DEVICE); | ||
1631 | skb = rx_buf->skb; | 1757 | skb = rx_buf->skb; |
1632 | skb_reserve(skb, cqe->fast_path_cqe.placement_offset); | 1758 | skb_reserve(skb, cqe->fast_path_cqe.placement_offset); |
1633 | for (i = ETH_HLEN; i < pkt_size; i++) | 1759 | for (i = ETH_HLEN; i < pkt_size; i++) |
@@ -1653,7 +1779,7 @@ test_loopback_exit: | |||
1653 | return rc; | 1779 | return rc; |
1654 | } | 1780 | } |
1655 | 1781 | ||
1656 | static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up) | 1782 | static int bnx2x_test_loopback(struct bnx2x *bp) |
1657 | { | 1783 | { |
1658 | int rc = 0, res; | 1784 | int rc = 0, res; |
1659 | 1785 | ||
@@ -1666,13 +1792,13 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up) | |||
1666 | bnx2x_netif_stop(bp, 1); | 1792 | bnx2x_netif_stop(bp, 1); |
1667 | bnx2x_acquire_phy_lock(bp); | 1793 | bnx2x_acquire_phy_lock(bp); |
1668 | 1794 | ||
1669 | res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up); | 1795 | res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK); |
1670 | if (res) { | 1796 | if (res) { |
1671 | DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res); | 1797 | DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res); |
1672 | rc |= BNX2X_PHY_LOOPBACK_FAILED; | 1798 | rc |= BNX2X_PHY_LOOPBACK_FAILED; |
1673 | } | 1799 | } |
1674 | 1800 | ||
1675 | res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up); | 1801 | res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK); |
1676 | if (res) { | 1802 | if (res) { |
1677 | DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res); | 1803 | DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res); |
1678 | rc |= BNX2X_MAC_LOOPBACK_FAILED; | 1804 | rc |= BNX2X_MAC_LOOPBACK_FAILED; |
@@ -1744,39 +1870,20 @@ test_nvram_exit: | |||
1744 | return rc; | 1870 | return rc; |
1745 | } | 1871 | } |
1746 | 1872 | ||
1873 | /* Send an EMPTY ramrod on the first queue */ | ||
1747 | static int bnx2x_test_intr(struct bnx2x *bp) | 1874 | static int bnx2x_test_intr(struct bnx2x *bp) |
1748 | { | 1875 | { |
1749 | struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); | 1876 | struct bnx2x_queue_state_params params = {0}; |
1750 | int i, rc; | ||
1751 | 1877 | ||
1752 | if (!netif_running(bp->dev)) | 1878 | if (!netif_running(bp->dev)) |
1753 | return -ENODEV; | 1879 | return -ENODEV; |
1754 | 1880 | ||
1755 | config->hdr.length = 0; | 1881 | params.q_obj = &bp->fp->q_obj; |
1756 | if (CHIP_IS_E1(bp)) | 1882 | params.cmd = BNX2X_Q_CMD_EMPTY; |
1757 | config->hdr.offset = (BP_PORT(bp) ? 32 : 0); | ||
1758 | else | ||
1759 | config->hdr.offset = BP_FUNC(bp); | ||
1760 | config->hdr.client_id = bp->fp->cl_id; | ||
1761 | config->hdr.reserved1 = 0; | ||
1762 | |||
1763 | bp->set_mac_pending = 1; | ||
1764 | smp_wmb(); | ||
1765 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, | ||
1766 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), | ||
1767 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1); | ||
1768 | if (rc == 0) { | ||
1769 | for (i = 0; i < 10; i++) { | ||
1770 | if (!bp->set_mac_pending) | ||
1771 | break; | ||
1772 | smp_rmb(); | ||
1773 | msleep_interruptible(10); | ||
1774 | } | ||
1775 | if (i == 10) | ||
1776 | rc = -ENODEV; | ||
1777 | } | ||
1778 | 1883 | ||
1779 | return rc; | 1884 | __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); |
1885 | |||
1886 | return bnx2x_queue_state_change(bp, ¶ms); | ||
1780 | } | 1887 | } |
1781 | 1888 | ||
1782 | static void bnx2x_self_test(struct net_device *dev, | 1889 | static void bnx2x_self_test(struct net_device *dev, |
@@ -1815,7 +1922,7 @@ static void bnx2x_self_test(struct net_device *dev, | |||
1815 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | 1922 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); |
1816 | bnx2x_nic_load(bp, LOAD_DIAG); | 1923 | bnx2x_nic_load(bp, LOAD_DIAG); |
1817 | /* wait until link state is restored */ | 1924 | /* wait until link state is restored */ |
1818 | bnx2x_wait_for_link(bp, link_up, is_serdes); | 1925 | bnx2x_wait_for_link(bp, 1, is_serdes); |
1819 | 1926 | ||
1820 | if (bnx2x_test_registers(bp) != 0) { | 1927 | if (bnx2x_test_registers(bp) != 0) { |
1821 | buf[0] = 1; | 1928 | buf[0] = 1; |
@@ -1826,7 +1933,7 @@ static void bnx2x_self_test(struct net_device *dev, | |||
1826 | etest->flags |= ETH_TEST_FL_FAILED; | 1933 | etest->flags |= ETH_TEST_FL_FAILED; |
1827 | } | 1934 | } |
1828 | 1935 | ||
1829 | buf[2] = bnx2x_test_loopback(bp, link_up); | 1936 | buf[2] = bnx2x_test_loopback(bp); |
1830 | if (buf[2] != 0) | 1937 | if (buf[2] != 0) |
1831 | etest->flags |= ETH_TEST_FL_FAILED; | 1938 | etest->flags |= ETH_TEST_FL_FAILED; |
1832 | 1939 | ||
@@ -1864,6 +1971,14 @@ static void bnx2x_self_test(struct net_device *dev, | |||
1864 | #define IS_MF_MODE_STAT(bp) \ | 1971 | #define IS_MF_MODE_STAT(bp) \ |
1865 | (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) | 1972 | (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) |
1866 | 1973 | ||
1974 | /* ethtool statistics are displayed for all regular ethernet queues and the | ||
1975 | * fcoe L2 queue if not disabled | ||
1976 | */ | ||
1977 | static inline int bnx2x_num_stat_queues(struct bnx2x *bp) | ||
1978 | { | ||
1979 | return BNX2X_NUM_ETH_QUEUES(bp); | ||
1980 | } | ||
1981 | |||
1867 | static int bnx2x_get_sset_count(struct net_device *dev, int stringset) | 1982 | static int bnx2x_get_sset_count(struct net_device *dev, int stringset) |
1868 | { | 1983 | { |
1869 | struct bnx2x *bp = netdev_priv(dev); | 1984 | struct bnx2x *bp = netdev_priv(dev); |
@@ -1872,7 +1987,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset) | |||
1872 | switch (stringset) { | 1987 | switch (stringset) { |
1873 | case ETH_SS_STATS: | 1988 | case ETH_SS_STATS: |
1874 | if (is_multi(bp)) { | 1989 | if (is_multi(bp)) { |
1875 | num_stats = BNX2X_NUM_STAT_QUEUES(bp) * | 1990 | num_stats = bnx2x_num_stat_queues(bp) * |
1876 | BNX2X_NUM_Q_STATS; | 1991 | BNX2X_NUM_Q_STATS; |
1877 | if (!IS_MF_MODE_STAT(bp)) | 1992 | if (!IS_MF_MODE_STAT(bp)) |
1878 | num_stats += BNX2X_NUM_STATS; | 1993 | num_stats += BNX2X_NUM_STATS; |
@@ -1905,14 +2020,9 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) | |||
1905 | case ETH_SS_STATS: | 2020 | case ETH_SS_STATS: |
1906 | if (is_multi(bp)) { | 2021 | if (is_multi(bp)) { |
1907 | k = 0; | 2022 | k = 0; |
1908 | for_each_napi_queue(bp, i) { | 2023 | for_each_eth_queue(bp, i) { |
1909 | memset(queue_name, 0, sizeof(queue_name)); | 2024 | memset(queue_name, 0, sizeof(queue_name)); |
1910 | 2025 | sprintf(queue_name, "%d", i); | |
1911 | if (IS_FCOE_IDX(i)) | ||
1912 | sprintf(queue_name, "fcoe"); | ||
1913 | else | ||
1914 | sprintf(queue_name, "%d", i); | ||
1915 | |||
1916 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) | 2026 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) |
1917 | snprintf(buf + (k + j)*ETH_GSTRING_LEN, | 2027 | snprintf(buf + (k + j)*ETH_GSTRING_LEN, |
1918 | ETH_GSTRING_LEN, | 2028 | ETH_GSTRING_LEN, |
@@ -1951,7 +2061,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, | |||
1951 | 2061 | ||
1952 | if (is_multi(bp)) { | 2062 | if (is_multi(bp)) { |
1953 | k = 0; | 2063 | k = 0; |
1954 | for_each_napi_queue(bp, i) { | 2064 | for_each_eth_queue(bp, i) { |
1955 | hw_stats = (u32 *)&bp->fp[i].eth_q_stats; | 2065 | hw_stats = (u32 *)&bp->fp[i].eth_q_stats; |
1956 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { | 2066 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { |
1957 | if (bnx2x_q_stats_arr[j].size == 0) { | 2067 | if (bnx2x_q_stats_arr[j].size == 0) { |
@@ -2069,14 +2179,30 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, | |||
2069 | { | 2179 | { |
2070 | struct bnx2x *bp = netdev_priv(dev); | 2180 | struct bnx2x *bp = netdev_priv(dev); |
2071 | size_t copy_size = | 2181 | size_t copy_size = |
2072 | min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE); | 2182 | min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE); |
2183 | u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; | ||
2184 | size_t i; | ||
2073 | 2185 | ||
2074 | if (bp->multi_mode == ETH_RSS_MODE_DISABLED) | 2186 | if (bp->multi_mode == ETH_RSS_MODE_DISABLED) |
2075 | return -EOPNOTSUPP; | 2187 | return -EOPNOTSUPP; |
2076 | 2188 | ||
2077 | indir->size = TSTORM_INDIRECTION_TABLE_SIZE; | 2189 | /* Get the current configuration of the RSS indirection table */ |
2078 | memcpy(indir->ring_index, bp->rx_indir_table, | 2190 | bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table); |
2079 | copy_size * sizeof(bp->rx_indir_table[0])); | 2191 | |
2192 | /* | ||
2193 | * We can't use a memcpy() as an internal storage of an | ||
2194 | * indirection table is a u8 array while indir->ring_index | ||
2195 | * points to an array of u32. | ||
2196 | * | ||
2197 | * Indirection table contains the FW Client IDs, so we need to | ||
2198 | * align the returned table to the Client ID of the leading RSS | ||
2199 | * queue. | ||
2200 | */ | ||
2201 | for (i = 0; i < copy_size; i++) | ||
2202 | indir->ring_index[i] = ind_table[i] - bp->fp->cl_id; | ||
2203 | |||
2204 | indir->size = T_ETH_INDIRECTION_TABLE_SIZE; | ||
2205 | |||
2080 | return 0; | 2206 | return 0; |
2081 | } | 2207 | } |
2082 | 2208 | ||
@@ -2085,21 +2211,33 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, | |||
2085 | { | 2211 | { |
2086 | struct bnx2x *bp = netdev_priv(dev); | 2212 | struct bnx2x *bp = netdev_priv(dev); |
2087 | size_t i; | 2213 | size_t i; |
2214 | u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; | ||
2215 | u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); | ||
2088 | 2216 | ||
2089 | if (bp->multi_mode == ETH_RSS_MODE_DISABLED) | 2217 | if (bp->multi_mode == ETH_RSS_MODE_DISABLED) |
2090 | return -EOPNOTSUPP; | 2218 | return -EOPNOTSUPP; |
2091 | 2219 | ||
2092 | /* Validate size and indices */ | 2220 | /* validate the size */ |
2093 | if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE) | 2221 | if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE) |
2094 | return -EINVAL; | 2222 | return -EINVAL; |
2095 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) | 2223 | |
2096 | if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp)) | 2224 | for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { |
2225 | /* validate the indices */ | ||
2226 | if (indir->ring_index[i] >= num_eth_queues) | ||
2097 | return -EINVAL; | 2227 | return -EINVAL; |
2228 | /* | ||
2229 | * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy() | ||
2230 | * as an internal storage of an indirection table is a u8 array | ||
2231 | * while indir->ring_index points to an array of u32. | ||
2232 | * | ||
2233 | * Indirection table contains the FW Client IDs, so we need to | ||
2234 | * align the received table to the Client ID of the leading RSS | ||
2235 | * queue | ||
2236 | */ | ||
2237 | ind_table[i] = indir->ring_index[i] + bp->fp->cl_id; | ||
2238 | } | ||
2098 | 2239 | ||
2099 | memcpy(bp->rx_indir_table, indir->ring_index, | 2240 | return bnx2x_config_rss_pf(bp, ind_table, false); |
2100 | indir->size * sizeof(bp->rx_indir_table[0])); | ||
2101 | bnx2x_push_indir_table(bp); | ||
2102 | return 0; | ||
2103 | } | 2241 | } |
2104 | 2242 | ||
2105 | static const struct ethtool_ops bnx2x_ethtool_ops = { | 2243 | static const struct ethtool_ops bnx2x_ethtool_ops = { |
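[Editor's note] The bnx2x_get_rxfh_indir()/bnx2x_set_rxfh_indir() hunks above replace the old memcpy() of bp->rx_indir_table with an element-by-element translation, because ethtool exposes the indirection table as u32 ring indices while the firmware-side table holds u8 client IDs biased by the client ID of the leading RSS queue. A minimal standalone sketch of that translation follows; the function names, types and the 128-entry size are illustrative assumptions, not the driver's API:

	#include <stdint.h>

	#define IND_TABLE_SIZE 128	/* stands in for T_ETH_INDIRECTION_TABLE_SIZE */

	/* ethtool -> firmware direction (what the new set_rxfh_indir loop does):
	 * bias each ring index by the leading RSS queue's client ID. */
	static void rings_to_clids(const uint32_t *ring_index, uint8_t *ind_table,
				   uint8_t leading_cl_id)
	{
		int i;

		for (i = 0; i < IND_TABLE_SIZE; i++)
			ind_table[i] = ring_index[i] + leading_cl_id;
	}

	/* firmware -> ethtool direction (what the new get_rxfh_indir loop does):
	 * strip the same bias so user space sees plain queue numbers again. */
	static void clids_to_rings(const uint8_t *ind_table, uint32_t *ring_index,
				   uint8_t leading_cl_id)
	{
		int i;

		for (i = 0; i < IND_TABLE_SIZE; i++)
			ring_index[i] = ind_table[i] - leading_cl_id;
	}

A memcpy() cannot be used here even apart from the biasing, since the element widths differ (u8 on the firmware side, u32 on the ethtool side).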
diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h index 9fe367836a57..998652a1b858 100644 --- a/drivers/net/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/bnx2x/bnx2x_fw_defs.h | |||
@@ -10,249 +10,221 @@ | |||
10 | #ifndef BNX2X_FW_DEFS_H | 10 | #ifndef BNX2X_FW_DEFS_H |
11 | #define BNX2X_FW_DEFS_H | 11 | #define BNX2X_FW_DEFS_H |
12 | 12 | ||
13 | #define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[142].base) | 13 | #define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base) |
14 | #define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ | 14 | #define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ |
15 | (IRO[141].base + ((assertListEntry) * IRO[141].m1)) | 15 | (IRO[147].base + ((assertListEntry) * IRO[147].m1)) |
16 | #define CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \ | ||
17 | (IRO[144].base + ((pfId) * IRO[144].m1)) | ||
18 | #define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \ | 16 | #define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \ |
19 | (IRO[149].base + (((pfId)>>1) * IRO[149].m1) + (((pfId)&1) * \ | 17 | (IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \ |
20 | IRO[149].m2)) | 18 | IRO[153].m2)) |
21 | #define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \ | 19 | #define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \ |
22 | (IRO[150].base + (((pfId)>>1) * IRO[150].m1) + (((pfId)&1) * \ | 20 | (IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \ |
23 | IRO[150].m2)) | 21 | IRO[154].m2)) |
24 | #define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \ | 22 | #define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \ |
25 | (IRO[156].base + ((funcId) * IRO[156].m1)) | 23 | (IRO[159].base + ((funcId) * IRO[159].m1)) |
26 | #define CSTORM_FUNC_EN_OFFSET(funcId) \ | 24 | #define CSTORM_FUNC_EN_OFFSET(funcId) \ |
27 | (IRO[146].base + ((funcId) * IRO[146].m1)) | 25 | (IRO[149].base + ((funcId) * IRO[149].m1)) |
28 | #define CSTORM_FUNCTION_MODE_OFFSET (IRO[153].base) | 26 | #define CSTORM_IGU_MODE_OFFSET (IRO[157].base) |
29 | #define CSTORM_IGU_MODE_OFFSET (IRO[154].base) | ||
30 | #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ | 27 | #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ |
31 | (IRO[311].base + ((pfId) * IRO[311].m1)) | 28 | (IRO[315].base + ((pfId) * IRO[315].m1)) |
32 | #define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ | 29 | #define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ |
33 | (IRO[312].base + ((pfId) * IRO[312].m1)) | 30 | (IRO[316].base + ((pfId) * IRO[316].m1)) |
34 | #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ | 31 | #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ |
35 | (IRO[304].base + ((pfId) * IRO[304].m1) + ((iscsiEqId) * \ | 32 | (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2)) |
36 | IRO[304].m2)) | 33 | #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ |
37 | #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ | 34 | (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2)) |
38 | (IRO[306].base + ((pfId) * IRO[306].m1) + ((iscsiEqId) * \ | 35 | #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ |
39 | IRO[306].m2)) | 36 | (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) |
40 | #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ | 37 | #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ |
41 | (IRO[305].base + ((pfId) * IRO[305].m1) + ((iscsiEqId) * \ | 38 | (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2)) |
42 | IRO[305].m2)) | 39 | #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ |
43 | #define \ | 40 | (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2)) |
44 | CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ | 41 | #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ |
45 | (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * \ | 42 | (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) |
46 | IRO[307].m2)) | 43 | #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ |
47 | #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ | 44 | (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) |
48 | (IRO[303].base + ((pfId) * IRO[303].m1) + ((iscsiEqId) * \ | ||
49 | IRO[303].m2)) | ||
50 | #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ | ||
51 | (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * \ | ||
52 | IRO[309].m2)) | ||
53 | #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ | ||
54 | (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * \ | ||
55 | IRO[308].m2)) | ||
56 | #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ | 45 | #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ |
57 | (IRO[310].base + ((pfId) * IRO[310].m1)) | 46 | (IRO[314].base + ((pfId) * IRO[314].m1)) |
58 | #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ | 47 | #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ |
59 | (IRO[302].base + ((pfId) * IRO[302].m1)) | 48 | (IRO[306].base + ((pfId) * IRO[306].m1)) |
60 | #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ | 49 | #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ |
61 | (IRO[301].base + ((pfId) * IRO[301].m1)) | 50 | (IRO[305].base + ((pfId) * IRO[305].m1)) |
62 | #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ | 51 | #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ |
63 | (IRO[300].base + ((pfId) * IRO[300].m1)) | 52 | (IRO[304].base + ((pfId) * IRO[304].m1)) |
64 | #define CSTORM_PATH_ID_OFFSET (IRO[159].base) | 53 | #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ |
54 | (IRO[151].base + ((funcId) * IRO[151].m1)) | ||
65 | #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ | 55 | #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ |
66 | (IRO[137].base + ((pfId) * IRO[137].m1)) | 56 | (IRO[142].base + ((pfId) * IRO[142].m1)) |
57 | #define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \ | ||
58 | (IRO[143].base + ((pfId) * IRO[143].m1)) | ||
67 | #define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \ | 59 | #define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \ |
68 | (IRO[136].base + ((pfId) * IRO[136].m1)) | 60 | (IRO[141].base + ((pfId) * IRO[141].m1)) |
69 | #define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[136].size) | 61 | #define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size) |
70 | #define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \ | 62 | #define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \ |
71 | (IRO[138].base + ((pfId) * IRO[138].m1)) | 63 | (IRO[144].base + ((pfId) * IRO[144].m1)) |
72 | #define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[138].size) | 64 | #define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size) |
73 | #define CSTORM_STATS_FLAGS_OFFSET(pfId) \ | 65 | #define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \ |
74 | (IRO[143].base + ((pfId) * IRO[143].m1)) | 66 | (IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2)) |
75 | #define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \ | 67 | #define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \ |
76 | (IRO[129].base + ((sbId) * IRO[129].m1)) | 68 | (IRO[133].base + ((sbId) * IRO[133].m1)) |
69 | #define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \ | ||
70 | (IRO[134].base + ((sbId) * IRO[134].m1)) | ||
71 | #define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \ | ||
72 | (IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2)) | ||
77 | #define CSTORM_STATUS_BLOCK_OFFSET(sbId) \ | 73 | #define CSTORM_STATUS_BLOCK_OFFSET(sbId) \ |
78 | (IRO[128].base + ((sbId) * IRO[128].m1)) | ||
79 | #define CSTORM_STATUS_BLOCK_SIZE (IRO[128].size) | ||
80 | #define CSTORM_SYNC_BLOCK_OFFSET(sbId) \ | ||
81 | (IRO[132].base + ((sbId) * IRO[132].m1)) | 74 | (IRO[132].base + ((sbId) * IRO[132].m1)) |
82 | #define CSTORM_SYNC_BLOCK_SIZE (IRO[132].size) | 75 | #define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size) |
76 | #define CSTORM_SYNC_BLOCK_OFFSET(sbId) \ | ||
77 | (IRO[137].base + ((sbId) * IRO[137].m1)) | ||
78 | #define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size) | ||
83 | #define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \ | 79 | #define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \ |
84 | (IRO[151].base + ((vfId) * IRO[151].m1)) | 80 | (IRO[155].base + ((vfId) * IRO[155].m1)) |
85 | #define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \ | 81 | #define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \ |
86 | (IRO[152].base + ((vfId) * IRO[152].m1)) | 82 | (IRO[156].base + ((vfId) * IRO[156].m1)) |
87 | #define CSTORM_VF_TO_PF_OFFSET(funcId) \ | 83 | #define CSTORM_VF_TO_PF_OFFSET(funcId) \ |
88 | (IRO[147].base + ((funcId) * IRO[147].m1)) | 84 | (IRO[150].base + ((funcId) * IRO[150].m1)) |
89 | #define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[199].base) | 85 | #define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base) |
90 | #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ | 86 | #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ |
91 | (IRO[198].base + ((pfId) * IRO[198].m1)) | 87 | (IRO[203].base + ((pfId) * IRO[203].m1)) |
92 | #define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[99].base) | 88 | #define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) |
93 | #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ | 89 | #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ |
94 | (IRO[98].base + ((assertListEntry) * IRO[98].m1)) | 90 | (IRO[101].base + ((assertListEntry) * IRO[101].m1)) |
95 | #define TSTORM_CLIENT_CONFIG_OFFSET(portId, clientId) \ | 91 | #define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base) |
96 | (IRO[197].base + ((portId) * IRO[197].m1) + ((clientId) * \ | ||
97 | IRO[197].m2)) | ||
98 | #define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[104].base) | ||
99 | #define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \ | 92 | #define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \ |
100 | (IRO[105].base) | 93 | (IRO[108].base) |
101 | #define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \ | ||
102 | (IRO[96].base + ((pfId) * IRO[96].m1)) | ||
103 | #define TSTORM_FUNC_EN_OFFSET(funcId) \ | ||
104 | (IRO[101].base + ((funcId) * IRO[101].m1)) | ||
105 | #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ | 94 | #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ |
106 | (IRO[195].base + ((pfId) * IRO[195].m1)) | 95 | (IRO[201].base + ((pfId) * IRO[201].m1)) |
107 | #define TSTORM_FUNCTION_MODE_OFFSET (IRO[103].base) | 96 | #define TSTORM_FUNC_EN_OFFSET(funcId) \ |
108 | #define TSTORM_INDIRECTION_TABLE_OFFSET(pfId) \ | 97 | (IRO[103].base + ((funcId) * IRO[103].m1)) |
109 | (IRO[91].base + ((pfId) * IRO[91].m1)) | ||
110 | #define TSTORM_INDIRECTION_TABLE_SIZE (IRO[91].size) | ||
111 | #define \ | ||
112 | TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfId, iscsiConBufPblEntry) \ | ||
113 | (IRO[260].base + ((pfId) * IRO[260].m1) + ((iscsiConBufPblEntry) \ | ||
114 | * IRO[260].m2)) | ||
115 | #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ | 98 | #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ |
116 | (IRO[264].base + ((pfId) * IRO[264].m1)) | 99 | (IRO[271].base + ((pfId) * IRO[271].m1)) |
117 | #define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ | 100 | #define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ |
118 | (IRO[265].base + ((pfId) * IRO[265].m1)) | 101 | (IRO[272].base + ((pfId) * IRO[272].m1)) |
119 | #define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ | 102 | #define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ |
120 | (IRO[266].base + ((pfId) * IRO[266].m1)) | 103 | (IRO[273].base + ((pfId) * IRO[273].m1)) |
121 | #define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ | 104 | #define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ |
122 | (IRO[267].base + ((pfId) * IRO[267].m1)) | 105 | (IRO[274].base + ((pfId) * IRO[274].m1)) |
123 | #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ | 106 | #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ |
124 | (IRO[263].base + ((pfId) * IRO[263].m1)) | 107 | (IRO[270].base + ((pfId) * IRO[270].m1)) |
125 | #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ | 108 | #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ |
126 | (IRO[262].base + ((pfId) * IRO[262].m1)) | 109 | (IRO[269].base + ((pfId) * IRO[269].m1)) |
127 | #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ | 110 | #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ |
128 | (IRO[261].base + ((pfId) * IRO[261].m1)) | 111 | (IRO[268].base + ((pfId) * IRO[268].m1)) |
129 | #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ | 112 | #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ |
130 | (IRO[259].base + ((pfId) * IRO[259].m1)) | 113 | (IRO[267].base + ((pfId) * IRO[267].m1)) |
131 | #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ | 114 | #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ |
132 | (IRO[269].base + ((pfId) * IRO[269].m1)) | 115 | (IRO[276].base + ((pfId) * IRO[276].m1)) |
133 | #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ | 116 | #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ |
134 | (IRO[256].base + ((pfId) * IRO[256].m1)) | 117 | (IRO[263].base + ((pfId) * IRO[263].m1)) |
135 | #define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ | 118 | #define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ |
136 | (IRO[257].base + ((pfId) * IRO[257].m1)) | 119 | (IRO[264].base + ((pfId) * IRO[264].m1)) |
120 | #define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ | ||
121 | (IRO[265].base + ((pfId) * IRO[265].m1)) | ||
137 | #define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ | 122 | #define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ |
138 | (IRO[258].base + ((pfId) * IRO[258].m1)) | 123 | (IRO[266].base + ((pfId) * IRO[266].m1)) |
139 | #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ | 124 | #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ |
140 | (IRO[196].base + ((pfId) * IRO[196].m1)) | 125 | (IRO[202].base + ((pfId) * IRO[202].m1)) |
141 | #define TSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, tStatCntId) \ | 126 | #define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ |
142 | (IRO[100].base + ((portId) * IRO[100].m1) + ((tStatCntId) * \ | 127 | (IRO[105].base + ((funcId) * IRO[105].m1)) |
143 | IRO[100].m2)) | ||
144 | #define TSTORM_STATS_FLAGS_OFFSET(pfId) \ | ||
145 | (IRO[95].base + ((pfId) * IRO[95].m1)) | ||
146 | #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ | 128 | #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ |
147 | (IRO[211].base + ((pfId) * IRO[211].m1)) | 129 | (IRO[216].base + ((pfId) * IRO[216].m1)) |
148 | #define TSTORM_VF_TO_PF_OFFSET(funcId) \ | 130 | #define TSTORM_VF_TO_PF_OFFSET(funcId) \ |
149 | (IRO[102].base + ((funcId) * IRO[102].m1)) | 131 | (IRO[104].base + ((funcId) * IRO[104].m1)) |
150 | #define USTORM_AGG_DATA_OFFSET (IRO[201].base) | 132 | #define USTORM_AGG_DATA_OFFSET (IRO[206].base) |
151 | #define USTORM_AGG_DATA_SIZE (IRO[201].size) | 133 | #define USTORM_AGG_DATA_SIZE (IRO[206].size) |
152 | #define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[170].base) | 134 | #define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base) |
153 | #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ | 135 | #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ |
154 | (IRO[169].base + ((assertListEntry) * IRO[169].m1)) | 136 | (IRO[176].base + ((assertListEntry) * IRO[176].m1)) |
137 | #define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \ | ||
138 | (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \ | ||
139 | IRO[205].m2)) | ||
155 | #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ | 140 | #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ |
156 | (IRO[178].base + ((portId) * IRO[178].m1)) | 141 | (IRO[183].base + ((portId) * IRO[183].m1)) |
157 | #define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \ | ||
158 | (IRO[172].base + ((pfId) * IRO[172].m1)) | ||
159 | #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ | 142 | #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ |
160 | (IRO[313].base + ((pfId) * IRO[313].m1)) | 143 | (IRO[317].base + ((pfId) * IRO[317].m1)) |
161 | #define USTORM_FUNC_EN_OFFSET(funcId) \ | 144 | #define USTORM_FUNC_EN_OFFSET(funcId) \ |
162 | (IRO[174].base + ((funcId) * IRO[174].m1)) | 145 | (IRO[178].base + ((funcId) * IRO[178].m1)) |
163 | #define USTORM_FUNCTION_MODE_OFFSET (IRO[177].base) | ||
164 | #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ | 146 | #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ |
165 | (IRO[277].base + ((pfId) * IRO[277].m1)) | 147 | (IRO[281].base + ((pfId) * IRO[281].m1)) |
166 | #define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ | 148 | #define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ |
167 | (IRO[278].base + ((pfId) * IRO[278].m1)) | ||
168 | #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ | ||
169 | (IRO[282].base + ((pfId) * IRO[282].m1)) | 149 | (IRO[282].base + ((pfId) * IRO[282].m1)) |
150 | #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ | ||
151 | (IRO[286].base + ((pfId) * IRO[286].m1)) | ||
170 | #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ | 152 | #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ |
171 | (IRO[279].base + ((pfId) * IRO[279].m1)) | 153 | (IRO[283].base + ((pfId) * IRO[283].m1)) |
172 | #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ | 154 | #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ |
173 | (IRO[275].base + ((pfId) * IRO[275].m1)) | 155 | (IRO[279].base + ((pfId) * IRO[279].m1)) |
174 | #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ | 156 | #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ |
175 | (IRO[274].base + ((pfId) * IRO[274].m1)) | 157 | (IRO[278].base + ((pfId) * IRO[278].m1)) |
176 | #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ | 158 | #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ |
177 | (IRO[273].base + ((pfId) * IRO[273].m1)) | 159 | (IRO[277].base + ((pfId) * IRO[277].m1)) |
178 | #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ | 160 | #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ |
179 | (IRO[276].base + ((pfId) * IRO[276].m1)) | ||
180 | #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ | ||
181 | (IRO[280].base + ((pfId) * IRO[280].m1)) | 161 | (IRO[280].base + ((pfId) * IRO[280].m1)) |
162 | #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ | ||
163 | (IRO[284].base + ((pfId) * IRO[284].m1)) | ||
182 | #define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ | 164 | #define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ |
183 | (IRO[281].base + ((pfId) * IRO[281].m1)) | 165 | (IRO[285].base + ((pfId) * IRO[285].m1)) |
184 | #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ | 166 | #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ |
185 | (IRO[176].base + ((pfId) * IRO[176].m1)) | 167 | (IRO[182].base + ((pfId) * IRO[182].m1)) |
186 | #define USTORM_PER_COUNTER_ID_STATS_OFFSET(portId, uStatCntId) \ | 168 | #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ |
187 | (IRO[173].base + ((portId) * IRO[173].m1) + ((uStatCntId) * \ | 169 | (IRO[180].base + ((funcId) * IRO[180].m1)) |
188 | IRO[173].m2)) | 170 | #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ |
189 | #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ | 171 | (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \ |
190 | (IRO[204].base + ((portId) * IRO[204].m1) + ((clientId) * \ | 172 | IRO[209].m2)) |
191 | IRO[204].m2)) | ||
192 | #define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \ | 173 | #define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \ |
193 | (IRO[205].base + ((qzoneId) * IRO[205].m1)) | 174 | (IRO[210].base + ((qzoneId) * IRO[210].m1)) |
194 | #define USTORM_STATS_FLAGS_OFFSET(pfId) \ | 175 | #define USTORM_TPA_BTR_OFFSET (IRO[207].base) |
195 | (IRO[171].base + ((pfId) * IRO[171].m1)) | 176 | #define USTORM_TPA_BTR_SIZE (IRO[207].size) |
196 | #define USTORM_TPA_BTR_OFFSET (IRO[202].base) | ||
197 | #define USTORM_TPA_BTR_SIZE (IRO[202].size) | ||
198 | #define USTORM_VF_TO_PF_OFFSET(funcId) \ | 177 | #define USTORM_VF_TO_PF_OFFSET(funcId) \ |
199 | (IRO[175].base + ((funcId) * IRO[175].m1)) | 178 | (IRO[179].base + ((funcId) * IRO[179].m1)) |
200 | #define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[59].base) | 179 | #define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base) |
201 | #define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[58].base) | 180 | #define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base) |
202 | #define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[54].base) | 181 | #define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base) |
203 | #define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ | 182 | #define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ |
204 | (IRO[53].base + ((assertListEntry) * IRO[53].m1)) | 183 | (IRO[50].base + ((assertListEntry) * IRO[50].m1)) |
205 | #define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \ | 184 | #define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \ |
206 | (IRO[47].base + ((portId) * IRO[47].m1)) | 185 | (IRO[43].base + ((portId) * IRO[43].m1)) |
207 | #define XSTORM_E1HOV_OFFSET(pfId) \ | ||
208 | (IRO[55].base + ((pfId) * IRO[55].m1)) | ||
209 | #define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \ | ||
210 | (IRO[45].base + ((pfId) * IRO[45].m1)) | ||
211 | #define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \ | 186 | #define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \ |
212 | (IRO[49].base + ((pfId) * IRO[49].m1)) | 187 | (IRO[45].base + ((pfId) * IRO[45].m1)) |
213 | #define XSTORM_FUNC_EN_OFFSET(funcId) \ | 188 | #define XSTORM_FUNC_EN_OFFSET(funcId) \ |
214 | (IRO[51].base + ((funcId) * IRO[51].m1)) | 189 | (IRO[47].base + ((funcId) * IRO[47].m1)) |
215 | #define XSTORM_FUNCTION_MODE_OFFSET (IRO[56].base) | ||
216 | #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ | 190 | #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ |
217 | (IRO[290].base + ((pfId) * IRO[290].m1)) | 191 | (IRO[294].base + ((pfId) * IRO[294].m1)) |
218 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ | 192 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ |
219 | (IRO[293].base + ((pfId) * IRO[293].m1)) | 193 | (IRO[297].base + ((pfId) * IRO[297].m1)) |
220 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ | 194 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ |
221 | (IRO[294].base + ((pfId) * IRO[294].m1)) | 195 | (IRO[298].base + ((pfId) * IRO[298].m1)) |
222 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ | 196 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ |
223 | (IRO[295].base + ((pfId) * IRO[295].m1)) | 197 | (IRO[299].base + ((pfId) * IRO[299].m1)) |
224 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ | 198 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ |
225 | (IRO[296].base + ((pfId) * IRO[296].m1)) | 199 | (IRO[300].base + ((pfId) * IRO[300].m1)) |
226 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ | 200 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ |
227 | (IRO[297].base + ((pfId) * IRO[297].m1)) | 201 | (IRO[301].base + ((pfId) * IRO[301].m1)) |
228 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ | 202 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ |
229 | (IRO[298].base + ((pfId) * IRO[298].m1)) | 203 | (IRO[302].base + ((pfId) * IRO[302].m1)) |
230 | #define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ | 204 | #define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ |
231 | (IRO[299].base + ((pfId) * IRO[299].m1)) | 205 | (IRO[303].base + ((pfId) * IRO[303].m1)) |
232 | #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ | 206 | #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ |
233 | (IRO[289].base + ((pfId) * IRO[289].m1)) | 207 | (IRO[293].base + ((pfId) * IRO[293].m1)) |
234 | #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ | 208 | #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ |
235 | (IRO[288].base + ((pfId) * IRO[288].m1)) | 209 | (IRO[292].base + ((pfId) * IRO[292].m1)) |
236 | #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ | 210 | #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ |
237 | (IRO[287].base + ((pfId) * IRO[287].m1)) | 211 | (IRO[291].base + ((pfId) * IRO[291].m1)) |
238 | #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ | 212 | #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ |
239 | (IRO[292].base + ((pfId) * IRO[292].m1)) | 213 | (IRO[296].base + ((pfId) * IRO[296].m1)) |
240 | #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ | 214 | #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ |
241 | (IRO[291].base + ((pfId) * IRO[291].m1)) | 215 | (IRO[295].base + ((pfId) * IRO[295].m1)) |
242 | #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ | 216 | #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ |
243 | (IRO[286].base + ((pfId) * IRO[286].m1)) | 217 | (IRO[290].base + ((pfId) * IRO[290].m1)) |
244 | #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ | 218 | #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ |
245 | (IRO[285].base + ((pfId) * IRO[285].m1)) | 219 | (IRO[289].base + ((pfId) * IRO[289].m1)) |
246 | #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ | 220 | #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ |
247 | (IRO[284].base + ((pfId) * IRO[284].m1)) | 221 | (IRO[288].base + ((pfId) * IRO[288].m1)) |
248 | #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ | 222 | #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ |
249 | (IRO[283].base + ((pfId) * IRO[283].m1)) | 223 | (IRO[287].base + ((pfId) * IRO[287].m1)) |
250 | #define XSTORM_PATH_ID_OFFSET (IRO[65].base) | ||
251 | #define XSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, xStatCntId) \ | ||
252 | (IRO[50].base + ((portId) * IRO[50].m1) + ((xStatCntId) * \ | ||
253 | IRO[50].m2)) | ||
254 | #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ | 224 | #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ |
255 | (IRO[48].base + ((pfId) * IRO[48].m1)) | 225 | (IRO[44].base + ((pfId) * IRO[44].m1)) |
226 | #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ | ||
227 | (IRO[49].base + ((funcId) * IRO[49].m1)) | ||
256 | #define XSTORM_SPQ_DATA_OFFSET(funcId) \ | 228 | #define XSTORM_SPQ_DATA_OFFSET(funcId) \ |
257 | (IRO[32].base + ((funcId) * IRO[32].m1)) | 229 | (IRO[32].base + ((funcId) * IRO[32].m1)) |
258 | #define XSTORM_SPQ_DATA_SIZE (IRO[32].size) | 230 | #define XSTORM_SPQ_DATA_SIZE (IRO[32].size) |
@@ -260,42 +232,37 @@ | |||
260 | (IRO[30].base + ((funcId) * IRO[30].m1)) | 232 | (IRO[30].base + ((funcId) * IRO[30].m1)) |
261 | #define XSTORM_SPQ_PROD_OFFSET(funcId) \ | 233 | #define XSTORM_SPQ_PROD_OFFSET(funcId) \ |
262 | (IRO[31].base + ((funcId) * IRO[31].m1)) | 234 | (IRO[31].base + ((funcId) * IRO[31].m1)) |
263 | #define XSTORM_STATS_FLAGS_OFFSET(pfId) \ | ||
264 | (IRO[43].base + ((pfId) * IRO[43].m1)) | ||
265 | #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \ | 235 | #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \ |
266 | (IRO[206].base + ((portId) * IRO[206].m1)) | 236 | (IRO[211].base + ((portId) * IRO[211].m1)) |
267 | #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ | 237 | #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ |
268 | (IRO[207].base + ((portId) * IRO[207].m1)) | 238 | (IRO[212].base + ((portId) * IRO[212].m1)) |
269 | #define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \ | 239 | #define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \ |
270 | (IRO[209].base + (((pfId)>>1) * IRO[209].m1) + (((pfId)&1) * \ | 240 | (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \ |
271 | IRO[209].m2)) | 241 | IRO[214].m2)) |
272 | #define XSTORM_VF_TO_PF_OFFSET(funcId) \ | 242 | #define XSTORM_VF_TO_PF_OFFSET(funcId) \ |
273 | (IRO[52].base + ((funcId) * IRO[52].m1)) | 243 | (IRO[48].base + ((funcId) * IRO[48].m1)) |
274 | #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 | 244 | #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 |
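All of the *_OFFSET macros above follow one pattern: a base taken from the firmware-generated IRO table plus one or two strides scaled by an index (pfId, funcId, or a portId/clientId pair; XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET splits pfId into a pair index and a member bit). A minimal sketch of how such an offset resolves; the iro layout and the table values below are invented for illustration and are not the driver's real data:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for one firmware-generated IRO record (illustrative only). */
struct iro {
	uint32_t base;	/* offset of instance 0 inside the storm's internal RAM */
	uint16_t m1;	/* stride for the first index */
	uint16_t m2;	/* stride for the optional second index */
	uint16_t size;	/* size of one instance */
};

static const struct iro example = { .base = 0x6040, .m1 = 0x40, .m2 = 0x8, .size = 0x8 };

/* Shape of e.g. TSTORM_VF_TO_PF_OFFSET(funcId): base + funcId * m1 */
static unsigned one_index(const struct iro *e, unsigned idx)
{
	return e->base + idx * e->m1;
}

/* Shape of e.g. USTORM_RX_PRODS_E1X_OFFSET(portId, clientId):
 * base + portId * m1 + clientId * m2 */
static unsigned two_index(const struct iro *e, unsigned a, unsigned b)
{
	return e->base + a * e->m1 + b * e->m2;
}

/* Shape of XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId): PFs are addressed as
 * pairs, pfId >> 1 selects the pair and pfId & 1 the member inside it. */
static unsigned paired_index(const struct iro *e, unsigned pf_id)
{
	return e->base + (pf_id >> 1) * e->m1 + (pf_id & 1) * e->m2;
}

int main(void)
{
	unsigned pf;

	for (pf = 0; pf < 4; pf++)
		printf("pf %u: single 0x%x, paired 0x%x\n",
		       pf, one_index(&example, pf), paired_index(&example, pf));
	printf("port 1 / client 2: 0x%x\n", two_index(&example, 1, 2));
	return 0;
}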
275 | 245 | ||
276 | /* RSS hash types */ | 246 | /** |
277 | #define DEFAULT_HASH_TYPE 0 | 247 | * This file defines HSI constants for the ETH flow |
278 | #define IPV4_HASH_TYPE 1 | 248 | */ |
279 | #define TCP_IPV4_HASH_TYPE 2 | 249 | #ifdef _EVEREST_MICROCODE |
280 | #define IPV6_HASH_TYPE 3 | 250 | #include "Microcode\Generated\DataTypes\eth_rx_bd.h" |
281 | #define TCP_IPV6_HASH_TYPE 4 | 251 | #include "Microcode\Generated\DataTypes\eth_tx_bd.h" |
282 | #define VLAN_PRI_HASH_TYPE 5 | 252 | #include "Microcode\Generated\DataTypes\eth_rx_cqe.h" |
283 | #define E1HOV_PRI_HASH_TYPE 6 | 253 | #include "Microcode\Generated\DataTypes\eth_rx_sge.h" |
284 | #define DSCP_HASH_TYPE 7 | 254 | #include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h" |
255 | #endif | ||
285 | 256 | ||
286 | 257 | ||
287 | /* Ethernet Ring parameters */ | 258 | /* Ethernet Ring parameters */ |
288 | #define X_ETH_LOCAL_RING_SIZE 13 | 259 | #define X_ETH_LOCAL_RING_SIZE 13 |
289 | #define FIRST_BD_IN_PKT 0 | 260 | #define FIRST_BD_IN_PKT 0 |
290 | #define PARSE_BD_INDEX 1 | 261 | #define PARSE_BD_INDEX 1 |
291 | #define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8)) | 262 | #define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8)) |
292 | #define U_ETH_NUM_OF_SGES_TO_FETCH 8 | 263 | #define U_ETH_NUM_OF_SGES_TO_FETCH 8 |
293 | #define U_ETH_MAX_SGES_FOR_PACKET 3 | 264 | #define U_ETH_MAX_SGES_FOR_PACKET 3 |
294 | 265 | ||
295 | /*Tx params*/ | ||
296 | #define X_ETH_NO_VLAN 0 | ||
297 | #define X_ETH_OUTBAND_VLAN 1 | ||
298 | #define X_ETH_INBAND_VLAN 2 | ||
299 | /* Rx ring params */ | 266 | /* Rx ring params */ |
300 | #define U_ETH_LOCAL_BD_RING_SIZE 8 | 267 | #define U_ETH_LOCAL_BD_RING_SIZE 8 |
301 | #define U_ETH_LOCAL_SGE_RING_SIZE 10 | 268 | #define U_ETH_LOCAL_SGE_RING_SIZE 10 |
@@ -311,79 +278,64 @@ | |||
311 | #define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8)) | 278 | #define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8)) |
312 | #define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8)) | 279 | #define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8)) |
313 | 280 | ||
314 | #define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1) | 281 | #define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1) |
315 | #define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1) | 282 | #define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1) |
316 | #define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1) | 283 | #define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1) |
317 | 284 | ||
318 | #define U_ETH_UNDEFINED_Q 0xFF | 285 | #define U_ETH_UNDEFINED_Q 0xFF |
319 | 286 | ||
320 | /* values of command IDs in the ramrod message */ | ||
321 | #define RAMROD_CMD_ID_ETH_UNUSED 0 | ||
322 | #define RAMROD_CMD_ID_ETH_CLIENT_SETUP 1 | ||
323 | #define RAMROD_CMD_ID_ETH_UPDATE 2 | ||
324 | #define RAMROD_CMD_ID_ETH_HALT 3 | ||
325 | #define RAMROD_CMD_ID_ETH_FORWARD_SETUP 4 | ||
326 | #define RAMROD_CMD_ID_ETH_ACTIVATE 5 | ||
327 | #define RAMROD_CMD_ID_ETH_DEACTIVATE 6 | ||
328 | #define RAMROD_CMD_ID_ETH_EMPTY 7 | ||
329 | #define RAMROD_CMD_ID_ETH_TERMINATE 8 | ||
330 | |||
331 | /* command values for set mac command */ | ||
332 | #define T_ETH_MAC_COMMAND_SET 0 | ||
333 | #define T_ETH_MAC_COMMAND_INVALIDATE 1 | ||
334 | |||
335 | #define T_ETH_INDIRECTION_TABLE_SIZE 128 | 287 | #define T_ETH_INDIRECTION_TABLE_SIZE 128 |
288 | #define T_ETH_RSS_KEY 10 | ||
289 | #define ETH_NUM_OF_RSS_ENGINES_E2 72 | ||
290 | |||
291 | #define FILTER_RULES_COUNT 16 | ||
292 | #define MULTICAST_RULES_COUNT 16 | ||
293 | #define CLASSIFY_RULES_COUNT 16 | ||
336 | 294 | ||
337 | /* The CRC32 seed used for the hash (reduction) of the multicast address */ | 295 | /* The CRC32 seed used for the hash (reduction) of the multicast address */ |
338 | #define T_ETH_CRC32_HASH_SEED 0x00000000 | 296 | #define ETH_CRC32_HASH_SEED 0x00000000 |
297 | |||
298 | #define ETH_CRC32_HASH_BIT_SIZE (8) | ||
299 | #define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1) | ||
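Together, ETH_CRC32_HASH_SEED, ETH_CRC32_HASH_BIT_SIZE and ETH_CRC32_HASH_MASK describe the multicast approximate-match hash: a CRC32 of the 6-byte MAC, started from the seed, is reduced to an 8-bit bin index (one of the ETH_NUM_OF_MCAST_BINS = 256 bins defined further down). A rough sketch; the plain reflected CRC-32 and the low-bit selection below are assumptions for illustration, the exact CRC variant the firmware uses is not shown in this hunk:

#include <stdio.h>
#include <stdint.h>

#define ETH_CRC32_HASH_SEED	0x00000000
#define ETH_CRC32_HASH_BIT_SIZE	8
#define ETH_CRC32_HASH_MASK	((1u << ETH_CRC32_HASH_BIT_SIZE) - 1)

/* Plain reflected CRC-32 (polynomial 0xedb88320), bit by bit. */
static uint32_t crc32_le(uint32_t crc, const uint8_t *buf, int len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

/* Reduce a multicast MAC to one of 256 bins of the approximate-match filter. */
static unsigned mcast_bin(const uint8_t mac[6])
{
	return crc32_le(ETH_CRC32_HASH_SEED, mac, 6) & ETH_CRC32_HASH_MASK;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };

	printf("multicast bin: %u\n", mcast_bin(mac));
	return 0;
}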
339 | 300 | ||
340 | /* Maximal L2 clients supported */ | 301 | /* Maximal L2 clients supported */ |
341 | #define ETH_MAX_RX_CLIENTS_E1 18 | 302 | #define ETH_MAX_RX_CLIENTS_E1 18 |
342 | #define ETH_MAX_RX_CLIENTS_E1H 28 | 303 | #define ETH_MAX_RX_CLIENTS_E1H 28 |
304 | #define ETH_MAX_RX_CLIENTS_E2 152 | ||
305 | |||
306 | /* Maximal statistics client Ids */ | ||
307 | #define MAX_STAT_COUNTER_ID_E1 36 | ||
308 | #define MAX_STAT_COUNTER_ID_E1H 56 | ||
309 | #define MAX_STAT_COUNTER_ID_E2 140 | ||
310 | |||
311 | #define MAX_MAC_CREDIT_E1 192 /* Per Chip */ | ||
312 | #define MAX_MAC_CREDIT_E1H 256 /* Per Chip */ | ||
313 | #define MAX_MAC_CREDIT_E2 272 /* Per Path */ | ||
314 | #define MAX_VLAN_CREDIT_E1 0 /* Per Chip */ | ||
315 | #define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */ | ||
316 | #define MAX_VLAN_CREDIT_E2 272 /* Per Path */ | ||
343 | 317 | ||
344 | #define MAX_STAT_COUNTER_ID ETH_MAX_RX_CLIENTS_E1H | ||
345 | 318 | ||
346 | /* Maximal aggregation queues supported */ | 319 | /* Maximal aggregation queues supported */ |
347 | #define ETH_MAX_AGGREGATION_QUEUES_E1 32 | 320 | #define ETH_MAX_AGGREGATION_QUEUES_E1 32 |
348 | #define ETH_MAX_AGGREGATION_QUEUES_E1H 64 | 321 | #define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64 |
349 | 322 | ||
350 | /* ETH RSS modes */ | ||
351 | #define ETH_RSS_MODE_DISABLED 0 | ||
352 | #define ETH_RSS_MODE_REGULAR 1 | ||
353 | #define ETH_RSS_MODE_VLAN_PRI 2 | ||
354 | #define ETH_RSS_MODE_E1HOV_PRI 3 | ||
355 | #define ETH_RSS_MODE_IP_DSCP 4 | ||
356 | #define ETH_RSS_MODE_E2_INTEG 5 | ||
357 | 323 | ||
324 | #define ETH_NUM_OF_MCAST_BINS 256 | ||
325 | #define ETH_NUM_OF_MCAST_ENGINES_E2 72 | ||
358 | 326 | ||
359 | /* ETH vlan filtering modes */ | 327 | #define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3) |
360 | #define ETH_VLAN_FILTER_ANY_VLAN 0 /* Don't filter by vlan */ | 328 | #define ETH_MIN_RX_CQES_WITH_TPA_E1 \ |
361 | #define ETH_VLAN_FILTER_SPECIFIC_VLAN \ | 329 | (ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA) |
362 | 1 /* Only the vlan_id is allowed */ | 330 | #define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \ |
363 | #define ETH_VLAN_FILTER_CLASSIFY \ | 331 | (ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA) |
364 | 2 /* vlan will be added to CAM for classification */ | ||
365 | 332 | ||
366 | /* Fast path CQE selection */ | 333 | #define DISABLE_STATISTIC_COUNTER_ID_VALUE 0 |
367 | #define ETH_FP_CQE_REGULAR 0 | ||
368 | #define ETH_FP_CQE_SGL 1 | ||
369 | #define ETH_FP_CQE_RAW 2 | ||
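For reference, the new minimum-CQE constants on the right work out numerically from values elsewhere in this file (MAX_RAMRODS_PER_PORT is 8, defined further down): 8 + 3 = 11 CQEs without TPA, 32 + 11 = 43 with TPA on E1, and 64 + 11 = 75 on E1H/E2. A trivial check restating only those constants:

#include <stdio.h>

#define MAX_RAMRODS_PER_PORT			8
#define ETH_MAX_AGGREGATION_QUEUES_E1		32
#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2	64

#define ETH_MIN_RX_CQES_WITHOUT_TPA	(MAX_RAMRODS_PER_PORT + 3)
#define ETH_MIN_RX_CQES_WITH_TPA_E1 \
	(ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA)
#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \
	(ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA)

int main(void)
{
	printf("min CQEs: no TPA %d, TPA E1 %d, TPA E1H/E2 %d\n",
	       ETH_MIN_RX_CQES_WITHOUT_TPA,		/* 11 */
	       ETH_MIN_RX_CQES_WITH_TPA_E1,		/* 43 */
	       ETH_MIN_RX_CQES_WITH_TPA_E1H_E2);	/* 75 */
	return 0;
}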
370 | 334 | ||
371 | 335 | ||
372 | /** | 336 | /** |
373 | * This file defines HSI constants common to all microcode flows | 337 | * This file defines HSI constants common to all microcode flows |
374 | */ | 338 | */ |
375 | |||
376 | /* Connection types */ | ||
377 | #define ETH_CONNECTION_TYPE 0 | ||
378 | #define TOE_CONNECTION_TYPE 1 | ||
379 | #define RDMA_CONNECTION_TYPE 2 | ||
380 | #define ISCSI_CONNECTION_TYPE 3 | ||
381 | #define FCOE_CONNECTION_TYPE 4 | ||
382 | #define RESERVED_CONNECTION_TYPE_0 5 | ||
383 | #define RESERVED_CONNECTION_TYPE_1 6 | ||
384 | #define RESERVED_CONNECTION_TYPE_2 7 | ||
385 | #define NONE_CONNECTION_TYPE 8 | ||
386 | |||
387 | 339 | ||
388 | #define PROTOCOL_STATE_BIT_OFFSET 6 | 340 | #define PROTOCOL_STATE_BIT_OFFSET 6 |
389 | 341 | ||
@@ -391,25 +343,9 @@ | |||
391 | #define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) | 343 | #define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) |
392 | #define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) | 344 | #define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) |
393 | 345 | ||
394 | /* values of command IDs in the ramrod message */ | ||
395 | #define RAMROD_CMD_ID_COMMON_FUNCTION_START 1 | ||
396 | #define RAMROD_CMD_ID_COMMON_FUNCTION_STOP 2 | ||
397 | #define RAMROD_CMD_ID_COMMON_CFC_DEL 3 | ||
398 | #define RAMROD_CMD_ID_COMMON_CFC_DEL_WB 4 | ||
399 | #define RAMROD_CMD_ID_COMMON_SET_MAC 5 | ||
400 | #define RAMROD_CMD_ID_COMMON_STAT_QUERY 6 | ||
401 | #define RAMROD_CMD_ID_COMMON_STOP_TRAFFIC 7 | ||
402 | #define RAMROD_CMD_ID_COMMON_START_TRAFFIC 8 | ||
403 | |||
404 | /* microcode fixed page size 4K (chains and ring segments) */ | 346 | /* microcode fixed page size 4K (chains and ring segments) */ |
405 | #define MC_PAGE_SIZE 4096 | 347 | #define MC_PAGE_SIZE 4096 |
406 | 348 | ||
407 | |||
408 | /* Host coalescing constants */ | ||
409 | #define HC_IGU_BC_MODE 0 | ||
410 | #define HC_IGU_NBC_MODE 1 | ||
411 | /* Host coalescing constants. E1 includes E1H as well */ | ||
412 | |||
413 | /* Number of indices per slow-path SB */ | 349 | /* Number of indices per slow-path SB */ |
414 | #define HC_SP_SB_MAX_INDICES 16 | 350 | #define HC_SP_SB_MAX_INDICES 16 |
415 | 351 | ||
@@ -418,30 +354,17 @@ | |||
418 | #define HC_SB_MAX_INDICES_E2 8 | 354 | #define HC_SB_MAX_INDICES_E2 8 |
419 | 355 | ||
420 | #define HC_SB_MAX_SB_E1X 32 | 356 | #define HC_SB_MAX_SB_E1X 32 |
421 | #define HC_SB_MAX_SB_E2 136 | 357 | #define HC_SB_MAX_SB_E2 136 |
422 | 358 | ||
423 | #define HC_SP_SB_ID 0xde | 359 | #define HC_SP_SB_ID 0xde |
424 | 360 | ||
425 | #define HC_REGULAR_SEGMENT 0 | ||
426 | #define HC_DEFAULT_SEGMENT 1 | ||
427 | #define HC_SB_MAX_SM 2 | 361 | #define HC_SB_MAX_SM 2 |
428 | 362 | ||
429 | #define HC_SB_MAX_DYNAMIC_INDICES 4 | 363 | #define HC_SB_MAX_DYNAMIC_INDICES 4 |
430 | #define HC_FUNCTION_DISABLED 0xff | ||
431 | /* used by the driver to get the SB offset */ | ||
432 | #define USTORM_ID 0 | ||
433 | #define CSTORM_ID 1 | ||
434 | #define XSTORM_ID 2 | ||
435 | #define TSTORM_ID 3 | ||
436 | #define ATTENTION_ID 4 | ||
437 | 364 | ||
438 | /* max number of slow path commands per port */ | 365 | /* max number of slow path commands per port */ |
439 | #define MAX_RAMRODS_PER_PORT 8 | 366 | #define MAX_RAMRODS_PER_PORT 8 |
440 | 367 | ||
441 | /* values for RX ETH CQE type field */ | ||
442 | #define RX_ETH_CQE_TYPE_ETH_FASTPATH 0 | ||
443 | #define RX_ETH_CQE_TYPE_ETH_RAMROD 1 | ||
444 | |||
445 | 368 | ||
446 | /**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ | 369 | /**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ |
447 | 370 | ||
@@ -451,7 +374,7 @@ | |||
451 | 374 | ||
452 | #define XSEMI_CLK1_RESUL_CHIP (1e-3) | 375 | #define XSEMI_CLK1_RESUL_CHIP (1e-3) |
453 | 376 | ||
454 | #define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6)) | 377 | #define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6)) |
455 | 378 | ||
456 | /**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ | 379 | /**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ |
457 | 380 | ||
@@ -460,72 +383,28 @@ | |||
460 | 383 | ||
461 | #define FW_LOG_LIST_SIZE 50 | 384 | #define FW_LOG_LIST_SIZE 50 |
462 | 385 | ||
463 | #define NUM_OF_PROTOCOLS 4 | ||
464 | #define NUM_OF_SAFC_BITS 16 | 386 | #define NUM_OF_SAFC_BITS 16 |
465 | #define MAX_COS_NUMBER 4 | 387 | #define MAX_COS_NUMBER 4 |
466 | 388 | #define MAX_TRAFFIC_TYPES 8 | |
467 | #define FAIRNESS_COS_WRR_MODE 0 | ||
468 | #define FAIRNESS_COS_ETS_MODE 1 | ||
469 | |||
470 | |||
471 | /* Priority Flow Control (PFC) */ | ||
472 | #define MAX_PFC_PRIORITIES 8 | 389 | #define MAX_PFC_PRIORITIES 8 |
473 | #define MAX_PFC_TRAFFIC_TYPES 8 | ||
474 | |||
475 | /* Available Traffic Types for Link Layer Flow Control */ | ||
476 | #define LLFC_TRAFFIC_TYPE_NW 0 | ||
477 | #define LLFC_TRAFFIC_TYPE_FCOE 1 | ||
478 | #define LLFC_TRAFFIC_TYPE_ISCSI 2 | ||
479 | /***************** START OF E2 INTEGRATION \ | ||
480 | CODE***************************************/ | ||
481 | #define LLFC_TRAFFIC_TYPE_NW_COS1_E2INTEG 3 | ||
482 | /***************** END OF E2 INTEGRATION \ | ||
483 | CODE***************************************/ | ||
484 | #define LLFC_TRAFFIC_TYPE_MAX 4 | ||
485 | 390 | ||
486 | /* used by array traffic_type_to_priority[] to mark traffic type \ | 391 | /* used by array traffic_type_to_priority[] to mark traffic type \ |
487 | that is not mapped to priority*/ | 392 | that is not mapped to priority*/ |
488 | #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF | 393 | #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF |
489 | 394 | ||
490 | #define LLFC_MODE_NONE 0 | ||
491 | #define LLFC_MODE_PFC 1 | ||
492 | #define LLFC_MODE_SAFC 2 | ||
493 | |||
494 | #define DCB_DISABLED 0 | ||
495 | #define DCB_ENABLED 1 | ||
496 | 395 | ||
497 | #define UNKNOWN_ADDRESS 0 | 396 | #define C_ERES_PER_PAGE \ |
498 | #define UNICAST_ADDRESS 1 | 397 | (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem))) |
499 | #define MULTICAST_ADDRESS 2 | 398 | #define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1) |
500 | #define BROADCAST_ADDRESS 3 | ||
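C_ERES_PER_PAGE divides the 4K microcode page by the size of one event_ring_elem, and because that count comes out as a power of two, C_ERE_PER_PAGE_MASK lets an index wrap within a page with a single AND. A small sketch; the 8-byte element size used here is an assumption for illustration:

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096u
#define EXAMPLE_ELEM_BYTES	8u	/* assumed size of one event_ring_elem */

#define ERES_PER_PAGE		(EXAMPLE_PAGE_SIZE / EXAMPLE_ELEM_BYTES)
#define ERE_PER_PAGE_MASK	(ERES_PER_PAGE - 1)

int main(void)
{
	unsigned cons = 0;

	/* Power-of-two element count: wrapping inside a page is one AND. */
	for (int i = 0; i < 5; i++) {
		printf("raw index %u -> in-page index %u\n",
		       cons, cons & ERE_PER_PAGE_MASK);
		cons += 200;
	}
	return 0;
}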
501 | 399 | ||
502 | #define SINGLE_FUNCTION 0 | 400 | #define STATS_QUERY_CMD_COUNT 16 |
503 | #define MULTI_FUNCTION_SD 1 | ||
504 | #define MULTI_FUNCTION_SI 2 | ||
505 | 401 | ||
506 | #define IP_V4 0 | 402 | #define NIV_LIST_TABLE_SIZE 4096 |
507 | #define IP_V6 1 | ||
508 | 403 | ||
404 | #define INVALID_VNIC_ID 0xFF | ||
509 | 405 | ||
510 | #define C_ERES_PER_PAGE \ | ||
511 | (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem))) | ||
512 | #define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1) | ||
513 | 406 | ||
514 | #define EVENT_RING_OPCODE_VF_PF_CHANNEL 0 | 407 | #define UNDEF_IRO 0x80000000 |
515 | #define EVENT_RING_OPCODE_FUNCTION_START 1 | ||
516 | #define EVENT_RING_OPCODE_FUNCTION_STOP 2 | ||
517 | #define EVENT_RING_OPCODE_CFC_DEL 3 | ||
518 | #define EVENT_RING_OPCODE_CFC_DEL_WB 4 | ||
519 | #define EVENT_RING_OPCODE_SET_MAC 5 | ||
520 | #define EVENT_RING_OPCODE_STAT_QUERY 6 | ||
521 | #define EVENT_RING_OPCODE_STOP_TRAFFIC 7 | ||
522 | #define EVENT_RING_OPCODE_START_TRAFFIC 8 | ||
523 | #define EVENT_RING_OPCODE_FORWARD_SETUP 9 | ||
524 | |||
525 | #define VF_PF_CHANNEL_STATE_READY 0 | ||
526 | #define VF_PF_CHANNEL_STATE_WAITING_FOR_ACK 1 | ||
527 | |||
528 | #define VF_PF_CHANNEL_STATE_MAX_NUMBER 2 | ||
529 | 408 | ||
530 | 409 | ||
531 | #endif /* BNX2X_FW_DEFS_H */ | 410 | #endif /* BNX2X_FW_DEFS_H */ |
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h index cdf19fe7c7f6..0692d75756df 100644 --- a/drivers/net/bnx2x/bnx2x_hsi.h +++ b/drivers/net/bnx2x/bnx2x_hsi.h | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | #include "bnx2x_fw_defs.h" | 12 | #include "bnx2x_fw_defs.h" |
13 | 13 | ||
14 | #define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e | 14 | #define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e |
15 | 15 | ||
16 | struct license_key { | 16 | struct license_key { |
17 | u32 reserved[6]; | 17 | u32 reserved[6]; |
@@ -33,201 +33,366 @@ struct license_key { | |||
33 | u32 reserved_b[4]; | 33 | u32 reserved_b[4]; |
34 | }; | 34 | }; |
35 | 35 | ||
36 | #define PORT_0 0 | 36 | |
37 | #define PORT_1 1 | 37 | #define PORT_0 0 |
38 | #define PORT_MAX 2 | 38 | #define PORT_1 1 |
39 | #define PORT_MAX 2 | ||
39 | 40 | ||
40 | /**************************************************************************** | 41 | /**************************************************************************** |
41 | * Shared HW configuration * | 42 | * Shared HW configuration * |
42 | ****************************************************************************/ | 43 | ****************************************************************************/ |
43 | struct shared_hw_cfg { /* NVRAM Offset */ | 44 | #define PIN_CFG_NA 0x00000000 |
45 | #define PIN_CFG_GPIO0_P0 0x00000001 | ||
46 | #define PIN_CFG_GPIO1_P0 0x00000002 | ||
47 | #define PIN_CFG_GPIO2_P0 0x00000003 | ||
48 | #define PIN_CFG_GPIO3_P0 0x00000004 | ||
49 | #define PIN_CFG_GPIO0_P1 0x00000005 | ||
50 | #define PIN_CFG_GPIO1_P1 0x00000006 | ||
51 | #define PIN_CFG_GPIO2_P1 0x00000007 | ||
52 | #define PIN_CFG_GPIO3_P1 0x00000008 | ||
53 | #define PIN_CFG_EPIO0 0x00000009 | ||
54 | #define PIN_CFG_EPIO1 0x0000000a | ||
55 | #define PIN_CFG_EPIO2 0x0000000b | ||
56 | #define PIN_CFG_EPIO3 0x0000000c | ||
57 | #define PIN_CFG_EPIO4 0x0000000d | ||
58 | #define PIN_CFG_EPIO5 0x0000000e | ||
59 | #define PIN_CFG_EPIO6 0x0000000f | ||
60 | #define PIN_CFG_EPIO7 0x00000010 | ||
61 | #define PIN_CFG_EPIO8 0x00000011 | ||
62 | #define PIN_CFG_EPIO9 0x00000012 | ||
63 | #define PIN_CFG_EPIO10 0x00000013 | ||
64 | #define PIN_CFG_EPIO11 0x00000014 | ||
65 | #define PIN_CFG_EPIO12 0x00000015 | ||
66 | #define PIN_CFG_EPIO13 0x00000016 | ||
67 | #define PIN_CFG_EPIO14 0x00000017 | ||
68 | #define PIN_CFG_EPIO15 0x00000018 | ||
69 | #define PIN_CFG_EPIO16 0x00000019 | ||
70 | #define PIN_CFG_EPIO17 0x0000001a | ||
71 | #define PIN_CFG_EPIO18 0x0000001b | ||
72 | #define PIN_CFG_EPIO19 0x0000001c | ||
73 | #define PIN_CFG_EPIO20 0x0000001d | ||
74 | #define PIN_CFG_EPIO21 0x0000001e | ||
75 | #define PIN_CFG_EPIO22 0x0000001f | ||
76 | #define PIN_CFG_EPIO23 0x00000020 | ||
77 | #define PIN_CFG_EPIO24 0x00000021 | ||
78 | #define PIN_CFG_EPIO25 0x00000022 | ||
79 | #define PIN_CFG_EPIO26 0x00000023 | ||
80 | #define PIN_CFG_EPIO27 0x00000024 | ||
81 | #define PIN_CFG_EPIO28 0x00000025 | ||
82 | #define PIN_CFG_EPIO29 0x00000026 | ||
83 | #define PIN_CFG_EPIO30 0x00000027 | ||
84 | #define PIN_CFG_EPIO31 0x00000028 | ||
85 | |||
86 | /* EPIO definition */ | ||
87 | #define EPIO_CFG_NA 0x00000000 | ||
88 | #define EPIO_CFG_EPIO0 0x00000001 | ||
89 | #define EPIO_CFG_EPIO1 0x00000002 | ||
90 | #define EPIO_CFG_EPIO2 0x00000003 | ||
91 | #define EPIO_CFG_EPIO3 0x00000004 | ||
92 | #define EPIO_CFG_EPIO4 0x00000005 | ||
93 | #define EPIO_CFG_EPIO5 0x00000006 | ||
94 | #define EPIO_CFG_EPIO6 0x00000007 | ||
95 | #define EPIO_CFG_EPIO7 0x00000008 | ||
96 | #define EPIO_CFG_EPIO8 0x00000009 | ||
97 | #define EPIO_CFG_EPIO9 0x0000000a | ||
98 | #define EPIO_CFG_EPIO10 0x0000000b | ||
99 | #define EPIO_CFG_EPIO11 0x0000000c | ||
100 | #define EPIO_CFG_EPIO12 0x0000000d | ||
101 | #define EPIO_CFG_EPIO13 0x0000000e | ||
102 | #define EPIO_CFG_EPIO14 0x0000000f | ||
103 | #define EPIO_CFG_EPIO15 0x00000010 | ||
104 | #define EPIO_CFG_EPIO16 0x00000011 | ||
105 | #define EPIO_CFG_EPIO17 0x00000012 | ||
106 | #define EPIO_CFG_EPIO18 0x00000013 | ||
107 | #define EPIO_CFG_EPIO19 0x00000014 | ||
108 | #define EPIO_CFG_EPIO20 0x00000015 | ||
109 | #define EPIO_CFG_EPIO21 0x00000016 | ||
110 | #define EPIO_CFG_EPIO22 0x00000017 | ||
111 | #define EPIO_CFG_EPIO23 0x00000018 | ||
112 | #define EPIO_CFG_EPIO24 0x00000019 | ||
113 | #define EPIO_CFG_EPIO25 0x0000001a | ||
114 | #define EPIO_CFG_EPIO26 0x0000001b | ||
115 | #define EPIO_CFG_EPIO27 0x0000001c | ||
116 | #define EPIO_CFG_EPIO28 0x0000001d | ||
117 | #define EPIO_CFG_EPIO29 0x0000001e | ||
118 | #define EPIO_CFG_EPIO30 0x0000001f | ||
119 | #define EPIO_CFG_EPIO31 0x00000020 | ||
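Reading the values listed above, the PIN_CFG_* space is a single encoding: 0 is "not connected", 1..8 select GPIO0..GPIO3 on port 0 and then port 1, and 0x9..0x28 select EPIO0..EPIO31 (the EPIO_CFG_* space repeats the EPIO part shifted down by 8). A small decoder written under exactly that reading, for illustration only:

#include <stdio.h>

#define PIN_CFG_NA		0x00
#define PIN_CFG_GPIO0_P0	0x01	/* 1..8: GPIO0-3 on port 0, then port 1 */
#define PIN_CFG_EPIO0		0x09	/* 0x9..0x28: EPIO0-31 */
#define PIN_CFG_EPIO31		0x28

static void describe_pin(unsigned cfg)
{
	if (cfg == PIN_CFG_NA)
		printf("0x%02x: not connected\n", cfg);
	else if (cfg < PIN_CFG_EPIO0)
		printf("0x%02x: GPIO%u on port %u\n", cfg,
		       (cfg - PIN_CFG_GPIO0_P0) % 4,
		       (cfg - PIN_CFG_GPIO0_P0) / 4);
	else if (cfg <= PIN_CFG_EPIO31)
		printf("0x%02x: EPIO%u\n", cfg, cfg - PIN_CFG_EPIO0);
	else
		printf("0x%02x: unknown\n", cfg);
}

int main(void)
{
	describe_pin(0x00);	/* PIN_CFG_NA */
	describe_pin(0x03);	/* PIN_CFG_GPIO2_P0 */
	describe_pin(0x06);	/* PIN_CFG_GPIO1_P1 */
	describe_pin(0x18);	/* PIN_CFG_EPIO15 */
	return 0;
}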
120 | |||
121 | |||
122 | struct shared_hw_cfg { /* NVRAM Offset */ | ||
44 | /* Up to 16 bytes of NULL-terminated string */ | 123 | /* Up to 16 bytes of NULL-terminated string */ |
45 | u8 part_num[16]; /* 0x104 */ | 124 | u8 part_num[16]; /* 0x104 */ |
125 | |||
126 | u32 config; /* 0x114 */ | ||
127 | #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001 | ||
128 | #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0 | ||
129 | #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000 | ||
130 | #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001 | ||
131 | #define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002 | ||
46 | 132 | ||
47 | u32 config; /* 0x114 */ | 133 | #define SHARED_HW_CFG_PORT_SWAP 0x00000004 |
48 | #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001 | ||
49 | #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0 | ||
50 | #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000 | ||
51 | #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001 | ||
52 | #define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002 | ||
53 | 134 | ||
54 | #define SHARED_HW_CFG_PORT_SWAP 0x00000004 | 135 | #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008 |
55 | 136 | ||
56 | #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008 | 137 | #define SHARED_HW_CFG_PCIE_GEN3_DISABLED 0x00000000 |
138 | #define SHARED_HW_CFG_PCIE_GEN3_ENABLED 0x00000010 | ||
57 | 139 | ||
58 | #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700 | 140 | #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700 |
59 | #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8 | 141 | #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8 |
60 | /* Whatever MFW found in NVM | 142 | /* Whatever MFW found in NVM |
61 | (if multiple found, priority order is: NC-SI, UMP, IPMI) */ | 143 | (if multiple found, priority order is: NC-SI, UMP, IPMI) */ |
62 | #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000 | 144 | #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000 |
63 | #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100 | 145 | #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100 |
64 | #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200 | 146 | #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200 |
65 | #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300 | 147 | #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300 |
66 | /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI | 148 | /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI |
67 | (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ | 149 | (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ |
68 | #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400 | 150 | #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400 |
69 | /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI | 151 | /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI |
70 | (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ | 152 | (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ |
71 | #define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500 | 153 | #define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500 |
72 | /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP | 154 | /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP |
73 | (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ | 155 | (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ |
74 | #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600 | 156 | #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600 |
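The shared_hw_cfg fields here all follow the same mask/shift convention: AND the config word with *_MASK and either compare against the named values (they already sit at the field's bit position) or shift right by *_SHIFT for the raw number. A brief sketch using the MFW_SELECT field copied above; the config word value is made up for the example:

#include <stdio.h>

#define SHARED_HW_CFG_MFW_SELECT_MASK		0x00000700
#define SHARED_HW_CFG_MFW_SELECT_SHIFT		8
#define SHARED_HW_CFG_MFW_SELECT_DEFAULT	0x00000000
#define SHARED_HW_CFG_MFW_SELECT_NC_SI		0x00000100
#define SHARED_HW_CFG_MFW_SELECT_UMP		0x00000200
#define SHARED_HW_CFG_MFW_SELECT_IPMI		0x00000300

int main(void)
{
	unsigned config = 0x00010204;	/* invented NVRAM word, for illustration */
	unsigned mfw = config & SHARED_HW_CFG_MFW_SELECT_MASK;

	/* Option 1: compare the masked value against the named settings. */
	if (mfw == SHARED_HW_CFG_MFW_SELECT_UMP)
		printf("management firmware: UMP\n");

	/* Option 2: shift down to get the raw field value (here: 2). */
	printf("raw MFW_SELECT field: %u\n", mfw >> SHARED_HW_CFG_MFW_SELECT_SHIFT);
	return 0;
}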
75 | 157 | ||
76 | #define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000 | 158 | #define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000 |
77 | #define SHARED_HW_CFG_LED_MODE_SHIFT 16 | 159 | #define SHARED_HW_CFG_LED_MODE_SHIFT 16 |
78 | #define SHARED_HW_CFG_LED_MAC1 0x00000000 | 160 | #define SHARED_HW_CFG_LED_MAC1 0x00000000 |
79 | #define SHARED_HW_CFG_LED_PHY1 0x00010000 | 161 | #define SHARED_HW_CFG_LED_PHY1 0x00010000 |
80 | #define SHARED_HW_CFG_LED_PHY2 0x00020000 | 162 | #define SHARED_HW_CFG_LED_PHY2 0x00020000 |
81 | #define SHARED_HW_CFG_LED_PHY3 0x00030000 | 163 | #define SHARED_HW_CFG_LED_PHY3 0x00030000 |
82 | #define SHARED_HW_CFG_LED_MAC2 0x00040000 | 164 | #define SHARED_HW_CFG_LED_MAC2 0x00040000 |
83 | #define SHARED_HW_CFG_LED_PHY4 0x00050000 | 165 | #define SHARED_HW_CFG_LED_PHY4 0x00050000 |
84 | #define SHARED_HW_CFG_LED_PHY5 0x00060000 | 166 | #define SHARED_HW_CFG_LED_PHY5 0x00060000 |
85 | #define SHARED_HW_CFG_LED_PHY6 0x00070000 | 167 | #define SHARED_HW_CFG_LED_PHY6 0x00070000 |
86 | #define SHARED_HW_CFG_LED_MAC3 0x00080000 | 168 | #define SHARED_HW_CFG_LED_MAC3 0x00080000 |
87 | #define SHARED_HW_CFG_LED_PHY7 0x00090000 | 169 | #define SHARED_HW_CFG_LED_PHY7 0x00090000 |
88 | #define SHARED_HW_CFG_LED_PHY9 0x000a0000 | 170 | #define SHARED_HW_CFG_LED_PHY9 0x000a0000 |
89 | #define SHARED_HW_CFG_LED_PHY11 0x000b0000 | 171 | #define SHARED_HW_CFG_LED_PHY11 0x000b0000 |
90 | #define SHARED_HW_CFG_LED_MAC4 0x000c0000 | 172 | #define SHARED_HW_CFG_LED_MAC4 0x000c0000 |
91 | #define SHARED_HW_CFG_LED_PHY8 0x000d0000 | 173 | #define SHARED_HW_CFG_LED_PHY8 0x000d0000 |
92 | #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000 | 174 | #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000 |
93 | 175 | ||
94 | 176 | ||
95 | #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000 | 177 | #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000 |
96 | #define SHARED_HW_CFG_AN_ENABLE_SHIFT 24 | 178 | #define SHARED_HW_CFG_AN_ENABLE_SHIFT 24 |
97 | #define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000 | 179 | #define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000 |
98 | #define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000 | 180 | #define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000 |
99 | #define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000 | 181 | #define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000 |
100 | #define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000 | 182 | #define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000 |
101 | #define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000 | 183 | #define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000 |
102 | #define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000 | 184 | #define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000 |
103 | 185 | ||
104 | u32 config2; /* 0x118 */ | 186 | #define SHARED_HW_CFG_SRIOV_MASK 0x40000000 |
187 | #define SHARED_HW_CFG_SRIOV_DISABLED 0x00000000 | ||
188 | #define SHARED_HW_CFG_SRIOV_ENABLED 0x40000000 | ||
189 | |||
190 | #define SHARED_HW_CFG_ATC_MASK 0x80000000 | ||
191 | #define SHARED_HW_CFG_ATC_DISABLED 0x00000000 | ||
192 | #define SHARED_HW_CFG_ATC_ENABLED 0x80000000 | ||
193 | |||
194 | u32 config2; /* 0x118 */ | ||
105 | /* one time auto detect grace period (in sec) */ | 195 | /* one time auto detect grace period (in sec) */ |
106 | #define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff | 196 | #define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff |
107 | #define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0 | 197 | #define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0 |
108 | 198 | ||
109 | #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100 | 199 | #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100 |
200 | #define SHARED_HW_CFG_PCIE_GEN2_DISABLED 0x00000000 | ||
110 | 201 | ||
111 | /* The default value for the core clock is 250MHz and it is | 202 | /* The default value for the core clock is 250MHz and it is |
112 | achieved by setting the clock change to 4 */ | 203 | achieved by setting the clock change to 4 */ |
113 | #define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00 | 204 | #define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00 |
114 | #define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9 | 205 | #define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9 |
115 | 206 | ||
116 | #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000 | 207 | #define SHARED_HW_CFG_SMBUS_TIMING_MASK 0x00001000 |
117 | #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000 | 208 | #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000 |
209 | #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000 | ||
118 | 210 | ||
119 | #define SHARED_HW_CFG_HIDE_PORT1 0x00002000 | 211 | #define SHARED_HW_CFG_HIDE_PORT1 0x00002000 |
120 | 212 | ||
121 | /* The fan failure mechanism is usually related to the PHY type | 213 | #define SHARED_HW_CFG_WOL_CAPABLE_MASK 0x00004000 |
122 | since the power consumption of the board is determined by the PHY. | 214 | #define SHARED_HW_CFG_WOL_CAPABLE_DISABLED 0x00000000 |
123 | Currently, fan is required for most designs with SFX7101, BCM8727 | 215 | #define SHARED_HW_CFG_WOL_CAPABLE_ENABLED 0x00004000 |
124 | and BCM8481. If a fan is not required for a board which uses one | ||
125 | of those PHYs, this field should be set to "Disabled". If a fan is | ||
126 | required for a different PHY type, this option should be set to | ||
127 | "Enabled". | ||
128 | The fan failure indication is expected on | ||
129 | SPIO5 */ | ||
130 | #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000 | ||
131 | #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19 | ||
132 | #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000 | ||
133 | #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000 | ||
134 | #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000 | ||
135 | |||
136 | /* Set the MDC/MDIO access for the first external phy */ | ||
137 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000 | ||
138 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26 | ||
139 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000 | ||
140 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000 | ||
141 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000 | ||
142 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000 | ||
143 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000 | ||
144 | |||
145 | /* Set the MDC/MDIO access for the second external phy */ | ||
146 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000 | ||
147 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29 | ||
148 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000 | ||
149 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000 | ||
150 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000 | ||
151 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000 | ||
152 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000 | ||
153 | u32 power_dissipated; /* 0x11c */ | ||
154 | #define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 | ||
155 | #define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24 | ||
156 | |||
157 | #define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000 | ||
158 | #define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16 | ||
159 | #define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000 | ||
160 | #define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000 | ||
161 | #define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000 | ||
162 | #define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000 | ||
163 | |||
164 | u32 ump_nc_si_config; /* 0x120 */ | ||
165 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003 | ||
166 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0 | ||
167 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000 | ||
168 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001 | ||
169 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000 | ||
170 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002 | ||
171 | |||
172 | #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00 | ||
173 | #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8 | ||
174 | |||
175 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000 | ||
176 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16 | ||
177 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000 | ||
178 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000 | ||
179 | |||
180 | u32 board; /* 0x124 */ | ||
181 | #define SHARED_HW_CFG_BOARD_REV_MASK 0x00FF0000 | ||
182 | #define SHARED_HW_CFG_BOARD_REV_SHIFT 16 | ||
183 | |||
184 | #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0F000000 | ||
185 | #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24 | ||
186 | |||
187 | #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xF0000000 | ||
188 | #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28 | ||
189 | |||
190 | u32 reserved; /* 0x128 */ | ||
191 | 216 | ||
217 | /* Output low when PERST is asserted */ | ||
218 | #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK 0x00008000 | ||
219 | #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED 0x00000000 | ||
220 | #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED 0x00008000 | ||
221 | |||
222 | #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK 0x00070000 | ||
223 | #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT 16 | ||
224 | #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW 0x00000000 | ||
225 | #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB 0x00010000 | ||
226 | #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB 0x00020000 | ||
227 | #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB 0x00030000 | ||
228 | |||
229 | /* The fan failure mechanism is usually related to the PHY type | ||
230 | since the power consumption of the board is determined by the PHY. | ||
231 | Currently, fan is required for most designs with SFX7101, BCM8727 | ||
232 | and BCM8481. If a fan is not required for a board which uses one | ||
233 | of those PHYs, this field should be set to "Disabled". If a fan is | ||
234 | required for a different PHY type, this option should be set to | ||
235 | "Enabled". The fan failure indication is expected on SPIO5 */ | ||
236 | #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000 | ||
237 | #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19 | ||
238 | #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000 | ||
239 | #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000 | ||
240 | #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000 | ||
241 | |||
242 | /* ASPM Power Management support */ | ||
243 | #define SHARED_HW_CFG_ASPM_SUPPORT_MASK 0x00600000 | ||
244 | #define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT 21 | ||
245 | #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED 0x00000000 | ||
246 | #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED 0x00200000 | ||
247 | #define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED 0x00400000 | ||
248 | #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED 0x00600000 | ||
249 | |||
250 | /* The value of PM_TL_IGNORE_REQS (bit0) in PCI register | ||
251 | tl_control_0 (register 0x2800) */ | ||
252 | #define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK 0x00800000 | ||
253 | #define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED 0x00000000 | ||
254 | #define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED 0x00800000 | ||
255 | |||
256 | #define SHARED_HW_CFG_PORT_MODE_MASK 0x01000000 | ||
257 | #define SHARED_HW_CFG_PORT_MODE_2 0x00000000 | ||
258 | #define SHARED_HW_CFG_PORT_MODE_4 0x01000000 | ||
259 | |||
260 | #define SHARED_HW_CFG_PATH_SWAP_MASK 0x02000000 | ||
261 | #define SHARED_HW_CFG_PATH_SWAP_DISABLED 0x00000000 | ||
262 | #define SHARED_HW_CFG_PATH_SWAP_ENABLED 0x02000000 | ||
263 | |||
264 | /* Set the MDC/MDIO access for the first external phy */ | ||
265 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000 | ||
266 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26 | ||
267 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000 | ||
268 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000 | ||
269 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000 | ||
270 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000 | ||
271 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000 | ||
272 | |||
273 | /* Set the MDC/MDIO access for the second external phy */ | ||
274 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000 | ||
275 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29 | ||
276 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000 | ||
277 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000 | ||
278 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000 | ||
279 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000 | ||
280 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000 | ||
281 | |||
282 | |||
283 | u32 power_dissipated; /* 0x11c */ | ||
284 | #define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000 | ||
285 | #define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16 | ||
286 | #define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000 | ||
287 | #define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000 | ||
288 | #define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000 | ||
289 | #define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000 | ||
290 | |||
291 | #define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 | ||
292 | #define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24 | ||
293 | |||
294 | u32 ump_nc_si_config; /* 0x120 */ | ||
295 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003 | ||
296 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0 | ||
297 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000 | ||
298 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001 | ||
299 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000 | ||
300 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002 | ||
301 | |||
302 | #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00 | ||
303 | #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8 | ||
304 | |||
305 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000 | ||
306 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16 | ||
307 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000 | ||
308 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000 | ||
309 | |||
310 | u32 board; /* 0x124 */ | ||
311 | #define SHARED_HW_CFG_E3_I2C_MUX0_MASK 0x0000003F | ||
312 | #define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT 0 | ||
313 | #define SHARED_HW_CFG_E3_I2C_MUX1_MASK 0x00000FC0 | ||
314 | #define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT 6 | ||
315 | /* Use the PIN_CFG_XXX defines on top */ | ||
316 | #define SHARED_HW_CFG_BOARD_REV_MASK 0x00ff0000 | ||
317 | #define SHARED_HW_CFG_BOARD_REV_SHIFT 16 | ||
318 | |||
319 | #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0f000000 | ||
320 | #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24 | ||
321 | |||
322 | #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xf0000000 | ||
323 | #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28 | ||
324 | |||
325 | u32 wc_lane_config; /* 0x128 */ | ||
326 | #define SHARED_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF | ||
327 | #define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT 0 | ||
328 | #define SHARED_HW_CFG_LANE_SWAP_CFG_32103210 0x00001b1b | ||
329 | #define SHARED_HW_CFG_LANE_SWAP_CFG_32100123 0x00001be4 | ||
330 | #define SHARED_HW_CFG_LANE_SWAP_CFG_01233210 0x0000e41b | ||
331 | #define SHARED_HW_CFG_LANE_SWAP_CFG_01230123 0x0000e4e4 | ||
332 | #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF | ||
333 | #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 | ||
334 | #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00 | ||
335 | #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 | ||
336 | |||
337 | /* TX lane Polarity swap */ | ||
338 | #define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED 0x00010000 | ||
339 | #define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED 0x00020000 | ||
340 | #define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED 0x00040000 | ||
341 | #define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED 0x00080000 | ||
342 | /* RX lane Polarity swap */ | ||
343 | #define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED 0x00100000 | ||
344 | #define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED 0x00200000 | ||
345 | #define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED 0x00400000 | ||
346 | #define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED 0x00800000 | ||
347 | |||
348 | /* Selects the port layout of the board */ | ||
349 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK 0x0F000000 | ||
350 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT 24 | ||
351 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01 0x00000000 | ||
352 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10 0x01000000 | ||
353 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123 0x02000000 | ||
354 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032 0x03000000 | ||
355 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301 0x04000000 | ||
356 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210 0x05000000 | ||
192 | }; | 357 | }; |
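In the new wc_lane_config word above, the LANE_SWAP_CFG value carries the TX map in the low byte and the RX map in the high byte, and the named values (0xe4 per byte for the 0-1-2-3 order, 0x1b for 3-2-1-0) suggest two bits per lane position. A decoder written under that two-bits-per-lane assumption, for illustration only:

#include <stdio.h>

#define LANE_SWAP_CFG_TX_MASK	0x00FF
#define LANE_SWAP_CFG_TX_SHIFT	0
#define LANE_SWAP_CFG_RX_MASK	0xFF00
#define LANE_SWAP_CFG_RX_SHIFT	8

/* Print which physical lane each logical lane position maps to,
 * assuming 2 bits per position. */
static void print_map(const char *name, unsigned map)
{
	printf("%s:", name);
	for (int pos = 0; pos < 4; pos++)
		printf(" %u", (map >> (2 * pos)) & 3);
	printf("\n");
}

int main(void)
{
	/* SHARED_HW_CFG_LANE_SWAP_CFG_32100123 from the listing above */
	unsigned cfg = 0x1be4;

	print_map("tx", (cfg & LANE_SWAP_CFG_TX_MASK) >> LANE_SWAP_CFG_TX_SHIFT);	/* 0 1 2 3 */
	print_map("rx", (cfg & LANE_SWAP_CFG_RX_MASK) >> LANE_SWAP_CFG_RX_SHIFT);	/* 3 2 1 0 */
	return 0;
}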
193 | 358 | ||
194 | 359 | ||
195 | /**************************************************************************** | 360 | /**************************************************************************** |
196 | * Port HW configuration * | 361 | * Port HW configuration * |
197 | ****************************************************************************/ | 362 | ****************************************************************************/ |
198 | struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ | 363 | struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ |
199 | 364 | ||
200 | u32 pci_id; | 365 | u32 pci_id; |
201 | #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000 | 366 | #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000 |
202 | #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff | 367 | #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff |
203 | 368 | ||
204 | u32 pci_sub_id; | 369 | u32 pci_sub_id; |
205 | #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000 | 370 | #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000 |
206 | #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff | 371 | #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff |
207 | 372 | ||
208 | u32 power_dissipated; | 373 | u32 power_dissipated; |
209 | #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000 | 374 | #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff |
210 | #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24 | 375 | #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0 |
211 | #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000 | 376 | #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00 |
212 | #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16 | 377 | #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8 |
213 | #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00 | 378 | #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000 |
214 | #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8 | 379 | #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16 |
215 | #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff | 380 | #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000 |
216 | #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0 | 381 | #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24 |
217 | 382 | ||
218 | u32 power_consumed; | 383 | u32 power_consumed; |
219 | #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000 | 384 | #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff |
220 | #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24 | 385 | #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0 |
221 | #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000 | 386 | #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00 |
222 | #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16 | 387 | #define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8 |
223 | #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00 | 388 | #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000 |
224 | #define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8 | 389 | #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16 |
225 | #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff | 390 | #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000 |
226 | #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0 | 391 | #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24 |
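power_dissipated and power_consumed pack one value per PCI power state into a single word, one byte per state with D0 in the low byte and D3 in the high byte; each value is read with the matching mask and shift. A short sketch; the example NVRAM word is invented:

#include <stdio.h>

#define PORT_HW_CFG_POWER_DIS_D0_SHIFT	0
#define PORT_HW_CFG_POWER_DIS_D1_SHIFT	8
#define PORT_HW_CFG_POWER_DIS_D2_SHIFT	16
#define PORT_HW_CFG_POWER_DIS_D3_SHIFT	24

int main(void)
{
	/* Invented word: D0 = 0x20, D1 = 0x18, D2 = 0x10, D3 = 0x08 */
	unsigned power_dissipated = 0x08101820;
	static const unsigned shift[4] = {
		PORT_HW_CFG_POWER_DIS_D0_SHIFT, PORT_HW_CFG_POWER_DIS_D1_SHIFT,
		PORT_HW_CFG_POWER_DIS_D2_SHIFT, PORT_HW_CFG_POWER_DIS_D3_SHIFT,
	};

	for (int d = 0; d < 4; d++)
		printf("D%d: 0x%02x\n", d, (power_dissipated >> shift[d]) & 0xff);
	return 0;
}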
227 | 392 | ||
228 | u32 mac_upper; | 393 | u32 mac_upper; |
229 | #define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff | 394 | #define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff |
230 | #define PORT_HW_CFG_UPPERMAC_SHIFT 0 | 395 | #define PORT_HW_CFG_UPPERMAC_SHIFT 0 |
231 | u32 mac_lower; | 396 | u32 mac_lower; |
232 | 397 | ||
233 | u32 iscsi_mac_upper; /* Upper 16 bits are always zeroes */ | 398 | u32 iscsi_mac_upper; /* Upper 16 bits are always zeroes */ |
@@ -237,642 +402,807 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ | |||
237 | u32 rdma_mac_lower; | 402 | u32 rdma_mac_lower; |
238 | 403 | ||
239 | u32 serdes_config; | 404 | u32 serdes_config; |
240 | #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000FFFF | 405 | #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000ffff |
241 | #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0 | 406 | #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0 |
242 | 407 | ||
243 | #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xFFFF0000 | 408 | #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xffff0000 |
244 | #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 | 409 | #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 |
245 | 410 | ||
246 | 411 | ||
247 | u32 Reserved0[3]; /* 0x158 */ | 412 | /* Default values: 2P-64, 4P-32 */ |
248 | /* Controls the TX laser of the SFP+ module */ | 413 | u32 pf_config; /* 0x158 */ |
249 | u32 sfp_ctrl; /* 0x164 */ | 414 | #define PORT_HW_CFG_PF_NUM_VF_MASK 0x0000007F |
250 | #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF | 415 | #define PORT_HW_CFG_PF_NUM_VF_SHIFT 0 |
251 | #define PORT_HW_CFG_TX_LASER_SHIFT 0 | 416 | |
252 | #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000 | 417 | /* Default values: 17 */ |
253 | #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001 | 418 | #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_MASK 0x00007F00 |
254 | #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002 | 419 | #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_SHIFT 8 |
255 | #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003 | 420 | |
256 | #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004 | 421 | #define PORT_HW_CFG_ENABLE_FLR_MASK 0x00010000 |
257 | 422 | #define PORT_HW_CFG_FLR_ENABLED 0x00010000 | |
258 | /* Controls the fault module LED of the SFP+ */ | 423 | |
259 | #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00 | 424 | u32 vf_config; /* 0x15C */ |
260 | #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8 | 425 | #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_MASK 0x0000007F |
261 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000 | 426 | #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_SHIFT 0 |
262 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100 | 427 | |
263 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200 | 428 | #define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK 0xFFFF0000 |
264 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300 | 429 | #define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT 16 |
265 | #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400 | 430 | |
266 | u32 Reserved01[12]; /* 0x158 */ | 431 | u32 mf_pci_id; /* 0x160 */ |
267 | /* for external PHY, or forced mode or during AN */ | 432 | #define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK 0x0000FFFF |
268 | u16 xgxs_config_rx[4]; /* 0x198 */ | 433 | #define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT 0 |
269 | 434 | ||
270 | u16 xgxs_config_tx[4]; /* 0x1A0 */ | 435 | /* Controls the TX laser of the SFP+ module */ |
271 | 436 | u32 sfp_ctrl; /* 0x164 */ | |
272 | u32 Reserved1[56]; /* 0x1A8 */ | 437 | #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF |
273 | u32 default_cfg; /* 0x288 */ | 438 | #define PORT_HW_CFG_TX_LASER_SHIFT 0 |
274 | #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003 | 439 | #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000 |
275 | #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0 | 440 | #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001 |
276 | #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000 | 441 | #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002 |
277 | #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001 | 442 | #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003 |
278 | #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002 | 443 | #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004 |
279 | #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003 | 444 | |
280 | 445 | /* Controls the fault module LED of the SFP+ */ | |
281 | #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C | 446 | #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00 |
282 | #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2 | 447 | #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8 |
283 | #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000 | 448 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000 |
284 | #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004 | 449 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100 |
285 | #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008 | 450 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200 |
286 | #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c | 451 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300 |
287 | 452 | #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400 | |
288 | #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030 | 453 | |
289 | #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4 | 454 | /* The output pin TX_DIS that controls the TX laser of the SFP+ |
290 | #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000 | 455 | module. Use the PIN_CFG_XXX defines on top */ |
291 | #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010 | 456 | u32 e3_sfp_ctrl; /* 0x168 */ |
292 | #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020 | 457 | #define PORT_HW_CFG_E3_TX_LASER_MASK 0x000000FF |
293 | #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030 | 458 | #define PORT_HW_CFG_E3_TX_LASER_SHIFT 0 |
294 | 459 | ||
295 | #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0 | 460 | /* The output pin for SFPP_TYPE which turns on the Fault module LED */ |
296 | #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6 | 461 | #define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK 0x0000FF00 |
297 | #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000 | 462 | #define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT 8 |
298 | #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040 | 463 | |
299 | #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080 | 464 | /* The input pin MOD_ABS that indicates whether SFP+ module is |
300 | #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0 | 465 | present or not. Use the PIN_CFG_XXX defines on top */ |
466 | #define PORT_HW_CFG_E3_MOD_ABS_MASK 0x00FF0000 | ||
467 | #define PORT_HW_CFG_E3_MOD_ABS_SHIFT 16 | ||
468 | |||
469 | /* The output pin PWRDIS_SFP_X which disables the power of the SFP+ | ||
470 | module. Use the PIN_CFG_XXX defines on top */ | ||
471 | #define PORT_HW_CFG_E3_PWR_DIS_MASK 0xFF000000 | ||
472 | #define PORT_HW_CFG_E3_PWR_DIS_SHIFT 24 | ||
301 | 473 | ||
302 | /* | 474 | /* |
303 | * When KR link is required to be set to force which is not | 475 | * The input pin which signals module transmit fault. Use the |
304 | * KR-compliant, this parameter determine what is the trigger for it. | 476 | * PIN_CFG_XXX defines on top |
305 | * When GPIO is selected, low input will force the speed. Currently | ||
306 | * default speed is 1G. In the future, it may be widen to select the | ||
307 | * forced speed in with another parameter. Note when force-1G is | ||
308 | * enabled, it override option 56: Link Speed option. | ||
309 | */ | 477 | */ |
310 | #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00 | 478 | u32 e3_cmn_pin_cfg; /* 0x16C */ |
311 | #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8 | 479 | #define PORT_HW_CFG_E3_TX_FAULT_MASK 0x000000FF |
312 | #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000 | 480 | #define PORT_HW_CFG_E3_TX_FAULT_SHIFT 0 |
313 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100 | 481 | |
314 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200 | 482 | /* The output pin which resets the PHY. Use the PIN_CFG_XXX defines on |
315 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300 | 483 | top */ |
316 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400 | 484 | #define PORT_HW_CFG_E3_PHY_RESET_MASK 0x0000FF00 |
317 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500 | 485 | #define PORT_HW_CFG_E3_PHY_RESET_SHIFT 8 |
318 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600 | 486 | |
319 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700 | 487 | /* |
320 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800 | 488 | * The output pin which powers down the PHY. Use the PIN_CFG_XXX |
321 | #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900 | 489 | * defines on top |
322 | /* Enable to determine with which GPIO to reset the external phy */ | 490 | */ |
323 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000 | 491 | #define PORT_HW_CFG_E3_PWR_DOWN_MASK 0x00FF0000 |
324 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16 | 492 | #define PORT_HW_CFG_E3_PWR_DOWN_SHIFT 16 |
325 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000 | 493 | |
326 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000 | 494 | /* The output pin values BSC_SEL which selects the I2C for this port |
327 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000 | 495 | in the I2C Mux */ |
328 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000 | 496 | #define PORT_HW_CFG_E3_I2C_MUX0_MASK 0x01000000 |
329 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000 | 497 | #define PORT_HW_CFG_E3_I2C_MUX1_MASK 0x02000000 |
330 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000 | 498 | |
331 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000 | 499 | |
332 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000 | 500 | /* |
333 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000 | 501 | * The input pin I_FAULT which indicates over-current has occurred. |
502 | * Use the PIN_CFG_XXX defines on top | ||
503 | */ | ||
504 | u32 e3_cmn_pin_cfg1; /* 0x170 */ | ||
505 | #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF | ||
506 | #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0 | ||
507 | u32 reserved0[7]; /* 0x174 */ | ||
508 | |||
509 | u32 aeu_int_mask; /* 0x190 */ | ||
510 | |||
511 | u32 media_type; /* 0x194 */ | ||
512 | #define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK 0x000000FF | ||
513 | #define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT 0 | ||
514 | |||
515 | #define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK 0x0000FF00 | ||
516 | #define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT 8 | ||
517 | |||
518 | #define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK 0x00FF0000 | ||
519 | #define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT 16 | ||
520 | |||
521 | /* 4 times 16 bits for all 4 lanes. In case an external PHY is present | ||
522 | (not direct mode), those values will not take effect on the 4 XGXS | ||
523 | lanes. For some external PHYs (such as 8706 and 8726) the values | ||
524 | will be used to configure the external PHY; in those cases, not | ||
525 | all 4 values are needed. */ | ||
526 | u16 xgxs_config_rx[4]; /* 0x198 */ | ||
527 | u16 xgxs_config_tx[4]; /* 0x1A0 */ | ||
528 | |||
529 | /* For storing FCOE mac on shared memory */ | ||
530 | u32 fcoe_fip_mac_upper; | ||
531 | #define PORT_HW_CFG_FCOE_UPPERMAC_MASK 0x0000ffff | ||
532 | #define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT 0 | ||
533 | u32 fcoe_fip_mac_lower; | ||
534 | |||
535 | u32 fcoe_wwn_port_name_upper; | ||
536 | u32 fcoe_wwn_port_name_lower; | ||
537 | |||
538 | u32 fcoe_wwn_node_name_upper; | ||
539 | u32 fcoe_wwn_node_name_lower; | ||
540 | |||
541 | u32 Reserved1[49]; /* 0x1C0 */ | ||
542 | |||
543 | /* Enable RJ45 magjack pair swapping on 10GBase-T PHY (0=default), | ||
544 | 84833 only */ | ||
545 | u32 xgbt_phy_cfg; /* 0x284 */ | ||
546 | #define PORT_HW_CFG_RJ45_PAIR_SWAP_MASK 0x000000FF | ||
547 | #define PORT_HW_CFG_RJ45_PAIR_SWAP_SHIFT 0 | ||
548 | |||
549 | u32 default_cfg; /* 0x288 */ | ||
550 | #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003 | ||
551 | #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0 | ||
552 | #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000 | ||
553 | #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001 | ||
554 | #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002 | ||
555 | #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003 | ||
556 | |||
557 | #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C | ||
558 | #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2 | ||
559 | #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000 | ||
560 | #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004 | ||
561 | #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008 | ||
562 | #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c | ||
563 | |||
564 | #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030 | ||
565 | #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4 | ||
566 | #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000 | ||
567 | #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010 | ||
568 | #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020 | ||
569 | #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030 | ||
570 | |||
571 | #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0 | ||
572 | #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6 | ||
573 | #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000 | ||
574 | #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040 | ||
575 | #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080 | ||
576 | #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0 | ||
577 | |||
578 | /* When the KR link is required to be set to a forced mode which is not | ||
579 | KR-compliant, this parameter determines the trigger for it. | ||
580 | When GPIO is selected, a low input will force the speed. Currently the | ||
581 | default speed is 1G. In the future, it may be widened to select the | ||
582 | forced speed with another parameter. Note that when force-1G is | ||
583 | enabled, it overrides option 56: Link Speed option. */ | ||
584 | #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00 | ||
585 | #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8 | ||
586 | #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000 | ||
587 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100 | ||
588 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200 | ||
589 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300 | ||
590 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400 | ||
591 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500 | ||
592 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600 | ||
593 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700 | ||
594 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800 | ||
595 | #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900 | ||
596 | /* Enable to determine with which GPIO to reset the external phy */ | ||
597 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000 | ||
598 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16 | ||
599 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000 | ||
600 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000 | ||
601 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000 | ||
602 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000 | ||
603 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000 | ||
604 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000 | ||
605 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000 | ||
606 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000 | ||
607 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000 | ||
608 | |||
334 | /* Enable BAM on KR */ | 609 | /* Enable BAM on KR */ |
335 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 | 610 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 |
336 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 | 611 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 |
337 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 | 612 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 |
338 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 | 613 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 |
339 | 614 | ||
340 | /* Enable Common Mode Sense */ | 615 | /* Enable Common Mode Sense */ |
341 | #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000 | 616 | #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000 |
342 | #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21 | 617 | #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21 |
343 | #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000 | 618 | #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000 |
344 | #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000 | 619 | #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000 |
620 | |||
621 | /* Enable RJ45 magjack pair swapping on 10GBase-T PHY, 84833 only */ | ||
622 | #define PORT_HW_CFG_RJ45_PR_SWP_MASK 0x00400000 | ||
623 | #define PORT_HW_CFG_RJ45_PR_SWP_SHIFT 22 | ||
624 | #define PORT_HW_CFG_RJ45_PR_SWP_DISABLED 0x00000000 | ||
625 | #define PORT_HW_CFG_RJ45_PR_SWP_ENABLED 0x00400000 | ||
626 | |||
627 | /* Determine the Serdes electrical interface */ | ||
628 | #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000 | ||
629 | #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24 | ||
630 | #define PORT_HW_CFG_NET_SERDES_IF_SGMII 0x00000000 | ||
631 | #define PORT_HW_CFG_NET_SERDES_IF_XFI 0x01000000 | ||
632 | #define PORT_HW_CFG_NET_SERDES_IF_SFI 0x02000000 | ||
633 | #define PORT_HW_CFG_NET_SERDES_IF_KR 0x03000000 | ||
634 | #define PORT_HW_CFG_NET_SERDES_IF_DXGXS 0x04000000 | ||
635 | #define PORT_HW_CFG_NET_SERDES_IF_KR2 0x05000000 | ||
636 | |||
345 | 637 | ||
346 | u32 speed_capability_mask2; /* 0x28C */ | 638 | u32 speed_capability_mask2; /* 0x28C */ |
347 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF | 639 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF |
348 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 | 640 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 |
349 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001 | 641 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001 |
350 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002 | 642 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002 |
351 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004 | 643 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004 |
352 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008 | 644 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008 |
353 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010 | 645 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010 |
354 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020 | 646 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020 |
355 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040 | 647 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040 |
356 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12G 0x00000080 | 648 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G 0x00000080 |
357 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12_DOT_5G 0x00000100 | 649 | |
358 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_13G 0x00000200 | 650 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000 |
359 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_15G 0x00000400 | 651 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16 |
360 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_16G 0x00000800 | 652 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000 |
361 | 653 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000 | |
362 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000 | 654 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000 |
363 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16 | 655 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000 |
364 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000 | 656 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000 |
365 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000 | 657 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000 |
366 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000 | 658 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000 |
367 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000 | 659 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G 0x00800000 |
368 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000 | 660 | |
369 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000 | 661 | |
370 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000 | 662 | /* In the case where two media types (e.g. copper and fiber) are |
371 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12G 0x00800000 | 663 | present and electrically active at the same time, PHY Selection |
372 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12_DOT_5G 0x01000000 | 664 | will determine which of the two PHYs will be designated as the |
373 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_13G 0x02000000 | 665 | Active PHY and used for a connection to the network. */ |
374 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_15G 0x04000000 | 666 | u32 multi_phy_config; /* 0x290 */ |
375 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_16G 0x08000000 | 667 | #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007 |
376 | 668 | #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0 | |
377 | /* In the case where two media types (e.g. copper and fiber) are | 669 | #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000 |
378 | present and electrically active at the same time, PHY Selection | 670 | #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001 |
379 | will determine which of the two PHYs will be designated as the | 671 | #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002 |
380 | Active PHY and used for a connection to the network. */ | 672 | #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003 |
381 | u32 multi_phy_config; /* 0x290 */ | 673 | #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004 |
382 | #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007 | 674 | |
383 | #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0 | 675 | /* When enabled, all second phy nvram parameters will be swapped |
384 | #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000 | 676 | with the first phy parameters */ |
385 | #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001 | 677 | #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008 |
386 | #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002 | 678 | #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3 |
387 | #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003 | 679 | #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000 |
388 | #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004 | 680 | #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008 |
389 | 681 | ||
390 | /* When enabled, all second phy nvram parameters will be swapped | 682 | |
391 | with the first phy parameters */ | 683 | /* Address of the second external phy */ |
392 | #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008 | 684 | u32 external_phy_config2; /* 0x294 */ |
393 | #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3 | 685 | #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF |
394 | #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000 | 686 | #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0 |
395 | #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008 | 687 | |
396 | 688 | /* The second XGXS external PHY type */ | |
397 | 689 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00 | |
398 | /* Address of the second external phy */ | 690 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8 |
399 | u32 external_phy_config2; /* 0x294 */ | 691 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000 |
400 | #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF | 692 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100 |
401 | #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0 | 693 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200 |
402 | 694 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300 | |
403 | /* The second XGXS external PHY type */ | 695 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400 |
404 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00 | 696 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500 |
405 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8 | 697 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600 |
406 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000 | 698 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700 |
407 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100 | 699 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800 |
408 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200 | 700 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900 |
409 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300 | 701 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00 |
410 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400 | 702 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00 |
411 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500 | 703 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00 |
412 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600 | 704 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00 |
413 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700 | 705 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE 0x00000e00 |
414 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800 | 706 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00 |
415 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900 | 707 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 |
416 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00 | 708 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 |
417 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00 | 709 | |
418 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00 | 710 | |
419 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00 | 711 | /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as |
420 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 | 712 | 8706, 8726 and 8727) not all 4 values are needed. */ |
421 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 | 713 | u16 xgxs_config2_rx[4]; /* 0x296 */ |
422 | 714 | u16 xgxs_config2_tx[4]; /* 0x2A0 */ | |
423 | /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as | ||
424 | 8706, 8726 and 8727) not all 4 values are needed. */ | ||
425 | u16 xgxs_config2_rx[4]; /* 0x296 */ | ||
426 | u16 xgxs_config2_tx[4]; /* 0x2A0 */ | ||
427 | 715 | ||
428 | u32 lane_config; | 716 | u32 lane_config; |
429 | #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff | 717 | #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff |
430 | #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0 | 718 | #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0 |
431 | 719 | /* AN and forced */ | |
432 | #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff | 720 | #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b |
433 | #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 | 721 | /* forced only */ |
434 | #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00 | 722 | #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4 |
435 | #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 | 723 | /* forced only */ |
436 | #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000 | 724 | #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 |
437 | #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14 | 725 | /* forced only */ |
438 | /* AN and forced */ | 726 | #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 |
439 | #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b | 727 | #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff |
440 | /* forced only */ | 728 | #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 |
441 | #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4 | 729 | #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00 |
442 | /* forced only */ | 730 | #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 |
443 | #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 | 731 | #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000 |
444 | /* forced only */ | 732 | #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14 |
445 | #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 | 733 | |
446 | /* Indicate whether to swap the external phy polarity */ | 734 | /* Indicate whether to swap the external phy polarity */ |
447 | #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000 | 735 | #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000 |
448 | #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000 | 736 | #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000 |
449 | #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000 | 737 | #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000 |
738 | |||
450 | 739 | ||
451 | u32 external_phy_config; | 740 | u32 external_phy_config; |
452 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000 | 741 | #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff |
453 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24 | 742 | #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0 |
454 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000 | 743 | |
455 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000 | 744 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00 |
456 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000 | 745 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8 |
457 | 746 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000 | |
458 | #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000 | 747 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100 |
459 | #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16 | 748 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200 |
460 | 749 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300 | |
461 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00 | 750 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400 |
462 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8 | 751 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500 |
463 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000 | 752 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600 |
464 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100 | 753 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700 |
465 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200 | 754 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800 |
466 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300 | 755 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 |
467 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400 | 756 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 |
468 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500 | 757 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00 |
469 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600 | 758 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54640 0x00000c00 |
470 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700 | 759 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00 |
471 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800 | 760 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE 0x00000e00 |
472 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 | 761 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00 |
473 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 | 762 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00 |
474 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00 | 763 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 |
475 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00 | 764 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 |
476 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 | 765 | |
477 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 | 766 | #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000 |
478 | 767 | #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16 | |
479 | #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff | 768 | |
480 | #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0 | 769 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000 |
770 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24 | ||
771 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000 | ||
772 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000 | ||
773 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD 0x02000000 | ||
774 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000 | ||
481 | 775 | ||
482 | u32 speed_capability_mask; | 776 | u32 speed_capability_mask; |
483 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000 | 777 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff |
484 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16 | 778 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0 |
485 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000 | 779 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001 |
486 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000 | 780 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002 |
487 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000 | 781 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004 |
488 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000 | 782 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008 |
489 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000 | 783 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010 |
490 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000 | 784 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020 |
491 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000 | 785 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040 |
492 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_12G 0x00800000 | 786 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G 0x00000080 |
493 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_12_5G 0x01000000 | 787 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000 |
494 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_13G 0x02000000 | 788 | |
495 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_15G 0x04000000 | 789 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000 |
496 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_16G 0x08000000 | 790 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16 |
497 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000 | 791 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000 |
498 | 792 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000 | |
499 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff | 793 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000 |
500 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0 | 794 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000 |
501 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001 | 795 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000 |
502 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002 | 796 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000 |
503 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004 | 797 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000 |
504 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008 | 798 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G 0x00800000 |
505 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010 | 799 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000 |
506 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020 | 800 | |
507 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040 | 801 | /* A place to hold the original MAC address as a backup */ |
508 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_12G 0x00000080 | 802 | u32 backup_mac_upper; /* 0x2B4 */ |
509 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_12_5G 0x00000100 | 803 | u32 backup_mac_lower; /* 0x2B8 */ |
510 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_13G 0x00000200 | ||
511 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_15G 0x00000400 | ||
512 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_16G 0x00000800 | ||
513 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000 | ||
514 | |||
515 | u32 reserved[2]; | ||
516 | 804 | ||
517 | }; | 805 | }; |
518 | 806 | ||
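Every multi-valued field in the port_hw_cfg block above is packed into a u32 behind a *_MASK/*_SHIFT pair. A minimal sketch of the usual mask-then-shift extraction follows (illustration only, not taken from the driver; it assumes the PORT_HW_CFG_* macros from this header are in scope and uses uint32_t in place of the kernel's u32):

	#include <stdint.h>

	/* Generic field extraction for the MASK/SHIFT pairs defined above. */
	static inline uint32_t port_hw_cfg_field(uint32_t word, uint32_t mask,
						 uint32_t shift)
	{
		return (word & mask) >> shift;
	}

	/* Example: the media type reported for the first PHY. */
	static uint32_t first_phy_media_type(uint32_t media_type)
	{
		return port_hw_cfg_field(media_type,
					 PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK,
					 PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT);
	}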
519 | 807 | ||
520 | /**************************************************************************** | 808 | /**************************************************************************** |
521 | * Shared Feature configuration * | 809 | * Shared Feature configuration * |
522 | ****************************************************************************/ | 810 | ****************************************************************************/ |
523 | struct shared_feat_cfg { /* NVRAM Offset */ | 811 | struct shared_feat_cfg { /* NVRAM Offset */ |
812 | |||
813 | u32 config; /* 0x450 */ | ||
814 | #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 | ||
815 | |||
816 | /* Use NVRAM values instead of HW default values */ | ||
817 | #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \ | ||
818 | 0x00000002 | ||
819 | #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \ | ||
820 | 0x00000000 | ||
821 | #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \ | ||
822 | 0x00000002 | ||
524 | 823 | ||
525 | u32 config; /* 0x450 */ | 824 | #define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK 0x00000008 |
526 | #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 | 825 | #define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO 0x00000000 |
826 | #define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM 0x00000008 | ||
527 | 827 | ||
528 | /* Use the values from options 47 and 48 instead of the HW default | 828 | #define SHARED_FEAT_CFG_NCSI_ID_MASK 0x00000030 |
529 | values */ | 829 | #define SHARED_FEAT_CFG_NCSI_ID_SHIFT 4 |
530 | #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000 | ||
531 | #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002 | ||
532 | 830 | ||
533 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700 | 831 | /* Override the OTP back to single function mode. When using GPIO, |
534 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8 | 832 | high means only SF, 0 is according to CLP configuration */ |
535 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000 | 833 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700 |
536 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 | 834 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8 |
537 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 | 835 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000 |
538 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 | 836 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 |
837 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 | ||
838 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 | ||
839 | |||
840 | /* The interval in seconds between sending LLDP packets. Set to zero | ||
841 | to disable the feature */ | ||
842 | #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK 0x00ff0000 | ||
843 | #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT 16 | ||
844 | |||
845 | /* The assigned device type ID for LLDP usage */ | ||
846 | #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK 0xff000000 | ||
847 | #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT 24 | ||
539 | 848 | ||
540 | }; | 849 | }; |
541 | 850 | ||
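The force-single-function selector above is read the same way; the following hedged sketch (not the driver's code) assumes only the SHARED_FEAT_CFG_FORCE_SF_MODE_* macros from this header:

	#include <stdint.h>

	/* Non-zero when the shared feature word forces single-function mode,
	 * overriding the OTP/CLP configuration. */
	static int shared_cfg_forces_sf(uint32_t shared_feat_config)
	{
		uint32_t mode = shared_feat_config &
				SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;

		return mode == SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF;
	}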
542 | 851 | ||
543 | /**************************************************************************** | 852 | /**************************************************************************** |
544 | * Port Feature configuration * | 853 | * Port Feature configuration * |
545 | ****************************************************************************/ | 854 | ****************************************************************************/ |
546 | struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ | 855 | struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ |
547 | 856 | ||
548 | u32 config; | 857 | u32 config; |
549 | #define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f | 858 | #define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f |
550 | #define PORT_FEATURE_BAR1_SIZE_SHIFT 0 | 859 | #define PORT_FEATURE_BAR1_SIZE_SHIFT 0 |
551 | #define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000 | 860 | #define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000 |
552 | #define PORT_FEATURE_BAR1_SIZE_64K 0x00000001 | 861 | #define PORT_FEATURE_BAR1_SIZE_64K 0x00000001 |
553 | #define PORT_FEATURE_BAR1_SIZE_128K 0x00000002 | 862 | #define PORT_FEATURE_BAR1_SIZE_128K 0x00000002 |
554 | #define PORT_FEATURE_BAR1_SIZE_256K 0x00000003 | 863 | #define PORT_FEATURE_BAR1_SIZE_256K 0x00000003 |
555 | #define PORT_FEATURE_BAR1_SIZE_512K 0x00000004 | 864 | #define PORT_FEATURE_BAR1_SIZE_512K 0x00000004 |
556 | #define PORT_FEATURE_BAR1_SIZE_1M 0x00000005 | 865 | #define PORT_FEATURE_BAR1_SIZE_1M 0x00000005 |
557 | #define PORT_FEATURE_BAR1_SIZE_2M 0x00000006 | 866 | #define PORT_FEATURE_BAR1_SIZE_2M 0x00000006 |
558 | #define PORT_FEATURE_BAR1_SIZE_4M 0x00000007 | 867 | #define PORT_FEATURE_BAR1_SIZE_4M 0x00000007 |
559 | #define PORT_FEATURE_BAR1_SIZE_8M 0x00000008 | 868 | #define PORT_FEATURE_BAR1_SIZE_8M 0x00000008 |
560 | #define PORT_FEATURE_BAR1_SIZE_16M 0x00000009 | 869 | #define PORT_FEATURE_BAR1_SIZE_16M 0x00000009 |
561 | #define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a | 870 | #define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a |
562 | #define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b | 871 | #define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b |
563 | #define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c | 872 | #define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c |
564 | #define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d | 873 | #define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d |
565 | #define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e | 874 | #define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e |
566 | #define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f | 875 | #define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f |
567 | #define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0 | 876 | #define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0 |
568 | #define PORT_FEATURE_BAR2_SIZE_SHIFT 4 | 877 | #define PORT_FEATURE_BAR2_SIZE_SHIFT 4 |
569 | #define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000 | 878 | #define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000 |
570 | #define PORT_FEATURE_BAR2_SIZE_64K 0x00000010 | 879 | #define PORT_FEATURE_BAR2_SIZE_64K 0x00000010 |
571 | #define PORT_FEATURE_BAR2_SIZE_128K 0x00000020 | 880 | #define PORT_FEATURE_BAR2_SIZE_128K 0x00000020 |
572 | #define PORT_FEATURE_BAR2_SIZE_256K 0x00000030 | 881 | #define PORT_FEATURE_BAR2_SIZE_256K 0x00000030 |
573 | #define PORT_FEATURE_BAR2_SIZE_512K 0x00000040 | 882 | #define PORT_FEATURE_BAR2_SIZE_512K 0x00000040 |
574 | #define PORT_FEATURE_BAR2_SIZE_1M 0x00000050 | 883 | #define PORT_FEATURE_BAR2_SIZE_1M 0x00000050 |
575 | #define PORT_FEATURE_BAR2_SIZE_2M 0x00000060 | 884 | #define PORT_FEATURE_BAR2_SIZE_2M 0x00000060 |
576 | #define PORT_FEATURE_BAR2_SIZE_4M 0x00000070 | 885 | #define PORT_FEATURE_BAR2_SIZE_4M 0x00000070 |
577 | #define PORT_FEATURE_BAR2_SIZE_8M 0x00000080 | 886 | #define PORT_FEATURE_BAR2_SIZE_8M 0x00000080 |
578 | #define PORT_FEATURE_BAR2_SIZE_16M 0x00000090 | 887 | #define PORT_FEATURE_BAR2_SIZE_16M 0x00000090 |
579 | #define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0 | 888 | #define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0 |
580 | #define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0 | 889 | #define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0 |
581 | #define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0 | 890 | #define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0 |
582 | #define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0 | 891 | #define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0 |
583 | #define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0 | 892 | #define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0 |
584 | #define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0 | 893 | #define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0 |
585 | #define PORT_FEATURE_EN_SIZE_MASK 0x07000000 | 894 | |
586 | #define PORT_FEATURE_EN_SIZE_SHIFT 24 | 895 | #define PORT_FEAT_CFG_DCBX_MASK 0x00000100 |
587 | #define PORT_FEATURE_WOL_ENABLED 0x01000000 | 896 | #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000 |
588 | #define PORT_FEATURE_MBA_ENABLED 0x02000000 | 897 | #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100 |
589 | #define PORT_FEATURE_MFW_ENABLED 0x04000000 | 898 | |
590 | 899 | #define PORT_FEAT_CFG_AUTOGREEN_MASK 0x00000200 | |
591 | /* Reserved bits: 28-29 */ | 900 | #define PORT_FEAT_CFG_AUTOGREEN_SHIFT 9 |
592 | /* Check the optic vendor via i2c against a list of approved modules | 901 | #define PORT_FEAT_CFG_AUTOGREEN_DISABLED 0x00000000 |
593 | in a separate nvram image */ | 902 | #define PORT_FEAT_CFG_AUTOGREEN_ENABLED 0x00000200 |
594 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xE0000000 | 903 | |
595 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29 | 904 | #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000 |
596 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT 0x00000000 | 905 | #define PORT_FEATURE_EN_SIZE_SHIFT 24 |
597 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER 0x20000000 | 906 | #define PORT_FEATURE_WOL_ENABLED 0x01000000 |
598 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000 | 907 | #define PORT_FEATURE_MBA_ENABLED 0x02000000 |
599 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000 | 908 | #define PORT_FEATURE_MFW_ENABLED 0x04000000 |
600 | 909 | ||
910 | /* Advertise expansion ROM even if MBA is disabled */ | ||
911 | #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK 0x08000000 | ||
912 | #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED 0x00000000 | ||
913 | #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED 0x08000000 | ||
914 | |||
915 | /* Check the optic vendor via i2c against a list of approved modules | ||
916 | in a separate nvram image */ | ||
917 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xe0000000 | ||
918 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29 | ||
919 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \ | ||
920 | 0x00000000 | ||
921 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \ | ||
922 | 0x20000000 | ||
923 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000 | ||
924 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000 | ||
601 | 925 | ||
602 | u32 wol_config; | 926 | u32 wol_config; |
603 | /* Default is used when driver sets to "auto" mode */ | 927 | /* Default is used when driver sets to "auto" mode */ |
604 | #define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003 | 928 | #define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003 |
605 | #define PORT_FEATURE_WOL_DEFAULT_SHIFT 0 | 929 | #define PORT_FEATURE_WOL_DEFAULT_SHIFT 0 |
606 | #define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000 | 930 | #define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000 |
607 | #define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001 | 931 | #define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001 |
608 | #define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002 | 932 | #define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002 |
609 | #define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003 | 933 | #define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003 |
610 | #define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004 | 934 | #define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004 |
611 | #define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 0x00000008 | 935 | #define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 0x00000008 |
612 | #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010 | 936 | #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010 |
613 | 937 | ||
614 | u32 mba_config; | 938 | u32 mba_config; |
615 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000003 | 939 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000007 |
616 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0 | 940 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0 |
617 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000 | 941 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000 |
618 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001 | 942 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001 |
619 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002 | 943 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002 |
620 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003 | 944 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003 |
621 | #define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100 | 945 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT 0x00000004 |
622 | #define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200 | 946 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE 0x00000007 |
623 | #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400 | 947 | |
624 | #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000 | 948 | #define PORT_FEATURE_MBA_BOOT_RETRY_MASK 0x00000038 |
625 | #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800 | 949 | #define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT 3 |
626 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000 | 950 | |
627 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12 | 951 | #define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100 |
628 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000 | 952 | #define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200 |
629 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000 | 953 | #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400 |
630 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000 | 954 | #define PORT_FEATURE_MBA_HOTKEY_MASK 0x00000800 |
631 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000 | 955 | #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000 |
632 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000 | 956 | #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800 |
633 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000 | 957 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000 |
634 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000 | 958 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12 |
635 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000 | 959 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000 |
636 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000 | 960 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000 |
637 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000 | 961 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000 |
638 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000 | 962 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000 |
639 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000 | 963 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000 |
640 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000 | 964 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000 |
641 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000 | 965 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000 |
642 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000 | 966 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000 |
643 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000 | 967 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000 |
644 | #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000 | 968 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000 |
645 | #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20 | 969 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000 |
646 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000 | 970 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000 |
647 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24 | 971 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000 |
648 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000 | 972 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000 |
649 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000 | 973 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000 |
650 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000 | 974 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000 |
651 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000 | 975 | #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000 |
652 | #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000 | 976 | #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20 |
653 | #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26 | 977 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000 |
654 | #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000 | 978 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24 |
655 | #define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000 | 979 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000 |
656 | #define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000 | 980 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000 |
657 | #define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000 | 981 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000 |
658 | #define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000 | 982 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000 |
659 | #define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000 | 983 | #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000 |
660 | #define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000 | 984 | #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26 |
661 | #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000 | 985 | #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000 |
662 | #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_KX4 0x20000000 | 986 | #define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000 |
663 | #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_KR 0x24000000 | 987 | #define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000 |
664 | #define PORT_FEATURE_MBA_LINK_SPEED_12GBPS 0x28000000 | 988 | #define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000 |
665 | #define PORT_FEATURE_MBA_LINK_SPEED_12_5GBPS 0x2c000000 | 989 | #define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000 |
666 | #define PORT_FEATURE_MBA_LINK_SPEED_13GBPS 0x30000000 | 990 | #define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000 |
667 | #define PORT_FEATURE_MBA_LINK_SPEED_15GBPS 0x34000000 | 991 | #define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000 |
668 | #define PORT_FEATURE_MBA_LINK_SPEED_16GBPS 0x38000000 | 992 | #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000 |
669 | 993 | #define PORT_FEATURE_MBA_LINK_SPEED_20GBPS 0x20000000 | |
670 | u32 bmc_config; | 994 | u32 bmc_config; |
671 | #define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000 | 995 | #define PORT_FEATURE_BMC_LINK_OVERRIDE_MASK 0x00000001 |
672 | #define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001 | 996 | #define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000 |
997 | #define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001 | ||
673 | 998 | ||
674 | u32 mba_vlan_cfg; | 999 | u32 mba_vlan_cfg; |
675 | #define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff | 1000 | #define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff |
676 | #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0 | 1001 | #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0 |
677 | #define PORT_FEATURE_MBA_VLAN_EN 0x00010000 | 1002 | #define PORT_FEATURE_MBA_VLAN_EN 0x00010000 |
678 | 1003 | ||
679 | u32 resource_cfg; | 1004 | u32 resource_cfg; |
680 | #define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001 | 1005 | #define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001 |
681 | #define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002 | 1006 | #define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002 |
682 | #define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004 | 1007 | #define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004 |
683 | #define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008 | 1008 | #define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008 |
684 | #define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010 | 1009 | #define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010 |
685 | 1010 | ||
686 | u32 smbus_config; | 1011 | u32 smbus_config; |
687 | /* Obsolete */ | 1012 | #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe |
688 | #define PORT_FEATURE_SMBUS_EN 0x00000001 | 1013 | #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1 |
689 | #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe | 1014 | |
690 | #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1 | 1015 | u32 vf_config; |
691 | 1016 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK 0x0000000f | |
692 | u32 reserved1; | 1017 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT 0 |
1018 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED 0x00000000 | ||
1019 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_4K 0x00000001 | ||
1020 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_8K 0x00000002 | ||
1021 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_16K 0x00000003 | ||
1022 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_32K 0x00000004 | ||
1023 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_64K 0x00000005 | ||
1024 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_128K 0x00000006 | ||
1025 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_256K 0x00000007 | ||
1026 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_512K 0x00000008 | ||
1027 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_1M 0x00000009 | ||
1028 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_2M 0x0000000a | ||
1029 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_4M 0x0000000b | ||
1030 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_8M 0x0000000c | ||
1031 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_16M 0x0000000d | ||
1032 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_32M 0x0000000e | ||
1033 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_64M 0x0000000f | ||
693 | 1034 | ||
694 | u32 link_config; /* Used as HW defaults for the driver */ | 1035 | u32 link_config; /* Used as HW defaults for the driver */ |
695 | #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000 | 1036 | #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000 |
696 | #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24 | 1037 | #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24 |
697 | /* (forced) low speed switch (< 10G) */ | 1038 | /* (forced) low speed switch (< 10G) */ |
698 | #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000 | 1039 | #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000 |
699 | /* (forced) high speed switch (>= 10G) */ | 1040 | /* (forced) high speed switch (>= 10G) */ |
700 | #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000 | 1041 | #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000 |
701 | #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000 | 1042 | #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000 |
702 | #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000 | 1043 | #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000 |
703 | 1044 | ||
704 | #define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000 | 1045 | #define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000 |
705 | #define PORT_FEATURE_LINK_SPEED_SHIFT 16 | 1046 | #define PORT_FEATURE_LINK_SPEED_SHIFT 16 |
706 | #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000 | 1047 | #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000 |
707 | #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000 | 1048 | #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000 |
708 | #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000 | 1049 | #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000 |
709 | #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000 | 1050 | #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000 |
710 | #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000 | 1051 | #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000 |
711 | #define PORT_FEATURE_LINK_SPEED_1G 0x00050000 | 1052 | #define PORT_FEATURE_LINK_SPEED_1G 0x00050000 |
712 | #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000 | 1053 | #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000 |
713 | #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000 | 1054 | #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000 |
714 | #define PORT_FEATURE_LINK_SPEED_10G_KX4 0x00080000 | 1055 | #define PORT_FEATURE_LINK_SPEED_20G 0x00080000 |
715 | #define PORT_FEATURE_LINK_SPEED_10G_KR 0x00090000 | 1056 | |
716 | #define PORT_FEATURE_LINK_SPEED_12G 0x000a0000 | 1057 | #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700 |
717 | #define PORT_FEATURE_LINK_SPEED_12_5G 0x000b0000 | 1058 | #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8 |
718 | #define PORT_FEATURE_LINK_SPEED_13G 0x000c0000 | 1059 | #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000 |
719 | #define PORT_FEATURE_LINK_SPEED_15G 0x000d0000 | 1060 | #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100 |
720 | #define PORT_FEATURE_LINK_SPEED_16G 0x000e0000 | 1061 | #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200 |
721 | 1062 | #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300 | |
722 | #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700 | 1063 | #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400 |
723 | #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8 | ||
724 | #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000 | ||
725 | #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100 | ||
726 | #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200 | ||
727 | #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300 | ||
728 | #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400 | ||
729 | 1064 | ||
730 | /* The default for MCP link configuration, | 1065 | /* The default for MCP link configuration, |
731 | uses the same defines as link_config */ | 1066 | uses the same defines as link_config */ |
732 | u32 mfw_wol_link_cfg; | 1067 | u32 mfw_wol_link_cfg; |
1068 | |||
733 | /* The default for the driver of the second external phy, | 1069 | /* The default for the driver of the second external phy, |
734 | uses the same defines as link_config */ | 1070 | uses the same defines as link_config */ |
735 | u32 link_config2; /* 0x47C */ | 1071 | u32 link_config2; /* 0x47C */ |
736 | 1072 | ||
737 | /* The default for MCP of the second external phy, | 1073 | /* The default for MCP of the second external phy, |
738 | uses the same defines as link_config */ | 1074 | uses the same defines as link_config */ |
739 | u32 mfw_wol_link_cfg2; /* 0x480 */ | 1075 | u32 mfw_wol_link_cfg2; /* 0x480 */ |
740 | 1076 | ||
741 | u32 Reserved2[17]; /* 0x484 */ | 1077 | u32 Reserved2[17]; /* 0x484 */ |
742 | 1078 | ||
743 | }; | 1079 | }; |
744 | 1080 | ||
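The link-speed and flow-control selections above are packed into a single 32-bit configuration word (the same encoding is reused by link_config, as the mfw_wol_link_cfg comment below notes). A minimal decoding sketch in C, using only the defines shown here and assuming the kernel u32/bool typedefs; the helper names are illustrative, not part of the driver:

static const char *port_cfg_speed_str(u32 cfg)
{
        /* The 4-bit speed selector occupies bits 16..19 of the word. */
        switch (cfg & PORT_FEATURE_LINK_SPEED_MASK) {
        case PORT_FEATURE_LINK_SPEED_AUTO:      return "auto";
        case PORT_FEATURE_LINK_SPEED_10M_FULL:  return "10M full";
        case PORT_FEATURE_LINK_SPEED_100M_FULL: return "100M full";
        case PORT_FEATURE_LINK_SPEED_1G:        return "1G";
        case PORT_FEATURE_LINK_SPEED_2_5G:      return "2.5G";
        case PORT_FEATURE_LINK_SPEED_10G_CX4:   return "10G CX4";
        case PORT_FEATURE_LINK_SPEED_20G:       return "20G";
        default:                                return "other/unknown";
        }
}

static bool port_cfg_rx_pause_enabled(u32 cfg)
{
        u32 fc = cfg & PORT_FEATURE_FLOW_CONTROL_MASK;

        /* RX pause is requested by the RX-only and both-directions settings. */
        return fc == PORT_FEATURE_FLOW_CONTROL_RX ||
               fc == PORT_FEATURE_FLOW_CONTROL_BOTH;
}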
745 | 1081 | ||
746 | /**************************************************************************** | 1082 | /**************************************************************************** |
747 | * Device Information * | 1083 | * Device Information * |
748 | ****************************************************************************/ | 1084 | ****************************************************************************/ |
749 | struct shm_dev_info { /* size */ | 1085 | struct shm_dev_info { /* size */ |
750 | 1086 | ||
751 | u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */ | 1087 | u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */ |
752 | 1088 | ||
753 | struct shared_hw_cfg shared_hw_config; /* 40 */ | 1089 | struct shared_hw_cfg shared_hw_config; /* 40 */ |
754 | 1090 | ||
755 | struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */ | 1091 | struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */ |
756 | 1092 | ||
757 | struct shared_feat_cfg shared_feature_config; /* 4 */ | 1093 | struct shared_feat_cfg shared_feature_config; /* 4 */ |
758 | 1094 | ||
759 | struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */ | 1095 | struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */ |
760 | 1096 | ||
761 | }; | 1097 | }; |
762 | 1098 | ||
763 | 1099 | ||
764 | #define FUNC_0 0 | 1100 | #if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) |
765 | #define FUNC_1 1 | 1101 | #error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition." |
766 | #define FUNC_2 2 | 1102 | #endif |
767 | #define FUNC_3 3 | ||
768 | #define FUNC_4 4 | ||
769 | #define FUNC_5 5 | ||
770 | #define FUNC_6 6 | ||
771 | #define FUNC_7 7 | ||
772 | #define E1_FUNC_MAX 2 | ||
773 | #define E1H_FUNC_MAX 8 | ||
774 | #define E2_FUNC_MAX 4 /* per path */ | ||
775 | |||
776 | #define VN_0 0 | ||
777 | #define VN_1 1 | ||
778 | #define VN_2 2 | ||
779 | #define VN_3 3 | ||
780 | #define E1VN_MAX 1 | ||
781 | #define E1HVN_MAX 4 | ||
782 | 1103 | ||
783 | #define E2_VF_MAX 64 | 1104 | #define FUNC_0 0 |
1105 | #define FUNC_1 1 | ||
1106 | #define FUNC_2 2 | ||
1107 | #define FUNC_3 3 | ||
1108 | #define FUNC_4 4 | ||
1109 | #define FUNC_5 5 | ||
1110 | #define FUNC_6 6 | ||
1111 | #define FUNC_7 7 | ||
1112 | #define E1_FUNC_MAX 2 | ||
1113 | #define E1H_FUNC_MAX 8 | ||
1114 | #define E2_FUNC_MAX 4 /* per path */ | ||
1115 | |||
1116 | #define VN_0 0 | ||
1117 | #define VN_1 1 | ||
1118 | #define VN_2 2 | ||
1119 | #define VN_3 3 | ||
1120 | #define E1VN_MAX 1 | ||
1121 | #define E1HVN_MAX 4 | ||
1122 | |||
1123 | #define E2_VF_MAX 64 /* HC_REG_VF_CONFIGURATION_SIZE */ | ||
784 | /* This value (in milliseconds) determines the frequency of the driver | 1124 | /* This value (in milliseconds) determines the frequency of the driver |
785 | * issuing the PULSE message code. The firmware monitors this periodic | 1125 | * issuing the PULSE message code. The firmware monitors this periodic |
786 | * pulse to determine when to switch to an OS-absent mode. */ | 1126 | * pulse to determine when to switch to an OS-absent mode. */ |
787 | #define DRV_PULSE_PERIOD_MS 250 | 1127 | #define DRV_PULSE_PERIOD_MS 250 |
788 | 1128 | ||
789 | /* This value (in milliseconds) determines how long the driver should | 1129 | /* This value (in milliseconds) determines how long the driver should |
790 | * wait for an acknowledgement from the firmware before timing out. Once | 1130 | * wait for an acknowledgement from the firmware before timing out. Once |
791 | * the firmware has timed out, the driver will assume there is no firmware | 1131 | * the firmware has timed out, the driver will assume there is no firmware |
792 | * running and there won't be any firmware-driver synchronization during a | 1132 | * running and there won't be any firmware-driver synchronization during a |
793 | * driver reset. */ | 1133 | * driver reset. */ |
794 | #define FW_ACK_TIME_OUT_MS 5000 | 1134 | #define FW_ACK_TIME_OUT_MS 5000 |
795 | 1135 | ||
796 | #define FW_ACK_POLL_TIME_MS 1 | 1136 | #define FW_ACK_POLL_TIME_MS 1 |
797 | 1137 | ||
798 | #define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) | 1138 | #define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) |
799 | 1139 | ||
800 | /* LED Blink rate that will achieve ~15.9Hz */ | 1140 | /* LED Blink rate that will achieve ~15.9Hz */ |
801 | #define LED_BLINK_RATE_VAL 480 | 1141 | #define LED_BLINK_RATE_VAL 480 |
802 | 1142 | ||
803 | /**************************************************************************** | 1143 | /**************************************************************************** |
804 | * Driver <-> FW Mailbox * | 1144 | * Driver <-> FW Mailbox * |
805 | ****************************************************************************/ | 1145 | ****************************************************************************/ |
806 | struct drv_port_mb { | 1146 | struct drv_port_mb { |
807 | 1147 | ||
808 | u32 link_status; | 1148 | u32 link_status; |
809 | /* Driver should update this field on any link change event */ | 1149 | /* Driver should update this field on any link change event */ |
810 | 1150 | ||
811 | #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 | 1151 | #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 |
812 | #define LINK_STATUS_LINK_UP 0x00000001 | 1152 | #define LINK_STATUS_LINK_UP 0x00000001 |
813 | #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E | 1153 | #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E |
814 | #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1) | 1154 | #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1) |
815 | #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1) | 1155 | #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1) |
816 | #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1) | 1156 | #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1) |
817 | #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1) | 1157 | #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1) |
818 | #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1) | 1158 | #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1) |
819 | #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1) | 1159 | #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1) |
820 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1) | 1160 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1) |
821 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1) | 1161 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1) |
822 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1) | 1162 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1) |
823 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1) | 1163 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1) |
824 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1) | 1164 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1) |
825 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1) | 1165 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1) |
826 | #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1) | 1166 | #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1) |
827 | #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1) | 1167 | #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1) |
828 | #define LINK_STATUS_SPEED_AND_DUPLEX_12GTFD (11<<1) | 1168 | #define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD (11<<1) |
829 | #define LINK_STATUS_SPEED_AND_DUPLEX_12GXFD (11<<1) | 1169 | #define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD (11<<1) |
830 | #define LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD (12<<1) | 1170 | |
831 | #define LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD (12<<1) | 1171 | #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020 |
832 | #define LINK_STATUS_SPEED_AND_DUPLEX_13GTFD (13<<1) | 1172 | #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 |
833 | #define LINK_STATUS_SPEED_AND_DUPLEX_13GXFD (13<<1) | 1173 | |
834 | #define LINK_STATUS_SPEED_AND_DUPLEX_15GTFD (14<<1) | 1174 | #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 |
835 | #define LINK_STATUS_SPEED_AND_DUPLEX_15GXFD (14<<1) | 1175 | #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080 |
836 | #define LINK_STATUS_SPEED_AND_DUPLEX_16GTFD (15<<1) | 1176 | #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 |
837 | #define LINK_STATUS_SPEED_AND_DUPLEX_16GXFD (15<<1) | 1177 | |
838 | 1178 | #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 | |
839 | #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020 | 1179 | #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 |
840 | #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 | 1180 | #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800 |
841 | 1181 | #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000 | |
842 | #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 | 1182 | #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000 |
843 | #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080 | 1183 | #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000 |
844 | #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 | 1184 | #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000 |
845 | 1185 | ||
846 | #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 | 1186 | #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000 |
847 | #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 | 1187 | #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000 |
848 | #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800 | 1188 | |
849 | #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000 | 1189 | #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000 |
850 | #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000 | 1190 | #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000 |
851 | #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000 | 1191 | |
852 | #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000 | 1192 | #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 |
853 | 1193 | #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18) | |
854 | #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000 | 1194 | #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18) |
855 | #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000 | 1195 | #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18) |
856 | 1196 | #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18) | |
857 | #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000 | 1197 | |
858 | #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000 | 1198 | #define LINK_STATUS_SERDES_LINK 0x00100000 |
859 | 1199 | ||
860 | #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 | 1200 | #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000 |
861 | #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18) | 1201 | #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000 |
862 | #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18) | 1202 | #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000 |
863 | #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18) | 1203 | #define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE 0x10000000 |
864 | #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18) | 1204 | |
865 | 1205 | #define LINK_STATUS_PFC_ENABLED 0x20000000 | |
866 | #define LINK_STATUS_SERDES_LINK 0x00100000 | ||
867 | |||
868 | #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000 | ||
869 | #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000 | ||
870 | #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000 | ||
871 | #define LINK_STATUS_LINK_PARTNER_12GXFD_CAPABLE 0x01000000 | ||
872 | #define LINK_STATUS_LINK_PARTNER_12_5GXFD_CAPABLE 0x02000000 | ||
873 | #define LINK_STATUS_LINK_PARTNER_13GXFD_CAPABLE 0x04000000 | ||
874 | #define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE 0x08000000 | ||
875 | #define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE 0x10000000 | ||
876 | 1206 | ||
877 | u32 port_stx; | 1207 | u32 port_stx; |
878 | 1208 | ||
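The link_status word mixes a link-up flag, a 4-bit speed/duplex code in bits 1..4, and per-direction flow-control and partner-capability flags. A short sketch of how a consumer might decode it, assuming only the defines above and the kernel u32/bool types (the helpers themselves are illustrative):

static bool link_up_at_10g(u32 link_status)
{
        if (!(link_status & LINK_STATUS_LINK_UP))
                return false;

        /* Compare the whole 4-bit speed/duplex field, not individual bits. */
        return (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) ==
               LINK_STATUS_SPEED_AND_DUPLEX_10GXFD;
}

static bool tx_pause_negotiated(u32 link_status)
{
        return (link_status & LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK) ==
               LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
}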
@@ -887,138 +1217,159 @@ struct drv_port_mb { | |||
887 | struct drv_func_mb { | 1217 | struct drv_func_mb { |
888 | 1218 | ||
889 | u32 drv_mb_header; | 1219 | u32 drv_mb_header; |
890 | #define DRV_MSG_CODE_MASK 0xffff0000 | 1220 | #define DRV_MSG_CODE_MASK 0xffff0000 |
891 | #define DRV_MSG_CODE_LOAD_REQ 0x10000000 | 1221 | #define DRV_MSG_CODE_LOAD_REQ 0x10000000 |
892 | #define DRV_MSG_CODE_LOAD_DONE 0x11000000 | 1222 | #define DRV_MSG_CODE_LOAD_DONE 0x11000000 |
893 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000 | 1223 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000 |
894 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000 | 1224 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000 |
895 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000 | 1225 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000 |
896 | #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 | 1226 | #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 |
897 | #define DRV_MSG_CODE_DCC_OK 0x30000000 | 1227 | #define DRV_MSG_CODE_DCC_OK 0x30000000 |
898 | #define DRV_MSG_CODE_DCC_FAILURE 0x31000000 | 1228 | #define DRV_MSG_CODE_DCC_FAILURE 0x31000000 |
899 | #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000 | 1229 | #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000 |
900 | #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000 | 1230 | #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000 |
901 | #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000 | 1231 | #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000 |
902 | #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000 | 1232 | #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000 |
903 | #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000 | 1233 | #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000 |
904 | #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000 | 1234 | #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000 |
905 | #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000 | 1235 | #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000 |
906 | /* | ||
907 | * The optic module verification commands require bootcode | ||
908 | * v5.0.6 or later | ||
909 | */ | ||
910 | #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000 | ||
911 | #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006 | ||
912 | /* | 1236 | /* |
913 | * The specific optic module verification command requires bootcode | 1237 | * The optic module verification command requires bootcode |
914 | * v5.2.12 or later | 1238 | * v5.0.6 or later, the specific optic module verification command |
1239 | * requires bootcode v5.2.12 or later | ||
915 | */ | 1240 | */ |
916 | #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 | 1241 | #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000 |
917 | #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 | 1242 | #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006 |
1243 | #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 | ||
1244 | #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 | ||
1245 | #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 | ||
918 | 1246 | ||
919 | #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000 | 1247 | #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000 |
920 | #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 | 1248 | #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 |
921 | #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 | ||
922 | #define REQ_BC_VER_4_SET_MF_BW 0x00060202 | ||
923 | #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 | ||
924 | #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 | ||
925 | #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 | ||
926 | #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 | ||
927 | #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 | ||
928 | 1249 | ||
929 | #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff | 1250 | #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 |
1251 | |||
1252 | #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 | ||
1253 | #define REQ_BC_VER_4_SET_MF_BW 0x00060202 | ||
1254 | #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 | ||
1255 | |||
1256 | #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000 | ||
1257 | |||
1258 | #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 | ||
1259 | #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 | ||
1260 | #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 | ||
1261 | #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 | ||
1262 | |||
1263 | #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff | ||
930 | 1264 | ||
931 | u32 drv_mb_param; | 1265 | u32 drv_mb_param; |
1266 | #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000 | ||
1267 | #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000 | ||
932 | 1268 | ||
933 | u32 fw_mb_header; | 1269 | u32 fw_mb_header; |
934 | #define FW_MSG_CODE_MASK 0xffff0000 | 1270 | #define FW_MSG_CODE_MASK 0xffff0000 |
935 | #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 | 1271 | #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 |
936 | #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 | 1272 | #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 |
937 | #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 | 1273 | #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 |
938 | /* Load common chip is supported from bc 6.0.0 */ | 1274 | /* Load common chip is supported from bc 6.0.0 */ |
939 | #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000 | 1275 | #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000 |
940 | #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000 | 1276 | #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000 |
941 | #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000 | 1277 | |
942 | #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 | 1278 | #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000 |
943 | #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000 | 1279 | #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 |
944 | #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000 | 1280 | #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000 |
945 | #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000 | 1281 | #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000 |
946 | #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 | 1282 | #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000 |
947 | #define FW_MSG_CODE_DCC_DONE 0x30100000 | 1283 | #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 |
948 | #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000 | 1284 | #define FW_MSG_CODE_DCC_DONE 0x30100000 |
949 | #define FW_MSG_CODE_DIAG_REFUSE 0x50200000 | 1285 | #define FW_MSG_CODE_LLDP_DONE 0x40100000 |
950 | #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000 | 1286 | #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000 |
951 | #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000 | 1287 | #define FW_MSG_CODE_DIAG_REFUSE 0x50200000 |
952 | #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000 | 1288 | #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000 |
953 | #define FW_MSG_CODE_GET_KEY_DONE 0x80100000 | 1289 | #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000 |
954 | #define FW_MSG_CODE_NO_KEY 0x80f00000 | 1290 | #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000 |
955 | #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000 | 1291 | #define FW_MSG_CODE_GET_KEY_DONE 0x80100000 |
956 | #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000 | 1292 | #define FW_MSG_CODE_NO_KEY 0x80f00000 |
957 | #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000 | 1293 | #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000 |
958 | #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000 | 1294 | #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000 |
959 | #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000 | 1295 | #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000 |
960 | #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000 | 1296 | #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000 |
961 | #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000 | 1297 | #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000 |
962 | #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 | 1298 | #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000 |
963 | #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 | 1299 | #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000 |
964 | 1300 | #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 | |
965 | #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000 | 1301 | #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 |
966 | #define FW_MSG_CODE_LIC_RESPONSE 0xff020000 | 1302 | #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 |
967 | #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000 | 1303 | |
968 | #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 | 1304 | #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 |
969 | 1305 | #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 | |
970 | #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff | 1306 | |
1307 | #define FW_MSG_CODE_LINK_CHANGED_ACK 0x01100000 | ||
1308 | |||
1309 | #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000 | ||
1310 | #define FW_MSG_CODE_LIC_RESPONSE 0xff020000 | ||
1311 | #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000 | ||
1312 | #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 | ||
1313 | |||
1314 | #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff | ||
971 | 1315 | ||
972 | u32 fw_mb_param; | 1316 | u32 fw_mb_param; |
973 | 1317 | ||
974 | u32 drv_pulse_mb; | 1318 | u32 drv_pulse_mb; |
975 | #define DRV_PULSE_SEQ_MASK 0x00007fff | 1319 | #define DRV_PULSE_SEQ_MASK 0x00007fff |
976 | #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 | 1320 | #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 |
977 | /* The system time is in the format of | 1321 | /* |
978 | * (year-2001)*12*32 + month*32 + day. */ | 1322 | * The system time is in the format of |
979 | #define DRV_PULSE_ALWAYS_ALIVE 0x00008000 | 1323 | * (year-2001)*12*32 + month*32 + day. |
980 | /* Indicate to the firmware not to go into the | 1324 | */ |
1325 | #define DRV_PULSE_ALWAYS_ALIVE 0x00008000 | ||
1326 | /* | ||
1327 | * Indicate to the firmware not to go into the | ||
981 | * OS-absent mode when it is not getting the driver pulse. | 1328 | * OS-absent mode when it is not getting the driver pulse. |
982 | * This is used for debugging as well as for PXE (MBA). */ | 1329 | * This is used for debugging as well as for PXE (MBA). |
1330 | */ | ||
983 | 1331 | ||
984 | u32 mcp_pulse_mb; | 1332 | u32 mcp_pulse_mb; |
985 | #define MCP_PULSE_SEQ_MASK 0x00007fff | 1333 | #define MCP_PULSE_SEQ_MASK 0x00007fff |
986 | #define MCP_PULSE_ALWAYS_ALIVE 0x00008000 | 1334 | #define MCP_PULSE_ALWAYS_ALIVE 0x00008000 |
987 | /* Indicates to the driver not to assert due to lack | 1335 | /* Indicates to the driver not to assert due to lack |
988 | * of MCP response */ | 1336 | * of MCP response */ |
989 | #define MCP_EVENT_MASK 0xffff0000 | 1337 | #define MCP_EVENT_MASK 0xffff0000 |
990 | #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 | 1338 | #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 |
991 | 1339 | ||
992 | u32 iscsi_boot_signature; | 1340 | u32 iscsi_boot_signature; |
993 | u32 iscsi_boot_block_offset; | 1341 | u32 iscsi_boot_block_offset; |
994 | 1342 | ||
995 | u32 drv_status; | 1343 | u32 drv_status; |
996 | #define DRV_STATUS_PMF 0x00000001 | 1344 | #define DRV_STATUS_PMF 0x00000001 |
997 | #define DRV_STATUS_SET_MF_BW 0x00000004 | 1345 | #define DRV_STATUS_VF_DISABLED 0x00000002 |
998 | 1346 | #define DRV_STATUS_SET_MF_BW 0x00000004 | |
999 | #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00 | 1347 | #define DRV_STATUS_LINK_EVENT 0x00000008 |
1000 | #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100 | 1348 | |
1001 | #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200 | 1349 | #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00 |
1002 | #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400 | 1350 | #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100 |
1003 | #define DRV_STATUS_DCC_RESERVED1 0x00000800 | 1351 | #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200 |
1004 | #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000 | 1352 | #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400 |
1005 | #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000 | 1353 | #define DRV_STATUS_DCC_RESERVED1 0x00000800 |
1006 | #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 | 1354 | #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000 |
1007 | #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 | 1355 | #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000 |
1356 | |||
1357 | #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 | ||
1358 | #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 | ||
1008 | 1359 | ||
1009 | u32 virt_mac_upper; | 1360 | u32 virt_mac_upper; |
1010 | #define VIRT_MAC_SIGN_MASK 0xffff0000 | 1361 | #define VIRT_MAC_SIGN_MASK 0xffff0000 |
1011 | #define VIRT_MAC_SIGNATURE 0x564d0000 | 1362 | #define VIRT_MAC_SIGNATURE 0x564d0000 |
1012 | u32 virt_mac_lower; | 1363 | u32 virt_mac_lower; |
1013 | 1364 | ||
1014 | }; | 1365 | }; |
1015 | 1366 | ||
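drv_mb_header and fw_mb_header form a simple request/response channel: the driver writes a DRV_MSG_CODE_* value OR'ed with a rolling sequence number (DRV_MSG_SEQ_NUMBER_MASK), then polls fw_mb_header until the firmware echoes that sequence, at which point the upper 16 bits carry the FW_MSG_CODE_* answer. A hedged sketch of that flow, assuming the kernel u16/u32 typedefs; shmem_rd32(), shmem_wr32() and sleep_ms() are hypothetical accessors, not part of this header:

/* Hypothetical shmem accessors - placeholders, not defined in this header. */
extern u32 shmem_rd32(u32 offset);
extern void shmem_wr32(u32 offset, u32 val);
extern void sleep_ms(unsigned int ms);

/*
 * Issue one driver->MCP command and return the FW_MSG_CODE_* reply,
 * or 0 if the firmware never echoed our sequence number.
 */
static u32 fw_command(u32 drv_mb_off, u32 fw_mb_off, u32 command, u16 *seq)
{
        u32 rc, cnt = 0;

        *seq = (*seq + 1) & DRV_MSG_SEQ_NUMBER_MASK;
        shmem_wr32(drv_mb_off, command | *seq);

        do {
                sleep_ms(FW_ACK_POLL_TIME_MS);
                rc = shmem_rd32(fw_mb_off);
        } while ((rc & FW_MSG_SEQ_NUMBER_MASK) != *seq &&
                 ++cnt < FW_ACK_NUM_OF_POLL);

        if ((rc & FW_MSG_SEQ_NUMBER_MASK) != *seq)
                return 0;                       /* timed out (~5 seconds) */

        return rc & FW_MSG_CODE_MASK;           /* e.g. FW_MSG_CODE_DRV_LOAD_DONE */
}

A DRV_MSG_CODE_LOAD_REQ issued this way, for example, is expected to be answered with one of the FW_MSG_CODE_DRV_LOAD_* responses or FW_MSG_CODE_DRV_LOAD_REFUSED.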
1016 | 1367 | ||
1017 | /**************************************************************************** | 1368 | /**************************************************************************** |
1018 | * Management firmware state * | 1369 | * Management firmware state * |
1019 | ****************************************************************************/ | 1370 | ****************************************************************************/ |
1020 | /* Allocate 440 bytes for management firmware */ | 1371 | /* Allocate 440 bytes for management firmware */ |
1021 | #define MGMTFW_STATE_WORD_SIZE 110 | 1372 | #define MGMTFW_STATE_WORD_SIZE 110 |
1022 | 1373 | ||
1023 | struct mgmtfw_state { | 1374 | struct mgmtfw_state { |
1024 | u32 opaque[MGMTFW_STATE_WORD_SIZE]; | 1375 | u32 opaque[MGMTFW_STATE_WORD_SIZE]; |
@@ -1026,25 +1377,25 @@ struct mgmtfw_state { | |||
1026 | 1377 | ||
1027 | 1378 | ||
1028 | /**************************************************************************** | 1379 | /**************************************************************************** |
1029 | * Multi-Function configuration * | 1380 | * Multi-Function configuration * |
1030 | ****************************************************************************/ | 1381 | ****************************************************************************/ |
1031 | struct shared_mf_cfg { | 1382 | struct shared_mf_cfg { |
1032 | 1383 | ||
1033 | u32 clp_mb; | 1384 | u32 clp_mb; |
1034 | #define SHARED_MF_CLP_SET_DEFAULT 0x00000000 | 1385 | #define SHARED_MF_CLP_SET_DEFAULT 0x00000000 |
1035 | /* set by CLP */ | 1386 | /* set by CLP */ |
1036 | #define SHARED_MF_CLP_EXIT 0x00000001 | 1387 | #define SHARED_MF_CLP_EXIT 0x00000001 |
1037 | /* set by MCP */ | 1388 | /* set by MCP */ |
1038 | #define SHARED_MF_CLP_EXIT_DONE 0x00010000 | 1389 | #define SHARED_MF_CLP_EXIT_DONE 0x00010000 |
1039 | 1390 | ||
1040 | }; | 1391 | }; |
1041 | 1392 | ||
1042 | struct port_mf_cfg { | 1393 | struct port_mf_cfg { |
1043 | 1394 | ||
1044 | u32 dynamic_cfg; /* device control channel */ | 1395 | u32 dynamic_cfg; /* device control channel */ |
1045 | #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff | 1396 | #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff |
1046 | #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0 | 1397 | #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0 |
1047 | #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK | 1398 | #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK |
1048 | 1399 | ||
1049 | u32 reserved[3]; | 1400 | u32 reserved[3]; |
1050 | 1401 | ||
@@ -1055,57 +1406,58 @@ struct func_mf_cfg { | |||
1055 | u32 config; | 1406 | u32 config; |
1056 | /* E/R/I/D */ | 1407 | /* E/R/I/D */ |
1057 | /* function 0 of each port cannot be hidden */ | 1408 | /* function 0 of each port cannot be hidden */ |
1058 | #define FUNC_MF_CFG_FUNC_HIDE 0x00000001 | 1409 | #define FUNC_MF_CFG_FUNC_HIDE 0x00000001 |
1059 | 1410 | ||
1060 | #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000007 | 1411 | #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000006 |
1061 | #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002 | 1412 | #define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000000 |
1062 | #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004 | 1413 | #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002 |
1063 | #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006 | 1414 | #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004 |
1064 | #define FUNC_MF_CFG_PROTOCOL_DEFAULT\ | 1415 | #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006 |
1065 | FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA | 1416 | #define FUNC_MF_CFG_PROTOCOL_DEFAULT \ |
1417 | FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA | ||
1066 | 1418 | ||
1067 | #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008 | 1419 | #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008 |
1420 | #define FUNC_MF_CFG_FUNC_DELETED 0x00000010 | ||
1068 | 1421 | ||
1069 | /* PRI */ | 1422 | /* PRI */ |
1070 | /* 0 - low priority, 3 - high priority */ | 1423 | /* 0 - low priority, 3 - high priority */ |
1071 | #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300 | 1424 | #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300 |
1072 | #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8 | 1425 | #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8 |
1073 | #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000 | 1426 | #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000 |
1074 | 1427 | ||
1075 | /* MINBW, MAXBW */ | 1428 | /* MINBW, MAXBW */ |
1076 | /* value range - 0..100, increments in 100Mbps */ | 1429 | /* value range - 0..100, increments in 100Mbps */ |
1077 | #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000 | 1430 | #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000 |
1078 | #define FUNC_MF_CFG_MIN_BW_SHIFT 16 | 1431 | #define FUNC_MF_CFG_MIN_BW_SHIFT 16 |
1079 | #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 | 1432 | #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 |
1080 | #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000 | 1433 | #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000 |
1081 | #define FUNC_MF_CFG_MAX_BW_SHIFT 24 | 1434 | #define FUNC_MF_CFG_MAX_BW_SHIFT 24 |
1082 | #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000 | 1435 | #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000 |
1083 | 1436 | ||
1084 | u32 mac_upper; /* MAC */ | 1437 | u32 mac_upper; /* MAC */ |
1085 | #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff | 1438 | #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff |
1086 | #define FUNC_MF_CFG_UPPERMAC_SHIFT 0 | 1439 | #define FUNC_MF_CFG_UPPERMAC_SHIFT 0 |
1087 | #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK | 1440 | #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK |
1088 | u32 mac_lower; | 1441 | u32 mac_lower; |
1089 | #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff | 1442 | #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff |
1090 | 1443 | ||
1091 | u32 e1hov_tag; /* VNI */ | 1444 | u32 e1hov_tag; /* VNI */ |
1092 | #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff | 1445 | #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff |
1093 | #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 | 1446 | #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 |
1094 | #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK | 1447 | #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK |
1095 | 1448 | ||
1096 | u32 reserved[2]; | 1449 | u32 reserved[2]; |
1097 | |||
1098 | }; | 1450 | }; |
1099 | 1451 | ||
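The MINBW/MAXBW comment above means each value is an 8-bit percentage (0..100, with 100 Mbps granularity) packed into the high half of config; extracting it is a mask-and-shift, as in this small sketch using only the defines above and the kernel u8/u32 types:

static void get_func_bw_limits(u32 config, u8 *min_bw, u8 *max_bw)
{
        /* Both fields are 0..100; MAX_BW defaults to 0x64 == 100%. */
        *min_bw = (config & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT;
        *max_bw = (config & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT;
}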
1100 | /* This structure is not applicable and should not be accessed on 57711 */ | 1452 | /* This structure is not applicable and should not be accessed on 57711 */ |
1101 | struct func_ext_cfg { | 1453 | struct func_ext_cfg { |
1102 | u32 func_cfg; | 1454 | u32 func_cfg; |
1103 | #define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF | 1455 | #define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF |
1104 | #define MACP_FUNC_CFG_FLAGS_SHIFT 0 | 1456 | #define MACP_FUNC_CFG_FLAGS_SHIFT 0 |
1105 | #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001 | 1457 | #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001 |
1106 | #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002 | 1458 | #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002 |
1107 | #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004 | 1459 | #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004 |
1108 | #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008 | 1460 | #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008 |
1109 | 1461 | ||
1110 | u32 iscsi_mac_addr_upper; | 1462 | u32 iscsi_mac_addr_upper; |
1111 | u32 iscsi_mac_addr_lower; | 1463 | u32 iscsi_mac_addr_lower; |
@@ -1120,73 +1472,99 @@ struct func_ext_cfg { | |||
1120 | u32 fcoe_wwn_node_name_lower; | 1472 | u32 fcoe_wwn_node_name_lower; |
1121 | 1473 | ||
1122 | u32 preserve_data; | 1474 | u32 preserve_data; |
1123 | #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0) | 1475 | #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0) |
1124 | #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1) | 1476 | #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1) |
1125 | #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2) | 1477 | #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2) |
1126 | #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3) | 1478 | #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3) |
1127 | #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4) | 1479 | #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4) |
1480 | #define MF_FUNC_CFG_PRESERVE_TX_BW (1<<5) | ||
1128 | }; | 1481 | }; |
1129 | 1482 | ||
1130 | struct mf_cfg { | 1483 | struct mf_cfg { |
1131 | 1484 | ||
1132 | struct shared_mf_cfg shared_mf_config; | 1485 | struct shared_mf_cfg shared_mf_config; /* 0x4 */ |
1133 | struct port_mf_cfg port_mf_config[PORT_MAX]; | 1486 | struct port_mf_cfg port_mf_config[PORT_MAX]; /* 0x10 * 2 = 0x20 */ |
1134 | struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; | 1487 | /* for all chips, there are 8 mf functions */ |
1135 | 1488 | struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */ | |
1136 | struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; | 1489 | /* |
1137 | }; | 1490 | * Extended configuration per function - this array does not exist and |
1138 | 1491 | * should not be accessed on 57711 | |
1492 | */ | ||
1493 | struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/ | ||
1494 | }; /* 0x224 */ | ||
1139 | 1495 | ||
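The offset annotations added above imply fixed sizes for each sub-structure (0x4 + 2*0x10 + 8*0x18 + 8*0x28 = 0x224 bytes in total). A compile-time sanity check is a natural companion; this is only a sketch, using C11 static_assert (the in-kernel analogue would be BUILD_BUG_ON) and assuming the per-struct sizes really are the ones noted in the comments:

#include <assert.h>     /* C11 static_assert */

static_assert(sizeof(struct shared_mf_cfg) == 0x04,  "shared_mf_cfg size");
static_assert(sizeof(struct port_mf_cfg)   == 0x10,  "port_mf_cfg size");
static_assert(sizeof(struct func_mf_cfg)   == 0x18,  "func_mf_cfg size");
static_assert(sizeof(struct func_ext_cfg)  == 0x28,  "func_ext_cfg size");
static_assert(sizeof(struct mf_cfg)        == 0x224, "mf_cfg size");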
1140 | /**************************************************************************** | 1496 | /**************************************************************************** |
1141 | * Shared Memory Region * | 1497 | * Shared Memory Region * |
1142 | ****************************************************************************/ | 1498 | ****************************************************************************/ |
1143 | struct shmem_region { /* SharedMem Offset (size) */ | 1499 | struct shmem_region { /* SharedMem Offset (size) */ |
1144 | 1500 | ||
1145 | u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */ | 1501 | u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */ |
1146 | #define SHR_MEM_FORMAT_REV_ID ('A'<<24) | 1502 | #define SHR_MEM_FORMAT_REV_MASK 0xff000000 |
1147 | #define SHR_MEM_FORMAT_REV_MASK 0xff000000 | 1503 | #define SHR_MEM_FORMAT_REV_ID ('A'<<24) |
1148 | /* validity bits */ | 1504 | /* validity bits */ |
1149 | #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000 | 1505 | #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000 |
1150 | #define SHR_MEM_VALIDITY_MB 0x00200000 | 1506 | #define SHR_MEM_VALIDITY_MB 0x00200000 |
1151 | #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000 | 1507 | #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000 |
1152 | #define SHR_MEM_VALIDITY_RESERVED 0x00000007 | 1508 | #define SHR_MEM_VALIDITY_RESERVED 0x00000007 |
1153 | /* One licensing bit should be set */ | 1509 | /* One licensing bit should be set */ |
1154 | #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 | 1510 | #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 |
1155 | #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008 | 1511 | #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008 |
1156 | #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010 | 1512 | #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010 |
1157 | #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020 | 1513 | #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020 |
1158 | /* Active MFW */ | 1514 | /* Active MFW */ |
1159 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000 | 1515 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000 |
1160 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040 | 1516 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0 |
1161 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080 | 1517 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040 |
1162 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0 | 1518 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080 |
1163 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0 | 1519 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0 |
1164 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0 | 1520 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0 |
1165 | 1521 | ||
1166 | struct shm_dev_info dev_info; /* 0x8 (0x438) */ | 1522 | struct shm_dev_info dev_info; /* 0x8 (0x438) */ |
1167 | 1523 | ||
1168 | struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */ | 1524 | struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */ |
1169 | 1525 | ||
1170 | /* FW information (for internal FW use) */ | 1526 | /* FW information (for internal FW use) */ |
1171 | u32 fw_info_fio_offset; /* 0x4a8 (0x4) */ | 1527 | u32 fw_info_fio_offset; /* 0x4a8 (0x4) */ |
1172 | struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */ | 1528 | struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */ |
1529 | |||
1530 | struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */ | ||
1173 | 1531 | ||
1174 | struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */ | 1532 | #ifdef BMAPI |
1175 | struct drv_func_mb func_mb[]; /* 0x684 | 1533 | /* This is a variable length array */ |
1176 | (44*2/4/8=0x58/0xb0/0x160) */ | 1534 | /* the number of functions depends on the chip type */ |
1535 | struct drv_func_mb func_mb[1]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */ | ||
1536 | #else | ||
1537 | /* the number of functions depends on the chip type */ ||
1538 | struct drv_func_mb func_mb[]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */ | ||
1539 | #endif /* BMAPI */ | ||
1177 | 1540 | ||
1178 | }; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */ | 1541 | }; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */ |
1179 | 1542 | ||
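Before using anything behind validity_map, a consumer normally checks that the MCP has flagged both the device-info and mailbox sections valid. A minimal sketch, assuming only the bits defined above and the kernel u32/bool types:

static bool shmem_looks_valid(u32 validity)
{
        const u32 required = SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB;

        /* Both sections must be marked valid before shmem can be trusted. */
        return (validity & required) == required;
}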
1543 | /**************************************************************************** | ||
1544 | * Shared Memory 2 Region * | ||
1545 | ****************************************************************************/ | ||
1546 | /* The fw_flr_ack is actually built in the following way: */ | ||
1547 | /* 8 bit: PF ack */ | ||
1548 | /* 64 bit: VF ack */ | ||
1549 | /* 8 bit: iov_dis_ack */ ||
1550 | /* In order to maintain endianness in the mailbox hsi, we want to keep using */ ||
1551 | /* u32. The fw must have the VF right after the PF since this is how it */ ||
1552 | /* accesses arrays (it always expects the VF to reside after the PF, and that */ ||
1553 | /* makes the calculation much easier for it.) */ ||
1554 | /* In order to satisfy both limitations, and keep the struct small, the code */ ||
1555 | /* will abuse the structure defined here to achieve the actual partition */ | ||
1556 | /* above */ | ||
1557 | /****************************************************************************/ | ||
1180 | struct fw_flr_ack { | 1558 | struct fw_flr_ack { |
1181 | u32 pf_ack; | 1559 | u32 pf_ack; |
1182 | u32 vf_ack[1]; | 1560 | u32 vf_ack[1]; |
1183 | u32 iov_dis_ack; | 1561 | u32 iov_dis_ack; |
1184 | }; | 1562 | }; |
1185 | 1563 | ||
1186 | struct fw_flr_mb { | 1564 | struct fw_flr_mb { |
1187 | u32 aggint; | 1565 | u32 aggint; |
1188 | u32 opgen_addr; | 1566 | u32 opgen_addr; |
1189 | struct fw_flr_ack ack; | 1567 | struct fw_flr_ack ack; |
1190 | }; | 1568 | }; |
1191 | 1569 | ||
1192 | /**** SUPPORT FOR SHMEM ARRAYS *** | 1570 | /**** SUPPORT FOR SHMEM ARRAYS *** |
@@ -1210,36 +1588,36 @@ struct fw_flr_mb { | |||
1210 | * | 1588 | * |
1211 | * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering: | 1589 | * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering: |
1212 | * | 1590 | * |
1213 | * | | | | | 1591 | * | | | | |
1214 | * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | 1592 | * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | |
1215 | * | | | | | 1593 | * | | | | |
1216 | * | 1594 | * |
1217 | * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte: | 1595 | * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte: |
1218 | * | 1596 | * |
1219 | * | | | | | 1597 | * | | | | |
1220 | * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 | | 1598 | * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 | |
1221 | * | | | | | 1599 | * | | | | |
1222 | * | 1600 | * |
1223 | * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word: | 1601 | * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word: |
1224 | * | 1602 | * |
1225 | * | | | | | 1603 | * | | | | |
1226 | * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 | | 1604 | * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 | |
1227 | * | | | | | 1605 | * | | | | |
1228 | */ | 1606 | */ |
1229 | #define SHMEM_ARRAY_BITPOS(i, eb, fb) \ | 1607 | #define SHMEM_ARRAY_BITPOS(i, eb, fb) \ |
1230 | ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \ | 1608 | ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \ |
1231 | (((i)%((fb)/(eb))) * (eb))) | 1609 | (((i)%((fb)/(eb))) * (eb))) |
1232 | 1610 | ||
1233 | #define SHMEM_ARRAY_GET(a, i, eb, fb) \ | 1611 | #define SHMEM_ARRAY_GET(a, i, eb, fb) \ |
1234 | ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \ | 1612 | ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \ |
1235 | SHMEM_ARRAY_MASK(eb)) | 1613 | SHMEM_ARRAY_MASK(eb)) |
1236 | 1614 | ||
1237 | #define SHMEM_ARRAY_SET(a, i, eb, fb, val) \ | 1615 | #define SHMEM_ARRAY_SET(a, i, eb, fb, val) \ |
1238 | do { \ | 1616 | do { \ |
1239 | a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \ | 1617 | a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \ |
1240 | SHMEM_ARRAY_BITPOS(i, eb, fb)); \ | 1618 | SHMEM_ARRAY_BITPOS(i, eb, fb)); \ |
1241 | a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \ | 1619 | a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \ |
1242 | SHMEM_ARRAY_BITPOS(i, eb, fb)); \ | 1620 | SHMEM_ARRAY_BITPOS(i, eb, fb)); \ |
1243 | } while (0) | 1621 | } while (0) |
1244 | 1622 | ||
1245 | 1623 | ||
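The three orderings sketched in the comment can be checked by evaluating SHMEM_ARRAY_BITPOS directly; the small standalone program below (illustrative only, with the macro copied from this header) prints the bit position of elements 0..7 for each field size:

#include <stdio.h>

#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
        ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
         (((i)%((fb)/(eb))) * (eb)))

int main(void)
{
        int fb, i;

        for (fb = 4; fb <= 16; fb *= 2) {
                printf("eb=4 fb=%2d:", fb);
                for (i = 0; i < 8; i++)
                        printf(" %2d", SHMEM_ARRAY_BITPOS(i, 4, fb));
                printf("\n");
        }
        return 0;
}

The first row descends 28..0 (the standard ordering), the second swaps nibbles within each byte, and the third reverses nibbles within each 16-bit word, matching the three diagrams above.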
@@ -1263,23 +1641,30 @@ do { \ | |||
1263 | #define ISCSI_APP_IDX 1 | 1641 | #define ISCSI_APP_IDX 1 |
1264 | #define PREDEFINED_APP_IDX_MAX 2 | 1642 | #define PREDEFINED_APP_IDX_MAX 2 |
1265 | 1643 | ||
1644 | |||
1645 | /* Big/Little endian have the same representation. */ | ||
1266 | struct dcbx_ets_feature { | 1646 | struct dcbx_ets_feature { |
1647 | /* | ||
1648 | * For Admin MIB - is this feature supported by the | ||
1649 | * driver | For Local MIB - should this feature be enabled. | ||
1650 | */ | ||
1267 | u32 enabled; | 1651 | u32 enabled; |
1268 | u32 pg_bw_tbl[2]; | 1652 | u32 pg_bw_tbl[2]; |
1269 | u32 pri_pg_tbl[1]; | 1653 | u32 pri_pg_tbl[1]; |
1270 | }; | 1654 | }; |
1271 | 1655 | ||
1656 | /* Driver structure in LE */ | ||
1272 | struct dcbx_pfc_feature { | 1657 | struct dcbx_pfc_feature { |
1273 | #ifdef __BIG_ENDIAN | 1658 | #ifdef __BIG_ENDIAN |
1274 | u8 pri_en_bitmap; | 1659 | u8 pri_en_bitmap; |
1275 | #define DCBX_PFC_PRI_0 0x01 | 1660 | #define DCBX_PFC_PRI_0 0x01 |
1276 | #define DCBX_PFC_PRI_1 0x02 | 1661 | #define DCBX_PFC_PRI_1 0x02 |
1277 | #define DCBX_PFC_PRI_2 0x04 | 1662 | #define DCBX_PFC_PRI_2 0x04 |
1278 | #define DCBX_PFC_PRI_3 0x08 | 1663 | #define DCBX_PFC_PRI_3 0x08 |
1279 | #define DCBX_PFC_PRI_4 0x10 | 1664 | #define DCBX_PFC_PRI_4 0x10 |
1280 | #define DCBX_PFC_PRI_5 0x20 | 1665 | #define DCBX_PFC_PRI_5 0x20 |
1281 | #define DCBX_PFC_PRI_6 0x40 | 1666 | #define DCBX_PFC_PRI_6 0x40 |
1282 | #define DCBX_PFC_PRI_7 0x80 | 1667 | #define DCBX_PFC_PRI_7 0x80 |
1283 | u8 pfc_caps; | 1668 | u8 pfc_caps; |
1284 | u8 reserved; | 1669 | u8 reserved; |
1285 | u8 enabled; | 1670 | u8 enabled; |
@@ -1288,39 +1673,41 @@ struct dcbx_pfc_feature { | |||
1288 | u8 reserved; | 1673 | u8 reserved; |
1289 | u8 pfc_caps; | 1674 | u8 pfc_caps; |
1290 | u8 pri_en_bitmap; | 1675 | u8 pri_en_bitmap; |
1291 | #define DCBX_PFC_PRI_0 0x01 | 1676 | #define DCBX_PFC_PRI_0 0x01 |
1292 | #define DCBX_PFC_PRI_1 0x02 | 1677 | #define DCBX_PFC_PRI_1 0x02 |
1293 | #define DCBX_PFC_PRI_2 0x04 | 1678 | #define DCBX_PFC_PRI_2 0x04 |
1294 | #define DCBX_PFC_PRI_3 0x08 | 1679 | #define DCBX_PFC_PRI_3 0x08 |
1295 | #define DCBX_PFC_PRI_4 0x10 | 1680 | #define DCBX_PFC_PRI_4 0x10 |
1296 | #define DCBX_PFC_PRI_5 0x20 | 1681 | #define DCBX_PFC_PRI_5 0x20 |
1297 | #define DCBX_PFC_PRI_6 0x40 | 1682 | #define DCBX_PFC_PRI_6 0x40 |
1298 | #define DCBX_PFC_PRI_7 0x80 | 1683 | #define DCBX_PFC_PRI_7 0x80 |
1299 | #endif | 1684 | #endif |
1300 | }; | 1685 | }; |
1301 | 1686 | ||
1302 | struct dcbx_app_priority_entry { | 1687 | struct dcbx_app_priority_entry { |
1303 | #ifdef __BIG_ENDIAN | 1688 | #ifdef __BIG_ENDIAN |
1304 | u16 app_id; | 1689 | u16 app_id; |
1305 | u8 pri_bitmap; | 1690 | u8 pri_bitmap; |
1306 | u8 appBitfield; | 1691 | u8 appBitfield; |
1307 | #define DCBX_APP_ENTRY_VALID 0x01 | 1692 | #define DCBX_APP_ENTRY_VALID 0x01 |
1308 | #define DCBX_APP_ENTRY_SF_MASK 0x30 | 1693 | #define DCBX_APP_ENTRY_SF_MASK 0x30 |
1309 | #define DCBX_APP_ENTRY_SF_SHIFT 4 | 1694 | #define DCBX_APP_ENTRY_SF_SHIFT 4 |
1310 | #define DCBX_APP_SF_ETH_TYPE 0x10 | 1695 | #define DCBX_APP_SF_ETH_TYPE 0x10 |
1311 | #define DCBX_APP_SF_PORT 0x20 | 1696 | #define DCBX_APP_SF_PORT 0x20 |
1312 | #elif defined(__LITTLE_ENDIAN) | 1697 | #elif defined(__LITTLE_ENDIAN) |
1313 | u8 appBitfield; | 1698 | u8 appBitfield; |
1314 | #define DCBX_APP_ENTRY_VALID 0x01 | 1699 | #define DCBX_APP_ENTRY_VALID 0x01 |
1315 | #define DCBX_APP_ENTRY_SF_MASK 0x30 | 1700 | #define DCBX_APP_ENTRY_SF_MASK 0x30 |
1316 | #define DCBX_APP_ENTRY_SF_SHIFT 4 | 1701 | #define DCBX_APP_ENTRY_SF_SHIFT 4 |
1317 | #define DCBX_APP_SF_ETH_TYPE 0x10 | 1702 | #define DCBX_APP_SF_ETH_TYPE 0x10 |
1318 | #define DCBX_APP_SF_PORT 0x20 | 1703 | #define DCBX_APP_SF_PORT 0x20 |
1319 | u8 pri_bitmap; | 1704 | u8 pri_bitmap; |
1320 | u16 app_id; | 1705 | u16 app_id; |
1321 | #endif | 1706 | #endif |
1322 | }; | 1707 | }; |
1323 | 1708 | ||
1709 | |||
1710 | /* FW structure in BE */ | ||
1324 | struct dcbx_app_priority_feature { | 1711 | struct dcbx_app_priority_feature { |
1325 | #ifdef __BIG_ENDIAN | 1712 | #ifdef __BIG_ENDIAN |
1326 | u8 reserved; | 1713 | u8 reserved; |
@@ -1336,302 +1723,402 @@ struct dcbx_app_priority_feature { | |||
1336 | struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; | 1723 | struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; |
1337 | }; | 1724 | }; |
1338 | 1725 | ||
1726 | /* FW structure in BE */ | ||
1339 | struct dcbx_features { | 1727 | struct dcbx_features { |
1728 | /* PG feature */ | ||
1340 | struct dcbx_ets_feature ets; | 1729 | struct dcbx_ets_feature ets; |
1730 | /* PFC feature */ | ||
1341 | struct dcbx_pfc_feature pfc; | 1731 | struct dcbx_pfc_feature pfc; |
1732 | /* APP feature */ | ||
1342 | struct dcbx_app_priority_feature app; | 1733 | struct dcbx_app_priority_feature app; |
1343 | }; | 1734 | }; |
1344 | 1735 | ||
1736 | /* LLDP protocol parameters */ | ||
1737 | /* FW structure in BE */ | ||
1345 | struct lldp_params { | 1738 | struct lldp_params { |
1346 | #ifdef __BIG_ENDIAN | 1739 | #ifdef __BIG_ENDIAN |
1347 | u8 msg_fast_tx_interval; | 1740 | u8 msg_fast_tx_interval; |
1348 | u8 msg_tx_hold; | 1741 | u8 msg_tx_hold; |
1349 | u8 msg_tx_interval; | 1742 | u8 msg_tx_interval; |
1350 | u8 admin_status; | 1743 | u8 admin_status; |
1351 | #define LLDP_TX_ONLY 0x01 | 1744 | #define LLDP_TX_ONLY 0x01 |
1352 | #define LLDP_RX_ONLY 0x02 | 1745 | #define LLDP_RX_ONLY 0x02 |
1353 | #define LLDP_TX_RX 0x03 | 1746 | #define LLDP_TX_RX 0x03 |
1354 | #define LLDP_DISABLED 0x04 | 1747 | #define LLDP_DISABLED 0x04 |
1355 | u8 reserved1; | 1748 | u8 reserved1; |
1356 | u8 tx_fast; | 1749 | u8 tx_fast; |
1357 | u8 tx_crd_max; | 1750 | u8 tx_crd_max; |
1358 | u8 tx_crd; | 1751 | u8 tx_crd; |
1359 | #elif defined(__LITTLE_ENDIAN) | 1752 | #elif defined(__LITTLE_ENDIAN) |
1360 | u8 admin_status; | 1753 | u8 admin_status; |
1361 | #define LLDP_TX_ONLY 0x01 | 1754 | #define LLDP_TX_ONLY 0x01 |
1362 | #define LLDP_RX_ONLY 0x02 | 1755 | #define LLDP_RX_ONLY 0x02 |
1363 | #define LLDP_TX_RX 0x03 | 1756 | #define LLDP_TX_RX 0x03 |
1364 | #define LLDP_DISABLED 0x04 | 1757 | #define LLDP_DISABLED 0x04 |
1365 | u8 msg_tx_interval; | 1758 | u8 msg_tx_interval; |
1366 | u8 msg_tx_hold; | 1759 | u8 msg_tx_hold; |
1367 | u8 msg_fast_tx_interval; | 1760 | u8 msg_fast_tx_interval; |
1368 | u8 tx_crd; | 1761 | u8 tx_crd; |
1369 | u8 tx_crd_max; | 1762 | u8 tx_crd_max; |
1370 | u8 tx_fast; | 1763 | u8 tx_fast; |
1371 | u8 reserved1; | 1764 | u8 reserved1; |
1372 | #endif | 1765 | #endif |
1373 | #define REM_CHASSIS_ID_STAT_LEN 4 | 1766 | #define REM_CHASSIS_ID_STAT_LEN 4 |
1374 | #define REM_PORT_ID_STAT_LEN 4 | 1767 | #define REM_PORT_ID_STAT_LEN 4 |
1768 | /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */ | ||
1375 | u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN]; | 1769 | u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN]; |
1770 | /* Holds remote Port ID TLV header, subtype and 9B of payload. */ | ||
1376 | u32 peer_port_id[REM_PORT_ID_STAT_LEN]; | 1771 | u32 peer_port_id[REM_PORT_ID_STAT_LEN]; |
1377 | }; | 1772 | }; |
1378 | 1773 | ||
1379 | struct lldp_dcbx_stat { | 1774 | struct lldp_dcbx_stat { |
1380 | #define LOCAL_CHASSIS_ID_STAT_LEN 2 | 1775 | #define LOCAL_CHASSIS_ID_STAT_LEN 2 |
1381 | #define LOCAL_PORT_ID_STAT_LEN 2 | 1776 | #define LOCAL_PORT_ID_STAT_LEN 2 |
1777 | /* Holds local Chassis ID 8B payload of constant subtype 4. */ | ||
1382 | u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN]; | 1778 | u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN]; |
1779 | /* Holds local Port ID 8B payload of constant subtype 3. */ | ||
1383 | u32 local_port_id[LOCAL_PORT_ID_STAT_LEN]; | 1780 | u32 local_port_id[LOCAL_PORT_ID_STAT_LEN]; |
1781 | /* Number of DCBX frames transmitted. */ | ||
1384 | u32 num_tx_dcbx_pkts; | 1782 | u32 num_tx_dcbx_pkts; |
1783 | /* Number of DCBX frames received. */ | ||
1385 | u32 num_rx_dcbx_pkts; | 1784 | u32 num_rx_dcbx_pkts; |
1386 | }; | 1785 | }; |
1387 | 1786 | ||
1787 | /* ADMIN MIB - DCBX local machine default configuration. */ | ||
1388 | struct lldp_admin_mib { | 1788 | struct lldp_admin_mib { |
1389 | u32 ver_cfg_flags; | 1789 | u32 ver_cfg_flags; |
1390 | #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001 | 1790 | #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001 |
1391 | #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002 | 1791 | #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002 |
1392 | #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004 | 1792 | #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004 |
1393 | #define DCBX_ETS_RECO_TX_ENABLED 0x00000008 | 1793 | #define DCBX_ETS_RECO_TX_ENABLED 0x00000008 |
1394 | #define DCBX_ETS_RECO_VALID 0x00000010 | 1794 | #define DCBX_ETS_RECO_VALID 0x00000010 |
1395 | #define DCBX_ETS_WILLING 0x00000020 | 1795 | #define DCBX_ETS_WILLING 0x00000020 |
1396 | #define DCBX_PFC_WILLING 0x00000040 | 1796 | #define DCBX_PFC_WILLING 0x00000040 |
1397 | #define DCBX_APP_WILLING 0x00000080 | 1797 | #define DCBX_APP_WILLING 0x00000080 |
1398 | #define DCBX_VERSION_CEE 0x00000100 | 1798 | #define DCBX_VERSION_CEE 0x00000100 |
1399 | #define DCBX_VERSION_IEEE 0x00000200 | 1799 | #define DCBX_VERSION_IEEE 0x00000200 |
1400 | #define DCBX_DCBX_ENABLED 0x00000400 | 1800 | #define DCBX_DCBX_ENABLED 0x00000400 |
1401 | #define DCBX_CEE_VERSION_MASK 0x0000f000 | 1801 | #define DCBX_CEE_VERSION_MASK 0x0000f000 |
1402 | #define DCBX_CEE_VERSION_SHIFT 12 | 1802 | #define DCBX_CEE_VERSION_SHIFT 12 |
1403 | #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000 | 1803 | #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000 |
1404 | #define DCBX_CEE_MAX_VERSION_SHIFT 16 | 1804 | #define DCBX_CEE_MAX_VERSION_SHIFT 16 |
1405 | struct dcbx_features features; | 1805 | struct dcbx_features features; |
1406 | }; | 1806 | }; |
1407 | 1807 | ||
1808 | /* REMOTE MIB - remote machine DCBX configuration. */ | ||
1408 | struct lldp_remote_mib { | 1809 | struct lldp_remote_mib { |
1409 | u32 prefix_seq_num; | 1810 | u32 prefix_seq_num; |
1410 | u32 flags; | 1811 | u32 flags; |
1411 | #define DCBX_ETS_TLV_RX 0x00000001 | 1812 | #define DCBX_ETS_TLV_RX 0x00000001 |
1412 | #define DCBX_PFC_TLV_RX 0x00000002 | 1813 | #define DCBX_PFC_TLV_RX 0x00000002 |
1413 | #define DCBX_APP_TLV_RX 0x00000004 | 1814 | #define DCBX_APP_TLV_RX 0x00000004 |
1414 | #define DCBX_ETS_RX_ERROR 0x00000010 | 1815 | #define DCBX_ETS_RX_ERROR 0x00000010 |
1415 | #define DCBX_PFC_RX_ERROR 0x00000020 | 1816 | #define DCBX_PFC_RX_ERROR 0x00000020 |
1416 | #define DCBX_APP_RX_ERROR 0x00000040 | 1817 | #define DCBX_APP_RX_ERROR 0x00000040 |
1417 | #define DCBX_ETS_REM_WILLING 0x00000100 | 1818 | #define DCBX_ETS_REM_WILLING 0x00000100 |
1418 | #define DCBX_PFC_REM_WILLING 0x00000200 | 1819 | #define DCBX_PFC_REM_WILLING 0x00000200 |
1419 | #define DCBX_APP_REM_WILLING 0x00000400 | 1820 | #define DCBX_APP_REM_WILLING 0x00000400 |
1420 | #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000 | 1821 | #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000 |
1822 | #define DCBX_REMOTE_MIB_VALID 0x00002000 | ||
1421 | struct dcbx_features features; | 1823 | struct dcbx_features features; |
1422 | u32 suffix_seq_num; | 1824 | u32 suffix_seq_num; |
1423 | }; | 1825 | }; |
1424 | 1826 | ||
1827 | /* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */ | ||
1425 | struct lldp_local_mib { | 1828 | struct lldp_local_mib { |
1426 | u32 prefix_seq_num; | 1829 | u32 prefix_seq_num; |
1830 | /* Indicates if there is mismatch with negotiation results. */ | ||
1427 | u32 error; | 1831 | u32 error; |
1428 | #define DCBX_LOCAL_ETS_ERROR 0x00000001 | 1832 | #define DCBX_LOCAL_ETS_ERROR 0x00000001 |
1429 | #define DCBX_LOCAL_PFC_ERROR 0x00000002 | 1833 | #define DCBX_LOCAL_PFC_ERROR 0x00000002 |
1430 | #define DCBX_LOCAL_APP_ERROR 0x00000004 | 1834 | #define DCBX_LOCAL_APP_ERROR 0x00000004 |
1431 | #define DCBX_LOCAL_PFC_MISMATCH 0x00000010 | 1835 | #define DCBX_LOCAL_PFC_MISMATCH 0x00000010 |
1432 | #define DCBX_LOCAL_APP_MISMATCH 0x00000020 | 1836 | #define DCBX_LOCAL_APP_MISMATCH 0x00000020 |
1433 | struct dcbx_features features; | 1837 | struct dcbx_features features; |
1434 | u32 suffix_seq_num; | 1838 | u32 suffix_seq_num; |
1435 | }; | 1839 | }; |
1436 | /***END OF DCBX STRUCTURES DECLARATIONS***/ | 1840 | /***END OF DCBX STRUCTURES DECLARATIONS***/ |
1437 | 1841 | ||
1842 | struct ncsi_oem_fcoe_features { | ||
1843 | u32 fcoe_features1; | ||
1844 | #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF | ||
1845 | #define FCOE_FEATURES1_IOS_PER_CONNECTION_OFFSET 0 | ||
1846 | |||
1847 | #define FCOE_FEATURES1_LOGINS_PER_PORT_MASK 0xFFFF0000 | ||
1848 | #define FCOE_FEATURES1_LOGINS_PER_PORT_OFFSET 16 | ||
1849 | |||
1850 | u32 fcoe_features2; | ||
1851 | #define FCOE_FEATURES2_EXCHANGES_MASK 0x0000FFFF | ||
1852 | #define FCOE_FEATURES2_EXCHANGES_OFFSET 0 | ||
1853 | |||
1854 | #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_MASK 0xFFFF0000 | ||
1855 | #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_OFFSET 16 | ||
1856 | |||
1857 | u32 fcoe_features3; | ||
1858 | #define FCOE_FEATURES3_TARGETS_SUPPORTED_MASK 0x0000FFFF | ||
1859 | #define FCOE_FEATURES3_TARGETS_SUPPORTED_OFFSET 0 | ||
1860 | |||
1861 | #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_MASK 0xFFFF0000 | ||
1862 | #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_OFFSET 16 | ||
1863 | |||
1864 | u32 fcoe_features4; | ||
1865 | #define FCOE_FEATURES4_FEATURE_SETTINGS_MASK 0x0000000F | ||
1866 | #define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0 | ||
1867 | }; | ||
1868 | |||
1869 | struct ncsi_oem_data { | ||
1870 | u32 driver_version[4]; | ||
1871 | struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features; | ||
1872 | }; | ||
1873 | |||
1438 | struct shmem2_region { | 1874 | struct shmem2_region { |
1439 | 1875 | ||
1440 | u32 size; | 1876 | u32 size; /* 0x0000 */ |
1441 | 1877 | ||
1442 | u32 dcc_support; | 1878 | u32 dcc_support; /* 0x0004 */ |
1443 | #define SHMEM_DCC_SUPPORT_NONE 0x00000000 | 1879 | #define SHMEM_DCC_SUPPORT_NONE 0x00000000 |
1444 | #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001 | 1880 | #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001 |
1445 | #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004 | 1881 | #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004 |
1446 | #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008 | 1882 | #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008 |
1447 | #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040 | 1883 | #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040 |
1448 | #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080 | 1884 | #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080 |
1449 | #define SHMEM_DCC_SUPPORT_DEFAULT SHMEM_DCC_SUPPORT_NONE | 1885 | |
1450 | u32 ext_phy_fw_version2[PORT_MAX]; | 1886 | u32 ext_phy_fw_version2[PORT_MAX]; /* 0x0008 */ |
1451 | /* | 1887 | /* |
1452 | * For backwards compatibility, if the mf_cfg_addr does not exist | 1888 | * For backwards compatibility, if the mf_cfg_addr does not exist |
1453 | * (the size field is smaller than 0xc) the mf_cfg resides at the | 1889 | * (the size field is smaller than 0xc) the mf_cfg resides at the |
1454 | * end of struct shmem_region | 1890 | * end of struct shmem_region |
1455 | */ | 1891 | */ |
1456 | u32 mf_cfg_addr; | 1892 | u32 mf_cfg_addr; /* 0x0010 */ |
1457 | #define SHMEM_MF_CFG_ADDR_NONE 0x00000000 | 1893 | #define SHMEM_MF_CFG_ADDR_NONE 0x00000000 |
1458 | 1894 | ||
1459 | struct fw_flr_mb flr_mb; | 1895 | struct fw_flr_mb flr_mb; /* 0x0014 */ |
1460 | u32 dcbx_lldp_params_offset; | 1896 | u32 dcbx_lldp_params_offset; /* 0x0028 */ |
1461 | #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000 | 1897 | #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000 |
1462 | u32 dcbx_neg_res_offset; | 1898 | u32 dcbx_neg_res_offset; /* 0x002c */ |
1463 | #define SHMEM_DCBX_NEG_RES_NONE 0x00000000 | 1899 | #define SHMEM_DCBX_NEG_RES_NONE 0x00000000 |
1464 | u32 dcbx_remote_mib_offset; | 1900 | u32 dcbx_remote_mib_offset; /* 0x0030 */ |
1465 | #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000 | 1901 | #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000 |
1466 | /* | 1902 | /* |
1467 | * The other shmemX_base_addr holds the other path's shmem address | 1903 | * The other shmemX_base_addr holds the other path's shmem address |
1468 | * required, for example, for common phy init, or for path1 to know | 1904 | * required, for example, for common phy init, or for path1 to know |
1469 | * the address of the mcp debug trace, which is located at an offset | 1905 | * the address of the mcp debug trace, which is located at an offset |
1470 | * from path0's shmem | 1906 | * from path0's shmem |
1471 | */ | 1907 | */ |
1472 | u32 other_shmem_base_addr; | 1908 | u32 other_shmem_base_addr; /* 0x0034 */ |
1473 | u32 other_shmem2_base_addr; | 1909 | u32 other_shmem2_base_addr; /* 0x0038 */ |
1474 | u32 reserved1[E2_VF_MAX / 32]; | 1910 | /* |
1475 | u32 reserved2[E2_FUNC_MAX][E2_VF_MAX / 32]; | 1911 | * mcp_vf_disabled is set by the MCP to inform the driver of VFs |
1476 | u32 dcbx_lldp_dcbx_stat_offset; | 1912 | * which were disabled/FLRed |
1477 | #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000 | 1913 | */ |
1914 | u32 mcp_vf_disabled[E2_VF_MAX / 32]; /* 0x003c */ | ||
1915 | |||
1916 | /* | ||
1917 | * drv_ack_vf_disabled is set by the PF driver to acknowledge the | ||
1918 | * disabled VFs it has handled | ||
1919 | */ | ||
1920 | u32 drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */ | ||
1921 | |||
1922 | u32 dcbx_lldp_dcbx_stat_offset; /* 0x0064 */ | ||
1923 | #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000 | ||
1924 | |||
1925 | /* | ||
1926 | * edebug_driver_if field is used to transfer messages from the edebug | ||
1927 | * app to the driver through shmem2. | ||
1928 | * | ||
1929 | * message format: | ||
1930 | * bits 0-2 - function number / instance of driver to perform request | ||
1931 | * bits 3-5 - op code / is_ack? | ||
1932 | * bits 6-63 - data | ||
1933 | */ | ||
1934 | u32 edebug_driver_if[2]; /* 0x0068 */ | ||
1935 | #define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR 1 | ||
1936 | #define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR 2 | ||
1937 | #define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT 3 | ||
1938 | |||
1939 | u32 nvm_retain_bitmap_addr; /* 0x0070 */ | ||
1940 | |||
1941 | u32 reserved1; /* 0x0074 */ | ||
1942 | |||
1943 | u32 reserved2[E2_FUNC_MAX]; | ||
1944 | |||
1945 | u32 reserved3[E2_FUNC_MAX];/* 0x0088 */ | ||
1946 | u32 reserved4[E2_FUNC_MAX];/* 0x0098 */ | ||
1947 | |||
1948 | u32 swim_base_addr; /* 0x0108 */ | ||
1949 | u32 swim_funcs; | ||
1950 | u32 swim_main_cb; | ||
1951 | |||
1952 | u32 reserved5[2]; | ||
1953 | |||
1954 | /* generic flags controlled by the driver */ | ||
1955 | u32 drv_flags; | ||
1956 | #define DRV_FLAGS_DCB_CONFIGURED 0x1 | ||
1957 | |||
1958 | /* pointer to extended dev_info shared data copied from nvm image */ | ||
1959 | u32 extended_dev_info_shared_addr; | ||
1960 | u32 ncsi_oem_data_addr; | ||
1961 | |||
1962 | u32 ocsd_host_addr; | ||
1963 | u32 ocbb_host_addr; | ||
1964 | u32 ocsd_req_update_interval; | ||
1478 | }; | 1965 | }; |
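
The backwards-compatibility note on mf_cfg_addr inside the struct above boils down to a simple fallback rule. The following is only an illustrative sketch: shmem2_rd() and the shmem_base/shmem2_base parameters are hypothetical stand-ins, and struct shmem_region is defined elsewhere in this header.

static u32 get_mf_cfg_addr(u32 shmem_base, u32 shmem2_base)
{
	/* how many bytes of shmem2 the MCP actually implements */
	u32 size = shmem2_rd(shmem2_base,
			     offsetof(struct shmem2_region, size));

	/* shmem2 too small to contain mf_cfg_addr: mf_cfg follows shmem */
	if (size < offsetof(struct shmem2_region, mf_cfg_addr) + sizeof(u32))
		return shmem_base + sizeof(struct shmem_region);

	return shmem2_rd(shmem2_base,
			 offsetof(struct shmem2_region, mf_cfg_addr));
}

A real reader would also compare the returned value against SHMEM_MF_CFG_ADDR_NONE before using it.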
1479 | 1966 | ||
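
The edebug_driver_if comment above describes a 64-bit message layout (bits 0-2 function, bits 3-5 op code/ack, bits 6-63 data) carried in the two u32 words. Below is a hedged packing sketch; how the 64-bit value is split across edebug_driver_if[0] and [1] is an assumption here, not something this header specifies.

static void edebug_pack(u32 msg[2], unsigned int func, unsigned int op,
			u64 data)
{
	u64 v = (func & 0x7ULL) |	/* bits 0-2: function / instance */
		((op & 0x7ULL) << 3) |	/* bits 3-5: op code / is_ack    */
		(data << 6);		/* bits 6-63: data               */

	msg[0] = (u32)v;		/* assumed: low word first */
	msg[1] = (u32)(v >> 32);
}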
1480 | 1967 | ||
1481 | struct emac_stats { | 1968 | struct emac_stats { |
1482 | u32 rx_stat_ifhcinoctets; | 1969 | u32 rx_stat_ifhcinoctets; |
1483 | u32 rx_stat_ifhcinbadoctets; | 1970 | u32 rx_stat_ifhcinbadoctets; |
1484 | u32 rx_stat_etherstatsfragments; | 1971 | u32 rx_stat_etherstatsfragments; |
1485 | u32 rx_stat_ifhcinucastpkts; | 1972 | u32 rx_stat_ifhcinucastpkts; |
1486 | u32 rx_stat_ifhcinmulticastpkts; | 1973 | u32 rx_stat_ifhcinmulticastpkts; |
1487 | u32 rx_stat_ifhcinbroadcastpkts; | 1974 | u32 rx_stat_ifhcinbroadcastpkts; |
1488 | u32 rx_stat_dot3statsfcserrors; | 1975 | u32 rx_stat_dot3statsfcserrors; |
1489 | u32 rx_stat_dot3statsalignmenterrors; | 1976 | u32 rx_stat_dot3statsalignmenterrors; |
1490 | u32 rx_stat_dot3statscarriersenseerrors; | 1977 | u32 rx_stat_dot3statscarriersenseerrors; |
1491 | u32 rx_stat_xonpauseframesreceived; | 1978 | u32 rx_stat_xonpauseframesreceived; |
1492 | u32 rx_stat_xoffpauseframesreceived; | 1979 | u32 rx_stat_xoffpauseframesreceived; |
1493 | u32 rx_stat_maccontrolframesreceived; | 1980 | u32 rx_stat_maccontrolframesreceived; |
1494 | u32 rx_stat_xoffstateentered; | 1981 | u32 rx_stat_xoffstateentered; |
1495 | u32 rx_stat_dot3statsframestoolong; | 1982 | u32 rx_stat_dot3statsframestoolong; |
1496 | u32 rx_stat_etherstatsjabbers; | 1983 | u32 rx_stat_etherstatsjabbers; |
1497 | u32 rx_stat_etherstatsundersizepkts; | 1984 | u32 rx_stat_etherstatsundersizepkts; |
1498 | u32 rx_stat_etherstatspkts64octets; | 1985 | u32 rx_stat_etherstatspkts64octets; |
1499 | u32 rx_stat_etherstatspkts65octetsto127octets; | 1986 | u32 rx_stat_etherstatspkts65octetsto127octets; |
1500 | u32 rx_stat_etherstatspkts128octetsto255octets; | 1987 | u32 rx_stat_etherstatspkts128octetsto255octets; |
1501 | u32 rx_stat_etherstatspkts256octetsto511octets; | 1988 | u32 rx_stat_etherstatspkts256octetsto511octets; |
1502 | u32 rx_stat_etherstatspkts512octetsto1023octets; | 1989 | u32 rx_stat_etherstatspkts512octetsto1023octets; |
1503 | u32 rx_stat_etherstatspkts1024octetsto1522octets; | 1990 | u32 rx_stat_etherstatspkts1024octetsto1522octets; |
1504 | u32 rx_stat_etherstatspktsover1522octets; | 1991 | u32 rx_stat_etherstatspktsover1522octets; |
1505 | 1992 | ||
1506 | u32 rx_stat_falsecarriererrors; | 1993 | u32 rx_stat_falsecarriererrors; |
1507 | 1994 | ||
1508 | u32 tx_stat_ifhcoutoctets; | 1995 | u32 tx_stat_ifhcoutoctets; |
1509 | u32 tx_stat_ifhcoutbadoctets; | 1996 | u32 tx_stat_ifhcoutbadoctets; |
1510 | u32 tx_stat_etherstatscollisions; | 1997 | u32 tx_stat_etherstatscollisions; |
1511 | u32 tx_stat_outxonsent; | 1998 | u32 tx_stat_outxonsent; |
1512 | u32 tx_stat_outxoffsent; | 1999 | u32 tx_stat_outxoffsent; |
1513 | u32 tx_stat_flowcontroldone; | 2000 | u32 tx_stat_flowcontroldone; |
1514 | u32 tx_stat_dot3statssinglecollisionframes; | 2001 | u32 tx_stat_dot3statssinglecollisionframes; |
1515 | u32 tx_stat_dot3statsmultiplecollisionframes; | 2002 | u32 tx_stat_dot3statsmultiplecollisionframes; |
1516 | u32 tx_stat_dot3statsdeferredtransmissions; | 2003 | u32 tx_stat_dot3statsdeferredtransmissions; |
1517 | u32 tx_stat_dot3statsexcessivecollisions; | 2004 | u32 tx_stat_dot3statsexcessivecollisions; |
1518 | u32 tx_stat_dot3statslatecollisions; | 2005 | u32 tx_stat_dot3statslatecollisions; |
1519 | u32 tx_stat_ifhcoutucastpkts; | 2006 | u32 tx_stat_ifhcoutucastpkts; |
1520 | u32 tx_stat_ifhcoutmulticastpkts; | 2007 | u32 tx_stat_ifhcoutmulticastpkts; |
1521 | u32 tx_stat_ifhcoutbroadcastpkts; | 2008 | u32 tx_stat_ifhcoutbroadcastpkts; |
1522 | u32 tx_stat_etherstatspkts64octets; | 2009 | u32 tx_stat_etherstatspkts64octets; |
1523 | u32 tx_stat_etherstatspkts65octetsto127octets; | 2010 | u32 tx_stat_etherstatspkts65octetsto127octets; |
1524 | u32 tx_stat_etherstatspkts128octetsto255octets; | 2011 | u32 tx_stat_etherstatspkts128octetsto255octets; |
1525 | u32 tx_stat_etherstatspkts256octetsto511octets; | 2012 | u32 tx_stat_etherstatspkts256octetsto511octets; |
1526 | u32 tx_stat_etherstatspkts512octetsto1023octets; | 2013 | u32 tx_stat_etherstatspkts512octetsto1023octets; |
1527 | u32 tx_stat_etherstatspkts1024octetsto1522octets; | 2014 | u32 tx_stat_etherstatspkts1024octetsto1522octets; |
1528 | u32 tx_stat_etherstatspktsover1522octets; | 2015 | u32 tx_stat_etherstatspktsover1522octets; |
1529 | u32 tx_stat_dot3statsinternalmactransmiterrors; | 2016 | u32 tx_stat_dot3statsinternalmactransmiterrors; |
1530 | }; | 2017 | }; |
1531 | 2018 | ||
1532 | 2019 | ||
1533 | struct bmac1_stats { | 2020 | struct bmac1_stats { |
1534 | u32 tx_stat_gtpkt_lo; | 2021 | u32 tx_stat_gtpkt_lo; |
1535 | u32 tx_stat_gtpkt_hi; | 2022 | u32 tx_stat_gtpkt_hi; |
1536 | u32 tx_stat_gtxpf_lo; | 2023 | u32 tx_stat_gtxpf_lo; |
1537 | u32 tx_stat_gtxpf_hi; | 2024 | u32 tx_stat_gtxpf_hi; |
1538 | u32 tx_stat_gtfcs_lo; | 2025 | u32 tx_stat_gtfcs_lo; |
1539 | u32 tx_stat_gtfcs_hi; | 2026 | u32 tx_stat_gtfcs_hi; |
1540 | u32 tx_stat_gtmca_lo; | 2027 | u32 tx_stat_gtmca_lo; |
1541 | u32 tx_stat_gtmca_hi; | 2028 | u32 tx_stat_gtmca_hi; |
1542 | u32 tx_stat_gtbca_lo; | 2029 | u32 tx_stat_gtbca_lo; |
1543 | u32 tx_stat_gtbca_hi; | 2030 | u32 tx_stat_gtbca_hi; |
1544 | u32 tx_stat_gtfrg_lo; | 2031 | u32 tx_stat_gtfrg_lo; |
1545 | u32 tx_stat_gtfrg_hi; | 2032 | u32 tx_stat_gtfrg_hi; |
1546 | u32 tx_stat_gtovr_lo; | 2033 | u32 tx_stat_gtovr_lo; |
1547 | u32 tx_stat_gtovr_hi; | 2034 | u32 tx_stat_gtovr_hi; |
1548 | u32 tx_stat_gt64_lo; | 2035 | u32 tx_stat_gt64_lo; |
1549 | u32 tx_stat_gt64_hi; | 2036 | u32 tx_stat_gt64_hi; |
1550 | u32 tx_stat_gt127_lo; | 2037 | u32 tx_stat_gt127_lo; |
1551 | u32 tx_stat_gt127_hi; | 2038 | u32 tx_stat_gt127_hi; |
1552 | u32 tx_stat_gt255_lo; | 2039 | u32 tx_stat_gt255_lo; |
1553 | u32 tx_stat_gt255_hi; | 2040 | u32 tx_stat_gt255_hi; |
1554 | u32 tx_stat_gt511_lo; | 2041 | u32 tx_stat_gt511_lo; |
1555 | u32 tx_stat_gt511_hi; | 2042 | u32 tx_stat_gt511_hi; |
1556 | u32 tx_stat_gt1023_lo; | 2043 | u32 tx_stat_gt1023_lo; |
1557 | u32 tx_stat_gt1023_hi; | 2044 | u32 tx_stat_gt1023_hi; |
1558 | u32 tx_stat_gt1518_lo; | 2045 | u32 tx_stat_gt1518_lo; |
1559 | u32 tx_stat_gt1518_hi; | 2046 | u32 tx_stat_gt1518_hi; |
1560 | u32 tx_stat_gt2047_lo; | 2047 | u32 tx_stat_gt2047_lo; |
1561 | u32 tx_stat_gt2047_hi; | 2048 | u32 tx_stat_gt2047_hi; |
1562 | u32 tx_stat_gt4095_lo; | 2049 | u32 tx_stat_gt4095_lo; |
1563 | u32 tx_stat_gt4095_hi; | 2050 | u32 tx_stat_gt4095_hi; |
1564 | u32 tx_stat_gt9216_lo; | 2051 | u32 tx_stat_gt9216_lo; |
1565 | u32 tx_stat_gt9216_hi; | 2052 | u32 tx_stat_gt9216_hi; |
1566 | u32 tx_stat_gt16383_lo; | 2053 | u32 tx_stat_gt16383_lo; |
1567 | u32 tx_stat_gt16383_hi; | 2054 | u32 tx_stat_gt16383_hi; |
1568 | u32 tx_stat_gtmax_lo; | 2055 | u32 tx_stat_gtmax_lo; |
1569 | u32 tx_stat_gtmax_hi; | 2056 | u32 tx_stat_gtmax_hi; |
1570 | u32 tx_stat_gtufl_lo; | 2057 | u32 tx_stat_gtufl_lo; |
1571 | u32 tx_stat_gtufl_hi; | 2058 | u32 tx_stat_gtufl_hi; |
1572 | u32 tx_stat_gterr_lo; | 2059 | u32 tx_stat_gterr_lo; |
1573 | u32 tx_stat_gterr_hi; | 2060 | u32 tx_stat_gterr_hi; |
1574 | u32 tx_stat_gtbyt_lo; | 2061 | u32 tx_stat_gtbyt_lo; |
1575 | u32 tx_stat_gtbyt_hi; | 2062 | u32 tx_stat_gtbyt_hi; |
1576 | 2063 | ||
1577 | u32 rx_stat_gr64_lo; | 2064 | u32 rx_stat_gr64_lo; |
1578 | u32 rx_stat_gr64_hi; | 2065 | u32 rx_stat_gr64_hi; |
1579 | u32 rx_stat_gr127_lo; | 2066 | u32 rx_stat_gr127_lo; |
1580 | u32 rx_stat_gr127_hi; | 2067 | u32 rx_stat_gr127_hi; |
1581 | u32 rx_stat_gr255_lo; | 2068 | u32 rx_stat_gr255_lo; |
1582 | u32 rx_stat_gr255_hi; | 2069 | u32 rx_stat_gr255_hi; |
1583 | u32 rx_stat_gr511_lo; | 2070 | u32 rx_stat_gr511_lo; |
1584 | u32 rx_stat_gr511_hi; | 2071 | u32 rx_stat_gr511_hi; |
1585 | u32 rx_stat_gr1023_lo; | 2072 | u32 rx_stat_gr1023_lo; |
1586 | u32 rx_stat_gr1023_hi; | 2073 | u32 rx_stat_gr1023_hi; |
1587 | u32 rx_stat_gr1518_lo; | 2074 | u32 rx_stat_gr1518_lo; |
1588 | u32 rx_stat_gr1518_hi; | 2075 | u32 rx_stat_gr1518_hi; |
1589 | u32 rx_stat_gr2047_lo; | 2076 | u32 rx_stat_gr2047_lo; |
1590 | u32 rx_stat_gr2047_hi; | 2077 | u32 rx_stat_gr2047_hi; |
1591 | u32 rx_stat_gr4095_lo; | 2078 | u32 rx_stat_gr4095_lo; |
1592 | u32 rx_stat_gr4095_hi; | 2079 | u32 rx_stat_gr4095_hi; |
1593 | u32 rx_stat_gr9216_lo; | 2080 | u32 rx_stat_gr9216_lo; |
1594 | u32 rx_stat_gr9216_hi; | 2081 | u32 rx_stat_gr9216_hi; |
1595 | u32 rx_stat_gr16383_lo; | 2082 | u32 rx_stat_gr16383_lo; |
1596 | u32 rx_stat_gr16383_hi; | 2083 | u32 rx_stat_gr16383_hi; |
1597 | u32 rx_stat_grmax_lo; | 2084 | u32 rx_stat_grmax_lo; |
1598 | u32 rx_stat_grmax_hi; | 2085 | u32 rx_stat_grmax_hi; |
1599 | u32 rx_stat_grpkt_lo; | 2086 | u32 rx_stat_grpkt_lo; |
1600 | u32 rx_stat_grpkt_hi; | 2087 | u32 rx_stat_grpkt_hi; |
1601 | u32 rx_stat_grfcs_lo; | 2088 | u32 rx_stat_grfcs_lo; |
1602 | u32 rx_stat_grfcs_hi; | 2089 | u32 rx_stat_grfcs_hi; |
1603 | u32 rx_stat_grmca_lo; | 2090 | u32 rx_stat_grmca_lo; |
1604 | u32 rx_stat_grmca_hi; | 2091 | u32 rx_stat_grmca_hi; |
1605 | u32 rx_stat_grbca_lo; | 2092 | u32 rx_stat_grbca_lo; |
1606 | u32 rx_stat_grbca_hi; | 2093 | u32 rx_stat_grbca_hi; |
1607 | u32 rx_stat_grxcf_lo; | 2094 | u32 rx_stat_grxcf_lo; |
1608 | u32 rx_stat_grxcf_hi; | 2095 | u32 rx_stat_grxcf_hi; |
1609 | u32 rx_stat_grxpf_lo; | 2096 | u32 rx_stat_grxpf_lo; |
1610 | u32 rx_stat_grxpf_hi; | 2097 | u32 rx_stat_grxpf_hi; |
1611 | u32 rx_stat_grxuo_lo; | 2098 | u32 rx_stat_grxuo_lo; |
1612 | u32 rx_stat_grxuo_hi; | 2099 | u32 rx_stat_grxuo_hi; |
1613 | u32 rx_stat_grjbr_lo; | 2100 | u32 rx_stat_grjbr_lo; |
1614 | u32 rx_stat_grjbr_hi; | 2101 | u32 rx_stat_grjbr_hi; |
1615 | u32 rx_stat_grovr_lo; | 2102 | u32 rx_stat_grovr_lo; |
1616 | u32 rx_stat_grovr_hi; | 2103 | u32 rx_stat_grovr_hi; |
1617 | u32 rx_stat_grflr_lo; | 2104 | u32 rx_stat_grflr_lo; |
1618 | u32 rx_stat_grflr_hi; | 2105 | u32 rx_stat_grflr_hi; |
1619 | u32 rx_stat_grmeg_lo; | 2106 | u32 rx_stat_grmeg_lo; |
1620 | u32 rx_stat_grmeg_hi; | 2107 | u32 rx_stat_grmeg_hi; |
1621 | u32 rx_stat_grmeb_lo; | 2108 | u32 rx_stat_grmeb_lo; |
1622 | u32 rx_stat_grmeb_hi; | 2109 | u32 rx_stat_grmeb_hi; |
1623 | u32 rx_stat_grbyt_lo; | 2110 | u32 rx_stat_grbyt_lo; |
1624 | u32 rx_stat_grbyt_hi; | 2111 | u32 rx_stat_grbyt_hi; |
1625 | u32 rx_stat_grund_lo; | 2112 | u32 rx_stat_grund_lo; |
1626 | u32 rx_stat_grund_hi; | 2113 | u32 rx_stat_grund_hi; |
1627 | u32 rx_stat_grfrg_lo; | 2114 | u32 rx_stat_grfrg_lo; |
1628 | u32 rx_stat_grfrg_hi; | 2115 | u32 rx_stat_grfrg_hi; |
1629 | u32 rx_stat_grerb_lo; | 2116 | u32 rx_stat_grerb_lo; |
1630 | u32 rx_stat_grerb_hi; | 2117 | u32 rx_stat_grerb_hi; |
1631 | u32 rx_stat_grfre_lo; | 2118 | u32 rx_stat_grfre_lo; |
1632 | u32 rx_stat_grfre_hi; | 2119 | u32 rx_stat_grfre_hi; |
1633 | u32 rx_stat_gripj_lo; | 2120 | u32 rx_stat_gripj_lo; |
1634 | u32 rx_stat_gripj_hi; | 2121 | u32 rx_stat_gripj_hi; |
1635 | }; | 2122 | }; |
1636 | 2123 | ||
1637 | struct bmac2_stats { | 2124 | struct bmac2_stats { |
@@ -1750,187 +2237,316 @@ struct bmac2_stats { | |||
1750 | u32 rx_stat_gripj_hi; | 2237 | u32 rx_stat_gripj_hi; |
1751 | }; | 2238 | }; |
1752 | 2239 | ||
2240 | struct mstat_stats { | ||
2241 | struct { | ||
2242 | /* NOTE: MSTAT on E3 has a bug where this register's contents are | ||
2243 | * actually tx_gtxpok + tx_gtxpf + (possibly) tx_gtxpp | ||
2244 | */ | ||
2245 | u32 tx_gtxpok_lo; | ||
2246 | u32 tx_gtxpok_hi; | ||
2247 | u32 tx_gtxpf_lo; | ||
2248 | u32 tx_gtxpf_hi; | ||
2249 | u32 tx_gtxpp_lo; | ||
2250 | u32 tx_gtxpp_hi; | ||
2251 | u32 tx_gtfcs_lo; | ||
2252 | u32 tx_gtfcs_hi; | ||
2253 | u32 tx_gtuca_lo; | ||
2254 | u32 tx_gtuca_hi; | ||
2255 | u32 tx_gtmca_lo; | ||
2256 | u32 tx_gtmca_hi; | ||
2257 | u32 tx_gtgca_lo; | ||
2258 | u32 tx_gtgca_hi; | ||
2259 | u32 tx_gtpkt_lo; | ||
2260 | u32 tx_gtpkt_hi; | ||
2261 | u32 tx_gt64_lo; | ||
2262 | u32 tx_gt64_hi; | ||
2263 | u32 tx_gt127_lo; | ||
2264 | u32 tx_gt127_hi; | ||
2265 | u32 tx_gt255_lo; | ||
2266 | u32 tx_gt255_hi; | ||
2267 | u32 tx_gt511_lo; | ||
2268 | u32 tx_gt511_hi; | ||
2269 | u32 tx_gt1023_lo; | ||
2270 | u32 tx_gt1023_hi; | ||
2271 | u32 tx_gt1518_lo; | ||
2272 | u32 tx_gt1518_hi; | ||
2273 | u32 tx_gt2047_lo; | ||
2274 | u32 tx_gt2047_hi; | ||
2275 | u32 tx_gt4095_lo; | ||
2276 | u32 tx_gt4095_hi; | ||
2277 | u32 tx_gt9216_lo; | ||
2278 | u32 tx_gt9216_hi; | ||
2279 | u32 tx_gt16383_lo; | ||
2280 | u32 tx_gt16383_hi; | ||
2281 | u32 tx_gtufl_lo; | ||
2282 | u32 tx_gtufl_hi; | ||
2283 | u32 tx_gterr_lo; | ||
2284 | u32 tx_gterr_hi; | ||
2285 | u32 tx_gtbyt_lo; | ||
2286 | u32 tx_gtbyt_hi; | ||
2287 | u32 tx_collisions_lo; | ||
2288 | u32 tx_collisions_hi; | ||
2289 | u32 tx_singlecollision_lo; | ||
2290 | u32 tx_singlecollision_hi; | ||
2291 | u32 tx_multiplecollisions_lo; | ||
2292 | u32 tx_multiplecollisions_hi; | ||
2293 | u32 tx_deferred_lo; | ||
2294 | u32 tx_deferred_hi; | ||
2295 | u32 tx_excessivecollisions_lo; | ||
2296 | u32 tx_excessivecollisions_hi; | ||
2297 | u32 tx_latecollisions_lo; | ||
2298 | u32 tx_latecollisions_hi; | ||
2299 | } stats_tx; | ||
2300 | |||
2301 | struct { | ||
2302 | u32 rx_gr64_lo; | ||
2303 | u32 rx_gr64_hi; | ||
2304 | u32 rx_gr127_lo; | ||
2305 | u32 rx_gr127_hi; | ||
2306 | u32 rx_gr255_lo; | ||
2307 | u32 rx_gr255_hi; | ||
2308 | u32 rx_gr511_lo; | ||
2309 | u32 rx_gr511_hi; | ||
2310 | u32 rx_gr1023_lo; | ||
2311 | u32 rx_gr1023_hi; | ||
2312 | u32 rx_gr1518_lo; | ||
2313 | u32 rx_gr1518_hi; | ||
2314 | u32 rx_gr2047_lo; | ||
2315 | u32 rx_gr2047_hi; | ||
2316 | u32 rx_gr4095_lo; | ||
2317 | u32 rx_gr4095_hi; | ||
2318 | u32 rx_gr9216_lo; | ||
2319 | u32 rx_gr9216_hi; | ||
2320 | u32 rx_gr16383_lo; | ||
2321 | u32 rx_gr16383_hi; | ||
2322 | u32 rx_grpkt_lo; | ||
2323 | u32 rx_grpkt_hi; | ||
2324 | u32 rx_grfcs_lo; | ||
2325 | u32 rx_grfcs_hi; | ||
2326 | u32 rx_gruca_lo; | ||
2327 | u32 rx_gruca_hi; | ||
2328 | u32 rx_grmca_lo; | ||
2329 | u32 rx_grmca_hi; | ||
2330 | u32 rx_grbca_lo; | ||
2331 | u32 rx_grbca_hi; | ||
2332 | u32 rx_grxpf_lo; | ||
2333 | u32 rx_grxpf_hi; | ||
2334 | u32 rx_grxpp_lo; | ||
2335 | u32 rx_grxpp_hi; | ||
2336 | u32 rx_grxuo_lo; | ||
2337 | u32 rx_grxuo_hi; | ||
2338 | u32 rx_grovr_lo; | ||
2339 | u32 rx_grovr_hi; | ||
2340 | u32 rx_grxcf_lo; | ||
2341 | u32 rx_grxcf_hi; | ||
2342 | u32 rx_grflr_lo; | ||
2343 | u32 rx_grflr_hi; | ||
2344 | u32 rx_grpok_lo; | ||
2345 | u32 rx_grpok_hi; | ||
2346 | u32 rx_grbyt_lo; | ||
2347 | u32 rx_grbyt_hi; | ||
2348 | u32 rx_grund_lo; | ||
2349 | u32 rx_grund_hi; | ||
2350 | u32 rx_grfrg_lo; | ||
2351 | u32 rx_grfrg_hi; | ||
2352 | u32 rx_grerb_lo; | ||
2353 | u32 rx_grerb_hi; | ||
2354 | u32 rx_grfre_lo; | ||
2355 | u32 rx_grfre_hi; | ||
2356 | |||
2357 | u32 rx_alignmenterrors_lo; | ||
2358 | u32 rx_alignmenterrors_hi; | ||
2359 | u32 rx_falsecarrier_lo; | ||
2360 | u32 rx_falsecarrier_hi; | ||
2361 | u32 rx_llfcmsgcnt_lo; | ||
2362 | u32 rx_llfcmsgcnt_hi; | ||
2363 | } stats_rx; | ||
2364 | }; | ||
2365 | |||
1753 | union mac_stats { | 2366 | union mac_stats { |
1754 | struct emac_stats emac_stats; | 2367 | struct emac_stats emac_stats; |
1755 | struct bmac1_stats bmac1_stats; | 2368 | struct bmac1_stats bmac1_stats; |
1756 | struct bmac2_stats bmac2_stats; | 2369 | struct bmac2_stats bmac2_stats; |
2370 | struct mstat_stats mstat_stats; | ||
1757 | }; | 2371 | }; |
1758 | 2372 | ||
1759 | 2373 | ||
1760 | struct mac_stx { | 2374 | struct mac_stx { |
1761 | /* in_bad_octets */ | 2375 | /* in_bad_octets */ |
1762 | u32 rx_stat_ifhcinbadoctets_hi; | 2376 | u32 rx_stat_ifhcinbadoctets_hi; |
1763 | u32 rx_stat_ifhcinbadoctets_lo; | 2377 | u32 rx_stat_ifhcinbadoctets_lo; |
1764 | 2378 | ||
1765 | /* out_bad_octets */ | 2379 | /* out_bad_octets */ |
1766 | u32 tx_stat_ifhcoutbadoctets_hi; | 2380 | u32 tx_stat_ifhcoutbadoctets_hi; |
1767 | u32 tx_stat_ifhcoutbadoctets_lo; | 2381 | u32 tx_stat_ifhcoutbadoctets_lo; |
1768 | 2382 | ||
1769 | /* crc_receive_errors */ | 2383 | /* crc_receive_errors */ |
1770 | u32 rx_stat_dot3statsfcserrors_hi; | 2384 | u32 rx_stat_dot3statsfcserrors_hi; |
1771 | u32 rx_stat_dot3statsfcserrors_lo; | 2385 | u32 rx_stat_dot3statsfcserrors_lo; |
1772 | /* alignment_errors */ | 2386 | /* alignment_errors */ |
1773 | u32 rx_stat_dot3statsalignmenterrors_hi; | 2387 | u32 rx_stat_dot3statsalignmenterrors_hi; |
1774 | u32 rx_stat_dot3statsalignmenterrors_lo; | 2388 | u32 rx_stat_dot3statsalignmenterrors_lo; |
1775 | /* carrier_sense_errors */ | 2389 | /* carrier_sense_errors */ |
1776 | u32 rx_stat_dot3statscarriersenseerrors_hi; | 2390 | u32 rx_stat_dot3statscarriersenseerrors_hi; |
1777 | u32 rx_stat_dot3statscarriersenseerrors_lo; | 2391 | u32 rx_stat_dot3statscarriersenseerrors_lo; |
1778 | /* false_carrier_detections */ | 2392 | /* false_carrier_detections */ |
1779 | u32 rx_stat_falsecarriererrors_hi; | 2393 | u32 rx_stat_falsecarriererrors_hi; |
1780 | u32 rx_stat_falsecarriererrors_lo; | 2394 | u32 rx_stat_falsecarriererrors_lo; |
1781 | 2395 | ||
1782 | /* runt_packets_received */ | 2396 | /* runt_packets_received */ |
1783 | u32 rx_stat_etherstatsundersizepkts_hi; | 2397 | u32 rx_stat_etherstatsundersizepkts_hi; |
1784 | u32 rx_stat_etherstatsundersizepkts_lo; | 2398 | u32 rx_stat_etherstatsundersizepkts_lo; |
1785 | /* jabber_packets_received */ | 2399 | /* jabber_packets_received */ |
1786 | u32 rx_stat_dot3statsframestoolong_hi; | 2400 | u32 rx_stat_dot3statsframestoolong_hi; |
1787 | u32 rx_stat_dot3statsframestoolong_lo; | 2401 | u32 rx_stat_dot3statsframestoolong_lo; |
1788 | 2402 | ||
1789 | /* error_runt_packets_received */ | 2403 | /* error_runt_packets_received */ |
1790 | u32 rx_stat_etherstatsfragments_hi; | 2404 | u32 rx_stat_etherstatsfragments_hi; |
1791 | u32 rx_stat_etherstatsfragments_lo; | 2405 | u32 rx_stat_etherstatsfragments_lo; |
1792 | /* error_jabber_packets_received */ | 2406 | /* error_jabber_packets_received */ |
1793 | u32 rx_stat_etherstatsjabbers_hi; | 2407 | u32 rx_stat_etherstatsjabbers_hi; |
1794 | u32 rx_stat_etherstatsjabbers_lo; | 2408 | u32 rx_stat_etherstatsjabbers_lo; |
1795 | 2409 | ||
1796 | /* control_frames_received */ | 2410 | /* control_frames_received */ |
1797 | u32 rx_stat_maccontrolframesreceived_hi; | 2411 | u32 rx_stat_maccontrolframesreceived_hi; |
1798 | u32 rx_stat_maccontrolframesreceived_lo; | 2412 | u32 rx_stat_maccontrolframesreceived_lo; |
1799 | u32 rx_stat_bmac_xpf_hi; | 2413 | u32 rx_stat_mac_xpf_hi; |
1800 | u32 rx_stat_bmac_xpf_lo; | 2414 | u32 rx_stat_mac_xpf_lo; |
1801 | u32 rx_stat_bmac_xcf_hi; | 2415 | u32 rx_stat_mac_xcf_hi; |
1802 | u32 rx_stat_bmac_xcf_lo; | 2416 | u32 rx_stat_mac_xcf_lo; |
1803 | 2417 | ||
1804 | /* xoff_state_entered */ | 2418 | /* xoff_state_entered */ |
1805 | u32 rx_stat_xoffstateentered_hi; | 2419 | u32 rx_stat_xoffstateentered_hi; |
1806 | u32 rx_stat_xoffstateentered_lo; | 2420 | u32 rx_stat_xoffstateentered_lo; |
1807 | /* pause_xon_frames_received */ | 2421 | /* pause_xon_frames_received */ |
1808 | u32 rx_stat_xonpauseframesreceived_hi; | 2422 | u32 rx_stat_xonpauseframesreceived_hi; |
1809 | u32 rx_stat_xonpauseframesreceived_lo; | 2423 | u32 rx_stat_xonpauseframesreceived_lo; |
1810 | /* pause_xoff_frames_received */ | 2424 | /* pause_xoff_frames_received */ |
1811 | u32 rx_stat_xoffpauseframesreceived_hi; | 2425 | u32 rx_stat_xoffpauseframesreceived_hi; |
1812 | u32 rx_stat_xoffpauseframesreceived_lo; | 2426 | u32 rx_stat_xoffpauseframesreceived_lo; |
1813 | /* pause_xon_frames_transmitted */ | 2427 | /* pause_xon_frames_transmitted */ |
1814 | u32 tx_stat_outxonsent_hi; | 2428 | u32 tx_stat_outxonsent_hi; |
1815 | u32 tx_stat_outxonsent_lo; | 2429 | u32 tx_stat_outxonsent_lo; |
1816 | /* pause_xoff_frames_transmitted */ | 2430 | /* pause_xoff_frames_transmitted */ |
1817 | u32 tx_stat_outxoffsent_hi; | 2431 | u32 tx_stat_outxoffsent_hi; |
1818 | u32 tx_stat_outxoffsent_lo; | 2432 | u32 tx_stat_outxoffsent_lo; |
1819 | /* flow_control_done */ | 2433 | /* flow_control_done */ |
1820 | u32 tx_stat_flowcontroldone_hi; | 2434 | u32 tx_stat_flowcontroldone_hi; |
1821 | u32 tx_stat_flowcontroldone_lo; | 2435 | u32 tx_stat_flowcontroldone_lo; |
1822 | 2436 | ||
1823 | /* ether_stats_collisions */ | 2437 | /* ether_stats_collisions */ |
1824 | u32 tx_stat_etherstatscollisions_hi; | 2438 | u32 tx_stat_etherstatscollisions_hi; |
1825 | u32 tx_stat_etherstatscollisions_lo; | 2439 | u32 tx_stat_etherstatscollisions_lo; |
1826 | /* single_collision_transmit_frames */ | 2440 | /* single_collision_transmit_frames */ |
1827 | u32 tx_stat_dot3statssinglecollisionframes_hi; | 2441 | u32 tx_stat_dot3statssinglecollisionframes_hi; |
1828 | u32 tx_stat_dot3statssinglecollisionframes_lo; | 2442 | u32 tx_stat_dot3statssinglecollisionframes_lo; |
1829 | /* multiple_collision_transmit_frames */ | 2443 | /* multiple_collision_transmit_frames */ |
1830 | u32 tx_stat_dot3statsmultiplecollisionframes_hi; | 2444 | u32 tx_stat_dot3statsmultiplecollisionframes_hi; |
1831 | u32 tx_stat_dot3statsmultiplecollisionframes_lo; | 2445 | u32 tx_stat_dot3statsmultiplecollisionframes_lo; |
1832 | /* deferred_transmissions */ | 2446 | /* deferred_transmissions */ |
1833 | u32 tx_stat_dot3statsdeferredtransmissions_hi; | 2447 | u32 tx_stat_dot3statsdeferredtransmissions_hi; |
1834 | u32 tx_stat_dot3statsdeferredtransmissions_lo; | 2448 | u32 tx_stat_dot3statsdeferredtransmissions_lo; |
1835 | /* excessive_collision_frames */ | 2449 | /* excessive_collision_frames */ |
1836 | u32 tx_stat_dot3statsexcessivecollisions_hi; | 2450 | u32 tx_stat_dot3statsexcessivecollisions_hi; |
1837 | u32 tx_stat_dot3statsexcessivecollisions_lo; | 2451 | u32 tx_stat_dot3statsexcessivecollisions_lo; |
1838 | /* late_collision_frames */ | 2452 | /* late_collision_frames */ |
1839 | u32 tx_stat_dot3statslatecollisions_hi; | 2453 | u32 tx_stat_dot3statslatecollisions_hi; |
1840 | u32 tx_stat_dot3statslatecollisions_lo; | 2454 | u32 tx_stat_dot3statslatecollisions_lo; |
1841 | 2455 | ||
1842 | /* frames_transmitted_64_bytes */ | 2456 | /* frames_transmitted_64_bytes */ |
1843 | u32 tx_stat_etherstatspkts64octets_hi; | 2457 | u32 tx_stat_etherstatspkts64octets_hi; |
1844 | u32 tx_stat_etherstatspkts64octets_lo; | 2458 | u32 tx_stat_etherstatspkts64octets_lo; |
1845 | /* frames_transmitted_65_127_bytes */ | 2459 | /* frames_transmitted_65_127_bytes */ |
1846 | u32 tx_stat_etherstatspkts65octetsto127octets_hi; | 2460 | u32 tx_stat_etherstatspkts65octetsto127octets_hi; |
1847 | u32 tx_stat_etherstatspkts65octetsto127octets_lo; | 2461 | u32 tx_stat_etherstatspkts65octetsto127octets_lo; |
1848 | /* frames_transmitted_128_255_bytes */ | 2462 | /* frames_transmitted_128_255_bytes */ |
1849 | u32 tx_stat_etherstatspkts128octetsto255octets_hi; | 2463 | u32 tx_stat_etherstatspkts128octetsto255octets_hi; |
1850 | u32 tx_stat_etherstatspkts128octetsto255octets_lo; | 2464 | u32 tx_stat_etherstatspkts128octetsto255octets_lo; |
1851 | /* frames_transmitted_256_511_bytes */ | 2465 | /* frames_transmitted_256_511_bytes */ |
1852 | u32 tx_stat_etherstatspkts256octetsto511octets_hi; | 2466 | u32 tx_stat_etherstatspkts256octetsto511octets_hi; |
1853 | u32 tx_stat_etherstatspkts256octetsto511octets_lo; | 2467 | u32 tx_stat_etherstatspkts256octetsto511octets_lo; |
1854 | /* frames_transmitted_512_1023_bytes */ | 2468 | /* frames_transmitted_512_1023_bytes */ |
1855 | u32 tx_stat_etherstatspkts512octetsto1023octets_hi; | 2469 | u32 tx_stat_etherstatspkts512octetsto1023octets_hi; |
1856 | u32 tx_stat_etherstatspkts512octetsto1023octets_lo; | 2470 | u32 tx_stat_etherstatspkts512octetsto1023octets_lo; |
1857 | /* frames_transmitted_1024_1522_bytes */ | 2471 | /* frames_transmitted_1024_1522_bytes */ |
1858 | u32 tx_stat_etherstatspkts1024octetsto1522octets_hi; | 2472 | u32 tx_stat_etherstatspkts1024octetsto1522octets_hi; |
1859 | u32 tx_stat_etherstatspkts1024octetsto1522octets_lo; | 2473 | u32 tx_stat_etherstatspkts1024octetsto1522octets_lo; |
1860 | /* frames_transmitted_1523_9022_bytes */ | 2474 | /* frames_transmitted_1523_9022_bytes */ |
1861 | u32 tx_stat_etherstatspktsover1522octets_hi; | 2475 | u32 tx_stat_etherstatspktsover1522octets_hi; |
1862 | u32 tx_stat_etherstatspktsover1522octets_lo; | 2476 | u32 tx_stat_etherstatspktsover1522octets_lo; |
1863 | u32 tx_stat_bmac_2047_hi; | 2477 | u32 tx_stat_mac_2047_hi; |
1864 | u32 tx_stat_bmac_2047_lo; | 2478 | u32 tx_stat_mac_2047_lo; |
1865 | u32 tx_stat_bmac_4095_hi; | 2479 | u32 tx_stat_mac_4095_hi; |
1866 | u32 tx_stat_bmac_4095_lo; | 2480 | u32 tx_stat_mac_4095_lo; |
1867 | u32 tx_stat_bmac_9216_hi; | 2481 | u32 tx_stat_mac_9216_hi; |
1868 | u32 tx_stat_bmac_9216_lo; | 2482 | u32 tx_stat_mac_9216_lo; |
1869 | u32 tx_stat_bmac_16383_hi; | 2483 | u32 tx_stat_mac_16383_hi; |
1870 | u32 tx_stat_bmac_16383_lo; | 2484 | u32 tx_stat_mac_16383_lo; |
1871 | 2485 | ||
1872 | /* internal_mac_transmit_errors */ | 2486 | /* internal_mac_transmit_errors */ |
1873 | u32 tx_stat_dot3statsinternalmactransmiterrors_hi; | 2487 | u32 tx_stat_dot3statsinternalmactransmiterrors_hi; |
1874 | u32 tx_stat_dot3statsinternalmactransmiterrors_lo; | 2488 | u32 tx_stat_dot3statsinternalmactransmiterrors_lo; |
1875 | 2489 | ||
1876 | /* if_out_discards */ | 2490 | /* if_out_discards */ |
1877 | u32 tx_stat_bmac_ufl_hi; | 2491 | u32 tx_stat_mac_ufl_hi; |
1878 | u32 tx_stat_bmac_ufl_lo; | 2492 | u32 tx_stat_mac_ufl_lo; |
1879 | }; | 2493 | }; |
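
Every _hi/_lo pair in mac_stx (and in the MAC statistics blocks above) is one 64-bit counter split into two 32-bit words; a reader recombines them as below. This helper is illustrative, not a definition taken from the driver.

static inline u64 stat_u64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;
}

/* e.g. stat_u64(stx->rx_stat_dot3statsfcserrors_hi,
 *		 stx->rx_stat_dot3statsfcserrors_lo) */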
1880 | 2494 | ||
1881 | 2495 | ||
1882 | #define MAC_STX_IDX_MAX 2 | 2496 | #define MAC_STX_IDX_MAX 2 |
1883 | 2497 | ||
1884 | struct host_port_stats { | 2498 | struct host_port_stats { |
1885 | u32 host_port_stats_start; | 2499 | u32 host_port_stats_start; |
1886 | 2500 | ||
1887 | struct mac_stx mac_stx[MAC_STX_IDX_MAX]; | 2501 | struct mac_stx mac_stx[MAC_STX_IDX_MAX]; |
1888 | 2502 | ||
1889 | u32 brb_drop_hi; | 2503 | u32 brb_drop_hi; |
1890 | u32 brb_drop_lo; | 2504 | u32 brb_drop_lo; |
1891 | 2505 | ||
1892 | u32 host_port_stats_end; | 2506 | u32 host_port_stats_end; |
1893 | }; | 2507 | }; |
1894 | 2508 | ||
1895 | 2509 | ||
1896 | struct host_func_stats { | 2510 | struct host_func_stats { |
1897 | u32 host_func_stats_start; | 2511 | u32 host_func_stats_start; |
1898 | 2512 | ||
1899 | u32 total_bytes_received_hi; | 2513 | u32 total_bytes_received_hi; |
1900 | u32 total_bytes_received_lo; | 2514 | u32 total_bytes_received_lo; |
1901 | 2515 | ||
1902 | u32 total_bytes_transmitted_hi; | 2516 | u32 total_bytes_transmitted_hi; |
1903 | u32 total_bytes_transmitted_lo; | 2517 | u32 total_bytes_transmitted_lo; |
1904 | 2518 | ||
1905 | u32 total_unicast_packets_received_hi; | 2519 | u32 total_unicast_packets_received_hi; |
1906 | u32 total_unicast_packets_received_lo; | 2520 | u32 total_unicast_packets_received_lo; |
1907 | 2521 | ||
1908 | u32 total_multicast_packets_received_hi; | 2522 | u32 total_multicast_packets_received_hi; |
1909 | u32 total_multicast_packets_received_lo; | 2523 | u32 total_multicast_packets_received_lo; |
1910 | 2524 | ||
1911 | u32 total_broadcast_packets_received_hi; | 2525 | u32 total_broadcast_packets_received_hi; |
1912 | u32 total_broadcast_packets_received_lo; | 2526 | u32 total_broadcast_packets_received_lo; |
1913 | 2527 | ||
1914 | u32 total_unicast_packets_transmitted_hi; | 2528 | u32 total_unicast_packets_transmitted_hi; |
1915 | u32 total_unicast_packets_transmitted_lo; | 2529 | u32 total_unicast_packets_transmitted_lo; |
1916 | 2530 | ||
1917 | u32 total_multicast_packets_transmitted_hi; | 2531 | u32 total_multicast_packets_transmitted_hi; |
1918 | u32 total_multicast_packets_transmitted_lo; | 2532 | u32 total_multicast_packets_transmitted_lo; |
1919 | 2533 | ||
1920 | u32 total_broadcast_packets_transmitted_hi; | 2534 | u32 total_broadcast_packets_transmitted_hi; |
1921 | u32 total_broadcast_packets_transmitted_lo; | 2535 | u32 total_broadcast_packets_transmitted_lo; |
1922 | 2536 | ||
1923 | u32 valid_bytes_received_hi; | 2537 | u32 valid_bytes_received_hi; |
1924 | u32 valid_bytes_received_lo; | 2538 | u32 valid_bytes_received_lo; |
1925 | 2539 | ||
1926 | u32 host_func_stats_end; | 2540 | u32 host_func_stats_end; |
1927 | }; | 2541 | }; |
1928 | 2542 | ||
2543 | /* VIC definitions */ | ||
2544 | #define VICSTATST_UIF_INDEX 2 | ||
1929 | 2545 | ||
1930 | #define BCM_5710_FW_MAJOR_VERSION 6 | 2546 | #define BCM_5710_FW_MAJOR_VERSION 7 |
1931 | #define BCM_5710_FW_MINOR_VERSION 2 | 2547 | #define BCM_5710_FW_MINOR_VERSION 0 |
1932 | #define BCM_5710_FW_REVISION_VERSION 9 | 2548 | #define BCM_5710_FW_REVISION_VERSION 20 |
1933 | #define BCM_5710_FW_ENGINEERING_VERSION 0 | 2549 | #define BCM_5710_FW_ENGINEERING_VERSION 0 |
1934 | #define BCM_5710_FW_COMPILE_FLAGS 1 | 2550 | #define BCM_5710_FW_COMPILE_FLAGS 1 |
1935 | 2551 | ||
1936 | 2552 | ||
@@ -1948,6 +2564,115 @@ struct atten_sp_status_block { | |||
1948 | 2564 | ||
1949 | 2565 | ||
1950 | /* | 2566 | /* |
2567 | * The eth aggregative context of Cstorm | ||
2568 | */ | ||
2569 | struct cstorm_eth_ag_context { | ||
2570 | u32 __reserved0[10]; | ||
2571 | }; | ||
2572 | |||
2573 | |||
2574 | /* | ||
2575 | * dmae command structure | ||
2576 | */ | ||
2577 | struct dmae_command { | ||
2578 | u32 opcode; | ||
2579 | #define DMAE_COMMAND_SRC (0x1<<0) | ||
2580 | #define DMAE_COMMAND_SRC_SHIFT 0 | ||
2581 | #define DMAE_COMMAND_DST (0x3<<1) | ||
2582 | #define DMAE_COMMAND_DST_SHIFT 1 | ||
2583 | #define DMAE_COMMAND_C_DST (0x1<<3) | ||
2584 | #define DMAE_COMMAND_C_DST_SHIFT 3 | ||
2585 | #define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4) | ||
2586 | #define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4 | ||
2587 | #define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5) | ||
2588 | #define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5 | ||
2589 | #define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6) | ||
2590 | #define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6 | ||
2591 | #define DMAE_COMMAND_ENDIANITY (0x3<<9) | ||
2592 | #define DMAE_COMMAND_ENDIANITY_SHIFT 9 | ||
2593 | #define DMAE_COMMAND_PORT (0x1<<11) | ||
2594 | #define DMAE_COMMAND_PORT_SHIFT 11 | ||
2595 | #define DMAE_COMMAND_CRC_RESET (0x1<<12) | ||
2596 | #define DMAE_COMMAND_CRC_RESET_SHIFT 12 | ||
2597 | #define DMAE_COMMAND_SRC_RESET (0x1<<13) | ||
2598 | #define DMAE_COMMAND_SRC_RESET_SHIFT 13 | ||
2599 | #define DMAE_COMMAND_DST_RESET (0x1<<14) | ||
2600 | #define DMAE_COMMAND_DST_RESET_SHIFT 14 | ||
2601 | #define DMAE_COMMAND_E1HVN (0x3<<15) | ||
2602 | #define DMAE_COMMAND_E1HVN_SHIFT 15 | ||
2603 | #define DMAE_COMMAND_DST_VN (0x3<<17) | ||
2604 | #define DMAE_COMMAND_DST_VN_SHIFT 17 | ||
2605 | #define DMAE_COMMAND_C_FUNC (0x1<<19) | ||
2606 | #define DMAE_COMMAND_C_FUNC_SHIFT 19 | ||
2607 | #define DMAE_COMMAND_ERR_POLICY (0x3<<20) | ||
2608 | #define DMAE_COMMAND_ERR_POLICY_SHIFT 20 | ||
2609 | #define DMAE_COMMAND_RESERVED0 (0x3FF<<22) | ||
2610 | #define DMAE_COMMAND_RESERVED0_SHIFT 22 | ||
2611 | u32 src_addr_lo; | ||
2612 | u32 src_addr_hi; | ||
2613 | u32 dst_addr_lo; | ||
2614 | u32 dst_addr_hi; | ||
2615 | #if defined(__BIG_ENDIAN) | ||
2616 | u16 opcode_iov; | ||
2617 | #define DMAE_COMMAND_SRC_VFID (0x3F<<0) | ||
2618 | #define DMAE_COMMAND_SRC_VFID_SHIFT 0 | ||
2619 | #define DMAE_COMMAND_SRC_VFPF (0x1<<6) | ||
2620 | #define DMAE_COMMAND_SRC_VFPF_SHIFT 6 | ||
2621 | #define DMAE_COMMAND_RESERVED1 (0x1<<7) | ||
2622 | #define DMAE_COMMAND_RESERVED1_SHIFT 7 | ||
2623 | #define DMAE_COMMAND_DST_VFID (0x3F<<8) | ||
2624 | #define DMAE_COMMAND_DST_VFID_SHIFT 8 | ||
2625 | #define DMAE_COMMAND_DST_VFPF (0x1<<14) | ||
2626 | #define DMAE_COMMAND_DST_VFPF_SHIFT 14 | ||
2627 | #define DMAE_COMMAND_RESERVED2 (0x1<<15) | ||
2628 | #define DMAE_COMMAND_RESERVED2_SHIFT 15 | ||
2629 | u16 len; | ||
2630 | #elif defined(__LITTLE_ENDIAN) | ||
2631 | u16 len; | ||
2632 | u16 opcode_iov; | ||
2633 | #define DMAE_COMMAND_SRC_VFID (0x3F<<0) | ||
2634 | #define DMAE_COMMAND_SRC_VFID_SHIFT 0 | ||
2635 | #define DMAE_COMMAND_SRC_VFPF (0x1<<6) | ||
2636 | #define DMAE_COMMAND_SRC_VFPF_SHIFT 6 | ||
2637 | #define DMAE_COMMAND_RESERVED1 (0x1<<7) | ||
2638 | #define DMAE_COMMAND_RESERVED1_SHIFT 7 | ||
2639 | #define DMAE_COMMAND_DST_VFID (0x3F<<8) | ||
2640 | #define DMAE_COMMAND_DST_VFID_SHIFT 8 | ||
2641 | #define DMAE_COMMAND_DST_VFPF (0x1<<14) | ||
2642 | #define DMAE_COMMAND_DST_VFPF_SHIFT 14 | ||
2643 | #define DMAE_COMMAND_RESERVED2 (0x1<<15) | ||
2644 | #define DMAE_COMMAND_RESERVED2_SHIFT 15 | ||
2645 | #endif | ||
2646 | u32 comp_addr_lo; | ||
2647 | u32 comp_addr_hi; | ||
2648 | u32 comp_val; | ||
2649 | u32 crc32; | ||
2650 | u32 crc32_c; | ||
2651 | #if defined(__BIG_ENDIAN) | ||
2652 | u16 crc16_c; | ||
2653 | u16 crc16; | ||
2654 | #elif defined(__LITTLE_ENDIAN) | ||
2655 | u16 crc16; | ||
2656 | u16 crc16_c; | ||
2657 | #endif | ||
2658 | #if defined(__BIG_ENDIAN) | ||
2659 | u16 reserved3; | ||
2660 | u16 crc_t10; | ||
2661 | #elif defined(__LITTLE_ENDIAN) | ||
2662 | u16 crc_t10; | ||
2663 | u16 reserved3; | ||
2664 | #endif | ||
2665 | #if defined(__BIG_ENDIAN) | ||
2666 | u16 xsum8; | ||
2667 | u16 xsum16; | ||
2668 | #elif defined(__LITTLE_ENDIAN) | ||
2669 | u16 xsum16; | ||
2670 | u16 xsum8; | ||
2671 | #endif | ||
2672 | }; | ||
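
Each DMAE_COMMAND_* define above comes as a mask with a matching _SHIFT, so a field is encoded as (value << SHIFT) & MASK. A minimal sketch, assuming src_type, dst_type and vn are locals whose legal values are defined elsewhere in the driver:

u32 opcode = 0;

opcode |= (src_type << DMAE_COMMAND_SRC_SHIFT) & DMAE_COMMAND_SRC;
opcode |= (dst_type << DMAE_COMMAND_DST_SHIFT) & DMAE_COMMAND_DST;
opcode |= (vn << DMAE_COMMAND_E1HVN_SHIFT) & DMAE_COMMAND_E1HVN;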
2673 | |||
2674 | |||
2675 | /* | ||
1951 | * common data for all protocols | 2676 | * common data for all protocols |
1952 | */ | 2677 | */ |
1953 | struct doorbell_hdr { | 2678 | struct doorbell_hdr { |
@@ -1963,33 +2688,29 @@ struct doorbell_hdr { | |||
1963 | }; | 2688 | }; |
1964 | 2689 | ||
1965 | /* | 2690 | /* |
1966 | * doorbell message sent to the chip | 2691 | * Ethernet doorbell |
1967 | */ | ||
1968 | struct doorbell { | ||
1969 | #if defined(__BIG_ENDIAN) | ||
1970 | u16 zero_fill2; | ||
1971 | u8 zero_fill1; | ||
1972 | struct doorbell_hdr header; | ||
1973 | #elif defined(__LITTLE_ENDIAN) | ||
1974 | struct doorbell_hdr header; | ||
1975 | u8 zero_fill1; | ||
1976 | u16 zero_fill2; | ||
1977 | #endif | ||
1978 | }; | ||
1979 | |||
1980 | |||
1981 | /* | ||
1982 | * doorbell message sent to the chip | ||
1983 | */ | 2692 | */ |
1984 | struct doorbell_set_prod { | 2693 | struct eth_tx_doorbell { |
1985 | #if defined(__BIG_ENDIAN) | 2694 | #if defined(__BIG_ENDIAN) |
1986 | u16 prod; | 2695 | u16 npackets; |
1987 | u8 zero_fill1; | 2696 | u8 params; |
1988 | struct doorbell_hdr header; | 2697 | #define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) |
2698 | #define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 | ||
2699 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) | ||
2700 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 | ||
2701 | #define ETH_TX_DOORBELL_SPARE (0x1<<7) | ||
2702 | #define ETH_TX_DOORBELL_SPARE_SHIFT 7 | ||
2703 | struct doorbell_hdr hdr; | ||
1989 | #elif defined(__LITTLE_ENDIAN) | 2704 | #elif defined(__LITTLE_ENDIAN) |
1990 | struct doorbell_hdr header; | 2705 | struct doorbell_hdr hdr; |
1991 | u8 zero_fill1; | 2706 | u8 params; |
1992 | u16 prod; | 2707 | #define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) |
2708 | #define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 | ||
2709 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) | ||
2710 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 | ||
2711 | #define ETH_TX_DOORBELL_SPARE (0x1<<7) | ||
2712 | #define ETH_TX_DOORBELL_SPARE_SHIFT 7 | ||
2713 | u16 npackets; | ||
1993 | #endif | 2714 | #endif |
1994 | }; | 2715 | }; |
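
A hedged sketch of preparing the Tx doorbell defined above: npackets counts the packets added since the last doorbell, and NUM_BDS encodes the BD count in params. Filling doorbell_hdr and performing the MMIO write are omitted, and nbds is an assumed local variable.

struct eth_tx_doorbell db;

memset(&db, 0, sizeof(db));
db.npackets = 1;		/* one packet queued since the last ring */
db.params = (nbds << ETH_TX_DOORBELL_NUM_BDS_SHIFT) &
	    ETH_TX_DOORBELL_NUM_BDS;
/* db.hdr setup and the doorbell register write are not shown here */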
1995 | 2716 | ||
@@ -2000,7 +2721,7 @@ struct doorbell_set_prod { | |||
2000 | struct hc_status_block_e1x { | 2721 | struct hc_status_block_e1x { |
2001 | __le16 index_values[HC_SB_MAX_INDICES_E1X]; | 2722 | __le16 index_values[HC_SB_MAX_INDICES_E1X]; |
2002 | __le16 running_index[HC_SB_MAX_SM]; | 2723 | __le16 running_index[HC_SB_MAX_SM]; |
2003 | u32 rsrv; | 2724 | __le32 rsrv[11]; |
2004 | }; | 2725 | }; |
2005 | 2726 | ||
2006 | /* | 2727 | /* |
@@ -2017,7 +2738,7 @@ struct host_hc_status_block_e1x { | |||
2017 | struct hc_status_block_e2 { | 2738 | struct hc_status_block_e2 { |
2018 | __le16 index_values[HC_SB_MAX_INDICES_E2]; | 2739 | __le16 index_values[HC_SB_MAX_INDICES_E2]; |
2019 | __le16 running_index[HC_SB_MAX_SM]; | 2740 | __le16 running_index[HC_SB_MAX_SM]; |
2020 | u32 reserved; | 2741 | __le32 reserved[11]; |
2021 | }; | 2742 | }; |
2022 | 2743 | ||
2023 | /* | 2744 | /* |
@@ -2138,6 +2859,16 @@ union igu_consprod_reg { | |||
2138 | 2859 | ||
2139 | 2860 | ||
2140 | /* | 2861 | /* |
2862 | * Igu control commands | ||
2863 | */ | ||
2864 | enum igu_ctrl_cmd { | ||
2865 | IGU_CTRL_CMD_TYPE_RD, | ||
2866 | IGU_CTRL_CMD_TYPE_WR, | ||
2867 | MAX_IGU_CTRL_CMD | ||
2868 | }; | ||
2869 | |||
2870 | |||
2871 | /* | ||
2141 | * Control register for the IGU command register | 2872 | * Control register for the IGU command register |
2142 | */ | 2873 | */ |
2143 | struct igu_ctrl_reg { | 2874 | struct igu_ctrl_reg { |
@@ -2156,6 +2887,29 @@ struct igu_ctrl_reg { | |||
2156 | 2887 | ||
2157 | 2888 | ||
2158 | /* | 2889 | /* |
2890 | * Igu interrupt command | ||
2891 | */ | ||
2892 | enum igu_int_cmd { | ||
2893 | IGU_INT_ENABLE, | ||
2894 | IGU_INT_DISABLE, | ||
2895 | IGU_INT_NOP, | ||
2896 | IGU_INT_NOP2, | ||
2897 | MAX_IGU_INT_CMD | ||
2898 | }; | ||
2899 | |||
2900 | |||
2901 | /* | ||
2902 | * Igu segments | ||
2903 | */ | ||
2904 | enum igu_seg_access { | ||
2905 | IGU_SEG_ACCESS_NORM, | ||
2906 | IGU_SEG_ACCESS_DEF, | ||
2907 | IGU_SEG_ACCESS_ATTN, | ||
2908 | MAX_IGU_SEG_ACCESS | ||
2909 | }; | ||
2910 | |||
2911 | |||
2912 | /* | ||
2159 | * Parser parsing flags field | 2913 | * Parser parsing flags field |
2160 | */ | 2914 | */ |
2161 | struct parsing_flags { | 2915 | struct parsing_flags { |
@@ -2189,94 +2943,46 @@ struct parsing_flags { | |||
2189 | }; | 2943 | }; |
2190 | 2944 | ||
2191 | 2945 | ||
2192 | struct regpair { | 2946 | /* |
2193 | __le32 lo; | 2947 | * Parsing flags for TCP ACK type |
2194 | __le32 hi; | 2948 | */ |
2949 | enum prs_flags_ack_type { | ||
2950 | PRS_FLAG_PUREACK_PIGGY, | ||
2951 | PRS_FLAG_PUREACK_PURE, | ||
2952 | MAX_PRS_FLAGS_ACK_TYPE | ||
2195 | }; | 2953 | }; |
2196 | 2954 | ||
2197 | 2955 | ||
2198 | /* | 2956 | /* |
2199 | * dmae command structure | 2957 | * Parsing flags for Ethernet address type |
2200 | */ | 2958 | */ |
2201 | struct dmae_command { | 2959 | enum prs_flags_eth_addr_type { |
2202 | u32 opcode; | 2960 | PRS_FLAG_ETHTYPE_NON_UNICAST, |
2203 | #define DMAE_COMMAND_SRC (0x1<<0) | 2961 | PRS_FLAG_ETHTYPE_UNICAST, |
2204 | #define DMAE_COMMAND_SRC_SHIFT 0 | 2962 | MAX_PRS_FLAGS_ETH_ADDR_TYPE |
2205 | #define DMAE_COMMAND_DST (0x3<<1) | ||
2206 | #define DMAE_COMMAND_DST_SHIFT 1 | ||
2207 | #define DMAE_COMMAND_C_DST (0x1<<3) | ||
2208 | #define DMAE_COMMAND_C_DST_SHIFT 3 | ||
2209 | #define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4) | ||
2210 | #define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4 | ||
2211 | #define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5) | ||
2212 | #define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5 | ||
2213 | #define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6) | ||
2214 | #define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6 | ||
2215 | #define DMAE_COMMAND_ENDIANITY (0x3<<9) | ||
2216 | #define DMAE_COMMAND_ENDIANITY_SHIFT 9 | ||
2217 | #define DMAE_COMMAND_PORT (0x1<<11) | ||
2218 | #define DMAE_COMMAND_PORT_SHIFT 11 | ||
2219 | #define DMAE_COMMAND_CRC_RESET (0x1<<12) | ||
2220 | #define DMAE_COMMAND_CRC_RESET_SHIFT 12 | ||
2221 | #define DMAE_COMMAND_SRC_RESET (0x1<<13) | ||
2222 | #define DMAE_COMMAND_SRC_RESET_SHIFT 13 | ||
2223 | #define DMAE_COMMAND_DST_RESET (0x1<<14) | ||
2224 | #define DMAE_COMMAND_DST_RESET_SHIFT 14 | ||
2225 | #define DMAE_COMMAND_E1HVN (0x3<<15) | ||
2226 | #define DMAE_COMMAND_E1HVN_SHIFT 15 | ||
2227 | #define DMAE_COMMAND_DST_VN (0x3<<17) | ||
2228 | #define DMAE_COMMAND_DST_VN_SHIFT 17 | ||
2229 | #define DMAE_COMMAND_C_FUNC (0x1<<19) | ||
2230 | #define DMAE_COMMAND_C_FUNC_SHIFT 19 | ||
2231 | #define DMAE_COMMAND_ERR_POLICY (0x3<<20) | ||
2232 | #define DMAE_COMMAND_ERR_POLICY_SHIFT 20 | ||
2233 | #define DMAE_COMMAND_RESERVED0 (0x3FF<<22) | ||
2234 | #define DMAE_COMMAND_RESERVED0_SHIFT 22 | ||
2235 | u32 src_addr_lo; | ||
2236 | u32 src_addr_hi; | ||
2237 | u32 dst_addr_lo; | ||
2238 | u32 dst_addr_hi; | ||
2239 | #if defined(__BIG_ENDIAN) | ||
2240 | u16 reserved1; | ||
2241 | u16 len; | ||
2242 | #elif defined(__LITTLE_ENDIAN) | ||
2243 | u16 len; | ||
2244 | u16 reserved1; | ||
2245 | #endif | ||
2246 | u32 comp_addr_lo; | ||
2247 | u32 comp_addr_hi; | ||
2248 | u32 comp_val; | ||
2249 | u32 crc32; | ||
2250 | u32 crc32_c; | ||
2251 | #if defined(__BIG_ENDIAN) | ||
2252 | u16 crc16_c; | ||
2253 | u16 crc16; | ||
2254 | #elif defined(__LITTLE_ENDIAN) | ||
2255 | u16 crc16; | ||
2256 | u16 crc16_c; | ||
2257 | #endif | ||
2258 | #if defined(__BIG_ENDIAN) | ||
2259 | u16 reserved3; | ||
2260 | u16 crc_t10; | ||
2261 | #elif defined(__LITTLE_ENDIAN) | ||
2262 | u16 crc_t10; | ||
2263 | u16 reserved3; | ||
2264 | #endif | ||
2265 | #if defined(__BIG_ENDIAN) | ||
2266 | u16 xsum8; | ||
2267 | u16 xsum16; | ||
2268 | #elif defined(__LITTLE_ENDIAN) | ||
2269 | u16 xsum16; | ||
2270 | u16 xsum8; | ||
2271 | #endif | ||
2272 | }; | 2963 | }; |
2273 | 2964 | ||
2274 | 2965 | ||
2275 | struct double_regpair { | 2966 | /* |
2276 | u32 regpair0_lo; | 2967 | * Parsing flags for over-ethernet protocol |
2277 | u32 regpair0_hi; | 2968 | */ |
2278 | u32 regpair1_lo; | 2969 | enum prs_flags_over_eth { |
2279 | u32 regpair1_hi; | 2970 | PRS_FLAG_OVERETH_UNKNOWN, |
2971 | PRS_FLAG_OVERETH_IPV4, | ||
2972 | PRS_FLAG_OVERETH_IPV6, | ||
2973 | PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN, | ||
2974 | MAX_PRS_FLAGS_OVER_ETH | ||
2975 | }; | ||
2976 | |||
2977 | |||
2978 | /* | ||
2979 | * Parsing flags for over-IP protocol | ||
2980 | */ | ||
2981 | enum prs_flags_over_ip { | ||
2982 | PRS_FLAG_OVERIP_UNKNOWN, | ||
2983 | PRS_FLAG_OVERIP_TCP, | ||
2984 | PRS_FLAG_OVERIP_UDP, | ||
2985 | MAX_PRS_FLAGS_OVER_IP | ||
2280 | }; | 2986 | }; |
2281 | 2987 | ||
2282 | 2988 | ||
@@ -2297,54 +3003,23 @@ struct sdm_op_gen { | |||
2297 | #define SDM_OP_GEN_RESERVED_SHIFT 17 | 3003 | #define SDM_OP_GEN_RESERVED_SHIFT 17 |
2298 | }; | 3004 | }; |
2299 | 3005 | ||
2300 | /* | ||
2301 | * The eth Rx Buffer Descriptor | ||
2302 | */ | ||
2303 | struct eth_rx_bd { | ||
2304 | __le32 addr_lo; | ||
2305 | __le32 addr_hi; | ||
2306 | }; | ||
2307 | 3006 | ||
2308 | /* | 3007 | /* |
2309 | * The eth Rx SGE Descriptor | 3008 | * Timers connection context |
2310 | */ | ||
2311 | struct eth_rx_sge { | ||
2312 | __le32 addr_lo; | ||
2313 | __le32 addr_hi; | ||
2314 | }; | ||
2315 | |||
2316 | |||
2317 | |||
2318 | /* | ||
2319 | * The eth storm context of Ustorm | ||
2320 | */ | ||
2321 | struct ustorm_eth_st_context { | ||
2322 | u32 reserved0[48]; | ||
2323 | }; | ||
2324 | |||
2325 | /* | ||
2326 | * The eth storm context of Tstorm | ||
2327 | */ | 3009 | */ |
2328 | struct tstorm_eth_st_context { | 3010 | struct timers_block_context { |
2329 | u32 __reserved0[28]; | 3011 | u32 __reserved_0; |
3012 | u32 __reserved_1; | ||
3013 | u32 __reserved_2; | ||
3014 | u32 flags; | ||
3015 | #define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0) | ||
3016 | #define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0 | ||
3017 | #define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2) | ||
3018 | #define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2 | ||
3019 | #define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3) | ||
3020 | #define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3 | ||
2330 | }; | 3021 | }; |
2331 | 3022 | ||
2332 | /* | ||
2333 | * The eth aggregative context of Xstorm | ||
2334 | */ | ||
2335 | struct xstorm_eth_ag_context { | ||
2336 | u32 reserved0; | ||
2337 | #if defined(__BIG_ENDIAN) | ||
2338 | u8 cdu_reserved; | ||
2339 | u8 reserved2; | ||
2340 | u16 reserved1; | ||
2341 | #elif defined(__LITTLE_ENDIAN) | ||
2342 | u16 reserved1; | ||
2343 | u8 reserved2; | ||
2344 | u8 cdu_reserved; | ||
2345 | #endif | ||
2346 | u32 reserved3[30]; | ||
2347 | }; | ||
2348 | 3023 | ||
2349 | /* | 3024 | /* |
2350 | * The eth aggregative context of Tstorm | 3025 | * The eth aggregative context of Tstorm |
@@ -2355,14 +3030,6 @@ struct tstorm_eth_ag_context { | |||
2355 | 3030 | ||
2356 | 3031 | ||
2357 | /* | 3032 | /* |
2358 | * The eth aggregative context of Cstorm | ||
2359 | */ | ||
2360 | struct cstorm_eth_ag_context { | ||
2361 | u32 __reserved0[10]; | ||
2362 | }; | ||
2363 | |||
2364 | |||
2365 | /* | ||
2366 | * The eth aggregative context of Ustorm | 3033 | * The eth aggregative context of Ustorm |
2367 | */ | 3034 | */ |
2368 | struct ustorm_eth_ag_context { | 3035 | struct ustorm_eth_ag_context { |
@@ -2379,229 +3046,81 @@ struct ustorm_eth_ag_context { | |||
2379 | u32 __reserved3[6]; | 3046 | u32 __reserved3[6]; |
2380 | }; | 3047 | }; |
2381 | 3048 | ||
2382 | /* | ||
2383 | * Timers connection context | ||
2384 | */ | ||
2385 | struct timers_block_context { | ||
2386 | u32 __reserved_0; | ||
2387 | u32 __reserved_1; | ||
2388 | u32 __reserved_2; | ||
2389 | u32 flags; | ||
2390 | #define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0) | ||
2391 | #define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0 | ||
2392 | #define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2) | ||
2393 | #define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2 | ||
2394 | #define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3) | ||
2395 | #define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3 | ||
2396 | }; | ||
2397 | 3049 | ||
2398 | /* | 3050 | /* |
2399 | * structure for easy accessibility to assembler | 3051 | * The eth aggregative context of Xstorm |
2400 | */ | ||
2401 | struct eth_tx_bd_flags { | ||
2402 | u8 as_bitfield; | ||
2403 | #define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0) | ||
2404 | #define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0 | ||
2405 | #define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1) | ||
2406 | #define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1 | ||
2407 | #define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2) | ||
2408 | #define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2 | ||
2409 | #define ETH_TX_BD_FLAGS_START_BD (0x1<<4) | ||
2410 | #define ETH_TX_BD_FLAGS_START_BD_SHIFT 4 | ||
2411 | #define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5) | ||
2412 | #define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5 | ||
2413 | #define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6) | ||
2414 | #define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6 | ||
2415 | #define ETH_TX_BD_FLAGS_IPV6 (0x1<<7) | ||
2416 | #define ETH_TX_BD_FLAGS_IPV6_SHIFT 7 | ||
2417 | }; | ||
2418 | |||
2419 | /* | ||
2420 | * The eth Tx Buffer Descriptor | ||
2421 | */ | ||
2422 | struct eth_tx_start_bd { | ||
2423 | __le32 addr_lo; | ||
2424 | __le32 addr_hi; | ||
2425 | __le16 nbd; | ||
2426 | __le16 nbytes; | ||
2427 | __le16 vlan_or_ethertype; | ||
2428 | struct eth_tx_bd_flags bd_flags; | ||
2429 | u8 general_data; | ||
2430 | #define ETH_TX_START_BD_HDR_NBDS (0x3F<<0) | ||
2431 | #define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 | ||
2432 | #define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6) | ||
2433 | #define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6 | ||
2434 | }; | ||
2435 | |||
2436 | /* | ||
2437 | * Tx regular BD structure | ||
2438 | */ | ||
2439 | struct eth_tx_bd { | ||
2440 | __le32 addr_lo; | ||
2441 | __le32 addr_hi; | ||
2442 | __le16 total_pkt_bytes; | ||
2443 | __le16 nbytes; | ||
2444 | u8 reserved[4]; | ||
2445 | }; | ||
2446 | |||
2447 | /* | ||
2448 | * Tx parsing BD structure for ETH E1/E1h | ||
2449 | */ | ||
2450 | struct eth_tx_parse_bd_e1x { | ||
2451 | u8 global_data; | ||
2452 | #define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0) | ||
2453 | #define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0 | ||
2454 | #define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4) | ||
2455 | #define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4 | ||
2456 | #define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5) | ||
2457 | #define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5 | ||
2458 | #define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6) | ||
2459 | #define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6 | ||
2460 | #define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7) | ||
2461 | #define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7 | ||
2462 | u8 tcp_flags; | ||
2463 | #define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0) | ||
2464 | #define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0 | ||
2465 | #define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1) | ||
2466 | #define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1 | ||
2467 | #define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2) | ||
2468 | #define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2 | ||
2469 | #define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3) | ||
2470 | #define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3 | ||
2471 | #define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4) | ||
2472 | #define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4 | ||
2473 | #define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5) | ||
2474 | #define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5 | ||
2475 | #define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6) | ||
2476 | #define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6 | ||
2477 | #define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7) | ||
2478 | #define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7 | ||
2479 | u8 ip_hlen_w; | ||
2480 | s8 reserved; | ||
2481 | __le16 total_hlen_w; | ||
2482 | __le16 tcp_pseudo_csum; | ||
2483 | __le16 lso_mss; | ||
2484 | __le16 ip_id; | ||
2485 | __le32 tcp_send_seq; | ||
2486 | }; | ||
2487 | |||
2488 | /* | ||
2489 | * Tx parsing BD structure for ETH E2 | ||
2490 | */ | 3052 | */ |
2491 | struct eth_tx_parse_bd_e2 { | 3053 | struct xstorm_eth_ag_context { |
2492 | __le16 dst_mac_addr_lo; | 3054 | u32 reserved0; |
2493 | __le16 dst_mac_addr_mid; | 3055 | #if defined(__BIG_ENDIAN) |
2494 | __le16 dst_mac_addr_hi; | 3056 | u8 cdu_reserved; |
2495 | __le16 src_mac_addr_lo; | 3057 | u8 reserved2; |
2496 | __le16 src_mac_addr_mid; | 3058 | u16 reserved1; |
2497 | __le16 src_mac_addr_hi; | 3059 | #elif defined(__LITTLE_ENDIAN) |
2498 | __le32 parsing_data; | 3060 | u16 reserved1; |
2499 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0) | 3061 | u8 reserved2; |
2500 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0 | 3062 | u8 cdu_reserved; |
2501 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13) | 3063 | #endif |
2502 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13 | 3064 | u32 reserved3[30]; |
2503 | #define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17) | ||
2504 | #define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17 | ||
2505 | #define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31) | ||
2506 | #define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31 | ||
2507 | }; | 3065 | }; |
2508 | 3066 | ||
2509 | /* | ||
2510 | * The last BD in the BD memory will hold a pointer to the next BD memory | ||
2511 | */ | ||
2512 | struct eth_tx_next_bd { | ||
2513 | __le32 addr_lo; | ||
2514 | __le32 addr_hi; | ||
2515 | u8 reserved[8]; | ||
2516 | }; | ||
2517 | 3067 | ||
2518 | /* | 3068 | /* |
2519 | * union for 4 Bd types | 3069 | * doorbell message sent to the chip |
2520 | */ | 3070 | */ |
2521 | union eth_tx_bd_types { | 3071 | struct doorbell { |
2522 | struct eth_tx_start_bd start_bd; | 3072 | #if defined(__BIG_ENDIAN) |
2523 | struct eth_tx_bd reg_bd; | 3073 | u16 zero_fill2; |
2524 | struct eth_tx_parse_bd_e1x parse_bd_e1x; | 3074 | u8 zero_fill1; |
2525 | struct eth_tx_parse_bd_e2 parse_bd_e2; | 3075 | struct doorbell_hdr header; |
2526 | struct eth_tx_next_bd next_bd; | 3076 | #elif defined(__LITTLE_ENDIAN) |
3077 | struct doorbell_hdr header; | ||
3078 | u8 zero_fill1; | ||
3079 | u16 zero_fill2; | ||
3080 | #endif | ||
2527 | }; | 3081 | }; |
2528 | 3082 | ||
2529 | 3083 | ||
2530 | /* | 3084 | /* |
2531 | * The eth storm context of Xstorm | 3085 | * doorbell message sent to the chip |
2532 | */ | 3086 | */ |
2533 | struct xstorm_eth_st_context { | 3087 | struct doorbell_set_prod { |
2534 | u32 reserved0[60]; | 3088 | #if defined(__BIG_ENDIAN) |
3089 | u16 prod; | ||
3090 | u8 zero_fill1; | ||
3091 | struct doorbell_hdr header; | ||
3092 | #elif defined(__LITTLE_ENDIAN) | ||
3093 | struct doorbell_hdr header; | ||
3094 | u8 zero_fill1; | ||
3095 | u16 prod; | ||
3096 | #endif | ||
2535 | }; | 3097 | }; |
2536 | 3098 | ||
2537 | /* | ||
2538 | * The eth storm context of Cstorm | ||
2539 | */ | ||
2540 | struct cstorm_eth_st_context { | ||
2541 | u32 __reserved0[4]; | ||
2542 | }; | ||
2543 | 3099 | ||
2544 | /* | 3100 | struct regpair { |
2545 | * Ethernet connection context | 3101 | __le32 lo; |
2546 | */ | 3102 | __le32 hi; |
2547 | struct eth_context { | ||
2548 | struct ustorm_eth_st_context ustorm_st_context; | ||
2549 | struct tstorm_eth_st_context tstorm_st_context; | ||
2550 | struct xstorm_eth_ag_context xstorm_ag_context; | ||
2551 | struct tstorm_eth_ag_context tstorm_ag_context; | ||
2552 | struct cstorm_eth_ag_context cstorm_ag_context; | ||
2553 | struct ustorm_eth_ag_context ustorm_ag_context; | ||
2554 | struct timers_block_context timers_context; | ||
2555 | struct xstorm_eth_st_context xstorm_st_context; | ||
2556 | struct cstorm_eth_st_context cstorm_st_context; | ||
2557 | }; | 3103 | }; |
2558 | 3104 | ||
2559 | 3105 | ||
2560 | /* | 3106 | /* |
2561 | * Ethernet doorbell | 3107 | * Classify rule opcodes in E2/E3 |
2562 | */ | 3108 | */ |
2563 | struct eth_tx_doorbell { | 3109 | enum classify_rule { |
2564 | #if defined(__BIG_ENDIAN) | 3110 | CLASSIFY_RULE_OPCODE_MAC, |
2565 | u16 npackets; | 3111 | CLASSIFY_RULE_OPCODE_VLAN, |
2566 | u8 params; | 3112 | CLASSIFY_RULE_OPCODE_PAIR, |
2567 | #define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) | 3113 | MAX_CLASSIFY_RULE |
2568 | #define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 | ||
2569 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) | ||
2570 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 | ||
2571 | #define ETH_TX_DOORBELL_SPARE (0x1<<7) | ||
2572 | #define ETH_TX_DOORBELL_SPARE_SHIFT 7 | ||
2573 | struct doorbell_hdr hdr; | ||
2574 | #elif defined(__LITTLE_ENDIAN) | ||
2575 | struct doorbell_hdr hdr; | ||
2576 | u8 params; | ||
2577 | #define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) | ||
2578 | #define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 | ||
2579 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) | ||
2580 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 | ||
2581 | #define ETH_TX_DOORBELL_SPARE (0x1<<7) | ||
2582 | #define ETH_TX_DOORBELL_SPARE_SHIFT 7 | ||
2583 | u16 npackets; | ||
2584 | #endif | ||
2585 | }; | 3114 | }; |
2586 | 3115 | ||
2587 | 3116 | ||
2588 | /* | 3117 | /* |
2589 | * client init fc data | 3118 | * Classify rule types in E2/E3 |
2590 | */ | 3119 | */ |
2591 | struct client_init_fc_data { | 3120 | enum classify_rule_action_type { |
2592 | __le16 cqe_pause_thr_low; | 3121 | CLASSIFY_RULE_REMOVE, |
2593 | __le16 cqe_pause_thr_high; | 3122 | CLASSIFY_RULE_ADD, |
2594 | __le16 bd_pause_thr_low; | 3123 | MAX_CLASSIFY_RULE_ACTION_TYPE |
2595 | __le16 bd_pause_thr_high; | ||
2596 | __le16 sge_pause_thr_low; | ||
2597 | __le16 sge_pause_thr_high; | ||
2598 | __le16 rx_cos_mask; | ||
2599 | u8 safc_group_num; | ||
2600 | u8 safc_group_en_flg; | ||
2601 | u8 traffic_type; | ||
2602 | u8 reserved0; | ||
2603 | __le16 reserved1; | ||
2604 | __le32 reserved2; | ||
2605 | }; | 3124 | }; |
2606 | 3125 | ||
2607 | 3126 | ||
@@ -2615,8 +3134,12 @@ struct client_init_general_data { | |||
2615 | u8 is_fcoe_flg; | 3134 | u8 is_fcoe_flg; |
2616 | u8 activate_flg; | 3135 | u8 activate_flg; |
2617 | u8 sp_client_id; | 3136 | u8 sp_client_id; |
2618 | __le16 reserved0; | 3137 | __le16 mtu; |
2619 | __le32 reserved1[2]; | 3138 | u8 statistics_zero_flg; |
3139 | u8 func_id; | ||
3140 | u8 cos; | ||
3141 | u8 traffic_type; | ||
3142 | u32 reserved0; | ||
2620 | }; | 3143 | }; |
2621 | 3144 | ||
2622 | 3145 | ||
@@ -2624,7 +3147,13 @@ struct client_init_general_data { | |||
2624 | * client init rx data | 3147 | * client init rx data |
2625 | */ | 3148 | */ |
2626 | struct client_init_rx_data { | 3149 | struct client_init_rx_data { |
2627 | u8 tpa_en_flg; | 3150 | u8 tpa_en; |
3151 | #define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0) | ||
3152 | #define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0 | ||
3153 | #define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1) | ||
3154 | #define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1 | ||
3155 | #define CLIENT_INIT_RX_DATA_RESERVED5 (0x3F<<2) | ||
3156 | #define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 2 | ||
2628 | u8 vmqueue_mode_en_flg; | 3157 | u8 vmqueue_mode_en_flg; |
2629 | u8 extra_data_over_sgl_en_flg; | 3158 | u8 extra_data_over_sgl_en_flg; |
2630 | u8 cache_line_alignment_log_size; | 3159 | u8 cache_line_alignment_log_size; |
@@ -2639,17 +3168,46 @@ struct client_init_rx_data { | |||
2639 | u8 outer_vlan_removal_enable_flg; | 3168 | u8 outer_vlan_removal_enable_flg; |
2640 | u8 status_block_id; | 3169 | u8 status_block_id; |
2641 | u8 rx_sb_index_number; | 3170 | u8 rx_sb_index_number; |
2642 | u8 reserved0[3]; | 3171 | u8 reserved0; |
2643 | __le16 bd_buff_size; | 3172 | u8 max_tpa_queues; |
3173 | u8 silent_vlan_removal_flg; | ||
3174 | __le16 max_bytes_on_bd; | ||
2644 | __le16 sge_buff_size; | 3175 | __le16 sge_buff_size; |
2645 | __le16 mtu; | 3176 | u8 approx_mcast_engine_id; |
3177 | u8 rss_engine_id; | ||
2646 | struct regpair bd_page_base; | 3178 | struct regpair bd_page_base; |
2647 | struct regpair sge_page_base; | 3179 | struct regpair sge_page_base; |
2648 | struct regpair cqe_page_base; | 3180 | struct regpair cqe_page_base; |
2649 | u8 is_leading_rss; | 3181 | u8 is_leading_rss; |
2650 | u8 is_approx_mcast; | 3182 | u8 is_approx_mcast; |
2651 | __le16 max_agg_size; | 3183 | __le16 max_agg_size; |
2652 | __le32 reserved2[3]; | 3184 | __le16 state; |
3185 | #define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0) | ||
3186 | #define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0 | ||
3187 | #define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1) | ||
3188 | #define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1 | ||
3189 | #define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2) | ||
3190 | #define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2 | ||
3191 | #define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3) | ||
3192 | #define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3 | ||
3193 | #define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4) | ||
3194 | #define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4 | ||
3195 | #define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5) | ||
3196 | #define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5 | ||
3197 | #define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6) | ||
3198 | #define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6 | ||
3199 | #define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7) | ||
3200 | #define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7 | ||
3201 | __le16 cqe_pause_thr_low; | ||
3202 | __le16 cqe_pause_thr_high; | ||
3203 | __le16 bd_pause_thr_low; | ||
3204 | __le16 bd_pause_thr_high; | ||
3205 | __le16 sge_pause_thr_low; | ||
3206 | __le16 sge_pause_thr_high; | ||
3207 | __le16 rx_cos_mask; | ||
3208 | __le16 silent_vlan_value; | ||
3209 | __le16 silent_vlan_mask; | ||
3210 | __le32 reserved6[2]; | ||
2653 | }; | 3211 | }; |
2654 | 3212 | ||
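Compared with the old layout, tpa_en is no longer a single enable byte: IPv4 and IPv6 aggregation are controlled by separate bits, and the new state word carries the same acceptance flags the filter-rules ramrod uses. A hedged sketch of filling just the TPA bits during client init (the surrounding setup code is omitted and the helper name is invented):

	#include <linux/types.h>
	#include "bnx2x_hsi.h"

	/* Illustrative only: enable per-IP-version TPA for a client being
	 * brought up; all other rx init fields are filled elsewhere. */
	static void example_fill_rx_tpa(struct client_init_rx_data *rx_data,
					bool tpa_ipv4, bool tpa_ipv6)
	{
		rx_data->tpa_en = 0;
		if (tpa_ipv4)
			rx_data->tpa_en |= CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
		if (tpa_ipv6)
			rx_data->tpa_en |= CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
	}
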
2655 | /* | 3213 | /* |
@@ -2659,11 +3217,25 @@ struct client_init_tx_data { | |||
2659 | u8 enforce_security_flg; | 3217 | u8 enforce_security_flg; |
2660 | u8 tx_status_block_id; | 3218 | u8 tx_status_block_id; |
2661 | u8 tx_sb_index_number; | 3219 | u8 tx_sb_index_number; |
2662 | u8 reserved0; | 3220 | u8 tss_leading_client_id; |
2663 | __le16 mtu; | 3221 | u8 tx_switching_flg; |
2664 | __le16 reserved1; | 3222 | u8 anti_spoofing_flg; |
3223 | __le16 default_vlan; | ||
2665 | struct regpair tx_bd_page_base; | 3224 | struct regpair tx_bd_page_base; |
2666 | __le32 reserved2[2]; | 3225 | __le16 state; |
3226 | #define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0) | ||
3227 | #define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0 | ||
3228 | #define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1) | ||
3229 | #define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1 | ||
3230 | #define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2) | ||
3231 | #define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2 | ||
3232 | #define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3) | ||
3233 | #define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3 | ||
3234 | #define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4) | ||
3235 | #define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4 | ||
3236 | u8 default_vlan_flg; | ||
3237 | u8 reserved2; | ||
3238 | __le32 reserved3; | ||
2667 | }; | 3239 | }; |
2668 | 3240 | ||
2669 | /* | 3241 | /* |
@@ -2673,7 +3245,146 @@ struct client_init_ramrod_data { | |||
2673 | struct client_init_general_data general; | 3245 | struct client_init_general_data general; |
2674 | struct client_init_rx_data rx; | 3246 | struct client_init_rx_data rx; |
2675 | struct client_init_tx_data tx; | 3247 | struct client_init_tx_data tx; |
2676 | struct client_init_fc_data fc; | 3248 | }; |
3249 | |||
3250 | |||
3251 | /* | ||
3252 | * client update ramrod data | ||
3253 | */ | ||
3254 | struct client_update_ramrod_data { | ||
3255 | u8 client_id; | ||
3256 | u8 func_id; | ||
3257 | u8 inner_vlan_removal_enable_flg; | ||
3258 | u8 inner_vlan_removal_change_flg; | ||
3259 | u8 outer_vlan_removal_enable_flg; | ||
3260 | u8 outer_vlan_removal_change_flg; | ||
3261 | u8 anti_spoofing_enable_flg; | ||
3262 | u8 anti_spoofing_change_flg; | ||
3263 | u8 activate_flg; | ||
3264 | u8 activate_change_flg; | ||
3265 | __le16 default_vlan; | ||
3266 | u8 default_vlan_enable_flg; | ||
3267 | u8 default_vlan_change_flg; | ||
3268 | __le16 silent_vlan_value; | ||
3269 | __le16 silent_vlan_mask; | ||
3270 | u8 silent_vlan_removal_flg; | ||
3271 | u8 silent_vlan_change_flg; | ||
3272 | __le32 echo; | ||
3273 | }; | ||
3274 | |||
3275 | |||
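Each updatable attribute in the client-update ramrod travels as a value plus a *_change_flg companion, so the firmware only applies the attributes the driver explicitly marks as changed. A sketch, under that reading of the structure, of requesting silent-VLAN stripping while leaving everything else untouched (allocation and posting of the ramrod are omitted; the helper name is invented):

	#include <linux/kernel.h>
	#include <linux/string.h>
	#include "bnx2x_hsi.h"

	/* Illustrative only: strip VLAN 'vid' silently on RX for one client;
	 * all other attributes keep change_flg == 0 and are ignored. */
	static void example_fill_silent_vlan_update(struct client_update_ramrod_data *data,
						    u8 client_id, u8 func_id, u16 vid)
	{
		memset(data, 0, sizeof(*data));
		data->client_id = client_id;
		data->func_id = func_id;
		data->silent_vlan_value = cpu_to_le16(vid);
		data->silent_vlan_mask = cpu_to_le16(0xfff);	/* match the 12 VID bits */
		data->silent_vlan_removal_flg = 1;
		data->silent_vlan_change_flg = 1;		/* apply this attribute */
	}
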
3276 | /* | ||
3277 | * The eth storm context of Cstorm | ||
3278 | */ | ||
3279 | struct cstorm_eth_st_context { | ||
3280 | u32 __reserved0[4]; | ||
3281 | }; | ||
3282 | |||
3283 | |||
3284 | struct double_regpair { | ||
3285 | u32 regpair0_lo; | ||
3286 | u32 regpair0_hi; | ||
3287 | u32 regpair1_lo; | ||
3288 | u32 regpair1_hi; | ||
3289 | }; | ||
3290 | |||
3291 | |||
3292 | /* | ||
3293 | * Ethernet address types used in ethernet tx BDs | ||
3294 | */ | ||
3295 | enum eth_addr_type { | ||
3296 | UNKNOWN_ADDRESS, | ||
3297 | UNICAST_ADDRESS, | ||
3298 | MULTICAST_ADDRESS, | ||
3299 | BROADCAST_ADDRESS, | ||
3300 | MAX_ETH_ADDR_TYPE | ||
3301 | }; | ||
3302 | |||
3303 | |||
3304 | /* | ||
3305 | * command header for eth classification rules | ||
3306 | */ | ||
3307 | struct eth_classify_cmd_header { | ||
3308 | u8 cmd_general_data; | ||
3309 | #define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0) | ||
3310 | #define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0 | ||
3311 | #define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1) | ||
3312 | #define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1 | ||
3313 | #define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2) | ||
3314 | #define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2 | ||
3315 | #define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4) | ||
3316 | #define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4 | ||
3317 | #define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5) | ||
3318 | #define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5 | ||
3319 | u8 func_id; | ||
3320 | u8 client_id; | ||
3321 | u8 reserved1; | ||
3322 | }; | ||
3323 | |||
3324 | |||
3325 | /* | ||
3326 | * header for eth classification config ramrod | ||
3327 | */ | ||
3328 | struct eth_classify_header { | ||
3329 | u8 rule_cnt; | ||
3330 | u8 reserved0; | ||
3331 | __le16 reserved1; | ||
3332 | __le32 echo; | ||
3333 | }; | ||
3334 | |||
3335 | |||
3336 | /* | ||
3337 | * Command for adding/removing a MAC classification rule | ||
3338 | */ | ||
3339 | struct eth_classify_mac_cmd { | ||
3340 | struct eth_classify_cmd_header header; | ||
3341 | __le32 reserved0; | ||
3342 | __le16 mac_lsb; | ||
3343 | __le16 mac_mid; | ||
3344 | __le16 mac_msb; | ||
3345 | __le16 reserved1; | ||
3346 | }; | ||
3347 | |||
3348 | |||
3349 | /* | ||
3350 | * Command for adding/removing a MAC-VLAN pair classification rule | ||
3351 | */ | ||
3352 | struct eth_classify_pair_cmd { | ||
3353 | struct eth_classify_cmd_header header; | ||
3354 | __le32 reserved0; | ||
3355 | __le16 mac_lsb; | ||
3356 | __le16 mac_mid; | ||
3357 | __le16 mac_msb; | ||
3358 | __le16 vlan; | ||
3359 | }; | ||
3360 | |||
3361 | |||
3362 | /* | ||
3363 | * Command for adding/removing a VLAN classification rule | ||
3364 | */ | ||
3365 | struct eth_classify_vlan_cmd { | ||
3366 | struct eth_classify_cmd_header header; | ||
3367 | __le32 reserved0; | ||
3368 | __le32 reserved1; | ||
3369 | __le16 reserved2; | ||
3370 | __le16 vlan; | ||
3371 | }; | ||
3372 | |||
3373 | /* | ||
3374 | * union for eth classification rule | ||
3375 | */ | ||
3376 | union eth_classify_rule_cmd { | ||
3377 | struct eth_classify_mac_cmd mac; | ||
3378 | struct eth_classify_vlan_cmd vlan; | ||
3379 | struct eth_classify_pair_cmd pair; | ||
3380 | }; | ||
3381 | |||
3382 | /* | ||
3383 | * parameters for eth classification configuration ramrod | ||
3384 | */ | ||
3385 | struct eth_classify_rules_ramrod_data { | ||
3386 | struct eth_classify_header header; | ||
3387 | union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT]; | ||
2677 | }; | 3388 | }; |
2678 | 3389 | ||
2679 | 3390 | ||
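The classification ramrod carries an array of rules, each prefixed by an eth_classify_cmd_header that selects the direction (RX/TX), an opcode from enum classify_rule, and whether the rule is added or removed. A hedged sketch of appending one "add unicast MAC on RX" rule; the byte pairing of the MAC into the msb/mid/lsb halves follows the usual firmware convention but is an assumption here, not something this header spells out.

	#include <linux/kernel.h>
	#include "bnx2x_hsi.h"

	/* Illustrative only: append an add-MAC rule for the RX path.
	 * Caller must keep rule_cnt below CLASSIFY_RULES_COUNT. */
	static void example_add_rx_mac_rule(struct eth_classify_rules_ramrod_data *data,
					    u8 func_id, u8 client_id, const u8 *mac)
	{
		int i = data->header.rule_cnt++;
		struct eth_classify_mac_cmd *cmd = &data->rules[i].mac;

		cmd->header.cmd_general_data =
			ETH_CLASSIFY_CMD_HEADER_RX_CMD |
			ETH_CLASSIFY_CMD_HEADER_IS_ADD |
			(CLASSIFY_RULE_OPCODE_MAC << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
		cmd->header.func_id = func_id;
		cmd->header.client_id = client_id;

		/* assumed big-endian byte pairing of the 48-bit MAC */
		cmd->mac_msb = cpu_to_le16((mac[0] << 8) | mac[1]);
		cmd->mac_mid = cpu_to_le16((mac[2] << 8) | mac[3]);
		cmd->mac_lsb = cpu_to_le16((mac[4] << 8) | mac[5]);
	}
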
@@ -2681,8 +3392,45 @@ struct client_init_ramrod_data { | |||
2681 | * The data contain client ID need to the ramrod | 3392 | * The data contain client ID need to the ramrod |
2682 | */ | 3393 | */ |
2683 | struct eth_common_ramrod_data { | 3394 | struct eth_common_ramrod_data { |
2684 | u32 client_id; | 3395 | __le32 client_id; |
2685 | u32 reserved1; | 3396 | __le32 reserved1; |
3397 | }; | ||
3398 | |||
3399 | |||
3400 | /* | ||
3401 | * The eth storm context of Ustorm | ||
3402 | */ | ||
3403 | struct ustorm_eth_st_context { | ||
3404 | u32 reserved0[52]; | ||
3405 | }; | ||
3406 | |||
3407 | /* | ||
3408 | * The eth storm context of Tstorm | ||
3409 | */ | ||
3410 | struct tstorm_eth_st_context { | ||
3411 | u32 __reserved0[28]; | ||
3412 | }; | ||
3413 | |||
3414 | /* | ||
3415 | * The eth storm context of Xstorm | ||
3416 | */ | ||
3417 | struct xstorm_eth_st_context { | ||
3418 | u32 reserved0[60]; | ||
3419 | }; | ||
3420 | |||
3421 | /* | ||
3422 | * Ethernet connection context | ||
3423 | */ | ||
3424 | struct eth_context { | ||
3425 | struct ustorm_eth_st_context ustorm_st_context; | ||
3426 | struct tstorm_eth_st_context tstorm_st_context; | ||
3427 | struct xstorm_eth_ag_context xstorm_ag_context; | ||
3428 | struct tstorm_eth_ag_context tstorm_ag_context; | ||
3429 | struct cstorm_eth_ag_context cstorm_ag_context; | ||
3430 | struct ustorm_eth_ag_context ustorm_ag_context; | ||
3431 | struct timers_block_context timers_context; | ||
3432 | struct xstorm_eth_st_context xstorm_st_context; | ||
3433 | struct cstorm_eth_st_context cstorm_st_context; | ||
2686 | }; | 3434 | }; |
2687 | 3435 | ||
2688 | 3436 | ||
@@ -2695,24 +3443,47 @@ union eth_sgl_or_raw_data { | |||
2695 | }; | 3443 | }; |
2696 | 3444 | ||
2697 | /* | 3445 | /* |
3446 | * eth FP end aggregation CQE parameters struct | ||
3447 | */ | ||
3448 | struct eth_end_agg_rx_cqe { | ||
3449 | u8 type_error_flags; | ||
3450 | #define ETH_END_AGG_RX_CQE_TYPE (0x3<<0) | ||
3451 | #define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0 | ||
3452 | #define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2) | ||
3453 | #define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2 | ||
3454 | #define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3) | ||
3455 | #define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3 | ||
3456 | u8 reserved1; | ||
3457 | u8 queue_index; | ||
3458 | u8 reserved2; | ||
3459 | __le32 timestamp_delta; | ||
3460 | __le16 num_of_coalesced_segs; | ||
3461 | __le16 pkt_len; | ||
3462 | u8 pure_ack_count; | ||
3463 | u8 reserved3; | ||
3464 | __le16 reserved4; | ||
3465 | union eth_sgl_or_raw_data sgl_or_raw_data; | ||
3466 | __le32 reserved5[8]; | ||
3467 | }; | ||
3468 | |||
3469 | |||
3470 | /* | ||
2698 | * regular eth FP CQE parameters struct | 3471 | * regular eth FP CQE parameters struct |
2699 | */ | 3472 | */ |
2700 | struct eth_fast_path_rx_cqe { | 3473 | struct eth_fast_path_rx_cqe { |
2701 | u8 type_error_flags; | 3474 | u8 type_error_flags; |
2702 | #define ETH_FAST_PATH_RX_CQE_TYPE (0x1<<0) | 3475 | #define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0) |
2703 | #define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0 | 3476 | #define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0 |
2704 | #define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<1) | 3477 | #define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2) |
2705 | #define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 1 | 3478 | #define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2 |
2706 | #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<2) | 3479 | #define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3) |
2707 | #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 2 | 3480 | #define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3 |
2708 | #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<3) | 3481 | #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4) |
2709 | #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 3 | 3482 | #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4 |
2710 | #define ETH_FAST_PATH_RX_CQE_START_FLG (0x1<<4) | 3483 | #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5) |
2711 | #define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4 | 3484 | #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5 |
2712 | #define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5) | 3485 | #define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) |
2713 | #define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5 | 3486 | #define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6 |
2714 | #define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x3<<6) | ||
2715 | #define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 6 | ||
2716 | u8 status_flags; | 3487 | u8 status_flags; |
2717 | #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) | 3488 | #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) |
2718 | #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 | 3489 | #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 |
@@ -2726,39 +3497,108 @@ struct eth_fast_path_rx_cqe { | |||
2726 | #define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6 | 3497 | #define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6 |
2727 | #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7) | 3498 | #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7) |
2728 | #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7 | 3499 | #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7 |
2729 | u8 placement_offset; | ||
2730 | u8 queue_index; | 3500 | u8 queue_index; |
3501 | u8 placement_offset; | ||
2731 | __le32 rss_hash_result; | 3502 | __le32 rss_hash_result; |
2732 | __le16 vlan_tag; | 3503 | __le16 vlan_tag; |
2733 | __le16 pkt_len; | 3504 | __le16 pkt_len; |
2734 | __le16 len_on_bd; | 3505 | __le16 len_on_bd; |
2735 | struct parsing_flags pars_flags; | 3506 | struct parsing_flags pars_flags; |
2736 | union eth_sgl_or_raw_data sgl_or_raw_data; | 3507 | union eth_sgl_or_raw_data sgl_or_raw_data; |
3508 | __le32 reserved1[8]; | ||
2737 | }; | 3509 | }; |
2738 | 3510 | ||
2739 | 3511 | ||
2740 | /* | 3512 | /* |
2741 | * The data for RSS setup ramrod | 3513 | * Command for setting classification flags for a client |
3514 | */ | ||
3515 | struct eth_filter_rules_cmd { | ||
3516 | u8 cmd_general_data; | ||
3517 | #define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0) | ||
3518 | #define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0 | ||
3519 | #define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1) | ||
3520 | #define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1 | ||
3521 | #define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2) | ||
3522 | #define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2 | ||
3523 | u8 func_id; | ||
3524 | u8 client_id; | ||
3525 | u8 reserved1; | ||
3526 | __le16 state; | ||
3527 | #define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0) | ||
3528 | #define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0 | ||
3529 | #define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1) | ||
3530 | #define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1 | ||
3531 | #define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2) | ||
3532 | #define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2 | ||
3533 | #define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3) | ||
3534 | #define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3 | ||
3535 | #define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4) | ||
3536 | #define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4 | ||
3537 | #define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5) | ||
3538 | #define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5 | ||
3539 | #define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6) | ||
3540 | #define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6 | ||
3541 | #define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7) | ||
3542 | #define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7 | ||
3543 | __le16 reserved3; | ||
3544 | struct regpair reserved4; | ||
3545 | }; | ||
3546 | |||
3547 | |||
3548 | /* | ||
3549 | * parameters for eth classification filters ramrod | ||
3550 | */ | ||
3551 | struct eth_filter_rules_ramrod_data { | ||
3552 | struct eth_classify_header header; | ||
3553 | struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT]; | ||
3554 | }; | ||
3555 | |||
3556 | |||
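Each eth_filter_rules_cmd reprograms the RX and/or TX acceptance state of one client, and its state bits mirror the CLIENT_INIT_RX_DATA_* acceptance flags above. A sketch of a single rule that makes a client accept all multicast and broadcast traffic on RX (buffer allocation and posting of the ramrod are omitted; the helper name is invented):

	#include <linux/kernel.h>
	#include "bnx2x_hsi.h"

	/* Illustrative only: one RX filter rule switching a client to
	 * "accept all multicast and all broadcast". */
	static void example_fill_mcast_accept_rule(struct eth_filter_rules_ramrod_data *data,
						   u8 func_id, u8 client_id)
	{
		struct eth_filter_rules_cmd *cmd = &data->rules[0];

		data->header.rule_cnt = 1;
		cmd->cmd_general_data = ETH_FILTER_RULES_CMD_RX_CMD;
		cmd->func_id = func_id;
		cmd->client_id = client_id;
		cmd->state = cpu_to_le16(ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL |
					 ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL);
	}
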
3557 | /* | ||
3558 | * parameters for eth classification configuration ramrod | ||
3559 | */ | ||
3560 | struct eth_general_rules_ramrod_data { | ||
3561 | struct eth_classify_header header; | ||
3562 | union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT]; | ||
3563 | }; | ||
3564 | |||
3565 | |||
3566 | /* | ||
3567 | * The data for Halt ramrod | ||
2742 | */ | 3568 | */ |
2743 | struct eth_halt_ramrod_data { | 3569 | struct eth_halt_ramrod_data { |
2744 | u32 client_id; | 3570 | __le32 client_id; |
2745 | u32 reserved0; | 3571 | __le32 reserved0; |
2746 | }; | 3572 | }; |
2747 | 3573 | ||
3574 | |||
2748 | /* | 3575 | /* |
2749 | * The data for statistics query ramrod | 3576 | * Command for setting multicast classification for a client |
2750 | */ | 3577 | */ |
2751 | struct common_query_ramrod_data { | 3578 | struct eth_multicast_rules_cmd { |
2752 | #if defined(__BIG_ENDIAN) | 3579 | u8 cmd_general_data; |
2753 | u8 reserved0; | 3580 | #define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0) |
2754 | u8 collect_port; | 3581 | #define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0 |
2755 | u16 drv_counter; | 3582 | #define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1) |
2756 | #elif defined(__LITTLE_ENDIAN) | 3583 | #define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1 |
2757 | u16 drv_counter; | 3584 | #define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2) |
2758 | u8 collect_port; | 3585 | #define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2 |
2759 | u8 reserved0; | 3586 | #define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3) |
2760 | #endif | 3587 | #define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3 |
2761 | u32 ctr_id_vector; | 3588 | u8 func_id; |
3589 | u8 bin_id; | ||
3590 | u8 engine_id; | ||
3591 | __le32 reserved2; | ||
3592 | struct regpair reserved3; | ||
3593 | }; | ||
3594 | |||
3595 | |||
3596 | /* | ||
3597 | * parameters for multicast classification ramrod | ||
3598 | */ | ||
3599 | struct eth_multicast_rules_ramrod_data { | ||
3600 | struct eth_classify_header header; | ||
3601 | struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT]; | ||
2762 | }; | 3602 | }; |
2763 | 3603 | ||
2764 | 3604 | ||
@@ -2779,16 +3619,86 @@ union eth_ramrod_data { | |||
2779 | 3619 | ||
2780 | 3620 | ||
2781 | /* | 3621 | /* |
3622 | * RSS toeplitz hash type, as reported in CQE | ||
3623 | */ | ||
3624 | enum eth_rss_hash_type { | ||
3625 | DEFAULT_HASH_TYPE, | ||
3626 | IPV4_HASH_TYPE, | ||
3627 | TCP_IPV4_HASH_TYPE, | ||
3628 | IPV6_HASH_TYPE, | ||
3629 | TCP_IPV6_HASH_TYPE, | ||
3630 | VLAN_PRI_HASH_TYPE, | ||
3631 | E1HOV_PRI_HASH_TYPE, | ||
3632 | DSCP_HASH_TYPE, | ||
3633 | MAX_ETH_RSS_HASH_TYPE | ||
3634 | }; | ||
3635 | |||
3636 | |||
3637 | /* | ||
3638 | * Ethernet RSS mode | ||
3639 | */ | ||
3640 | enum eth_rss_mode { | ||
3641 | ETH_RSS_MODE_DISABLED, | ||
3642 | ETH_RSS_MODE_REGULAR, | ||
3643 | ETH_RSS_MODE_VLAN_PRI, | ||
3644 | ETH_RSS_MODE_E1HOV_PRI, | ||
3645 | ETH_RSS_MODE_IP_DSCP, | ||
3646 | MAX_ETH_RSS_MODE | ||
3647 | }; | ||
3648 | |||
3649 | |||
3650 | /* | ||
3651 | * parameters for RSS update ramrod (E2) | ||
3652 | */ | ||
3653 | struct eth_rss_update_ramrod_data { | ||
3654 | u8 rss_engine_id; | ||
3655 | u8 capabilities; | ||
3656 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0) | ||
3657 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0 | ||
3658 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1) | ||
3659 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1 | ||
3660 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2) | ||
3661 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2 | ||
3662 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3) | ||
3663 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3 | ||
3664 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4) | ||
3665 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4 | ||
3666 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) | ||
3667 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5 | ||
3668 | #define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6) | ||
3669 | #define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6 | ||
3670 | #define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7) | ||
3671 | #define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7 | ||
3672 | u8 rss_result_mask; | ||
3673 | u8 rss_mode; | ||
3674 | __le32 __reserved2; | ||
3675 | u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE]; | ||
3676 | __le32 rss_key[T_ETH_RSS_KEY]; | ||
3677 | __le32 echo; | ||
3678 | __le32 reserved3; | ||
3679 | }; | ||
3680 | |||
3681 | |||
3682 | /* | ||
3683 | * The eth Rx Buffer Descriptor | ||
3684 | */ | ||
3685 | struct eth_rx_bd { | ||
3686 | __le32 addr_lo; | ||
3687 | __le32 addr_hi; | ||
3688 | }; | ||
3689 | |||
3690 | |||
3691 | /* | ||
2782 | * Eth Rx Cqe structure- general structure for ramrods | 3692 | * Eth Rx Cqe structure- general structure for ramrods |
2783 | */ | 3693 | */ |
2784 | struct common_ramrod_eth_rx_cqe { | 3694 | struct common_ramrod_eth_rx_cqe { |
2785 | u8 ramrod_type; | 3695 | u8 ramrod_type; |
2786 | #define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x1<<0) | 3696 | #define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0) |
2787 | #define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0 | 3697 | #define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0 |
2788 | #define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<1) | 3698 | #define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2) |
2789 | #define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 1 | 3699 | #define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2 |
2790 | #define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x3F<<2) | 3700 | #define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3) |
2791 | #define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 2 | 3701 | #define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3 |
2792 | u8 conn_type; | 3702 | u8 conn_type; |
2793 | __le16 reserved1; | 3703 | __le16 reserved1; |
2794 | __le32 conn_and_cmd_data; | 3704 | __le32 conn_and_cmd_data; |
@@ -2797,7 +3707,8 @@ struct common_ramrod_eth_rx_cqe { | |||
2797 | #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24) | 3707 | #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24) |
2798 | #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24 | 3708 | #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24 |
2799 | struct ramrod_data protocol_data; | 3709 | struct ramrod_data protocol_data; |
2800 | __le32 reserved2[4]; | 3710 | __le32 echo; |
3711 | __le32 reserved2[11]; | ||
2801 | }; | 3712 | }; |
2802 | 3713 | ||
2803 | /* | 3714 | /* |
@@ -2806,7 +3717,7 @@ struct common_ramrod_eth_rx_cqe { | |||
2806 | struct eth_rx_cqe_next_page { | 3717 | struct eth_rx_cqe_next_page { |
2807 | __le32 addr_lo; | 3718 | __le32 addr_lo; |
2808 | __le32 addr_hi; | 3719 | __le32 addr_hi; |
2809 | __le32 reserved[6]; | 3720 | __le32 reserved[14]; |
2810 | }; | 3721 | }; |
2811 | 3722 | ||
2812 | /* | 3723 | /* |
@@ -2816,6 +3727,38 @@ union eth_rx_cqe { | |||
2816 | struct eth_fast_path_rx_cqe fast_path_cqe; | 3727 | struct eth_fast_path_rx_cqe fast_path_cqe; |
2817 | struct common_ramrod_eth_rx_cqe ramrod_cqe; | 3728 | struct common_ramrod_eth_rx_cqe ramrod_cqe; |
2818 | struct eth_rx_cqe_next_page next_page_cqe; | 3729 | struct eth_rx_cqe_next_page next_page_cqe; |
3730 | struct eth_end_agg_rx_cqe end_agg_cqe; | ||
3731 | }; | ||
3732 | |||
3733 | |||
3734 | /* | ||
3735 | * Values for RX ETH CQE type field | ||
3736 | */ | ||
3737 | enum eth_rx_cqe_type { | ||
3738 | RX_ETH_CQE_TYPE_ETH_FASTPATH, | ||
3739 | RX_ETH_CQE_TYPE_ETH_RAMROD, | ||
3740 | RX_ETH_CQE_TYPE_ETH_START_AGG, | ||
3741 | RX_ETH_CQE_TYPE_ETH_STOP_AGG, | ||
3742 | MAX_ETH_RX_CQE_TYPE | ||
3743 | }; | ||
3744 | |||
3745 | |||
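With the widened 2-bit TYPE field every RX completion now identifies itself as a fast-path packet, a slow-path ramrod completion, or the start/stop marker of a TPA aggregation, which is exactly what enum eth_rx_cqe_type enumerates. A sketch of the dispatch a poll loop might do (the helper name is illustrative; it relies on the TYPE bits occupying the same first byte in every CQE flavor):

	#include <linux/types.h>
	#include "bnx2x_hsi.h"

	/* Illustrative only: classify one RX CQE by its 2-bit TYPE field. */
	static enum eth_rx_cqe_type example_cqe_type(const union eth_rx_cqe *cqe)
	{
		u8 flags = cqe->fast_path_cqe.type_error_flags;

		return (flags & ETH_FAST_PATH_RX_CQE_TYPE) >>
		       ETH_FAST_PATH_RX_CQE_TYPE_SHIFT;
	}

	/* A poll loop could then branch on the result:
	 *   RX_ETH_CQE_TYPE_ETH_FASTPATH  - regular packet (fast_path_cqe)
	 *   RX_ETH_CQE_TYPE_ETH_RAMROD    - slow-path completion (ramrod_cqe)
	 *   RX_ETH_CQE_TYPE_ETH_START_AGG - first CQE of a TPA aggregation
	 *   RX_ETH_CQE_TYPE_ETH_STOP_AGG  - end_agg_cqe closing the aggregation */
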
3746 | /* | ||
3747 | * Type of SGL/Raw field in ETH RX fast path CQE | ||
3748 | */ | ||
3749 | enum eth_rx_fp_sel { | ||
3750 | ETH_FP_CQE_REGULAR, | ||
3751 | ETH_FP_CQE_RAW, | ||
3752 | MAX_ETH_RX_FP_SEL | ||
3753 | }; | ||
3754 | |||
3755 | |||
3756 | /* | ||
3757 | * The eth Rx SGE Descriptor | ||
3758 | */ | ||
3759 | struct eth_rx_sge { | ||
3760 | __le32 addr_lo; | ||
3761 | __le32 addr_hi; | ||
2819 | }; | 3762 | }; |
2820 | 3763 | ||
2821 | 3764 | ||
@@ -2837,14 +3780,18 @@ struct spe_hdr { | |||
2837 | }; | 3780 | }; |
2838 | 3781 | ||
2839 | /* | 3782 | /* |
2840 | * Ethernet slow path element | 3783 | * specific data for ethernet slow path element |
2841 | */ | 3784 | */ |
2842 | union eth_specific_data { | 3785 | union eth_specific_data { |
2843 | u8 protocol_data[8]; | 3786 | u8 protocol_data[8]; |
3787 | struct regpair client_update_ramrod_data; | ||
2844 | struct regpair client_init_ramrod_init_data; | 3788 | struct regpair client_init_ramrod_init_data; |
2845 | struct eth_halt_ramrod_data halt_ramrod_data; | 3789 | struct eth_halt_ramrod_data halt_ramrod_data; |
2846 | struct regpair update_data_addr; | 3790 | struct regpair update_data_addr; |
2847 | struct eth_common_ramrod_data common_ramrod_data; | 3791 | struct eth_common_ramrod_data common_ramrod_data; |
3792 | struct regpair classify_cfg_addr; | ||
3793 | struct regpair filter_cfg_addr; | ||
3794 | struct regpair mcast_cfg_addr; | ||
2848 | }; | 3795 | }; |
2849 | 3796 | ||
2850 | /* | 3797 | /* |
@@ -2857,94 +3804,202 @@ struct eth_spe { | |||
2857 | 3804 | ||
2858 | 3805 | ||
2859 | /* | 3806 | /* |
2860 | * array of 13 bds as appears in the eth xstorm context | 3807 | * Ethernet command ID for slow path elements |
2861 | */ | 3808 | */ |
2862 | struct eth_tx_bds_array { | 3809 | enum eth_spqe_cmd_id { |
2863 | union eth_tx_bd_types bds[13]; | 3810 | RAMROD_CMD_ID_ETH_UNUSED, |
3811 | RAMROD_CMD_ID_ETH_CLIENT_SETUP, | ||
3812 | RAMROD_CMD_ID_ETH_HALT, | ||
3813 | RAMROD_CMD_ID_ETH_FORWARD_SETUP, | ||
3814 | RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP, | ||
3815 | RAMROD_CMD_ID_ETH_CLIENT_UPDATE, | ||
3816 | RAMROD_CMD_ID_ETH_EMPTY, | ||
3817 | RAMROD_CMD_ID_ETH_TERMINATE, | ||
3818 | RAMROD_CMD_ID_ETH_TPA_UPDATE, | ||
3819 | RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES, | ||
3820 | RAMROD_CMD_ID_ETH_FILTER_RULES, | ||
3821 | RAMROD_CMD_ID_ETH_MULTICAST_RULES, | ||
3822 | RAMROD_CMD_ID_ETH_RSS_UPDATE, | ||
3823 | RAMROD_CMD_ID_ETH_SET_MAC, | ||
3824 | MAX_ETH_SPQE_CMD_ID | ||
2864 | }; | 3825 | }; |
2865 | 3826 | ||
2866 | 3827 | ||
2867 | /* | 3828 | /* |
2868 | * Common configuration parameters per function in Tstorm | 3829 | * eth tpa update command |
2869 | */ | 3830 | */ |
2870 | struct tstorm_eth_function_common_config { | 3831 | enum eth_tpa_update_command { |
2871 | #if defined(__BIG_ENDIAN) | 3832 | TPA_UPDATE_NONE_COMMAND, |
2872 | u8 reserved1; | 3833 | TPA_UPDATE_ENABLE_COMMAND, |
2873 | u8 rss_result_mask; | 3834 | TPA_UPDATE_DISABLE_COMMAND, |
2874 | u16 config_flags; | 3835 | MAX_ETH_TPA_UPDATE_COMMAND |
2875 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) | ||
2876 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 | ||
2877 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) | ||
2878 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1 | ||
2879 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) | ||
2880 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2 | ||
2881 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) | ||
2882 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 | ||
2883 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) | ||
2884 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 | ||
2885 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7) | ||
2886 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7 | ||
2887 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8) | ||
2888 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8 | ||
2889 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9) | ||
2890 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9 | ||
2891 | #elif defined(__LITTLE_ENDIAN) | ||
2892 | u16 config_flags; | ||
2893 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) | ||
2894 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 | ||
2895 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) | ||
2896 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1 | ||
2897 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) | ||
2898 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2 | ||
2899 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) | ||
2900 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 | ||
2901 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) | ||
2902 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 | ||
2903 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7) | ||
2904 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7 | ||
2905 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8) | ||
2906 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8 | ||
2907 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9) | ||
2908 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9 | ||
2909 | u8 rss_result_mask; | ||
2910 | u8 reserved1; | ||
2911 | #endif | ||
2912 | u16 vlan_id[2]; | ||
2913 | }; | 3836 | }; |
2914 | 3837 | ||
3838 | |||
2915 | /* | 3839 | /* |
2916 | * RSS idirection table update configuration | 3840 | * Tx regular BD structure |
2917 | */ | 3841 | */ |
2918 | struct rss_update_config { | 3842 | struct eth_tx_bd { |
2919 | #if defined(__BIG_ENDIAN) | 3843 | __le32 addr_lo; |
2920 | u16 toe_rss_bitmap; | 3844 | __le32 addr_hi; |
2921 | u16 flags; | 3845 | __le16 total_pkt_bytes; |
2922 | #define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0) | 3846 | __le16 nbytes; |
2923 | #define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0 | 3847 | u8 reserved[4]; |
2924 | #define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1) | 3848 | }; |
2925 | #define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1 | 3849 | |
2926 | #define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2) | 3850 | |
2927 | #define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2 | 3851 | /* |
2928 | #elif defined(__LITTLE_ENDIAN) | 3852 | * structure for easy accessibility to assembler |
2929 | u16 flags; | 3853 | */ |
2930 | #define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0) | 3854 | struct eth_tx_bd_flags { |
2931 | #define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0 | 3855 | u8 as_bitfield; |
2932 | #define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1) | 3856 | #define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0) |
2933 | #define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1 | 3857 | #define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0 |
2934 | #define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2) | 3858 | #define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1) |
2935 | #define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2 | 3859 | #define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1 |
2936 | u16 toe_rss_bitmap; | 3860 | #define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2) |
2937 | #endif | 3861 | #define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2 |
2938 | u32 reserved1; | 3862 | #define ETH_TX_BD_FLAGS_START_BD (0x1<<4) |
3863 | #define ETH_TX_BD_FLAGS_START_BD_SHIFT 4 | ||
3864 | #define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5) | ||
3865 | #define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5 | ||
3866 | #define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6) | ||
3867 | #define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6 | ||
3868 | #define ETH_TX_BD_FLAGS_IPV6 (0x1<<7) | ||
3869 | #define ETH_TX_BD_FLAGS_IPV6_SHIFT 7 | ||
3870 | }; | ||
3871 | |||
3872 | /* | ||
3873 | * The eth Tx Buffer Descriptor | ||
3874 | */ | ||
3875 | struct eth_tx_start_bd { | ||
3876 | __le32 addr_lo; | ||
3877 | __le32 addr_hi; | ||
3878 | __le16 nbd; | ||
3879 | __le16 nbytes; | ||
3880 | __le16 vlan_or_ethertype; | ||
3881 | struct eth_tx_bd_flags bd_flags; | ||
3882 | u8 general_data; | ||
3883 | #define ETH_TX_START_BD_HDR_NBDS (0xF<<0) | ||
3884 | #define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 | ||
3885 | #define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) | ||
3886 | #define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 | ||
3887 | #define ETH_TX_START_BD_RESREVED (0x1<<5) | ||
3888 | #define ETH_TX_START_BD_RESREVED_SHIFT 5 | ||
3889 | #define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6) | ||
3890 | #define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6 | ||
3891 | }; | ||
3892 | |||
3893 | /* | ||
3894 | * Tx parsing BD structure for ETH E1/E1h | ||
3895 | */ | ||
3896 | struct eth_tx_parse_bd_e1x { | ||
3897 | u8 global_data; | ||
3898 | #define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0) | ||
3899 | #define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0 | ||
3900 | #define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4) | ||
3901 | #define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4 | ||
3902 | #define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5) | ||
3903 | #define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5 | ||
3904 | #define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6) | ||
3905 | #define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6 | ||
3906 | #define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7) | ||
3907 | #define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7 | ||
3908 | u8 tcp_flags; | ||
3909 | #define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0) | ||
3910 | #define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0 | ||
3911 | #define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1) | ||
3912 | #define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1 | ||
3913 | #define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2) | ||
3914 | #define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2 | ||
3915 | #define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3) | ||
3916 | #define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3 | ||
3917 | #define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4) | ||
3918 | #define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4 | ||
3919 | #define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5) | ||
3920 | #define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5 | ||
3921 | #define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6) | ||
3922 | #define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6 | ||
3923 | #define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7) | ||
3924 | #define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7 | ||
3925 | u8 ip_hlen_w; | ||
3926 | s8 reserved; | ||
3927 | __le16 total_hlen_w; | ||
3928 | __le16 tcp_pseudo_csum; | ||
3929 | __le16 lso_mss; | ||
3930 | __le16 ip_id; | ||
3931 | __le32 tcp_send_seq; | ||
3932 | }; | ||
3933 | |||
3934 | /* | ||
3935 | * Tx parsing BD structure for ETH E2 | ||
3936 | */ | ||
3937 | struct eth_tx_parse_bd_e2 { | ||
3938 | __le16 dst_mac_addr_lo; | ||
3939 | __le16 dst_mac_addr_mid; | ||
3940 | __le16 dst_mac_addr_hi; | ||
3941 | __le16 src_mac_addr_lo; | ||
3942 | __le16 src_mac_addr_mid; | ||
3943 | __le16 src_mac_addr_hi; | ||
3944 | __le32 parsing_data; | ||
3945 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0) | ||
3946 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0 | ||
3947 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13) | ||
3948 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13 | ||
3949 | #define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17) | ||
3950 | #define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17 | ||
3951 | #define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31) | ||
3952 | #define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31 | ||
3953 | }; | ||
3954 | |||
3955 | /* | ||
3956 | * The last BD in the BD memory will hold a pointer to the next BD memory | ||
3957 | */ | ||
3958 | struct eth_tx_next_bd { | ||
3959 | __le32 addr_lo; | ||
3960 | __le32 addr_hi; | ||
3961 | u8 reserved[8]; | ||
3962 | }; | ||
3963 | |||
3964 | /* | ||
3965 | * union for 4 Bd types | ||
3966 | */ | ||
3967 | union eth_tx_bd_types { | ||
3968 | struct eth_tx_start_bd start_bd; | ||
3969 | struct eth_tx_bd reg_bd; | ||
3970 | struct eth_tx_parse_bd_e1x parse_bd_e1x; | ||
3971 | struct eth_tx_parse_bd_e2 parse_bd_e2; | ||
3972 | struct eth_tx_next_bd next_bd; | ||
3973 | }; | ||
3974 | |||
3975 | /* | ||
3976 | * array of 13 bds as appears in the eth xstorm context | ||
3977 | */ | ||
3978 | struct eth_tx_bds_array { | ||
3979 | union eth_tx_bd_types bds[13]; | ||
2939 | }; | 3980 | }; |
2940 | 3981 | ||
3982 | |||
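A transmitted packet is described by a short chain of these BDs: one eth_tx_start_bd whose nbd counts every BD of the packet, an E1x or E2 parsing BD with the offload metadata, regular eth_tx_bd entries for the remaining fragments, and an eth_tx_next_bd in the last slot of each BD page pointing at the next page. The sketch below shows only how the E2 parsing_data word could be composed with the mask/shift macros; the offset and length parameters are illustrative stand-ins for what real code derives from the skb.

	#include <linux/kernel.h>
	#include "bnx2x_hsi.h"

	/* Illustrative only: pack LSO parameters into the E2 parsing BD.
	 * tcp_off_w   - TCP header start offset, in 16-bit words
	 * tcp_hlen_dw - TCP header length, in 32-bit dwords
	 * mss         - LSO segment size (must fit the 14-bit field) */
	static void example_fill_parse_bd_e2(struct eth_tx_parse_bd_e2 *pbd,
					     u16 tcp_off_w, u8 tcp_hlen_dw, u16 mss)
	{
		u32 pd = 0;

		pd |= ((u32)tcp_off_w << ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
		      ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
		pd |= ((u32)tcp_hlen_dw << ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
		      ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
		pd |= ((u32)mss << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
		      ETH_TX_PARSE_BD_E2_LSO_MSS;

		pbd->parsing_data = cpu_to_le32(pd);
	}
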
2941 | /* | 3983 | /* |
2942 | * parameters for eth update ramrod | 3984 | * VLAN mode on TX BDs |
2943 | */ | 3985 | */ |
2944 | struct eth_update_ramrod_data { | 3986 | enum eth_tx_vlan_type { |
2945 | struct tstorm_eth_function_common_config func_config; | 3987 | X_ETH_NO_VLAN, |
2946 | u8 indirectionTable[128]; | 3988 | X_ETH_OUTBAND_VLAN, |
2947 | struct rss_update_config rss_config; | 3989 | X_ETH_INBAND_VLAN, |
3990 | X_ETH_FW_ADDED_VLAN, | ||
3991 | MAX_ETH_TX_VLAN_TYPE | ||
3992 | }; | ||
3993 | |||
3994 | |||
3995 | /* | ||
3996 | * Ethernet VLAN filtering mode in E1x | ||
3997 | */ | ||
3998 | enum eth_vlan_filter_mode { | ||
3999 | ETH_VLAN_FILTER_ANY_VLAN, | ||
4000 | ETH_VLAN_FILTER_SPECIFIC_VLAN, | ||
4001 | ETH_VLAN_FILTER_CLASSIFY, | ||
4002 | MAX_ETH_VLAN_FILTER_MODE | ||
2948 | }; | 4003 | }; |
2949 | 4004 | ||
2950 | 4005 | ||
@@ -2954,9 +4009,8 @@ struct eth_update_ramrod_data { | |||
2954 | struct mac_configuration_hdr { | 4009 | struct mac_configuration_hdr { |
2955 | u8 length; | 4010 | u8 length; |
2956 | u8 offset; | 4011 | u8 offset; |
2957 | u16 client_id; | 4012 | __le16 client_id; |
2958 | u16 echo; | 4013 | __le32 echo; |
2959 | u16 reserved1; | ||
2960 | }; | 4014 | }; |
2961 | 4015 | ||
2962 | /* | 4016 | /* |
@@ -2981,8 +4035,8 @@ struct mac_configuration_entry { | |||
2981 | #define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5 | 4035 | #define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5 |
2982 | #define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6) | 4036 | #define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6) |
2983 | #define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6 | 4037 | #define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6 |
2984 | u16 reserved0; | 4038 | __le16 reserved0; |
2985 | u32 clients_bit_vector; | 4039 | __le32 clients_bit_vector; |
2986 | }; | 4040 | }; |
2987 | 4041 | ||
2988 | /* | 4042 | /* |
@@ -2995,6 +4049,36 @@ struct mac_configuration_cmd { | |||
2995 | 4049 | ||
2996 | 4050 | ||
2997 | /* | 4051 | /* |
4052 | * Set-MAC command type (in E1x) | ||
4053 | */ | ||
4054 | enum set_mac_action_type { | ||
4055 | T_ETH_MAC_COMMAND_INVALIDATE, | ||
4056 | T_ETH_MAC_COMMAND_SET, | ||
4057 | MAX_SET_MAC_ACTION_TYPE | ||
4058 | }; | ||
4059 | |||
4060 | |||
4061 | /* | ||
4062 | * tpa update ramrod data | ||
4063 | */ | ||
4064 | struct tpa_update_ramrod_data { | ||
4065 | u8 update_ipv4; | ||
4066 | u8 update_ipv6; | ||
4067 | u8 client_id; | ||
4068 | u8 max_tpa_queues; | ||
4069 | u8 max_sges_for_packet; | ||
4070 | u8 complete_on_both_clients; | ||
4071 | __le16 reserved1; | ||
4072 | __le16 sge_buff_size; | ||
4073 | __le16 max_agg_size; | ||
4074 | __le32 sge_page_base_lo; | ||
4075 | __le32 sge_page_base_hi; | ||
4076 | __le16 sge_pause_thr_low; | ||
4077 | __le16 sge_pause_thr_high; | ||
4078 | }; | ||
4079 | |||
4080 | |||
4081 | /* | ||
2998 | * approximate-match multicast filtering for E1H per function in Tstorm | 4082 | * approximate-match multicast filtering for E1H per function in Tstorm |
2999 | */ | 4083 | */ |
3000 | struct tstorm_eth_approximate_match_multicast_filtering { | 4084 | struct tstorm_eth_approximate_match_multicast_filtering { |
@@ -3003,35 +4087,50 @@ struct tstorm_eth_approximate_match_multicast_filtering { | |||
3003 | 4087 | ||
3004 | 4088 | ||
3005 | /* | 4089 | /* |
4090 | * Common configuration parameters per function in Tstorm | ||
4091 | */ | ||
4092 | struct tstorm_eth_function_common_config { | ||
4093 | __le16 config_flags; | ||
4094 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) | ||
4095 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 | ||
4096 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) | ||
4097 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1 | ||
4098 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) | ||
4099 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2 | ||
4100 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) | ||
4101 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 | ||
4102 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) | ||
4103 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 | ||
4104 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7) | ||
4105 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7 | ||
4106 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8) | ||
4107 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8 | ||
4108 | u8 rss_result_mask; | ||
4109 | u8 reserved1; | ||
4110 | __le16 vlan_id[2]; | ||
4111 | }; | ||
4112 | |||
4113 | |||
4114 | /* | ||
3006 | * MAC filtering configuration parameters per port in Tstorm | 4115 | * MAC filtering configuration parameters per port in Tstorm |
3007 | */ | 4116 | */ |
3008 | struct tstorm_eth_mac_filter_config { | 4117 | struct tstorm_eth_mac_filter_config { |
3009 | u32 ucast_drop_all; | 4118 | __le32 ucast_drop_all; |
3010 | u32 ucast_accept_all; | 4119 | __le32 ucast_accept_all; |
3011 | u32 mcast_drop_all; | 4120 | __le32 mcast_drop_all; |
3012 | u32 mcast_accept_all; | 4121 | __le32 mcast_accept_all; |
3013 | u32 bcast_drop_all; | 4122 | __le32 bcast_accept_all; |
3014 | u32 bcast_accept_all; | 4123 | __le32 vlan_filter[2]; |
3015 | u32 vlan_filter[2]; | 4124 | __le32 unmatched_unicast; |
3016 | u32 unmatched_unicast; | ||
3017 | u32 reserved; | ||
3018 | }; | 4125 | }; |
3019 | 4126 | ||
3020 | 4127 | ||
3021 | /* | 4128 | /* |
3022 | * common flag to indicate existence of TPA. | 4129 | * tx only queue init ramrod data |
3023 | */ | 4130 | */ |
3024 | struct tstorm_eth_tpa_exist { | 4131 | struct tx_queue_init_ramrod_data { |
3025 | #if defined(__BIG_ENDIAN) | 4132 | struct client_init_general_data general; |
3026 | u16 reserved1; | 4133 | struct client_init_tx_data tx; |
3027 | u8 reserved0; | ||
3028 | u8 tpa_exist; | ||
3029 | #elif defined(__LITTLE_ENDIAN) | ||
3030 | u8 tpa_exist; | ||
3031 | u8 reserved0; | ||
3032 | u16 reserved1; | ||
3033 | #endif | ||
3034 | u32 reserved2; | ||
3035 | }; | 4134 | }; |
3036 | 4135 | ||
3037 | 4136 | ||
@@ -3061,10 +4160,8 @@ struct ustorm_eth_rx_producers { | |||
3061 | */ | 4160 | */ |
3062 | struct cfc_del_event_data { | 4161 | struct cfc_del_event_data { |
3063 | u32 cid; | 4162 | u32 cid; |
3064 | u8 error; | 4163 | u32 reserved0; |
3065 | u8 reserved0; | 4164 | u32 reserved1; |
3066 | u16 reserved1; | ||
3067 | u32 reserved2; | ||
3068 | }; | 4165 | }; |
3069 | 4166 | ||
3070 | 4167 | ||
@@ -3072,22 +4169,18 @@ struct cfc_del_event_data { | |||
3072 | * per-port SAFC demo variables | 4169 | * per-port SAFC demo variables |
3073 | */ | 4170 | */ |
3074 | struct cmng_flags_per_port { | 4171 | struct cmng_flags_per_port { |
3075 | u8 con_number[NUM_OF_PROTOCOLS]; | ||
3076 | u32 cmng_enables; | 4172 | u32 cmng_enables; |
3077 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0) | 4173 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0) |
3078 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0 | 4174 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0 |
3079 | #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1) | 4175 | #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1) |
3080 | #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1 | 4176 | #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1 |
3081 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_PROTOCOL (0x1<<2) | 4177 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2) |
3082 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_PROTOCOL_SHIFT 2 | 4178 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2 |
3083 | #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL (0x1<<3) | 4179 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3) |
3084 | #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL_SHIFT 3 | 4180 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3 |
3085 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<4) | 4181 | #define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4) |
3086 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 4 | 4182 | #define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4 |
3087 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<5) | 4183 | u32 __reserved1; |
3088 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 5 | ||
3089 | #define __CMNG_FLAGS_PER_PORT_RESERVED0 (0x3FFFFFF<<6) | ||
3090 | #define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 6 | ||
3091 | }; | 4184 | }; |
3092 | 4185 | ||
3093 | 4186 | ||
@@ -3106,6 +4199,7 @@ struct fairness_vars_per_port { | |||
3106 | u32 upper_bound; | 4199 | u32 upper_bound; |
3107 | u32 fair_threshold; | 4200 | u32 fair_threshold; |
3108 | u32 fairness_timeout; | 4201 | u32 fairness_timeout; |
4202 | u32 reserved0; | ||
3109 | }; | 4203 | }; |
3110 | 4204 | ||
3111 | /* | 4205 | /* |
@@ -3122,65 +4216,65 @@ struct safc_struct_per_port { | |||
3122 | u16 __reserved1; | 4216 | u16 __reserved1; |
3123 | #endif | 4217 | #endif |
3124 | u8 cos_to_traffic_types[MAX_COS_NUMBER]; | 4218 | u8 cos_to_traffic_types[MAX_COS_NUMBER]; |
3125 | u32 __reserved2; | ||
3126 | u16 cos_to_pause_mask[NUM_OF_SAFC_BITS]; | 4219 | u16 cos_to_pause_mask[NUM_OF_SAFC_BITS]; |
3127 | }; | 4220 | }; |
3128 | 4221 | ||
3129 | /* | 4222 | /* |
3130 | * per-port PFC variables | 4223 | * Per-port congestion management variables |
3131 | */ | 4224 | */ |
3132 | struct pfc_struct_per_port { | 4225 | struct cmng_struct_per_port { |
3133 | u8 priority_to_traffic_types[MAX_PFC_PRIORITIES]; | 4226 | struct rate_shaping_vars_per_port rs_vars; |
3134 | #if defined(__BIG_ENDIAN) | 4227 | struct fairness_vars_per_port fair_vars; |
3135 | u16 pfc_pause_quanta_in_nanosec; | 4228 | struct safc_struct_per_port safc_vars; |
3136 | u8 __reserved0; | 4229 | struct cmng_flags_per_port flags; |
3137 | u8 priority_non_pausable_mask; | ||
3138 | #elif defined(__LITTLE_ENDIAN) | ||
3139 | u8 priority_non_pausable_mask; | ||
3140 | u8 __reserved0; | ||
3141 | u16 pfc_pause_quanta_in_nanosec; | ||
3142 | #endif | ||
3143 | }; | 4230 | }; |
3144 | 4231 | ||
4232 | |||
3145 | /* | 4233 | /* |
3146 | * Priority and cos | 4234 | * Protocol-common command ID for slow path elements |
3147 | */ | 4235 | */ |
3148 | struct priority_cos { | 4236 | enum common_spqe_cmd_id { |
3149 | #if defined(__BIG_ENDIAN) | 4237 | RAMROD_CMD_ID_COMMON_UNUSED, |
3150 | u16 reserved1; | 4238 | RAMROD_CMD_ID_COMMON_FUNCTION_START, |
3151 | u8 cos; | 4239 | RAMROD_CMD_ID_COMMON_FUNCTION_STOP, |
3152 | u8 priority; | 4240 | RAMROD_CMD_ID_COMMON_CFC_DEL, |
3153 | #elif defined(__LITTLE_ENDIAN) | 4241 | RAMROD_CMD_ID_COMMON_CFC_DEL_WB, |
3154 | u8 priority; | 4242 | RAMROD_CMD_ID_COMMON_STAT_QUERY, |
3155 | u8 cos; | 4243 | RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, |
3156 | u16 reserved1; | 4244 | RAMROD_CMD_ID_COMMON_START_TRAFFIC, |
3157 | #endif | 4245 | RAMROD_CMD_ID_COMMON_RESERVED1, |
3158 | u32 reserved2; | 4246 | RAMROD_CMD_ID_COMMON_RESERVED2, |
4247 | MAX_COMMON_SPQE_CMD_ID | ||
3159 | }; | 4248 | }; |
3160 | 4249 | ||
4250 | |||
3161 | /* | 4251 | /* |
3162 | * Per-port congestion management variables | 4252 | * Per-protocol connection types |
3163 | */ | 4253 | */ |
3164 | struct cmng_struct_per_port { | 4254 | enum connection_type { |
3165 | struct rate_shaping_vars_per_port rs_vars; | 4255 | ETH_CONNECTION_TYPE, |
3166 | struct fairness_vars_per_port fair_vars; | 4256 | TOE_CONNECTION_TYPE, |
3167 | struct safc_struct_per_port safc_vars; | 4257 | RDMA_CONNECTION_TYPE, |
3168 | struct pfc_struct_per_port pfc_vars; | 4258 | ISCSI_CONNECTION_TYPE, |
3169 | #if defined(__BIG_ENDIAN) | 4259 | FCOE_CONNECTION_TYPE, |
3170 | u16 __reserved1; | 4260 | RESERVED_CONNECTION_TYPE_0, |
3171 | u8 dcb_enabled; | 4261 | RESERVED_CONNECTION_TYPE_1, |
3172 | u8 llfc_mode; | 4262 | RESERVED_CONNECTION_TYPE_2, |
3173 | #elif defined(__LITTLE_ENDIAN) | 4263 | NONE_CONNECTION_TYPE, |
3174 | u8 llfc_mode; | 4264 | MAX_CONNECTION_TYPE |
3175 | u8 dcb_enabled; | ||
3176 | u16 __reserved1; | ||
3177 | #endif | ||
3178 | struct priority_cos | ||
3179 | traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES]; | ||
3180 | struct cmng_flags_per_port flags; | ||
3181 | }; | 4265 | }; |
3182 | 4266 | ||
3183 | 4267 | ||
4268 | /* | ||
4269 | * Cos modes | ||
4270 | */ | ||
4271 | enum cos_mode { | ||
4272 | OVERRIDE_COS, | ||
4273 | STATIC_COS, | ||
4274 | FW_WRR, | ||
4275 | MAX_COS_MODE | ||
4276 | }; | ||
4277 | |||
3184 | 4278 | ||
3185 | /* | 4279 | /* |
3186 | * Dynamic HC counters set by the driver | 4280 | * Dynamic HC counters set by the driver |
@@ -3197,126 +4291,174 @@ struct cstorm_queue_zone_data { | |||
3197 | struct regpair reserved[2]; | 4291 | struct regpair reserved[2]; |
3198 | }; | 4292 | }; |
3199 | 4293 | ||
4294 | |||
3200 | /* | 4295 | /* |
3201 | * Dynamic host coalescing init parameters | 4296 | * Vf-PF channel data in cstorm ram (non-triggered zone) |
3202 | */ | 4297 | */ |
3203 | struct dynamic_hc_config { | 4298 | struct vf_pf_channel_zone_data { |
3204 | u32 threshold[3]; | 4299 | u32 msg_addr_lo; |
3205 | u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES]; | 4300 | u32 msg_addr_hi; |
3206 | u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES]; | ||
3207 | u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES]; | ||
3208 | u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES]; | ||
3209 | u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES]; | ||
3210 | }; | 4301 | }; |
3211 | 4302 | ||
3212 | |||
3213 | /* | 4303 | /* |
3214 | * Protocol-common statistics collected by the Xstorm (per client) | 4304 | * zone for VF non-triggered data |
3215 | */ | 4305 | */ |
3216 | struct xstorm_per_client_stats { | 4306 | struct non_trigger_vf_zone { |
3217 | __le32 reserved0; | 4307 | struct vf_pf_channel_zone_data vf_pf_channel; |
3218 | __le32 unicast_pkts_sent; | ||
3219 | struct regpair unicast_bytes_sent; | ||
3220 | struct regpair multicast_bytes_sent; | ||
3221 | __le32 multicast_pkts_sent; | ||
3222 | __le32 broadcast_pkts_sent; | ||
3223 | struct regpair broadcast_bytes_sent; | ||
3224 | __le16 stats_counter; | ||
3225 | __le16 reserved1; | ||
3226 | __le32 reserved2; | ||
3227 | }; | 4308 | }; |
3228 | 4309 | ||
3229 | /* | 4310 | /* |
3230 | * Common statistics collected by the Xstorm (per port) | 4311 | * Vf-PF channel trigger zone in cstorm ram |
3231 | */ | 4312 | */ |
3232 | struct xstorm_common_stats { | 4313 | struct vf_pf_channel_zone_trigger { |
3233 | struct xstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID]; | 4314 | u8 addr_valid; |
3234 | }; | 4315 | }; |
3235 | 4316 | ||
3236 | /* | 4317 | /* |
3237 | * Protocol-common statistics collected by the Tstorm (per port) | 4318 | * zone that triggers the in-bound interrupt |
3238 | */ | 4319 | */ |
3239 | struct tstorm_per_port_stats { | 4320 | struct trigger_vf_zone { |
3240 | __le32 mac_filter_discard; | 4321 | #if defined(__BIG_ENDIAN) |
3241 | __le32 xxoverflow_discard; | 4322 | u16 reserved1; |
3242 | __le32 brb_truncate_discard; | 4323 | u8 reserved0; |
3243 | __le32 mac_discard; | 4324 | struct vf_pf_channel_zone_trigger vf_pf_channel; |
4325 | #elif defined(__LITTLE_ENDIAN) | ||
4326 | struct vf_pf_channel_zone_trigger vf_pf_channel; | ||
4327 | u8 reserved0; | ||
4328 | u16 reserved1; | ||
4329 | #endif | ||
4330 | u32 reserved2; | ||
3244 | }; | 4331 | }; |
3245 | 4332 | ||
3246 | /* | 4333 | /* |
3247 | * Protocol-common statistics collected by the Tstorm (per client) | 4334 | * zone B per-VF data |
3248 | */ | 4335 | */ |
3249 | struct tstorm_per_client_stats { | 4336 | struct cstorm_vf_zone_data { |
3250 | struct regpair rcv_unicast_bytes; | 4337 | struct non_trigger_vf_zone non_trigger; |
3251 | struct regpair rcv_broadcast_bytes; | 4338 | struct trigger_vf_zone trigger; |
3252 | struct regpair rcv_multicast_bytes; | ||
3253 | struct regpair rcv_error_bytes; | ||
3254 | __le32 checksum_discard; | ||
3255 | __le32 packets_too_big_discard; | ||
3256 | __le32 rcv_unicast_pkts; | ||
3257 | __le32 rcv_broadcast_pkts; | ||
3258 | __le32 rcv_multicast_pkts; | ||
3259 | __le32 no_buff_discard; | ||
3260 | __le32 ttl0_discard; | ||
3261 | __le16 stats_counter; | ||
3262 | __le16 reserved0; | ||
3263 | }; | 4339 | }; |
3264 | 4340 | ||
4341 | |||
3265 | /* | 4342 | /* |
3266 | * Protocol-common statistics collected by the Tstorm | 4343 | * Dynamic host coalescing init parameters, per state machine |
3267 | */ | 4344 | */ |
3268 | struct tstorm_common_stats { | 4345 | struct dynamic_hc_sm_config { |
3269 | struct tstorm_per_port_stats port_statistics; | 4346 | u32 threshold[3]; |
3270 | struct tstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID]; | 4347 | u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES]; |
4348 | u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES]; | ||
4349 | u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES]; | ||
4350 | u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES]; | ||
4351 | u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES]; | ||
3271 | }; | 4352 | }; |
3272 | 4353 | ||
3273 | /* | 4354 | /* |
3274 | * Protocol-common statistics collected by the Ustorm (per client) | 4355 | * Dynamic host coalescing init parameters |
3275 | */ | 4356 | */ |
3276 | struct ustorm_per_client_stats { | 4357 | struct dynamic_hc_config { |
3277 | struct regpair ucast_no_buff_bytes; | 4358 | struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM]; |
3278 | struct regpair mcast_no_buff_bytes; | 4359 | }; |
3279 | struct regpair bcast_no_buff_bytes; | 4360 | |
3280 | __le32 ucast_no_buff_pkts; | 4361 | |
3281 | __le32 mcast_no_buff_pkts; | 4362 | struct e2_integ_data { |
3282 | __le32 bcast_no_buff_pkts; | 4363 | #if defined(__BIG_ENDIAN) |
3283 | __le16 stats_counter; | 4364 | u8 flags; |
3284 | __le16 reserved0; | 4365 | #define E2_INTEG_DATA_TESTING_EN (0x1<<0) |
4366 | #define E2_INTEG_DATA_TESTING_EN_SHIFT 0 | ||
4367 | #define E2_INTEG_DATA_LB_TX (0x1<<1) | ||
4368 | #define E2_INTEG_DATA_LB_TX_SHIFT 1 | ||
4369 | #define E2_INTEG_DATA_COS_TX (0x1<<2) | ||
4370 | #define E2_INTEG_DATA_COS_TX_SHIFT 2 | ||
4371 | #define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) | ||
4372 | #define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 | ||
4373 | #define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) | ||
4374 | #define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 | ||
4375 | #define E2_INTEG_DATA_RESERVED (0x7<<5) | ||
4376 | #define E2_INTEG_DATA_RESERVED_SHIFT 5 | ||
4377 | u8 cos; | ||
4378 | u8 voq; | ||
4379 | u8 pbf_queue; | ||
4380 | #elif defined(__LITTLE_ENDIAN) | ||
4381 | u8 pbf_queue; | ||
4382 | u8 voq; | ||
4383 | u8 cos; | ||
4384 | u8 flags; | ||
4385 | #define E2_INTEG_DATA_TESTING_EN (0x1<<0) | ||
4386 | #define E2_INTEG_DATA_TESTING_EN_SHIFT 0 | ||
4387 | #define E2_INTEG_DATA_LB_TX (0x1<<1) | ||
4388 | #define E2_INTEG_DATA_LB_TX_SHIFT 1 | ||
4389 | #define E2_INTEG_DATA_COS_TX (0x1<<2) | ||
4390 | #define E2_INTEG_DATA_COS_TX_SHIFT 2 | ||
4391 | #define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) | ||
4392 | #define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 | ||
4393 | #define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) | ||
4394 | #define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 | ||
4395 | #define E2_INTEG_DATA_RESERVED (0x7<<5) | ||
4396 | #define E2_INTEG_DATA_RESERVED_SHIFT 5 | ||
4397 | #endif | ||
4398 | #if defined(__BIG_ENDIAN) | ||
4399 | u16 reserved3; | ||
4400 | u8 reserved2; | ||
4401 | u8 ramEn; | ||
4402 | #elif defined(__LITTLE_ENDIAN) | ||
4403 | u8 ramEn; | ||
4404 | u8 reserved2; | ||
4405 | u16 reserved3; | ||
4406 | #endif | ||
3285 | }; | 4407 | }; |
3286 | 4408 | ||
4409 | |||
3287 | /* | 4410 | /* |
3288 | * Protocol-common statistics collected by the Ustorm | 4411 | * set mac event data |
3289 | */ | 4412 | */ |
3290 | struct ustorm_common_stats { | 4413 | struct eth_event_data { |
3291 | struct ustorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID]; | 4414 | u32 echo; |
4415 | u32 reserved0; | ||
4416 | u32 reserved1; | ||
3292 | }; | 4417 | }; |
3293 | 4418 | ||
4419 | |||
3294 | /* | 4420 | /* |
3295 | * Eth statistics query structure for the eth_stats_query ramrod | 4421 | * pf-vf event data |
3296 | */ | 4422 | */ |
3297 | struct eth_stats_query { | 4423 | struct vf_pf_event_data { |
3298 | struct xstorm_common_stats xstorm_common; | 4424 | u8 vf_id; |
3299 | struct tstorm_common_stats tstorm_common; | 4425 | u8 reserved0; |
3300 | struct ustorm_common_stats ustorm_common; | 4426 | u16 reserved1; |
4427 | u32 msg_addr_lo; | ||
4428 | u32 msg_addr_hi; | ||
3301 | }; | 4429 | }; |
3302 | 4430 | ||
4431 | /* | ||
4432 | * VF FLR event data | ||
4433 | */ | ||
4434 | struct vf_flr_event_data { | ||
4435 | u8 vf_id; | ||
4436 | u8 reserved0; | ||
4437 | u16 reserved1; | ||
4438 | u32 reserved2; | ||
4439 | u32 reserved3; | ||
4440 | }; | ||
3303 | 4441 | ||
3304 | /* | 4442 | /* |
3305 | * set mac event data | 4443 | * malicious VF event data |
3306 | */ | 4444 | */ |
3307 | struct set_mac_event_data { | 4445 | struct malicious_vf_event_data { |
3308 | u16 echo; | 4446 | u8 vf_id; |
3309 | u16 reserved0; | 4447 | u8 reserved0; |
3310 | u32 reserved1; | 4448 | u16 reserved1; |
3311 | u32 reserved2; | 4449 | u32 reserved2; |
4450 | u32 reserved3; | ||
3312 | }; | 4451 | }; |
3313 | 4452 | ||
3314 | /* | 4453 | /* |
3315 | * union for all event ring message types | 4454 | * union for all event ring message types |
3316 | */ | 4455 | */ |
3317 | union event_data { | 4456 | union event_data { |
3318 | struct set_mac_event_data set_mac_event; | 4457 | struct vf_pf_event_data vf_pf_event; |
4458 | struct eth_event_data eth_event; | ||
3319 | struct cfc_del_event_data cfc_del_event; | 4459 | struct cfc_del_event_data cfc_del_event; |
4460 | struct vf_flr_event_data vf_flr_event; | ||
4461 | struct malicious_vf_event_data malicious_vf_event; | ||
3320 | }; | 4462 | }; |
3321 | 4463 | ||
3322 | 4464 | ||
@@ -3343,7 +4485,7 @@ struct event_ring_data { | |||
3343 | */ | 4485 | */ |
3344 | struct event_ring_msg { | 4486 | struct event_ring_msg { |
3345 | u8 opcode; | 4487 | u8 opcode; |
3346 | u8 reserved0; | 4488 | u8 error; |
3347 | u16 reserved1; | 4489 | u16 reserved1; |
3348 | union event_data data; | 4490 | union event_data data; |
3349 | }; | 4491 | }; |
@@ -3366,32 +4508,82 @@ union event_ring_elem { | |||
3366 | 4508 | ||
3367 | 4509 | ||
3368 | /* | 4510 | /* |
4511 | * Common event ring opcodes | ||
4512 | */ | ||
4513 | enum event_ring_opcode { | ||
4514 | EVENT_RING_OPCODE_VF_PF_CHANNEL, | ||
4515 | EVENT_RING_OPCODE_FUNCTION_START, | ||
4516 | EVENT_RING_OPCODE_FUNCTION_STOP, | ||
4517 | EVENT_RING_OPCODE_CFC_DEL, | ||
4518 | EVENT_RING_OPCODE_CFC_DEL_WB, | ||
4519 | EVENT_RING_OPCODE_STAT_QUERY, | ||
4520 | EVENT_RING_OPCODE_STOP_TRAFFIC, | ||
4521 | EVENT_RING_OPCODE_START_TRAFFIC, | ||
4522 | EVENT_RING_OPCODE_VF_FLR, | ||
4523 | EVENT_RING_OPCODE_MALICIOUS_VF, | ||
4524 | EVENT_RING_OPCODE_FORWARD_SETUP, | ||
4525 | EVENT_RING_OPCODE_RSS_UPDATE_RULES, | ||
4526 | EVENT_RING_OPCODE_RESERVED1, | ||
4527 | EVENT_RING_OPCODE_RESERVED2, | ||
4528 | EVENT_RING_OPCODE_SET_MAC, | ||
4529 | EVENT_RING_OPCODE_CLASSIFICATION_RULES, | ||
4530 | EVENT_RING_OPCODE_FILTERS_RULES, | ||
4531 | EVENT_RING_OPCODE_MULTICAST_RULES, | ||
4532 | MAX_EVENT_RING_OPCODE | ||
4533 | }; | ||
4534 | |||
4535 | |||
4536 | /* | ||
4537 | * Modes for fairness algorithm | ||
4538 | */ | ||
4539 | enum fairness_mode { | ||
4540 | FAIRNESS_COS_WRR_MODE, | ||
4541 | FAIRNESS_COS_ETS_MODE, | ||
4542 | MAX_FAIRNESS_MODE | ||
4543 | }; | ||
4544 | |||
4545 | |||
4546 | /* | ||
3369 | * per-vnic fairness variables | 4547 | * per-vnic fairness variables |
3370 | */ | 4548 | */ |
3371 | struct fairness_vars_per_vn { | 4549 | struct fairness_vars_per_vn { |
3372 | u32 cos_credit_delta[MAX_COS_NUMBER]; | 4550 | u32 cos_credit_delta[MAX_COS_NUMBER]; |
3373 | u32 protocol_credit_delta[NUM_OF_PROTOCOLS]; | ||
3374 | u32 vn_credit_delta; | 4551 | u32 vn_credit_delta; |
3375 | u32 __reserved0; | 4552 | u32 __reserved0; |
3376 | }; | 4553 | }; |
3377 | 4554 | ||
3378 | 4555 | ||
3379 | /* | 4556 | /* |
4557 | * Priority and cos | ||
4558 | */ | ||
4559 | struct priority_cos { | ||
4560 | u8 priority; | ||
4561 | u8 cos; | ||
4562 | __le16 reserved1; | ||
4563 | }; | ||
4564 | |||
4565 | /* | ||
3380 | * The data for flow control configuration | 4566 | * The data for flow control configuration |
3381 | */ | 4567 | */ |
3382 | struct flow_control_configuration { | 4568 | struct flow_control_configuration { |
3383 | struct priority_cos | 4569 | struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; |
3384 | traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES]; | ||
3385 | #if defined(__BIG_ENDIAN) | ||
3386 | u16 reserved1; | ||
3387 | u8 dcb_version; | ||
3388 | u8 dcb_enabled; | ||
3389 | #elif defined(__LITTLE_ENDIAN) | ||
3390 | u8 dcb_enabled; | 4570 | u8 dcb_enabled; |
3391 | u8 dcb_version; | 4571 | u8 dcb_version; |
3392 | u16 reserved1; | 4572 | u8 dont_add_pri_0_en; |
3393 | #endif | 4573 | u8 reserved1; |
3394 | u32 reserved2; | 4574 | __le32 reserved2; |
4575 | }; | ||
4576 | |||
4577 | |||
4578 | /* | ||
4579 | * | ||
4580 | */ | ||
4581 | struct function_start_data { | ||
4582 | __le16 function_mode; | ||
4583 | __le16 sd_vlan_tag; | ||
4584 | u16 reserved; | ||
4585 | u8 path_id; | ||
4586 | u8 network_cos_mode; | ||
3395 | }; | 4587 | }; |
3396 | 4588 | ||
3397 | 4589 | ||
@@ -3504,13 +4696,13 @@ struct hc_sb_data { | |||
3504 | struct pci_entity p_func; | 4696 | struct pci_entity p_func; |
3505 | #if defined(__BIG_ENDIAN) | 4697 | #if defined(__BIG_ENDIAN) |
3506 | u8 rsrv0; | 4698 | u8 rsrv0; |
4699 | u8 state; | ||
3507 | u8 dhc_qzone_id; | 4700 | u8 dhc_qzone_id; |
3508 | u8 __dynamic_hc_level; | ||
3509 | u8 same_igu_sb_1b; | 4701 | u8 same_igu_sb_1b; |
3510 | #elif defined(__LITTLE_ENDIAN) | 4702 | #elif defined(__LITTLE_ENDIAN) |
3511 | u8 same_igu_sb_1b; | 4703 | u8 same_igu_sb_1b; |
3512 | u8 __dynamic_hc_level; | ||
3513 | u8 dhc_qzone_id; | 4704 | u8 dhc_qzone_id; |
4705 | u8 state; | ||
3514 | u8 rsrv0; | 4706 | u8 rsrv0; |
3515 | #endif | 4707 | #endif |
3516 | struct regpair rsrv1[2]; | 4708 | struct regpair rsrv1[2]; |
@@ -3518,18 +4710,30 @@ struct hc_sb_data { | |||
3518 | 4710 | ||
3519 | 4711 | ||
3520 | /* | 4712 | /* |
4713 | * Segment types for host coalescing | ||
4714 | */ | ||
4715 | enum hc_segment { | ||
4716 | HC_REGULAR_SEGMENT, | ||
4717 | HC_DEFAULT_SEGMENT, | ||
4718 | MAX_HC_SEGMENT | ||
4719 | }; | ||
4720 | |||
4721 | |||
4722 | /* | ||
3521 | * The fast-path status block meta-data | 4723 | * The fast-path status block meta-data |
3522 | */ | 4724 | */ |
3523 | struct hc_sp_status_block_data { | 4725 | struct hc_sp_status_block_data { |
3524 | struct regpair host_sb_addr; | 4726 | struct regpair host_sb_addr; |
3525 | #if defined(__BIG_ENDIAN) | 4727 | #if defined(__BIG_ENDIAN) |
3526 | u16 rsrv; | 4728 | u8 rsrv1; |
4729 | u8 state; | ||
3527 | u8 igu_seg_id; | 4730 | u8 igu_seg_id; |
3528 | u8 igu_sb_id; | 4731 | u8 igu_sb_id; |
3529 | #elif defined(__LITTLE_ENDIAN) | 4732 | #elif defined(__LITTLE_ENDIAN) |
3530 | u8 igu_sb_id; | 4733 | u8 igu_sb_id; |
3531 | u8 igu_seg_id; | 4734 | u8 igu_seg_id; |
3532 | u16 rsrv; | 4735 | u8 state; |
4736 | u8 rsrv1; | ||
3533 | #endif | 4737 | #endif |
3534 | struct pci_entity p_func; | 4738 | struct pci_entity p_func; |
3535 | }; | 4739 | }; |
@@ -3554,6 +4758,129 @@ struct hc_status_block_data_e2 { | |||
3554 | 4758 | ||
3555 | 4759 | ||
3556 | /* | 4760 | /* |
4761 | * IGU block operation modes (in Everest2) | ||
4762 | */ | ||
4763 | enum igu_mode { | ||
4764 | HC_IGU_BC_MODE, | ||
4765 | HC_IGU_NBC_MODE, | ||
4766 | MAX_IGU_MODE | ||
4767 | }; | ||
4768 | |||
4769 | |||
4770 | /* | ||
4771 | * IP versions | ||
4772 | */ | ||
4773 | enum ip_ver { | ||
4774 | IP_V4, | ||
4775 | IP_V6, | ||
4776 | MAX_IP_VER | ||
4777 | }; | ||
4778 | |||
4779 | |||
4780 | /* | ||
4781 | * Multi-function modes | ||
4782 | */ | ||
4783 | enum mf_mode { | ||
4784 | SINGLE_FUNCTION, | ||
4785 | MULTI_FUNCTION_SD, | ||
4786 | MULTI_FUNCTION_SI, | ||
4787 | MULTI_FUNCTION_RESERVED, | ||
4788 | MAX_MF_MODE | ||
4789 | }; | ||
4790 | |||
4791 | /* | ||
4792 | * Protocol-common statistics collected by the Tstorm (per pf) | ||
4793 | */ | ||
4794 | struct tstorm_per_pf_stats { | ||
4795 | struct regpair rcv_error_bytes; | ||
4796 | }; | ||
4797 | |||
4798 | /* | ||
4799 | * | ||
4800 | */ | ||
4801 | struct per_pf_stats { | ||
4802 | struct tstorm_per_pf_stats tstorm_pf_statistics; | ||
4803 | }; | ||
4804 | |||
4805 | |||
4806 | /* | ||
4807 | * Protocol-common statistics collected by the Tstorm (per port) | ||
4808 | */ | ||
4809 | struct tstorm_per_port_stats { | ||
4810 | __le32 mac_discard; | ||
4811 | __le32 mac_filter_discard; | ||
4812 | __le32 brb_truncate_discard; | ||
4813 | __le32 mf_tag_discard; | ||
4814 | __le32 packet_drop; | ||
4815 | __le32 reserved; | ||
4816 | }; | ||
4817 | |||
4818 | /* | ||
4819 | * | ||
4820 | */ | ||
4821 | struct per_port_stats { | ||
4822 | struct tstorm_per_port_stats tstorm_port_statistics; | ||
4823 | }; | ||
4824 | |||
4825 | |||
4826 | /* | ||
4827 | * Protocol-common statistics collected by the Tstorm (per client) | ||
4828 | */ | ||
4829 | struct tstorm_per_queue_stats { | ||
4830 | struct regpair rcv_ucast_bytes; | ||
4831 | __le32 rcv_ucast_pkts; | ||
4832 | __le32 checksum_discard; | ||
4833 | struct regpair rcv_bcast_bytes; | ||
4834 | __le32 rcv_bcast_pkts; | ||
4835 | __le32 pkts_too_big_discard; | ||
4836 | struct regpair rcv_mcast_bytes; | ||
4837 | __le32 rcv_mcast_pkts; | ||
4838 | __le32 ttl0_discard; | ||
4839 | __le16 no_buff_discard; | ||
4840 | __le16 reserved0; | ||
4841 | __le32 reserved1; | ||
4842 | }; | ||
4843 | |||
4844 | /* | ||
4845 | * Protocol-common statistics collected by the Ustorm (per client) | ||
4846 | */ | ||
4847 | struct ustorm_per_queue_stats { | ||
4848 | struct regpair ucast_no_buff_bytes; | ||
4849 | struct regpair mcast_no_buff_bytes; | ||
4850 | struct regpair bcast_no_buff_bytes; | ||
4851 | __le32 ucast_no_buff_pkts; | ||
4852 | __le32 mcast_no_buff_pkts; | ||
4853 | __le32 bcast_no_buff_pkts; | ||
4854 | __le32 coalesced_pkts; | ||
4855 | struct regpair coalesced_bytes; | ||
4856 | __le32 coalesced_events; | ||
4857 | __le32 coalesced_aborts; | ||
4858 | }; | ||
4859 | |||
4860 | /* | ||
4861 | * Protocol-common statistics collected by the Xstorm (per client) | ||
4862 | */ | ||
4863 | struct xstorm_per_queue_stats { | ||
4864 | struct regpair ucast_bytes_sent; | ||
4865 | struct regpair mcast_bytes_sent; | ||
4866 | struct regpair bcast_bytes_sent; | ||
4867 | __le32 ucast_pkts_sent; | ||
4868 | __le32 mcast_pkts_sent; | ||
4869 | __le32 bcast_pkts_sent; | ||
4870 | __le32 error_drop_pkts; | ||
4871 | }; | ||
4872 | |||
4873 | /* | ||
4874 | * | ||
4875 | */ | ||
4876 | struct per_queue_stats { | ||
4877 | struct tstorm_per_queue_stats tstorm_queue_statistics; | ||
4878 | struct ustorm_per_queue_stats ustorm_queue_statistics; | ||
4879 | struct xstorm_per_queue_stats xstorm_queue_statistics; | ||
4880 | }; | ||
4881 | |||
4882 | |||
4883 | /* | ||
3557 | * FW version stored in first line of pram | 4884 | * FW version stored in first line of pram |
3558 | */ | 4885 | */ |
3559 | struct pram_fw_version { | 4886 | struct pram_fw_version { |
@@ -3582,7 +4909,6 @@ union protocol_common_specific_data { | |||
3582 | u8 protocol_data[8]; | 4909 | u8 protocol_data[8]; |
3583 | struct regpair phy_address; | 4910 | struct regpair phy_address; |
3584 | struct regpair mac_config_addr; | 4911 | struct regpair mac_config_addr; |
3585 | struct common_query_ramrod_data query_ramrod_data; | ||
3586 | }; | 4912 | }; |
3587 | 4913 | ||
3588 | /* | 4914 | /* |
@@ -3613,7 +4939,6 @@ struct rate_shaping_counter { | |||
3613 | * per-vnic rate shaping variables | 4939 | * per-vnic rate shaping variables |
3614 | */ | 4940 | */ |
3615 | struct rate_shaping_vars_per_vn { | 4941 | struct rate_shaping_vars_per_vn { |
3616 | struct rate_shaping_counter protocol_counters[NUM_OF_PROTOCOLS]; | ||
3617 | struct rate_shaping_counter vn_counter; | 4942 | struct rate_shaping_counter vn_counter; |
3618 | }; | 4943 | }; |
3619 | 4944 | ||
@@ -3628,39 +4953,100 @@ struct slow_path_element { | |||
3628 | 4953 | ||
3629 | 4954 | ||
3630 | /* | 4955 | /* |
3631 | * eth/toe flags that indicate if to query | 4956 | * Protocol-common statistics counter |
3632 | */ | 4957 | */ |
3633 | struct stats_indication_flags { | 4958 | struct stats_counter { |
3634 | u32 collect_eth; | 4959 | __le16 xstats_counter; |
3635 | u32 collect_toe; | 4960 | __le16 reserved0; |
4961 | __le32 reserved1; | ||
4962 | __le16 tstats_counter; | ||
4963 | __le16 reserved2; | ||
4964 | __le32 reserved3; | ||
4965 | __le16 ustats_counter; | ||
4966 | __le16 reserved4; | ||
4967 | __le32 reserved5; | ||
4968 | __le16 cstats_counter; | ||
4969 | __le16 reserved6; | ||
4970 | __le32 reserved7; | ||
3636 | }; | 4971 | }; |
3637 | 4972 | ||
3638 | 4973 | ||
3639 | /* | 4974 | /* |
3640 | * per-port PFC variables | 4975 | * |
3641 | */ | 4976 | */ |
3642 | struct storm_pfc_struct_per_port { | 4977 | struct stats_query_entry { |
3643 | #if defined(__BIG_ENDIAN) | 4978 | u8 kind; |
3644 | u16 mid_mac_addr; | 4979 | u8 index; |
3645 | u16 msb_mac_addr; | 4980 | __le16 funcID; |
3646 | #elif defined(__LITTLE_ENDIAN) | 4981 | __le32 reserved; |
3647 | u16 msb_mac_addr; | 4982 | struct regpair address; |
3648 | u16 mid_mac_addr; | ||
3649 | #endif | ||
3650 | #if defined(__BIG_ENDIAN) | ||
3651 | u16 pfc_pause_quanta_in_nanosec; | ||
3652 | u16 lsb_mac_addr; | ||
3653 | #elif defined(__LITTLE_ENDIAN) | ||
3654 | u16 lsb_mac_addr; | ||
3655 | u16 pfc_pause_quanta_in_nanosec; | ||
3656 | #endif | ||
3657 | }; | 4983 | }; |
3658 | 4984 | ||
3659 | /* | 4985 | /* |
3660 | * Per-port congestion management variables | 4986 | * statistic command |
3661 | */ | 4987 | */ |
3662 | struct storm_cmng_struct_per_port { | 4988 | struct stats_query_cmd_group { |
3663 | struct storm_pfc_struct_per_port pfc_vars; | 4989 | struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; |
4990 | }; | ||
4991 | |||
4992 | |||
4993 | /* | ||
4994 | * statistic command header | ||
4995 | */ | ||
4996 | struct stats_query_header { | ||
4997 | u8 cmd_num; | ||
4998 | u8 reserved0; | ||
4999 | __le16 drv_stats_counter; | ||
5000 | __le32 reserved1; | ||
5001 | struct regpair stats_counters_addrs; | ||
5002 | }; | ||
5003 | |||
5004 | |||
5005 | /* | ||
5006 | * Types of statistics query entry | ||
5007 | */ | ||
5008 | enum stats_query_type { | ||
5009 | STATS_TYPE_QUEUE, | ||
5010 | STATS_TYPE_PORT, | ||
5011 | STATS_TYPE_PF, | ||
5012 | STATS_TYPE_TOE, | ||
5013 | STATS_TYPE_FCOE, | ||
5014 | MAX_STATS_QUERY_TYPE | ||
5015 | }; | ||
5016 | |||
5017 | |||
5018 | /* | ||
5019 | * Indicates the function status block state | ||
5020 | */ | ||
5021 | enum status_block_state { | ||
5022 | SB_DISABLED, | ||
5023 | SB_ENABLED, | ||
5024 | SB_CLEANED, | ||
5025 | MAX_STATUS_BLOCK_STATE | ||
5026 | }; | ||
5027 | |||
5028 | |||
5029 | /* | ||
5030 | * Storm IDs (including attentions for IGU related enums) | ||
5031 | */ | ||
5032 | enum storm_id { | ||
5033 | USTORM_ID, | ||
5034 | CSTORM_ID, | ||
5035 | XSTORM_ID, | ||
5036 | TSTORM_ID, | ||
5037 | ATTENTION_ID, | ||
5038 | MAX_STORM_ID | ||
5039 | }; | ||
5040 | |||
5041 | |||
5042 | /* | ||
5043 | * Traffic types used in ETS and flow control algorithms | ||
5044 | */ | ||
5045 | enum traffic_type { | ||
5046 | LLFC_TRAFFIC_TYPE_NW, | ||
5047 | LLFC_TRAFFIC_TYPE_FCOE, | ||
5048 | LLFC_TRAFFIC_TYPE_ISCSI, | ||
5049 | MAX_TRAFFIC_TYPE | ||
3664 | }; | 5050 | }; |
3665 | 5051 | ||
3666 | 5052 | ||
@@ -3715,6 +5101,16 @@ struct vf_pf_channel_data { | |||
3715 | 5101 | ||
3716 | 5102 | ||
3717 | /* | 5103 | /* |
5104 | * State of VF-PF channel | ||
5105 | */ | ||
5106 | enum vf_pf_channel_state { | ||
5107 | VF_PF_CHANNEL_STATE_READY, | ||
5108 | VF_PF_CHANNEL_STATE_WAITING_FOR_ACK, | ||
5109 | MAX_VF_PF_CHANNEL_STATE | ||
5110 | }; | ||
5111 | |||
5112 | |||
5113 | /* | ||
3718 | * zone A per-queue data | 5114 | * zone A per-queue data |
3719 | */ | 5115 | */ |
3720 | struct xstorm_queue_zone_data { | 5116 | struct xstorm_queue_zone_data { |
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h index d5399206f66e..df9f196dd6e8 100644 --- a/drivers/net/bnx2x/bnx2x_init.h +++ b/drivers/net/bnx2x/bnx2x_init.h | |||
@@ -15,98 +15,34 @@ | |||
15 | #ifndef BNX2X_INIT_H | 15 | #ifndef BNX2X_INIT_H |
16 | #define BNX2X_INIT_H | 16 | #define BNX2X_INIT_H |
17 | 17 | ||
18 | /* RAM0 size in bytes */ | ||
19 | #define STORM_INTMEM_SIZE_E1 0x5800 | ||
20 | #define STORM_INTMEM_SIZE_E1H 0x10000 | ||
21 | #define STORM_INTMEM_SIZE(bp) ((CHIP_IS_E1(bp) ? STORM_INTMEM_SIZE_E1 : \ | ||
22 | STORM_INTMEM_SIZE_E1H) / 4) | ||
23 | |||
24 | |||
25 | /* Init operation types and structures */ | 18 | /* Init operation types and structures */ |
26 | /* Common for both E1 and E1H */ | 19 | enum { |
27 | #define OP_RD 0x1 /* read single register */ | 20 | OP_RD = 0x1, /* read a single register */ |
28 | #define OP_WR 0x2 /* write single register */ | 21 | OP_WR, /* write a single register */ |
29 | #define OP_IW 0x3 /* write single register using mailbox */ | 22 | OP_SW, /* copy a string to the device */ |
30 | #define OP_SW 0x4 /* copy a string to the device */ | 23 | OP_ZR, /* clear memory */ |
31 | #define OP_SI 0x5 /* copy a string using mailbox */ | 24 | OP_ZP, /* unzip then copy with DMAE */ |
32 | #define OP_ZR 0x6 /* clear memory */ | 25 | OP_WR_64, /* write 64 bit pattern */ |
33 | #define OP_ZP 0x7 /* unzip then copy with DMAE */ | 26 | OP_WB, /* copy a string using DMAE */ |
34 | #define OP_WR_64 0x8 /* write 64 bit pattern */ | 27 | OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */ |
35 | #define OP_WB 0x9 /* copy a string using DMAE */ | 28 | /* Skip the following ops if all of the init modes don't match */ |
36 | 29 | OP_IF_MODE_OR, | |
37 | /* FPGA and EMUL specific operations */ | 30 | /* Skip the following ops if any of the init modes don't match */ |
38 | #define OP_WR_EMUL 0xa /* write single register on Emulation */ | 31 | OP_IF_MODE_AND, |
39 | #define OP_WR_FPGA 0xb /* write single register on FPGA */ | 32 | OP_MAX |
40 | #define OP_WR_ASIC 0xc /* write single register on ASIC */ | 33 | }; |
41 | |||
42 | /* Init stages */ | ||
43 | /* Never reorder stages !!! */ | ||
44 | #define COMMON_STAGE 0 | ||
45 | #define PORT0_STAGE 1 | ||
46 | #define PORT1_STAGE 2 | ||
47 | #define FUNC0_STAGE 3 | ||
48 | #define FUNC1_STAGE 4 | ||
49 | #define FUNC2_STAGE 5 | ||
50 | #define FUNC3_STAGE 6 | ||
51 | #define FUNC4_STAGE 7 | ||
52 | #define FUNC5_STAGE 8 | ||
53 | #define FUNC6_STAGE 9 | ||
54 | #define FUNC7_STAGE 10 | ||
55 | #define STAGE_IDX_MAX 11 | ||
56 | |||
57 | #define STAGE_START 0 | ||
58 | #define STAGE_END 1 | ||
59 | |||
60 | |||
61 | /* Indices of blocks */ | ||
62 | #define PRS_BLOCK 0 | ||
63 | #define SRCH_BLOCK 1 | ||
64 | #define TSDM_BLOCK 2 | ||
65 | #define TCM_BLOCK 3 | ||
66 | #define BRB1_BLOCK 4 | ||
67 | #define TSEM_BLOCK 5 | ||
68 | #define PXPCS_BLOCK 6 | ||
69 | #define EMAC0_BLOCK 7 | ||
70 | #define EMAC1_BLOCK 8 | ||
71 | #define DBU_BLOCK 9 | ||
72 | #define MISC_BLOCK 10 | ||
73 | #define DBG_BLOCK 11 | ||
74 | #define NIG_BLOCK 12 | ||
75 | #define MCP_BLOCK 13 | ||
76 | #define UPB_BLOCK 14 | ||
77 | #define CSDM_BLOCK 15 | ||
78 | #define USDM_BLOCK 16 | ||
79 | #define CCM_BLOCK 17 | ||
80 | #define UCM_BLOCK 18 | ||
81 | #define USEM_BLOCK 19 | ||
82 | #define CSEM_BLOCK 20 | ||
83 | #define XPB_BLOCK 21 | ||
84 | #define DQ_BLOCK 22 | ||
85 | #define TIMERS_BLOCK 23 | ||
86 | #define XSDM_BLOCK 24 | ||
87 | #define QM_BLOCK 25 | ||
88 | #define PBF_BLOCK 26 | ||
89 | #define XCM_BLOCK 27 | ||
90 | #define XSEM_BLOCK 28 | ||
91 | #define CDU_BLOCK 29 | ||
92 | #define DMAE_BLOCK 30 | ||
93 | #define PXP_BLOCK 31 | ||
94 | #define CFC_BLOCK 32 | ||
95 | #define HC_BLOCK 33 | ||
96 | #define PXP2_BLOCK 34 | ||
97 | #define MISC_AEU_BLOCK 35 | ||
98 | #define PGLUE_B_BLOCK 36 | ||
99 | #define IGU_BLOCK 37 | ||
100 | #define ATC_BLOCK 38 | ||
101 | #define QM_4PORT_BLOCK 39 | ||
102 | #define XSEM_4PORT_BLOCK 40 | ||
103 | 34 | ||
35 | enum { | ||
36 | STAGE_START, | ||
37 | STAGE_END, | ||
38 | }; | ||
104 | 39 | ||
105 | /* Returns the index of start or end of a specific block stage in ops array*/ | 40 | /* Returns the index of start or end of a specific block stage in ops array*/ |
106 | #define BLOCK_OPS_IDX(block, stage, end) \ | 41 | #define BLOCK_OPS_IDX(block, stage, end) \ |
107 | (2*(((block)*STAGE_IDX_MAX) + (stage)) + (end)) | 42 | (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) |
108 | 43 | ||
109 | 44 | ||
45 | /* structs for the various opcodes */ | ||
110 | struct raw_op { | 46 | struct raw_op { |
111 | u32 op:8; | 47 | u32 op:8; |
112 | u32 offset:24; | 48 | u32 offset:24; |
@@ -116,7 +52,7 @@ struct raw_op { | |||
116 | struct op_read { | 52 | struct op_read { |
117 | u32 op:8; | 53 | u32 op:8; |
118 | u32 offset:24; | 54 | u32 offset:24; |
119 | u32 pad; | 55 | u32 val; |
120 | }; | 56 | }; |
121 | 57 | ||
122 | struct op_write { | 58 | struct op_write { |
@@ -125,15 +61,15 @@ struct op_write { | |||
125 | u32 val; | 61 | u32 val; |
126 | }; | 62 | }; |
127 | 63 | ||
128 | struct op_string_write { | 64 | struct op_arr_write { |
129 | u32 op:8; | 65 | u32 op:8; |
130 | u32 offset:24; | 66 | u32 offset:24; |
131 | #ifdef __LITTLE_ENDIAN | 67 | #ifdef __BIG_ENDIAN |
132 | u16 data_off; | ||
133 | u16 data_len; | ||
134 | #else /* __BIG_ENDIAN */ | ||
135 | u16 data_len; | 68 | u16 data_len; |
136 | u16 data_off; | 69 | u16 data_off; |
70 | #else /* __LITTLE_ENDIAN */ | ||
71 | u16 data_off; | ||
72 | u16 data_len; | ||
137 | #endif | 73 | #endif |
138 | }; | 74 | }; |
139 | 75 | ||
@@ -143,14 +79,210 @@ struct op_zero { | |||
143 | u32 len; | 79 | u32 len; |
144 | }; | 80 | }; |
145 | 81 | ||
82 | struct op_if_mode { | ||
83 | u32 op:8; | ||
84 | u32 cmd_offset:24; | ||
85 | u32 mode_bit_map; | ||
86 | }; | ||
87 | |||
88 | |||
146 | union init_op { | 89 | union init_op { |
147 | struct op_read read; | 90 | struct op_read read; |
148 | struct op_write write; | 91 | struct op_write write; |
149 | struct op_string_write str_wr; | 92 | struct op_arr_write arr_wr; |
150 | struct op_zero zero; | 93 | struct op_zero zero; |
151 | struct raw_op raw; | 94 | struct raw_op raw; |
95 | struct op_if_mode if_mode; | ||
96 | }; | ||
97 | |||
98 | |||
99 | /* Init Phases */ | ||
100 | enum { | ||
101 | PHASE_COMMON, | ||
102 | PHASE_PORT0, | ||
103 | PHASE_PORT1, | ||
104 | PHASE_PF0, | ||
105 | PHASE_PF1, | ||
106 | PHASE_PF2, | ||
107 | PHASE_PF3, | ||
108 | PHASE_PF4, | ||
109 | PHASE_PF5, | ||
110 | PHASE_PF6, | ||
111 | PHASE_PF7, | ||
112 | NUM_OF_INIT_PHASES | ||
152 | }; | 113 | }; |
153 | 114 | ||
115 | /* Init Modes */ | ||
116 | enum { | ||
117 | MODE_ASIC = 0x00000001, | ||
118 | MODE_FPGA = 0x00000002, | ||
119 | MODE_EMUL = 0x00000004, | ||
120 | MODE_E2 = 0x00000008, | ||
121 | MODE_E3 = 0x00000010, | ||
122 | MODE_PORT2 = 0x00000020, | ||
123 | MODE_PORT4 = 0x00000040, | ||
124 | MODE_SF = 0x00000080, | ||
125 | MODE_MF = 0x00000100, | ||
126 | MODE_MF_SD = 0x00000200, | ||
127 | MODE_MF_SI = 0x00000400, | ||
128 | MODE_MF_NIV = 0x00000800, | ||
129 | MODE_E3_A0 = 0x00001000, | ||
130 | MODE_E3_B0 = 0x00002000, | ||
131 | MODE_COS_BC = 0x00004000, | ||
132 | MODE_COS3 = 0x00008000, | ||
133 | MODE_COS6 = 0x00010000, | ||
134 | MODE_LITTLE_ENDIAN = 0x00020000, | ||
135 | MODE_BIG_ENDIAN = 0x00040000, | ||
136 | }; | ||
137 | |||
138 | /* Init Blocks */ | ||
139 | enum { | ||
140 | BLOCK_ATC, | ||
141 | BLOCK_BRB1, | ||
142 | BLOCK_CCM, | ||
143 | BLOCK_CDU, | ||
144 | BLOCK_CFC, | ||
145 | BLOCK_CSDM, | ||
146 | BLOCK_CSEM, | ||
147 | BLOCK_DBG, | ||
148 | BLOCK_DMAE, | ||
149 | BLOCK_DORQ, | ||
150 | BLOCK_HC, | ||
151 | BLOCK_IGU, | ||
152 | BLOCK_MISC, | ||
153 | BLOCK_NIG, | ||
154 | BLOCK_PBF, | ||
155 | BLOCK_PGLUE_B, | ||
156 | BLOCK_PRS, | ||
157 | BLOCK_PXP2, | ||
158 | BLOCK_PXP, | ||
159 | BLOCK_QM, | ||
160 | BLOCK_SRC, | ||
161 | BLOCK_TCM, | ||
162 | BLOCK_TM, | ||
163 | BLOCK_TSDM, | ||
164 | BLOCK_TSEM, | ||
165 | BLOCK_UCM, | ||
166 | BLOCK_UPB, | ||
167 | BLOCK_USDM, | ||
168 | BLOCK_USEM, | ||
169 | BLOCK_XCM, | ||
170 | BLOCK_XPB, | ||
171 | BLOCK_XSDM, | ||
172 | BLOCK_XSEM, | ||
173 | BLOCK_MISC_AEU, | ||
174 | NUM_OF_INIT_BLOCKS | ||
175 | }; | ||
176 | |||
177 | /* QM queue numbers */ | ||
178 | #define BNX2X_ETH_Q 0 | ||
179 | #define BNX2X_TOE_Q 3 | ||
180 | #define BNX2X_TOE_ACK_Q 6 | ||
181 | #define BNX2X_ISCSI_Q 9 | ||
182 | #define BNX2X_ISCSI_ACK_Q 8 | ||
183 | #define BNX2X_FCOE_Q 10 | ||
184 | |||
185 | /* Vnics per mode */ | ||
186 | #define BNX2X_PORT2_MODE_NUM_VNICS 4 | ||
187 | #define BNX2X_PORT4_MODE_NUM_VNICS 2 | ||
188 | |||
189 | /* COS offset for port1 in E3 B0 4port mode */ | ||
190 | #define BNX2X_E3B0_PORT1_COS_OFFSET 3 | ||
191 | |||
192 | /* QM Register addresses */ | ||
193 | #define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\ | ||
194 | (QM_REG_QVOQIDX_0 + 4 * (pf_q_num)) | ||
195 | #define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\ | ||
196 | (QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5))) | ||
197 | #define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\ | ||
198 | (QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4)) | ||
199 | |||
200 | /* extracts the QM queue number for the specified port and vnic */ | ||
201 | #define BNX2X_PF_Q_NUM(q_num, port, vnic)\ | ||
202 | ((((port) << 1) | (vnic)) * 16 + (q_num)) | ||
203 | |||
204 | |||
205 | /* Maps the specified queue to the specified COS */ | ||
206 | static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos) | ||
207 | { | ||
208 | /* find current COS mapping */ | ||
209 | u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4); | ||
210 | |||
211 | /* check if queue->COS mapping has changed */ | ||
212 | if (curr_cos != new_cos) { | ||
213 | u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS; | ||
214 | u32 reg_addr, reg_bit_map, vnic; | ||
215 | |||
216 | /* update parameters for 4port mode */ | ||
217 | if (INIT_MODE_FLAGS(bp) & MODE_PORT4) { | ||
218 | num_vnics = BNX2X_PORT4_MODE_NUM_VNICS; | ||
219 | if (BP_PORT(bp)) { | ||
220 | curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET; | ||
221 | new_cos += BNX2X_E3B0_PORT1_COS_OFFSET; | ||
222 | } | ||
223 | } | ||
224 | |||
225 | /* change queue mapping for each VNIC */ | ||
226 | for (vnic = 0; vnic < num_vnics; vnic++) { | ||
227 | u32 pf_q_num = | ||
228 | BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic); | ||
229 | u32 q_bit_map = 1 << (pf_q_num & 0x1f); | ||
230 | |||
231 | /* overwrite queue->VOQ mapping */ | ||
232 | REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos); | ||
233 | |||
234 | /* clear queue bit from current COS bit map */ | ||
235 | reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num); | ||
236 | reg_bit_map = REG_RD(bp, reg_addr); | ||
237 | REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map)); | ||
238 | |||
239 | /* set queue bit in new COS bit map */ | ||
240 | reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num); | ||
241 | reg_bit_map = REG_RD(bp, reg_addr); | ||
242 | REG_WR(bp, reg_addr, reg_bit_map | q_bit_map); | ||
243 | |||
244 | /* set/clear queue bit in command-queue bit map | ||
245 | (E2/E3A0 only, valid COS values are 0/1) */ | ||
246 | if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) { | ||
247 | reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num); | ||
248 | reg_bit_map = REG_RD(bp, reg_addr); | ||
249 | q_bit_map = 1 << (2 * (pf_q_num & 0xf)); | ||
250 | reg_bit_map = new_cos ? | ||
251 | (reg_bit_map | q_bit_map) : | ||
252 | (reg_bit_map & (~q_bit_map)); | ||
253 | REG_WR(bp, reg_addr, reg_bit_map); | ||
254 | } | ||
255 | } | ||
256 | } | ||
257 | } | ||
258 | |||
259 | /* Configures the QM according to the specified per-traffic-type COSes */ | ||
260 | static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, | ||
261 | struct priority_cos *traffic_cos) | ||
262 | { | ||
263 | bnx2x_map_q_cos(bp, BNX2X_FCOE_Q, | ||
264 | traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos); | ||
265 | bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q, | ||
266 | traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); | ||
267 | if (INIT_MODE_FLAGS(bp) & MODE_COS_BC) { | ||
268 | /* required only in backward compatible COS mode */ | ||
269 | bnx2x_map_q_cos(bp, BNX2X_ETH_Q, | ||
270 | traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); | ||
271 | bnx2x_map_q_cos(bp, BNX2X_TOE_Q, | ||
272 | traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); | ||
273 | bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q, | ||
274 | traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); | ||
275 | bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q, | ||
276 | traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); | ||
277 | } | ||
278 | } | ||
279 | |||
280 | |||
281 | /* Returns the index of start or end of a specific block stage in ops array*/ | ||
282 | #define BLOCK_OPS_IDX(block, stage, end) \ | ||
283 | (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) | ||
284 | |||
285 | |||
154 | #define INITOP_SET 0 /* set the HW directly */ | 286 | #define INITOP_SET 0 /* set the HW directly */ |
155 | #define INITOP_CLEAR 1 /* clear the HW directly */ | 287 | #define INITOP_CLEAR 1 /* clear the HW directly */ |
156 | #define INITOP_INIT 2 /* set the init-value array */ | 288 | #define INITOP_INIT 2 /* set the init-value array */ |
@@ -245,12 +377,15 @@ static const struct { | |||
245 | BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), | 377 | BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), |
246 | BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), | 378 | BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), |
247 | BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), | 379 | BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), |
380 | BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0), | ||
381 | BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0, 0xffffffff), | ||
382 | BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0, 0xffff), | ||
248 | BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff), | 383 | BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff), |
249 | BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1), | 384 | BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1), |
250 | BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff), | 385 | BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff), |
251 | BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3), | 386 | BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3), |
252 | {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, | 387 | {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, |
253 | GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0, | 388 | GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf, |
254 | {0xf, 0xf, 0xf}, "UPB"}, | 389 | {0xf, 0xf, 0xf}, "UPB"}, |
255 | {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, | 390 | {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, |
256 | GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0, | 391 | GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0, |
@@ -262,10 +397,16 @@ static const struct { | |||
262 | BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf), | 397 | BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf), |
263 | BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf), | 398 | BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf), |
264 | BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff), | 399 | BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff), |
400 | BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffffff), | ||
401 | BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f), | ||
265 | BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff), | 402 | BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff), |
266 | BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff), | 403 | BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff), |
267 | BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff), | 404 | BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff), |
268 | BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff), | 405 | BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff), |
406 | BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff), | ||
407 | BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff), | ||
408 | BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff), | ||
409 | BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff), | ||
269 | BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), | 410 | BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), |
270 | BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f), | 411 | BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f), |
271 | BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), | 412 | BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), |
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h index aafd0232393f..7ec1724753ad 100644 --- a/drivers/net/bnx2x/bnx2x_init_ops.h +++ b/drivers/net/bnx2x/bnx2x_init_ops.h | |||
@@ -15,13 +15,39 @@ | |||
15 | #ifndef BNX2X_INIT_OPS_H | 15 | #ifndef BNX2X_INIT_OPS_H |
16 | #define BNX2X_INIT_OPS_H | 16 | #define BNX2X_INIT_OPS_H |
17 | 17 | ||
18 | |||
19 | #ifndef BP_ILT | ||
20 | #define BP_ILT(bp) NULL | ||
21 | #endif | ||
22 | |||
23 | #ifndef BP_FUNC | ||
24 | #define BP_FUNC(bp) 0 | ||
25 | #endif | ||
26 | |||
27 | #ifndef BP_PORT | ||
28 | #define BP_PORT(bp) 0 | ||
29 | #endif | ||
30 | |||
31 | #ifndef BNX2X_ILT_FREE | ||
32 | #define BNX2X_ILT_FREE(x, y, sz) | ||
33 | #endif | ||
34 | |||
35 | #ifndef BNX2X_ILT_ZALLOC | ||
36 | #define BNX2X_ILT_ZALLOC(x, y, sz) | ||
37 | #endif | ||
38 | |||
39 | #ifndef ILOG2 | ||
40 | #define ILOG2(x) x | ||
41 | #endif | ||
42 | |||
18 | static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len); | 43 | static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len); |
19 | static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val); | 44 | static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val); |
20 | static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, | 45 | static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, |
21 | u32 addr, u32 len); | 46 | dma_addr_t phys_addr, u32 addr, |
47 | u32 len); | ||
22 | 48 | ||
23 | static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data, | 49 | static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, |
24 | u32 len) | 50 | const u32 *data, u32 len) |
25 | { | 51 | { |
26 | u32 i; | 52 | u32 i; |
27 | 53 | ||
@@ -29,24 +55,32 @@ static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data, | |||
29 | REG_WR(bp, addr + i*4, data[i]); | 55 | REG_WR(bp, addr + i*4, data[i]); |
30 | } | 56 | } |
31 | 57 | ||
32 | static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data, | 58 | static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, |
33 | u32 len) | 59 | const u32 *data, u32 len) |
34 | { | 60 | { |
35 | u32 i; | 61 | u32 i; |
36 | 62 | ||
37 | for (i = 0; i < len; i++) | 63 | for (i = 0; i < len; i++) |
38 | REG_WR_IND(bp, addr + i*4, data[i]); | 64 | bnx2x_reg_wr_ind(bp, addr + i*4, data[i]); |
39 | } | 65 | } |
40 | 66 | ||
41 | static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len) | 67 | static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len, |
68 | u8 wb) | ||
42 | { | 69 | { |
43 | if (bp->dmae_ready) | 70 | if (bp->dmae_ready) |
44 | bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); | 71 | bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); |
72 | else if (wb) | ||
73 | /* | ||
74 | * Wide bus registers with no dmae need to be written | ||
75 | * using indirect write. | ||
76 | */ | ||
77 | bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); | ||
45 | else | 78 | else |
46 | bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); | 79 | bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); |
47 | } | 80 | } |
48 | 81 | ||
49 | static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) | 82 | static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, |
83 | u32 len, u8 wb) | ||
50 | { | 84 | { |
51 | u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4)); | 85 | u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4)); |
52 | u32 buf_len32 = buf_len/4; | 86 | u32 buf_len32 = buf_len/4; |
@@ -57,12 +91,20 @@ static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) | |||
57 | for (i = 0; i < len; i += buf_len32) { | 91 | for (i = 0; i < len; i += buf_len32) { |
58 | u32 cur_len = min(buf_len32, len - i); | 92 | u32 cur_len = min(buf_len32, len - i); |
59 | 93 | ||
60 | bnx2x_write_big_buf(bp, addr + i*4, cur_len); | 94 | bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb); |
61 | } | 95 | } |
62 | } | 96 | } |
63 | 97 | ||
64 | static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data, | 98 | static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len) |
65 | u32 len64) | 99 | { |
100 | if (bp->dmae_ready) | ||
101 | bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); | ||
102 | else | ||
103 | bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); | ||
104 | } | ||
105 | |||
106 | static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, | ||
107 | const u32 *data, u32 len64) | ||
66 | { | 108 | { |
67 | u32 buf_len32 = FW_BUF_SIZE/4; | 109 | u32 buf_len32 = FW_BUF_SIZE/4; |
68 | u32 len = len64*2; | 110 | u32 len = len64*2; |
@@ -82,7 +124,7 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data, | |||
82 | for (i = 0; i < len; i += buf_len32) { | 124 | for (i = 0; i < len; i += buf_len32) { |
83 | u32 cur_len = min(buf_len32, len - i); | 125 | u32 cur_len = min(buf_len32, len - i); |
84 | 126 | ||
85 | bnx2x_write_big_buf(bp, addr + i*4, cur_len); | 127 | bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len); |
86 | } | 128 | } |
87 | } | 129 | } |
88 | 130 | ||
@@ -100,7 +142,8 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data, | |||
100 | #define IF_IS_PRAM_ADDR(base, addr) \ | 142 | #define IF_IS_PRAM_ADDR(base, addr) \ |
101 | if (((base) <= (addr)) && ((base) + 0x40000 >= (addr))) | 143 | if (((base) <= (addr)) && ((base) + 0x40000 >= (addr))) |
102 | 144 | ||
103 | static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data) | 145 | static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, |
146 | const u8 *data) | ||
104 | { | 147 | { |
105 | IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr) | 148 | IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr) |
106 | data = INIT_TSEM_INT_TABLE_DATA(bp); | 149 | data = INIT_TSEM_INT_TABLE_DATA(bp); |
@@ -129,31 +172,17 @@ static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data) | |||
129 | return data; | 172 | return data; |
130 | } | 173 | } |
131 | 174 | ||
132 | static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len) | 175 | static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, |
176 | const u32 *data, u32 len) | ||
133 | { | 177 | { |
134 | if (bp->dmae_ready) | 178 | if (bp->dmae_ready) |
135 | bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); | 179 | VIRT_WR_DMAE_LEN(bp, data, addr, len, 0); |
136 | else | 180 | else |
137 | bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); | ||
138 | } | ||
139 | |||
140 | static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data, | ||
141 | u32 len) | ||
142 | { | ||
143 | const u32 *old_data = data; | ||
144 | |||
145 | data = (const u32 *)bnx2x_sel_blob(bp, addr, (const u8 *)data); | ||
146 | |||
147 | if (bp->dmae_ready) { | ||
148 | if (old_data != data) | ||
149 | VIRT_WR_DMAE_LEN(bp, data, addr, len, 1); | ||
150 | else | ||
151 | VIRT_WR_DMAE_LEN(bp, data, addr, len, 0); | ||
152 | } else | ||
153 | bnx2x_init_ind_wr(bp, addr, data, len); | 181 | bnx2x_init_ind_wr(bp, addr, data, len); |
154 | } | 182 | } |
155 | 183 | ||
156 | static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi) | 184 | static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, |
185 | u32 val_hi) | ||
157 | { | 186 | { |
158 | u32 wb_write[2]; | 187 | u32 wb_write[2]; |
159 | 188 | ||
@@ -161,8 +190,8 @@ static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi) | |||
161 | wb_write[1] = val_hi; | 190 | wb_write[1] = val_hi; |
162 | REG_WR_DMAE_LEN(bp, reg, wb_write, 2); | 191 | REG_WR_DMAE_LEN(bp, reg, wb_write, 2); |
163 | } | 192 | } |
164 | 193 | static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, | |
165 | static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off) | 194 | u32 blob_off) |
166 | { | 195 | { |
167 | const u8 *data = NULL; | 196 | const u8 *data = NULL; |
168 | int rc; | 197 | int rc; |
@@ -186,39 +215,33 @@ static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off) | |||
186 | static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage) | 215 | static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage) |
187 | { | 216 | { |
188 | u16 op_start = | 217 | u16 op_start = |
189 | INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_START)]; | 218 | INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, |
219 | STAGE_START)]; | ||
190 | u16 op_end = | 220 | u16 op_end = |
191 | INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_END)]; | 221 | INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, |
222 | STAGE_END)]; | ||
192 | union init_op *op; | 223 | union init_op *op; |
193 | int hw_wr; | 224 | u32 op_idx, op_type, addr, len; |
194 | u32 i, op_type, addr, len; | ||
195 | const u32 *data, *data_base; | 225 | const u32 *data, *data_base; |
196 | 226 | ||
197 | /* If empty block */ | 227 | /* If empty block */ |
198 | if (op_start == op_end) | 228 | if (op_start == op_end) |
199 | return; | 229 | return; |
200 | 230 | ||
201 | if (CHIP_REV_IS_FPGA(bp)) | ||
202 | hw_wr = OP_WR_FPGA; | ||
203 | else if (CHIP_REV_IS_EMUL(bp)) | ||
204 | hw_wr = OP_WR_EMUL; | ||
205 | else | ||
206 | hw_wr = OP_WR_ASIC; | ||
207 | |||
208 | data_base = INIT_DATA(bp); | 231 | data_base = INIT_DATA(bp); |
209 | 232 | ||
210 | for (i = op_start; i < op_end; i++) { | 233 | for (op_idx = op_start; op_idx < op_end; op_idx++) { |
211 | |||
212 | op = (union init_op *)&(INIT_OPS(bp)[i]); | ||
213 | 234 | ||
214 | op_type = op->str_wr.op; | 235 | op = (union init_op *)&(INIT_OPS(bp)[op_idx]); |
215 | addr = op->str_wr.offset; | 236 | /* Get generic data */ |
216 | len = op->str_wr.data_len; | 237 | op_type = op->raw.op; |
217 | data = data_base + op->str_wr.data_off; | 238 | addr = op->raw.offset; |
218 | 239 | /* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and | |
219 | /* HW/EMUL specific */ | 240 | * OP_WR64 (we assume that op_arr_write and op_write have the |
220 | if ((op_type > OP_WB) && (op_type == hw_wr)) | 241 | * same structure). |
221 | op_type = OP_WR; | 242 | */ |
243 | len = op->arr_wr.data_len; | ||
244 | data = data_base + op->arr_wr.data_off; | ||
222 | 245 | ||
223 | switch (op_type) { | 246 | switch (op_type) { |
224 | case OP_RD: | 247 | case OP_RD: |
@@ -233,21 +256,39 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage) | |||
233 | case OP_WB: | 256 | case OP_WB: |
234 | bnx2x_init_wr_wb(bp, addr, data, len); | 257 | bnx2x_init_wr_wb(bp, addr, data, len); |
235 | break; | 258 | break; |
236 | case OP_SI: | ||
237 | bnx2x_init_ind_wr(bp, addr, data, len); | ||
238 | break; | ||
239 | case OP_ZR: | 259 | case OP_ZR: |
240 | bnx2x_init_fill(bp, addr, 0, op->zero.len); | 260 | bnx2x_init_fill(bp, addr, 0, op->zero.len, 0); |
261 | break; | ||
262 | case OP_WB_ZR: | ||
263 | bnx2x_init_fill(bp, addr, 0, op->zero.len, 1); | ||
241 | break; | 264 | break; |
242 | case OP_ZP: | 265 | case OP_ZP: |
243 | bnx2x_init_wr_zp(bp, addr, len, | 266 | bnx2x_init_wr_zp(bp, addr, len, |
244 | op->str_wr.data_off); | 267 | op->arr_wr.data_off); |
245 | break; | 268 | break; |
246 | case OP_WR_64: | 269 | case OP_WR_64: |
247 | bnx2x_init_wr_64(bp, addr, data, len); | 270 | bnx2x_init_wr_64(bp, addr, data, len); |
248 | break; | 271 | break; |
272 | case OP_IF_MODE_AND: | ||
273 | /* if any of the flags doesn't match, skip the | ||
274 | * conditional block. | ||
275 | */ | ||
276 | if ((INIT_MODE_FLAGS(bp) & | ||
277 | op->if_mode.mode_bit_map) != | ||
278 | op->if_mode.mode_bit_map) | ||
279 | op_idx += op->if_mode.cmd_offset; | ||
280 | break; | ||
281 | case OP_IF_MODE_OR: | ||
282 | /* if all the flags don't match, skip the conditional | ||
283 | * block. | ||
284 | */ | ||
285 | if ((INIT_MODE_FLAGS(bp) & | ||
286 | op->if_mode.mode_bit_map) == 0) | ||
287 | op_idx += op->if_mode.cmd_offset; | ||
288 | break; | ||
249 | default: | 289 | default: |
250 | /* happens whenever an op is of a diff HW */ | 290 | /* Should never get here! */ |
291 | |||
251 | break; | 292 | break; |
252 | } | 293 | } |
253 | } | 294 | } |
@@ -417,7 +458,8 @@ static const struct arb_line write_arb_addr[NUM_WR_Q-1] = { | |||
417 | PXP2_REG_RQ_BW_WR_UBOUND30} | 458 | PXP2_REG_RQ_BW_WR_UBOUND30} |
418 | }; | 459 | }; |
419 | 460 | ||
420 | static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order) | 461 | static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, |
462 | int w_order) | ||
421 | { | 463 | { |
422 | u32 val, i; | 464 | u32 val, i; |
423 | 465 | ||
@@ -491,19 +533,21 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order) | |||
491 | if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD)) | 533 | if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD)) |
492 | REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00); | 534 | REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00); |
493 | 535 | ||
494 | if (CHIP_IS_E2(bp)) | 536 | if (CHIP_IS_E3(bp)) |
537 | REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order)); | ||
538 | else if (CHIP_IS_E2(bp)) | ||
495 | REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order)); | 539 | REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order)); |
496 | else | 540 | else |
497 | REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); | 541 | REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); |
498 | 542 | ||
499 | if (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) { | 543 | if (!CHIP_IS_E1(bp)) { |
500 | /* MPS w_order optimal TH presently TH | 544 | /* MPS w_order optimal TH presently TH |
501 | * 128 0 0 2 | 545 | * 128 0 0 2 |
502 | * 256 1 1 3 | 546 | * 256 1 1 3 |
503 | * >=512 2 2 3 | 547 | * >=512 2 2 3 |
504 | */ | 548 | */ |
505 | /* DMAE is special */ | 549 | /* DMAE is special */ |
506 | if (CHIP_IS_E2(bp)) { | 550 | if (!CHIP_IS_E1H(bp)) { |
507 | /* E2 can use optimal TH */ | 551 | /* E2 can use optimal TH */ |
508 | val = w_order; | 552 | val = w_order; |
509 | REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val); | 553 | REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val); |
@@ -557,8 +601,8 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order) | |||
557 | #define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) | 601 | #define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) |
558 | #define ILT_RANGE(f, l) (((l) << 10) | f) | 602 | #define ILT_RANGE(f, l) (((l) << 10) | f) |
559 | 603 | ||
560 | static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line, | 604 | static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, |
561 | u32 size, u8 memop) | 605 | struct ilt_line *line, u32 size, u8 memop) |
562 | { | 606 | { |
563 | if (memop == ILT_MEMOP_FREE) { | 607 | if (memop == ILT_MEMOP_FREE) { |
564 | BNX2X_ILT_FREE(line->page, line->page_mapping, line->size); | 608 | BNX2X_ILT_FREE(line->page, line->page_mapping, line->size); |
@@ -572,7 +616,8 @@ static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line, | |||
572 | } | 616 | } |
573 | 617 | ||
574 | 618 | ||
575 | static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop) | 619 | static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, |
620 | u8 memop) | ||
576 | { | 621 | { |
577 | int i, rc; | 622 | int i, rc; |
578 | struct bnx2x_ilt *ilt = BP_ILT(bp); | 623 | struct bnx2x_ilt *ilt = BP_ILT(bp); |
@@ -617,8 +662,8 @@ static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx, | |||
617 | bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping)); | 662 | bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping)); |
618 | } | 663 | } |
619 | 664 | ||
620 | static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt, | 665 | static void bnx2x_ilt_line_init_op(struct bnx2x *bp, |
621 | int idx, u8 initop) | 666 | struct bnx2x_ilt *ilt, int idx, u8 initop) |
622 | { | 667 | { |
623 | dma_addr_t null_mapping; | 668 | dma_addr_t null_mapping; |
624 | int abs_idx = ilt->start_line + idx; | 669 | int abs_idx = ilt->start_line + idx; |
@@ -733,7 +778,7 @@ static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop) | |||
733 | } | 778 | } |
734 | 779 | ||
735 | static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num, | 780 | static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num, |
736 | u32 psz_reg, u8 initop) | 781 | u32 psz_reg, u8 initop) |
737 | { | 782 | { |
738 | struct bnx2x_ilt *ilt = BP_ILT(bp); | 783 | struct bnx2x_ilt *ilt = BP_ILT(bp); |
739 | struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; | 784 | struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; |
@@ -848,7 +893,8 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, | |||
848 | 893 | ||
849 | /* Initialize T2 */ | 894 | /* Initialize T2 */ |
850 | for (i = 0; i < src_cid_count-1; i++) | 895 | for (i = 0; i < src_cid_count-1; i++) |
851 | t2[i].next = (u64)(t2_mapping + (i+1)*sizeof(struct src_ent)); | 896 | t2[i].next = (u64)(t2_mapping + |
897 | (i+1)*sizeof(struct src_ent)); | ||
852 | 898 | ||
853 | /* tell the searcher where the T2 table is */ | 899 | /* tell the searcher where the T2 table is */ |
854 | REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count); | 900 | REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count); |
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c index 076e11f5769f..bcd8f0038628 100644 --- a/drivers/net/bnx2x/bnx2x_link.c +++ b/drivers/net/bnx2x/bnx2x_link.c | |||
@@ -25,6 +25,8 @@ | |||
25 | #include <linux/mutex.h> | 25 | #include <linux/mutex.h> |
26 | 26 | ||
27 | #include "bnx2x.h" | 27 | #include "bnx2x.h" |
28 | #include "bnx2x_cmn.h" | ||
29 | |||
28 | 30 | ||
29 | /********************************************************/ | 31 | /********************************************************/ |
30 | #define ETH_HLEN 14 | 32 | #define ETH_HLEN 14 |
@@ -35,6 +37,13 @@ | |||
35 | #define ETH_MAX_JUMBO_PACKET_SIZE 9600 | 37 | #define ETH_MAX_JUMBO_PACKET_SIZE 9600 |
36 | #define MDIO_ACCESS_TIMEOUT 1000 | 38 | #define MDIO_ACCESS_TIMEOUT 1000 |
37 | #define BMAC_CONTROL_RX_ENABLE 2 | 39 | #define BMAC_CONTROL_RX_ENABLE 2 |
40 | #define WC_LANE_MAX 4 | ||
41 | #define I2C_SWITCH_WIDTH 2 | ||
42 | #define I2C_BSC0 0 | ||
43 | #define I2C_BSC1 1 | ||
44 | #define I2C_WA_RETRY_CNT 3 | ||
45 | #define MCPR_IMC_COMMAND_READ_OP 1 | ||
46 | #define MCPR_IMC_COMMAND_WRITE_OP 2 | ||
38 | 47 | ||
39 | /***********************************************************/ | 48 | /***********************************************************/ |
40 | /* Shortcut definitions */ | 49 | /* Shortcut definitions */ |
@@ -103,16 +112,13 @@ | |||
103 | MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG | 112 | MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG |
104 | #define GP_STATUS_10G_CX4 \ | 113 | #define GP_STATUS_10G_CX4 \ |
105 | MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 | 114 | MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 |
106 | #define GP_STATUS_12G_HIG \ | ||
107 | MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG | ||
108 | #define GP_STATUS_12_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G | ||
109 | #define GP_STATUS_13G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G | ||
110 | #define GP_STATUS_15G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G | ||
111 | #define GP_STATUS_16G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G | ||
112 | #define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX | 115 | #define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX |
113 | #define GP_STATUS_10G_KX4 \ | 116 | #define GP_STATUS_10G_KX4 \ |
114 | MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 | 117 | MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 |
115 | 118 | #define GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR | |
119 | #define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI | ||
120 | #define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS | ||
121 | #define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI | ||
116 | #define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD | 122 | #define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD |
117 | #define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD | 123 | #define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD |
118 | #define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD | 124 | #define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD |
@@ -126,20 +132,10 @@ | |||
126 | #define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD | 132 | #define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD |
127 | #define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD | 133 | #define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD |
128 | #define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD | 134 | #define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD |
129 | #define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD | 135 | #define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD |
130 | #define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD | 136 | #define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD |
131 | #define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD | 137 | |
132 | #define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD | 138 | |
133 | #define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD | ||
134 | #define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD | ||
135 | #define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD | ||
136 | #define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD | ||
137 | #define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD | ||
138 | #define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD | ||
139 | |||
140 | #define PHY_XGXS_FLAG 0x1 | ||
141 | #define PHY_SGMII_FLAG 0x2 | ||
142 | #define PHY_SERDES_FLAG 0x4 | ||
143 | 139 | ||
144 | /* */ | 140 | /* */ |
145 | #define SFP_EEPROM_CON_TYPE_ADDR 0x2 | 141 | #define SFP_EEPROM_CON_TYPE_ADDR 0x2 |
@@ -165,8 +161,104 @@ | |||
165 | #define EDC_MODE_PASSIVE_DAC 0x0055 | 161 | #define EDC_MODE_PASSIVE_DAC 0x0055 |
166 | 162 | ||
167 | 163 | ||
164 | /* BRB thresholds for E2*/ | ||
165 | #define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170 | ||
166 | #define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 | ||
167 | |||
168 | #define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE 250 | ||
169 | #define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 | ||
170 | |||
171 | #define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE 10 | ||
172 | #define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 90 | ||
173 | |||
174 | #define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50 | ||
175 | #define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250 | ||
176 | |||
177 | /* BRB thresholds for E3A0 */ | ||
178 | #define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290 | ||
179 | #define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 | ||
180 | |||
181 | #define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE 410 | ||
182 | #define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 | ||
183 | |||
184 | #define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE 10 | ||
185 | #define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 170 | ||
186 | |||
187 | #define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50 | ||
188 | #define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410 | ||
189 | |||
190 | |||
191 | /* BRB thresholds for E3B0 2 port mode*/ | ||
192 | #define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025 | ||
193 | #define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 | ||
194 | |||
195 | #define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE 1025 | ||
196 | #define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 | ||
197 | |||
198 | #define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE 10 | ||
199 | #define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 1025 | ||
200 | |||
201 | #define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE 50 | ||
202 | #define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE 1025 | ||
203 | |||
204 | /* only for E3B0*/ | ||
205 | #define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR 1025 | ||
206 | #define PFC_E3B0_2P_BRB_FULL_LB_XON_THR 1025 | ||
207 | |||
208 | /* Lossy +Lossless GUARANTIED == GUART */ | ||
209 | #define PFC_E3B0_2P_MIX_PAUSE_LB_GUART 284 | ||
210 | /* Lossless +Lossless*/ | ||
211 | #define PFC_E3B0_2P_PAUSE_LB_GUART 236 | ||
212 | /* Lossy +Lossy*/ | ||
213 | #define PFC_E3B0_2P_NON_PAUSE_LB_GUART 342 | ||
214 | |||
215 | /* Lossy +Lossless*/ | ||
216 | #define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART 284 | ||
217 | /* Lossless +Lossless*/ | ||
218 | #define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART 236 | ||
219 | /* Lossy +Lossy*/ | ||
220 | #define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART 336 | ||
221 | #define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST 80 | ||
222 | |||
223 | #define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART 0 | ||
224 | #define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST 0 | ||
225 | |||
226 | /* BRB thresholds for E3B0 4 port mode */ | ||
227 | #define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 304 | ||
228 | #define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 | ||
229 | |||
230 | #define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE 384 | ||
231 | #define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 | ||
232 | |||
233 | #define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE 10 | ||
234 | #define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 304 | ||
235 | |||
236 | #define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50 | ||
237 | #define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384 | ||
238 | |||
239 | |||
240 | /* only for E3B0*/ | ||
241 | #define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304 | ||
242 | #define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384 | ||
243 | #define PFC_E3B0_4P_LB_GUART 120 | ||
244 | |||
245 | #define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120 | ||
246 | #define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80 | ||
247 | |||
248 | #define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80 | ||
249 | #define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120 | ||
250 | |||
251 | #define DCBX_INVALID_COS (0xFF) | ||
252 | |||
168 | #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000) | 253 | #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000) |
169 | #define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000) | 254 | #define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000) |
255 | #define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360) | ||
256 | #define ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720) | ||
257 | #define ETS_E3B0_PBF_MIN_W_VAL (10000) | ||
258 | |||
259 | #define MAX_PACKET_SIZE (9700) | ||
260 | #define WC_UC_TIMEOUT 100 | ||
261 | |||
170 | /**********************************************************/ | 262 | /**********************************************************/ |
171 | /* INTERFACE */ | 263 | /* INTERFACE */ |
172 | /**********************************************************/ | 264 | /**********************************************************/ |
@@ -202,14 +294,86 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits) | |||
202 | } | 294 | } |
203 | 295 | ||
204 | /******************************************************************/ | 296 | /******************************************************************/ |
297 | /* EPIO/GPIO section */ | ||
298 | /******************************************************************/ | ||
299 | static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en) | ||
300 | { | ||
301 | u32 epio_mask, gp_oenable; | ||
302 | *en = 0; | ||
303 | /* Sanity check */ | ||
304 | if (epio_pin > 31) { | ||
305 | DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin); | ||
306 | return; | ||
307 | } | ||
308 | |||
309 | epio_mask = 1 << epio_pin; | ||
310 | /* Set this EPIO to output */ | ||
311 | gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE); | ||
312 | REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask); | ||
313 | |||
314 | *en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin; | ||
315 | } | ||
316 | static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en) | ||
317 | { | ||
318 | u32 epio_mask, gp_output, gp_oenable; | ||
319 | |||
320 | /* Sanity check */ | ||
321 | if (epio_pin > 31) { | ||
322 | DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin); | ||
323 | return; | ||
324 | } | ||
325 | DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en); | ||
326 | epio_mask = 1 << epio_pin; | ||
327 | /* Set this EPIO to output */ | ||
328 | gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS); | ||
329 | if (en) | ||
330 | gp_output |= epio_mask; | ||
331 | else | ||
332 | gp_output &= ~epio_mask; | ||
333 | |||
334 | REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output); | ||
335 | |||
336 | /* Set the value for this EPIO */ | ||
337 | gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE); | ||
338 | REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask); | ||
339 | } | ||
340 | |||
341 | static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val) | ||
342 | { | ||
343 | if (pin_cfg == PIN_CFG_NA) | ||
344 | return; | ||
345 | if (pin_cfg >= PIN_CFG_EPIO0) { | ||
346 | bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val); | ||
347 | } else { | ||
348 | u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3; | ||
349 | u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2; | ||
350 | bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port); | ||
351 | } | ||
352 | } | ||
353 | |||
354 | static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val) | ||
355 | { | ||
356 | if (pin_cfg == PIN_CFG_NA) | ||
357 | return -EINVAL; | ||
358 | if (pin_cfg >= PIN_CFG_EPIO0) { | ||
359 | bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val); | ||
360 | } else { | ||
361 | u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3; | ||
362 | u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2; | ||
363 | *val = bnx2x_get_gpio(bp, gpio_num, gpio_port); | ||
364 | } | ||
365 | return 0; | ||
366 | |||
367 | } | ||
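For orientation, a minimal usage sketch of the pin-config helpers above; the wrapper function, its name and the debug print are illustrative assumptions, not part of this patch:

	static void example_cfg_pin_toggle(struct bnx2x *bp, u32 pin_cfg)
	{
		u32 val = 0;

		/* Drive the pin high, then read the value back */
		bnx2x_set_cfg_pin(bp, pin_cfg, 1);
		if (bnx2x_get_cfg_pin(bp, pin_cfg, &val) == 0)
			DP(NETIF_MSG_LINK, "cfg pin readback %u\n", val);
	}

pin_cfg values below PIN_CFG_EPIO0 are routed to the GPIO helpers, values from PIN_CFG_EPIO0 upward to the EPIO helpers, and PIN_CFG_NA is ignored.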
368 | /******************************************************************/ | ||
205 | /* ETS section */ | 369 | /* ETS section */ |
206 | /******************************************************************/ | 370 | /******************************************************************/ |
207 | void bnx2x_ets_disabled(struct link_params *params) | 371 | static void bnx2x_ets_e2e3a0_disabled(struct link_params *params) |
208 | { | 372 | { |
209 | /* ETS disabled configuration*/ | 373 | /* ETS disabled configuration*/ |
210 | struct bnx2x *bp = params->bp; | 374 | struct bnx2x *bp = params->bp; |
211 | 375 | ||
212 | DP(NETIF_MSG_LINK, "ETS disabled configuration\n"); | 376 | DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n"); |
213 | 377 | ||
214 | /* | 378 | /* |
215 | * mapping between entry priority to client number (0,1,2 -debug and | 379 | * mapping between entry priority to client number (0,1,2 -debug and |
@@ -262,7 +426,756 @@ void bnx2x_ets_disabled(struct link_params *params) | |||
262 | /* Defines the number of consecutive slots for the strict priority */ | 426 | /* Defines the number of consecutive slots for the strict priority */ |
263 | REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); | 427 | REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); |
264 | } | 428 | } |
429 | /****************************************************************************** | ||
430 | * Description: | ||
431 | * Returns min_w_val, which is set according to the line speed. | ||
432 | *. | ||
433 | ******************************************************************************/ | ||
434 | static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars) | ||
435 | { | ||
436 | u32 min_w_val = 0; | ||
437 | /* Calculate min_w_val.*/ | ||
438 | if (vars->link_up) { | ||
439 | if (SPEED_20000 == vars->line_speed) | ||
440 | min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS; | ||
441 | else | ||
442 | min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS; | ||
443 | } else | ||
444 | min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS; | ||
445 | /** | ||
446 | * If the link isn't up (static configuration, for example), the | ||
447 | * 20Gbps value is used. | ||
448 | */ | ||
449 | return min_w_val; | ||
450 | } | ||
451 | /****************************************************************************** | ||
452 | * Description: | ||
453 | * Returns the credit upper bound derived from min_w_val. | ||
454 | *. | ||
455 | ******************************************************************************/ | ||
456 | static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val) | ||
457 | { | ||
458 | const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val), | ||
459 | MAX_PACKET_SIZE); | ||
460 | return credit_upper_bound; | ||
461 | } | ||
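As a worked example under the defines above (illustration only, not from the patch): for a link at or below 10Gbps, min_w_val is ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS = 1360, so the bound is max(150 * 1360, 9700) = 204000; at 20Gbps it is max(150 * 2720, 9700) = 408000, so MAX_PACKET_SIZE only dominates for very small min_w_val values.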
462 | /****************************************************************************** | ||
463 | * Description: | ||
464 | * Set credit upper bound for NIG. | ||
465 | *. | ||
466 | ******************************************************************************/ | ||
467 | static void bnx2x_ets_e3b0_set_credit_upper_bound_nig( | ||
468 | const struct link_params *params, | ||
469 | const u32 min_w_val) | ||
470 | { | ||
471 | struct bnx2x *bp = params->bp; | ||
472 | const u8 port = params->port; | ||
473 | const u32 credit_upper_bound = | ||
474 | bnx2x_ets_get_credit_upper_bound(min_w_val); | ||
475 | |||
476 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 : | ||
477 | NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound); | ||
478 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 : | ||
479 | NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound); | ||
480 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 : | ||
481 | NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound); | ||
482 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 : | ||
483 | NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound); | ||
484 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 : | ||
485 | NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound); | ||
486 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 : | ||
487 | NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound); | ||
488 | |||
489 | if (0 == port) { | ||
490 | REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6, | ||
491 | credit_upper_bound); | ||
492 | REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7, | ||
493 | credit_upper_bound); | ||
494 | REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8, | ||
495 | credit_upper_bound); | ||
496 | } | ||
497 | } | ||
498 | /****************************************************************************** | ||
499 | * Description: | ||
500 | * Returns the NIG ETS registers to their init values, except for | ||
501 | * credit_upper_bound, | ||
502 | * which isn't used in this configuration (no WFQ is enabled) and is | ||
503 | * configured according to spec. | ||
504 | *. | ||
505 | ******************************************************************************/ | ||
506 | static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, | ||
507 | const struct link_vars *vars) | ||
508 | { | ||
509 | struct bnx2x *bp = params->bp; | ||
510 | const u8 port = params->port; | ||
511 | const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars); | ||
512 | /** | ||
513 | * mapping between entry priority to client number (0,1,2 -debug and | ||
514 | * management clients, 3 - COS0 client, 4 - COS1, ... 8 - | ||
515 | * COS5) (HIGHEST). 4-bit client num. TODO_ETS - Should be done by | ||
516 | * reset value or init tool | ||
517 | */ | ||
518 | if (port) { | ||
519 | REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210); | ||
520 | REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0); | ||
521 | } else { | ||
522 | REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210); | ||
523 | REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8); | ||
524 | } | ||
525 | /** | ||
526 | * For strict priority entries defines the number of consecutive | ||
527 | * slots for the highest priority. | ||
528 | */ | ||
529 | /* TODO_ETS - Should be done by reset value or init tool */ | ||
530 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS : | ||
531 | NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); | ||
532 | /** | ||
533 | * mapping between the CREDIT_WEIGHT registers and actual client | ||
534 | * numbers | ||
535 | */ | ||
536 | /* TODO_ETS - Should be done by reset value or init tool */ | ||
537 | if (port) { | ||
538 | /*Port 1 has 6 COS*/ | ||
539 | REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543); | ||
540 | REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0); | ||
541 | } else { | ||
542 | /*Port 0 has 9 COS*/ | ||
543 | REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB, | ||
544 | 0x43210876); | ||
545 | REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5); | ||
546 | } | ||
547 | |||
548 | /** | ||
549 | * Bitmap of 5bits length. Each bit specifies whether the entry behaves | ||
550 | * as strict. Bits 0,1,2 - debug and management entries, 3 - | ||
551 | * COS0 entry, 4 - COS1 entry. | ||
552 | * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT | ||
553 | * bit4 bit3 bit2 bit1 bit0 | ||
554 | * MCP and debug are strict | ||
555 | */ | ||
556 | if (port) | ||
557 | REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f); | ||
558 | else | ||
559 | REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff); | ||
560 | /* defines which entries (clients) are subjected to WFQ arbitration */ | ||
561 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ : | ||
562 | NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); | ||
563 | |||
564 | /** | ||
565 | * Note that the register addresses are not contiguous, so a | ||
566 | * for-loop is not appropriate here. In 2-port mode only COS0-5 | ||
567 | * of port0 can be used; DEBUG0, DEBUG1 and MGMT are never used | ||
568 | * for WFQ. In 4-port mode only COS0-2 of port1 can be used, and | ||
569 | * again DEBUG0, DEBUG1 and MGMT are never used for WFQ. | ||
570 | */ | ||
571 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 : | ||
572 | NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0); | ||
573 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 : | ||
574 | NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0); | ||
575 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 : | ||
576 | NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0); | ||
577 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 : | ||
578 | NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0); | ||
579 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 : | ||
580 | NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0); | ||
581 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 : | ||
582 | NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0); | ||
583 | if (0 == port) { | ||
584 | REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0); | ||
585 | REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0); | ||
586 | REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0); | ||
587 | } | ||
588 | |||
589 | bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val); | ||
590 | } | ||
591 | /****************************************************************************** | ||
592 | * Description: | ||
593 | * Set credit upper bound for PBF. | ||
594 | *. | ||
595 | ******************************************************************************/ | ||
596 | static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf( | ||
597 | const struct link_params *params, | ||
598 | const u32 min_w_val) | ||
599 | { | ||
600 | struct bnx2x *bp = params->bp; | ||
601 | const u32 credit_upper_bound = | ||
602 | bnx2x_ets_get_credit_upper_bound(min_w_val); | ||
603 | const u8 port = params->port; | ||
604 | u32 base_upper_bound = 0; | ||
605 | u8 max_cos = 0; | ||
606 | u8 i = 0; | ||
607 | /** | ||
608 | * In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4 | ||
609 | * port mode port1 has COS0-2 that can be used for WFQ. | ||
610 | */ | ||
611 | if (0 == port) { | ||
612 | base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0; | ||
613 | max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; | ||
614 | } else { | ||
615 | base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1; | ||
616 | max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1; | ||
617 | } | ||
618 | |||
619 | for (i = 0; i < max_cos; i++) | ||
620 | REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound); | ||
621 | } | ||
622 | |||
623 | /****************************************************************************** | ||
624 | * Description: | ||
625 | * Returns the PBF ETS registers to their init values, except for | ||
626 | * credit_upper_bound, | ||
627 | * which isn't used in this configuration (no WFQ is enabled) and is | ||
628 | * configured according to spec. | ||
629 | *. | ||
630 | ******************************************************************************/ | ||
631 | static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params) | ||
632 | { | ||
633 | struct bnx2x *bp = params->bp; | ||
634 | const u8 port = params->port; | ||
635 | const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL; | ||
636 | u8 i = 0; | ||
637 | u32 base_weight = 0; | ||
638 | u8 max_cos = 0; | ||
639 | |||
640 | /** | ||
641 | * mapping between entry priority to client number 0 - COS0 | ||
642 | * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num. | ||
643 | * TODO_ETS - Should be done by reset value or init tool | ||
644 | */ | ||
645 | if (port) | ||
646 | /* 0x688 (|011|0 10|00 1|000) */ | ||
647 | REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , 0x688); | ||
648 | else | ||
649 | /* (10 1|100 |011|0 10|00 1|000) */ | ||
650 | REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , 0x2C688); | ||
651 | |||
652 | /* TODO_ETS - Should be done by reset value or init tool */ | ||
653 | if (port) | ||
654 | /* 0x688 (|011|0 10|00 1|000)*/ | ||
655 | REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688); | ||
656 | else | ||
657 | /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */ | ||
658 | REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688); | ||
659 | |||
660 | REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 : | ||
661 | PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 , 0x100); | ||
662 | |||
663 | |||
664 | REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 : | ||
665 | PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , 0); | ||
666 | |||
667 | REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 : | ||
668 | PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0); | ||
669 | /** | ||
670 | * In 2 port mode port0 has COS0-5 that can be used for WFQ. | ||
671 | * In 4 port mode port1 has COS0-2 that can be used for WFQ. | ||
672 | */ | ||
673 | if (0 == port) { | ||
674 | base_weight = PBF_REG_COS0_WEIGHT_P0; | ||
675 | max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; | ||
676 | } else { | ||
677 | base_weight = PBF_REG_COS0_WEIGHT_P1; | ||
678 | max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1; | ||
679 | } | ||
680 | |||
681 | for (i = 0; i < max_cos; i++) | ||
682 | REG_WR(bp, base_weight + (0x4 * i), 0); | ||
683 | |||
684 | bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf); | ||
685 | } | ||
686 | /****************************************************************************** | ||
687 | * Description: | ||
688 | * E3B0 disable basically returns the registers to their init values. | ||
689 | *. | ||
690 | ******************************************************************************/ | ||
691 | static int bnx2x_ets_e3b0_disabled(const struct link_params *params, | ||
692 | const struct link_vars *vars) | ||
693 | { | ||
694 | struct bnx2x *bp = params->bp; | ||
695 | |||
696 | if (!CHIP_IS_E3B0(bp)) { | ||
697 | DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_disabled the chip isn't E3B0" | ||
698 | "\n"); | ||
699 | return -EINVAL; | ||
700 | } | ||
701 | |||
702 | bnx2x_ets_e3b0_nig_disabled(params, vars); | ||
703 | |||
704 | bnx2x_ets_e3b0_pbf_disabled(params); | ||
705 | |||
706 | return 0; | ||
707 | } | ||
265 | 708 | ||
709 | /****************************************************************************** | ||
710 | * Description: | ||
711 | * Disable basically returns the registers to their init values. | ||
712 | *. | ||
713 | ******************************************************************************/ | ||
714 | int bnx2x_ets_disabled(struct link_params *params, | ||
715 | struct link_vars *vars) | ||
716 | { | ||
717 | struct bnx2x *bp = params->bp; | ||
718 | int bnx2x_status = 0; | ||
719 | |||
720 | if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp))) | ||
721 | bnx2x_ets_e2e3a0_disabled(params); | ||
722 | else if (CHIP_IS_E3B0(bp)) | ||
723 | bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars); | ||
724 | else { | ||
725 | DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n"); | ||
726 | return -EINVAL; | ||
727 | } | ||
728 | |||
729 | return bnx2x_status; | ||
730 | } | ||
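Since bnx2x_ets_disabled() now returns a status rather than void, a hypothetical caller would check it; a sketch only, the error text is an assumption:

	struct bnx2x *bp = params->bp;

	if (bnx2x_ets_disabled(params, vars) != 0)
		DP(NETIF_MSG_LINK, "Failed to return ETS to init values\n");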
731 | |||
732 | /****************************************************************************** | ||
733 | * Description | ||
734 | * Set the COS mapping to SP and BW; until this point none of the COS is | ||
735 | * set as SP or BW. | ||
736 | ******************************************************************************/ | ||
737 | static int bnx2x_ets_e3b0_cli_map(const struct link_params *params, | ||
738 | const struct bnx2x_ets_params *ets_params, | ||
739 | const u8 cos_sp_bitmap, | ||
740 | const u8 cos_bw_bitmap) | ||
741 | { | ||
742 | struct bnx2x *bp = params->bp; | ||
743 | const u8 port = params->port; | ||
744 | const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3); | ||
745 | const u8 pbf_cli_sp_bitmap = cos_sp_bitmap; | ||
746 | const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3; | ||
747 | const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap; | ||
748 | |||
749 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT : | ||
750 | NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap); | ||
751 | |||
752 | REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 : | ||
753 | PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , pbf_cli_sp_bitmap); | ||
754 | |||
755 | REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ : | ||
756 | NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, | ||
757 | nig_cli_subject2wfq_bitmap); | ||
758 | |||
759 | REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 : | ||
760 | PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, | ||
761 | pbf_cli_subject2wfq_bitmap); | ||
762 | |||
763 | return 0; | ||
764 | } | ||
765 | |||
766 | /****************************************************************************** | ||
767 | * Description: | ||
768 | * This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers | ||
769 | * are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable. | ||
770 | ******************************************************************************/ | ||
771 | static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp, | ||
772 | const u8 cos_entry, | ||
773 | const u32 min_w_val_nig, | ||
774 | const u32 min_w_val_pbf, | ||
775 | const u16 total_bw, | ||
776 | const u8 bw, | ||
777 | const u8 port) | ||
778 | { | ||
779 | u32 nig_reg_adress_crd_weight = 0; | ||
780 | u32 pbf_reg_adress_crd_weight = 0; | ||
781 | /* Calculate and set BW for this COS*/ | ||
782 | const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw; | ||
783 | const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw; | ||
784 | |||
785 | switch (cos_entry) { | ||
786 | case 0: | ||
787 | nig_reg_adress_crd_weight = | ||
788 | (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 : | ||
789 | NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0; | ||
790 | pbf_reg_adress_crd_weight = (port) ? | ||
791 | PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0; | ||
792 | break; | ||
793 | case 1: | ||
794 | nig_reg_adress_crd_weight = (port) ? | ||
795 | NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 : | ||
796 | NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1; | ||
797 | pbf_reg_adress_crd_weight = (port) ? | ||
798 | PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0; | ||
799 | break; | ||
800 | case 2: | ||
801 | nig_reg_adress_crd_weight = (port) ? | ||
802 | NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 : | ||
803 | NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2; | ||
804 | |||
805 | pbf_reg_adress_crd_weight = (port) ? | ||
806 | PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0; | ||
807 | break; | ||
808 | case 3: | ||
809 | if (port) | ||
810 | return -EINVAL; | ||
811 | nig_reg_adress_crd_weight = | ||
812 | NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3; | ||
813 | pbf_reg_adress_crd_weight = | ||
814 | PBF_REG_COS3_WEIGHT_P0; | ||
815 | break; | ||
816 | case 4: | ||
817 | if (port) | ||
818 | return -EINVAL; | ||
819 | nig_reg_adress_crd_weight = | ||
820 | NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4; | ||
821 | pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0; | ||
822 | break; | ||
823 | case 5: | ||
824 | if (port) | ||
825 | return -EINVAL; | ||
826 | nig_reg_adress_crd_weight = | ||
827 | NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5; | ||
828 | pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0; | ||
829 | break; | ||
830 | } | ||
831 | |||
832 | REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig); | ||
833 | |||
834 | REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf); | ||
835 | |||
836 | return 0; | ||
837 | } | ||
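A worked example of the weight calculation above (numbers are illustrative): with total_bw = 100, bw = 30, min_w_val_nig = 1360 and min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL (10000), the NIG credit weight written is (30 * 1360) / 100 = 408 and the PBF weight is (30 * 10000) / 100 = 3000.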
838 | /****************************************************************************** | ||
839 | * Description: | ||
840 | * Calculate the total BW. A value of 0 isn't legal. | ||
841 | *. | ||
842 | ******************************************************************************/ | ||
843 | static int bnx2x_ets_e3b0_get_total_bw( | ||
844 | const struct link_params *params, | ||
845 | const struct bnx2x_ets_params *ets_params, | ||
846 | u16 *total_bw) | ||
847 | { | ||
848 | struct bnx2x *bp = params->bp; | ||
849 | u8 cos_idx = 0; | ||
850 | |||
851 | *total_bw = 0 ; | ||
852 | /* Calculate total BW requested */ | ||
853 | for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { | ||
854 | if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { | ||
855 | |||
856 | if (0 == ets_params->cos[cos_idx].params.bw_params.bw) { | ||
857 | DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW " | ||
858 | "was set to 0\n"); | ||
859 | return -EINVAL; | ||
860 | } | ||
861 | *total_bw += | ||
862 | ets_params->cos[cos_idx].params.bw_params.bw; | ||
863 | } | ||
864 | } | ||
865 | |||
866 | /* Check total BW is valid */ | ||
867 | if ((100 != *total_bw) || (0 == *total_bw)) { | ||
868 | if (0 == *total_bw) { | ||
869 | DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW " | ||
870 | "shouldn't be 0\n"); | ||
871 | return -EINVAL; | ||
872 | } | ||
873 | DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW should be " | ||
874 | "100\n"); | ||
875 | /** | ||
876 | * We can handle a case where the BW isn't 100; this can happen | ||
877 | * if the TCs are joined. | ||
878 | */ | ||
879 | } | ||
880 | return 0; | ||
881 | } | ||
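For example (an illustration of the checks above, not from the patch): BW-state entries of 50, 30 and 20 sum to 100 and pass silently; entries of 50 and 30 sum to 80, which only logs the "should be 100" message and still returns 0, since joined TCs may report less than 100; a single BW entry of 0 fails with -EINVAL.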
882 | |||
883 | /****************************************************************************** | ||
884 | * Description: | ||
885 | * Invalidate all the sp_pri_to_cos. | ||
886 | *. | ||
887 | ******************************************************************************/ | ||
888 | static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos) | ||
889 | { | ||
890 | u8 pri = 0; | ||
891 | for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++) | ||
892 | sp_pri_to_cos[pri] = DCBX_INVALID_COS; | ||
893 | } | ||
894 | /****************************************************************************** | ||
895 | * Description: | ||
896 | * Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers | ||
897 | * according to sp_pri_to_cos. | ||
898 | *. | ||
899 | ******************************************************************************/ | ||
900 | static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, | ||
901 | u8 *sp_pri_to_cos, const u8 pri, | ||
902 | const u8 cos_entry) | ||
903 | { | ||
904 | struct bnx2x *bp = params->bp; | ||
905 | const u8 port = params->port; | ||
906 | const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 : | ||
907 | DCBX_E3B0_MAX_NUM_COS_PORT0; | ||
908 | |||
909 | if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) { | ||
910 | DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " | ||
911 | "parameter: there can't be two COS's with " | ||
912 | "the same strict pri\n"); | ||
913 | return -EINVAL; | ||
914 | } | ||
915 | |||
916 | if (pri > max_num_of_cos) { | ||
917 | DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " | ||
918 | "parameter: illegal strict priority\n"); | ||
919 | return -EINVAL; | ||
920 | } | ||
921 | |||
922 | sp_pri_to_cos[pri] = cos_entry; | ||
923 | return 0; | ||
924 | |||
925 | } | ||
926 | |||
927 | /****************************************************************************** | ||
928 | * Description: | ||
929 | * Returns the correct value according to COS and priority in | ||
930 | * the sp_pri_cli register. | ||
931 | *. | ||
932 | ******************************************************************************/ | ||
933 | static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset, | ||
934 | const u8 pri_set, | ||
935 | const u8 pri_offset, | ||
936 | const u8 entry_size) | ||
937 | { | ||
938 | u64 pri_cli_nig = 0; | ||
939 | pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size * | ||
940 | (pri_set + pri_offset)); | ||
941 | |||
942 | return pri_cli_nig; | ||
943 | } | ||
944 | /****************************************************************************** | ||
945 | * Description: | ||
946 | * Returns the correct value according to COS and priority in the | ||
947 | * sp_pri_cli register for NIG. | ||
948 | *. | ||
949 | ******************************************************************************/ | ||
950 | static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set) | ||
951 | { | ||
952 | /* MCP Dbg0 and dbg1 are always with higher strict pri*/ | ||
953 | const u8 nig_cos_offset = 3; | ||
954 | const u8 nig_pri_offset = 3; | ||
955 | |||
956 | return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set, | ||
957 | nig_pri_offset, 4); | ||
958 | |||
959 | } | ||
960 | /****************************************************************************** | ||
961 | * Description: | ||
962 | * Returns the correct value according to COS and priority in the | ||
963 | * sp_pri_cli register for PBF. | ||
964 | *. | ||
965 | ******************************************************************************/ | ||
966 | static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set) | ||
967 | { | ||
968 | const u8 pbf_cos_offset = 0; | ||
969 | const u8 pbf_pri_offset = 0; | ||
970 | |||
971 | return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set, | ||
972 | pbf_pri_offset, 3); | ||
973 | |||
974 | } | ||
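A worked example of the encodings above (illustration only): mapping COS 2 to the first strict-priority slot (pri_set = 0) gives a NIG value of (2 + 3) << (4 * (0 + 3)) = 0x5000, i.e. client 5 in the fourth nibble, above the MGMT/DEBUG0/DEBUG1 nibbles preset by 0x210, and a PBF value of (2 + 0) << (3 * (0 + 0)) = 0x2.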
975 | |||
976 | /****************************************************************************** | ||
977 | * Description: | ||
978 | * Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers | ||
979 | * according to sp_pri_to_cos.(which COS has higher priority) | ||
980 | *. | ||
981 | ******************************************************************************/ | ||
982 | static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params, | ||
983 | u8 *sp_pri_to_cos) | ||
984 | { | ||
985 | struct bnx2x *bp = params->bp; | ||
986 | u8 i = 0; | ||
987 | const u8 port = params->port; | ||
988 | /* MCP Dbg0 and dbg1 are always with higher strict pri*/ | ||
989 | u64 pri_cli_nig = 0x210; | ||
990 | u32 pri_cli_pbf = 0x0; | ||
991 | u8 pri_set = 0; | ||
992 | u8 pri_bitmask = 0; | ||
993 | const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 : | ||
994 | DCBX_E3B0_MAX_NUM_COS_PORT0; | ||
995 | |||
996 | u8 cos_bit_to_set = (1 << max_num_of_cos) - 1; | ||
997 | |||
998 | /* Set all the strict priority first */ | ||
999 | for (i = 0; i < max_num_of_cos; i++) { | ||
1000 | if (DCBX_INVALID_COS != sp_pri_to_cos[i]) { | ||
1001 | if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) { | ||
1002 | DP(NETIF_MSG_LINK, | ||
1003 | "bnx2x_ets_e3b0_sp_set_pri_cli_reg " | ||
1004 | "invalid cos entry\n"); | ||
1005 | return -EINVAL; | ||
1006 | } | ||
1007 | |||
1008 | pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig( | ||
1009 | sp_pri_to_cos[i], pri_set); | ||
1010 | |||
1011 | pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf( | ||
1012 | sp_pri_to_cos[i], pri_set); | ||
1013 | pri_bitmask = 1 << sp_pri_to_cos[i]; | ||
1014 | /* COS is used remove it from bitmap.*/ | ||
1015 | if (0 == (pri_bitmask & cos_bit_to_set)) { | ||
1016 | DP(NETIF_MSG_LINK, | ||
1017 | "bnx2x_ets_e3b0_sp_set_pri_cli_reg " | ||
1018 | "invalid: there can't be two COS's with" | ||
1019 | " the same strict pri\n"); | ||
1020 | return -EINVAL; | ||
1021 | } | ||
1022 | cos_bit_to_set &= ~pri_bitmask; | ||
1023 | pri_set++; | ||
1024 | } | ||
1025 | } | ||
1026 | |||
1027 | /* Set all the non-strict priorities; i = COS */ | ||
1028 | for (i = 0; i < max_num_of_cos; i++) { | ||
1029 | pri_bitmask = 1 << i; | ||
1030 | /* Check if COS was already used for SP */ | ||
1031 | if (pri_bitmask & cos_bit_to_set) { | ||
1032 | /* COS wasn't used for SP */ | ||
1033 | pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig( | ||
1034 | i, pri_set); | ||
1035 | |||
1036 | pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf( | ||
1037 | i, pri_set); | ||
1038 | /* COS is used remove it from bitmap.*/ | ||
1039 | cos_bit_to_set &= ~pri_bitmask; | ||
1040 | pri_set++; | ||
1041 | } | ||
1042 | } | ||
1043 | |||
1044 | if (pri_set != max_num_of_cos) { | ||
1045 | DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all " | ||
1046 | "entries were set\n"); | ||
1047 | return -EINVAL; | ||
1048 | } | ||
1049 | |||
1050 | if (port) { | ||
1051 | /* Only 6 usable clients*/ | ||
1052 | REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, | ||
1053 | (u32)pri_cli_nig); | ||
1054 | |||
1055 | REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , pri_cli_pbf); | ||
1056 | } else { | ||
1057 | /* Only 9 usable clients*/ | ||
1058 | const u32 pri_cli_nig_lsb = (u32) (pri_cli_nig); | ||
1059 | const u32 pri_cli_nig_msb = (u32) ((pri_cli_nig >> 32) & 0xF); | ||
1060 | |||
1061 | REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, | ||
1062 | pri_cli_nig_lsb); | ||
1063 | REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, | ||
1064 | pri_cli_nig_msb); | ||
1065 | |||
1066 | REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , pri_cli_pbf); | ||
1067 | } | ||
1068 | return 0; | ||
1069 | } | ||
1070 | |||
1071 | /****************************************************************************** | ||
1072 | * Description: | ||
1073 | * Configure the COS to ETS according to BW and SP settings. | ||
1074 | ******************************************************************************/ | ||
1075 | int bnx2x_ets_e3b0_config(const struct link_params *params, | ||
1076 | const struct link_vars *vars, | ||
1077 | const struct bnx2x_ets_params *ets_params) | ||
1078 | { | ||
1079 | struct bnx2x *bp = params->bp; | ||
1080 | int bnx2x_status = 0; | ||
1081 | const u8 port = params->port; | ||
1082 | u16 total_bw = 0; | ||
1083 | const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars); | ||
1084 | const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL; | ||
1085 | u8 cos_bw_bitmap = 0; | ||
1086 | u8 cos_sp_bitmap = 0; | ||
1087 | u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0}; | ||
1088 | const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 : | ||
1089 | DCBX_E3B0_MAX_NUM_COS_PORT0; | ||
1090 | u8 cos_entry = 0; | ||
1091 | |||
1092 | if (!CHIP_IS_E3B0(bp)) { | ||
1093 | DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config the chip isn't E3B0" | ||
1094 | "\n"); | ||
1095 | return -EINVAL; | ||
1096 | } | ||
1097 | |||
1098 | if ((ets_params->num_of_cos > max_num_of_cos)) { | ||
1099 | DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS " | ||
1100 | "isn't supported\n"); | ||
1101 | return -EINVAL; | ||
1102 | } | ||
1103 | |||
1104 | /* Prepare sp strict priority parameters*/ | ||
1105 | bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos); | ||
1106 | |||
1107 | /* Prepare BW parameters*/ | ||
1108 | bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params, | ||
1109 | &total_bw); | ||
1110 | if (0 != bnx2x_status) { | ||
1111 | DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config get_total_bw failed " | ||
1112 | "\n"); | ||
1113 | return -EINVAL; | ||
1114 | } | ||
1115 | |||
1116 | /** | ||
1117 | * Upper bound is set according to current link speed (min_w_val | ||
1118 | * should be the same for upper bound and COS credit val). | ||
1119 | */ | ||
1120 | bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig); | ||
1121 | bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf); | ||
1122 | |||
1123 | |||
1124 | for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) { | ||
1125 | if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) { | ||
1126 | cos_bw_bitmap |= (1 << cos_entry); | ||
1127 | /** | ||
1128 | * The function also sets the BW in HW (not the mapping | ||
1129 | * yet) | ||
1130 | */ | ||
1131 | bnx2x_status = bnx2x_ets_e3b0_set_cos_bw( | ||
1132 | bp, cos_entry, min_w_val_nig, min_w_val_pbf, | ||
1133 | total_bw, | ||
1134 | ets_params->cos[cos_entry].params.bw_params.bw, | ||
1135 | port); | ||
1136 | } else if (bnx2x_cos_state_strict == | ||
1137 | ets_params->cos[cos_entry].state){ | ||
1138 | cos_sp_bitmap |= (1 << cos_entry); | ||
1139 | |||
1140 | bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set( | ||
1141 | params, | ||
1142 | sp_pri_to_cos, | ||
1143 | ets_params->cos[cos_entry].params.sp_params.pri, | ||
1144 | cos_entry); | ||
1145 | |||
1146 | } else { | ||
1147 | DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config cos state not" | ||
1148 | " valid\n"); | ||
1149 | return -EINVAL; | ||
1150 | } | ||
1151 | if (0 != bnx2x_status) { | ||
1152 | DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config set cos bw " | ||
1153 | "failed\n"); | ||
1154 | return bnx2x_status; | ||
1155 | } | ||
1156 | } | ||
1157 | |||
1158 | /* Set SP register (which COS has higher priority) */ | ||
1159 | bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params, | ||
1160 | sp_pri_to_cos); | ||
1161 | |||
1162 | if (0 != bnx2x_status) { | ||
1163 | DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config set_pri_cli_reg " | ||
1164 | "failed\n"); | ||
1165 | return bnx2x_status; | ||
1166 | } | ||
1167 | |||
1168 | /* Set client mapping of BW and strict */ | ||
1169 | bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params, | ||
1170 | cos_sp_bitmap, | ||
1171 | cos_bw_bitmap); | ||
1172 | |||
1173 | if (0 != bnx2x_status) { | ||
1174 | DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n"); | ||
1175 | return bnx2x_status; | ||
1176 | } | ||
1177 | return 0; | ||
1178 | } | ||
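A hypothetical caller sketch for the E3B0 ETS configuration above; the wrapper name and the chosen strict/BW split are assumptions, only the bnx2x_ets_params field names follow the code:

	static int example_ets_e3b0_setup(struct link_params *params,
					  struct link_vars *vars)
	{
		struct bnx2x_ets_params ets_params = {0};

		ets_params.num_of_cos = 2;
		/* COS0 is configured as strict priority 0 */
		ets_params.cos[0].state = bnx2x_cos_state_strict;
		ets_params.cos[0].params.sp_params.pri = 0;
		/* COS1 takes all of the WFQ bandwidth; BW must sum to 100 */
		ets_params.cos[1].state = bnx2x_cos_state_bw;
		ets_params.cos[1].params.bw_params.bw = 100;

		return bnx2x_ets_e3b0_config(params, vars, &ets_params);
	}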
266 | static void bnx2x_ets_bw_limit_common(const struct link_params *params) | 1179 | static void bnx2x_ets_bw_limit_common(const struct link_params *params) |
267 | { | 1180 | { |
268 | /* ETS disabled configuration */ | 1181 | /* ETS disabled configuration */ |
@@ -342,7 +1255,7 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, | |||
342 | REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight); | 1255 | REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight); |
343 | } | 1256 | } |
344 | 1257 | ||
345 | u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) | 1258 | int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) |
346 | { | 1259 | { |
347 | /* ETS disabled configuration*/ | 1260 | /* ETS disabled configuration*/ |
348 | struct bnx2x *bp = params->bp; | 1261 | struct bnx2x *bp = params->bp; |
@@ -388,24 +1301,64 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) | |||
388 | /* PFC section */ | 1301 | /* PFC section */ |
389 | /******************************************************************/ | 1302 | /******************************************************************/ |
390 | 1303 | ||
391 | static void bnx2x_bmac2_get_pfc_stat(struct link_params *params, | 1304 | static void bnx2x_update_pfc_xmac(struct link_params *params, |
392 | u32 pfc_frames_sent[2], | 1305 | struct link_vars *vars, |
393 | u32 pfc_frames_received[2]) | 1306 | u8 is_lb) |
394 | { | 1307 | { |
395 | /* Read pfc statistic */ | ||
396 | struct bnx2x *bp = params->bp; | 1308 | struct bnx2x *bp = params->bp; |
397 | u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM : | 1309 | u32 xmac_base; |
398 | NIG_REG_INGRESS_BMAC0_MEM; | 1310 | u32 pause_val, pfc0_val, pfc1_val; |
399 | 1311 | ||
400 | DP(NETIF_MSG_LINK, "pfc statistic read from BMAC\n"); | 1312 | /* XMAC base addr */ |
1313 | xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; | ||
401 | 1314 | ||
402 | REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_STAT_GTPP, | 1315 | /* Initialize pause and pfc registers */ |
403 | pfc_frames_sent, 2); | 1316 | pause_val = 0x18000; |
1317 | pfc0_val = 0xFFFF8000; | ||
1318 | pfc1_val = 0x2; | ||
404 | 1319 | ||
405 | REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_STAT_GRPP, | 1320 | /* No PFC support */ |
406 | pfc_frames_received, 2); | 1321 | if (!(params->feature_config_flags & |
1322 | FEATURE_CONFIG_PFC_ENABLED)) { | ||
407 | 1323 | ||
1324 | /* | ||
1325 | * RX flow control - Process pause frame in receive direction | ||
1326 | */ | ||
1327 | if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) | ||
1328 | pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN; | ||
1329 | |||
1330 | /* | ||
1331 | * TX flow control - Send pause packet when buffer is full | ||
1332 | */ | ||
1333 | if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) | ||
1334 | pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN; | ||
1335 | } else {/* PFC support */ | ||
1336 | pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN | | ||
1337 | XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN | | ||
1338 | XMAC_PFC_CTRL_HI_REG_RX_PFC_EN | | ||
1339 | XMAC_PFC_CTRL_HI_REG_TX_PFC_EN; | ||
1340 | } | ||
1341 | |||
1342 | /* Write pause and PFC registers */ | ||
1343 | REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val); | ||
1344 | REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val); | ||
1345 | REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val); | ||
1346 | |||
1347 | |||
1348 | /* Set MAC address for source TX Pause/PFC frames */ | ||
1349 | REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO, | ||
1350 | ((params->mac_addr[2] << 24) | | ||
1351 | (params->mac_addr[3] << 16) | | ||
1352 | (params->mac_addr[4] << 8) | | ||
1353 | (params->mac_addr[5]))); | ||
1354 | REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI, | ||
1355 | ((params->mac_addr[0] << 8) | | ||
1356 | (params->mac_addr[1]))); | ||
1357 | |||
1358 | udelay(30); | ||
408 | } | 1359 | } |
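As a worked example of the source-address packing above (the MAC value is illustrative): for MAC 00:10:18:ab:cd:ef, XMAC_REG_CTRL_SA_LO is written with 0x18abcdef (address bytes 2..5) and XMAC_REG_CTRL_SA_HI with 0x0010 (bytes 0..1).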
1360 | |||
1361 | |||
409 | static void bnx2x_emac_get_pfc_stat(struct link_params *params, | 1362 | static void bnx2x_emac_get_pfc_stat(struct link_params *params, |
410 | u32 pfc_frames_sent[2], | 1363 | u32 pfc_frames_sent[2], |
411 | u32 pfc_frames_received[2]) | 1364 | u32 pfc_frames_received[2]) |
@@ -437,33 +1390,54 @@ static void bnx2x_emac_get_pfc_stat(struct link_params *params, | |||
437 | pfc_frames_sent[0] = val_xon + val_xoff; | 1390 | pfc_frames_sent[0] = val_xon + val_xoff; |
438 | } | 1391 | } |
439 | 1392 | ||
1393 | /* Read pfc statistic*/ | ||
440 | void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, | 1394 | void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, |
441 | u32 pfc_frames_sent[2], | 1395 | u32 pfc_frames_sent[2], |
442 | u32 pfc_frames_received[2]) | 1396 | u32 pfc_frames_received[2]) |
443 | { | 1397 | { |
444 | /* Read pfc statistic */ | 1398 | /* Read pfc statistic */ |
445 | struct bnx2x *bp = params->bp; | 1399 | struct bnx2x *bp = params->bp; |
446 | u32 val = 0; | 1400 | |
447 | DP(NETIF_MSG_LINK, "pfc statistic\n"); | 1401 | DP(NETIF_MSG_LINK, "pfc statistic\n"); |
448 | 1402 | ||
449 | if (!vars->link_up) | 1403 | if (!vars->link_up) |
450 | return; | 1404 | return; |
451 | 1405 | ||
452 | val = REG_RD(bp, MISC_REG_RESET_REG_2); | 1406 | if (MAC_TYPE_EMAC == vars->mac_type) { |
453 | if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) | 1407 | DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n"); |
454 | == 0) { | ||
455 | DP(NETIF_MSG_LINK, "About to read stats from EMAC\n"); | ||
456 | bnx2x_emac_get_pfc_stat(params, pfc_frames_sent, | 1408 | bnx2x_emac_get_pfc_stat(params, pfc_frames_sent, |
457 | pfc_frames_received); | 1409 | pfc_frames_received); |
458 | } else { | ||
459 | DP(NETIF_MSG_LINK, "About to read stats from BMAC\n"); | ||
460 | bnx2x_bmac2_get_pfc_stat(params, pfc_frames_sent, | ||
461 | pfc_frames_received); | ||
462 | } | 1410 | } |
463 | } | 1411 | } |
464 | /******************************************************************/ | 1412 | /******************************************************************/ |
465 | /* MAC/PBF section */ | 1413 | /* MAC/PBF section */ |
466 | /******************************************************************/ | 1414 | /******************************************************************/ |
1415 | static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port) | ||
1416 | { | ||
1417 | u32 mode, emac_base; | ||
1418 | /** | ||
1419 | * Set clause 45 mode, slow down the MDIO clock to 2.5MHz | ||
1420 | * (a value of 49==0x31) and make sure that the AUTO poll is off | ||
1421 | */ | ||
1422 | |||
1423 | if (CHIP_IS_E2(bp)) | ||
1424 | emac_base = GRCBASE_EMAC0; | ||
1425 | else | ||
1426 | emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | ||
1427 | mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); | ||
1428 | mode &= ~(EMAC_MDIO_MODE_AUTO_POLL | | ||
1429 | EMAC_MDIO_MODE_CLOCK_CNT); | ||
1430 | if (USES_WARPCORE(bp)) | ||
1431 | mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT); | ||
1432 | else | ||
1433 | mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT); | ||
1434 | |||
1435 | mode |= (EMAC_MDIO_MODE_CLAUSE_45); | ||
1436 | REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode); | ||
1437 | |||
1438 | udelay(40); | ||
1439 | } | ||
1440 | |||
467 | static void bnx2x_emac_init(struct link_params *params, | 1441 | static void bnx2x_emac_init(struct link_params *params, |
468 | struct link_vars *vars) | 1442 | struct link_vars *vars) |
469 | { | 1443 | { |
@@ -495,7 +1469,7 @@ static void bnx2x_emac_init(struct link_params *params, | |||
495 | } | 1469 | } |
496 | timeout--; | 1470 | timeout--; |
497 | } while (val & EMAC_MODE_RESET); | 1471 | } while (val & EMAC_MODE_RESET); |
498 | 1472 | bnx2x_set_mdio_clk(bp, params->chip_id, port); | |
499 | /* Set mac address */ | 1473 | /* Set mac address */ |
500 | val = ((params->mac_addr[0] << 8) | | 1474 | val = ((params->mac_addr[0] << 8) | |
501 | params->mac_addr[1]); | 1475 | params->mac_addr[1]); |
@@ -508,9 +1482,246 @@ static void bnx2x_emac_init(struct link_params *params, | |||
508 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val); | 1482 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val); |
509 | } | 1483 | } |
510 | 1484 | ||
511 | static u8 bnx2x_emac_enable(struct link_params *params, | 1485 | static void bnx2x_set_xumac_nig(struct link_params *params, |
1486 | u16 tx_pause_en, | ||
1487 | u8 enable) | ||
1488 | { | ||
1489 | struct bnx2x *bp = params->bp; | ||
1490 | |||
1491 | REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN, | ||
1492 | enable); | ||
1493 | REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN, | ||
1494 | enable); | ||
1495 | REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN : | ||
1496 | NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en); | ||
1497 | } | ||
1498 | |||
1499 | static void bnx2x_umac_enable(struct link_params *params, | ||
512 | struct link_vars *vars, u8 lb) | 1500 | struct link_vars *vars, u8 lb) |
513 | { | 1501 | { |
1502 | u32 val; | ||
1503 | u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; | ||
1504 | struct bnx2x *bp = params->bp; | ||
1505 | /* Reset UMAC */ | ||
1506 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | ||
1507 | (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); | ||
1508 | usleep_range(1000, 1000); | ||
1509 | |||
1510 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, | ||
1511 | (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); | ||
1512 | |||
1513 | DP(NETIF_MSG_LINK, "enabling UMAC\n"); | ||
1514 | |||
1515 | /** | ||
1516 | * This register determines on which events the MAC will assert | ||
1517 | * error on the i/f to the NIG along w/ EOP. | ||
1518 | */ | ||
1519 | |||
1520 | /** | ||
1521 | * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK + | ||
1522 | * params->port*0x14, 0xfffff. | ||
1523 | */ | ||
1524 | /* This register opens the gate for the UMAC despite its name */ | ||
1525 | REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); | ||
1526 | |||
1527 | val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN | | ||
1528 | UMAC_COMMAND_CONFIG_REG_PAD_EN | | ||
1529 | UMAC_COMMAND_CONFIG_REG_SW_RESET | | ||
1530 | UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK; | ||
1531 | switch (vars->line_speed) { | ||
1532 | case SPEED_10: | ||
1533 | val |= (0<<2); | ||
1534 | break; | ||
1535 | case SPEED_100: | ||
1536 | val |= (1<<2); | ||
1537 | break; | ||
1538 | case SPEED_1000: | ||
1539 | val |= (2<<2); | ||
1540 | break; | ||
1541 | case SPEED_2500: | ||
1542 | val |= (3<<2); | ||
1543 | break; | ||
1544 | default: | ||
1545 | DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n", | ||
1546 | vars->line_speed); | ||
1547 | break; | ||
1548 | } | ||
1549 | REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); | ||
1550 | udelay(50); | ||
1551 | |||
1552 | /* Set MAC address for source TX Pause/PFC frames (under SW reset) */ | ||
1553 | REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0, | ||
1554 | ((params->mac_addr[2] << 24) | | ||
1555 | (params->mac_addr[3] << 16) | | ||
1556 | (params->mac_addr[4] << 8) | | ||
1557 | (params->mac_addr[5]))); | ||
1558 | REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1, | ||
1559 | ((params->mac_addr[0] << 8) | | ||
1560 | (params->mac_addr[1]))); | ||
1561 | |||
1562 | /* Enable RX and TX */ | ||
1563 | val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN; | ||
1564 | val |= UMAC_COMMAND_CONFIG_REG_TX_ENA | | ||
1565 | UMAC_COMMAND_CONFIG_REG_RX_ENA; | ||
1566 | REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); | ||
1567 | udelay(50); | ||
1568 | |||
1569 | /* Remove SW Reset */ | ||
1570 | val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET; | ||
1571 | |||
1572 | /* Check loopback mode */ | ||
1573 | if (lb) | ||
1574 | val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA; | ||
1575 | REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); | ||
1576 | |||
1577 | /* | ||
1578 | * Maximum Frame Length (RW). Defines a 14-Bit maximum frame | ||
1579 | * length used by the MAC receive logic to check frames. | ||
1580 | */ | ||
1581 | REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); | ||
1582 | bnx2x_set_xumac_nig(params, | ||
1583 | ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); | ||
1584 | vars->mac_type = MAC_TYPE_UMAC; | ||
1585 | |||
1586 | } | ||
1587 | |||
1588 | static u8 bnx2x_is_4_port_mode(struct bnx2x *bp) | ||
1589 | { | ||
1590 | u32 port4mode_ovwr_val; | ||
1591 | /* Check 4-port override enabled */ | ||
1592 | port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); | ||
1593 | if (port4mode_ovwr_val & (1<<0)) { | ||
1594 | /* Return 4-port mode override value */ | ||
1595 | return ((port4mode_ovwr_val & (1<<1)) == (1<<1)); | ||
1596 | } | ||
1597 | /* Return 4-port mode from input pin */ | ||
1598 | return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN); | ||
1599 | } | ||
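For example (a reading of the override logic above, not additional behaviour): if MISC_REG_PORT4MODE_EN_OVWR reads 0x3 the override is enabled and 4-port mode is reported; 0x1 forces 2-port mode; with bit 0 clear the strap value in MISC_REG_PORT4MODE_EN is used instead.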
1600 | |||
1601 | /* Define the XMAC mode */ | ||
1602 | static void bnx2x_xmac_init(struct bnx2x *bp, u32 max_speed) | ||
1603 | { | ||
1604 | u32 is_port4mode = bnx2x_is_4_port_mode(bp); | ||
1605 | |||
1606 | /** | ||
1607 | * In 4-port mode, need to set the mode only once, so if XMAC is | ||
1608 | * already out of reset, it means the mode has already been set, | ||
1609 | * and it must not reset the XMAC again, since it controls both | ||
1610 | * ports of the path | ||
1611 | **/ | ||
1612 | |||
1613 | if (is_port4mode && (REG_RD(bp, MISC_REG_RESET_REG_2) & | ||
1614 | MISC_REGISTERS_RESET_REG_2_XMAC)) { | ||
1615 | DP(NETIF_MSG_LINK, "XMAC already out of reset" | ||
1616 | " in 4-port mode\n"); | ||
1617 | return; | ||
1618 | } | ||
1619 | |||
1620 | /* Hard reset */ | ||
1621 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | ||
1622 | MISC_REGISTERS_RESET_REG_2_XMAC); | ||
1623 | usleep_range(1000, 1000); | ||
1624 | |||
1625 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, | ||
1626 | MISC_REGISTERS_RESET_REG_2_XMAC); | ||
1627 | if (is_port4mode) { | ||
1628 | DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n"); | ||
1629 | |||
1630 | /* Set the number of ports on the system side to up to 2 */ | ||
1631 | REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1); | ||
1632 | |||
1633 | /* Set the number of ports on the Warp Core to 10G */ | ||
1634 | REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); | ||
1635 | } else { | ||
1636 | /* Set the number of ports on the system side to 1 */ | ||
1637 | REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0); | ||
1638 | if (max_speed == SPEED_10000) { | ||
1639 | DP(NETIF_MSG_LINK, "Init XMAC to 10G x 1" | ||
1640 | " port per path\n"); | ||
1641 | /* Set the number of ports on the Warp Core to 10G */ | ||
1642 | REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); | ||
1643 | } else { | ||
1644 | DP(NETIF_MSG_LINK, "Init XMAC to 20G x 2 ports" | ||
1645 | " per path\n"); | ||
1646 | /* Set the number of ports on the Warp Core to 20G */ | ||
1647 | REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1); | ||
1648 | } | ||
1649 | } | ||
1650 | /* Soft reset */ | ||
1651 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | ||
1652 | MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); | ||
1653 | usleep_range(1000, 1000); | ||
1654 | |||
1655 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, | ||
1656 | MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); | ||
1657 | |||
1658 | } | ||
1659 | |||
1660 | static void bnx2x_xmac_disable(struct link_params *params) | ||
1661 | { | ||
1662 | u8 port = params->port; | ||
1663 | struct bnx2x *bp = params->bp; | ||
1664 | u32 xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; | ||
1665 | |||
1666 | if (REG_RD(bp, MISC_REG_RESET_REG_2) & | ||
1667 | MISC_REGISTERS_RESET_REG_2_XMAC) { | ||
1668 | DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port); | ||
1669 | REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0); | ||
1670 | usleep_range(1000, 1000); | ||
1671 | bnx2x_set_xumac_nig(params, 0, 0); | ||
1672 | REG_WR(bp, xmac_base + XMAC_REG_CTRL, | ||
1673 | XMAC_CTRL_REG_SOFT_RESET); | ||
1674 | } | ||
1675 | } | ||
1676 | |||
1677 | static int bnx2x_xmac_enable(struct link_params *params, | ||
1678 | struct link_vars *vars, u8 lb) | ||
1679 | { | ||
1680 | u32 val, xmac_base; | ||
1681 | struct bnx2x *bp = params->bp; | ||
1682 | DP(NETIF_MSG_LINK, "enabling XMAC\n"); | ||
1683 | |||
1684 | xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; | ||
1685 | |||
1686 | bnx2x_xmac_init(bp, vars->line_speed); | ||
1687 | |||
1688 | /* | ||
1689 | * This register determines on which events the MAC will assert | ||
1690 | * error on the i/f to the NIG along w/ EOP. | ||
1691 | */ | ||
1692 | |||
1693 | /* | ||
1694 | * This register tells the NIG whether to send traffic to UMAC | ||
1695 | * or XMAC | ||
1696 | */ | ||
1697 | REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0); | ||
1698 | |||
1699 | /* Set Max packet size */ | ||
1700 | REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710); | ||
1701 | |||
1702 | /* CRC append for Tx packets */ | ||
1703 | REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800); | ||
1704 | |||
1705 | /* update PFC */ | ||
1706 | bnx2x_update_pfc_xmac(params, vars, 0); | ||
1707 | |||
1708 | /* Enable TX and RX */ | ||
1709 | val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN; | ||
1710 | |||
1711 | /* Check loopback mode */ | ||
1712 | if (lb) | ||
1713 | val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK; | ||
1714 | REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); | ||
1715 | bnx2x_set_xumac_nig(params, | ||
1716 | ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); | ||
1717 | |||
1718 | vars->mac_type = MAC_TYPE_XMAC; | ||
1719 | |||
1720 | return 0; | ||
1721 | } | ||
1722 | static int bnx2x_emac_enable(struct link_params *params, | ||
1723 | struct link_vars *vars, u8 lb) | ||
1724 | { | ||
514 | struct bnx2x *bp = params->bp; | 1725 | struct bnx2x *bp = params->bp; |
515 | u8 port = params->port; | 1726 | u8 port = params->port; |
516 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | 1727 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
@@ -760,95 +1971,398 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, | |||
760 | REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); | 1971 | REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); |
761 | } | 1972 | } |
762 | 1973 | ||
763 | static void bnx2x_update_pfc_brb(struct link_params *params, | 1974 | |
764 | struct link_vars *vars, | 1975 | /* PFC BRB internal port configuration params */ |
765 | struct bnx2x_nig_brb_pfc_port_params *pfc_params) | 1976 | struct bnx2x_pfc_brb_threshold_val { |
1977 | u32 pause_xoff; | ||
1978 | u32 pause_xon; | ||
1979 | u32 full_xoff; | ||
1980 | u32 full_xon; | ||
1981 | }; | ||
1982 | |||
1983 | struct bnx2x_pfc_brb_e3b0_val { | ||
1984 | u32 full_lb_xoff_th; | ||
1985 | u32 full_lb_xon_threshold; | ||
1986 | u32 lb_guarantied; | ||
1987 | u32 mac_0_class_t_guarantied; | ||
1988 | u32 mac_0_class_t_guarantied_hyst; | ||
1989 | u32 mac_1_class_t_guarantied; | ||
1990 | u32 mac_1_class_t_guarantied_hyst; | ||
1991 | }; | ||
1992 | |||
1993 | struct bnx2x_pfc_brb_th_val { | ||
1994 | struct bnx2x_pfc_brb_threshold_val pauseable_th; | ||
1995 | struct bnx2x_pfc_brb_threshold_val non_pauseable_th; | ||
1996 | }; | ||
1997 | static int bnx2x_pfc_brb_get_config_params( | ||
1998 | struct link_params *params, | ||
1999 | struct bnx2x_pfc_brb_th_val *config_val) | ||
766 | { | 2000 | { |
767 | struct bnx2x *bp = params->bp; | 2001 | struct bnx2x *bp = params->bp; |
2002 | DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n"); | ||
2003 | if (CHIP_IS_E2(bp)) { | ||
2004 | config_val->pauseable_th.pause_xoff = | ||
2005 | PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE; | ||
2006 | config_val->pauseable_th.pause_xon = | ||
2007 | PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE; | ||
2008 | config_val->pauseable_th.full_xoff = | ||
2009 | PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE; | ||
2010 | config_val->pauseable_th.full_xon = | ||
2011 | PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE; | ||
2012 | /* non pauseable */ | ||
2013 | config_val->non_pauseable_th.pause_xoff = | ||
2014 | PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; | ||
2015 | config_val->non_pauseable_th.pause_xon = | ||
2016 | PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; | ||
2017 | config_val->non_pauseable_th.full_xoff = | ||
2018 | PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; | ||
2019 | config_val->non_pauseable_th.full_xon = | ||
2020 | PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE; | ||
2021 | } else if (CHIP_IS_E3A0(bp)) { | ||
2022 | config_val->pauseable_th.pause_xoff = | ||
2023 | PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE; | ||
2024 | config_val->pauseable_th.pause_xon = | ||
2025 | PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE; | ||
2026 | config_val->pauseable_th.full_xoff = | ||
2027 | PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE; | ||
2028 | config_val->pauseable_th.full_xon = | ||
2029 | PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE; | ||
2030 | /* non pauseable */ | ||
2031 | config_val->non_pauseable_th.pause_xoff = | ||
2032 | PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; | ||
2033 | config_val->non_pauseable_th.pause_xon = | ||
2034 | PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; | ||
2035 | config_val->non_pauseable_th.full_xoff = | ||
2036 | PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; | ||
2037 | config_val->non_pauseable_th.full_xon = | ||
2038 | PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE; | ||
2039 | } else if (CHIP_IS_E3B0(bp)) { | ||
2040 | if (params->phy[INT_PHY].flags & | ||
2041 | FLAGS_4_PORT_MODE) { | ||
2042 | config_val->pauseable_th.pause_xoff = | ||
2043 | PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; | ||
2044 | config_val->pauseable_th.pause_xon = | ||
2045 | PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE; | ||
2046 | config_val->pauseable_th.full_xoff = | ||
2047 | PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE; | ||
2048 | config_val->pauseable_th.full_xon = | ||
2049 | PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE; | ||
2050 | /* non pauseable */ | ||
2051 | config_val->non_pauseable_th.pause_xoff = | ||
2052 | PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; | ||
2053 | config_val->non_pauseable_th.pause_xon = | ||
2054 | PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; | ||
2055 | config_val->non_pauseable_th.full_xoff = | ||
2056 | PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; | ||
2057 | config_val->non_pauseable_th.full_xon = | ||
2058 | PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE; | ||
2059 | } else { | ||
2060 | config_val->pauseable_th.pause_xoff = | ||
2061 | PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; | ||
2062 | config_val->pauseable_th.pause_xon = | ||
2063 | PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE; | ||
2064 | config_val->pauseable_th.full_xoff = | ||
2065 | PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE; | ||
2066 | config_val->pauseable_th.full_xon = | ||
2067 | PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE; | ||
2068 | /* non pauseable */ | ||
2069 | config_val->non_pauseable_th.pause_xoff = | ||
2070 | PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; | ||
2071 | config_val->non_pauseable_th.pause_xon = | ||
2072 | PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; | ||
2073 | config_val->non_pauseable_th.full_xoff = | ||
2074 | PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; | ||
2075 | config_val->non_pauseable_th.full_xon = | ||
2076 | PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE; | ||
2077 | } | ||
2078 | } else | ||
2079 | return -EINVAL; | ||
2080 | |||
2081 | return 0; | ||
2082 | } | ||
2083 | |||
2084 | |||
2085 | static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params, | ||
2086 | struct bnx2x_pfc_brb_e3b0_val | ||
2087 | *e3b0_val, | ||
2088 | u32 cos0_pauseable, | ||
2089 | u32 cos1_pauseable) | ||
2090 | { | ||
2091 | if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) { | ||
2092 | e3b0_val->full_lb_xoff_th = | ||
2093 | PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR; | ||
2094 | e3b0_val->full_lb_xon_threshold = | ||
2095 | PFC_E3B0_4P_BRB_FULL_LB_XON_THR; | ||
2096 | e3b0_val->lb_guarantied = | ||
2097 | PFC_E3B0_4P_LB_GUART; | ||
2098 | e3b0_val->mac_0_class_t_guarantied = | ||
2099 | PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART; | ||
2100 | e3b0_val->mac_0_class_t_guarantied_hyst = | ||
2101 | PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST; | ||
2102 | e3b0_val->mac_1_class_t_guarantied = | ||
2103 | PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART; | ||
2104 | e3b0_val->mac_1_class_t_guarantied_hyst = | ||
2105 | PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST; | ||
2106 | } else { | ||
2107 | e3b0_val->full_lb_xoff_th = | ||
2108 | PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR; | ||
2109 | e3b0_val->full_lb_xon_threshold = | ||
2110 | PFC_E3B0_2P_BRB_FULL_LB_XON_THR; | ||
2111 | e3b0_val->mac_0_class_t_guarantied_hyst = | ||
2112 | PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST; | ||
2113 | e3b0_val->mac_1_class_t_guarantied = | ||
2114 | PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART; | ||
2115 | e3b0_val->mac_1_class_t_guarantied_hyst = | ||
2116 | PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST; | ||
2117 | |||
2118 | if (cos0_pauseable != cos1_pauseable) { | ||
2119 | /* non-pauseable = Lossy, pauseable = Lossless */ | ||
2120 | e3b0_val->lb_guarantied = | ||
2121 | PFC_E3B0_2P_MIX_PAUSE_LB_GUART; | ||
2122 | e3b0_val->mac_0_class_t_guarantied = | ||
2123 | PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART; | ||
2124 | } else if (cos0_pauseable) { | ||
2125 | /* Lossless + Lossless */ | ||
2126 | e3b0_val->lb_guarantied = | ||
2127 | PFC_E3B0_2P_PAUSE_LB_GUART; | ||
2128 | e3b0_val->mac_0_class_t_guarantied = | ||
2129 | PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART; | ||
2130 | } else { | ||
2131 | /* Lossy + Lossy */ | ||
2132 | e3b0_val->lb_guarantied = | ||
2133 | PFC_E3B0_2P_NON_PAUSE_LB_GUART; | ||
2134 | e3b0_val->mac_0_class_t_guarantied = | ||
2135 | PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART; | ||
2136 | } | ||
2137 | } | ||
2138 | } | ||
2139 | static int bnx2x_update_pfc_brb(struct link_params *params, | ||
2140 | struct link_vars *vars, | ||
2141 | struct bnx2x_nig_brb_pfc_port_params | ||
2142 | *pfc_params) | ||
2143 | { | ||
2144 | struct bnx2x *bp = params->bp; | ||
2145 | struct bnx2x_pfc_brb_th_val config_val = { {0} }; | ||
2146 | struct bnx2x_pfc_brb_threshold_val *reg_th_config = | ||
2147 | &config_val.pauseable_th; | ||
2148 | struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0}; | ||
768 | int set_pfc = params->feature_config_flags & | 2149 | int set_pfc = params->feature_config_flags & |
769 | FEATURE_CONFIG_PFC_ENABLED; | 2150 | FEATURE_CONFIG_PFC_ENABLED; |
2151 | int bnx2x_status = 0; | ||
2152 | u8 port = params->port; | ||
770 | 2153 | ||
771 | /* default - pause configuration */ | 2154 | /* default - pause configuration */ |
772 | u32 pause_xoff_th = PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE; | 2155 | reg_th_config = &config_val.pauseable_th; |
773 | u32 pause_xon_th = PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE; | 2156 | bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val); |
774 | u32 full_xoff_th = PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE; | 2157 | if (0 != bnx2x_status) |
775 | u32 full_xon_th = PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE; | 2158 | return bnx2x_status; |
776 | 2159 | ||
777 | if (set_pfc && pfc_params) | 2160 | if (set_pfc && pfc_params) |
778 | /* First COS */ | 2161 | /* First COS */ |
779 | if (!pfc_params->cos0_pauseable) { | 2162 | if (!pfc_params->cos0_pauseable) |
780 | pause_xoff_th = | 2163 | reg_th_config = &config_val.non_pauseable_th; |
781 | PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE; | ||
782 | pause_xon_th = | ||
783 | PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE; | ||
784 | full_xoff_th = | ||
785 | PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE; | ||
786 | full_xon_th = | ||
787 | PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE; | ||
788 | } | ||
789 | /* | 2164 | /* |
790 | * The number of free blocks below which the pause signal to class 0 | 2165 | * The number of free blocks below which the pause signal to class 0 |
791 | * of MAC #n is asserted. n=0,1 | 2166 | * of MAC #n is asserted. n=0,1 |
792 | */ | 2167 | */ |
793 | REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th); | 2168 | REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 : |
2169 | BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , | ||
2170 | reg_th_config->pause_xoff); | ||
794 | /* | 2171 | /* |
795 | * The number of free blocks above which the pause signal to class 0 | 2172 | * The number of free blocks above which the pause signal to class 0 |
796 | * of MAC #n is de-asserted. n=0,1 | 2173 | * of MAC #n is de-asserted. n=0,1 |
797 | */ | 2174 | */ |
798 | REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th); | 2175 | REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 : |
2176 | BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon); | ||
799 | /* | 2177 | /* |
800 | * The number of free blocks below which the full signal to class 0 | 2178 | * The number of free blocks below which the full signal to class 0 |
801 | * of MAC #n is asserted. n=0,1 | 2179 | * of MAC #n is asserted. n=0,1 |
802 | */ | 2180 | */ |
803 | REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th); | 2181 | REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 : |
2182 | BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff); | ||
804 | /* | 2183 | /* |
805 | * The number of free blocks above which the full signal to class 0 | 2184 | * The number of free blocks above which the full signal to class 0 |
806 | * of MAC #n is de-asserted. n=0,1 | 2185 | * of MAC #n is de-asserted. n=0,1 |
807 | */ | 2186 | */ |
808 | REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th); | 2187 | REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 : |
2188 | BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon); | ||
809 | 2189 | ||
810 | if (set_pfc && pfc_params) { | 2190 | if (set_pfc && pfc_params) { |
811 | /* Second COS */ | 2191 | /* Second COS */ |
812 | if (pfc_params->cos1_pauseable) { | 2192 | if (pfc_params->cos1_pauseable) |
813 | pause_xoff_th = | 2193 | reg_th_config = &config_val.pauseable_th; |
814 | PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE; | 2194 | else |
815 | pause_xon_th = | 2195 | reg_th_config = &config_val.non_pauseable_th; |
816 | PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE; | ||
817 | full_xoff_th = | ||
818 | PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE; | ||
819 | full_xon_th = | ||
820 | PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE; | ||
821 | } else { | ||
822 | pause_xoff_th = | ||
823 | PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE; | ||
824 | pause_xon_th = | ||
825 | PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE; | ||
826 | full_xoff_th = | ||
827 | PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE; | ||
828 | full_xon_th = | ||
829 | PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE; | ||
830 | } | ||
831 | /* | 2196 | /* |
832 | * The number of free blocks below which the pause signal to | 2197 | * The number of free blocks below which the pause signal to |
833 | * class 1 of MAC #n is asserted. n=0,1 | 2198 | * class 1 of MAC #n is asserted. n=0,1 |
834 | */ | 2199 | */
835 | REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th); | 2200 | REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 : |
2201 | BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, | ||
2202 | reg_th_config->pause_xoff); | ||
836 | /* | 2203 | /* |
837 | * The number of free blocks above which the pause signal to | 2204 | * The number of free blocks above which the pause signal to |
838 | * class 1 of MAC #n is de-asserted. n=0,1 | 2205 | * class 1 of MAC #n is de-asserted. n=0,1 |
839 | */ | 2206 | */ |
840 | REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th); | 2207 | REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 : |
2208 | BRB1_REG_PAUSE_1_XON_THRESHOLD_0, | ||
2209 | reg_th_config->pause_xon); | ||
841 | /* | 2210 | /* |
842 | * The number of free blocks below which the full signal to | 2211 | * The number of free blocks below which the full signal to |
843 | * class 1 of MAC #n is asserted. n=0,1 | 2212 | * class 1 of MAC #n is asserted. n=0,1 |
844 | */ | 2213 | */ |
845 | REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th); | 2214 | REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 : |
2215 | BRB1_REG_FULL_1_XOFF_THRESHOLD_0, | ||
2216 | reg_th_config->full_xoff); | ||
846 | /* | 2217 | /* |
847 | * The number of free blocks above which the full signal to | 2218 | * The number of free blocks above which the full signal to |
848 | * class 1 of MAC #n is de-asserted. n=0,1 | 2219 | * class 1 of MAC #n is de-asserted. n=0,1 |
849 | */ | 2220 | */ |
850 | REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th); | 2221 | REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 : |
2222 | BRB1_REG_FULL_1_XON_THRESHOLD_0, | ||
2223 | reg_th_config->full_xon); | ||
2224 | |||
2225 | |||
2226 | if (CHIP_IS_E3B0(bp)) { | ||
2227 | /* Should be done by init tool */ | ||
2228 | /* | ||
2229 | * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD | ||
2230 | * reset value | ||
2231 | * 944 | ||
2232 | */ | ||
2233 | |||
2234 | /** | ||
2235 | * The hysteresis on the guarantied buffer space for the LB port | ||
2236 | * before signaling XON. | ||
2237 | **/ | ||
2238 | REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80); | ||
2239 | |||
2240 | bnx2x_pfc_brb_get_e3b0_config_params( | ||
2241 | params, | ||
2242 | &e3b0_val, | ||
2243 | pfc_params->cos0_pauseable, | ||
2244 | pfc_params->cos1_pauseable); | ||
2245 | /** | ||
2246 | * The number of free blocks below which the full signal to the | ||
2247 | * LB port is asserted. | ||
2248 | */ | ||
2249 | REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, | ||
2250 | e3b0_val.full_lb_xoff_th); | ||
2251 | /** | ||
2252 | * The number of free blocks above which the full signal to the | ||
2253 | * LB port is de-asserted. | ||
2254 | */ | ||
2255 | REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, | ||
2256 | e3b0_val.full_lb_xon_threshold); | ||
2257 | /** | ||
2258 | * The number of blocks guarantied for the MAC #n port. n=0,1 | ||
2259 | */ | ||
2260 | |||
2261 | /*The number of blocks guarantied for the LB port.*/ | ||
2262 | REG_WR(bp, BRB1_REG_LB_GUARANTIED, | ||
2263 | e3b0_val.lb_guarantied); | ||
2264 | |||
2265 | /** | ||
2266 | * The number of blocks guarantied for the MAC #n port. | ||
2267 | */ | ||
2268 | REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0, | ||
2269 | 2 * e3b0_val.mac_0_class_t_guarantied); | ||
2270 | REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1, | ||
2271 | 2 * e3b0_val.mac_1_class_t_guarantied); | ||
2272 | /** | ||
2273 | * The number of blocks guarantied for class #t in MAC0. t=0,1 | ||
2274 | */ | ||
2275 | REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED, | ||
2276 | e3b0_val.mac_0_class_t_guarantied); | ||
2277 | REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED, | ||
2278 | e3b0_val.mac_0_class_t_guarantied); | ||
2279 | /** | ||
2280 | * The hysteresis on the guarantied buffer space for class in | ||
2281 | * MAC0. t=0,1 | ||
2282 | */ | ||
2283 | REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST, | ||
2284 | e3b0_val.mac_0_class_t_guarantied_hyst); | ||
2285 | REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST, | ||
2286 | e3b0_val.mac_0_class_t_guarantied_hyst); | ||
2287 | |||
2288 | /** | ||
2289 | * The number of blocks guarantied for class #t in MAC1.t=0,1 | ||
2290 | */ | ||
2291 | REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED, | ||
2292 | e3b0_val.mac_1_class_t_guarantied); | ||
2293 | REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED, | ||
2294 | e3b0_val.mac_1_class_t_guarantied); | ||
2295 | /** | ||
2296 | * The hysteresis on the guarantied buffer space for class #t | ||
2297 | * in MAC1. t=0,1 | ||
2298 | */ | ||
2299 | REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST, | ||
2300 | e3b0_val.mac_1_class_t_guarantied_hyst); | ||
2301 | REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST, | ||
2302 | e3b0_val.mac_1_class_t_guarantied_hyst); | ||
2303 | |||
2304 | } | ||
2305 | |||
2306 | } | ||
2307 | |||
2308 | return bnx2x_status; | ||
2309 | } | ||
2310 | |||
2311 | /****************************************************************************** | ||
2312 | * Description: | ||
2313 | * This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers | ||
2314 | * are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable. | ||
2315 | ******************************************************************************/ | ||
2316 | int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, | ||
2317 | u8 cos_entry, | ||
2318 | u32 priority_mask, u8 port) | ||
2319 | { | ||
2320 | u32 nig_reg_rx_priority_mask_add = 0; | ||
2321 | |||
2322 | switch (cos_entry) { | ||
2323 | case 0: | ||
2324 | nig_reg_rx_priority_mask_add = (port) ? | ||
2325 | NIG_REG_P1_RX_COS0_PRIORITY_MASK : | ||
2326 | NIG_REG_P0_RX_COS0_PRIORITY_MASK; | ||
2327 | break; | ||
2328 | case 1: | ||
2329 | nig_reg_rx_priority_mask_add = (port) ? | ||
2330 | NIG_REG_P1_RX_COS1_PRIORITY_MASK : | ||
2331 | NIG_REG_P0_RX_COS1_PRIORITY_MASK; | ||
2332 | break; | ||
2333 | case 2: | ||
2334 | nig_reg_rx_priority_mask_add = (port) ? | ||
2335 | NIG_REG_P1_RX_COS2_PRIORITY_MASK : | ||
2336 | NIG_REG_P0_RX_COS2_PRIORITY_MASK; | ||
2337 | break; | ||
2338 | case 3: | ||
2339 | if (port) | ||
2340 | return -EINVAL; | ||
2341 | nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK; | ||
2342 | break; | ||
2343 | case 4: | ||
2344 | if (port) | ||
2345 | return -EINVAL; | ||
2346 | nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK; | ||
2347 | break; | ||
2348 | case 5: | ||
2349 | if (port) | ||
2350 | return -EINVAL; | ||
2351 | nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK; | ||
2352 | break; | ||
851 | } | 2353 | } |
2354 | |||
2355 | REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask); | ||
2356 | |||
2357 | return 0; | ||
2358 | } | ||
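As a usage sketch for bnx2x_pfc_nig_rx_priority_mask() (the mask value and port below are illustrative): mapping all eight VLAN priorities to CoS 0 on port 0 writes 0xff to NIG_REG_P0_RX_COS0_PRIORITY_MASK. Note that, as written, CoS entries 3-5 only exist for port 0 (-EINVAL on port 1), and the switch has no default case, so callers are expected to stay within cos_entry 0-5.

	/* Illustrative call, not part of the patch */
	rc = bnx2x_pfc_nig_rx_priority_mask(bp, 0 /* cos_entry */, 0xff, 0 /* port */);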
2359 | static void bnx2x_update_mng(struct link_params *params, u32 link_status) | ||
2360 | { | ||
2361 | struct bnx2x *bp = params->bp; | ||
2362 | |||
2363 | REG_WR(bp, params->shmem_base + | ||
2364 | offsetof(struct shmem_region, | ||
2365 | port_mb[params->port].link_status), link_status); | ||
852 | } | 2366 | } |
853 | 2367 | ||
854 | static void bnx2x_update_pfc_nig(struct link_params *params, | 2368 | static void bnx2x_update_pfc_nig(struct link_params *params, |
@@ -858,9 +2372,9 @@ static void bnx2x_update_pfc_nig(struct link_params *params, | |||
858 | u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0; | 2372 | u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0; |
859 | u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0; | 2373 | u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0; |
860 | u32 pkt_priority_to_cos = 0; | 2374 | u32 pkt_priority_to_cos = 0; |
861 | u32 val; | ||
862 | struct bnx2x *bp = params->bp; | 2375 | struct bnx2x *bp = params->bp; |
863 | int port = params->port; | 2376 | u8 port = params->port; |
2377 | |||
864 | int set_pfc = params->feature_config_flags & | 2378 | int set_pfc = params->feature_config_flags & |
865 | FEATURE_CONFIG_PFC_ENABLED; | 2379 | FEATURE_CONFIG_PFC_ENABLED; |
866 | DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); | 2380 | DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); |
@@ -881,6 +2395,9 @@ static void bnx2x_update_pfc_nig(struct link_params *params, | |||
881 | pause_enable = 0; | 2395 | pause_enable = 0; |
882 | llfc_out_en = 0; | 2396 | llfc_out_en = 0; |
883 | llfc_enable = 0; | 2397 | llfc_enable = 0; |
2398 | if (CHIP_IS_E3(bp)) | ||
2399 | ppp_enable = 0; | ||
2400 | else | ||
884 | ppp_enable = 1; | 2401 | ppp_enable = 1; |
885 | xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : | 2402 | xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : |
886 | NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN); | 2403 | NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN); |
@@ -899,6 +2416,9 @@ static void bnx2x_update_pfc_nig(struct link_params *params, | |||
899 | xcm0_out_en = 1; | 2416 | xcm0_out_en = 1; |
900 | } | 2417 | } |
901 | 2418 | ||
2419 | if (CHIP_IS_E3(bp)) | ||
2420 | REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN : | ||
2421 | NIG_REG_BRB0_PAUSE_IN_EN, pause_enable); | ||
902 | REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 : | 2422 | REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 : |
903 | NIG_REG_LLFC_OUT_EN_0, llfc_out_en); | 2423 | NIG_REG_LLFC_OUT_EN_0, llfc_out_en); |
904 | REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 : | 2424 | REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 : |
@@ -920,30 +2440,13 @@ static void bnx2x_update_pfc_nig(struct link_params *params, | |||
920 | /* HW PFC TX enable */ | 2440 | /* HW PFC TX enable */ |
921 | REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable); | 2441 | REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable); |
922 | 2442 | ||
923 | /* 0x2 = BMAC, 0x1= EMAC */ | ||
924 | switch (vars->mac_type) { | ||
925 | case MAC_TYPE_EMAC: | ||
926 | val = 1; | ||
927 | break; | ||
928 | case MAC_TYPE_BMAC: | ||
929 | val = 0; | ||
930 | break; | ||
931 | default: | ||
932 | val = 0; | ||
933 | break; | ||
934 | } | ||
935 | REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT, val); | ||
936 | |||
937 | if (nig_params) { | 2443 | if (nig_params) { |
2444 | u8 i = 0; | ||
938 | pkt_priority_to_cos = nig_params->pkt_priority_to_cos; | 2445 | pkt_priority_to_cos = nig_params->pkt_priority_to_cos; |
939 | 2446 | ||
940 | REG_WR(bp, port ? NIG_REG_P1_RX_COS0_PRIORITY_MASK : | 2447 | for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++) |
941 | NIG_REG_P0_RX_COS0_PRIORITY_MASK, | 2448 | bnx2x_pfc_nig_rx_priority_mask(bp, i, |
942 | nig_params->rx_cos0_priority_mask); | 2449 | nig_params->rx_cos_priority_mask[i], port); |
943 | |||
944 | REG_WR(bp, port ? NIG_REG_P1_RX_COS1_PRIORITY_MASK : | ||
945 | NIG_REG_P0_RX_COS1_PRIORITY_MASK, | ||
946 | nig_params->rx_cos1_priority_mask); | ||
947 | 2450 | ||
948 | REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 : | 2451 | REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 : |
949 | NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0, | 2452 | NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0, |
@@ -958,8 +2461,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params, | |||
958 | pkt_priority_to_cos); | 2461 | pkt_priority_to_cos); |
959 | } | 2462 | } |
960 | 2463 | ||
961 | 2464 | int bnx2x_update_pfc(struct link_params *params, | |
962 | void bnx2x_update_pfc(struct link_params *params, | ||
963 | struct link_vars *vars, | 2465 | struct link_vars *vars, |
964 | struct bnx2x_nig_brb_pfc_port_params *pfc_params) | 2466 | struct bnx2x_nig_brb_pfc_port_params *pfc_params) |
965 | { | 2467 | { |
@@ -970,41 +2472,59 @@ void bnx2x_update_pfc(struct link_params *params, | |||
970 | */ | 2472 | */ |
971 | u32 val; | 2473 | u32 val; |
972 | struct bnx2x *bp = params->bp; | 2474 | struct bnx2x *bp = params->bp; |
2475 | int bnx2x_status = 0; | ||
2476 | u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC); | ||
2477 | |||
2478 | if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) | ||
2479 | vars->link_status |= LINK_STATUS_PFC_ENABLED; | ||
2480 | else | ||
2481 | vars->link_status &= ~LINK_STATUS_PFC_ENABLED; | ||
2482 | |||
2483 | bnx2x_update_mng(params, vars->link_status); | ||
973 | 2484 | ||
974 | /* update NIG params */ | 2485 | /* update NIG params */ |
975 | bnx2x_update_pfc_nig(params, vars, pfc_params); | 2486 | bnx2x_update_pfc_nig(params, vars, pfc_params); |
976 | 2487 | ||
977 | /* update BRB params */ | 2488 | /* update BRB params */ |
978 | bnx2x_update_pfc_brb(params, vars, pfc_params); | 2489 | bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params); |
2490 | if (0 != bnx2x_status) | ||
2491 | return bnx2x_status; | ||
979 | 2492 | ||
980 | if (!vars->link_up) | 2493 | if (!vars->link_up) |
981 | return; | 2494 | return bnx2x_status; |
982 | |||
983 | val = REG_RD(bp, MISC_REG_RESET_REG_2); | ||
984 | if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) | ||
985 | == 0) { | ||
986 | DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n"); | ||
987 | bnx2x_emac_enable(params, vars, 0); | ||
988 | return; | ||
989 | } | ||
990 | 2495 | ||
991 | DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n"); | 2496 | DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n"); |
992 | if (CHIP_IS_E2(bp)) | 2497 | if (CHIP_IS_E3(bp)) |
993 | bnx2x_update_pfc_bmac2(params, vars, 0); | 2498 | bnx2x_update_pfc_xmac(params, vars, 0); |
994 | else | 2499 | else { |
995 | bnx2x_update_pfc_bmac1(params, vars); | 2500 | val = REG_RD(bp, MISC_REG_RESET_REG_2); |
2501 | if ((val & | ||
2502 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) | ||
2503 | == 0) { | ||
2504 | DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n"); | ||
2505 | bnx2x_emac_enable(params, vars, 0); | ||
2506 | return bnx2x_status; | ||
2507 | } | ||
996 | 2508 | ||
997 | val = 0; | 2509 | if (CHIP_IS_E2(bp)) |
998 | if ((params->feature_config_flags & | 2510 | bnx2x_update_pfc_bmac2(params, vars, bmac_loopback); |
999 | FEATURE_CONFIG_PFC_ENABLED) || | 2511 | else |
1000 | (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) | 2512 | bnx2x_update_pfc_bmac1(params, vars); |
1001 | val = 1; | 2513 | |
1002 | REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val); | 2514 | val = 0; |
2515 | if ((params->feature_config_flags & | ||
2516 | FEATURE_CONFIG_PFC_ENABLED) || | ||
2517 | (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) | ||
2518 | val = 1; | ||
2519 | REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val); | ||
2520 | } | ||
2521 | return bnx2x_status; | ||
1003 | } | 2522 | } |
1004 | 2523 | ||
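To keep the reworked control flow of bnx2x_update_pfc() easy to follow, the sequence above can be summarized as a comment (a restatement of the code, no new behaviour implied):

	/*
	 * 1. Reflect FEATURE_CONFIG_PFC_ENABLED in vars->link_status
	 * 2. bnx2x_update_mng()     - publish link_status to shmem
	 * 3. bnx2x_update_pfc_nig() - NIG pause/PFC plumbing
	 * 4. bnx2x_update_pfc_brb() - BRB thresholds (errors are propagated)
	 * 5. If the link is up: XMAC on E3; otherwise EMAC (when the BMAC is
	 *    still in reset) or BMAC1/BMAC2, plus NIG_REG_BMAC0_PAUSE_OUT_EN
	 */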
1005 | static u8 bnx2x_bmac1_enable(struct link_params *params, | 2524 | |
1006 | struct link_vars *vars, | 2525 | static int bnx2x_bmac1_enable(struct link_params *params, |
1007 | u8 is_lb) | 2526 | struct link_vars *vars, |
2527 | u8 is_lb) | ||
1008 | { | 2528 | { |
1009 | struct bnx2x *bp = params->bp; | 2529 | struct bnx2x *bp = params->bp; |
1010 | u8 port = params->port; | 2530 | u8 port = params->port; |
@@ -1063,12 +2583,18 @@ static u8 bnx2x_bmac1_enable(struct link_params *params, | |||
1063 | REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, | 2583 | REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, |
1064 | wb_data, 2); | 2584 | wb_data, 2); |
1065 | 2585 | ||
2586 | if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) { | ||
2587 | REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LSS_STATUS, | ||
2588 | wb_data, 2); | ||
2589 | if (wb_data[0] > 0) | ||
2590 | return -ESRCH; | ||
2591 | } | ||
1066 | return 0; | 2592 | return 0; |
1067 | } | 2593 | } |
1068 | 2594 | ||
1069 | static u8 bnx2x_bmac2_enable(struct link_params *params, | 2595 | static int bnx2x_bmac2_enable(struct link_params *params, |
1070 | struct link_vars *vars, | 2596 | struct link_vars *vars, |
1071 | u8 is_lb) | 2597 | u8 is_lb) |
1072 | { | 2598 | { |
1073 | struct bnx2x *bp = params->bp; | 2599 | struct bnx2x *bp = params->bp; |
1074 | u8 port = params->port; | 2600 | u8 port = params->port; |
@@ -1128,14 +2654,25 @@ static u8 bnx2x_bmac2_enable(struct link_params *params, | |||
1128 | udelay(30); | 2654 | udelay(30); |
1129 | bnx2x_update_pfc_bmac2(params, vars, is_lb); | 2655 | bnx2x_update_pfc_bmac2(params, vars, is_lb); |
1130 | 2656 | ||
2657 | if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) { | ||
2658 | REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LSS_STAT, | ||
2659 | wb_data, 2); | ||
2660 | if (wb_data[0] > 0) { | ||
2661 | DP(NETIF_MSG_LINK, "Got bad LSS status 0x%x\n", | ||
2662 | wb_data[0]); | ||
2663 | return -ESRCH; | ||
2664 | } | ||
2665 | } | ||
2666 | |||
1131 | return 0; | 2667 | return 0; |
1132 | } | 2668 | } |
1133 | 2669 | ||
1134 | static u8 bnx2x_bmac_enable(struct link_params *params, | 2670 | static int bnx2x_bmac_enable(struct link_params *params, |
1135 | struct link_vars *vars, | 2671 | struct link_vars *vars, |
1136 | u8 is_lb) | 2672 | u8 is_lb) |
1137 | { | 2673 | { |
1138 | u8 rc, port = params->port; | 2674 | int rc = 0; |
2675 | u8 port = params->port; | ||
1139 | struct bnx2x *bp = params->bp; | 2676 | struct bnx2x *bp = params->bp; |
1140 | u32 val; | 2677 | u32 val; |
1141 | /* reset and unreset the BigMac */ | 2678 | /* reset and unreset the BigMac */ |
@@ -1173,16 +2710,6 @@ static u8 bnx2x_bmac_enable(struct link_params *params, | |||
1173 | return rc; | 2710 | return rc; |
1174 | } | 2711 | } |
1175 | 2712 | ||
1176 | |||
1177 | static void bnx2x_update_mng(struct link_params *params, u32 link_status) | ||
1178 | { | ||
1179 | struct bnx2x *bp = params->bp; | ||
1180 | |||
1181 | REG_WR(bp, params->shmem_base + | ||
1182 | offsetof(struct shmem_region, | ||
1183 | port_mb[params->port].link_status), link_status); | ||
1184 | } | ||
1185 | |||
1186 | static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) | 2713 | static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) |
1187 | { | 2714 | { |
1188 | u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : | 2715 | u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : |
@@ -1218,8 +2745,8 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) | |||
1218 | } | 2745 | } |
1219 | } | 2746 | } |
1220 | 2747 | ||
1221 | static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, | 2748 | static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, |
1222 | u32 line_speed) | 2749 | u32 line_speed) |
1223 | { | 2750 | { |
1224 | struct bnx2x *bp = params->bp; | 2751 | struct bnx2x *bp = params->bp; |
1225 | u8 port = params->port; | 2752 | u8 port = params->port; |
@@ -1269,18 +2796,6 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, | |||
1269 | case SPEED_10000: | 2796 | case SPEED_10000: |
1270 | init_crd = thresh + 553 - 22; | 2797 | init_crd = thresh + 553 - 22; |
1271 | break; | 2798 | break; |
1272 | |||
1273 | case SPEED_12000: | ||
1274 | init_crd = thresh + 664 - 22; | ||
1275 | break; | ||
1276 | |||
1277 | case SPEED_13000: | ||
1278 | init_crd = thresh + 742 - 22; | ||
1279 | break; | ||
1280 | |||
1281 | case SPEED_16000: | ||
1282 | init_crd = thresh + 778 - 22; | ||
1283 | break; | ||
1284 | default: | 2799 | default: |
1285 | DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", | 2800 | DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", |
1286 | line_speed); | 2801 | line_speed); |
@@ -1349,31 +2864,23 @@ static u32 bnx2x_get_emac_base(struct bnx2x *bp, | |||
1349 | } | 2864 | } |
1350 | 2865 | ||
1351 | /******************************************************************/ | 2866 | /******************************************************************/ |
1352 | /* CL45 access functions */ | 2867 | /* CL22 access functions */ |
1353 | /******************************************************************/ | 2868 | /******************************************************************/ |
1354 | static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, | 2869 | static int bnx2x_cl22_write(struct bnx2x *bp, |
1355 | u8 devad, u16 reg, u16 val) | 2870 | struct bnx2x_phy *phy, |
2871 | u16 reg, u16 val) | ||
1356 | { | 2872 | { |
1357 | u32 tmp, saved_mode; | 2873 | u32 tmp, mode; |
1358 | u8 i, rc = 0; | 2874 | u8 i; |
1359 | /* | 2875 | int rc = 0; |
1360 | * Set clause 45 mode, slow down the MDIO clock to 2.5MHz | 2876 | /* Switch to CL22 */ |
1361 | * (a value of 49==0x31) and make sure that the AUTO poll is off | 2877 | mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); |
1362 | */ | 2878 | REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, |
1363 | 2879 | mode & ~EMAC_MDIO_MODE_CLAUSE_45); | |
1364 | saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); | ||
1365 | tmp = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL | | ||
1366 | EMAC_MDIO_MODE_CLOCK_CNT); | ||
1367 | tmp |= (EMAC_MDIO_MODE_CLAUSE_45 | | ||
1368 | (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); | ||
1369 | REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp); | ||
1370 | REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); | ||
1371 | udelay(40); | ||
1372 | 2880 | ||
1373 | /* address */ | 2881 | /* address */ |
1374 | 2882 | tmp = ((phy->addr << 21) | (reg << 16) | val | | |
1375 | tmp = ((phy->addr << 21) | (devad << 16) | reg | | 2883 | EMAC_MDIO_COMM_COMMAND_WRITE_22 | |
1376 | EMAC_MDIO_COMM_COMMAND_ADDRESS | | ||
1377 | EMAC_MDIO_COMM_START_BUSY); | 2884 | EMAC_MDIO_COMM_START_BUSY); |
1378 | REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); | 2885 | REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); |
1379 | 2886 | ||
@@ -1388,57 +2895,60 @@ static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, | |||
1388 | } | 2895 | } |
1389 | if (tmp & EMAC_MDIO_COMM_START_BUSY) { | 2896 | if (tmp & EMAC_MDIO_COMM_START_BUSY) { |
1390 | DP(NETIF_MSG_LINK, "write phy register failed\n"); | 2897 | DP(NETIF_MSG_LINK, "write phy register failed\n"); |
1391 | netdev_err(bp->dev, "MDC/MDIO access timeout\n"); | ||
1392 | rc = -EFAULT; | 2898 | rc = -EFAULT; |
1393 | } else { | 2899 | } |
1394 | /* data */ | 2900 | REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); |
1395 | tmp = ((phy->addr << 21) | (devad << 16) | val | | 2901 | return rc; |
1396 | EMAC_MDIO_COMM_COMMAND_WRITE_45 | | 2902 | } |
1397 | EMAC_MDIO_COMM_START_BUSY); | ||
1398 | REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); | ||
1399 | 2903 | ||
1400 | for (i = 0; i < 50; i++) { | 2904 | static int bnx2x_cl22_read(struct bnx2x *bp, |
1401 | udelay(10); | 2905 | struct bnx2x_phy *phy, |
2906 | u16 reg, u16 *ret_val) | ||
2907 | { | ||
2908 | u32 val, mode; | ||
2909 | u16 i; | ||
2910 | int rc = 0; | ||
1402 | 2911 | ||
1403 | tmp = REG_RD(bp, phy->mdio_ctrl + | 2912 | /* Switch to CL22 */ |
1404 | EMAC_REG_EMAC_MDIO_COMM); | 2913 | mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); |
1405 | if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { | 2914 | REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, |
1406 | udelay(5); | 2915 | mode & ~EMAC_MDIO_MODE_CLAUSE_45); |
1407 | break; | 2916 | |
1408 | } | 2917 | /* address */ |
1409 | } | 2918 | val = ((phy->addr << 21) | (reg << 16) | |
1410 | if (tmp & EMAC_MDIO_COMM_START_BUSY) { | 2919 | EMAC_MDIO_COMM_COMMAND_READ_22 | |
1411 | DP(NETIF_MSG_LINK, "write phy register failed\n"); | 2920 | EMAC_MDIO_COMM_START_BUSY); |
1412 | netdev_err(bp->dev, "MDC/MDIO access timeout\n"); | 2921 | REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); |
1413 | rc = -EFAULT; | 2922 | |
2923 | for (i = 0; i < 50; i++) { | ||
2924 | udelay(10); | ||
2925 | |||
2926 | val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); | ||
2927 | if (!(val & EMAC_MDIO_COMM_START_BUSY)) { | ||
2928 | *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); | ||
2929 | udelay(5); | ||
2930 | break; | ||
1414 | } | 2931 | } |
1415 | } | 2932 | } |
2933 | if (val & EMAC_MDIO_COMM_START_BUSY) { | ||
2934 | DP(NETIF_MSG_LINK, "read phy register failed\n"); | ||
1416 | 2935 | ||
1417 | /* Restore the saved mode */ | 2936 | *ret_val = 0; |
1418 | REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode); | 2937 | rc = -EFAULT; |
1419 | 2938 | } | |
2939 | REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); | ||
1420 | return rc; | 2940 | return rc; |
1421 | } | 2941 | } |
1422 | 2942 | ||
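A short usage sketch for the new Clause 22 helpers: a read-modify-write of the standard BMCR register (register 0; bit 12 is the IEEE-defined auto-negotiation enable). The surrounding context is assumed for illustration and is not part of the patch.

	u16 bmcr;
	if (bnx2x_cl22_read(bp, phy, 0x00 /* BMCR */, &bmcr) == 0)
		bnx2x_cl22_write(bp, phy, 0x00,
				 bmcr | (1 << 12) /* auto-negotiation enable */);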
1423 | static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, | 2943 | /******************************************************************/ |
1424 | u8 devad, u16 reg, u16 *ret_val) | 2944 | /* CL45 access functions */ |
2945 | /******************************************************************/ | ||
2946 | static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, | ||
2947 | u8 devad, u16 reg, u16 *ret_val) | ||
1425 | { | 2948 | { |
1426 | u32 val, saved_mode; | 2949 | u32 val; |
1427 | u16 i; | 2950 | u16 i; |
1428 | u8 rc = 0; | 2951 | int rc = 0; |
1429 | /* | ||
1430 | * Set clause 45 mode, slow down the MDIO clock to 2.5MHz | ||
1431 | * (a value of 49==0x31) and make sure that the AUTO poll is off | ||
1432 | */ | ||
1433 | |||
1434 | saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); | ||
1435 | val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL | | ||
1436 | EMAC_MDIO_MODE_CLOCK_CNT)); | ||
1437 | val |= (EMAC_MDIO_MODE_CLAUSE_45 | | ||
1438 | (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); | ||
1439 | REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val); | ||
1440 | REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); | ||
1441 | udelay(40); | ||
1442 | 2952 | ||
1443 | /* address */ | 2953 | /* address */ |
1444 | val = ((phy->addr << 21) | (devad << 16) | reg | | 2954 | val = ((phy->addr << 21) | (devad << 16) | reg | |
@@ -1460,7 +2970,6 @@ static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, | |||
1460 | netdev_err(bp->dev, "MDC/MDIO access timeout\n"); | 2970 | netdev_err(bp->dev, "MDC/MDIO access timeout\n"); |
1461 | *ret_val = 0; | 2971 | *ret_val = 0; |
1462 | rc = -EFAULT; | 2972 | rc = -EFAULT; |
1463 | |||
1464 | } else { | 2973 | } else { |
1465 | /* data */ | 2974 | /* data */ |
1466 | val = ((phy->addr << 21) | (devad << 16) | | 2975 | val = ((phy->addr << 21) | (devad << 16) | |
@@ -1485,15 +2994,214 @@ static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, | |||
1485 | rc = -EFAULT; | 2994 | rc = -EFAULT; |
1486 | } | 2995 | } |
1487 | } | 2996 | } |
2997 | /* Work around for E3 A0 */ | ||
2998 | if (phy->flags & FLAGS_MDC_MDIO_WA) { | ||
2999 | phy->flags ^= FLAGS_DUMMY_READ; | ||
3000 | if (phy->flags & FLAGS_DUMMY_READ) { | ||
3001 | u16 temp_val; | ||
3002 | bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); | ||
3003 | } | ||
3004 | } | ||
3005 | |||
3006 | return rc; | ||
3007 | } | ||
3008 | |||
3009 | static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, | ||
3010 | u8 devad, u16 reg, u16 val) | ||
3011 | { | ||
3012 | u32 tmp; | ||
3013 | u8 i; | ||
3014 | int rc = 0; | ||
3015 | |||
3016 | /* address */ | ||
3017 | |||
3018 | tmp = ((phy->addr << 21) | (devad << 16) | reg | | ||
3019 | EMAC_MDIO_COMM_COMMAND_ADDRESS | | ||
3020 | EMAC_MDIO_COMM_START_BUSY); | ||
3021 | REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); | ||
3022 | |||
3023 | for (i = 0; i < 50; i++) { | ||
3024 | udelay(10); | ||
3025 | |||
3026 | tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); | ||
3027 | if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { | ||
3028 | udelay(5); | ||
3029 | break; | ||
3030 | } | ||
3031 | } | ||
3032 | if (tmp & EMAC_MDIO_COMM_START_BUSY) { | ||
3033 | DP(NETIF_MSG_LINK, "write phy register failed\n"); | ||
3034 | netdev_err(bp->dev, "MDC/MDIO access timeout\n"); | ||
3035 | rc = -EFAULT; | ||
3036 | |||
3037 | } else { | ||
3038 | /* data */ | ||
3039 | tmp = ((phy->addr << 21) | (devad << 16) | val | | ||
3040 | EMAC_MDIO_COMM_COMMAND_WRITE_45 | | ||
3041 | EMAC_MDIO_COMM_START_BUSY); | ||
3042 | REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); | ||
3043 | |||
3044 | for (i = 0; i < 50; i++) { | ||
3045 | udelay(10); | ||
3046 | |||
3047 | tmp = REG_RD(bp, phy->mdio_ctrl + | ||
3048 | EMAC_REG_EMAC_MDIO_COMM); | ||
3049 | if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { | ||
3050 | udelay(5); | ||
3051 | break; | ||
3052 | } | ||
3053 | } | ||
3054 | if (tmp & EMAC_MDIO_COMM_START_BUSY) { | ||
3055 | DP(NETIF_MSG_LINK, "write phy register failed\n"); | ||
3056 | netdev_err(bp->dev, "MDC/MDIO access timeout\n"); | ||
3057 | rc = -EFAULT; | ||
3058 | } | ||
3059 | } | ||
3060 | /* Work around for E3 A0 */ | ||
3061 | if (phy->flags & FLAGS_MDC_MDIO_WA) { | ||
3062 | phy->flags ^= FLAGS_DUMMY_READ; | ||
3063 | if (phy->flags & FLAGS_DUMMY_READ) { | ||
3064 | u16 temp_val; | ||
3065 | bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); | ||
3066 | } | ||
3067 | } | ||
3068 | |||
3069 | return rc; | ||
3070 | } | ||
3071 | |||
1488 | 3072 | ||
1489 | /* Restore the saved mode */ | 3073 | /******************************************************************/ |
1490 | REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode); | 3074 | /* BSC access functions from E3 */ |
3075 | /******************************************************************/ | ||
3076 | static void bnx2x_bsc_module_sel(struct link_params *params) | ||
3077 | { | ||
3078 | int idx; | ||
3079 | u32 board_cfg, sfp_ctrl; | ||
3080 | u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH]; | ||
3081 | struct bnx2x *bp = params->bp; | ||
3082 | u8 port = params->port; | ||
3083 | /* Read I2C output PINs */ | ||
3084 | board_cfg = REG_RD(bp, params->shmem_base + | ||
3085 | offsetof(struct shmem_region, | ||
3086 | dev_info.shared_hw_config.board)); | ||
3087 | i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK; | ||
3088 | i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >> | ||
3089 | SHARED_HW_CFG_E3_I2C_MUX1_SHIFT; | ||
3090 | |||
3091 | /* Read I2C output value */ | ||
3092 | sfp_ctrl = REG_RD(bp, params->shmem_base + | ||
3093 | offsetof(struct shmem_region, | ||
3094 | dev_info.port_hw_config[port].e3_cmn_pin_cfg)); | ||
3095 | i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0; | ||
3096 | i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0; | ||
3097 | DP(NETIF_MSG_LINK, "Setting BSC switch\n"); | ||
3098 | for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++) | ||
3099 | bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]); | ||
3100 | } | ||
3101 | |||
3102 | static int bnx2x_bsc_read(struct link_params *params, | ||
3103 | struct bnx2x_phy *phy, | ||
3104 | u8 sl_devid, | ||
3105 | u16 sl_addr, | ||
3106 | u8 lc_addr, | ||
3107 | u8 xfer_cnt, | ||
3108 | u32 *data_array) | ||
3109 | { | ||
3110 | u32 val, i; | ||
3111 | int rc = 0; | ||
3112 | struct bnx2x *bp = params->bp; | ||
3113 | |||
3114 | if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) { | ||
3115 | DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid); | ||
3116 | return -EINVAL; | ||
3117 | } | ||
3118 | |||
3119 | if (xfer_cnt > 16) { | ||
3120 | DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n", | ||
3121 | xfer_cnt); | ||
3122 | return -EINVAL; | ||
3123 | } | ||
3124 | bnx2x_bsc_module_sel(params); | ||
3125 | |||
3126 | xfer_cnt = 16 - lc_addr; | ||
3127 | |||
3128 | /* enable the engine */ | ||
3129 | val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); | ||
3130 | val |= MCPR_IMC_COMMAND_ENABLE; | ||
3131 | REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); | ||
3132 | |||
3133 | /* program slave device ID */ | ||
3134 | val = (sl_devid << 16) | sl_addr; | ||
3135 | REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val); | ||
3136 | |||
3137 | /* start xfer with 0 byte to update the address pointer ???*/ | ||
3138 | val = (MCPR_IMC_COMMAND_ENABLE) | | ||
3139 | (MCPR_IMC_COMMAND_WRITE_OP << | ||
3140 | MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | | ||
3141 | (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0); | ||
3142 | REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); | ||
3143 | |||
3144 | /* poll for completion */ | ||
3145 | i = 0; | ||
3146 | val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); | ||
3147 | while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { | ||
3148 | udelay(10); | ||
3149 | val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); | ||
3150 | if (i++ > 1000) { | ||
3151 | DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n", | ||
3152 | i); | ||
3153 | rc = -EFAULT; | ||
3154 | break; | ||
3155 | } | ||
3156 | } | ||
3157 | if (rc == -EFAULT) | ||
3158 | return rc; | ||
3159 | |||
3160 | /* start xfer with read op */ | ||
3161 | val = (MCPR_IMC_COMMAND_ENABLE) | | ||
3162 | (MCPR_IMC_COMMAND_READ_OP << | ||
3163 | MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | | ||
3164 | (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | | ||
3165 | (xfer_cnt); | ||
3166 | REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); | ||
3167 | |||
3168 | /* poll for completion */ | ||
3169 | i = 0; | ||
3170 | val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); | ||
3171 | while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { | ||
3172 | udelay(10); | ||
3173 | val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); | ||
3174 | if (i++ > 1000) { | ||
3175 | DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i); | ||
3176 | rc = -EFAULT; | ||
3177 | break; | ||
3178 | } | ||
3179 | } | ||
3180 | if (rc == -EFAULT) | ||
3181 | return rc; | ||
1491 | 3182 | ||
3183 | for (i = (lc_addr >> 2); i < 4; i++) { | ||
3184 | data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4)); | ||
3185 | #ifdef __BIG_ENDIAN | ||
3186 | data_array[i] = ((data_array[i] & 0x000000ff) << 24) | | ||
3187 | ((data_array[i] & 0x0000ff00) << 8) | | ||
3188 | ((data_array[i] & 0x00ff0000) >> 8) | | ||
3189 | ((data_array[i] & 0xff000000) >> 24); | ||
3190 | #endif | ||
3191 | } | ||
1492 | return rc; | 3192 | return rc; |
1493 | } | 3193 | } |
1494 | 3194 | ||
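A usage sketch for bnx2x_bsc_read() (the slave id and length are the common SFP values, chosen only for illustration): reading the first 16 bytes of an SFP module's A0h EEPROM page over the E3 BSC engine. The #ifdef __BIG_ENDIAN fix-up above is simply a 32-bit byte swap, so each u32 ends up holding the bytes in ascending address order.

	u32 sfp_eeprom[4];	/* 16 bytes, packed four per u32 */
	int rc = bnx2x_bsc_read(params, phy, 0xa0 /* sl_devid */, 0 /* sl_addr */,
				0 /* lc_addr */, 16 /* xfer_cnt */, sfp_eeprom);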
1495 | u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr, | 3195 | static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy, |
1496 | u8 devad, u16 reg, u16 *ret_val) | 3196 | u8 devad, u16 reg, u16 or_val) |
3197 | { | ||
3198 | u16 val; | ||
3199 | bnx2x_cl45_read(bp, phy, devad, reg, &val); | ||
3200 | bnx2x_cl45_write(bp, phy, devad, reg, val | or_val); | ||
3201 | } | ||
3202 | |||
3203 | int bnx2x_phy_read(struct link_params *params, u8 phy_addr, | ||
3204 | u8 devad, u16 reg, u16 *ret_val) | ||
1497 | { | 3205 | { |
1498 | u8 phy_index; | 3206 | u8 phy_index; |
1499 | /* | 3207 | /* |
@@ -1510,8 +3218,8 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr, | |||
1510 | return -EINVAL; | 3218 | return -EINVAL; |
1511 | } | 3219 | } |
1512 | 3220 | ||
1513 | u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr, | 3221 | int bnx2x_phy_write(struct link_params *params, u8 phy_addr, |
1514 | u8 devad, u16 reg, u16 val) | 3222 | u8 devad, u16 reg, u16 val) |
1515 | { | 3223 | { |
1516 | u8 phy_index; | 3224 | u8 phy_index; |
1517 | /* | 3225 | /* |
@@ -1527,9 +3235,62 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr, | |||
1527 | } | 3235 | } |
1528 | return -EINVAL; | 3236 | return -EINVAL; |
1529 | } | 3237 | } |
3238 | static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy, | ||
3239 | struct link_params *params) | ||
3240 | { | ||
3241 | u8 lane = 0; | ||
3242 | struct bnx2x *bp = params->bp; | ||
3243 | u32 path_swap, path_swap_ovr; | ||
3244 | u8 path, port; | ||
3245 | |||
3246 | path = BP_PATH(bp); | ||
3247 | port = params->port; | ||
3248 | |||
3249 | if (bnx2x_is_4_port_mode(bp)) { | ||
3250 | u32 port_swap, port_swap_ovr; | ||
3251 | |||
3252 | /*figure out path swap value */ | ||
3253 | path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR); | ||
3254 | if (path_swap_ovr & 0x1) | ||
3255 | path_swap = (path_swap_ovr & 0x2); | ||
3256 | else | ||
3257 | path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP); | ||
3258 | |||
3259 | if (path_swap) | ||
3260 | path = path ^ 1; | ||
1530 | 3261 | ||
1531 | static void bnx2x_set_aer_mmd_xgxs(struct link_params *params, | 3262 | /*figure out port swap value */ |
1532 | struct bnx2x_phy *phy) | 3263 | port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR); |
3264 | if (port_swap_ovr & 0x1) | ||
3265 | port_swap = (port_swap_ovr & 0x2); | ||
3266 | else | ||
3267 | port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP); | ||
3268 | |||
3269 | if (port_swap) | ||
3270 | port = port ^ 1; | ||
3271 | |||
3272 | lane = (port<<1) + path; | ||
3273 | } else { /* two port mode - no port swap */ | ||
3274 | |||
3275 | /*figure out path swap value */ | ||
3276 | path_swap_ovr = | ||
3277 | REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR); | ||
3278 | if (path_swap_ovr & 0x1) { | ||
3279 | path_swap = (path_swap_ovr & 0x2); | ||
3280 | } else { | ||
3281 | path_swap = | ||
3282 | REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP); | ||
3283 | } | ||
3284 | if (path_swap) | ||
3285 | path = path ^ 1; | ||
3286 | |||
3287 | lane = path << 1 ; | ||
3288 | } | ||
3289 | return lane; | ||
3290 | } | ||
3291 | |||
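For reference, ignoring the path/port swap overrides handled above, the lane assignment follows directly from the arithmetic in bnx2x_get_warpcore_lane():

	/* 4-port mode: lane = (port << 1) + path    2-port mode: lane = path << 1
	 *   path 0, port 0 -> lane 0                  path 0 -> lane 0 (pair 0,1)
	 *   path 1, port 0 -> lane 1                  path 1 -> lane 2 (pair 2,3)
	 *   path 0, port 1 -> lane 2
	 *   path 1, port 1 -> lane 3
	 */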
3292 | static void bnx2x_set_aer_mmd(struct link_params *params, | ||
3293 | struct bnx2x_phy *phy) | ||
1533 | { | 3294 | { |
1534 | u32 ser_lane; | 3295 | u32 ser_lane; |
1535 | u16 offset, aer_val; | 3296 | u16 offset, aer_val; |
@@ -1538,20 +3299,28 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params, | |||
1538 | PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> | 3299 | PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> |
1539 | PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); | 3300 | PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); |
1540 | 3301 | ||
1541 | offset = phy->addr + ser_lane; | 3302 | offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ? |
1542 | if (CHIP_IS_E2(bp)) | 3303 | (phy->addr + ser_lane) : 0; |
3304 | |||
3305 | if (USES_WARPCORE(bp)) { | ||
3306 | aer_val = bnx2x_get_warpcore_lane(phy, params); | ||
3307 | /* | ||
3308 | * In Dual-lane mode, two lanes are joined together, | ||
3309 | * so in order to configure them, the AER broadcast method is | ||
3310 | * used here. | ||
3311 | * 0x200 is the broadcast address for lanes 0,1 | ||
3312 | * 0x201 is the broadcast address for lanes 2,3 | ||
3313 | */ | ||
3314 | if (phy->flags & FLAGS_WC_DUAL_MODE) | ||
3315 | aer_val = (aer_val >> 1) | 0x200; | ||
3316 | } else if (CHIP_IS_E2(bp)) | ||
1543 | aer_val = 0x3800 + offset - 1; | 3317 | aer_val = 0x3800 + offset - 1; |
1544 | else | 3318 | else |
1545 | aer_val = 0x3800 + offset; | 3319 | aer_val = 0x3800 + offset; |
3320 | DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val); | ||
1546 | CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, | 3321 | CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, |
1547 | MDIO_AER_BLOCK_AER_REG, aer_val); | 3322 | MDIO_AER_BLOCK_AER_REG, aer_val); |
1548 | } | 3323 | |
1549 | static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp, | ||
1550 | struct bnx2x_phy *phy) | ||
1551 | { | ||
1552 | CL22_WR_OVER_CL45(bp, phy, | ||
1553 | MDIO_REG_BANK_AER_BLOCK, | ||
1554 | MDIO_AER_BLOCK_AER_REG, 0x3800); | ||
1555 | } | 3324 | } |
1556 | 3325 | ||
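A worked example of the AER value computed above (the PHY address is an arbitrary example value, everything else follows from the code):

	/* Dual-lane Warpcore, lane 3:  aer_val = (3 >> 1) | 0x200 = 0x201 (broadcast to lanes 2,3)
	 * Single-lane Warpcore, lane 2: aer_val = 2
	 * E2 direct XGXS, addr 0x11, ser_lane 0: aer_val = 0x3800 + 0x11 - 1 = 0x3810
	 */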
1557 | /******************************************************************/ | 3326 | /******************************************************************/ |
@@ -1611,20 +3380,979 @@ static void bnx2x_xgxs_deassert(struct link_params *params) | |||
1611 | params->phy[INT_PHY].def_md_devad); | 3380 | params->phy[INT_PHY].def_md_devad); |
1612 | } | 3381 | } |
1613 | 3382 | ||
3383 | static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, | ||
3384 | struct link_params *params, u16 *ieee_fc) | ||
3385 | { | ||
3386 | struct bnx2x *bp = params->bp; | ||
3387 | *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; | ||
3388 | /** | ||
3389 | * Resolve pause mode and advertisement. Please refer to Table | ||
3390 | * 28B-3 of the 802.3ab-1999 spec | ||
3391 | */ | ||
3392 | |||
3393 | switch (phy->req_flow_ctrl) { | ||
3394 | case BNX2X_FLOW_CTRL_AUTO: | ||
3395 | if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) | ||
3396 | *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; | ||
3397 | else | ||
3398 | *ieee_fc |= | ||
3399 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; | ||
3400 | break; | ||
3401 | |||
3402 | case BNX2X_FLOW_CTRL_TX: | ||
3403 | *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; | ||
3404 | break; | ||
3405 | |||
3406 | case BNX2X_FLOW_CTRL_RX: | ||
3407 | case BNX2X_FLOW_CTRL_BOTH: | ||
3408 | *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; | ||
3409 | break; | ||
3410 | |||
3411 | case BNX2X_FLOW_CTRL_NONE: | ||
3412 | default: | ||
3413 | *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; | ||
3414 | break; | ||
3415 | } | ||
3416 | DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc); | ||
3417 | } | ||
3418 | |||
3419 | static void set_phy_vars(struct link_params *params, | ||
3420 | struct link_vars *vars) | ||
3421 | { | ||
3422 | struct bnx2x *bp = params->bp; | ||
3423 | u8 actual_phy_idx, phy_index, link_cfg_idx; | ||
3424 | u8 phy_config_swapped = params->multi_phy_config & | ||
3425 | PORT_HW_CFG_PHY_SWAPPED_ENABLED; | ||
3426 | for (phy_index = INT_PHY; phy_index < params->num_phys; | ||
3427 | phy_index++) { | ||
3428 | link_cfg_idx = LINK_CONFIG_IDX(phy_index); | ||
3429 | actual_phy_idx = phy_index; | ||
3430 | if (phy_config_swapped) { | ||
3431 | if (phy_index == EXT_PHY1) | ||
3432 | actual_phy_idx = EXT_PHY2; | ||
3433 | else if (phy_index == EXT_PHY2) | ||
3434 | actual_phy_idx = EXT_PHY1; | ||
3435 | } | ||
3436 | params->phy[actual_phy_idx].req_flow_ctrl = | ||
3437 | params->req_flow_ctrl[link_cfg_idx]; | ||
3438 | |||
3439 | params->phy[actual_phy_idx].req_line_speed = | ||
3440 | params->req_line_speed[link_cfg_idx]; | ||
3441 | |||
3442 | params->phy[actual_phy_idx].speed_cap_mask = | ||
3443 | params->speed_cap_mask[link_cfg_idx]; | ||
3444 | |||
3445 | params->phy[actual_phy_idx].req_duplex = | ||
3446 | params->req_duplex[link_cfg_idx]; | ||
3447 | |||
3448 | if (params->req_line_speed[link_cfg_idx] == | ||
3449 | SPEED_AUTO_NEG) | ||
3450 | vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; | ||
3451 | |||
3452 | DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x," | ||
3453 | " speed_cap_mask %x\n", | ||
3454 | params->phy[actual_phy_idx].req_flow_ctrl, | ||
3455 | params->phy[actual_phy_idx].req_line_speed, | ||
3456 | params->phy[actual_phy_idx].speed_cap_mask); | ||
3457 | } | ||
3458 | } | ||
3459 | |||
3460 | static void bnx2x_ext_phy_set_pause(struct link_params *params, | ||
3461 | struct bnx2x_phy *phy, | ||
3462 | struct link_vars *vars) | ||
3463 | { | ||
3464 | u16 val; | ||
3465 | struct bnx2x *bp = params->bp; | ||
3466 | /* read-modify-write the pause advertisement */ | ||
3467 | bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); | ||
3468 | |||
3469 | val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; | ||
3470 | |||
3471 | /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ | ||
3472 | bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); | ||
3473 | if ((vars->ieee_fc & | ||
3474 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == | ||
3475 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { | ||
3476 | val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; | ||
3477 | } | ||
3478 | if ((vars->ieee_fc & | ||
3479 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == | ||
3480 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { | ||
3481 | val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; | ||
3482 | } | ||
3483 | DP(NETIF_MSG_LINK, "Ext phy AN advertise 0x%x\n", val); | ||
3484 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val); | ||
3485 | } | ||
3486 | |||
3487 | static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) | ||
3488 | { /* LD LP */ | ||
3489 | switch (pause_result) { /* ASYM P ASYM P */ | ||
3490 | case 0xb: /* 1 0 1 1 */ | ||
3491 | vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; | ||
3492 | break; | ||
3493 | |||
3494 | case 0xe: /* 1 1 1 0 */ | ||
3495 | vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; | ||
3496 | break; | ||
3497 | |||
3498 | case 0x5: /* 0 1 0 1 */ | ||
3499 | case 0x7: /* 0 1 1 1 */ | ||
3500 | case 0xd: /* 1 1 0 1 */ | ||
3501 | case 0xf: /* 1 1 1 1 */ | ||
3502 | vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; | ||
3503 | break; | ||
3504 | |||
3505 | default: | ||
3506 | break; | ||
3507 | } | ||
3508 | if (pause_result & (1<<0)) | ||
3509 | vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE; | ||
3510 | if (pause_result & (1<<1)) | ||
3511 | vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE; | ||
3512 | } | ||
3513 | |||
3514 | static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, | ||
3515 | struct link_params *params, | ||
3516 | struct link_vars *vars) | ||
3517 | { | ||
3518 | struct bnx2x *bp = params->bp; | ||
3519 | u16 ld_pause; /* local */ | ||
3520 | u16 lp_pause; /* link partner */ | ||
3521 | u16 pause_result; | ||
3522 | u8 ret = 0; | ||
3523 | /* read twice */ | ||
3524 | |||
3525 | vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; | ||
3526 | |||
3527 | if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) | ||
3528 | vars->flow_ctrl = phy->req_flow_ctrl; | ||
3529 | else if (phy->req_line_speed != SPEED_AUTO_NEG) | ||
3530 | vars->flow_ctrl = params->req_fc_auto_adv; | ||
3531 | else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { | ||
3532 | ret = 1; | ||
3533 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) { | ||
3534 | bnx2x_cl22_read(bp, phy, | ||
3535 | 0x4, &ld_pause); | ||
3536 | bnx2x_cl22_read(bp, phy, | ||
3537 | 0x5, &lp_pause); | ||
3538 | } else { | ||
3539 | bnx2x_cl45_read(bp, phy, | ||
3540 | MDIO_AN_DEVAD, | ||
3541 | MDIO_AN_REG_ADV_PAUSE, &ld_pause); | ||
3542 | bnx2x_cl45_read(bp, phy, | ||
3543 | MDIO_AN_DEVAD, | ||
3544 | MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); | ||
3545 | } | ||
3546 | pause_result = (ld_pause & | ||
3547 | MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; | ||
3548 | pause_result |= (lp_pause & | ||
3549 | MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; | ||
3550 | DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", | ||
3551 | pause_result); | ||
3552 | bnx2x_pause_resolve(vars, pause_result); | ||
3553 | } | ||
3554 | return ret; | ||
3555 | } | ||
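
The 4-bit pause_result built above packs the local device's PAUSE/ASYM_DIR advertisement bits into bits 3:2 and the link partner's into bits 1:0, and bnx2x_pause_resolve() then maps that nibble to a flow-control mode per Table 28B-3 of 802.3. Below is a minimal standalone sketch of that resolution, assuming PAUSE at bit 10 and ASYM_DIR at bit 11 of the advertisement register; the ADV_PAUSE_MASK and FC_* constants are simplified stand-ins for the driver's MDIO_AN_REG_ADV_PAUSE_* and BNX2X_FLOW_CTRL_* definitions, not the real ones.

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified stand-ins for the driver's flow-control codes */
    enum { FC_NONE, FC_TX, FC_RX, FC_BOTH };

    /* Assumed advertisement layout: bit 10 = PAUSE, bit 11 = ASYM_DIR */
    #define ADV_PAUSE_MASK 0x0c00

    static int pause_resolve(uint16_t ld_adv, uint16_t lp_adv)
    {
        /* LD bits land in 3:2, LP bits in 1:0 - same packing as above */
        uint8_t pause_result = ((ld_adv & ADV_PAUSE_MASK) >> 8) |
                               ((lp_adv & ADV_PAUSE_MASK) >> 10);

        switch (pause_result) {     /* LD: ASYM P  LP: ASYM P */
        case 0xb:                   /*      1   0       1   1 */
            return FC_TX;
        case 0xe:                   /*      1   1       1   0 */
            return FC_RX;
        case 0x5: case 0x7:
        case 0xd: case 0xf:         /* both sides advertise PAUSE */
            return FC_BOTH;
        default:
            return FC_NONE;
        }
    }

    int main(void)
    {
        /* Local: ASYM only (0x0800); partner: PAUSE|ASYM (0x0c00) -> TX pause */
        printf("flow_ctrl = %d\n", pause_resolve(0x0800, 0x0c00));
        return 0;
    }

With ld_adv = 0x0800 and lp_adv = 0x0c00 the nibble is 0xb, so the local side only transmits pause frames, matching the 0xb case in bnx2x_pause_resolve().
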
3556 | /******************************************************************/ | ||
3557 | /* Warpcore section */ | ||
3558 | /******************************************************************/ | ||
3559 | /* The init_internal_warpcore should mirror the xgxs, | ||
3560 | * i.e. reset the lane (if needed), set the aer for the | ||
3561 | * init configuration, and set/clear the SGMII flag. Internal | ||
3562 | * phy init is done purely in the phy_init stage. | ||
3563 | */ | ||
3564 | static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, | ||
3565 | struct link_params *params, | ||
3566 | struct link_vars *vars) { | ||
3567 | u16 val16 = 0, lane, bam37 = 0; | ||
3568 | struct bnx2x *bp = params->bp; | ||
3569 | DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n"); | ||
3570 | /* Check adding advertisement for 1G KX */ | ||
3571 | if (((vars->line_speed == SPEED_AUTO_NEG) && | ||
3572 | (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || | ||
3573 | (vars->line_speed == SPEED_1000)) { | ||
3574 | u16 sd_digital; | ||
3575 | val16 |= (1<<5); | ||
3576 | |||
3577 | /* Enable CL37 1G Parallel Detect */ | ||
3578 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3579 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital); | ||
3580 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3581 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, | ||
3582 | (sd_digital | 0x1)); | ||
3583 | |||
3584 | DP(NETIF_MSG_LINK, "Advertise 1G\n"); | ||
3585 | } | ||
3586 | if (((vars->line_speed == SPEED_AUTO_NEG) && | ||
3587 | (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || | ||
3588 | (vars->line_speed == SPEED_10000)) { | ||
3589 | /* Check adding advertisement for 10G KR */ | ||
3590 | val16 |= (1<<7); | ||
3591 | /* Enable 10G Parallel Detect */ | ||
3592 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, | ||
3593 | MDIO_WC_REG_PAR_DET_10G_CTRL, 1); | ||
3594 | |||
3595 | DP(NETIF_MSG_LINK, "Advertise 10G\n"); | ||
3596 | } | ||
3597 | |||
3598 | /* Set Transmit PMD settings */ | ||
3599 | lane = bnx2x_get_warpcore_lane(phy, params); | ||
3600 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3601 | MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, | ||
3602 | ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | | ||
3603 | (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | | ||
3604 | (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))); | ||
3605 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3606 | MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL, | ||
3607 | 0x03f0); | ||
3608 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3609 | MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL, | ||
3610 | 0x03f0); | ||
3611 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3612 | MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, | ||
3613 | 0x383f); | ||
3614 | |||
3615 | /* Advertised speeds */ | ||
3616 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, | ||
3617 | MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); | ||
3618 | |||
3619 | /* Enable CL37 BAM */ | ||
3620 | if (REG_RD(bp, params->shmem_base + | ||
3621 | offsetof(struct shmem_region, dev_info. | ||
3622 | port_hw_config[params->port].default_cfg)) & | ||
3623 | PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { | ||
3624 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3625 | MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37); | ||
3626 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3627 | MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1); | ||
3628 | DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); | ||
3629 | } | ||
3630 | |||
3631 | /* Advertise pause */ | ||
3632 | bnx2x_ext_phy_set_pause(params, phy, vars); | ||
3633 | |||
3634 | /* Enable Autoneg */ | ||
3635 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, | ||
3636 | MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000); | ||
3637 | |||
3638 | /* Over 1G - AN local device user page 1 */ | ||
3639 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3640 | MDIO_WC_REG_DIGITAL3_UP1, 0x1f); | ||
3641 | |||
3642 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3643 | MDIO_WC_REG_DIGITAL5_MISC7, &val16); | ||
3644 | |||
3645 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3646 | MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100); | ||
3647 | } | ||
3648 | |||
3649 | static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, | ||
3650 | struct link_params *params, | ||
3651 | struct link_vars *vars) | ||
3652 | { | ||
3653 | struct bnx2x *bp = params->bp; | ||
3654 | u16 val; | ||
3655 | |||
3656 | /* Disable Autoneg */ | ||
3657 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3658 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7); | ||
3659 | |||
3660 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, | ||
3661 | MDIO_WC_REG_PAR_DET_10G_CTRL, 0); | ||
3662 | |||
3663 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3664 | MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00); | ||
3665 | |||
3666 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, | ||
3667 | MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0); | ||
3668 | |||
3669 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, | ||
3670 | MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); | ||
3671 | |||
3672 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3673 | MDIO_WC_REG_DIGITAL3_UP1, 0x1); | ||
3674 | |||
3675 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3676 | MDIO_WC_REG_DIGITAL5_MISC7, 0xa); | ||
3677 | |||
3678 | /* Disable CL36 PCS Tx */ | ||
3679 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3680 | MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0); | ||
3681 | |||
3682 | /* Double Wide Single Data Rate @ pll rate */ | ||
3683 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3684 | MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF); | ||
3685 | |||
3686 | /* Leave CL72 training enabled; it is needed for KR */ | ||
3687 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, | ||
3688 | MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150, | ||
3689 | 0x2); | ||
3690 | |||
3691 | /* Leave CL72 enabled */ | ||
3692 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3693 | MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, | ||
3694 | &val); | ||
3695 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3696 | MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, | ||
3697 | val | 0x3800); | ||
3698 | |||
3699 | /* Set speed via PMA/PMD register */ | ||
3700 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, | ||
3701 | MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); | ||
3702 | |||
3703 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, | ||
3704 | MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB); | ||
3705 | |||
3706 | /* Enable encoded forced speed */ | ||
3707 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3708 | MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30); | ||
3709 | |||
3710 | /* Scramble the TX payload only (the 64/66 scrambler) */ | ||
3711 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3712 | MDIO_WC_REG_TX66_CONTROL, 0x9); | ||
3713 | |||
3714 | /* Scramble the RX payload only (the 64/66 scrambler) */ | ||
3715 | bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, | ||
3716 | MDIO_WC_REG_RX66_CONTROL, 0xF9); | ||
3717 | |||
3718 | /* Set and clear loopback to cause a reset of the 64/66 decoder */ | ||
3719 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3720 | MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000); | ||
3721 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3722 | MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); | ||
3723 | |||
3724 | } | ||
3725 | |||
3726 | static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, | ||
3727 | struct link_params *params, | ||
3728 | u8 is_xfi) | ||
3729 | { | ||
3730 | struct bnx2x *bp = params->bp; | ||
3731 | u16 misc1_val, tap_val, tx_driver_val, lane, val; | ||
3732 | /* Hold rxSeqStart */ | ||
3733 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3734 | MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val); | ||
3735 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3736 | MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000)); | ||
3737 | |||
3738 | /* Hold tx_fifo_reset */ | ||
3739 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3740 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val); | ||
3741 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3742 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1)); | ||
3743 | |||
3744 | /* Disable CL73 AN */ | ||
3745 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); | ||
3746 | |||
3747 | /* Disable 100FX Enable and Auto-Detect */ | ||
3748 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3749 | MDIO_WC_REG_FX100_CTRL1, &val); | ||
3750 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3751 | MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA)); | ||
3752 | |||
3753 | /* Disable 100FX Idle detect */ | ||
3754 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3755 | MDIO_WC_REG_FX100_CTRL3, &val); | ||
3756 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3757 | MDIO_WC_REG_FX100_CTRL3, (val | 0x0080)); | ||
3758 | |||
3759 | /* Set Block address to Remote PHY & Clear forced_speed[5] */ | ||
3760 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3761 | MDIO_WC_REG_DIGITAL4_MISC3, &val); | ||
3762 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3763 | MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F)); | ||
3764 | |||
3765 | /* Turn off auto-detect & fiber mode */ | ||
3766 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3767 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val); | ||
3768 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3769 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, | ||
3770 | (val & 0xFFEE)); | ||
3771 | |||
3772 | /* Set filter_force_link, disable_false_link and parallel_detect */ | ||
3773 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3774 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val); | ||
3775 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3776 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, | ||
3777 | ((val | 0x0006) & 0xFFFE)); | ||
3778 | |||
3779 | /* Set XFI / SFI */ | ||
3780 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3781 | MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val); | ||
3782 | |||
3783 | misc1_val &= ~(0x1f); | ||
3784 | |||
3785 | if (is_xfi) { | ||
3786 | misc1_val |= 0x5; | ||
3787 | tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | | ||
3788 | (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | | ||
3789 | (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)); | ||
3790 | tx_driver_val = | ||
3791 | ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | | ||
3792 | (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | | ||
3793 | (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)); | ||
3794 | |||
3795 | } else { | ||
3796 | misc1_val |= 0x9; | ||
3797 | tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | | ||
3798 | (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | | ||
3799 | (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)); | ||
3800 | tx_driver_val = | ||
3801 | ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | | ||
3802 | (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | | ||
3803 | (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)); | ||
3804 | } | ||
3805 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3806 | MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val); | ||
3807 | |||
3808 | /* Set Transmit PMD settings */ | ||
3809 | lane = bnx2x_get_warpcore_lane(phy, params); | ||
3810 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3811 | MDIO_WC_REG_TX_FIR_TAP, | ||
3812 | tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE); | ||
3813 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3814 | MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, | ||
3815 | tx_driver_val); | ||
3816 | |||
3817 | /* Enable fiber mode, enable and invert sig_det */ | ||
3818 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3819 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val); | ||
3820 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3821 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd); | ||
3822 | |||
3823 | /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */ | ||
3824 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3825 | MDIO_WC_REG_DIGITAL4_MISC3, &val); | ||
3826 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3827 | MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080); | ||
3828 | |||
3829 | /* 10G XFI Full Duplex */ | ||
3830 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3831 | MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100); | ||
3832 | |||
3833 | /* Release tx_fifo_reset */ | ||
3834 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3835 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val); | ||
3836 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3837 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE); | ||
3838 | |||
3839 | /* Release rxSeqStart */ | ||
3840 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3841 | MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val); | ||
3842 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3843 | MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF)); | ||
3844 | } | ||
3845 | |||
3846 | static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp, | ||
3847 | struct bnx2x_phy *phy) | ||
3848 | { | ||
3849 | DP(NETIF_MSG_LINK, "KR2 is still not supported!\n"); | ||
3850 | } | ||
3851 | |||
3852 | static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp, | ||
3853 | struct bnx2x_phy *phy, | ||
3854 | u16 lane) | ||
3855 | { | ||
3856 | /* Rx0 anaRxControl1G */ | ||
3857 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3858 | MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90); | ||
3859 | |||
3860 | /* Rx2 anaRxControl1G */ | ||
3861 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3862 | MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90); | ||
3863 | |||
3864 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3865 | MDIO_WC_REG_RX66_SCW0, 0xE070); | ||
3866 | |||
3867 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3868 | MDIO_WC_REG_RX66_SCW1, 0xC0D0); | ||
3869 | |||
3870 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3871 | MDIO_WC_REG_RX66_SCW2, 0xA0B0); | ||
3872 | |||
3873 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3874 | MDIO_WC_REG_RX66_SCW3, 0x8090); | ||
3875 | |||
3876 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3877 | MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0); | ||
3878 | |||
3879 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3880 | MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0); | ||
3881 | |||
3882 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3883 | MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0); | ||
3884 | |||
3885 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3886 | MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0); | ||
3887 | |||
3888 | /* Serdes Digital Misc1 */ | ||
3889 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3890 | MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008); | ||
3891 | |||
3892 | /* Serdes Digital4 Misc3 */ | ||
3893 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3894 | MDIO_WC_REG_DIGITAL4_MISC3, 0x8088); | ||
3895 | |||
3896 | /* Set Transmit PMD settings */ | ||
3897 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3898 | MDIO_WC_REG_TX_FIR_TAP, | ||
3899 | ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | | ||
3900 | (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | | ||
3901 | (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) | | ||
3902 | MDIO_WC_REG_TX_FIR_TAP_ENABLE)); | ||
3903 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3904 | MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, | ||
3905 | ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | | ||
3906 | (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | | ||
3907 | (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))); | ||
3908 | } | ||
3909 | |||
3910 | static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, | ||
3911 | struct link_params *params, | ||
3912 | u8 fiber_mode) | ||
3913 | { | ||
3914 | struct bnx2x *bp = params->bp; | ||
3915 | u16 val16, digctrl_kx1, digctrl_kx2; | ||
3916 | u8 lane; | ||
3917 | |||
3918 | lane = bnx2x_get_warpcore_lane(phy, params); | ||
3919 | |||
3920 | /* Clear XFI clock comp in non-10G single lane mode. */ | ||
3921 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3922 | MDIO_WC_REG_RX66_CONTROL, &val16); | ||
3923 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3924 | MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13)); | ||
3925 | |||
3926 | if (phy->req_line_speed == SPEED_AUTO_NEG) { | ||
3927 | /* SGMII Autoneg */ | ||
3928 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3929 | MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); | ||
3930 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3931 | MDIO_WC_REG_COMBO_IEEE0_MIICTRL, | ||
3932 | val16 | 0x1000); | ||
3933 | DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n"); | ||
3934 | } else { | ||
3935 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3936 | MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); | ||
3937 | val16 &= 0xcfbf; | ||
3938 | switch (phy->req_line_speed) { | ||
3939 | case SPEED_10: | ||
3940 | break; | ||
3941 | case SPEED_100: | ||
3942 | val16 |= 0x2000; | ||
3943 | break; | ||
3944 | case SPEED_1000: | ||
3945 | val16 |= 0x0040; | ||
3946 | break; | ||
3947 | default: | ||
3948 | DP(NETIF_MSG_LINK, "Speed not supported: 0x%x\n", | ||
3949 | phy->req_line_speed); | ||
3950 | return; | ||
3951 | } | ||
3952 | |||
3953 | if (phy->req_duplex == DUPLEX_FULL) | ||
3954 | val16 |= 0x0100; | ||
3955 | |||
3956 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3957 | MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16); | ||
3958 | |||
3959 | DP(NETIF_MSG_LINK, "set SGMII force speed %d\n", | ||
3960 | phy->req_line_speed); | ||
3961 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3962 | MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); | ||
3963 | DP(NETIF_MSG_LINK, " (readback) %x\n", val16); | ||
3964 | } | ||
3965 | |||
3966 | /* SGMII Slave mode and disable signal detect */ | ||
3967 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3968 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1); | ||
3969 | if (fiber_mode) | ||
3970 | digctrl_kx1 = 1; | ||
3971 | else | ||
3972 | digctrl_kx1 &= 0xff4a; | ||
3973 | |||
3974 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3975 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, | ||
3976 | digctrl_kx1); | ||
3977 | |||
3978 | /* Turn off parallel detect */ | ||
3979 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3980 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2); | ||
3981 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3982 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, | ||
3983 | (digctrl_kx2 & ~(1<<2))); | ||
3984 | |||
3985 | /* Re-enable parallel detect */ | ||
3986 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3987 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, | ||
3988 | (digctrl_kx2 | (1<<2))); | ||
3989 | |||
3990 | /* Enable autodet */ | ||
3991 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
3992 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, | ||
3993 | (digctrl_kx1 | 0x10)); | ||
3994 | } | ||
3995 | |||
3996 | static void bnx2x_warpcore_reset_lane(struct bnx2x *bp, | ||
3997 | struct bnx2x_phy *phy, | ||
3998 | u8 reset) | ||
3999 | { | ||
4000 | u16 val; | ||
4001 | /* Hold the lane in reset, or take it out once configuration is finished */ | ||
4002 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
4003 | MDIO_WC_REG_DIGITAL5_MISC6, &val); | ||
4004 | if (reset) | ||
4005 | val |= 0xC000; | ||
4006 | else | ||
4007 | val &= 0x3FFF; | ||
4008 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4009 | MDIO_WC_REG_DIGITAL5_MISC6, val); | ||
4010 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
4011 | MDIO_WC_REG_DIGITAL5_MISC6, &val); | ||
4012 | } | ||
4013 | |||
4014 | |||
4015 | /* Clear SFI/XFI link settings registers */ | ||
4016 | static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy, | ||
4017 | struct link_params *params, | ||
4018 | u16 lane) | ||
4019 | { | ||
4020 | struct bnx2x *bp = params->bp; | ||
4021 | u16 val16; | ||
4022 | |||
4023 | /* Set XFI clock comp as default. */ | ||
4024 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
4025 | MDIO_WC_REG_RX66_CONTROL, &val16); | ||
4026 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4027 | MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13)); | ||
4028 | |||
4029 | bnx2x_warpcore_reset_lane(bp, phy, 1); | ||
4030 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); | ||
4031 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4032 | MDIO_WC_REG_FX100_CTRL1, 0x014a); | ||
4033 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4034 | MDIO_WC_REG_FX100_CTRL3, 0x0800); | ||
4035 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4036 | MDIO_WC_REG_DIGITAL4_MISC3, 0x8008); | ||
4037 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4038 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195); | ||
4039 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4040 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007); | ||
4041 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4042 | MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002); | ||
4043 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4044 | MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000); | ||
4045 | lane = bnx2x_get_warpcore_lane(phy, params); | ||
4046 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4047 | MDIO_WC_REG_TX_FIR_TAP, 0x0000); | ||
4048 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4049 | MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990); | ||
4050 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4051 | MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); | ||
4052 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4053 | MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140); | ||
4054 | bnx2x_warpcore_reset_lane(bp, phy, 0); | ||
4055 | } | ||
4056 | |||
4057 | static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, | ||
4058 | u32 chip_id, | ||
4059 | u32 shmem_base, u8 port, | ||
4060 | u8 *gpio_num, u8 *gpio_port) | ||
4061 | { | ||
4062 | u32 cfg_pin; | ||
4063 | *gpio_num = 0; | ||
4064 | *gpio_port = 0; | ||
4065 | if (CHIP_IS_E3(bp)) { | ||
4066 | cfg_pin = (REG_RD(bp, shmem_base + | ||
4067 | offsetof(struct shmem_region, | ||
4068 | dev_info.port_hw_config[port].e3_sfp_ctrl)) & | ||
4069 | PORT_HW_CFG_E3_MOD_ABS_MASK) >> | ||
4070 | PORT_HW_CFG_E3_MOD_ABS_SHIFT; | ||
4071 | |||
4072 | /* | ||
4073 | * Should not happen. This function is called upon an interrupt | ||
4074 | * triggered by a GPIO (since the EPIO can only generate interrupts | ||
4075 | * to the MCP). | ||
4076 | * So if this function was called and none of the GPIOs is set, | ||
4077 | * something has gone badly wrong. | ||
4078 | */ | ||
4079 | if ((cfg_pin < PIN_CFG_GPIO0_P0) || | ||
4080 | (cfg_pin > PIN_CFG_GPIO3_P1)) { | ||
4081 | DP(NETIF_MSG_LINK, "ERROR: Invalid cfg pin %x for " | ||
4082 | "module detect indication\n", | ||
4083 | cfg_pin); | ||
4084 | return -EINVAL; | ||
4085 | } | ||
4086 | |||
4087 | *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3; | ||
4088 | *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2; | ||
4089 | } else { | ||
4090 | *gpio_num = MISC_REGISTERS_GPIO_3; | ||
4091 | *gpio_port = port; | ||
4092 | } | ||
4093 | DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port); | ||
4094 | return 0; | ||
4095 | } | ||
4096 | |||
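
On E3 the MOD_ABS pin configuration values are laid out as four consecutive GPIOs per port, so the decode above is simply a modulo/divide by four relative to PIN_CFG_GPIO0_P0. A small standalone sketch of that decode follows; the base value 4 is illustrative only, the real constant comes from the HSI headers.

    #include <stdio.h>
    #include <stdint.h>

    #define PIN_CFG_GPIO0_P0 4  /* illustrative base; real value is in bnx2x_hsi.h */

    static void decode_mod_abs_pin(uint32_t cfg_pin, uint8_t *gpio_num,
                                   uint8_t *gpio_port)
    {
        /* GPIO0..GPIO3 of port 0 come first, then GPIO0..GPIO3 of port 1 */
        *gpio_num  = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3;
        *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2;
    }

    int main(void)
    {
        uint8_t num, port;

        decode_mod_abs_pin(PIN_CFG_GPIO0_P0 + 6, &num, &port);
        printf("MOD_ABS int GPIO%d_P%d\n", num, port); /* prints GPIO2_P1 */
        return 0;
    }
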
4097 | static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy, | ||
4098 | struct link_params *params) | ||
4099 | { | ||
4100 | struct bnx2x *bp = params->bp; | ||
4101 | u8 gpio_num, gpio_port; | ||
4102 | u32 gpio_val; | ||
4103 | if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, | ||
4104 | params->shmem_base, params->port, | ||
4105 | &gpio_num, &gpio_port) != 0) | ||
4106 | return 0; | ||
4107 | gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port); | ||
4108 | |||
4109 | /* A low MOD_ABS GPIO means a module is plugged in; the caller then runs module detection */ | ||
4110 | if (gpio_val == 0) | ||
4111 | return 1; | ||
4112 | else | ||
4113 | return 0; | ||
4114 | } | ||
4115 | |||
4116 | static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, | ||
4117 | struct link_params *params, | ||
4118 | struct link_vars *vars) | ||
4119 | { | ||
4120 | struct bnx2x *bp = params->bp; | ||
4121 | u32 serdes_net_if; | ||
4122 | u8 fiber_mode; | ||
4123 | u16 lane = bnx2x_get_warpcore_lane(phy, params); | ||
4124 | serdes_net_if = (REG_RD(bp, params->shmem_base + | ||
4125 | offsetof(struct shmem_region, dev_info. | ||
4126 | port_hw_config[params->port].default_cfg)) & | ||
4127 | PORT_HW_CFG_NET_SERDES_IF_MASK); | ||
4128 | DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, " | ||
4129 | "serdes_net_if = 0x%x\n", | ||
4130 | vars->line_speed, serdes_net_if); | ||
4131 | bnx2x_set_aer_mmd(params, phy); | ||
4132 | |||
4133 | vars->phy_flags |= PHY_XGXS_FLAG; | ||
4134 | if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) || | ||
4135 | (phy->req_line_speed && | ||
4136 | ((phy->req_line_speed == SPEED_100) || | ||
4137 | (phy->req_line_speed == SPEED_10)))) { | ||
4138 | vars->phy_flags |= PHY_SGMII_FLAG; | ||
4139 | DP(NETIF_MSG_LINK, "Setting SGMII mode\n"); | ||
4140 | bnx2x_warpcore_clear_regs(phy, params, lane); | ||
4141 | bnx2x_warpcore_set_sgmii_speed(phy, params, 0); | ||
4142 | } else { | ||
4143 | switch (serdes_net_if) { | ||
4144 | case PORT_HW_CFG_NET_SERDES_IF_KR: | ||
4145 | /* Enable KR Auto Neg */ | ||
4146 | if (params->loopback_mode == LOOPBACK_NONE) | ||
4147 | bnx2x_warpcore_enable_AN_KR(phy, params, vars); | ||
4148 | else { | ||
4149 | DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n"); | ||
4150 | bnx2x_warpcore_set_10G_KR(phy, params, vars); | ||
4151 | } | ||
4152 | break; | ||
4153 | |||
4154 | case PORT_HW_CFG_NET_SERDES_IF_XFI: | ||
4155 | bnx2x_warpcore_clear_regs(phy, params, lane); | ||
4156 | if (vars->line_speed == SPEED_10000) { | ||
4157 | DP(NETIF_MSG_LINK, "Setting 10G XFI\n"); | ||
4158 | bnx2x_warpcore_set_10G_XFI(phy, params, 1); | ||
4159 | } else { | ||
4160 | if (SINGLE_MEDIA_DIRECT(params)) { | ||
4161 | DP(NETIF_MSG_LINK, "1G Fiber\n"); | ||
4162 | fiber_mode = 1; | ||
4163 | } else { | ||
4164 | DP(NETIF_MSG_LINK, "10/100/1G SGMII\n"); | ||
4165 | fiber_mode = 0; | ||
4166 | } | ||
4167 | bnx2x_warpcore_set_sgmii_speed(phy, | ||
4168 | params, | ||
4169 | fiber_mode); | ||
4170 | } | ||
4171 | |||
4172 | break; | ||
4173 | |||
4174 | case PORT_HW_CFG_NET_SERDES_IF_SFI: | ||
4175 | |||
4176 | bnx2x_warpcore_clear_regs(phy, params, lane); | ||
4177 | if (vars->line_speed == SPEED_10000) { | ||
4178 | DP(NETIF_MSG_LINK, "Setting 10G SFI\n"); | ||
4179 | bnx2x_warpcore_set_10G_XFI(phy, params, 0); | ||
4180 | } else if (vars->line_speed == SPEED_1000) { | ||
4181 | DP(NETIF_MSG_LINK, "Setting 1G Fiber\n"); | ||
4182 | bnx2x_warpcore_set_sgmii_speed(phy, params, 1); | ||
4183 | } | ||
4184 | /* Issue Module detection */ | ||
4185 | if (bnx2x_is_sfp_module_plugged(phy, params)) | ||
4186 | bnx2x_sfp_module_detection(phy, params); | ||
4187 | break; | ||
4188 | |||
4189 | case PORT_HW_CFG_NET_SERDES_IF_DXGXS: | ||
4190 | if (vars->line_speed != SPEED_20000) { | ||
4191 | DP(NETIF_MSG_LINK, "Speed not supported yet\n"); | ||
4192 | return; | ||
4193 | } | ||
4194 | DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n"); | ||
4195 | bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane); | ||
4196 | /* Issue Module detection */ | ||
4197 | |||
4198 | bnx2x_sfp_module_detection(phy, params); | ||
4199 | break; | ||
4200 | |||
4201 | case PORT_HW_CFG_NET_SERDES_IF_KR2: | ||
4202 | if (vars->line_speed != SPEED_20000) { | ||
4203 | DP(NETIF_MSG_LINK, "Speed not supported yet\n"); | ||
4204 | return; | ||
4205 | } | ||
4206 | DP(NETIF_MSG_LINK, "Setting 20G KR2\n"); | ||
4207 | bnx2x_warpcore_set_20G_KR2(bp, phy); | ||
4208 | break; | ||
4209 | |||
4210 | default: | ||
4211 | DP(NETIF_MSG_LINK, "Unsupported Serdes Net Interface " | ||
4212 | "0x%x\n", serdes_net_if); | ||
4213 | return; | ||
4214 | } | ||
4215 | } | ||
4216 | |||
4217 | /* Take lane out of reset after configuration is finished */ | ||
4218 | bnx2x_warpcore_reset_lane(bp, phy, 0); | ||
4219 | DP(NETIF_MSG_LINK, "Exit config init\n"); | ||
4220 | } | ||
4221 | |||
4222 | static void bnx2x_sfp_e3_set_transmitter(struct link_params *params, | ||
4223 | struct bnx2x_phy *phy, | ||
4224 | u8 tx_en) | ||
4225 | { | ||
4226 | struct bnx2x *bp = params->bp; | ||
4227 | u32 cfg_pin; | ||
4228 | u8 port = params->port; | ||
4229 | |||
4230 | cfg_pin = REG_RD(bp, params->shmem_base + | ||
4231 | offsetof(struct shmem_region, | ||
4232 | dev_info.port_hw_config[port].e3_sfp_ctrl)) & | ||
4233 | PORT_HW_CFG_TX_LASER_MASK; | ||
4234 | /* Set the !tx_en since this pin is DISABLE_TX_LASER */ | ||
4235 | DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en); | ||
4236 | /* For 20G, the pin used is expected to be 3 pins after the current one */ | ||
4237 | |||
4238 | bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1); | ||
4239 | if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G) | ||
4240 | bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1); | ||
4241 | } | ||
4242 | |||
4243 | static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy, | ||
4244 | struct link_params *params) | ||
4245 | { | ||
4246 | struct bnx2x *bp = params->bp; | ||
4247 | u16 val16; | ||
4248 | bnx2x_sfp_e3_set_transmitter(params, phy, 0); | ||
4249 | bnx2x_set_mdio_clk(bp, params->chip_id, params->port); | ||
4250 | bnx2x_set_aer_mmd(params, phy); | ||
4251 | /* Global register */ | ||
4252 | bnx2x_warpcore_reset_lane(bp, phy, 1); | ||
4253 | |||
4254 | /* Clear loopback settings (if any) */ | ||
4255 | /* 10G & 20G */ | ||
4256 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
4257 | MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); | ||
4258 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4259 | MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 & | ||
4260 | 0xBFFF); | ||
4261 | |||
4262 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
4263 | MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16); | ||
4264 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4265 | MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe); | ||
4266 | |||
4267 | /* Update those 1-copy registers */ | ||
4268 | CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, | ||
4269 | MDIO_AER_BLOCK_AER_REG, 0); | ||
4270 | /* Enable 1G MDIO (1-copy) */ | ||
4271 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
4272 | MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, | ||
4273 | &val16); | ||
4274 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4275 | MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, | ||
4276 | val16 & ~0x10); | ||
4277 | |||
4278 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
4279 | MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16); | ||
4280 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4281 | MDIO_WC_REG_XGXSBLK1_LANECTRL2, | ||
4282 | val16 & 0xff00); | ||
4283 | |||
4284 | } | ||
4285 | |||
4286 | static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, | ||
4287 | struct link_params *params) | ||
4288 | { | ||
4289 | struct bnx2x *bp = params->bp; | ||
4290 | u16 val16; | ||
4291 | u32 lane; | ||
4292 | DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n", | ||
4293 | params->loopback_mode, phy->req_line_speed); | ||
4294 | |||
4295 | if (phy->req_line_speed < SPEED_10000) { | ||
4296 | /* 10/100/1000 */ | ||
4297 | |||
4298 | /* Update those 1-copy registers */ | ||
4299 | CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, | ||
4300 | MDIO_AER_BLOCK_AER_REG, 0); | ||
4301 | /* Enable 1G MDIO (1-copy) */ | ||
4302 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
4303 | MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, | ||
4304 | &val16); | ||
4305 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4306 | MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, | ||
4307 | val16 | 0x10); | ||
4308 | /* Set 1G loopback based on lane (1-copy) */ | ||
4309 | lane = bnx2x_get_warpcore_lane(phy, params); | ||
4310 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
4311 | MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16); | ||
4312 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4313 | MDIO_WC_REG_XGXSBLK1_LANECTRL2, | ||
4314 | val16 | (1<<lane)); | ||
4315 | |||
4316 | /* Switch back to 4-copy registers */ | ||
4317 | bnx2x_set_aer_mmd(params, phy); | ||
4318 | /* Global loopback, not recommended. */ | ||
4319 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
4320 | MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); | ||
4321 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4322 | MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 | | ||
4323 | 0x4000); | ||
4324 | } else { | ||
4325 | /* 10G & 20G */ | ||
4326 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
4327 | MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); | ||
4328 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4329 | MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 | | ||
4330 | 0x4000); | ||
4331 | |||
4332 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
4333 | MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16); | ||
4334 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
4335 | MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1); | ||
4336 | } | ||
4337 | } | ||
4338 | |||
1614 | 4339 | ||
1615 | void bnx2x_link_status_update(struct link_params *params, | 4340 | void bnx2x_link_status_update(struct link_params *params, |
1616 | struct link_vars *vars) | 4341 | struct link_vars *vars) |
1617 | { | 4342 | { |
1618 | struct bnx2x *bp = params->bp; | 4343 | struct bnx2x *bp = params->bp; |
1619 | u8 link_10g; | 4344 | u8 link_10g_plus; |
1620 | u8 port = params->port; | 4345 | u8 port = params->port; |
4346 | u32 sync_offset, media_types; | ||
4347 | /* Update PHY configuration */ | ||
4348 | set_phy_vars(params, vars); | ||
1621 | 4349 | ||
1622 | vars->link_status = REG_RD(bp, params->shmem_base + | 4350 | vars->link_status = REG_RD(bp, params->shmem_base + |
1623 | offsetof(struct shmem_region, | 4351 | offsetof(struct shmem_region, |
1624 | port_mb[port].link_status)); | 4352 | port_mb[port].link_status)); |
1625 | 4353 | ||
1626 | vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); | 4354 | vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); |
1627 | 4355 | vars->phy_flags = PHY_XGXS_FLAG; | |
1628 | if (vars->link_up) { | 4356 | if (vars->link_up) { |
1629 | DP(NETIF_MSG_LINK, "phy link up\n"); | 4357 | DP(NETIF_MSG_LINK, "phy link up\n"); |
1630 | 4358 | ||
@@ -1664,27 +4392,9 @@ void bnx2x_link_status_update(struct link_params *params, | |||
1664 | case LINK_10GTFD: | 4392 | case LINK_10GTFD: |
1665 | vars->line_speed = SPEED_10000; | 4393 | vars->line_speed = SPEED_10000; |
1666 | break; | 4394 | break; |
1667 | 4395 | case LINK_20GTFD: | |
1668 | case LINK_12GTFD: | 4396 | vars->line_speed = SPEED_20000; |
1669 | vars->line_speed = SPEED_12000; | ||
1670 | break; | ||
1671 | |||
1672 | case LINK_12_5GTFD: | ||
1673 | vars->line_speed = SPEED_12500; | ||
1674 | break; | ||
1675 | |||
1676 | case LINK_13GTFD: | ||
1677 | vars->line_speed = SPEED_13000; | ||
1678 | break; | 4397 | break; |
1679 | |||
1680 | case LINK_15GTFD: | ||
1681 | vars->line_speed = SPEED_15000; | ||
1682 | break; | ||
1683 | |||
1684 | case LINK_16GTFD: | ||
1685 | vars->line_speed = SPEED_16000; | ||
1686 | break; | ||
1687 | |||
1688 | default: | 4398 | default: |
1689 | break; | 4399 | break; |
1690 | } | 4400 | } |
@@ -1705,19 +4415,24 @@ void bnx2x_link_status_update(struct link_params *params, | |||
1705 | } else { | 4415 | } else { |
1706 | vars->phy_flags &= ~PHY_SGMII_FLAG; | 4416 | vars->phy_flags &= ~PHY_SGMII_FLAG; |
1707 | } | 4417 | } |
1708 | 4418 | if (vars->line_speed && | |
4419 | USES_WARPCORE(bp) && | ||
4420 | (vars->line_speed == SPEED_1000)) | ||
4421 | vars->phy_flags |= PHY_SGMII_FLAG; | ||
1709 | /* anything 10 and over uses the bmac */ | 4422 | /* anything 10 and over uses the bmac */ |
1710 | link_10g = ((vars->line_speed == SPEED_10000) || | 4423 | link_10g_plus = (vars->line_speed >= SPEED_10000); |
1711 | (vars->line_speed == SPEED_12000) || | ||
1712 | (vars->line_speed == SPEED_12500) || | ||
1713 | (vars->line_speed == SPEED_13000) || | ||
1714 | (vars->line_speed == SPEED_15000) || | ||
1715 | (vars->line_speed == SPEED_16000)); | ||
1716 | if (link_10g) | ||
1717 | vars->mac_type = MAC_TYPE_BMAC; | ||
1718 | else | ||
1719 | vars->mac_type = MAC_TYPE_EMAC; | ||
1720 | 4424 | ||
4425 | if (link_10g_plus) { | ||
4426 | if (USES_WARPCORE(bp)) | ||
4427 | vars->mac_type = MAC_TYPE_XMAC; | ||
4428 | else | ||
4429 | vars->mac_type = MAC_TYPE_BMAC; | ||
4430 | } else { | ||
4431 | if (USES_WARPCORE(bp)) | ||
4432 | vars->mac_type = MAC_TYPE_UMAC; | ||
4433 | else | ||
4434 | vars->mac_type = MAC_TYPE_EMAC; | ||
4435 | } | ||
1721 | } else { /* link down */ | 4436 | } else { /* link down */ |
1722 | DP(NETIF_MSG_LINK, "phy link down\n"); | 4437 | DP(NETIF_MSG_LINK, "phy link down\n"); |
1723 | 4438 | ||
@@ -1731,8 +4446,40 @@ void bnx2x_link_status_update(struct link_params *params, | |||
1731 | vars->mac_type = MAC_TYPE_NONE; | 4446 | vars->mac_type = MAC_TYPE_NONE; |
1732 | } | 4447 | } |
1733 | 4448 | ||
1734 | DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n", | 4449 | /* Sync media type */ |
1735 | vars->link_status, vars->phy_link_up); | 4450 | sync_offset = params->shmem_base + |
4451 | offsetof(struct shmem_region, | ||
4452 | dev_info.port_hw_config[port].media_type); | ||
4453 | media_types = REG_RD(bp, sync_offset); | ||
4454 | |||
4455 | params->phy[INT_PHY].media_type = | ||
4456 | (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >> | ||
4457 | PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT; | ||
4458 | params->phy[EXT_PHY1].media_type = | ||
4459 | (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >> | ||
4460 | PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT; | ||
4461 | params->phy[EXT_PHY2].media_type = | ||
4462 | (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >> | ||
4463 | PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT; | ||
4464 | DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types); | ||
4465 | |||
4466 | /* Sync AEU offset */ | ||
4467 | sync_offset = params->shmem_base + | ||
4468 | offsetof(struct shmem_region, | ||
4469 | dev_info.port_hw_config[port].aeu_int_mask); | ||
4470 | |||
4471 | vars->aeu_int_mask = REG_RD(bp, sync_offset); | ||
4472 | |||
4473 | /* Sync PFC status */ | ||
4474 | if (vars->link_status & LINK_STATUS_PFC_ENABLED) | ||
4475 | params->feature_config_flags |= | ||
4476 | FEATURE_CONFIG_PFC_ENABLED; | ||
4477 | else | ||
4478 | params->feature_config_flags &= | ||
4479 | ~FEATURE_CONFIG_PFC_ENABLED; | ||
4480 | |||
4481 | DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n", | ||
4482 | vars->link_status, vars->phy_link_up, vars->aeu_int_mask); | ||
1736 | DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n", | 4483 | DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n", |
1737 | vars->line_speed, vars->duplex, vars->flow_ctrl); | 4484 | vars->line_speed, vars->duplex, vars->flow_ctrl); |
1738 | } | 4485 | } |
@@ -1759,9 +4506,9 @@ static void bnx2x_set_master_ln(struct link_params *params, | |||
1759 | (new_master_ln | ser_lane)); | 4506 | (new_master_ln | ser_lane)); |
1760 | } | 4507 | } |
1761 | 4508 | ||
1762 | static u8 bnx2x_reset_unicore(struct link_params *params, | 4509 | static int bnx2x_reset_unicore(struct link_params *params, |
1763 | struct bnx2x_phy *phy, | 4510 | struct bnx2x_phy *phy, |
1764 | u8 set_serdes) | 4511 | u8 set_serdes) |
1765 | { | 4512 | { |
1766 | struct bnx2x *bp = params->bp; | 4513 | struct bnx2x *bp = params->bp; |
1767 | u16 mii_control; | 4514 | u16 mii_control; |
@@ -2048,9 +4795,6 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy, | |||
2048 | if (vars->line_speed == SPEED_10000) | 4795 | if (vars->line_speed == SPEED_10000) |
2049 | reg_val |= | 4796 | reg_val |= |
2050 | MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4; | 4797 | MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4; |
2051 | if (vars->line_speed == SPEED_13000) | ||
2052 | reg_val |= | ||
2053 | MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; | ||
2054 | } | 4798 | } |
2055 | 4799 | ||
2056 | CL22_WR_OVER_CL45(bp, phy, | 4800 | CL22_WR_OVER_CL45(bp, phy, |
@@ -2059,8 +4803,8 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy, | |||
2059 | 4803 | ||
2060 | } | 4804 | } |
2061 | 4805 | ||
2062 | static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy, | 4806 | static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy, |
2063 | struct link_params *params) | 4807 | struct link_params *params) |
2064 | { | 4808 | { |
2065 | struct bnx2x *bp = params->bp; | 4809 | struct bnx2x *bp = params->bp; |
2066 | u16 val = 0; | 4810 | u16 val = 0; |
@@ -2081,44 +4825,9 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy, | |||
2081 | MDIO_OVER_1G_UP3, 0x400); | 4825 | MDIO_OVER_1G_UP3, 0x400); |
2082 | } | 4826 | } |
2083 | 4827 | ||
2084 | static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, | 4828 | static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy, |
2085 | struct link_params *params, u16 *ieee_fc) | 4829 | struct link_params *params, |
2086 | { | 4830 | u16 ieee_fc) |
2087 | struct bnx2x *bp = params->bp; | ||
2088 | *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; | ||
2089 | /* | ||
2090 | * Resolve pause mode and advertisement. | ||
2091 | * Please refer to Table 28B-3 of the 802.3ab-1999 spec | ||
2092 | */ | ||
2093 | |||
2094 | switch (phy->req_flow_ctrl) { | ||
2095 | case BNX2X_FLOW_CTRL_AUTO: | ||
2096 | if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) | ||
2097 | *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; | ||
2098 | else | ||
2099 | *ieee_fc |= | ||
2100 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; | ||
2101 | break; | ||
2102 | case BNX2X_FLOW_CTRL_TX: | ||
2103 | *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; | ||
2104 | break; | ||
2105 | |||
2106 | case BNX2X_FLOW_CTRL_RX: | ||
2107 | case BNX2X_FLOW_CTRL_BOTH: | ||
2108 | *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; | ||
2109 | break; | ||
2110 | |||
2111 | case BNX2X_FLOW_CTRL_NONE: | ||
2112 | default: | ||
2113 | *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; | ||
2114 | break; | ||
2115 | } | ||
2116 | DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc); | ||
2117 | } | ||
2118 | |||
2119 | static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy, | ||
2120 | struct link_params *params, | ||
2121 | u16 ieee_fc) | ||
2122 | { | 4831 | { |
2123 | struct bnx2x *bp = params->bp; | 4832 | struct bnx2x *bp = params->bp; |
2124 | u16 val; | 4833 | u16 val; |
@@ -2252,35 +4961,8 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, | |||
2252 | * link management | 4961 | * link management |
2253 | */ | 4962 | */ |
2254 | 4963 | ||
2255 | static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) | 4964 | static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, |
2256 | { /* LD LP */ | 4965 | struct link_params *params) |
2257 | switch (pause_result) { /* ASYM P ASYM P */ | ||
2258 | case 0xb: /* 1 0 1 1 */ | ||
2259 | vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; | ||
2260 | break; | ||
2261 | |||
2262 | case 0xe: /* 1 1 1 0 */ | ||
2263 | vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; | ||
2264 | break; | ||
2265 | |||
2266 | case 0x5: /* 0 1 0 1 */ | ||
2267 | case 0x7: /* 0 1 1 1 */ | ||
2268 | case 0xd: /* 1 1 0 1 */ | ||
2269 | case 0xf: /* 1 1 1 1 */ | ||
2270 | vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; | ||
2271 | break; | ||
2272 | |||
2273 | default: | ||
2274 | break; | ||
2275 | } | ||
2276 | if (pause_result & (1<<0)) | ||
2277 | vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE; | ||
2278 | if (pause_result & (1<<1)) | ||
2279 | vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE; | ||
2280 | } | ||
2281 | |||
2282 | static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, | ||
2283 | struct link_params *params) | ||
2284 | { | 4966 | { |
2285 | struct bnx2x *bp = params->bp; | 4967 | struct bnx2x *bp = params->bp; |
2286 | u16 pd_10g, status2_1000x; | 4968 | u16 pd_10g, status2_1000x; |
@@ -2383,7 +5065,7 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, | |||
2383 | struct link_params *params) | 5065 | struct link_params *params) |
2384 | { | 5066 | { |
2385 | struct bnx2x *bp = params->bp; | 5067 | struct bnx2x *bp = params->bp; |
2386 | u16 rx_status, ustat_val, cl37_fsm_recieved; | 5068 | u16 rx_status, ustat_val, cl37_fsm_received; |
2387 | DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); | 5069 | DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); |
2388 | /* Step 1: Make sure signal is detected */ | 5070 | /* Step 1: Make sure signal is detected */ |
2389 | CL22_RD_OVER_CL45(bp, phy, | 5071 | CL22_RD_OVER_CL45(bp, phy, |
@@ -2421,15 +5103,15 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, | |||
2421 | CL22_RD_OVER_CL45(bp, phy, | 5103 | CL22_RD_OVER_CL45(bp, phy, |
2422 | MDIO_REG_BANK_REMOTE_PHY, | 5104 | MDIO_REG_BANK_REMOTE_PHY, |
2423 | MDIO_REMOTE_PHY_MISC_RX_STATUS, | 5105 | MDIO_REMOTE_PHY_MISC_RX_STATUS, |
2424 | &cl37_fsm_recieved); | 5106 | &cl37_fsm_received); |
2425 | if ((cl37_fsm_recieved & | 5107 | if ((cl37_fsm_received & |
2426 | (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | | 5108 | (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | |
2427 | MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != | 5109 | MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != |
2428 | (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | | 5110 | (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | |
2429 | MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) { | 5111 | MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) { |
2430 | DP(NETIF_MSG_LINK, "No CL37 FSM were received. " | 5112 | DP(NETIF_MSG_LINK, "No CL37 FSM were received. " |
2431 | "misc_rx_status(0x8330) = 0x%x\n", | 5113 | "misc_rx_status(0x8330) = 0x%x\n", |
2432 | cl37_fsm_recieved); | 5114 | cl37_fsm_received); |
2433 | return; | 5115 | return; |
2434 | } | 5116 | } |
2435 | /* | 5117 | /* |
@@ -2462,45 +5144,25 @@ static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy, | |||
2462 | vars->link_status |= | 5144 | vars->link_status |= |
2463 | LINK_STATUS_PARALLEL_DETECTION_USED; | 5145 | LINK_STATUS_PARALLEL_DETECTION_USED; |
2464 | } | 5146 | } |
2465 | 5147 | static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, | |
2466 | static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, | ||
2467 | struct link_params *params, | 5148 | struct link_params *params, |
2468 | struct link_vars *vars) | 5149 | struct link_vars *vars, |
5150 | u16 is_link_up, | ||
5151 | u16 speed_mask, | ||
5152 | u16 is_duplex) | ||
2469 | { | 5153 | { |
2470 | struct bnx2x *bp = params->bp; | 5154 | struct bnx2x *bp = params->bp; |
2471 | u16 new_line_speed, gp_status; | ||
2472 | u8 rc = 0; | ||
2473 | |||
2474 | /* Read gp_status */ | ||
2475 | CL22_RD_OVER_CL45(bp, phy, | ||
2476 | MDIO_REG_BANK_GP_STATUS, | ||
2477 | MDIO_GP_STATUS_TOP_AN_STATUS1, | ||
2478 | &gp_status); | ||
2479 | |||
2480 | if (phy->req_line_speed == SPEED_AUTO_NEG) | 5155 | if (phy->req_line_speed == SPEED_AUTO_NEG) |
2481 | vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; | 5156 | vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; |
2482 | if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { | 5157 | if (is_link_up) { |
2483 | DP(NETIF_MSG_LINK, "phy link up gp_status=0x%x\n", | 5158 | DP(NETIF_MSG_LINK, "phy link up\n"); |
2484 | gp_status); | ||
2485 | 5159 | ||
2486 | vars->phy_link_up = 1; | 5160 | vars->phy_link_up = 1; |
2487 | vars->link_status |= LINK_STATUS_LINK_UP; | 5161 | vars->link_status |= LINK_STATUS_LINK_UP; |
2488 | 5162 | ||
2489 | if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) | 5163 | switch (speed_mask) { |
2490 | vars->duplex = DUPLEX_FULL; | ||
2491 | else | ||
2492 | vars->duplex = DUPLEX_HALF; | ||
2493 | |||
2494 | if (SINGLE_MEDIA_DIRECT(params)) { | ||
2495 | bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status); | ||
2496 | if (phy->req_line_speed == SPEED_AUTO_NEG) | ||
2497 | bnx2x_xgxs_an_resolve(phy, params, vars, | ||
2498 | gp_status); | ||
2499 | } | ||
2500 | |||
2501 | switch (gp_status & GP_STATUS_SPEED_MASK) { | ||
2502 | case GP_STATUS_10M: | 5164 | case GP_STATUS_10M: |
2503 | new_line_speed = SPEED_10; | 5165 | vars->line_speed = SPEED_10; |
2504 | if (vars->duplex == DUPLEX_FULL) | 5166 | if (vars->duplex == DUPLEX_FULL) |
2505 | vars->link_status |= LINK_10TFD; | 5167 | vars->link_status |= LINK_10TFD; |
2506 | else | 5168 | else |
@@ -2508,7 +5170,7 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, | |||
2508 | break; | 5170 | break; |
2509 | 5171 | ||
2510 | case GP_STATUS_100M: | 5172 | case GP_STATUS_100M: |
2511 | new_line_speed = SPEED_100; | 5173 | vars->line_speed = SPEED_100; |
2512 | if (vars->duplex == DUPLEX_FULL) | 5174 | if (vars->duplex == DUPLEX_FULL) |
2513 | vars->link_status |= LINK_100TXFD; | 5175 | vars->link_status |= LINK_100TXFD; |
2514 | else | 5176 | else |
@@ -2517,7 +5179,7 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, | |||
2517 | 5179 | ||
2518 | case GP_STATUS_1G: | 5180 | case GP_STATUS_1G: |
2519 | case GP_STATUS_1G_KX: | 5181 | case GP_STATUS_1G_KX: |
2520 | new_line_speed = SPEED_1000; | 5182 | vars->line_speed = SPEED_1000; |
2521 | if (vars->duplex == DUPLEX_FULL) | 5183 | if (vars->duplex == DUPLEX_FULL) |
2522 | vars->link_status |= LINK_1000TFD; | 5184 | vars->link_status |= LINK_1000TFD; |
2523 | else | 5185 | else |
@@ -2525,7 +5187,7 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, | |||
2525 | break; | 5187 | break; |
2526 | 5188 | ||
2527 | case GP_STATUS_2_5G: | 5189 | case GP_STATUS_2_5G: |
2528 | new_line_speed = SPEED_2500; | 5190 | vars->line_speed = SPEED_2500; |
2529 | if (vars->duplex == DUPLEX_FULL) | 5191 | if (vars->duplex == DUPLEX_FULL) |
2530 | vars->link_status |= LINK_2500TFD; | 5192 | vars->link_status |= LINK_2500TFD; |
2531 | else | 5193 | else |
@@ -2536,50 +5198,28 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, | |||
2536 | case GP_STATUS_6G: | 5198 | case GP_STATUS_6G: |
2537 | DP(NETIF_MSG_LINK, | 5199 | DP(NETIF_MSG_LINK, |
2538 | "link speed unsupported gp_status 0x%x\n", | 5200 | "link speed unsupported gp_status 0x%x\n", |
2539 | gp_status); | 5201 | speed_mask); |
2540 | return -EINVAL; | 5202 | return -EINVAL; |
2541 | 5203 | ||
2542 | case GP_STATUS_10G_KX4: | 5204 | case GP_STATUS_10G_KX4: |
2543 | case GP_STATUS_10G_HIG: | 5205 | case GP_STATUS_10G_HIG: |
2544 | case GP_STATUS_10G_CX4: | 5206 | case GP_STATUS_10G_CX4: |
2545 | new_line_speed = SPEED_10000; | 5207 | case GP_STATUS_10G_KR: |
5208 | case GP_STATUS_10G_SFI: | ||
5209 | case GP_STATUS_10G_XFI: | ||
5210 | vars->line_speed = SPEED_10000; | ||
2546 | vars->link_status |= LINK_10GTFD; | 5211 | vars->link_status |= LINK_10GTFD; |
2547 | break; | 5212 | break; |
2548 | 5213 | case GP_STATUS_20G_DXGXS: | |
2549 | case GP_STATUS_12G_HIG: | 5214 | vars->line_speed = SPEED_20000; |
2550 | new_line_speed = SPEED_12000; | 5215 | vars->link_status |= LINK_20GTFD; |
2551 | vars->link_status |= LINK_12GTFD; | ||
2552 | break; | ||
2553 | |||
2554 | case GP_STATUS_12_5G: | ||
2555 | new_line_speed = SPEED_12500; | ||
2556 | vars->link_status |= LINK_12_5GTFD; | ||
2557 | break; | ||
2558 | |||
2559 | case GP_STATUS_13G: | ||
2560 | new_line_speed = SPEED_13000; | ||
2561 | vars->link_status |= LINK_13GTFD; | ||
2562 | break; | 5216 | break; |
2563 | |||
2564 | case GP_STATUS_15G: | ||
2565 | new_line_speed = SPEED_15000; | ||
2566 | vars->link_status |= LINK_15GTFD; | ||
2567 | break; | ||
2568 | |||
2569 | case GP_STATUS_16G: | ||
2570 | new_line_speed = SPEED_16000; | ||
2571 | vars->link_status |= LINK_16GTFD; | ||
2572 | break; | ||
2573 | |||
2574 | default: | 5217 | default: |
2575 | DP(NETIF_MSG_LINK, | 5218 | DP(NETIF_MSG_LINK, |
2576 | "link speed unsupported gp_status 0x%x\n", | 5219 | "link speed unsupported gp_status 0x%x\n", |
2577 | gp_status); | 5220 | speed_mask); |
2578 | return -EINVAL; | 5221 | return -EINVAL; |
2579 | } | 5222 | } |
2580 | |||
2581 | vars->line_speed = new_line_speed; | ||
2582 | |||
2583 | } else { /* link_down */ | 5223 | } else { /* link_down */ |
2584 | DP(NETIF_MSG_LINK, "phy link down\n"); | 5224 | DP(NETIF_MSG_LINK, "phy link down\n"); |
2585 | 5225 | ||
@@ -2588,7 +5228,47 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, | |||
2588 | vars->duplex = DUPLEX_FULL; | 5228 | vars->duplex = DUPLEX_FULL; |
2589 | vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; | 5229 | vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; |
2590 | vars->mac_type = MAC_TYPE_NONE; | 5230 | vars->mac_type = MAC_TYPE_NONE; |
5231 | } | ||
5232 | DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n", | ||
5233 | vars->phy_link_up, vars->line_speed); | ||
5234 | return 0; | ||
5235 | } | ||
5236 | |||
5237 | static int bnx2x_link_settings_status(struct bnx2x_phy *phy, | ||
5238 | struct link_params *params, | ||
5239 | struct link_vars *vars) | ||
5240 | { | ||
5241 | |||
5242 | struct bnx2x *bp = params->bp; | ||
5243 | |||
5244 | u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask; | ||
5245 | int rc = 0; | ||
2591 | 5246 | ||
5247 | /* Read gp_status */ | ||
5248 | CL22_RD_OVER_CL45(bp, phy, | ||
5249 | MDIO_REG_BANK_GP_STATUS, | ||
5250 | MDIO_GP_STATUS_TOP_AN_STATUS1, | ||
5251 | &gp_status); | ||
5252 | if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) | ||
5253 | duplex = DUPLEX_FULL; | ||
5254 | if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) | ||
5255 | link_up = 1; | ||
5256 | speed_mask = gp_status & GP_STATUS_SPEED_MASK; | ||
5257 | DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n", | ||
5258 | gp_status, link_up, speed_mask); | ||
5259 | rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, speed_mask, | ||
5260 | duplex); | ||
5261 | if (rc == -EINVAL) | ||
5262 | return rc; | ||
5263 | |||
5264 | if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { | ||
5265 | if (SINGLE_MEDIA_DIRECT(params)) { | ||
5266 | bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status); | ||
5267 | if (phy->req_line_speed == SPEED_AUTO_NEG) | ||
5268 | bnx2x_xgxs_an_resolve(phy, params, vars, | ||
5269 | gp_status); | ||
5270 | } | ||
5271 | } else { /* link_down */ | ||
2592 | if ((phy->req_line_speed == SPEED_AUTO_NEG) && | 5272 | if ((phy->req_line_speed == SPEED_AUTO_NEG) && |
2593 | SINGLE_MEDIA_DIRECT(params)) { | 5273 | SINGLE_MEDIA_DIRECT(params)) { |
2594 | /* Check signal is detected */ | 5274 | /* Check signal is detected */ |
@@ -2596,13 +5276,86 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, | |||
2596 | } | 5276 | } |
2597 | } | 5277 | } |
2598 | 5278 | ||
2599 | DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n", | ||
2600 | gp_status, vars->phy_link_up, vars->line_speed); | ||
2601 | DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", | 5279 | DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", |
2602 | vars->duplex, vars->flow_ctrl, vars->link_status); | 5280 | vars->duplex, vars->flow_ctrl, vars->link_status); |
2603 | return rc; | 5281 | return rc; |
2604 | } | 5282 | } |
2605 | 5283 | ||
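
The rework above separates reading the XGXS GP status word from interpreting it: the front-end only extracts link-up, duplex and the raw speed field, and hands them to the shared bnx2x_get_link_speed_duplex() helper, which the warpcore path added below reuses. A minimal user-space sketch of that split, with invented status bits and speed codes standing in for the real GP_STATUS encodings:

    /*
     * Minimal sketch: one shared resolver for speed/duplex, fed by whichever
     * front-end read the raw status word.  All bit layouts and speed codes
     * here are invented placeholders, not the real MDIO/GP_STATUS encodings.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define ST_LINK_UP      (1u << 0)   /* placeholder bit */
    #define ST_FULL_DUPLEX  (1u << 1)   /* placeholder bit */
    #define ST_SPEED_MASK   0x0f00u     /* placeholder field */
    #define SPEED_CODE_1G   0x0100u     /* placeholder code */
    #define SPEED_CODE_10G  0x0200u     /* placeholder code */

    struct link_state {
        int link_up;
        int full_duplex;
        int line_speed;                 /* Mb/s, 0 when down */
    };

    /* Shared resolver: both front-ends feed their raw word into this. */
    static int resolve_speed_duplex(uint16_t status, struct link_state *out)
    {
        out->link_up     = !!(status & ST_LINK_UP);
        out->full_duplex = !!(status & ST_FULL_DUPLEX);

        if (!out->link_up) {
            out->line_speed = 0;
            return 0;
        }
        switch (status & ST_SPEED_MASK) {
        case SPEED_CODE_1G:
            out->line_speed = 1000;
            return 0;
        case SPEED_CODE_10G:
            out->line_speed = 10000;
            return 0;
        default:
            return -1;                  /* unsupported speed code */
        }
    }

    int main(void)
    {
        struct link_state ls;

        if (!resolve_speed_duplex(ST_LINK_UP | ST_FULL_DUPLEX | SPEED_CODE_10G, &ls))
            printf("up=%d duplex=%d speed=%d\n",
                   ls.link_up, ls.full_duplex, ls.line_speed);
        return 0;
    }

Keeping the speed table in a single resolver is what lets the warpcore front-end feed it a differently packed word without duplicating the switch.
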
5284 | static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, | ||
5285 | struct link_params *params, | ||
5286 | struct link_vars *vars) | ||
5287 | { | ||
5288 | |||
5289 | struct bnx2x *bp = params->bp; | ||
5290 | |||
5291 | u8 lane; | ||
5292 | u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL; | ||
5293 | int rc = 0; | ||
5294 | lane = bnx2x_get_warpcore_lane(phy, params); | ||
5295 | /* Read gp_status */ | ||
5296 | if (phy->req_line_speed > SPEED_10000) { | ||
5297 | u16 temp_link_up; | ||
5298 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
5299 | 1, &temp_link_up); | ||
5300 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
5301 | 1, &link_up); | ||
5302 | DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n", | ||
5303 | temp_link_up, link_up); | ||
5304 | link_up &= (1<<2); | ||
5305 | if (link_up) | ||
5306 | bnx2x_ext_phy_resolve_fc(phy, params, vars); | ||
5307 | } else { | ||
5308 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
5309 | MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1); | ||
5310 | DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1); | ||
5311 | /* Check for either KR or generic link up. */ | ||
5312 | gp_status1 = ((gp_status1 >> 8) & 0xf) | | ||
5313 | ((gp_status1 >> 12) & 0xf); | ||
5314 | link_up = gp_status1 & (1 << lane); | ||
5315 | if (link_up && SINGLE_MEDIA_DIRECT(params)) { | ||
5316 | u16 pd, gp_status4; | ||
5317 | if (phy->req_line_speed == SPEED_AUTO_NEG) { | ||
5318 | /* Check Autoneg complete */ | ||
5319 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
5320 | MDIO_WC_REG_GP2_STATUS_GP_2_4, | ||
5321 | &gp_status4); | ||
5322 | if (gp_status4 & ((1<<12)<<lane)) | ||
5323 | vars->link_status |= | ||
5324 | LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; | ||
5325 | |||
5326 | /* Check parallel detect used */ | ||
5327 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
5328 | MDIO_WC_REG_PAR_DET_10G_STATUS, | ||
5329 | &pd); | ||
5330 | if (pd & (1<<15)) | ||
5331 | vars->link_status |= | ||
5332 | LINK_STATUS_PARALLEL_DETECTION_USED; | ||
5333 | } | ||
5334 | bnx2x_ext_phy_resolve_fc(phy, params, vars); | ||
5335 | } | ||
5336 | } | ||
5337 | |||
5338 | if (lane < 2) { | ||
5339 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
5340 | MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed); | ||
5341 | } else { | ||
5342 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
5343 | MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed); | ||
5344 | } | ||
5345 | DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed); | ||
5346 | |||
5347 | if ((lane & 1) == 0) | ||
5348 | gp_speed <<= 8; | ||
5349 | gp_speed &= 0x3f00; | ||
5350 | |||
5351 | |||
5352 | rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, | ||
5353 | duplex); | ||
5354 | |||
5355 | DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", | ||
5356 | vars->duplex, vars->flow_ctrl, vars->link_status); | ||
5357 | return rc; | ||
5358 | } | ||
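
In the warpcore read above, the per-lane speed code comes from GP_2_2 (lanes 0 and 1) or GP_2_3 (lanes 2 and 3) and is normalized into bits 8..13 before being handed to the same resolver. The shift and the 0x3f00 mask suggest each 16-bit word carries two lanes, even lane in the low byte and odd lane in the high byte; a small sketch under that assumption:

    /*
     * Per-lane speed extraction as implied by the shift/mask above; the
     * packing (even lane in the low byte, odd lane in the high byte) is an
     * assumption read off the driver code, not a documented layout.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t lane_speed_field(uint8_t lane, uint16_t gp_2_2, uint16_t gp_2_3)
    {
        uint16_t word = (lane < 2) ? gp_2_2 : gp_2_3;

        if ((lane & 1) == 0)            /* even lane: code sits in the low byte */
            word <<= 8;
        return word & 0x3f00;           /* keep only the 6-bit speed code */
    }

    int main(void)
    {
        /* made-up register contents: lane 0 code in the low byte of GP_2_2,
         * lane 3 code in the high byte of GP_2_3 */
        printf("lane0 0x%04x lane3 0x%04x\n",
               lane_speed_field(0, 0x0012, 0x0000),
               lane_speed_field(3, 0x0000, 0x3400));
        return 0;
    }
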
2606 | static void bnx2x_set_gmii_tx_driver(struct link_params *params) | 5359 | static void bnx2x_set_gmii_tx_driver(struct link_params *params) |
2607 | { | 5360 | { |
2608 | struct bnx2x *bp = params->bp; | 5361 | struct bnx2x *bp = params->bp; |
@@ -2642,8 +5395,8 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params) | |||
2642 | } | 5395 | } |
2643 | } | 5396 | } |
2644 | 5397 | ||
2645 | static u8 bnx2x_emac_program(struct link_params *params, | 5398 | static int bnx2x_emac_program(struct link_params *params, |
2646 | struct link_vars *vars) | 5399 | struct link_vars *vars) |
2647 | { | 5400 | { |
2648 | struct bnx2x *bp = params->bp; | 5401 | struct bnx2x *bp = params->bp; |
2649 | u8 port = params->port; | 5402 | u8 port = params->port; |
@@ -2713,9 +5466,9 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy, | |||
2713 | } | 5466 | } |
2714 | } | 5467 | } |
2715 | 5468 | ||
2716 | static void bnx2x_init_internal_phy(struct bnx2x_phy *phy, | 5469 | static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy, |
2717 | struct link_params *params, | 5470 | struct link_params *params, |
2718 | struct link_vars *vars) | 5471 | struct link_vars *vars) |
2719 | { | 5472 | { |
2720 | struct bnx2x *bp = params->bp; | 5473 | struct bnx2x *bp = params->bp; |
2721 | u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) || | 5474 | u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) || |
@@ -2742,11 +5495,11 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy, | |||
2742 | DP(NETIF_MSG_LINK, "not SGMII, AN\n"); | 5495 | DP(NETIF_MSG_LINK, "not SGMII, AN\n"); |
2743 | 5496 | ||
2744 | /* AN enabled */ | 5497 | /* AN enabled */ |
2745 | bnx2x_set_brcm_cl37_advertisment(phy, params); | 5498 | bnx2x_set_brcm_cl37_advertisement(phy, params); |
2746 | 5499 | ||
2747 | /* program duplex & pause advertisement (for aneg) */ | 5500 | /* program duplex & pause advertisement (for aneg) */ |
2748 | bnx2x_set_ieee_aneg_advertisment(phy, params, | 5501 | bnx2x_set_ieee_aneg_advertisement(phy, params, |
2749 | vars->ieee_fc); | 5502 | vars->ieee_fc); |
2750 | 5503 | ||
2751 | /* enable autoneg */ | 5504 | /* enable autoneg */ |
2752 | bnx2x_set_autoneg(phy, params, vars, enable_cl73); | 5505 | bnx2x_set_autoneg(phy, params, vars, enable_cl73); |
@@ -2762,29 +5515,12 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy, | |||
2762 | } | 5515 | } |
2763 | } | 5516 | } |
2764 | 5517 | ||
2765 | static u8 bnx2x_init_serdes(struct bnx2x_phy *phy, | 5518 | static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy, |
2766 | struct link_params *params, | ||
2767 | struct link_vars *vars) | ||
2768 | { | ||
2769 | u8 rc; | ||
2770 | vars->phy_flags |= PHY_SGMII_FLAG; | ||
2771 | bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); | ||
2772 | bnx2x_set_aer_mmd_serdes(params->bp, phy); | ||
2773 | rc = bnx2x_reset_unicore(params, phy, 1); | ||
2774 | /* reset the SerDes and wait for reset bit return low */ | ||
2775 | if (rc != 0) | ||
2776 | return rc; | ||
2777 | bnx2x_set_aer_mmd_serdes(params->bp, phy); | ||
2778 | |||
2779 | return rc; | ||
2780 | } | ||
2781 | |||
2782 | static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy, | ||
2783 | struct link_params *params, | 5519 | struct link_params *params, |
2784 | struct link_vars *vars) | 5520 | struct link_vars *vars) |
2785 | { | 5521 | { |
2786 | u8 rc; | 5522 | int rc; |
2787 | vars->phy_flags = PHY_XGXS_FLAG; | 5523 | vars->phy_flags |= PHY_XGXS_FLAG; |
2788 | if ((phy->req_line_speed && | 5524 | if ((phy->req_line_speed && |
2789 | ((phy->req_line_speed == SPEED_100) || | 5525 | ((phy->req_line_speed == SPEED_100) || |
2790 | (phy->req_line_speed == SPEED_10))) || | 5526 | (phy->req_line_speed == SPEED_10))) || |
@@ -2792,26 +5528,28 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy, | |||
2792 | (phy->speed_cap_mask >= | 5528 | (phy->speed_cap_mask >= |
2793 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && | 5529 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && |
2794 | (phy->speed_cap_mask < | 5530 | (phy->speed_cap_mask < |
2795 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) | 5531 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || |
2796 | )) | 5532 | (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD)) |
2797 | vars->phy_flags |= PHY_SGMII_FLAG; | 5533 | vars->phy_flags |= PHY_SGMII_FLAG; |
2798 | else | 5534 | else |
2799 | vars->phy_flags &= ~PHY_SGMII_FLAG; | 5535 | vars->phy_flags &= ~PHY_SGMII_FLAG; |
2800 | 5536 | ||
2801 | bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); | 5537 | bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); |
2802 | bnx2x_set_aer_mmd_xgxs(params, phy); | 5538 | bnx2x_set_aer_mmd(params, phy); |
2803 | bnx2x_set_master_ln(params, phy); | 5539 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) |
5540 | bnx2x_set_master_ln(params, phy); | ||
2804 | 5541 | ||
2805 | rc = bnx2x_reset_unicore(params, phy, 0); | 5542 | rc = bnx2x_reset_unicore(params, phy, 0); |
2806 | /* reset the SerDes and wait for reset bit return low */ | 5543 | /* reset the SerDes and wait for reset bit return low */ |
2807 | if (rc != 0) | 5544 | if (rc != 0) |
2808 | return rc; | 5545 | return rc; |
2809 | 5546 | ||
2810 | bnx2x_set_aer_mmd_xgxs(params, phy); | 5547 | bnx2x_set_aer_mmd(params, phy); |
2811 | |||
2812 | /* setting the masterLn_def again after the reset */ | 5548 | /* setting the masterLn_def again after the reset */ |
2813 | bnx2x_set_master_ln(params, phy); | 5549 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) { |
2814 | bnx2x_set_swap_lanes(params, phy); | 5550 | bnx2x_set_master_ln(params, phy); |
5551 | bnx2x_set_swap_lanes(params, phy); | ||
5552 | } | ||
2815 | 5553 | ||
2816 | return rc; | 5554 | return rc; |
2817 | } | 5555 | } |
@@ -2823,8 +5561,13 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, | |||
2823 | u16 cnt, ctrl; | 5561 | u16 cnt, ctrl; |
2824 | /* Wait for soft reset to get cleared up to 1 sec */ | 5562 | /* Wait for soft reset to get cleared up to 1 sec */ |
2825 | for (cnt = 0; cnt < 1000; cnt++) { | 5563 | for (cnt = 0; cnt < 1000; cnt++) { |
2826 | bnx2x_cl45_read(bp, phy, | 5564 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) |
2827 | MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, &ctrl); | 5565 | bnx2x_cl22_read(bp, phy, |
5566 | MDIO_PMA_REG_CTRL, &ctrl); | ||
5567 | else | ||
5568 | bnx2x_cl45_read(bp, phy, | ||
5569 | MDIO_PMA_DEVAD, | ||
5570 | MDIO_PMA_REG_CTRL, &ctrl); | ||
2828 | if (!(ctrl & (1<<15))) | 5571 | if (!(ctrl & (1<<15))) |
2829 | break; | 5572 | break; |
2830 | msleep(1); | 5573 | msleep(1); |
@@ -2845,7 +5588,11 @@ static void bnx2x_link_int_enable(struct link_params *params) | |||
2845 | struct bnx2x *bp = params->bp; | 5588 | struct bnx2x *bp = params->bp; |
2846 | 5589 | ||
2847 | /* Setting the status to report on link up for either XGXS or SerDes */ | 5590 | /* Setting the status to report on link up for either XGXS or SerDes */ |
2848 | if (params->switch_cfg == SWITCH_CFG_10G) { | 5591 | if (CHIP_IS_E3(bp)) { |
5592 | mask = NIG_MASK_XGXS0_LINK_STATUS; | ||
5593 | if (!(SINGLE_MEDIA_DIRECT(params))) | ||
5594 | mask |= NIG_MASK_MI_INT; | ||
5595 | } else if (params->switch_cfg == SWITCH_CFG_10G) { | ||
2849 | mask = (NIG_MASK_XGXS0_LINK10G | | 5596 | mask = (NIG_MASK_XGXS0_LINK10G | |
2850 | NIG_MASK_XGXS0_LINK_STATUS); | 5597 | NIG_MASK_XGXS0_LINK_STATUS); |
2851 | DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n"); | 5598 | DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n"); |
@@ -2918,11 +5665,11 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port, | |||
2918 | } | 5665 | } |
2919 | 5666 | ||
2920 | static void bnx2x_link_int_ack(struct link_params *params, | 5667 | static void bnx2x_link_int_ack(struct link_params *params, |
2921 | struct link_vars *vars, u8 is_10g) | 5668 | struct link_vars *vars, u8 is_10g_plus) |
2922 | { | 5669 | { |
2923 | struct bnx2x *bp = params->bp; | 5670 | struct bnx2x *bp = params->bp; |
2924 | u8 port = params->port; | 5671 | u8 port = params->port; |
2925 | 5672 | u32 mask; | |
2926 | /* | 5673 | /* |
2927 | * First reset all status we assume only one line will be | 5674 | * First reset all status we assume only one line will be |
2928 | * change at a time | 5675 | * change at a time |
@@ -2932,47 +5679,34 @@ static void bnx2x_link_int_ack(struct link_params *params, | |||
2932 | NIG_STATUS_XGXS0_LINK_STATUS | | 5679 | NIG_STATUS_XGXS0_LINK_STATUS | |
2933 | NIG_STATUS_SERDES0_LINK_STATUS)); | 5680 | NIG_STATUS_SERDES0_LINK_STATUS)); |
2934 | if (vars->phy_link_up) { | 5681 | if (vars->phy_link_up) { |
2935 | if (is_10g) { | 5682 | if (USES_WARPCORE(bp)) |
2936 | /* | 5683 | mask = NIG_STATUS_XGXS0_LINK_STATUS; |
2937 | * Disable the 10G link interrupt by writing 1 to the | 5684 | else { |
2938 | * status register | 5685 | if (is_10g_plus) |
2939 | */ | 5686 | mask = NIG_STATUS_XGXS0_LINK10G; |
2940 | DP(NETIF_MSG_LINK, "10G XGXS phy link up\n"); | 5687 | else if (params->switch_cfg == SWITCH_CFG_10G) { |
2941 | bnx2x_bits_en(bp, | 5688 | /* |
2942 | NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, | 5689 | * Disable the link interrupt by writing 1 to |
2943 | NIG_STATUS_XGXS0_LINK10G); | 5690 | * the relevant lane in the status register |
2944 | 5691 | */ | |
2945 | } else if (params->switch_cfg == SWITCH_CFG_10G) { | 5692 | u32 ser_lane = |
2946 | /* | 5693 | ((params->lane_config & |
2947 | * Disable the link interrupt by writing 1 to the | ||
2948 | * relevant lane in the status register | ||
2949 | */ | ||
2950 | u32 ser_lane = ((params->lane_config & | ||
2951 | PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> | 5694 | PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> |
2952 | PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); | 5695 | PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); |
2953 | 5696 | mask = ((1 << ser_lane) << | |
2954 | DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n", | 5697 | NIG_STATUS_XGXS0_LINK_STATUS_SIZE); |
2955 | vars->line_speed); | 5698 | } else |
2956 | bnx2x_bits_en(bp, | 5699 | mask = NIG_STATUS_SERDES0_LINK_STATUS; |
2957 | NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, | ||
2958 | ((1 << ser_lane) << | ||
2959 | NIG_STATUS_XGXS0_LINK_STATUS_SIZE)); | ||
2960 | |||
2961 | } else { /* SerDes */ | ||
2962 | DP(NETIF_MSG_LINK, "SerDes phy link up\n"); | ||
2963 | /* | ||
2964 | * Disable the link interrupt by writing 1 to the status | ||
2965 | * register | ||
2966 | */ | ||
2967 | bnx2x_bits_en(bp, | ||
2968 | NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, | ||
2969 | NIG_STATUS_SERDES0_LINK_STATUS); | ||
2970 | } | 5700 | } |
2971 | 5701 | DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n", | |
5702 | mask); | ||
5703 | bnx2x_bits_en(bp, | ||
5704 | NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, | ||
5705 | mask); | ||
2972 | } | 5706 | } |
2973 | } | 5707 | } |
2974 | 5708 | ||
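
The acknowledge path above now computes a single mask up front and issues one write-1-to-acknowledge through bnx2x_bits_en(), instead of duplicating the write in each of the 10G, per-lane and SerDes branches. A sketch of the mask selection, with placeholder values standing in for the NIG status bits and the lane-swap field layout:

    /*
     * Pick one ack mask, then do a single write-1-to-acknowledge.  Bit
     * values and the lane-swap field layout are placeholders; only the
     * pattern of extracting the master lane from a config word and shifting
     * a single bit into the per-lane status area follows the driver code.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define LANE_CFG_MASTER_MASK   0x00000300u  /* placeholder field */
    #define LANE_CFG_MASTER_SHIFT  8            /* placeholder shift */
    #define LINK_STATUS_SIZE       1            /* placeholder offset */
    #define STATUS_LINK10G_BIT     (1u << 0)    /* placeholder bit */

    static uint32_t ack_mask(int is_10g_plus, uint32_t lane_config)
    {
        uint32_t ser_lane;

        if (is_10g_plus)
            return STATUS_LINK10G_BIT;

        ser_lane = (lane_config & LANE_CFG_MASTER_MASK) >> LANE_CFG_MASTER_SHIFT;
        return (1u << ser_lane) << LINK_STATUS_SIZE;
    }

    int main(void)
    {
        uint32_t cfg = 2u << LANE_CFG_MASTER_SHIFT;  /* master lane 2 */

        printf("10G+ mask 0x%x, per-lane mask 0x%x\n",
               ack_mask(1, cfg), ack_mask(0, cfg));
        return 0;
    }
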
2975 | static u8 bnx2x_format_ver(u32 num, u8 *str, u16 *len) | 5709 | static int bnx2x_format_ver(u32 num, u8 *str, u16 *len) |
2976 | { | 5710 | { |
2977 | u8 *str_ptr = str; | 5711 | u8 *str_ptr = str; |
2978 | u32 mask = 0xf0000000; | 5712 | u32 mask = 0xf0000000; |
@@ -3011,19 +5745,19 @@ static u8 bnx2x_format_ver(u32 num, u8 *str, u16 *len) | |||
3011 | } | 5745 | } |
3012 | 5746 | ||
3013 | 5747 | ||
3014 | static u8 bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) | 5748 | static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) |
3015 | { | 5749 | { |
3016 | str[0] = '\0'; | 5750 | str[0] = '\0'; |
3017 | (*len)--; | 5751 | (*len)--; |
3018 | return 0; | 5752 | return 0; |
3019 | } | 5753 | } |
3020 | 5754 | ||
3021 | u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, | 5755 | int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, |
3022 | u8 *version, u16 len) | 5756 | u8 *version, u16 len) |
3023 | { | 5757 | { |
3024 | struct bnx2x *bp; | 5758 | struct bnx2x *bp; |
3025 | u32 spirom_ver = 0; | 5759 | u32 spirom_ver = 0; |
3026 | u8 status = 0; | 5760 | int status = 0; |
3027 | u8 *ver_p = version; | 5761 | u8 *ver_p = version; |
3028 | u16 remain_len = len; | 5762 | u16 remain_len = len; |
3029 | if (version == NULL || params == NULL) | 5763 | if (version == NULL || params == NULL) |
@@ -3065,15 +5799,18 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, | |||
3065 | struct bnx2x *bp = params->bp; | 5799 | struct bnx2x *bp = params->bp; |
3066 | 5800 | ||
3067 | if (phy->req_line_speed != SPEED_1000) { | 5801 | if (phy->req_line_speed != SPEED_1000) { |
3068 | u32 md_devad; | 5802 | u32 md_devad = 0; |
3069 | 5803 | ||
3070 | DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); | 5804 | DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); |
3071 | 5805 | ||
3072 | /* change the uni_phy_addr in the nig */ | 5806 | if (!CHIP_IS_E3(bp)) { |
3073 | md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + | 5807 | /* change the uni_phy_addr in the nig */ |
3074 | port*0x18)); | 5808 | md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + |
5809 | port*0x18)); | ||
3075 | 5810 | ||
3076 | REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5); | 5811 | REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, |
5812 | 0x5); | ||
5813 | } | ||
3077 | 5814 | ||
3078 | bnx2x_cl45_write(bp, phy, | 5815 | bnx2x_cl45_write(bp, phy, |
3079 | 5, | 5816 | 5, |
@@ -3088,10 +5825,13 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, | |||
3088 | 0x6041); | 5825 | 0x6041); |
3089 | msleep(200); | 5826 | msleep(200); |
3090 | /* set aer mmd back */ | 5827 | /* set aer mmd back */ |
3091 | bnx2x_set_aer_mmd_xgxs(params, phy); | 5828 | bnx2x_set_aer_mmd(params, phy); |
3092 | 5829 | ||
3093 | /* and md_devad */ | 5830 | if (!CHIP_IS_E3(bp)) { |
3094 | REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad); | 5831 | /* and md_devad */ |
5832 | REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, | ||
5833 | md_devad); | ||
5834 | } | ||
3095 | } else { | 5835 | } else { |
3096 | u16 mii_ctrl; | 5836 | u16 mii_ctrl; |
3097 | DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n"); | 5837 | DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n"); |
@@ -3107,12 +5847,13 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, | |||
3107 | } | 5847 | } |
3108 | } | 5848 | } |
3109 | 5849 | ||
3110 | u8 bnx2x_set_led(struct link_params *params, | 5850 | int bnx2x_set_led(struct link_params *params, |
3111 | struct link_vars *vars, u8 mode, u32 speed) | 5851 | struct link_vars *vars, u8 mode, u32 speed) |
3112 | { | 5852 | { |
3113 | u8 port = params->port; | 5853 | u8 port = params->port; |
3114 | u16 hw_led_mode = params->hw_led_mode; | 5854 | u16 hw_led_mode = params->hw_led_mode; |
3115 | u8 rc = 0, phy_idx; | 5855 | int rc = 0; |
5856 | u8 phy_idx; | ||
3116 | u32 tmp; | 5857 | u32 tmp; |
3117 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | 5858 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
3118 | struct bnx2x *bp = params->bp; | 5859 | struct bnx2x *bp = params->bp; |
@@ -3146,8 +5887,10 @@ u8 bnx2x_set_led(struct link_params *params, | |||
3146 | if (!vars->link_up) | 5887 | if (!vars->link_up) |
3147 | break; | 5888 | break; |
3148 | case LED_MODE_ON: | 5889 | case LED_MODE_ON: |
3149 | if (params->phy[EXT_PHY1].type == | 5890 | if (((params->phy[EXT_PHY1].type == |
3150 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 && | 5891 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || |
5892 | (params->phy[EXT_PHY1].type == | ||
5893 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) && | ||
3151 | CHIP_IS_E2(bp) && params->num_phys == 2) { | 5894 | CHIP_IS_E2(bp) && params->num_phys == 2) { |
3152 | /* | 5895 | /* |
3153 | * This is a work-around for E2+8727 Configurations | 5896 | * This is a work-around for E2+8727 Configurations |
@@ -3162,7 +5905,9 @@ u8 bnx2x_set_led(struct link_params *params, | |||
3162 | (tmp | EMAC_LED_OVERRIDE)); | 5905 | (tmp | EMAC_LED_OVERRIDE)); |
3163 | return rc; | 5906 | return rc; |
3164 | } | 5907 | } |
3165 | } else if (SINGLE_MEDIA_DIRECT(params)) { | 5908 | } else if (SINGLE_MEDIA_DIRECT(params) && |
5909 | (CHIP_IS_E1x(bp) || | ||
5910 | CHIP_IS_E2(bp))) { | ||
3166 | /* | 5911 | /* |
3167 | * This is a work-around for HW issue found when link | 5912 | * This is a work-around for HW issue found when link |
3168 | * is up in CL73 | 5913 | * is up in CL73 |
@@ -3214,21 +5959,49 @@ u8 bnx2x_set_led(struct link_params *params, | |||
3214 | * This function comes to reflect the actual link state read DIRECTLY from the | 5959 | * This function comes to reflect the actual link state read DIRECTLY from the |
3215 | * HW | 5960 | * HW |
3216 | */ | 5961 | */ |
3217 | u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars, | 5962 | int bnx2x_test_link(struct link_params *params, struct link_vars *vars, |
3218 | u8 is_serdes) | 5963 | u8 is_serdes) |
3219 | { | 5964 | { |
3220 | struct bnx2x *bp = params->bp; | 5965 | struct bnx2x *bp = params->bp; |
3221 | u16 gp_status = 0, phy_index = 0; | 5966 | u16 gp_status = 0, phy_index = 0; |
3222 | u8 ext_phy_link_up = 0, serdes_phy_type; | 5967 | u8 ext_phy_link_up = 0, serdes_phy_type; |
3223 | struct link_vars temp_vars; | 5968 | struct link_vars temp_vars; |
3224 | 5969 | struct bnx2x_phy *int_phy = ¶ms->phy[INT_PHY]; | |
3225 | CL22_RD_OVER_CL45(bp, ¶ms->phy[INT_PHY], | 5970 | |
5971 | if (CHIP_IS_E3(bp)) { | ||
5972 | u16 link_up; | ||
5973 | if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] | ||
5974 | > SPEED_10000) { | ||
5975 | /* Check 20G link */ | ||
5976 | bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, | ||
5977 | 1, &link_up); | ||
5978 | bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, | ||
5979 | 1, &link_up); | ||
5980 | link_up &= (1<<2); | ||
5981 | } else { | ||
5982 | /* Check 10G link and below*/ | ||
5983 | u8 lane = bnx2x_get_warpcore_lane(int_phy, params); | ||
5984 | bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, | ||
5985 | MDIO_WC_REG_GP2_STATUS_GP_2_1, | ||
5986 | &gp_status); | ||
5987 | gp_status = ((gp_status >> 8) & 0xf) | | ||
5988 | ((gp_status >> 12) & 0xf); | ||
5989 | link_up = gp_status & (1 << lane); | ||
5990 | } | ||
5991 | if (!link_up) | ||
5992 | return -ESRCH; | ||
5993 | } else { | ||
5994 | CL22_RD_OVER_CL45(bp, int_phy, | ||
3226 | MDIO_REG_BANK_GP_STATUS, | 5995 | MDIO_REG_BANK_GP_STATUS, |
3227 | MDIO_GP_STATUS_TOP_AN_STATUS1, | 5996 | MDIO_GP_STATUS_TOP_AN_STATUS1, |
3228 | &gp_status); | 5997 | &gp_status); |
3229 | /* link is up only if both local phy and external phy are up */ | 5998 | /* link is up only if both local phy and external phy are up */ |
3230 | if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) | 5999 | if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) |
3231 | return -ESRCH; | 6000 | return -ESRCH; |
6001 | } | ||
6002 | /* In XGXS loopback mode, do not check external PHY */ | ||
6003 | if (params->loopback_mode == LOOPBACK_XGXS) | ||
6004 | return 0; | ||
3232 | 6005 | ||
3233 | switch (params->num_phys) { | 6006 | switch (params->num_phys) { |
3234 | case 1: | 6007 | case 1: |
@@ -3245,7 +6018,9 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars, | |||
3245 | serdes_phy_type = ((params->phy[phy_index].media_type == | 6018 | serdes_phy_type = ((params->phy[phy_index].media_type == |
3246 | ETH_PHY_SFP_FIBER) || | 6019 | ETH_PHY_SFP_FIBER) || |
3247 | (params->phy[phy_index].media_type == | 6020 | (params->phy[phy_index].media_type == |
3248 | ETH_PHY_XFP_FIBER)); | 6021 | ETH_PHY_XFP_FIBER) || |
6022 | (params->phy[phy_index].media_type == | ||
6023 | ETH_PHY_DA_TWINAX)); | ||
3249 | 6024 | ||
3250 | if (is_serdes != serdes_phy_type) | 6025 | if (is_serdes != serdes_phy_type) |
3251 | continue; | 6026 | continue; |
@@ -3263,10 +6038,10 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars, | |||
3263 | return -ESRCH; | 6038 | return -ESRCH; |
3264 | } | 6039 | } |
3265 | 6040 | ||
3266 | static u8 bnx2x_link_initialize(struct link_params *params, | 6041 | static int bnx2x_link_initialize(struct link_params *params, |
3267 | struct link_vars *vars) | 6042 | struct link_vars *vars) |
3268 | { | 6043 | { |
3269 | u8 rc = 0; | 6044 | int rc = 0; |
3270 | u8 phy_index, non_ext_phy; | 6045 | u8 phy_index, non_ext_phy; |
3271 | struct bnx2x *bp = params->bp; | 6046 | struct bnx2x *bp = params->bp; |
3272 | /* | 6047 | /* |
@@ -3282,12 +6057,8 @@ static u8 bnx2x_link_initialize(struct link_params *params, | |||
3282 | * (no external phys), or this board has external phy which requires | 6057 | * (no external phys), or this board has external phy which requires |
3283 | * to first. | 6058 | * to first. |
3284 | */ | 6059 | */ |
3285 | 6060 | if (!USES_WARPCORE(bp)) | |
3286 | if (params->phy[INT_PHY].config_init) | 6061 | bnx2x_prepare_xgxs(¶ms->phy[INT_PHY], params, vars); |
3287 | params->phy[INT_PHY].config_init( | ||
3288 | ¶ms->phy[INT_PHY], | ||
3289 | params, vars); | ||
3290 | |||
3291 | /* init ext phy and enable link state int */ | 6062 | /* init ext phy and enable link state int */ |
3292 | non_ext_phy = (SINGLE_MEDIA_DIRECT(params) || | 6063 | non_ext_phy = (SINGLE_MEDIA_DIRECT(params) || |
3293 | (params->loopback_mode == LOOPBACK_XGXS)); | 6064 | (params->loopback_mode == LOOPBACK_XGXS)); |
@@ -3296,13 +6067,22 @@ static u8 bnx2x_link_initialize(struct link_params *params, | |||
3296 | (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) || | 6067 | (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) || |
3297 | (params->loopback_mode == LOOPBACK_EXT_PHY)) { | 6068 | (params->loopback_mode == LOOPBACK_EXT_PHY)) { |
3298 | struct bnx2x_phy *phy = ¶ms->phy[INT_PHY]; | 6069 | struct bnx2x_phy *phy = ¶ms->phy[INT_PHY]; |
3299 | if (vars->line_speed == SPEED_AUTO_NEG) | 6070 | if (vars->line_speed == SPEED_AUTO_NEG && |
6071 | (CHIP_IS_E1x(bp) || | ||
6072 | CHIP_IS_E2(bp))) | ||
3300 | bnx2x_set_parallel_detection(phy, params); | 6073 | bnx2x_set_parallel_detection(phy, params); |
3301 | bnx2x_init_internal_phy(phy, params, vars); | 6074 | if (params->phy[INT_PHY].config_init) |
6075 | params->phy[INT_PHY].config_init(phy, | ||
6076 | params, | ||
6077 | vars); | ||
3302 | } | 6078 | } |
3303 | 6079 | ||
3304 | /* Init external phy*/ | 6080 | /* Init external phy*/ |
3305 | if (!non_ext_phy) | 6081 | if (non_ext_phy) { |
6082 | if (params->phy[INT_PHY].supported & | ||
6083 | SUPPORTED_FIBRE) | ||
6084 | vars->link_status |= LINK_STATUS_SERDES_LINK; | ||
6085 | } else { | ||
3306 | for (phy_index = EXT_PHY1; phy_index < params->num_phys; | 6086 | for (phy_index = EXT_PHY1; phy_index < params->num_phys; |
3307 | phy_index++) { | 6087 | phy_index++) { |
3308 | /* | 6088 | /* |
@@ -3311,17 +6091,22 @@ static u8 bnx2x_link_initialize(struct link_params *params, | |||
3311 | * need to initialize the first phy, since they are | 6091 | * need to initialize the first phy, since they are |
3312 | * connected. | 6092 | * connected. |
3313 | */ | 6093 | */ |
6094 | if (params->phy[phy_index].supported & | ||
6095 | SUPPORTED_FIBRE) | ||
6096 | vars->link_status |= LINK_STATUS_SERDES_LINK; | ||
6097 | |||
3314 | if (phy_index == EXT_PHY2 && | 6098 | if (phy_index == EXT_PHY2 && |
3315 | (bnx2x_phy_selection(params) == | 6099 | (bnx2x_phy_selection(params) == |
3316 | PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) { | 6100 | PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) { |
3317 | DP(NETIF_MSG_LINK, "Ignoring second phy\n"); | 6101 | DP(NETIF_MSG_LINK, "Not initializing" |
6102 | " second phy\n"); | ||
3318 | continue; | 6103 | continue; |
3319 | } | 6104 | } |
3320 | params->phy[phy_index].config_init( | 6105 | params->phy[phy_index].config_init( |
3321 | ¶ms->phy[phy_index], | 6106 | ¶ms->phy[phy_index], |
3322 | params, vars); | 6107 | params, vars); |
3323 | } | 6108 | } |
3324 | 6109 | } | |
3325 | /* Reset the interrupt indication after phy was initialized */ | 6110 | /* Reset the interrupt indication after phy was initialized */ |
3326 | bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + | 6111 | bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + |
3327 | params->port*4, | 6112 | params->port*4, |
@@ -3329,6 +6114,7 @@ static u8 bnx2x_link_initialize(struct link_params *params, | |||
3329 | NIG_STATUS_XGXS0_LINK_STATUS | | 6114 | NIG_STATUS_XGXS0_LINK_STATUS | |
3330 | NIG_STATUS_SERDES0_LINK_STATUS | | 6115 | NIG_STATUS_SERDES0_LINK_STATUS | |
3331 | NIG_MASK_MI_INT)); | 6116 | NIG_MASK_MI_INT)); |
6117 | bnx2x_update_mng(params, vars->link_status); | ||
3332 | return rc; | 6118 | return rc; |
3333 | } | 6119 | } |
3334 | 6120 | ||
@@ -3359,20 +6145,25 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy, | |||
3359 | DP(NETIF_MSG_LINK, "reset external PHY\n"); | 6145 | DP(NETIF_MSG_LINK, "reset external PHY\n"); |
3360 | } | 6146 | } |
3361 | 6147 | ||
3362 | static u8 bnx2x_update_link_down(struct link_params *params, | 6148 | static int bnx2x_update_link_down(struct link_params *params, |
3363 | struct link_vars *vars) | 6149 | struct link_vars *vars) |
3364 | { | 6150 | { |
3365 | struct bnx2x *bp = params->bp; | 6151 | struct bnx2x *bp = params->bp; |
3366 | u8 port = params->port; | 6152 | u8 port = params->port; |
3367 | 6153 | ||
3368 | DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); | 6154 | DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); |
3369 | bnx2x_set_led(params, vars, LED_MODE_OFF, 0); | 6155 | bnx2x_set_led(params, vars, LED_MODE_OFF, 0); |
3370 | 6156 | vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG; | |
3371 | /* indicate no mac active */ | 6157 | /* indicate no mac active */ |
3372 | vars->mac_type = MAC_TYPE_NONE; | 6158 | vars->mac_type = MAC_TYPE_NONE; |
3373 | 6159 | ||
3374 | /* update shared memory */ | 6160 | /* update shared memory */ |
3375 | vars->link_status = 0; | 6161 | vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | |
6162 | LINK_STATUS_LINK_UP | | ||
6163 | LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | | ||
6164 | LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | | ||
6165 | LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | | ||
6166 | LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK); | ||
3376 | vars->line_speed = 0; | 6167 | vars->line_speed = 0; |
3377 | bnx2x_update_mng(params, vars->link_status); | 6168 | bnx2x_update_mng(params, vars->link_status); |
3378 | 6169 | ||
@@ -3380,26 +6171,34 @@ static u8 bnx2x_update_link_down(struct link_params *params, | |||
3380 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); | 6171 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); |
3381 | 6172 | ||
3382 | /* disable emac */ | 6173 | /* disable emac */ |
3383 | REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); | 6174 | if (!CHIP_IS_E3(bp)) |
6175 | REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); | ||
3384 | 6176 | ||
3385 | msleep(10); | 6177 | msleep(10); |
3386 | 6178 | /* reset BigMac/Xmac */ | |
3387 | /* reset BigMac */ | 6179 | if (CHIP_IS_E1x(bp) || |
3388 | bnx2x_bmac_rx_disable(bp, params->port); | 6180 | CHIP_IS_E2(bp)) { |
3389 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | 6181 | bnx2x_bmac_rx_disable(bp, params->port); |
6182 | REG_WR(bp, GRCBASE_MISC + | ||
6183 | MISC_REGISTERS_RESET_REG_2_CLEAR, | ||
3390 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); | 6184 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); |
6185 | } | ||
6186 | if (CHIP_IS_E3(bp)) | ||
6187 | bnx2x_xmac_disable(params); | ||
6188 | |||
3391 | return 0; | 6189 | return 0; |
3392 | } | 6190 | } |
3393 | 6191 | ||
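
On link-down the status word is no longer zeroed wholesale; only the speed and duplex, link-up, autoneg-complete, flow-control and parallel-detection bits are cleared, so bits outside that mask, such as LINK_STATUS_SERDES_LINK set during bnx2x_link_initialize() above, survive the event. A toy illustration of the mask-based clear, with placeholder bit values:

    /*
     * Only the bits that describe the live link are dropped on link-down;
     * anything outside the mask is preserved.  Bit values are placeholders.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define LS_LINK_UP        (1u << 0)          /* placeholder */
    #define LS_AN_COMPLETE    (1u << 1)          /* placeholder */
    #define LS_SPEED_MASK     (0xfu << 2)        /* placeholder */
    #define LS_SERDES_LINK    (1u << 6)          /* placeholder, not cleared */

    #define LS_DYNAMIC_BITS   (LS_LINK_UP | LS_AN_COMPLETE | LS_SPEED_MASK)

    int main(void)
    {
        uint32_t link_status = LS_SERDES_LINK | LS_LINK_UP |
                               LS_AN_COMPLETE | (0x5u << 2);

        link_status &= ~LS_DYNAMIC_BITS;         /* link went down */
        printf("after link-down: 0x%x\n", link_status);  /* SERDES bit kept */
        return 0;
    }
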
3394 | static u8 bnx2x_update_link_up(struct link_params *params, | 6192 | static int bnx2x_update_link_up(struct link_params *params, |
3395 | struct link_vars *vars, | 6193 | struct link_vars *vars, |
3396 | u8 link_10g) | 6194 | u8 link_10g) |
3397 | { | 6195 | { |
3398 | struct bnx2x *bp = params->bp; | 6196 | struct bnx2x *bp = params->bp; |
3399 | u8 port = params->port; | 6197 | u8 port = params->port; |
3400 | u8 rc = 0; | 6198 | int rc = 0; |
3401 | 6199 | ||
3402 | vars->link_status |= LINK_STATUS_LINK_UP; | 6200 | vars->link_status |= LINK_STATUS_LINK_UP; |
6201 | vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; | ||
3403 | 6202 | ||
3404 | if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) | 6203 | if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) |
3405 | vars->link_status |= | 6204 | vars->link_status |= |
@@ -3408,25 +6207,48 @@ static u8 bnx2x_update_link_up(struct link_params *params, | |||
3408 | if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) | 6207 | if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) |
3409 | vars->link_status |= | 6208 | vars->link_status |= |
3410 | LINK_STATUS_RX_FLOW_CONTROL_ENABLED; | 6209 | LINK_STATUS_RX_FLOW_CONTROL_ENABLED; |
3411 | 6210 | if (USES_WARPCORE(bp)) { | |
3412 | if (link_10g) { | 6211 | if (link_10g) { |
3413 | bnx2x_bmac_enable(params, vars, 0); | 6212 | if (bnx2x_xmac_enable(params, vars, 0) == |
6213 | -ESRCH) { | ||
6214 | DP(NETIF_MSG_LINK, "Found errors on XMAC\n"); | ||
6215 | vars->link_up = 0; | ||
6216 | vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; | ||
6217 | vars->link_status &= ~LINK_STATUS_LINK_UP; | ||
6218 | } | ||
6219 | } else | ||
6220 | bnx2x_umac_enable(params, vars, 0); | ||
3414 | bnx2x_set_led(params, vars, | 6221 | bnx2x_set_led(params, vars, |
3415 | LED_MODE_OPER, SPEED_10000); | 6222 | LED_MODE_OPER, vars->line_speed); |
3416 | } else { | 6223 | } |
3417 | rc = bnx2x_emac_program(params, vars); | 6224 | if ((CHIP_IS_E1x(bp) || |
6225 | CHIP_IS_E2(bp))) { | ||
6226 | if (link_10g) { | ||
6227 | if (bnx2x_bmac_enable(params, vars, 0) == | ||
6228 | -ESRCH) { | ||
6229 | DP(NETIF_MSG_LINK, "Found errors on BMAC\n"); | ||
6230 | vars->link_up = 0; | ||
6231 | vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; | ||
6232 | vars->link_status &= ~LINK_STATUS_LINK_UP; | ||
6233 | } | ||
3418 | 6234 | ||
3419 | bnx2x_emac_enable(params, vars, 0); | 6235 | bnx2x_set_led(params, vars, |
6236 | LED_MODE_OPER, SPEED_10000); | ||
6237 | } else { | ||
6238 | rc = bnx2x_emac_program(params, vars); | ||
6239 | bnx2x_emac_enable(params, vars, 0); | ||
3420 | 6240 | ||
3421 | /* AN complete? */ | 6241 | /* AN complete? */ |
3422 | if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) | 6242 | if ((vars->link_status & |
3423 | && (!(vars->phy_flags & PHY_SGMII_FLAG)) && | 6243 | LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) |
3424 | SINGLE_MEDIA_DIRECT(params)) | 6244 | && (!(vars->phy_flags & PHY_SGMII_FLAG)) && |
3425 | bnx2x_set_gmii_tx_driver(params); | 6245 | SINGLE_MEDIA_DIRECT(params)) |
6246 | bnx2x_set_gmii_tx_driver(params); | ||
6247 | } | ||
3426 | } | 6248 | } |
3427 | 6249 | ||
3428 | /* PBF - link up */ | 6250 | /* PBF - link up */ |
3429 | if (!(CHIP_IS_E2(bp))) | 6251 | if (CHIP_IS_E1x(bp)) |
3430 | rc |= bnx2x_pbf_update(params, vars->flow_ctrl, | 6252 | rc |= bnx2x_pbf_update(params, vars->flow_ctrl, |
3431 | vars->line_speed); | 6253 | vars->line_speed); |
3432 | 6254 | ||
@@ -3451,17 +6273,18 @@ static u8 bnx2x_update_link_up(struct link_params *params, | |||
3451 | * external phy needs to be up, and at least one of the 2 | 6273 | * external phy needs to be up, and at least one of the 2 |
3452 | * external phy link must be up. | 6274 | * external phy link must be up. |
3453 | */ | 6275 | */ |
3454 | u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) | 6276 | int bnx2x_link_update(struct link_params *params, struct link_vars *vars) |
3455 | { | 6277 | { |
3456 | struct bnx2x *bp = params->bp; | 6278 | struct bnx2x *bp = params->bp; |
3457 | struct link_vars phy_vars[MAX_PHYS]; | 6279 | struct link_vars phy_vars[MAX_PHYS]; |
3458 | u8 port = params->port; | 6280 | u8 port = params->port; |
3459 | u8 link_10g, phy_index; | 6281 | u8 link_10g_plus, phy_index; |
3460 | u8 ext_phy_link_up = 0, cur_link_up, rc = 0; | 6282 | u8 ext_phy_link_up = 0, cur_link_up; |
6283 | int rc = 0; | ||
3461 | u8 is_mi_int = 0; | 6284 | u8 is_mi_int = 0; |
3462 | u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed; | 6285 | u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed; |
3463 | u8 active_external_phy = INT_PHY; | 6286 | u8 active_external_phy = INT_PHY; |
3464 | vars->link_status = 0; | 6287 | vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; |
3465 | for (phy_index = INT_PHY; phy_index < params->num_phys; | 6288 | for (phy_index = INT_PHY; phy_index < params->num_phys; |
3466 | phy_index++) { | 6289 | phy_index++) { |
3467 | phy_vars[phy_index].flow_ctrl = 0; | 6290 | phy_vars[phy_index].flow_ctrl = 0; |
@@ -3470,8 +6293,12 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) | |||
3470 | phy_vars[phy_index].duplex = DUPLEX_FULL; | 6293 | phy_vars[phy_index].duplex = DUPLEX_FULL; |
3471 | phy_vars[phy_index].phy_link_up = 0; | 6294 | phy_vars[phy_index].phy_link_up = 0; |
3472 | phy_vars[phy_index].link_up = 0; | 6295 | phy_vars[phy_index].link_up = 0; |
6296 | phy_vars[phy_index].fault_detected = 0; | ||
3473 | } | 6297 | } |
3474 | 6298 | ||
6299 | if (USES_WARPCORE(bp)) | ||
6300 | bnx2x_set_aer_mmd(params, ¶ms->phy[INT_PHY]); | ||
6301 | |||
3475 | DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n", | 6302 | DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n", |
3476 | port, (vars->phy_flags & PHY_XGXS_FLAG), | 6303 | port, (vars->phy_flags & PHY_XGXS_FLAG), |
3477 | REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); | 6304 | REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); |
@@ -3488,13 +6315,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) | |||
3488 | REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); | 6315 | REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); |
3489 | 6316 | ||
3490 | /* disable emac */ | 6317 | /* disable emac */ |
3491 | REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); | 6318 | if (!CHIP_IS_E3(bp)) |
6319 | REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); | ||
3492 | 6320 | ||
3493 | /* | 6321 | /* |
3494 | * Step 1: | 6322 | * Step 1: |
3495 | * Check external link change only for external phys, and apply | 6323 | * Check external link change only for external phys, and apply |
3496 | * priority selection between them in case the link on both phys | 6324 | * priority selection between them in case the link on both phys |
3497 | * is up. Note that the instead of the common vars, a temporary | 6325 | * is up. Note that instead of the common vars, a temporary |
3498 | * vars argument is used since each phy may have different link/ | 6326 | * vars argument is used since each phy may have different link/ |
3499 | * speed/duplex result | 6327 | * speed/duplex result |
3500 | */ | 6328 | */ |
@@ -3601,6 +6429,8 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) | |||
3601 | if (params->phy[active_external_phy].supported & | 6429 | if (params->phy[active_external_phy].supported & |
3602 | SUPPORTED_FIBRE) | 6430 | SUPPORTED_FIBRE) |
3603 | vars->link_status |= LINK_STATUS_SERDES_LINK; | 6431 | vars->link_status |= LINK_STATUS_SERDES_LINK; |
6432 | else | ||
6433 | vars->link_status &= ~LINK_STATUS_SERDES_LINK; | ||
3604 | DP(NETIF_MSG_LINK, "Active external phy selected: %x\n", | 6434 | DP(NETIF_MSG_LINK, "Active external phy selected: %x\n", |
3605 | active_external_phy); | 6435 | active_external_phy); |
3606 | } | 6436 | } |
@@ -3640,14 +6470,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) | |||
3640 | } | 6470 | } |
3641 | 6471 | ||
3642 | /* anything 10 and over uses the bmac */ | 6472 | /* anything 10 and over uses the bmac */ |
3643 | link_10g = ((vars->line_speed == SPEED_10000) || | 6473 | link_10g_plus = (vars->line_speed >= SPEED_10000); |
3644 | (vars->line_speed == SPEED_12000) || | ||
3645 | (vars->line_speed == SPEED_12500) || | ||
3646 | (vars->line_speed == SPEED_13000) || | ||
3647 | (vars->line_speed == SPEED_15000) || | ||
3648 | (vars->line_speed == SPEED_16000)); | ||
3649 | 6474 | ||
3650 | bnx2x_link_int_ack(params, vars, link_10g); | 6475 | bnx2x_link_int_ack(params, vars, link_10g_plus); |
3651 | 6476 | ||
3652 | /* | 6477 | /* |
3653 | * In case external phy link is up, and internal link is down | 6478 | * In case external phy link is up, and internal link is down |
@@ -3671,21 +6496,24 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) | |||
3671 | vars->phy_flags |= PHY_SGMII_FLAG; | 6496 | vars->phy_flags |= PHY_SGMII_FLAG; |
3672 | else | 6497 | else |
3673 | vars->phy_flags &= ~PHY_SGMII_FLAG; | 6498 | vars->phy_flags &= ~PHY_SGMII_FLAG; |
3674 | bnx2x_init_internal_phy(¶ms->phy[INT_PHY], | 6499 | |
3675 | params, | 6500 | if (params->phy[INT_PHY].config_init) |
6501 | params->phy[INT_PHY].config_init( | ||
6502 | ¶ms->phy[INT_PHY], params, | ||
3676 | vars); | 6503 | vars); |
3677 | } | 6504 | } |
3678 | } | 6505 | } |
3679 | /* | 6506 | /* |
3680 | * Link is up only if both local phy and external phy (in case of | 6507 | * Link is up only if both local phy and external phy (in case of |
3681 | * non-direct board) are up | 6508 | * non-direct board) are up and no fault detected on active PHY. |
3682 | */ | 6509 | */ |
3683 | vars->link_up = (vars->phy_link_up && | 6510 | vars->link_up = (vars->phy_link_up && |
3684 | (ext_phy_link_up || | 6511 | (ext_phy_link_up || |
3685 | SINGLE_MEDIA_DIRECT(params))); | 6512 | SINGLE_MEDIA_DIRECT(params)) && |
6513 | (phy_vars[active_external_phy].fault_detected == 0)); | ||
3686 | 6514 | ||
3687 | if (vars->link_up) | 6515 | if (vars->link_up) |
3688 | rc = bnx2x_update_link_up(params, vars, link_10g); | 6516 | rc = bnx2x_update_link_up(params, vars, link_10g_plus); |
3689 | else | 6517 | else |
3690 | rc = bnx2x_update_link_down(params, vars); | 6518 | rc = bnx2x_update_link_down(params, vars); |
3691 | 6519 | ||
@@ -3729,69 +6557,6 @@ static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, | |||
3729 | phy->ver_addr); | 6557 | phy->ver_addr); |
3730 | } | 6558 | } |
3731 | 6559 | ||
3732 | static void bnx2x_ext_phy_set_pause(struct link_params *params, | ||
3733 | struct bnx2x_phy *phy, | ||
3734 | struct link_vars *vars) | ||
3735 | { | ||
3736 | u16 val; | ||
3737 | struct bnx2x *bp = params->bp; | ||
3738 | /* read modify write pause advertizing */ | ||
3739 | bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); | ||
3740 | |||
3741 | val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; | ||
3742 | |||
3743 | /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ | ||
3744 | bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); | ||
3745 | if ((vars->ieee_fc & | ||
3746 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == | ||
3747 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { | ||
3748 | val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; | ||
3749 | } | ||
3750 | if ((vars->ieee_fc & | ||
3751 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == | ||
3752 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { | ||
3753 | val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; | ||
3754 | } | ||
3755 | DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val); | ||
3756 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val); | ||
3757 | } | ||
3758 | |||
3759 | static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, | ||
3760 | struct link_params *params, | ||
3761 | struct link_vars *vars) | ||
3762 | { | ||
3763 | struct bnx2x *bp = params->bp; | ||
3764 | u16 ld_pause; /* local */ | ||
3765 | u16 lp_pause; /* link partner */ | ||
3766 | u16 pause_result; | ||
3767 | u8 ret = 0; | ||
3768 | /* read twice */ | ||
3769 | |||
3770 | vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; | ||
3771 | |||
3772 | if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) | ||
3773 | vars->flow_ctrl = phy->req_flow_ctrl; | ||
3774 | else if (phy->req_line_speed != SPEED_AUTO_NEG) | ||
3775 | vars->flow_ctrl = params->req_fc_auto_adv; | ||
3776 | else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { | ||
3777 | ret = 1; | ||
3778 | bnx2x_cl45_read(bp, phy, | ||
3779 | MDIO_AN_DEVAD, | ||
3780 | MDIO_AN_REG_ADV_PAUSE, &ld_pause); | ||
3781 | bnx2x_cl45_read(bp, phy, | ||
3782 | MDIO_AN_DEVAD, | ||
3783 | MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); | ||
3784 | pause_result = (ld_pause & | ||
3785 | MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; | ||
3786 | pause_result |= (lp_pause & | ||
3787 | MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; | ||
3788 | DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", | ||
3789 | pause_result); | ||
3790 | bnx2x_pause_resolve(vars, pause_result); | ||
3791 | } | ||
3792 | return ret; | ||
3793 | } | ||
3794 | |||
3795 | static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp, | 6560 | static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp, |
3796 | struct bnx2x_phy *phy, | 6561 | struct bnx2x_phy *phy, |
3797 | struct link_vars *vars) | 6562 | struct link_vars *vars) |
@@ -3845,13 +6610,13 @@ static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy, | |||
3845 | pause_result); | 6610 | pause_result); |
3846 | } | 6611 | } |
3847 | } | 6612 | } |
3848 | static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, | 6613 | static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, |
3849 | struct bnx2x_phy *phy, | 6614 | struct bnx2x_phy *phy, |
3850 | u8 port) | 6615 | u8 port) |
3851 | { | 6616 | { |
3852 | u32 count = 0; | 6617 | u32 count = 0; |
3853 | u16 fw_ver1, fw_msgout; | 6618 | u16 fw_ver1, fw_msgout; |
3854 | u8 rc = 0; | 6619 | int rc = 0; |
3855 | 6620 | ||
3856 | /* Boot port from external ROM */ | 6621 | /* Boot port from external ROM */ |
3857 | /* EDC grst */ | 6622 | /* EDC grst */ |
@@ -3926,7 +6691,7 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, | |||
3926 | /******************************************************************/ | 6691 | /******************************************************************/ |
3927 | /* BCM8073 PHY SECTION */ | 6692 | /* BCM8073 PHY SECTION */ |
3928 | /******************************************************************/ | 6693 | /******************************************************************/ |
3929 | static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) | 6694 | static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) |
3930 | { | 6695 | { |
3931 | /* This is only required for 8073A1, version 102 only */ | 6696 | /* This is only required for 8073A1, version 102 only */ |
3932 | u16 val; | 6697 | u16 val; |
@@ -3952,7 +6717,7 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) | |||
3952 | return 1; | 6717 | return 1; |
3953 | } | 6718 | } |
3954 | 6719 | ||
3955 | static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) | 6720 | static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) |
3956 | { | 6721 | { |
3957 | u16 val, cnt, cnt1 ; | 6722 | u16 val, cnt, cnt1 ; |
3958 | 6723 | ||
@@ -4059,9 +6824,9 @@ static void bnx2x_8073_set_pause_cl37(struct link_params *params, | |||
4059 | msleep(500); | 6824 | msleep(500); |
4060 | } | 6825 | } |
4061 | 6826 | ||
4062 | static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, | 6827 | static int bnx2x_8073_config_init(struct bnx2x_phy *phy, |
4063 | struct link_params *params, | 6828 | struct link_params *params, |
4064 | struct link_vars *vars) | 6829 | struct link_vars *vars) |
4065 | { | 6830 | { |
4066 | struct bnx2x *bp = params->bp; | 6831 | struct bnx2x *bp = params->bp; |
4067 | u16 val = 0, tmp1; | 6832 | u16 val = 0, tmp1; |
@@ -4081,9 +6846,9 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, | |||
4081 | 6846 | ||
4082 | /* enable LASI */ | 6847 | /* enable LASI */ |
4083 | bnx2x_cl45_write(bp, phy, | 6848 | bnx2x_cl45_write(bp, phy, |
4084 | MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, (1<<2)); | 6849 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); |
4085 | bnx2x_cl45_write(bp, phy, | 6850 | bnx2x_cl45_write(bp, phy, |
4086 | MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x0004); | 6851 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004); |
4087 | 6852 | ||
4088 | bnx2x_8073_set_pause_cl37(params, phy, vars); | 6853 | bnx2x_8073_set_pause_cl37(params, phy, vars); |
4089 | 6854 | ||
@@ -4091,7 +6856,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, | |||
4091 | MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); | 6856 | MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); |
4092 | 6857 | ||
4093 | bnx2x_cl45_read(bp, phy, | 6858 | bnx2x_cl45_read(bp, phy, |
4094 | MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1); | 6859 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); |
4095 | 6860 | ||
4096 | DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); | 6861 | DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); |
4097 | 6862 | ||
@@ -4225,7 +6990,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, | |||
4225 | u16 an1000_status = 0; | 6990 | u16 an1000_status = 0; |
4226 | 6991 | ||
4227 | bnx2x_cl45_read(bp, phy, | 6992 | bnx2x_cl45_read(bp, phy, |
4228 | MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1); | 6993 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); |
4229 | 6994 | ||
4230 | DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1); | 6995 | DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1); |
4231 | 6996 | ||
@@ -4241,7 +7006,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, | |||
4241 | 7006 | ||
4242 | /* Check the LASI */ | 7007 | /* Check the LASI */ |
4243 | bnx2x_cl45_read(bp, phy, | 7008 | bnx2x_cl45_read(bp, phy, |
4244 | MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2); | 7009 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2); |
4245 | 7010 | ||
4246 | DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2); | 7011 | DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2); |
4247 | 7012 | ||
@@ -4367,9 +7132,9 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy, | |||
4367 | /******************************************************************/ | 7132 | /******************************************************************/ |
4368 | /* BCM8705 PHY SECTION */ | 7133 | /* BCM8705 PHY SECTION */ |
4369 | /******************************************************************/ | 7134 | /******************************************************************/ |
4370 | static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy, | 7135 | static int bnx2x_8705_config_init(struct bnx2x_phy *phy, |
4371 | struct link_params *params, | 7136 | struct link_params *params, |
4372 | struct link_vars *vars) | 7137 | struct link_vars *vars) |
4373 | { | 7138 | { |
4374 | struct bnx2x *bp = params->bp; | 7139 | struct bnx2x *bp = params->bp; |
4375 | DP(NETIF_MSG_LINK, "init 8705\n"); | 7140 | DP(NETIF_MSG_LINK, "init 8705\n"); |
@@ -4430,6 +7195,30 @@ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy, | |||
4430 | /******************************************************************/ | 7195 | /******************************************************************/ |
4431 | /* SFP+ module Section */ | 7196 | /* SFP+ module Section */ |
4432 | /******************************************************************/ | 7197 | /******************************************************************/ |
7198 | static void bnx2x_set_disable_pmd_transmit(struct link_params *params, | ||
7199 | struct bnx2x_phy *phy, | ||
7200 | u8 pmd_dis) | ||
7201 | { | ||
7202 | struct bnx2x *bp = params->bp; | ||
7203 | /* | ||
7204 | * Disable transmitter only for bootcodes which can enable it afterwards | ||
7205 | * (for D3 link) | ||
7206 | */ | ||
7207 | if (pmd_dis) { | ||
7208 | if (params->feature_config_flags & | ||
7209 | FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED) | ||
7210 | DP(NETIF_MSG_LINK, "Disabling PMD transmitter\n"); | ||
7211 | else { | ||
7212 | DP(NETIF_MSG_LINK, "NOT disabling PMD transmitter\n"); | ||
7213 | return; | ||
7214 | } | ||
7215 | } else | ||
7216 | DP(NETIF_MSG_LINK, "Enabling PMD transmitter\n"); | ||
7217 | bnx2x_cl45_write(bp, phy, | ||
7218 | MDIO_PMA_DEVAD, | ||
7219 | MDIO_PMA_REG_TX_DISABLE, pmd_dis); | ||
7220 | } | ||
7221 | |||
4433 | static u8 bnx2x_get_gpio_port(struct link_params *params) | 7222 | static u8 bnx2x_get_gpio_port(struct link_params *params) |
4434 | { | 7223 | { |
4435 | u8 gpio_port; | 7224 | u8 gpio_port; |
@@ -4443,9 +7232,10 @@ static u8 bnx2x_get_gpio_port(struct link_params *params) | |||
4443 | swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); | 7232 | swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); |
4444 | return gpio_port ^ (swap_val && swap_override); | 7233 | return gpio_port ^ (swap_val && swap_override); |
4445 | } | 7234 | } |
4446 | static void bnx2x_sfp_set_transmitter(struct link_params *params, | 7235 | |
4447 | struct bnx2x_phy *phy, | 7236 | static void bnx2x_sfp_e1e2_set_transmitter(struct link_params *params, |
4448 | u8 tx_en) | 7237 | struct bnx2x_phy *phy, |
7238 | u8 tx_en) | ||
4449 | { | 7239 | { |
4450 | u16 val; | 7240 | u16 val; |
4451 | u8 port = params->port; | 7241 | u8 port = params->port; |
@@ -4500,9 +7290,21 @@ static void bnx2x_sfp_set_transmitter(struct link_params *params, | |||
4500 | } | 7290 | } |
4501 | } | 7291 | } |
4502 | 7292 | ||
4503 | static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, | 7293 | static void bnx2x_sfp_set_transmitter(struct link_params *params, |
4504 | struct link_params *params, | 7294 | struct bnx2x_phy *phy, |
4505 | u16 addr, u8 byte_cnt, u8 *o_buf) | 7295 | u8 tx_en) |
7296 | { | ||
7297 | struct bnx2x *bp = params->bp; | ||
7298 | DP(NETIF_MSG_LINK, "Setting SFP+ transmitter to %d\n", tx_en); | ||
7299 | if (CHIP_IS_E3(bp)) | ||
7300 | bnx2x_sfp_e3_set_transmitter(params, phy, tx_en); | ||
7301 | else | ||
7302 | bnx2x_sfp_e1e2_set_transmitter(params, phy, tx_en); | ||
7303 | } | ||
7304 | |||
7305 | static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, | ||
7306 | struct link_params *params, | ||
7307 | u16 addr, u8 byte_cnt, u8 *o_buf) | ||
4506 | { | 7308 | { |
4507 | struct bnx2x *bp = params->bp; | 7309 | struct bnx2x *bp = params->bp; |
4508 | u16 val = 0; | 7310 | u16 val = 0; |
@@ -4566,9 +7368,45 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, | |||
4566 | return -EINVAL; | 7368 | return -EINVAL; |
4567 | } | 7369 | } |
4568 | 7370 | ||
4569 | static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, | 7371 | static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, |
4570 | struct link_params *params, | 7372 | struct link_params *params, |
4571 | u16 addr, u8 byte_cnt, u8 *o_buf) | 7373 | u16 addr, u8 byte_cnt, |
7374 | u8 *o_buf) | ||
7375 | { | ||
7376 | int rc = 0; | ||
7377 | u8 i, j = 0, cnt = 0; | ||
7378 | u32 data_array[4]; | ||
7379 | u16 addr32; | ||
7380 | struct bnx2x *bp = params->bp; | ||
7381 | /*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:" | ||
7382 | " addr %d, cnt %d\n", | ||
7383 | addr, byte_cnt);*/ | ||
7384 | if (byte_cnt > 16) { | ||
7385 | DP(NETIF_MSG_LINK, "Reading from eeprom is" | ||
7386 | " is limited to 16 bytes\n"); | ||
7387 | return -EINVAL; | ||
7388 | } | ||
7389 | |||
7390 | /* 4 byte aligned address */ | ||
7391 | addr32 = addr & (~0x3); | ||
7392 | do { | ||
7393 | rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt, | ||
7394 | data_array); | ||
7395 | } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT)); | ||
7396 | |||
7397 | if (rc == 0) { | ||
7398 | for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) { | ||
7399 | o_buf[j] = *((u8 *)data_array + i); | ||
7400 | j++; | ||
7401 | } | ||
7402 | } | ||
7403 | |||
7404 | return rc; | ||
7405 | } | ||
7406 | |||
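
The warpcore EEPROM read above works in 4-byte units: it aligns the start address down, pulls whole 32-bit words through bnx2x_bsc_read() (retrying up to I2C_WA_RETRY_CNT times), and then copies the requested bytes back out of the word buffer. A self-contained sketch of that align-read-extract pattern, with a fake read_words() standing in for the BSC access and the retry loop left out:

    /*
     * Align the start address down to 4 bytes, read whole 32-bit words,
     * then copy the requested byte span out of the word buffer.
     * read_words() fakes EEPROM contents so the extraction can be run.
     */
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static int read_words(uint16_t addr32, uint32_t *buf, size_t words)
    {
        uint8_t bytes[16];                        /* fake device contents */
        size_t i;

        for (i = 0; i < sizeof(bytes); i++)
            bytes[i] = (uint8_t)(addr32 + i);     /* byte at a holds a & 0xff */
        memcpy(buf, bytes, words * sizeof(*buf));
        return 0;
    }

    static int eeprom_read(uint16_t addr, uint8_t byte_cnt, uint8_t *out)
    {
        uint32_t data[4];
        uint16_t addr32 = addr & ~0x3u;           /* 4-byte aligned start */
        uint16_t ofs = addr - addr32;

        if (byte_cnt > 16 || ofs + byte_cnt > sizeof(data))
            return -1;                            /* transfer window exceeded */
        if (read_words(addr32, data, sizeof(data) / sizeof(data[0])))
            return -1;
        memcpy(out, (uint8_t *)data + ofs, byte_cnt);
        return 0;
    }

    int main(void)
    {
        uint8_t buf[5];
        size_t i;

        if (eeprom_read(0x63, sizeof(buf), buf) == 0)
            for (i = 0; i < sizeof(buf); i++)
                printf("%02x ", buf[i]);          /* 63 64 65 66 67 */
        printf("\n");
        return 0;
    }
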
7407 | static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, | ||
7408 | struct link_params *params, | ||
7409 | u16 addr, u8 byte_cnt, u8 *o_buf) | ||
4572 | { | 7410 | { |
4573 | struct bnx2x *bp = params->bp; | 7411 | struct bnx2x *bp = params->bp; |
4574 | u16 val, i; | 7412 | u16 val, i; |
@@ -4653,27 +7491,39 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, | |||
4653 | return -EINVAL; | 7491 | return -EINVAL; |
4654 | } | 7492 | } |
4655 | 7493 | ||
4656 | u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, | 7494 | int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, |
4657 | struct link_params *params, u16 addr, | 7495 | struct link_params *params, u16 addr, |
4658 | u8 byte_cnt, u8 *o_buf) | 7496 | u8 byte_cnt, u8 *o_buf) |
4659 | { | 7497 | { |
4660 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) | 7498 | int rc = -EINVAL; |
4661 | return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, | 7499 | switch (phy->type) { |
4662 | byte_cnt, o_buf); | 7500 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: |
4663 | else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) | 7501 | rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, |
4664 | return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr, | 7502 | byte_cnt, o_buf); |
4665 | byte_cnt, o_buf); | 7503 | break; |
4666 | return -EINVAL; | 7504 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: |
7505 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: | ||
7506 | rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr, | ||
7507 | byte_cnt, o_buf); | ||
7508 | break; | ||
7509 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: | ||
7510 | rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr, | ||
7511 | byte_cnt, o_buf); | ||
7512 | break; | ||
7513 | } | ||
7514 | return rc; | ||
4667 | } | 7515 | } |
4668 | 7516 | ||
4669 | static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, | 7517 | static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, |
4670 | struct link_params *params, | 7518 | struct link_params *params, |
4671 | u16 *edc_mode) | 7519 | u16 *edc_mode) |
4672 | { | 7520 | { |
4673 | struct bnx2x *bp = params->bp; | 7521 | struct bnx2x *bp = params->bp; |
7522 | u32 sync_offset = 0, phy_idx, media_types; | ||
4674 | u8 val, check_limiting_mode = 0; | 7523 | u8 val, check_limiting_mode = 0; |
4675 | *edc_mode = EDC_MODE_LIMITING; | 7524 | *edc_mode = EDC_MODE_LIMITING; |
4676 | 7525 | ||
7526 | phy->media_type = ETH_PHY_UNSPECIFIED; | ||
4677 | /* First check for copper cable */ | 7527 | /* First check for copper cable */ |
4678 | if (bnx2x_read_sfp_module_eeprom(phy, | 7528 | if (bnx2x_read_sfp_module_eeprom(phy, |
4679 | params, | 7529 | params, |
@@ -4688,7 +7538,7 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
4688 | case SFP_EEPROM_CON_TYPE_VAL_COPPER: | 7538 | case SFP_EEPROM_CON_TYPE_VAL_COPPER: |
4689 | { | 7539 | { |
4690 | u8 copper_module_type; | 7540 | u8 copper_module_type; |
4691 | 7541 | phy->media_type = ETH_PHY_DA_TWINAX; | |
4692 | /* | 7542 | /* |
4693 | * Check if it's an active cable (includes SFP+ module) | 7543 | * Check if it's an active cable (includes SFP+ module) |
4694 | * or a passive cable | 7544 | * or a passive cable |
@@ -4697,8 +7547,7 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
4697 | params, | 7547 | params, |
4698 | SFP_EEPROM_FC_TX_TECH_ADDR, | 7548 | SFP_EEPROM_FC_TX_TECH_ADDR, |
4699 | 1, | 7549 | 1, |
4700 | &copper_module_type) != | 7550 | &copper_module_type) != 0) { |
4701 | 0) { | ||
4702 | DP(NETIF_MSG_LINK, | 7551 | DP(NETIF_MSG_LINK, |
4703 | "Failed to read copper-cable-type" | 7552 | "Failed to read copper-cable-type" |
4704 | " from SFP+ EEPROM\n"); | 7553 | " from SFP+ EEPROM\n"); |
@@ -4723,6 +7572,7 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
4723 | break; | 7572 | break; |
4724 | } | 7573 | } |
4725 | case SFP_EEPROM_CON_TYPE_VAL_LC: | 7574 | case SFP_EEPROM_CON_TYPE_VAL_LC: |
7575 | phy->media_type = ETH_PHY_SFP_FIBER; | ||
4726 | DP(NETIF_MSG_LINK, "Optic module detected\n"); | 7576 | DP(NETIF_MSG_LINK, "Optic module detected\n"); |
4727 | check_limiting_mode = 1; | 7577 | check_limiting_mode = 1; |
4728 | break; | 7578 | break; |
@@ -4731,7 +7581,22 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
4731 | val); | 7581 | val); |
4732 | return -EINVAL; | 7582 | return -EINVAL; |
4733 | } | 7583 | } |
4734 | 7584 | sync_offset = params->shmem_base + | |
7585 | offsetof(struct shmem_region, | ||
7586 | dev_info.port_hw_config[params->port].media_type); | ||
7587 | media_types = REG_RD(bp, sync_offset); | ||
7588 | /* Update media type for non-PMF sync */ | ||
7589 | for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { | ||
7590 | if (&(params->phy[phy_idx]) == phy) { | ||
7591 | media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK << | ||
7592 | (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx)); | ||
7593 | media_types |= ((phy->media_type & | ||
7594 | PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) << | ||
7595 | (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx)); | ||
7596 | break; | ||
7597 | } | ||
7598 | } | ||
7599 | REG_WR(bp, sync_offset, media_types); | ||
4735 | if (check_limiting_mode) { | 7600 | if (check_limiting_mode) { |
4736 | u8 options[SFP_EEPROM_OPTIONS_SIZE]; | 7601 | u8 options[SFP_EEPROM_OPTIONS_SIZE]; |
4737 | if (bnx2x_read_sfp_module_eeprom(phy, | 7602 | if (bnx2x_read_sfp_module_eeprom(phy, |
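The block added to bnx2x_get_edc_mode() above also publishes the detected media type to shared memory so that non-PMF functions pick it up: all PHYs share one 32-bit media_types word, and the field belonging to this phy_idx is cleared and rewritten with the usual mask << (shift * index) pattern before the word is written back. The stand-alone sketch below repeats just that read-modify-write arithmetic with an invented 8-bit field width; the real mask and shift are the PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK / PHY1_SHIFT constants.

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only: assume 8 bits of media type per PHY index. */
#define MEDIA_TYPE_MASK   0xffu
#define MEDIA_TYPE_SHIFT  8

static uint32_t set_media_type(uint32_t media_types, unsigned int phy_idx,
                               uint32_t media_type)
{
        media_types &= ~(MEDIA_TYPE_MASK << (MEDIA_TYPE_SHIFT * phy_idx));
        media_types |= (media_type & MEDIA_TYPE_MASK) << (MEDIA_TYPE_SHIFT * phy_idx);
        return media_types;
}

int main(void)
{
        uint32_t word = 0x00030201;             /* pretend PHY0..PHY2 fields  */

        word = set_media_type(word, 1, 0x07);   /* update only the PHY1 field */
        printf("media_types = 0x%08x\n", (unsigned)word); /* prints 0x00030701 */
        return 0;
}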
@@ -4755,8 +7620,8 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
4755 | * This function reads the relevant field from the module (SFP+), and verifies it | 7620 | * This function reads the relevant field from the module (SFP+), and verifies it |
4756 | * is compliant with this board | 7621 | * is compliant with this board |
4757 | */ | 7622 | */ |
4758 | static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy, | 7623 | static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, |
4759 | struct link_params *params) | 7624 | struct link_params *params) |
4760 | { | 7625 | { |
4761 | struct bnx2x *bp = params->bp; | 7626 | struct bnx2x *bp = params->bp; |
4762 | u32 val, cmd; | 7627 | u32 val, cmd; |
@@ -4825,8 +7690,8 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy, | |||
4825 | return -EINVAL; | 7690 | return -EINVAL; |
4826 | } | 7691 | } |
4827 | 7692 | ||
4828 | static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, | 7693 | static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, |
4829 | struct link_params *params) | 7694 | struct link_params *params) |
4830 | 7695 | ||
4831 | { | 7696 | { |
4832 | u8 val; | 7697 | u8 val; |
@@ -4858,8 +7723,8 @@ static void bnx2x_8727_power_module(struct bnx2x *bp, | |||
4858 | * In the GPIO register, bit 4 is used to determine if the GPIOs are | 7723 | * In the GPIO register, bit 4 is used to determine if the GPIOs are |
4859 | * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for | 7724 | * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for |
4860 | * output | 7725 | * output |
4861 | * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0 | 7726 | * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0 |
4862 | * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1 | 7727 | * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 val is 1 |
4863 | * where the 1st bit is the over-current(only input), and 2nd bit is | 7728 | * where the 1st bit is the over-current(only input), and 2nd bit is |
4864 | * for power( only output ) | 7729 | * for power( only output ) |
4865 | * | 7730 | * |
@@ -4868,15 +7733,14 @@ static void bnx2x_8727_power_module(struct bnx2x *bp, | |||
4868 | */ | 7733 | */ |
4869 | if (phy->flags & FLAGS_NOC) | 7734 | if (phy->flags & FLAGS_NOC) |
4870 | return; | 7735 | return; |
4871 | if (!(phy->flags & | 7736 | if (is_power_up) |
4872 | FLAGS_NOC) && is_power_up) | ||
4873 | val = (1<<4); | 7737 | val = (1<<4); |
4874 | else | 7738 | else |
4875 | /* | 7739 | /* |
4876 | * Set GPIO control to OUTPUT, and set the power bit | 7740 | * Set GPIO control to OUTPUT, and set the power bit |
4877 | * according to is_power_up | 7741 | * according to is_power_up |
4878 | */ | 7742 | */ |
4879 | val = ((!(is_power_up)) << 1); | 7743 | val = (1<<1); |
4880 | 7744 | ||
4881 | bnx2x_cl45_write(bp, phy, | 7745 | bnx2x_cl45_write(bp, phy, |
4882 | MDIO_PMA_DEVAD, | 7746 | MDIO_PMA_DEVAD, |
@@ -4884,9 +7748,9 @@ static void bnx2x_8727_power_module(struct bnx2x *bp, | |||
4884 | val); | 7748 | val); |
4885 | } | 7749 | } |
4886 | 7750 | ||
4887 | static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp, | 7751 | static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp, |
4888 | struct bnx2x_phy *phy, | 7752 | struct bnx2x_phy *phy, |
4889 | u16 edc_mode) | 7753 | u16 edc_mode) |
4890 | { | 7754 | { |
4891 | u16 cur_limiting_mode; | 7755 | u16 cur_limiting_mode; |
4892 | 7756 | ||
@@ -4934,9 +7798,9 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp, | |||
4934 | return 0; | 7798 | return 0; |
4935 | } | 7799 | } |
4936 | 7800 | ||
4937 | static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp, | 7801 | static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp, |
4938 | struct bnx2x_phy *phy, | 7802 | struct bnx2x_phy *phy, |
4939 | u16 edc_mode) | 7803 | u16 edc_mode) |
4940 | { | 7804 | { |
4941 | u16 phy_identifier; | 7805 | u16 phy_identifier; |
4942 | u16 rom_ver2_val; | 7806 | u16 rom_ver2_val; |
@@ -4989,7 +7853,7 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, | |||
4989 | } | 7853 | } |
4990 | } | 7854 | } |
4991 | 7855 | ||
4992 | static void bnx2x_set_sfp_module_fault_led(struct link_params *params, | 7856 | static void bnx2x_set_e1e2_module_fault_led(struct link_params *params, |
4993 | u8 gpio_mode) | 7857 | u8 gpio_mode) |
4994 | { | 7858 | { |
4995 | struct bnx2x *bp = params->bp; | 7859 | struct bnx2x *bp = params->bp; |
@@ -5021,12 +7885,146 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params, | |||
5021 | } | 7885 | } |
5022 | } | 7886 | } |
5023 | 7887 | ||
5024 | static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, | 7888 | static void bnx2x_set_e3_module_fault_led(struct link_params *params, |
5025 | struct link_params *params) | 7889 | u8 gpio_mode) |
7890 | { | ||
7891 | u32 pin_cfg; | ||
7892 | u8 port = params->port; | ||
7893 | struct bnx2x *bp = params->bp; | ||
7894 | pin_cfg = (REG_RD(bp, params->shmem_base + | ||
7895 | offsetof(struct shmem_region, | ||
7896 | dev_info.port_hw_config[port].e3_sfp_ctrl)) & | ||
7897 | PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >> | ||
7898 | PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT; | ||
7899 | DP(NETIF_MSG_LINK, "Setting Fault LED to %d using pin cfg %d\n", | ||
7900 | gpio_mode, pin_cfg); | ||
7901 | bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode); | ||
7902 | } | ||
7903 | |||
7904 | static void bnx2x_set_sfp_module_fault_led(struct link_params *params, | ||
7905 | u8 gpio_mode) | ||
7906 | { | ||
7907 | struct bnx2x *bp = params->bp; | ||
7908 | DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode); | ||
7909 | if (CHIP_IS_E3(bp)) { | ||
7910 | /* | ||
7911 | * Low ==> if SFP+ module is supported otherwise | ||
7912 | * High ==> if SFP+ module is not on the approved vendor list | ||
7913 | */ | ||
7914 | bnx2x_set_e3_module_fault_led(params, gpio_mode); | ||
7915 | } else | ||
7916 | bnx2x_set_e1e2_module_fault_led(params, gpio_mode); | ||
7917 | } | ||
7918 | |||
7919 | static void bnx2x_warpcore_power_module(struct link_params *params, | ||
7920 | struct bnx2x_phy *phy, | ||
7921 | u8 power) | ||
7922 | { | ||
7923 | u32 pin_cfg; | ||
7924 | struct bnx2x *bp = params->bp; | ||
7925 | |||
7926 | pin_cfg = (REG_RD(bp, params->shmem_base + | ||
7927 | offsetof(struct shmem_region, | ||
7928 | dev_info.port_hw_config[params->port].e3_sfp_ctrl)) & | ||
7929 | PORT_HW_CFG_E3_PWR_DIS_MASK) >> | ||
7930 | PORT_HW_CFG_E3_PWR_DIS_SHIFT; | ||
7931 | |||
7932 | if (pin_cfg == PIN_CFG_NA) | ||
7933 | return; | ||
7934 | DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n", | ||
7935 | power, pin_cfg); | ||
7936 | /* | ||
7937 | * Low ==> corresponding SFP+ module is powered | ||
7938 | * high ==> the SFP+ module is powered down | ||
7939 | */ | ||
7940 | bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); | ||
7941 | } | ||
7942 | |||
7943 | static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy, | ||
7944 | struct link_params *params) | ||
7945 | { | ||
7946 | bnx2x_warpcore_power_module(params, phy, 0); | ||
7947 | } | ||
7948 | |||
7949 | static void bnx2x_power_sfp_module(struct link_params *params, | ||
7950 | struct bnx2x_phy *phy, | ||
7951 | u8 power) | ||
7952 | { | ||
7953 | struct bnx2x *bp = params->bp; | ||
7954 | DP(NETIF_MSG_LINK, "Setting SFP+ power to %x\n", power); | ||
7955 | |||
7956 | switch (phy->type) { | ||
7957 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: | ||
7958 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: | ||
7959 | bnx2x_8727_power_module(params->bp, phy, power); | ||
7960 | break; | ||
7961 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: | ||
7962 | bnx2x_warpcore_power_module(params, phy, power); | ||
7963 | break; | ||
7964 | default: | ||
7965 | break; | ||
7966 | } | ||
7967 | } | ||
7968 | static void bnx2x_warpcore_set_limiting_mode(struct link_params *params, | ||
7969 | struct bnx2x_phy *phy, | ||
7970 | u16 edc_mode) | ||
7971 | { | ||
7972 | u16 val = 0; | ||
7973 | u16 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; | ||
7974 | struct bnx2x *bp = params->bp; | ||
7975 | |||
7976 | u8 lane = bnx2x_get_warpcore_lane(phy, params); | ||
7977 | /* This is a global register which controls all lanes */ | ||
7978 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
7979 | MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); | ||
7980 | val &= ~(0xf << (lane << 2)); | ||
7981 | |||
7982 | switch (edc_mode) { | ||
7983 | case EDC_MODE_LINEAR: | ||
7984 | case EDC_MODE_LIMITING: | ||
7985 | mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; | ||
7986 | break; | ||
7987 | case EDC_MODE_PASSIVE_DAC: | ||
7988 | mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; | ||
7989 | break; | ||
7990 | default: | ||
7991 | break; | ||
7992 | } | ||
7993 | |||
7994 | val |= (mode << (lane << 2)); | ||
7995 | bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, | ||
7996 | MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val); | ||
7997 | /* A must read */ | ||
7998 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
7999 | MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); | ||
8000 | |||
8001 | |||
8002 | } | ||
8003 | |||
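bnx2x_warpcore_set_limiting_mode() above programs a single, lane-shared firmware-mode register in which every lane owns a 4-bit field, so the lane's nibble is cleared with ~(0xf << (lane << 2)) and refilled with the selected mode before the value is written back (and read once more, per the "A must read" comment). The stand-alone sketch below shows only that nibble arithmetic on a plain integer; the register width and values are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: replace the 4-bit field of 'lane' in a 16-bit register image. */
static uint16_t set_lane_nibble(uint16_t reg, unsigned int lane, unsigned int mode)
{
        reg &= ~(0xf << (lane << 2));           /* clear this lane's nibble      */
        reg |= (mode & 0xf) << (lane << 2);     /* install the new firmware mode */
        return reg;
}

int main(void)
{
        uint16_t reg = 0x3210;                  /* lanes 0..3 hold modes 0,1,2,3 */

        reg = set_lane_nibble(reg, 2, 0x5);     /* only lane 2 changes           */
        printf("0x%04x\n", reg);                /* prints 0x3510                 */
        return 0;
}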
8004 | static void bnx2x_set_limiting_mode(struct link_params *params, | ||
8005 | struct bnx2x_phy *phy, | ||
8006 | u16 edc_mode) | ||
8007 | { | ||
8008 | switch (phy->type) { | ||
8009 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: | ||
8010 | bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode); | ||
8011 | break; | ||
8012 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: | ||
8013 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: | ||
8014 | bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode); | ||
8015 | break; | ||
8016 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: | ||
8017 | bnx2x_warpcore_set_limiting_mode(params, phy, edc_mode); | ||
8018 | break; | ||
8019 | } | ||
8020 | } | ||
8021 | |||
8022 | int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, | ||
8023 | struct link_params *params) | ||
5026 | { | 8024 | { |
5027 | struct bnx2x *bp = params->bp; | 8025 | struct bnx2x *bp = params->bp; |
5028 | u16 edc_mode; | 8026 | u16 edc_mode; |
5029 | u8 rc = 0; | 8027 | int rc = 0; |
5030 | 8028 | ||
5031 | u32 val = REG_RD(bp, params->shmem_base + | 8029 | u32 val = REG_RD(bp, params->shmem_base + |
5032 | offsetof(struct shmem_region, dev_info. | 8030 | offsetof(struct shmem_region, dev_info. |
@@ -5034,7 +8032,8 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, | |||
5034 | 8032 | ||
5035 | DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n", | 8033 | DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n", |
5036 | params->port); | 8034 | params->port); |
5037 | 8035 | /* Power up module */ | |
8036 | bnx2x_power_sfp_module(params, phy, 1); | ||
5038 | if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) { | 8037 | if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) { |
5039 | DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); | 8038 | DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); |
5040 | return -EINVAL; | 8039 | return -EINVAL; |
@@ -5046,12 +8045,11 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, | |||
5046 | bnx2x_set_sfp_module_fault_led(params, | 8045 | bnx2x_set_sfp_module_fault_led(params, |
5047 | MISC_REGISTERS_GPIO_HIGH); | 8046 | MISC_REGISTERS_GPIO_HIGH); |
5048 | 8047 | ||
5049 | if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) && | 8048 | /* Check if need to power down the SFP+ module */ |
5050 | ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == | 8049 | if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == |
5051 | PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) { | 8050 | PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) { |
5052 | /* Shutdown SFP+ module */ | ||
5053 | DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n"); | 8051 | DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n"); |
5054 | bnx2x_8727_power_module(bp, phy, 0); | 8052 | bnx2x_power_sfp_module(params, phy, 0); |
5055 | return rc; | 8053 | return rc; |
5056 | } | 8054 | } |
5057 | } else { | 8055 | } else { |
@@ -5059,18 +8057,12 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, | |||
5059 | bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); | 8057 | bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); |
5060 | } | 8058 | } |
5061 | 8059 | ||
5062 | /* power up the SFP module */ | ||
5063 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) | ||
5064 | bnx2x_8727_power_module(bp, phy, 1); | ||
5065 | |||
5066 | /* | 8060 | /* |
5067 | * Check and set limiting mode / LRM mode on 8726. On 8727 it | 8061 | * Check and set limiting mode / LRM mode on 8726. On 8727 it |
5068 | * is done automatically | 8062 | * is done automatically |
5069 | */ | 8063 | */ |
5070 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) | 8064 | bnx2x_set_limiting_mode(params, phy, edc_mode); |
5071 | bnx2x_8726_set_limiting_mode(bp, phy, edc_mode); | 8065 | |
5072 | else | ||
5073 | bnx2x_8727_set_limiting_mode(bp, phy, edc_mode); | ||
5074 | /* | 8066 | /* |
5075 | * Enable transmit for this module if the module is approved, or | 8067 | * Enable transmit for this module if the module is approved, or |
5076 | * if unapproved modules should also enable the Tx laser | 8068 | * if unapproved modules should also enable the Tx laser |
@@ -5088,23 +8080,33 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, | |||
5088 | void bnx2x_handle_module_detect_int(struct link_params *params) | 8080 | void bnx2x_handle_module_detect_int(struct link_params *params) |
5089 | { | 8081 | { |
5090 | struct bnx2x *bp = params->bp; | 8082 | struct bnx2x *bp = params->bp; |
5091 | struct bnx2x_phy *phy = ¶ms->phy[EXT_PHY1]; | 8083 | struct bnx2x_phy *phy; |
5092 | u32 gpio_val; | 8084 | u32 gpio_val; |
5093 | u8 port = params->port; | 8085 | u8 gpio_num, gpio_port; |
8086 | if (CHIP_IS_E3(bp)) | ||
8087 | phy = ¶ms->phy[INT_PHY]; | ||
8088 | else | ||
8089 | phy = ¶ms->phy[EXT_PHY1]; | ||
8090 | |||
8091 | if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base, | ||
8092 | params->port, &gpio_num, &gpio_port) == | ||
8093 | -EINVAL) { | ||
8094 | DP(NETIF_MSG_LINK, "Failed to get MOD_ABS interrupt config\n"); | ||
8095 | return; | ||
8096 | } | ||
5094 | 8097 | ||
5095 | /* Set valid module led off */ | 8098 | /* Set valid module led off */ |
5096 | bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH); | 8099 | bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH); |
5097 | 8100 | ||
5098 | /* Get current gpio val reflecting module plugged in / out*/ | 8101 | /* Get current gpio val reflecting module plugged in / out*/ |
5099 | gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port); | 8102 | gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port); |
5100 | 8103 | ||
5101 | /* Call the handling function in case module is detected */ | 8104 | /* Call the handling function in case module is detected */ |
5102 | if (gpio_val == 0) { | 8105 | if (gpio_val == 0) { |
5103 | 8106 | bnx2x_power_sfp_module(params, phy, 1); | |
5104 | bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3, | 8107 | bnx2x_set_gpio_int(bp, gpio_num, |
5105 | MISC_REGISTERS_GPIO_INT_OUTPUT_CLR, | 8108 | MISC_REGISTERS_GPIO_INT_OUTPUT_CLR, |
5106 | port); | 8109 | gpio_port); |
5107 | |||
5108 | if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) | 8110 | if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) |
5109 | bnx2x_sfp_module_detection(phy, params); | 8111 | bnx2x_sfp_module_detection(phy, params); |
5110 | else | 8112 | else |
@@ -5115,13 +8117,14 @@ void bnx2x_handle_module_detect_int(struct link_params *params) | |||
5115 | port_feature_config[params->port]. | 8117 | port_feature_config[params->port]. |
5116 | config)); | 8118 | config)); |
5117 | 8119 | ||
5118 | bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3, | 8120 | bnx2x_set_gpio_int(bp, gpio_num, |
5119 | MISC_REGISTERS_GPIO_INT_OUTPUT_SET, | 8121 | MISC_REGISTERS_GPIO_INT_OUTPUT_SET, |
5120 | port); | 8122 | gpio_port); |
5121 | /* | 8123 | /* |
5122 | * Module was plugged out. | 8124 | * Module was plugged out. |
5123 | * Disable transmit for this module | 8125 | * Disable transmit for this module |
5124 | */ | 8126 | */ |
8127 | phy->media_type = ETH_PHY_NOT_PRESENT; | ||
5125 | if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == | 8128 | if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == |
5126 | PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) | 8129 | PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) |
5127 | bnx2x_sfp_set_transmitter(params, phy, 0); | 8130 | bnx2x_sfp_set_transmitter(params, phy, 0); |
@@ -5129,6 +8132,29 @@ void bnx2x_handle_module_detect_int(struct link_params *params) | |||
5129 | } | 8132 | } |
5130 | 8133 | ||
5131 | /******************************************************************/ | 8134 | /******************************************************************/ |
8135 | /* Used by 8706 and 8727 */ | ||
8136 | /******************************************************************/ | ||
8137 | static void bnx2x_sfp_mask_fault(struct bnx2x *bp, | ||
8138 | struct bnx2x_phy *phy, | ||
8139 | u16 alarm_status_offset, | ||
8140 | u16 alarm_ctrl_offset) | ||
8141 | { | ||
8142 | u16 alarm_status, val; | ||
8143 | bnx2x_cl45_read(bp, phy, | ||
8144 | MDIO_PMA_DEVAD, alarm_status_offset, | ||
8145 | &alarm_status); | ||
8146 | bnx2x_cl45_read(bp, phy, | ||
8147 | MDIO_PMA_DEVAD, alarm_status_offset, | ||
8148 | &alarm_status); | ||
8149 | /* Mask or enable the fault event. */ | ||
8150 | bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val); | ||
8151 | if (alarm_status & (1<<0)) | ||
8152 | val &= ~(1<<0); | ||
8153 | else | ||
8154 | val |= (1<<0); | ||
8155 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val); | ||
8156 | } | ||
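bnx2x_sfp_mask_fault() above depends on the LASI alarm registers being latched: the status register is read twice so a stale, already-serviced event is flushed, and the matching enable bit in the control register is then masked while the fault is asserted and re-armed once it drops, which stops the same fault from re-raising the interrupt on every poll. A stand-alone sketch of that toggle on plain variables (not the MDIO accessors):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: mask bit 0 of 'ctrl' while the latched fault bit is set. */
uint16_t update_fault_mask(uint16_t latched_status, uint16_t ctrl)
{
        bool fault_now = latched_status & (1 << 0);

        if (fault_now)
                ctrl &= ~(1 << 0);      /* fault present: mask further events  */
        else
                ctrl |= (1 << 0);       /* fault gone: re-arm the alarm source */
        return ctrl;
}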
8157 | /******************************************************************/ | ||
5132 | /* common BCM8706/BCM8726 PHY SECTION */ | 8158 | /* common BCM8706/BCM8726 PHY SECTION */ |
5133 | /******************************************************************/ | 8159 | /******************************************************************/ |
5134 | static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, | 8160 | static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, |
@@ -5141,12 +8167,16 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, | |||
5141 | DP(NETIF_MSG_LINK, "XGXS 8706/8726\n"); | 8167 | DP(NETIF_MSG_LINK, "XGXS 8706/8726\n"); |
5142 | /* Clear RX Alarm*/ | 8168 | /* Clear RX Alarm*/ |
5143 | bnx2x_cl45_read(bp, phy, | 8169 | bnx2x_cl45_read(bp, phy, |
5144 | MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2); | 8170 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2); |
8171 | |||
8172 | bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, | ||
8173 | MDIO_PMA_LASI_TXCTRL); | ||
8174 | |||
5145 | /* clear LASI indication*/ | 8175 | /* clear LASI indication*/ |
5146 | bnx2x_cl45_read(bp, phy, | 8176 | bnx2x_cl45_read(bp, phy, |
5147 | MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1); | 8177 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); |
5148 | bnx2x_cl45_read(bp, phy, | 8178 | bnx2x_cl45_read(bp, phy, |
5149 | MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2); | 8179 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2); |
5150 | DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2); | 8180 | DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2); |
5151 | 8181 | ||
5152 | bnx2x_cl45_read(bp, phy, | 8182 | bnx2x_cl45_read(bp, phy, |
@@ -5173,6 +8203,17 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, | |||
5173 | bnx2x_ext_phy_resolve_fc(phy, params, vars); | 8203 | bnx2x_ext_phy_resolve_fc(phy, params, vars); |
5174 | vars->duplex = DUPLEX_FULL; | 8204 | vars->duplex = DUPLEX_FULL; |
5175 | } | 8205 | } |
8206 | |||
8207 | /* Capture 10G link fault. Read twice to clear stale value. */ | ||
8208 | if (vars->line_speed == SPEED_10000) { | ||
8209 | bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, | ||
8210 | MDIO_PMA_LASI_TXSTAT, &val1); | ||
8211 | bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, | ||
8212 | MDIO_PMA_LASI_TXSTAT, &val1); | ||
8213 | if (val1 & (1<<0)) | ||
8214 | vars->fault_detected = 1; | ||
8215 | } | ||
8216 | |||
5176 | return link_up; | 8217 | return link_up; |
5177 | } | 8218 | } |
5178 | 8219 | ||
@@ -5186,6 +8227,10 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, | |||
5186 | u32 tx_en_mode; | 8227 | u32 tx_en_mode; |
5187 | u16 cnt, val, tmp1; | 8228 | u16 cnt, val, tmp1; |
5188 | struct bnx2x *bp = params->bp; | 8229 | struct bnx2x *bp = params->bp; |
8230 | |||
8231 | /* SFP+ PHY: Set flag to check for Tx error */ | ||
8232 | vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG; | ||
8233 | |||
5189 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, | 8234 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, |
5190 | MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); | 8235 | MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); |
5191 | /* HW reset */ | 8236 | /* HW reset */ |
@@ -5228,7 +8273,11 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, | |||
5228 | MDIO_PMA_DEVAD, | 8273 | MDIO_PMA_DEVAD, |
5229 | MDIO_PMA_REG_DIGITAL_CTRL, 0x400); | 8274 | MDIO_PMA_REG_DIGITAL_CTRL, 0x400); |
5230 | bnx2x_cl45_write(bp, phy, | 8275 | bnx2x_cl45_write(bp, phy, |
5231 | MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1); | 8276 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, |
8277 | 0); | ||
8278 | /* Arm LASI for link and Tx fault. */ | ||
8279 | bnx2x_cl45_write(bp, phy, | ||
8280 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3); | ||
5232 | } else { | 8281 | } else { |
5233 | /* Force 1Gbps using autoneg with 1G advertisement */ | 8282 | /* Force 1Gbps using autoneg with 1G advertisement */ |
5234 | 8283 | ||
@@ -5251,10 +8300,10 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, | |||
5251 | bnx2x_cl45_write(bp, phy, | 8300 | bnx2x_cl45_write(bp, phy, |
5252 | MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); | 8301 | MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); |
5253 | bnx2x_cl45_write(bp, phy, | 8302 | bnx2x_cl45_write(bp, phy, |
5254 | MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, | 8303 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, |
5255 | 0x0400); | 8304 | 0x0400); |
5256 | bnx2x_cl45_write(bp, phy, | 8305 | bnx2x_cl45_write(bp, phy, |
5257 | MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, | 8306 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, |
5258 | 0x0004); | 8307 | 0x0004); |
5259 | } | 8308 | } |
5260 | bnx2x_save_bcm_spirom_ver(bp, phy, params->port); | 8309 | bnx2x_save_bcm_spirom_ver(bp, phy, params->port); |
@@ -5281,9 +8330,9 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, | |||
5281 | return 0; | 8330 | return 0; |
5282 | } | 8331 | } |
5283 | 8332 | ||
5284 | static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy, | 8333 | static int bnx2x_8706_read_status(struct bnx2x_phy *phy, |
5285 | struct link_params *params, | 8334 | struct link_params *params, |
5286 | struct link_vars *vars) | 8335 | struct link_vars *vars) |
5287 | { | 8336 | { |
5288 | return bnx2x_8706_8726_read_status(phy, params, vars); | 8337 | return bnx2x_8706_8726_read_status(phy, params, vars); |
5289 | } | 8338 | } |
@@ -5358,15 +8407,16 @@ static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy, | |||
5358 | } | 8407 | } |
5359 | 8408 | ||
5360 | 8409 | ||
5361 | static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy, | 8410 | static int bnx2x_8726_config_init(struct bnx2x_phy *phy, |
5362 | struct link_params *params, | 8411 | struct link_params *params, |
5363 | struct link_vars *vars) | 8412 | struct link_vars *vars) |
5364 | { | 8413 | { |
5365 | struct bnx2x *bp = params->bp; | 8414 | struct bnx2x *bp = params->bp; |
5366 | u32 val; | ||
5367 | u32 swap_val, swap_override, aeu_gpio_mask, offset; | ||
5368 | DP(NETIF_MSG_LINK, "Initializing BCM8726\n"); | 8415 | DP(NETIF_MSG_LINK, "Initializing BCM8726\n"); |
5369 | 8416 | ||
8417 | /* SFP+ PHY: Set flag to check for Tx error */ | ||
8418 | vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG; | ||
8419 | |||
5370 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); | 8420 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); |
5371 | bnx2x_wait_reset_complete(bp, phy, params); | 8421 | bnx2x_wait_reset_complete(bp, phy, params); |
5372 | 8422 | ||
@@ -5387,9 +8437,9 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy, | |||
5387 | bnx2x_cl45_write(bp, phy, | 8437 | bnx2x_cl45_write(bp, phy, |
5388 | MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); | 8438 | MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); |
5389 | bnx2x_cl45_write(bp, phy, | 8439 | bnx2x_cl45_write(bp, phy, |
5390 | MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x5); | 8440 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5); |
5391 | bnx2x_cl45_write(bp, phy, | 8441 | bnx2x_cl45_write(bp, phy, |
5392 | MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, | 8442 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, |
5393 | 0x400); | 8443 | 0x400); |
5394 | } else if ((phy->req_line_speed == SPEED_AUTO_NEG) && | 8444 | } else if ((phy->req_line_speed == SPEED_AUTO_NEG) && |
5395 | (phy->speed_cap_mask & | 8445 | (phy->speed_cap_mask & |
@@ -5415,14 +8465,14 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy, | |||
5415 | * change | 8465 | * change |
5416 | */ | 8466 | */ |
5417 | bnx2x_cl45_write(bp, phy, | 8467 | bnx2x_cl45_write(bp, phy, |
5418 | MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4); | 8468 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4); |
5419 | bnx2x_cl45_write(bp, phy, | 8469 | bnx2x_cl45_write(bp, phy, |
5420 | MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, | 8470 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, |
5421 | 0x400); | 8471 | 0x400); |
5422 | 8472 | ||
5423 | } else { /* Default 10G. Set only LASI control */ | 8473 | } else { /* Default 10G. Set only LASI control */ |
5424 | bnx2x_cl45_write(bp, phy, | 8474 | bnx2x_cl45_write(bp, phy, |
5425 | MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1); | 8475 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1); |
5426 | } | 8476 | } |
5427 | 8477 | ||
5428 | /* Set TX PreEmphasis if needed */ | 8478 | /* Set TX PreEmphasis if needed */ |
@@ -5443,30 +8493,6 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy, | |||
5443 | phy->tx_preemphasis[1]); | 8493 | phy->tx_preemphasis[1]); |
5444 | } | 8494 | } |
5445 | 8495 | ||
5446 | /* Set GPIO3 to trigger SFP+ module insertion/removal */ | ||
5447 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, | ||
5448 | MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port); | ||
5449 | |||
5450 | /* The GPIO should be swapped if the swap register is set and active */ | ||
5451 | swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); | ||
5452 | swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); | ||
5453 | |||
5454 | /* Select function upon port-swap configuration */ | ||
5455 | if (params->port == 0) { | ||
5456 | offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; | ||
5457 | aeu_gpio_mask = (swap_val && swap_override) ? | ||
5458 | AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 : | ||
5459 | AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0; | ||
5460 | } else { | ||
5461 | offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0; | ||
5462 | aeu_gpio_mask = (swap_val && swap_override) ? | ||
5463 | AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 : | ||
5464 | AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1; | ||
5465 | } | ||
5466 | val = REG_RD(bp, offset); | ||
5467 | /* add GPIO3 to group */ | ||
5468 | val |= aeu_gpio_mask; | ||
5469 | REG_WR(bp, offset, val); | ||
5470 | return 0; | 8496 | return 0; |
5471 | 8497 | ||
5472 | } | 8498 | } |
@@ -5548,9 +8574,9 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy, | |||
5548 | MISC_REGISTERS_GPIO_OUTPUT_LOW, port); | 8574 | MISC_REGISTERS_GPIO_OUTPUT_LOW, port); |
5549 | } | 8575 | } |
5550 | 8576 | ||
5551 | static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, | 8577 | static int bnx2x_8727_config_init(struct bnx2x_phy *phy, |
5552 | struct link_params *params, | 8578 | struct link_params *params, |
5553 | struct link_vars *vars) | 8579 | struct link_vars *vars) |
5554 | { | 8580 | { |
5555 | u32 tx_en_mode; | 8581 | u32 tx_en_mode; |
5556 | u16 tmp1, val, mod_abs, tmp2; | 8582 | u16 tmp1, val, mod_abs, tmp2; |
@@ -5559,18 +8585,24 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, | |||
5559 | struct bnx2x *bp = params->bp; | 8585 | struct bnx2x *bp = params->bp; |
5560 | /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ | 8586 | /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ |
5561 | 8587 | ||
8588 | /* SFP+ PHY: Set flag to check for Tx error */ | ||
8589 | vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG; | ||
8590 | |||
5562 | bnx2x_wait_reset_complete(bp, phy, params); | 8591 | bnx2x_wait_reset_complete(bp, phy, params); |
5563 | rx_alarm_ctrl_val = (1<<2) | (1<<5) ; | 8592 | rx_alarm_ctrl_val = (1<<2) | (1<<5) ; |
5564 | lasi_ctrl_val = 0x0004; | 8593 | /* Should be 0x6 to enable XS on Tx side. */ |
8594 | lasi_ctrl_val = 0x0006; | ||
5565 | 8595 | ||
5566 | DP(NETIF_MSG_LINK, "Initializing BCM8727\n"); | 8596 | DP(NETIF_MSG_LINK, "Initializing BCM8727\n"); |
5567 | /* enable LASI */ | 8597 | /* enable LASI */ |
5568 | bnx2x_cl45_write(bp, phy, | 8598 | bnx2x_cl45_write(bp, phy, |
5569 | MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, | 8599 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, |
5570 | rx_alarm_ctrl_val); | 8600 | rx_alarm_ctrl_val); |
5571 | |||
5572 | bnx2x_cl45_write(bp, phy, | 8601 | bnx2x_cl45_write(bp, phy, |
5573 | MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val); | 8602 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, |
8603 | 0); | ||
8604 | bnx2x_cl45_write(bp, phy, | ||
8605 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val); | ||
5574 | 8606 | ||
5575 | /* | 8607 | /* |
5576 | * Initially configure MOD_ABS to interrupt when module is | 8608 | * Initially configure MOD_ABS to interrupt when module is |
@@ -5590,6 +8622,9 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, | |||
5590 | MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); | 8622 | MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); |
5591 | 8623 | ||
5592 | 8624 | ||
8625 | /* Enable/Disable PHY transmitter output */ | ||
8626 | bnx2x_set_disable_pmd_transmit(params, phy, 0); | ||
8627 | |||
5593 | /* Make MOD_ABS give interrupt on change */ | 8628 | /* Make MOD_ABS give interrupt on change */ |
5594 | bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, | 8629 | bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, |
5595 | &val); | 8630 | &val); |
@@ -5612,7 +8647,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, | |||
5612 | MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); | 8647 | MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); |
5613 | 8648 | ||
5614 | bnx2x_cl45_read(bp, phy, | 8649 | bnx2x_cl45_read(bp, phy, |
5615 | MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1); | 8650 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); |
5616 | 8651 | ||
5617 | /* Set option 1G speed */ | 8652 | /* Set option 1G speed */ |
5618 | if (phy->req_line_speed == SPEED_1000) { | 8653 | if (phy->req_line_speed == SPEED_1000) { |
@@ -5730,7 +8765,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, | |||
5730 | /* Module is absent */ | 8765 | /* Module is absent */ |
5731 | DP(NETIF_MSG_LINK, "MOD_ABS indication " | 8766 | DP(NETIF_MSG_LINK, "MOD_ABS indication " |
5732 | "show module is absent\n"); | 8767 | "show module is absent\n"); |
5733 | 8768 | phy->media_type = ETH_PHY_NOT_PRESENT; | |
5734 | /* | 8769 | /* |
5735 | * 1. Set mod_abs to detect next module | 8770 | * 1. Set mod_abs to detect next module |
5736 | * presence event | 8771 | * presence event |
@@ -5752,7 +8787,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, | |||
5752 | */ | 8787 | */ |
5753 | bnx2x_cl45_read(bp, phy, | 8788 | bnx2x_cl45_read(bp, phy, |
5754 | MDIO_PMA_DEVAD, | 8789 | MDIO_PMA_DEVAD, |
5755 | MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); | 8790 | MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); |
5756 | 8791 | ||
5757 | } else { | 8792 | } else { |
5758 | /* Module is present */ | 8793 | /* Module is present */ |
@@ -5781,7 +8816,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, | |||
5781 | */ | 8816 | */ |
5782 | bnx2x_cl45_read(bp, phy, | 8817 | bnx2x_cl45_read(bp, phy, |
5783 | MDIO_PMA_DEVAD, | 8818 | MDIO_PMA_DEVAD, |
5784 | MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); | 8819 | MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); |
5785 | 8820 | ||
5786 | 8821 | ||
5787 | if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == | 8822 | if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == |
@@ -5805,26 +8840,29 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, | |||
5805 | 8840 | ||
5806 | { | 8841 | { |
5807 | struct bnx2x *bp = params->bp; | 8842 | struct bnx2x *bp = params->bp; |
5808 | u8 link_up = 0; | 8843 | u8 link_up = 0, oc_port = params->port; |
5809 | u16 link_status = 0; | 8844 | u16 link_status = 0; |
5810 | u16 rx_alarm_status, lasi_ctrl, val1; | 8845 | u16 rx_alarm_status, lasi_ctrl, val1; |
5811 | 8846 | ||
5812 | /* If PHY is not initialized, do not check link status */ | 8847 | /* If PHY is not initialized, do not check link status */ |
5813 | bnx2x_cl45_read(bp, phy, | 8848 | bnx2x_cl45_read(bp, phy, |
5814 | MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, | 8849 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, |
5815 | &lasi_ctrl); | 8850 | &lasi_ctrl); |
5816 | if (!lasi_ctrl) | 8851 | if (!lasi_ctrl) |
5817 | return 0; | 8852 | return 0; |
5818 | 8853 | ||
5819 | /* Check the LASI */ | 8854 | /* Check the LASI on Rx */ |
5820 | bnx2x_cl45_read(bp, phy, | 8855 | bnx2x_cl45_read(bp, phy, |
5821 | MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, | 8856 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, |
5822 | &rx_alarm_status); | 8857 | &rx_alarm_status); |
5823 | vars->line_speed = 0; | 8858 | vars->line_speed = 0; |
5824 | DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status); | 8859 | DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status); |
5825 | 8860 | ||
8861 | bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, | ||
8862 | MDIO_PMA_LASI_TXCTRL); | ||
8863 | |||
5826 | bnx2x_cl45_read(bp, phy, | 8864 | bnx2x_cl45_read(bp, phy, |
5827 | MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1); | 8865 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); |
5828 | 8866 | ||
5829 | DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1); | 8867 | DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1); |
5830 | 8868 | ||
@@ -5843,8 +8881,10 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, | |||
5843 | &val1); | 8881 | &val1); |
5844 | 8882 | ||
5845 | if ((val1 & (1<<8)) == 0) { | 8883 | if ((val1 & (1<<8)) == 0) { |
8884 | if (!CHIP_IS_E1x(bp)) | ||
8885 | oc_port = BP_PATH(bp) + (params->port << 1); | ||
5846 | DP(NETIF_MSG_LINK, "8727 Power fault has been detected" | 8886 | DP(NETIF_MSG_LINK, "8727 Power fault has been detected" |
5847 | " on port %d\n", params->port); | 8887 | " on port %d\n", oc_port); |
5848 | netdev_err(bp->dev, "Error: Power fault on Port %d has" | 8888 | netdev_err(bp->dev, "Error: Power fault on Port %d has" |
5849 | " been detected and the power to " | 8889 | " been detected and the power to " |
5850 | "that SFP+ module has been removed" | 8890 | "that SFP+ module has been removed" |
@@ -5852,11 +8892,11 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, | |||
5852 | " Please remove the SFP+ module and" | 8892 | " Please remove the SFP+ module and" |
5853 | " restart the system to clear this" | 8893 | " restart the system to clear this" |
5854 | " error.\n", | 8894 | " error.\n", |
5855 | params->port); | 8895 | oc_port); |
5856 | /* Disable all RX_ALARMs except for mod_abs */ | 8896 | /* Disable all RX_ALARMs except for mod_abs */ |
5857 | bnx2x_cl45_write(bp, phy, | 8897 | bnx2x_cl45_write(bp, phy, |
5858 | MDIO_PMA_DEVAD, | 8898 | MDIO_PMA_DEVAD, |
5859 | MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5)); | 8899 | MDIO_PMA_LASI_RXCTRL, (1<<5)); |
5860 | 8900 | ||
5861 | bnx2x_cl45_read(bp, phy, | 8901 | bnx2x_cl45_read(bp, phy, |
5862 | MDIO_PMA_DEVAD, | 8902 | MDIO_PMA_DEVAD, |
@@ -5869,7 +8909,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, | |||
5869 | /* Clear RX alarm */ | 8909 | /* Clear RX alarm */ |
5870 | bnx2x_cl45_read(bp, phy, | 8910 | bnx2x_cl45_read(bp, phy, |
5871 | MDIO_PMA_DEVAD, | 8911 | MDIO_PMA_DEVAD, |
5872 | MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); | 8912 | MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); |
5873 | return 0; | 8913 | return 0; |
5874 | } | 8914 | } |
5875 | } /* Over current check */ | 8915 | } /* Over current check */ |
@@ -5879,7 +8919,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, | |||
5879 | bnx2x_8727_handle_mod_abs(phy, params); | 8919 | bnx2x_8727_handle_mod_abs(phy, params); |
5880 | /* Enable all mod_abs and link detection bits */ | 8920 | /* Enable all mod_abs and link detection bits */ |
5881 | bnx2x_cl45_write(bp, phy, | 8921 | bnx2x_cl45_write(bp, phy, |
5882 | MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, | 8922 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, |
5883 | ((1<<5) | (1<<2))); | 8923 | ((1<<5) | (1<<2))); |
5884 | } | 8924 | } |
5885 | DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n"); | 8925 | DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n"); |
@@ -5915,6 +8955,20 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, | |||
5915 | DP(NETIF_MSG_LINK, "port %x: External link is down\n", | 8955 | DP(NETIF_MSG_LINK, "port %x: External link is down\n", |
5916 | params->port); | 8956 | params->port); |
5917 | } | 8957 | } |
8958 | |||
8959 | /* Capture 10G link fault. */ | ||
8960 | if (vars->line_speed == SPEED_10000) { | ||
8961 | bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, | ||
8962 | MDIO_PMA_LASI_TXSTAT, &val1); | ||
8963 | |||
8964 | bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, | ||
8965 | MDIO_PMA_LASI_TXSTAT, &val1); | ||
8966 | |||
8967 | if (val1 & (1<<0)) { | ||
8968 | vars->fault_detected = 1; | ||
8969 | } | ||
8970 | } | ||
8971 | |||
5918 | if (link_up) { | 8972 | if (link_up) { |
5919 | bnx2x_ext_phy_resolve_fc(phy, params, vars); | 8973 | bnx2x_ext_phy_resolve_fc(phy, params, vars); |
5920 | vars->duplex = DUPLEX_FULL; | 8974 | vars->duplex = DUPLEX_FULL; |
@@ -5945,10 +8999,14 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, | |||
5945 | struct link_params *params) | 8999 | struct link_params *params) |
5946 | { | 9000 | { |
5947 | struct bnx2x *bp = params->bp; | 9001 | struct bnx2x *bp = params->bp; |
9002 | |||
9003 | /* Enable/Disable PHY transmitter output */ | ||
9004 | bnx2x_set_disable_pmd_transmit(params, phy, 1); | ||
9005 | |||
5948 | /* Disable Transmitter */ | 9006 | /* Disable Transmitter */ |
5949 | bnx2x_sfp_set_transmitter(params, phy, 0); | 9007 | bnx2x_sfp_set_transmitter(params, phy, 0); |
5950 | /* Clear LASI */ | 9008 | /* Clear LASI */ |
5951 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0); | 9009 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0); |
5952 | 9010 | ||
5953 | } | 9011 | } |
5954 | 9012 | ||
@@ -5958,111 +9016,106 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, | |||
5958 | static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, | 9016 | static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, |
5959 | struct link_params *params) | 9017 | struct link_params *params) |
5960 | { | 9018 | { |
5961 | u16 val, fw_ver1, fw_ver2, cnt, adj; | 9019 | u16 val, fw_ver1, fw_ver2, cnt; |
9020 | u8 port; | ||
5962 | struct bnx2x *bp = params->bp; | 9021 | struct bnx2x *bp = params->bp; |
5963 | 9022 | ||
5964 | adj = 0; | 9023 | port = params->port; |
5965 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) | ||
5966 | adj = -1; | ||
5967 | 9024 | ||
5968 | /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/ | 9025 | /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/ |
5969 | /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ | 9026 | /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ |
5970 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014); | 9027 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014); |
5971 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200); | 9028 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); |
5972 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000); | 9029 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000); |
5973 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300); | 9030 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300); |
5974 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009); | 9031 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009); |
5975 | 9032 | ||
5976 | for (cnt = 0; cnt < 100; cnt++) { | 9033 | for (cnt = 0; cnt < 100; cnt++) { |
5977 | bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val); | 9034 | bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); |
5978 | if (val & 1) | 9035 | if (val & 1) |
5979 | break; | 9036 | break; |
5980 | udelay(5); | 9037 | udelay(5); |
5981 | } | 9038 | } |
5982 | if (cnt == 100) { | 9039 | if (cnt == 100) { |
5983 | DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n"); | 9040 | DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n"); |
5984 | bnx2x_save_spirom_version(bp, params->port, 0, | 9041 | bnx2x_save_spirom_version(bp, port, 0, |
5985 | phy->ver_addr); | 9042 | phy->ver_addr); |
5986 | return; | 9043 | return; |
5987 | } | 9044 | } |
5988 | 9045 | ||
5989 | 9046 | ||
5990 | /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ | 9047 | /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ |
5991 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000); | 9048 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); |
5992 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200); | 9049 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); |
5993 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A); | 9050 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); |
5994 | for (cnt = 0; cnt < 100; cnt++) { | 9051 | for (cnt = 0; cnt < 100; cnt++) { |
5995 | bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val); | 9052 | bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); |
5996 | if (val & 1) | 9053 | if (val & 1) |
5997 | break; | 9054 | break; |
5998 | udelay(5); | 9055 | udelay(5); |
5999 | } | 9056 | } |
6000 | if (cnt == 100) { | 9057 | if (cnt == 100) { |
6001 | DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n"); | 9058 | DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n"); |
6002 | bnx2x_save_spirom_version(bp, params->port, 0, | 9059 | bnx2x_save_spirom_version(bp, port, 0, |
6003 | phy->ver_addr); | 9060 | phy->ver_addr); |
6004 | return; | 9061 | return; |
6005 | } | 9062 | } |
6006 | 9063 | ||
6007 | /* lower 16 bits of the register SPI_FW_STATUS */ | 9064 | /* lower 16 bits of the register SPI_FW_STATUS */ |
6008 | bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1); | 9065 | bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); |
6009 | /* upper 16 bits of register SPI_FW_STATUS */ | 9066 | /* upper 16 bits of register SPI_FW_STATUS */ |
6010 | bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2); | 9067 | bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2); |
6011 | 9068 | ||
6012 | bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1, | 9069 | bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1, |
6013 | phy->ver_addr); | 9070 | phy->ver_addr); |
6014 | } | 9071 | } |
6015 | 9072 | ||
6016 | static void bnx2x_848xx_set_led(struct bnx2x *bp, | 9073 | static void bnx2x_848xx_set_led(struct bnx2x *bp, |
6017 | struct bnx2x_phy *phy) | 9074 | struct bnx2x_phy *phy) |
6018 | { | 9075 | { |
6019 | u16 val, adj; | 9076 | u16 val; |
6020 | |||
6021 | adj = 0; | ||
6022 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) | ||
6023 | adj = -1; | ||
6024 | 9077 | ||
6025 | /* PHYC_CTL_LED_CTL */ | 9078 | /* PHYC_CTL_LED_CTL */ |
6026 | bnx2x_cl45_read(bp, phy, | 9079 | bnx2x_cl45_read(bp, phy, |
6027 | MDIO_PMA_DEVAD, | 9080 | MDIO_PMA_DEVAD, |
6028 | MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val); | 9081 | MDIO_PMA_REG_8481_LINK_SIGNAL, &val); |
6029 | val &= 0xFE00; | 9082 | val &= 0xFE00; |
6030 | val |= 0x0092; | 9083 | val |= 0x0092; |
6031 | 9084 | ||
6032 | bnx2x_cl45_write(bp, phy, | 9085 | bnx2x_cl45_write(bp, phy, |
6033 | MDIO_PMA_DEVAD, | 9086 | MDIO_PMA_DEVAD, |
6034 | MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val); | 9087 | MDIO_PMA_REG_8481_LINK_SIGNAL, val); |
6035 | 9088 | ||
6036 | bnx2x_cl45_write(bp, phy, | 9089 | bnx2x_cl45_write(bp, phy, |
6037 | MDIO_PMA_DEVAD, | 9090 | MDIO_PMA_DEVAD, |
6038 | MDIO_PMA_REG_8481_LED1_MASK + adj, | 9091 | MDIO_PMA_REG_8481_LED1_MASK, |
6039 | 0x80); | 9092 | 0x80); |
6040 | 9093 | ||
6041 | bnx2x_cl45_write(bp, phy, | 9094 | bnx2x_cl45_write(bp, phy, |
6042 | MDIO_PMA_DEVAD, | 9095 | MDIO_PMA_DEVAD, |
6043 | MDIO_PMA_REG_8481_LED2_MASK + adj, | 9096 | MDIO_PMA_REG_8481_LED2_MASK, |
6044 | 0x18); | 9097 | 0x18); |
6045 | 9098 | ||
6046 | /* Select activity source by Tx and Rx, as suggested by PHY AE */ | 9099 | /* Select activity source by Tx and Rx, as suggested by PHY AE */ |
6047 | bnx2x_cl45_write(bp, phy, | 9100 | bnx2x_cl45_write(bp, phy, |
6048 | MDIO_PMA_DEVAD, | 9101 | MDIO_PMA_DEVAD, |
6049 | MDIO_PMA_REG_8481_LED3_MASK + adj, | 9102 | MDIO_PMA_REG_8481_LED3_MASK, |
6050 | 0x0006); | 9103 | 0x0006); |
6051 | 9104 | ||
6052 | /* Select the closest activity blink rate to that in 10/100/1000 */ | 9105 | /* Select the closest activity blink rate to that in 10/100/1000 */ |
6053 | bnx2x_cl45_write(bp, phy, | 9106 | bnx2x_cl45_write(bp, phy, |
6054 | MDIO_PMA_DEVAD, | 9107 | MDIO_PMA_DEVAD, |
6055 | MDIO_PMA_REG_8481_LED3_BLINK + adj, | 9108 | MDIO_PMA_REG_8481_LED3_BLINK, |
6056 | 0); | 9109 | 0); |
6057 | 9110 | ||
6058 | bnx2x_cl45_read(bp, phy, | 9111 | bnx2x_cl45_read(bp, phy, |
6059 | MDIO_PMA_DEVAD, | 9112 | MDIO_PMA_DEVAD, |
6060 | MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val); | 9113 | MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val); |
6061 | val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/ | 9114 | val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/ |
6062 | 9115 | ||
6063 | bnx2x_cl45_write(bp, phy, | 9116 | bnx2x_cl45_write(bp, phy, |
6064 | MDIO_PMA_DEVAD, | 9117 | MDIO_PMA_DEVAD, |
6065 | MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val); | 9118 | MDIO_PMA_REG_84823_CTL_LED_CTL_1, val); |
6066 | 9119 | ||
6067 | /* 'Interrupt Mask' */ | 9120 | /* 'Interrupt Mask' */ |
6068 | bnx2x_cl45_write(bp, phy, | 9121 | bnx2x_cl45_write(bp, phy, |
@@ -6070,12 +9123,19 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, | |||
6070 | 0xFFFB, 0xFFFD); | 9123 | 0xFFFB, 0xFFFD); |
6071 | } | 9124 | } |
6072 | 9125 | ||
6073 | static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, | 9126 | static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, |
6074 | struct link_params *params, | 9127 | struct link_params *params, |
6075 | struct link_vars *vars) | 9128 | struct link_vars *vars) |
6076 | { | 9129 | { |
6077 | struct bnx2x *bp = params->bp; | 9130 | struct bnx2x *bp = params->bp; |
6078 | u16 autoneg_val, an_1000_val, an_10_100_val; | 9131 | u16 autoneg_val, an_1000_val, an_10_100_val; |
9132 | u16 tmp_req_line_speed; | ||
9133 | |||
9134 | tmp_req_line_speed = phy->req_line_speed; | ||
9135 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) | ||
9136 | if (phy->req_line_speed == SPEED_10000) | ||
9137 | phy->req_line_speed = SPEED_AUTO_NEG; | ||
9138 | |||
6079 | /* | 9139 | /* |
6080 | * This phy uses the NIG latch mechanism since link indication | 9140 | * This phy uses the NIG latch mechanism since link indication |
6081 | * arrives through its LED4 and not via its LASI signal, so we | 9141 | * arrives through its LED4 and not via its LASI signal, so we |
@@ -6122,11 +9182,14 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, | |||
6122 | MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, | 9182 | MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, |
6123 | an_1000_val); | 9183 | an_1000_val); |
6124 | 9184 | ||
6125 | /* set 10 speed advertisement */ | 9185 | /* set 100 speed advertisement */ |
6126 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && | 9186 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && |
6127 | (phy->speed_cap_mask & | 9187 | (phy->speed_cap_mask & |
6128 | (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | | 9188 | (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | |
6129 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { | 9189 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) && |
9190 | (phy->supported & | ||
9191 | (SUPPORTED_100baseT_Half | | ||
9192 | SUPPORTED_100baseT_Full)))) { | ||
6130 | an_10_100_val |= (1<<7); | 9193 | an_10_100_val |= (1<<7); |
6131 | /* Enable autoneg and restart autoneg for legacy speeds */ | 9194 | /* Enable autoneg and restart autoneg for legacy speeds */ |
6132 | autoneg_val |= (1<<9 | 1<<12); | 9195 | autoneg_val |= (1<<9 | 1<<12); |
@@ -6137,9 +9200,12 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, | |||
6137 | } | 9200 | } |
6138 | /* set 10 speed advertisement */ | 9201 | /* set 10 speed advertisement */ |
6139 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && | 9202 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && |
6140 | (phy->speed_cap_mask & | 9203 | (phy->speed_cap_mask & |
6141 | (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | | 9204 | (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | |
6142 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) { | 9205 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) && |
9206 | (phy->supported & | ||
9207 | (SUPPORTED_10baseT_Half | | ||
9208 | SUPPORTED_10baseT_Full)))) { | ||
6143 | an_10_100_val |= (1<<5); | 9209 | an_10_100_val |= (1<<5); |
6144 | autoneg_val |= (1<<9 | 1<<12); | 9210 | autoneg_val |= (1<<9 | 1<<12); |
6145 | if (phy->req_duplex == DUPLEX_FULL) | 9211 | if (phy->req_duplex == DUPLEX_FULL) |
@@ -6148,7 +9214,10 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, | |||
6148 | } | 9214 | } |
6149 | 9215 | ||
6150 | /* Only 10/100 are allowed to work in FORCE mode */ | 9216 | /* Only 10/100 are allowed to work in FORCE mode */ |
6151 | if (phy->req_line_speed == SPEED_100) { | 9217 | if ((phy->req_line_speed == SPEED_100) && |
9218 | (phy->supported & | ||
9219 | (SUPPORTED_100baseT_Half | | ||
9220 | SUPPORTED_100baseT_Full))) { | ||
6152 | autoneg_val |= (1<<13); | 9221 | autoneg_val |= (1<<13); |
6153 | /* Enabled AUTO-MDIX when autoneg is disabled */ | 9222 | /* Enabled AUTO-MDIX when autoneg is disabled */ |
6154 | bnx2x_cl45_write(bp, phy, | 9223 | bnx2x_cl45_write(bp, phy, |
@@ -6156,7 +9225,10 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, | |||
6156 | (1<<15 | 1<<9 | 7<<0)); | 9225 | (1<<15 | 1<<9 | 7<<0)); |
6157 | DP(NETIF_MSG_LINK, "Setting 100M force\n"); | 9226 | DP(NETIF_MSG_LINK, "Setting 100M force\n"); |
6158 | } | 9227 | } |
6159 | if (phy->req_line_speed == SPEED_10) { | 9228 | if ((phy->req_line_speed == SPEED_10) && |
9229 | (phy->supported & | ||
9230 | (SUPPORTED_10baseT_Half | | ||
9231 | SUPPORTED_10baseT_Full))) { | ||
6160 | /* Enabled AUTO-MDIX when autoneg is disabled */ | 9232 | /* Enabled AUTO-MDIX when autoneg is disabled */ |
6161 | bnx2x_cl45_write(bp, phy, | 9233 | bnx2x_cl45_write(bp, phy, |
6162 | MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL, | 9234 | MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL, |
@@ -6179,10 +9251,10 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, | |||
6179 | (phy->speed_cap_mask & | 9251 | (phy->speed_cap_mask & |
6180 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || | 9252 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || |
6181 | (phy->req_line_speed == SPEED_10000)) { | 9253 | (phy->req_line_speed == SPEED_10000)) { |
6182 | DP(NETIF_MSG_LINK, "Advertising 10G\n"); | 9254 | DP(NETIF_MSG_LINK, "Advertising 10G\n"); |
6183 | /* Restart autoneg for 10G*/ | 9255 | /* Restart autoneg for 10G*/ |
6184 | 9256 | ||
6185 | bnx2x_cl45_write(bp, phy, | 9257 | bnx2x_cl45_write(bp, phy, |
6186 | MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, | 9258 | MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, |
6187 | 0x3200); | 9259 | 0x3200); |
6188 | } else if (phy->req_line_speed != SPEED_10 && | 9260 | } else if (phy->req_line_speed != SPEED_10 && |
@@ -6195,12 +9267,14 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, | |||
6195 | /* Save spirom version */ | 9267 | /* Save spirom version */ |
6196 | bnx2x_save_848xx_spirom_version(phy, params); | 9268 | bnx2x_save_848xx_spirom_version(phy, params); |
6197 | 9269 | ||
9270 | phy->req_line_speed = tmp_req_line_speed; | ||
9271 | |||
6198 | return 0; | 9272 | return 0; |
6199 | } | 9273 | } |
6200 | 9274 | ||
6201 | static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy, | 9275 | static int bnx2x_8481_config_init(struct bnx2x_phy *phy, |
6202 | struct link_params *params, | 9276 | struct link_params *params, |
6203 | struct link_vars *vars) | 9277 | struct link_vars *vars) |
6204 | { | 9278 | { |
6205 | struct bnx2x *bp = params->bp; | 9279 | struct bnx2x *bp = params->bp; |
6206 | /* Restore normal power mode*/ | 9280 | /* Restore normal power mode*/ |
@@ -6215,33 +9289,200 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy, | |||
6215 | return bnx2x_848xx_cmn_config_init(phy, params, vars); | 9289 | return bnx2x_848xx_cmn_config_init(phy, params, vars); |
6216 | } | 9290 | } |
6217 | 9291 | ||
6218 | static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, | 9292 | |
6219 | struct link_params *params, | 9293 | #define PHY84833_HDSHK_WAIT 300 |
6220 | struct link_vars *vars) | 9294 | static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, |
9295 | struct link_params *params, | ||
9296 | struct link_vars *vars) | ||
9297 | { | ||
9298 | u32 idx; | ||
9299 | u32 pair_swap; | ||
9300 | u16 val; | ||
9301 | u16 data; | ||
9302 | struct bnx2x *bp = params->bp; | ||
9303 | /* Do pair swap */ | ||
9304 | |||
9305 | /* Check for configuration. */ | ||
9306 | pair_swap = REG_RD(bp, params->shmem_base + | ||
9307 | offsetof(struct shmem_region, | ||
9308 | dev_info.port_hw_config[params->port].xgbt_phy_cfg)) & | ||
9309 | PORT_HW_CFG_RJ45_PAIR_SWAP_MASK; | ||
9310 | |||
9311 | if (pair_swap == 0) | ||
9312 | return 0; | ||
9313 | |||
9314 | data = (u16)pair_swap; | ||
9315 | |||
9316 | /* Write CMD_OPEN_OVERRIDE to STATUS reg */ | ||
9317 | bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, | ||
9318 | MDIO_84833_TOP_CFG_SCRATCH_REG2, | ||
9319 | PHY84833_CMD_OPEN_OVERRIDE); | ||
9320 | for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { | ||
9321 | bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, | ||
9322 | MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); | ||
9323 | if (val == PHY84833_CMD_OPEN_FOR_CMDS) | ||
9324 | break; | ||
9325 | msleep(1); | ||
9326 | } | ||
9327 | if (idx >= PHY84833_HDSHK_WAIT) { | ||
9328 | DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n"); | ||
9329 | return -EINVAL; | ||
9330 | } | ||
9331 | |||
9332 | bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, | ||
9333 | MDIO_84833_TOP_CFG_SCRATCH_REG4, | ||
9334 | data); | ||
9335 | /* Issue pair swap command */ | ||
9336 | bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, | ||
9337 | MDIO_84833_TOP_CFG_SCRATCH_REG0, | ||
9338 | PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE); | ||
9339 | for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { | ||
9340 | bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, | ||
9341 | MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); | ||
9342 | if ((val == PHY84833_CMD_COMPLETE_PASS) || | ||
9343 | (val == PHY84833_CMD_COMPLETE_ERROR)) | ||
9344 | break; | ||
9345 | msleep(1); | ||
9346 | } | ||
9347 | if ((idx >= PHY84833_HDSHK_WAIT) || | ||
9348 | (val == PHY84833_CMD_COMPLETE_ERROR)) { | ||
9349 | DP(NETIF_MSG_LINK, "Pairswap: override failed.\n"); | ||
9350 | return -EINVAL; | ||
9351 | } | ||
9352 | bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, | ||
9353 | MDIO_84833_TOP_CFG_SCRATCH_REG2, | ||
9354 | PHY84833_CMD_CLEAR_COMPLETE); | ||
9355 | DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data); | ||
9356 | return 0; | ||
9357 | } | ||
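
The pair-swap routine above is one instance of a fixed mailbox handshake with the 84833 firmware: open the command interface through SCRATCH_REG2, place the argument in SCRATCH_REG4, post the command code to SCRATCH_REG0, then poll SCRATCH_REG2 for a PASS/ERROR status and clear it. A condensed sketch of that sequence using the register and status names from the hunk above; the helper itself is hypothetical and ignores per-command extras such as the EEE latency registers:

static int bnx2x_84833_fw_cmd(struct bnx2x *bp, struct bnx2x_phy *phy,
			      u16 cmd, u16 arg)
{
	u32 idx;
	u16 val = 0;

	/* Ask the firmware to open the command interface. */
	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
			 MDIO_84833_TOP_CFG_SCRATCH_REG2,
			 PHY84833_CMD_OPEN_OVERRIDE);
	for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
				MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
		if (val == PHY84833_CMD_OPEN_FOR_CMDS)
			break;
		msleep(1);
	}
	if (idx >= PHY84833_HDSHK_WAIT) {
		DP(NETIF_MSG_LINK, "84833 FW not ready for cmd 0x%x\n", cmd);
		return -EINVAL;
	}

	/* Argument first, then the command code itself. */
	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
			 MDIO_84833_TOP_CFG_SCRATCH_REG4, arg);
	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
			 MDIO_84833_TOP_CFG_SCRATCH_REG0, cmd);

	/* Poll for the firmware verdict, then release the interface. */
	for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
				MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
		if ((val == PHY84833_CMD_COMPLETE_PASS) ||
		    (val == PHY84833_CMD_COMPLETE_ERROR))
			break;
		msleep(1);
	}
	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
			 MDIO_84833_TOP_CFG_SCRATCH_REG2,
			 PHY84833_CMD_CLEAR_COMPLETE);
	return (val == PHY84833_CMD_COMPLETE_PASS) ? 0 : -EINVAL;
}
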
9358 | |||
9359 | |||
9360 | static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp, | ||
9361 | u32 shmem_base_path[], | ||
9362 | u32 chip_id) | ||
9363 | { | ||
9364 | u32 reset_pin[2]; | ||
9365 | u32 idx; | ||
9366 | u8 reset_gpios; | ||
9367 | if (CHIP_IS_E3(bp)) { | ||
9368 | /* Assume that these will be GPIOs, not EPIOs. */ | ||
9369 | for (idx = 0; idx < 2; idx++) { | ||
9370 | /* Map config param to register bit. */ | ||
9371 | reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + | ||
9372 | offsetof(struct shmem_region, | ||
9373 | dev_info.port_hw_config[0].e3_cmn_pin_cfg)); | ||
9374 | reset_pin[idx] = (reset_pin[idx] & | ||
9375 | PORT_HW_CFG_E3_PHY_RESET_MASK) >> | ||
9376 | PORT_HW_CFG_E3_PHY_RESET_SHIFT; | ||
9377 | reset_pin[idx] -= PIN_CFG_GPIO0_P0; | ||
9378 | reset_pin[idx] = (1 << reset_pin[idx]); | ||
9379 | } | ||
9380 | reset_gpios = (u8)(reset_pin[0] | reset_pin[1]); | ||
9381 | } else { | ||
9382 | /* E2: read the reset pin from a different place in shmem. */ | ||
9383 | for (idx = 0; idx < 2; idx++) { | ||
9384 | reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + | ||
9385 | offsetof(struct shmem_region, | ||
9386 | dev_info.port_hw_config[0].default_cfg)); | ||
9387 | reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK; | ||
9388 | reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0; | ||
9389 | reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT; | ||
9390 | reset_pin[idx] = (1 << reset_pin[idx]); | ||
9391 | } | ||
9392 | reset_gpios = (u8)(reset_pin[0] | reset_pin[1]); | ||
9393 | } | ||
9394 | |||
9395 | return reset_gpios; | ||
9396 | } | ||
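
bnx2x_84833_get_reset_gpios() converts the per-path shmem pin selection into a GPIO bitmask: the pin index (counted from PIN_CFG_GPIO0_P0 on E3, or from the EXT_PHY_GPIO_RST base on E2) becomes a single set bit, and the two paths are OR-ed together so one bnx2x_set_mult_gpio() call can toggle both. A worked example under that reading, with purely illustrative strap values:

	/* Illustrative only: path 0 strapped to GPIO1, path 1 to GPIO3. */
	u32 reset_pin0 = 1 << 1;				/* 0x02 */
	u32 reset_pin1 = 1 << 3;				/* 0x08 */
	u8 reset_gpios = (u8)(reset_pin0 | reset_pin1);		/* 0x0a */

	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
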
9397 | |||
9398 | static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, | ||
9399 | struct link_params *params) | ||
9400 | { | ||
9401 | struct bnx2x *bp = params->bp; | ||
9402 | u8 reset_gpios; | ||
9403 | u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base + | ||
9404 | offsetof(struct shmem2_region, | ||
9405 | other_shmem_base_addr)); | ||
9406 | |||
9407 | u32 shmem_base_path[2]; | ||
9408 | shmem_base_path[0] = params->shmem_base; | ||
9409 | shmem_base_path[1] = other_shmem_base_addr; | ||
9410 | |||
9411 | reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, | ||
9412 | params->chip_id); | ||
9413 | |||
9414 | bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); | ||
9415 | udelay(10); | ||
9416 | DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n", | ||
9417 | reset_gpios); | ||
9418 | |||
9419 | return 0; | ||
9420 | } | ||
9421 | |||
9422 | static int bnx2x_84833_common_init_phy(struct bnx2x *bp, | ||
9423 | u32 shmem_base_path[], | ||
9424 | u32 chip_id) | ||
9425 | { | ||
9426 | u8 reset_gpios; | ||
9427 | |||
9428 | reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id); | ||
9429 | |||
9430 | bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); | ||
9431 | udelay(10); | ||
9432 | bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH); | ||
9433 | msleep(800); | ||
9434 | DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n", | ||
9435 | reset_gpios); | ||
9436 | |||
9437 | return 0; | ||
9438 | } | ||
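
Both helpers above end up driving the same pins: bnx2x_84833_hw_reset_phy() pulls them low and leaves the PHY held in reset, while bnx2x_84833_common_init_phy() completes a low-high pulse and then waits 800 ms for the on-chip firmware to boot. A usage sketch under that reading; the surrounding variables are assumed to be available at the call site:

	u32 shmem_base_path[2] = { params->shmem_base, other_shmem_base_addr };

	/* Common (once per chip) init: full reset pulse, then let FW boot. */
	bnx2x_84833_common_init_phy(bp, shmem_base_path, params->chip_id);

	/* Teardown path: assert reset and leave the PHY held there. */
	bnx2x_84833_hw_reset_phy(phy, params);
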
9439 | |||
9440 | #define PHY84833_CONSTANT_LATENCY 1193 | ||
9441 | static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, | ||
9442 | struct link_params *params, | ||
9443 | struct link_vars *vars) | ||
6221 | { | 9444 | { |
6222 | struct bnx2x *bp = params->bp; | 9445 | struct bnx2x *bp = params->bp; |
6223 | u8 port, initialize = 1; | 9446 | u8 port, initialize = 1; |
6224 | u16 val, adj; | 9447 | u16 val; |
6225 | u16 temp; | 9448 | u16 temp; |
6226 | u32 actual_phy_selection, cms_enable; | 9449 | u32 actual_phy_selection, cms_enable, idx; |
6227 | u8 rc = 0; | 9450 | int rc = 0; |
6228 | |||
6229 | /* This is just for MDIO_CTL_REG_84823_MEDIA register. */ | ||
6230 | adj = 0; | ||
6231 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) | ||
6232 | adj = 3; | ||
6233 | 9451 | ||
6234 | msleep(1); | 9452 | msleep(1); |
6235 | if (CHIP_IS_E2(bp)) | 9453 | |
9454 | if (!(CHIP_IS_E1(bp))) | ||
6236 | port = BP_PATH(bp); | 9455 | port = BP_PATH(bp); |
6237 | else | 9456 | else |
6238 | port = params->port; | 9457 | port = params->port; |
6239 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, | 9458 | |
6240 | MISC_REGISTERS_GPIO_OUTPUT_HIGH, | 9459 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { |
6241 | port); | 9460 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, |
9461 | MISC_REGISTERS_GPIO_OUTPUT_HIGH, | ||
9462 | port); | ||
9463 | } else { | ||
9464 | /* MDIO reset */ | ||
9465 | bnx2x_cl45_write(bp, phy, | ||
9466 | MDIO_PMA_DEVAD, | ||
9467 | MDIO_PMA_REG_CTRL, 0x8000); | ||
9468 | /* Bring PHY out of super isolate mode */ | ||
9469 | bnx2x_cl45_read(bp, phy, | ||
9470 | MDIO_CTL_DEVAD, | ||
9471 | MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val); | ||
9472 | val &= ~MDIO_84833_SUPER_ISOLATE; | ||
9473 | bnx2x_cl45_write(bp, phy, | ||
9474 | MDIO_CTL_DEVAD, | ||
9475 | MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); | ||
9476 | } | ||
9477 | |||
6242 | bnx2x_wait_reset_complete(bp, phy, params); | 9478 | bnx2x_wait_reset_complete(bp, phy, params); |
9479 | |||
6243 | /* Wait for GPHY to come out of reset */ | 9480 | /* Wait for GPHY to come out of reset */ |
6244 | msleep(50); | 9481 | msleep(50); |
9482 | |||
9483 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) | ||
9484 | bnx2x_84833_pair_swap_cfg(phy, params, vars); | ||
9485 | |||
6245 | /* | 9486 | /* |
6246 | * BCM84823 requires that XGXS links up first @ 10G for normal behavior | 9487 | * BCM84823 requires that XGXS links up first @ 10G for normal behavior |
6247 | */ | 9488 | */ |
@@ -6254,14 +9495,20 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, | |||
6254 | /* Set dual-media configuration according to configuration */ | 9495 | /* Set dual-media configuration according to configuration */ |
6255 | 9496 | ||
6256 | bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, | 9497 | bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, |
6257 | MDIO_CTL_REG_84823_MEDIA + adj, &val); | 9498 | MDIO_CTL_REG_84823_MEDIA, &val); |
6258 | val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | | 9499 | val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | |
6259 | MDIO_CTL_REG_84823_MEDIA_LINE_MASK | | 9500 | MDIO_CTL_REG_84823_MEDIA_LINE_MASK | |
6260 | MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN | | 9501 | MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN | |
6261 | MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK | | 9502 | MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK | |
6262 | MDIO_CTL_REG_84823_MEDIA_FIBER_1G); | 9503 | MDIO_CTL_REG_84823_MEDIA_FIBER_1G); |
6263 | val |= MDIO_CTL_REG_84823_CTRL_MAC_XFI | | 9504 | |
6264 | MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L; | 9505 | if (CHIP_IS_E3(bp)) { |
9506 | val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | | ||
9507 | MDIO_CTL_REG_84823_MEDIA_LINE_MASK); | ||
9508 | } else { | ||
9509 | val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI | | ||
9510 | MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L); | ||
9511 | } | ||
6265 | 9512 | ||
6266 | actual_phy_selection = bnx2x_phy_selection(params); | 9513 | actual_phy_selection = bnx2x_phy_selection(params); |
6267 | 9514 | ||
@@ -6287,28 +9534,90 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, | |||
6287 | val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G; | 9534 | val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G; |
6288 | 9535 | ||
6289 | bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, | 9536 | bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, |
6290 | MDIO_CTL_REG_84823_MEDIA + adj, val); | 9537 | MDIO_CTL_REG_84823_MEDIA, val); |
6291 | DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", | 9538 | DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", |
6292 | params->multi_phy_config, val); | 9539 | params->multi_phy_config, val); |
6293 | 9540 | ||
9541 | /* AutogrEEEn */ | ||
9542 | if (params->feature_config_flags & | ||
9543 | FEATURE_CONFIG_AUTOGREEEN_ENABLED) { | ||
9544 | /* Ensure that f/w is ready */ | ||
9545 | for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { | ||
9546 | bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, | ||
9547 | MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); | ||
9548 | if (val == PHY84833_CMD_OPEN_FOR_CMDS) | ||
9549 | break; | ||
9550 | usleep_range(1000, 1000); | ||
9551 | } | ||
9552 | if (idx >= PHY84833_HDSHK_WAIT) { | ||
9553 | DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n"); | ||
9554 | return -EINVAL; | ||
9555 | } | ||
9556 | |||
9557 | /* Select EEE mode */ | ||
9558 | bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, | ||
9559 | MDIO_84833_TOP_CFG_SCRATCH_REG3, | ||
9560 | 0x2); | ||
9561 | |||
9562 | /* Set Idle and Latency */ | ||
9563 | bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, | ||
9564 | MDIO_84833_TOP_CFG_SCRATCH_REG4, | ||
9565 | PHY84833_CONSTANT_LATENCY + 1); | ||
9566 | |||
9567 | bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, | ||
9568 | MDIO_84833_TOP_CFG_DATA3_REG, | ||
9569 | PHY84833_CONSTANT_LATENCY + 1); | ||
9570 | |||
9571 | bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, | ||
9572 | MDIO_84833_TOP_CFG_DATA4_REG, | ||
9573 | PHY84833_CONSTANT_LATENCY); | ||
9574 | |||
9575 | /* Send EEE instruction to command register */ | ||
9576 | bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, | ||
9577 | MDIO_84833_TOP_CFG_SCRATCH_REG0, | ||
9578 | PHY84833_DIAG_CMD_SET_EEE_MODE); | ||
9579 | |||
9580 | /* Ensure that the command has completed */ | ||
9581 | for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { | ||
9582 | bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, | ||
9583 | MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); | ||
9584 | if ((val == PHY84833_CMD_COMPLETE_PASS) || | ||
9585 | (val == PHY84833_CMD_COMPLETE_ERROR)) | ||
9586 | break; | ||
9587 | usleep_range(1000, 1000); | ||
9588 | } | ||
9589 | if ((idx >= PHY84833_HDSHK_WAIT) || | ||
9590 | (val == PHY84833_CMD_COMPLETE_ERROR)) { | ||
9591 | DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n"); | ||
9592 | return -EINVAL; | ||
9593 | } | ||
9594 | |||
9595 | /* Reset command handler */ | ||
9596 | bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, | ||
9597 | MDIO_84833_TOP_CFG_SCRATCH_REG2, | ||
9598 | PHY84833_CMD_CLEAR_COMPLETE); | ||
9599 | } | ||
9600 | |||
6294 | if (initialize) | 9601 | if (initialize) |
6295 | rc = bnx2x_848xx_cmn_config_init(phy, params, vars); | 9602 | rc = bnx2x_848xx_cmn_config_init(phy, params, vars); |
6296 | else | 9603 | else |
6297 | bnx2x_save_848xx_spirom_version(phy, params); | 9604 | bnx2x_save_848xx_spirom_version(phy, params); |
6298 | cms_enable = REG_RD(bp, params->shmem_base + | 9605 | /* The 84833 handles this in firmware, so CMS setup only applies to the 84823. */ |
9606 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { | ||
9607 | cms_enable = REG_RD(bp, params->shmem_base + | ||
6299 | offsetof(struct shmem_region, | 9608 | offsetof(struct shmem_region, |
6300 | dev_info.port_hw_config[params->port].default_cfg)) & | 9609 | dev_info.port_hw_config[params->port].default_cfg)) & |
6301 | PORT_HW_CFG_ENABLE_CMS_MASK; | 9610 | PORT_HW_CFG_ENABLE_CMS_MASK; |
6302 | 9611 | ||
6303 | bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, | 9612 | bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, |
6304 | MDIO_CTL_REG_84823_USER_CTRL_REG, &val); | 9613 | MDIO_CTL_REG_84823_USER_CTRL_REG, &val); |
6305 | if (cms_enable) | 9614 | if (cms_enable) |
6306 | val |= MDIO_CTL_REG_84823_USER_CTRL_CMS; | 9615 | val |= MDIO_CTL_REG_84823_USER_CTRL_CMS; |
6307 | else | 9616 | else |
6308 | val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS; | 9617 | val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS; |
6309 | bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, | 9618 | bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, |
6310 | MDIO_CTL_REG_84823_USER_CTRL_REG, val); | 9619 | MDIO_CTL_REG_84823_USER_CTRL_REG, val); |
6311 | 9620 | } | |
6312 | 9621 | ||
6313 | return rc; | 9622 | return rc; |
6314 | } | 9623 | } |
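
The AutogrEEEn block reuses the same firmware mailbox: after the FW-ready poll it selects EEE mode 2 through SCRATCH_REG3, programs the latency constants into SCRATCH_REG4 and the DATA3/DATA4 registers, and then posts PHY84833_DIAG_CMD_SET_EEE_MODE and polls for completion. With the hypothetical bnx2x_84833_fw_cmd() sketch shown earlier, the post-and-poll tail of that sequence reads roughly as:

	/* The mode select and the DATA3/DATA4 latency writes from the hunk
	 * above still have to be issued first; only the command handshake
	 * is folded into the helper, and its exact ordering differs
	 * slightly from the in-tree code. */
	rc = bnx2x_84833_fw_cmd(bp, phy, PHY84833_DIAG_CMD_SET_EEE_MODE,
				PHY84833_CONSTANT_LATENCY + 1);
	if (rc)
		DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n");
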
@@ -6318,20 +9627,16 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, | |||
6318 | struct link_vars *vars) | 9627 | struct link_vars *vars) |
6319 | { | 9628 | { |
6320 | struct bnx2x *bp = params->bp; | 9629 | struct bnx2x *bp = params->bp; |
6321 | u16 val, val1, val2, adj; | 9630 | u16 val, val1, val2; |
6322 | u8 link_up = 0; | 9631 | u8 link_up = 0; |
6323 | 9632 | ||
6324 | /* Reg offset adjustment for 84833 */ | ||
6325 | adj = 0; | ||
6326 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) | ||
6327 | adj = -1; | ||
6328 | 9633 | ||
6329 | /* Check 10G-BaseT link status */ | 9634 | /* Check 10G-BaseT link status */ |
6330 | /* Check PMD signal ok */ | 9635 | /* Check PMD signal ok */ |
6331 | bnx2x_cl45_read(bp, phy, | 9636 | bnx2x_cl45_read(bp, phy, |
6332 | MDIO_AN_DEVAD, 0xFFFA, &val1); | 9637 | MDIO_AN_DEVAD, 0xFFFA, &val1); |
6333 | bnx2x_cl45_read(bp, phy, | 9638 | bnx2x_cl45_read(bp, phy, |
6334 | MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj, | 9639 | MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, |
6335 | &val2); | 9640 | &val2); |
6336 | DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2); | 9641 | DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2); |
6337 | 9642 | ||
@@ -6403,9 +9708,10 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, | |||
6403 | return link_up; | 9708 | return link_up; |
6404 | } | 9709 | } |
6405 | 9710 | ||
6406 | static u8 bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) | 9711 | |
9712 | static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) | ||
6407 | { | 9713 | { |
6408 | u8 status = 0; | 9714 | int status = 0; |
6409 | u32 spirom_ver; | 9715 | u32 spirom_ver; |
6410 | spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F); | 9716 | spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F); |
6411 | status = bnx2x_format_ver(spirom_ver, str, len); | 9717 | status = bnx2x_format_ver(spirom_ver, str, len); |
@@ -6435,13 +9741,27 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy, | |||
6435 | { | 9741 | { |
6436 | struct bnx2x *bp = params->bp; | 9742 | struct bnx2x *bp = params->bp; |
6437 | u8 port; | 9743 | u8 port; |
6438 | if (CHIP_IS_E2(bp)) | 9744 | u16 val16; |
9745 | |||
9746 | if (!(CHIP_IS_E1(bp))) | ||
6439 | port = BP_PATH(bp); | 9747 | port = BP_PATH(bp); |
6440 | else | 9748 | else |
6441 | port = params->port; | 9749 | port = params->port; |
6442 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, | 9750 | |
6443 | MISC_REGISTERS_GPIO_OUTPUT_LOW, | 9751 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { |
6444 | port); | 9752 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, |
9753 | MISC_REGISTERS_GPIO_OUTPUT_LOW, | ||
9754 | port); | ||
9755 | } else { | ||
9756 | bnx2x_cl45_read(bp, phy, | ||
9757 | MDIO_CTL_DEVAD, | ||
9758 | 0x400f, &val16); | ||
9759 | /* Put to low power mode on newer FW */ | ||
9760 | if ((val16 & 0x303f) > 0x1009) | ||
9761 | bnx2x_cl45_write(bp, phy, | ||
9762 | MDIO_PMA_DEVAD, | ||
9763 | MDIO_PMA_REG_CTRL, 0x800); | ||
9764 | } | ||
6445 | } | 9765 | } |
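
For the 84833 branch of the link reset, the driver peeks at CTL-device register 0x400f and only issues the 0x800 low-power PMA command when the masked value exceeds 0x1009; per the in-line comment this is a firmware-revision gate, so older firmware is simply left running. A tiny sketch of that gate as a predicate (the helper name and the exact meaning of register 0x400f are assumptions inferred from the hunk above):

static bool bnx2x_84833_fw_allows_low_power(u16 ctl_400f)
{
	/* Bits outside 0x303f are ignored; newer firmware reports a value
	 * above 0x1009 and accepts the low-power request. */
	return (ctl_400f & 0x303f) > 0x1009;
}
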
6446 | 9766 | ||
6447 | static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, | 9767 | static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, |
@@ -6449,11 +9769,17 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, | |||
6449 | { | 9769 | { |
6450 | struct bnx2x *bp = params->bp; | 9770 | struct bnx2x *bp = params->bp; |
6451 | u16 val; | 9771 | u16 val; |
9772 | u8 port; | ||
9773 | |||
9774 | if (!(CHIP_IS_E1(bp))) | ||
9775 | port = BP_PATH(bp); | ||
9776 | else | ||
9777 | port = params->port; | ||
6452 | 9778 | ||
6453 | switch (mode) { | 9779 | switch (mode) { |
6454 | case LED_MODE_OFF: | 9780 | case LED_MODE_OFF: |
6455 | 9781 | ||
6456 | DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", params->port); | 9782 | DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", port); |
6457 | 9783 | ||
6458 | if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == | 9784 | if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == |
6459 | SHARED_HW_CFG_LED_EXTPHY1) { | 9785 | SHARED_HW_CFG_LED_EXTPHY1) { |
@@ -6489,7 +9815,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, | |||
6489 | case LED_MODE_FRONT_PANEL_OFF: | 9815 | case LED_MODE_FRONT_PANEL_OFF: |
6490 | 9816 | ||
6491 | DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n", | 9817 | DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n", |
6492 | params->port); | 9818 | port); |
6493 | 9819 | ||
6494 | if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == | 9820 | if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == |
6495 | SHARED_HW_CFG_LED_EXTPHY1) { | 9821 | SHARED_HW_CFG_LED_EXTPHY1) { |
@@ -6524,7 +9850,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, | |||
6524 | break; | 9850 | break; |
6525 | case LED_MODE_ON: | 9851 | case LED_MODE_ON: |
6526 | 9852 | ||
6527 | DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", params->port); | 9853 | DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", port); |
6528 | 9854 | ||
6529 | if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == | 9855 | if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == |
6530 | SHARED_HW_CFG_LED_EXTPHY1) { | 9856 | SHARED_HW_CFG_LED_EXTPHY1) { |
@@ -6571,7 +9897,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, | |||
6571 | 9897 | ||
6572 | case LED_MODE_OPER: | 9898 | case LED_MODE_OPER: |
6573 | 9899 | ||
6574 | DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", params->port); | 9900 | DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", port); |
6575 | 9901 | ||
6576 | if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == | 9902 | if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == |
6577 | SHARED_HW_CFG_LED_EXTPHY1) { | 9903 | SHARED_HW_CFG_LED_EXTPHY1) { |
@@ -6633,7 +9959,388 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, | |||
6633 | } | 9959 | } |
6634 | break; | 9960 | break; |
6635 | } | 9961 | } |
9962 | |||
9963 | /* | ||
9964 | * This is a workaround for E3+84833 until autoneg | ||
9965 | * restart is fixed in f/w | ||
9966 | */ | ||
9967 | if (CHIP_IS_E3(bp)) { | ||
9968 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
9969 | MDIO_WC_REG_GP2_STATUS_GP_2_1, &val); | ||
9970 | } | ||
9971 | } | ||
9972 | |||
9973 | /******************************************************************/ | ||
9974 | /* 54618SE PHY SECTION */ | ||
9975 | /******************************************************************/ | ||
9976 | static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, | ||
9977 | struct link_params *params, | ||
9978 | struct link_vars *vars) | ||
9979 | { | ||
9980 | struct bnx2x *bp = params->bp; | ||
9981 | u8 port; | ||
9982 | u16 autoneg_val, an_1000_val, an_10_100_val, fc_val, temp; | ||
9983 | u32 cfg_pin; | ||
9984 | |||
9985 | DP(NETIF_MSG_LINK, "54618SE cfg init\n"); | ||
9986 | usleep_range(1000, 1000); | ||
9987 | |||
9988 | /* This works with E3 only, no need to check the chip | ||
9989 | before determining the port. */ | ||
9990 | port = params->port; | ||
9991 | |||
9992 | cfg_pin = (REG_RD(bp, params->shmem_base + | ||
9993 | offsetof(struct shmem_region, | ||
9994 | dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & | ||
9995 | PORT_HW_CFG_E3_PHY_RESET_MASK) >> | ||
9996 | PORT_HW_CFG_E3_PHY_RESET_SHIFT; | ||
9997 | |||
9998 | /* Drive pin high to bring the GPHY out of reset. */ | ||
9999 | bnx2x_set_cfg_pin(bp, cfg_pin, 1); | ||
10000 | |||
10001 | /* Wait for the GPHY to come out of reset */ | ||
10002 | msleep(50); | ||
10003 | |||
10004 | /* reset phy */ | ||
10005 | bnx2x_cl22_write(bp, phy, | ||
10006 | MDIO_PMA_REG_CTRL, 0x8000); | ||
10007 | bnx2x_wait_reset_complete(bp, phy, params); | ||
10008 | |||
10009 | /* Wait for GPHY to reset */ | ||
10010 | msleep(50); | ||
10011 | |||
10012 | /* Configure LED4: set to INTR (0x6). */ | ||
10013 | /* Accessing shadow register 0xe. */ | ||
10014 | bnx2x_cl22_write(bp, phy, | ||
10015 | MDIO_REG_GPHY_SHADOW, | ||
10016 | MDIO_REG_GPHY_SHADOW_LED_SEL2); | ||
10017 | bnx2x_cl22_read(bp, phy, | ||
10018 | MDIO_REG_GPHY_SHADOW, | ||
10019 | &temp); | ||
10020 | temp &= ~(0xf << 4); | ||
10021 | temp |= (0x6 << 4); | ||
10022 | bnx2x_cl22_write(bp, phy, | ||
10023 | MDIO_REG_GPHY_SHADOW, | ||
10024 | MDIO_REG_GPHY_SHADOW_WR_ENA | temp); | ||
10025 | /* Configure INTR based on link status change. */ | ||
10026 | bnx2x_cl22_write(bp, phy, | ||
10027 | MDIO_REG_INTR_MASK, | ||
10028 | ~MDIO_REG_INTR_MASK_LINK_STATUS); | ||
10029 | |||
10030 | /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */ | ||
10031 | bnx2x_cl22_write(bp, phy, | ||
10032 | MDIO_REG_GPHY_SHADOW, | ||
10033 | MDIO_REG_GPHY_SHADOW_AUTO_DET_MED); | ||
10034 | bnx2x_cl22_read(bp, phy, | ||
10035 | MDIO_REG_GPHY_SHADOW, | ||
10036 | &temp); | ||
10037 | temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD; | ||
10038 | bnx2x_cl22_write(bp, phy, | ||
10039 | MDIO_REG_GPHY_SHADOW, | ||
10040 | MDIO_REG_GPHY_SHADOW_WR_ENA | temp); | ||
10041 | |||
10042 | /* Set up fc */ | ||
10043 | /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ | ||
10044 | bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); | ||
10045 | fc_val = 0; | ||
10046 | if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == | ||
10047 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) | ||
10048 | fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; | ||
10049 | |||
10050 | if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == | ||
10051 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) | ||
10052 | fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; | ||
10053 | |||
10054 | /* read all advertisement */ | ||
10055 | bnx2x_cl22_read(bp, phy, | ||
10056 | 0x09, | ||
10057 | &an_1000_val); | ||
10058 | |||
10059 | bnx2x_cl22_read(bp, phy, | ||
10060 | 0x04, | ||
10061 | &an_10_100_val); | ||
10062 | |||
10063 | bnx2x_cl22_read(bp, phy, | ||
10064 | MDIO_PMA_REG_CTRL, | ||
10065 | &autoneg_val); | ||
10066 | |||
10067 | /* Disable forced speed */ | ||
10068 | autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13)); | ||
10069 | an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<10) | | ||
10070 | (1<<11)); | ||
10071 | |||
10072 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && | ||
10073 | (phy->speed_cap_mask & | ||
10074 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || | ||
10075 | (phy->req_line_speed == SPEED_1000)) { | ||
10076 | an_1000_val |= (1<<8); | ||
10077 | autoneg_val |= (1<<9 | 1<<12); | ||
10078 | if (phy->req_duplex == DUPLEX_FULL) | ||
10079 | an_1000_val |= (1<<9); | ||
10080 | DP(NETIF_MSG_LINK, "Advertising 1G\n"); | ||
10081 | } else | ||
10082 | an_1000_val &= ~((1<<8) | (1<<9)); | ||
10083 | |||
10084 | bnx2x_cl22_write(bp, phy, | ||
10085 | 0x09, | ||
10086 | an_1000_val); | ||
10087 | bnx2x_cl22_read(bp, phy, | ||
10088 | 0x09, | ||
10089 | &an_1000_val); | ||
10090 | |||
10091 | /* set 100 speed advertisement */ | ||
10092 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && | ||
10093 | (phy->speed_cap_mask & | ||
10094 | (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | | ||
10095 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { | ||
10096 | an_10_100_val |= (1<<7); | ||
10097 | /* Enable autoneg and restart autoneg for legacy speeds */ | ||
10098 | autoneg_val |= (1<<9 | 1<<12); | ||
10099 | |||
10100 | if (phy->req_duplex == DUPLEX_FULL) | ||
10101 | an_10_100_val |= (1<<8); | ||
10102 | DP(NETIF_MSG_LINK, "Advertising 100M\n"); | ||
10103 | } | ||
10104 | |||
10105 | /* set 10 speed advertisement */ | ||
10106 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && | ||
10107 | (phy->speed_cap_mask & | ||
10108 | (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | | ||
10109 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) { | ||
10110 | an_10_100_val |= (1<<5); | ||
10111 | autoneg_val |= (1<<9 | 1<<12); | ||
10112 | if (phy->req_duplex == DUPLEX_FULL) | ||
10113 | an_10_100_val |= (1<<6); | ||
10114 | DP(NETIF_MSG_LINK, "Advertising 10M\n"); | ||
10115 | } | ||
10116 | |||
10117 | /* Only 10/100 are allowed to work in FORCE mode */ | ||
10118 | if (phy->req_line_speed == SPEED_100) { | ||
10119 | autoneg_val |= (1<<13); | ||
10120 | /* Enable AUTO-MDIX when autoneg is disabled */ | ||
10121 | bnx2x_cl22_write(bp, phy, | ||
10122 | 0x18, | ||
10123 | (1<<15 | 1<<9 | 7<<0)); | ||
10124 | DP(NETIF_MSG_LINK, "Setting 100M force\n"); | ||
10125 | } | ||
10126 | if (phy->req_line_speed == SPEED_10) { | ||
10127 | /* Enable AUTO-MDIX when autoneg is disabled */ | ||
10128 | bnx2x_cl22_write(bp, phy, | ||
10129 | 0x18, | ||
10130 | (1<<15 | 1<<9 | 7<<0)); | ||
10131 | DP(NETIF_MSG_LINK, "Setting 10M force\n"); | ||
10132 | } | ||
10133 | |||
10134 | /* Check if we should turn on Auto-GrEEEn */ | ||
10135 | bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp); | ||
10136 | if (temp == MDIO_REG_GPHY_ID_54618SE) { | ||
10137 | if (params->feature_config_flags & | ||
10138 | FEATURE_CONFIG_AUTOGREEEN_ENABLED) { | ||
10139 | temp = 6; | ||
10140 | DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n"); | ||
10141 | } else { | ||
10142 | temp = 0; | ||
10143 | DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n"); | ||
10144 | } | ||
10145 | bnx2x_cl22_write(bp, phy, | ||
10146 | MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD); | ||
10147 | bnx2x_cl22_write(bp, phy, | ||
10148 | MDIO_REG_GPHY_CL45_DATA_REG, | ||
10149 | MDIO_REG_GPHY_EEE_ADV); | ||
10150 | bnx2x_cl22_write(bp, phy, | ||
10151 | MDIO_REG_GPHY_CL45_ADDR_REG, | ||
10152 | (0x1 << 14) | MDIO_AN_DEVAD); | ||
10153 | bnx2x_cl22_write(bp, phy, | ||
10154 | MDIO_REG_GPHY_CL45_DATA_REG, | ||
10155 | temp); | ||
10156 | } | ||
10157 | |||
10158 | bnx2x_cl22_write(bp, phy, | ||
10159 | 0x04, | ||
10160 | an_10_100_val | fc_val); | ||
10161 | |||
10162 | if (phy->req_duplex == DUPLEX_FULL) | ||
10163 | autoneg_val |= (1<<8); | ||
10164 | |||
10165 | bnx2x_cl22_write(bp, phy, | ||
10166 | MDIO_PMA_REG_CTRL, autoneg_val); | ||
10167 | |||
10168 | return 0; | ||
10169 | } | ||
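
bnx2x_54618se_config_init() programs the standard Clause 22 registers by raw offset: 0x00 (BMCR), 0x04 (autoneg advertisement), 0x09 (1000BASE-T control) and 0x18 (auxiliary control), with the speed bits written as bare shifts. The bit positions match the generic <linux/mii.h> definitions; the following sketch restates the advertisement logic with those names purely as a reading aid (the driver keeps the numeric form, and the advertise_*/full_duplex booleans are illustrative):

	/* Constants below come from <linux/mii.h>. */
	u16 adv = fc_val, ctrl1000 = 0;
	u16 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;	/* bits 12 and 9 */

	if (advertise_1000) {
		ctrl1000 |= ADVERTISE_1000HALF;		/* reg 0x09, bit 8 */
		if (full_duplex)
			ctrl1000 |= ADVERTISE_1000FULL;	/* reg 0x09, bit 9 */
	}
	if (advertise_100) {
		adv |= ADVERTISE_100HALF;		/* reg 0x04, bit 7 */
		if (full_duplex)
			adv |= ADVERTISE_100FULL;	/* reg 0x04, bit 8 */
	}
	if (advertise_10) {
		adv |= ADVERTISE_10HALF;		/* reg 0x04, bit 5 */
		if (full_duplex)
			adv |= ADVERTISE_10FULL;	/* reg 0x04, bit 6 */
	}

	bnx2x_cl22_write(bp, phy, MII_CTRL1000, ctrl1000);
	bnx2x_cl22_write(bp, phy, MII_ADVERTISE, adv);
	bnx2x_cl22_write(bp, phy, MII_BMCR, bmcr);
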
10170 | |||
10171 | static void bnx2x_54618se_set_link_led(struct bnx2x_phy *phy, | ||
10172 | struct link_params *params, u8 mode) | ||
10173 | { | ||
10174 | struct bnx2x *bp = params->bp; | ||
10175 | DP(NETIF_MSG_LINK, "54618SE set link led (mode=%x)\n", mode); | ||
10176 | switch (mode) { | ||
10177 | case LED_MODE_FRONT_PANEL_OFF: | ||
10178 | case LED_MODE_OFF: | ||
10179 | case LED_MODE_OPER: | ||
10180 | case LED_MODE_ON: | ||
10181 | default: | ||
10182 | break; | ||
10183 | } | ||
10184 | return; | ||
10185 | } | ||
10186 | |||
10187 | static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy, | ||
10188 | struct link_params *params) | ||
10189 | { | ||
10190 | struct bnx2x *bp = params->bp; | ||
10191 | u32 cfg_pin; | ||
10192 | u8 port; | ||
10193 | |||
10194 | /* This works with E3 only, no need to check the chip | ||
10195 | before determining the port. */ | ||
10196 | port = params->port; | ||
10197 | cfg_pin = (REG_RD(bp, params->shmem_base + | ||
10198 | offsetof(struct shmem_region, | ||
10199 | dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & | ||
10200 | PORT_HW_CFG_E3_PHY_RESET_MASK) >> | ||
10201 | PORT_HW_CFG_E3_PHY_RESET_SHIFT; | ||
10202 | |||
10203 | /* Drive pin low to put GPHY in reset. */ | ||
10204 | bnx2x_set_cfg_pin(bp, cfg_pin, 0); | ||
10205 | } | ||
10206 | |||
10207 | static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy, | ||
10208 | struct link_params *params, | ||
10209 | struct link_vars *vars) | ||
10210 | { | ||
10211 | struct bnx2x *bp = params->bp; | ||
10212 | u16 val; | ||
10213 | u8 link_up = 0; | ||
10214 | u16 legacy_status, legacy_speed; | ||
10215 | |||
10216 | /* Get speed operation status */ | ||
10217 | bnx2x_cl22_read(bp, phy, | ||
10218 | 0x19, | ||
10219 | &legacy_status); | ||
10220 | DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status); | ||
10221 | |||
10222 | /* Read status to clear the PHY interrupt. */ | ||
10223 | bnx2x_cl22_read(bp, phy, | ||
10224 | MDIO_REG_INTR_STATUS, | ||
10225 | &val); | ||
10226 | |||
10227 | link_up = ((legacy_status & (1<<2)) == (1<<2)); | ||
10228 | |||
10229 | if (link_up) { | ||
10230 | legacy_speed = (legacy_status & (7<<8)); | ||
10231 | if (legacy_speed == (7<<8)) { | ||
10232 | vars->line_speed = SPEED_1000; | ||
10233 | vars->duplex = DUPLEX_FULL; | ||
10234 | } else if (legacy_speed == (6<<8)) { | ||
10235 | vars->line_speed = SPEED_1000; | ||
10236 | vars->duplex = DUPLEX_HALF; | ||
10237 | } else if (legacy_speed == (5<<8)) { | ||
10238 | vars->line_speed = SPEED_100; | ||
10239 | vars->duplex = DUPLEX_FULL; | ||
10240 | } | ||
10241 | /* Omitting 100Base-T4 for now */ | ||
10242 | else if (legacy_speed == (3<<8)) { | ||
10243 | vars->line_speed = SPEED_100; | ||
10244 | vars->duplex = DUPLEX_HALF; | ||
10245 | } else if (legacy_speed == (2<<8)) { | ||
10246 | vars->line_speed = SPEED_10; | ||
10247 | vars->duplex = DUPLEX_FULL; | ||
10248 | } else if (legacy_speed == (1<<8)) { | ||
10249 | vars->line_speed = SPEED_10; | ||
10250 | vars->duplex = DUPLEX_HALF; | ||
10251 | } else /* Should not happen */ | ||
10252 | vars->line_speed = 0; | ||
10253 | |||
10254 | DP(NETIF_MSG_LINK, "Link is up in %dMbps," | ||
10255 | " is_duplex_full= %d\n", vars->line_speed, | ||
10256 | (vars->duplex == DUPLEX_FULL)); | ||
10257 | |||
10258 | /* Check legacy speed AN resolution */ | ||
10259 | bnx2x_cl22_read(bp, phy, | ||
10260 | 0x01, | ||
10261 | &val); | ||
10262 | if (val & (1<<5)) | ||
10263 | vars->link_status |= | ||
10264 | LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; | ||
10265 | bnx2x_cl22_read(bp, phy, | ||
10266 | 0x06, | ||
10267 | &val); | ||
10268 | if ((val & (1<<0)) == 0) | ||
10269 | vars->link_status |= | ||
10270 | LINK_STATUS_PARALLEL_DETECTION_USED; | ||
10271 | |||
10272 | DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n", | ||
10273 | vars->line_speed); | ||
10274 | |||
10275 | /* Report whether EEE is resolved. */ | ||
10276 | bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val); | ||
10277 | if (val == MDIO_REG_GPHY_ID_54618SE) { | ||
10278 | if (vars->link_status & | ||
10279 | LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) | ||
10280 | val = 0; | ||
10281 | else { | ||
10282 | bnx2x_cl22_write(bp, phy, | ||
10283 | MDIO_REG_GPHY_CL45_ADDR_REG, | ||
10284 | MDIO_AN_DEVAD); | ||
10285 | bnx2x_cl22_write(bp, phy, | ||
10286 | MDIO_REG_GPHY_CL45_DATA_REG, | ||
10287 | MDIO_REG_GPHY_EEE_RESOLVED); | ||
10288 | bnx2x_cl22_write(bp, phy, | ||
10289 | MDIO_REG_GPHY_CL45_ADDR_REG, | ||
10290 | (0x1 << 14) | MDIO_AN_DEVAD); | ||
10291 | bnx2x_cl22_read(bp, phy, | ||
10292 | MDIO_REG_GPHY_CL45_DATA_REG, | ||
10293 | &val); | ||
10294 | } | ||
10295 | DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val); | ||
10296 | } | ||
10297 | |||
10298 | bnx2x_ext_phy_resolve_fc(phy, params, vars); | ||
10299 | } | ||
10300 | return link_up; | ||
6636 | } | 10301 | } |
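
The read-status path decodes the Broadcom auxiliary status register (Clause 22 offset 0x19): bit 2 reports link, and bits 10:8 hold the resolved speed/duplex code that the if/else chain above walks through (value 4, 100BASE-T4, is deliberately skipped). The same decode written as a lookup table, offered as a sketch only:

	static const struct {
		u16 speed;
		u8  duplex;
	} hcd[8] = {
		[1] = { SPEED_10,   DUPLEX_HALF },
		[2] = { SPEED_10,   DUPLEX_FULL },
		[3] = { SPEED_100,  DUPLEX_HALF },
		[5] = { SPEED_100,  DUPLEX_FULL },
		[6] = { SPEED_1000, DUPLEX_HALF },
		[7] = { SPEED_1000, DUPLEX_FULL },
	};
	u8 code = (legacy_status >> 8) & 0x7;

	vars->line_speed = hcd[code].speed;	/* 0 means "should not happen" */
	vars->duplex = hcd[code].duplex;
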
10302 | |||
10303 | static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy, | ||
10304 | struct link_params *params) | ||
10305 | { | ||
10306 | struct bnx2x *bp = params->bp; | ||
10307 | u16 val; | ||
10308 | u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; | ||
10309 | |||
10310 | DP(NETIF_MSG_LINK, "2PMA/PMD ext_phy_loopback: 54618se\n"); | ||
10311 | |||
10312 | /* Enable master/slave manual mode and set to master */ | ||
10313 | /* mii write 9 [bits set 11 12] */ | ||
10314 | bnx2x_cl22_write(bp, phy, 0x09, 3<<11); | ||
10315 | |||
10316 | /* forced 1G and disable autoneg */ | ||
10317 | /* set val [mii read 0] */ | ||
10318 | /* set val [expr $val & [bits clear 6 12 13]] */ | ||
10319 | /* set val [expr $val | [bits set 6 8]] */ | ||
10320 | /* mii write 0 $val */ | ||
10321 | bnx2x_cl22_read(bp, phy, 0x00, &val); | ||
10322 | val &= ~((1<<6) | (1<<12) | (1<<13)); | ||
10323 | val |= (1<<6) | (1<<8); | ||
10324 | bnx2x_cl22_write(bp, phy, 0x00, val); | ||
10325 | |||
10326 | /* Set external loopback and Tx using 6dB coding */ | ||
10327 | /* mii write 0x18 7 */ | ||
10328 | /* set val [mii read 0x18] */ | ||
10329 | /* mii write 0x18 [expr $val | [bits set 10 15]] */ | ||
10330 | bnx2x_cl22_write(bp, phy, 0x18, 7); | ||
10331 | bnx2x_cl22_read(bp, phy, 0x18, &val); | ||
10332 | bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15)); | ||
10333 | |||
10334 | /* This register opens the gate for the UMAC despite its name */ | ||
10335 | REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); | ||
10336 | |||
10337 | /* | ||
10338 | * Maximum Frame Length (RW). Defines a 14-Bit maximum frame | ||
10339 | * length used by the MAC receive logic to check frames. | ||
10340 | */ | ||
10341 | REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); | ||
10342 | } | ||
10343 | |||
6637 | /******************************************************************/ | 10344 | /******************************************************************/ |
6638 | /* SFX7101 PHY SECTION */ | 10345 | /* SFX7101 PHY SECTION */ |
6639 | /******************************************************************/ | 10346 | /******************************************************************/ |
@@ -6646,9 +10353,9 @@ static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy, | |||
6646 | MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100); | 10353 | MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100); |
6647 | } | 10354 | } |
6648 | 10355 | ||
6649 | static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy, | 10356 | static int bnx2x_7101_config_init(struct bnx2x_phy *phy, |
6650 | struct link_params *params, | 10357 | struct link_params *params, |
6651 | struct link_vars *vars) | 10358 | struct link_vars *vars) |
6652 | { | 10359 | { |
6653 | u16 fw_ver1, fw_ver2, val; | 10360 | u16 fw_ver1, fw_ver2, val; |
6654 | struct bnx2x *bp = params->bp; | 10361 | struct bnx2x *bp = params->bp; |
@@ -6662,7 +10369,7 @@ static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy, | |||
6662 | bnx2x_wait_reset_complete(bp, phy, params); | 10369 | bnx2x_wait_reset_complete(bp, phy, params); |
6663 | 10370 | ||
6664 | bnx2x_cl45_write(bp, phy, | 10371 | bnx2x_cl45_write(bp, phy, |
6665 | MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1); | 10372 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1); |
6666 | DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n"); | 10373 | DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n"); |
6667 | bnx2x_cl45_write(bp, phy, | 10374 | bnx2x_cl45_write(bp, phy, |
6668 | MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3)); | 10375 | MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3)); |
@@ -6694,9 +10401,9 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy, | |||
6694 | u8 link_up; | 10401 | u8 link_up; |
6695 | u16 val1, val2; | 10402 | u16 val1, val2; |
6696 | bnx2x_cl45_read(bp, phy, | 10403 | bnx2x_cl45_read(bp, phy, |
6697 | MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2); | 10404 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2); |
6698 | bnx2x_cl45_read(bp, phy, | 10405 | bnx2x_cl45_read(bp, phy, |
6699 | MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1); | 10406 | MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); |
6700 | DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n", | 10407 | DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n", |
6701 | val2, val1); | 10408 | val2, val1); |
6702 | bnx2x_cl45_read(bp, phy, | 10409 | bnx2x_cl45_read(bp, phy, |
@@ -6721,8 +10428,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy, | |||
6721 | return link_up; | 10428 | return link_up; |
6722 | } | 10429 | } |
6723 | 10430 | ||
6724 | 10431 | static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len) | |
6725 | static u8 bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len) | ||
6726 | { | 10432 | { |
6727 | if (*len < 5) | 10433 | if (*len < 5) |
6728 | return -EINVAL; | 10434 | return -EINVAL; |
@@ -6800,9 +10506,8 @@ static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy, | |||
6800 | static struct bnx2x_phy phy_null = { | 10506 | static struct bnx2x_phy phy_null = { |
6801 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN, | 10507 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN, |
6802 | .addr = 0, | 10508 | .addr = 0, |
6803 | .flags = FLAGS_INIT_XGXS_FIRST, | ||
6804 | .def_md_devad = 0, | 10509 | .def_md_devad = 0, |
6805 | .reserved = 0, | 10510 | .flags = FLAGS_INIT_XGXS_FIRST, |
6806 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10511 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
6807 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10512 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
6808 | .mdio_ctrl = 0, | 10513 | .mdio_ctrl = 0, |
@@ -6827,9 +10532,8 @@ static struct bnx2x_phy phy_null = { | |||
6827 | static struct bnx2x_phy phy_serdes = { | 10532 | static struct bnx2x_phy phy_serdes = { |
6828 | .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT, | 10533 | .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT, |
6829 | .addr = 0xff, | 10534 | .addr = 0xff, |
6830 | .flags = 0, | ||
6831 | .def_md_devad = 0, | 10535 | .def_md_devad = 0, |
6832 | .reserved = 0, | 10536 | .flags = 0, |
6833 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10537 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
6834 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10538 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
6835 | .mdio_ctrl = 0, | 10539 | .mdio_ctrl = 0, |
@@ -6843,14 +10547,14 @@ static struct bnx2x_phy phy_serdes = { | |||
6843 | SUPPORTED_Autoneg | | 10547 | SUPPORTED_Autoneg | |
6844 | SUPPORTED_Pause | | 10548 | SUPPORTED_Pause | |
6845 | SUPPORTED_Asym_Pause), | 10549 | SUPPORTED_Asym_Pause), |
6846 | .media_type = ETH_PHY_UNSPECIFIED, | 10550 | .media_type = ETH_PHY_BASE_T, |
6847 | .ver_addr = 0, | 10551 | .ver_addr = 0, |
6848 | .req_flow_ctrl = 0, | 10552 | .req_flow_ctrl = 0, |
6849 | .req_line_speed = 0, | 10553 | .req_line_speed = 0, |
6850 | .speed_cap_mask = 0, | 10554 | .speed_cap_mask = 0, |
6851 | .req_duplex = 0, | 10555 | .req_duplex = 0, |
6852 | .rsrv = 0, | 10556 | .rsrv = 0, |
6853 | .config_init = (config_init_t)bnx2x_init_serdes, | 10557 | .config_init = (config_init_t)bnx2x_xgxs_config_init, |
6854 | .read_status = (read_status_t)bnx2x_link_settings_status, | 10558 | .read_status = (read_status_t)bnx2x_link_settings_status, |
6855 | .link_reset = (link_reset_t)bnx2x_int_link_reset, | 10559 | .link_reset = (link_reset_t)bnx2x_int_link_reset, |
6856 | .config_loopback = (config_loopback_t)NULL, | 10560 | .config_loopback = (config_loopback_t)NULL, |
@@ -6863,9 +10567,8 @@ static struct bnx2x_phy phy_serdes = { | |||
6863 | static struct bnx2x_phy phy_xgxs = { | 10567 | static struct bnx2x_phy phy_xgxs = { |
6864 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, | 10568 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, |
6865 | .addr = 0xff, | 10569 | .addr = 0xff, |
6866 | .flags = 0, | ||
6867 | .def_md_devad = 0, | 10570 | .def_md_devad = 0, |
6868 | .reserved = 0, | 10571 | .flags = 0, |
6869 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10572 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
6870 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10573 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
6871 | .mdio_ctrl = 0, | 10574 | .mdio_ctrl = 0, |
@@ -6880,14 +10583,14 @@ static struct bnx2x_phy phy_xgxs = { | |||
6880 | SUPPORTED_Autoneg | | 10583 | SUPPORTED_Autoneg | |
6881 | SUPPORTED_Pause | | 10584 | SUPPORTED_Pause | |
6882 | SUPPORTED_Asym_Pause), | 10585 | SUPPORTED_Asym_Pause), |
6883 | .media_type = ETH_PHY_UNSPECIFIED, | 10586 | .media_type = ETH_PHY_CX4, |
6884 | .ver_addr = 0, | 10587 | .ver_addr = 0, |
6885 | .req_flow_ctrl = 0, | 10588 | .req_flow_ctrl = 0, |
6886 | .req_line_speed = 0, | 10589 | .req_line_speed = 0, |
6887 | .speed_cap_mask = 0, | 10590 | .speed_cap_mask = 0, |
6888 | .req_duplex = 0, | 10591 | .req_duplex = 0, |
6889 | .rsrv = 0, | 10592 | .rsrv = 0, |
6890 | .config_init = (config_init_t)bnx2x_init_xgxs, | 10593 | .config_init = (config_init_t)bnx2x_xgxs_config_init, |
6891 | .read_status = (read_status_t)bnx2x_link_settings_status, | 10594 | .read_status = (read_status_t)bnx2x_link_settings_status, |
6892 | .link_reset = (link_reset_t)bnx2x_int_link_reset, | 10595 | .link_reset = (link_reset_t)bnx2x_int_link_reset, |
6893 | .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback, | 10596 | .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback, |
@@ -6896,13 +10599,49 @@ static struct bnx2x_phy phy_xgxs = { | |||
6896 | .set_link_led = (set_link_led_t)NULL, | 10599 | .set_link_led = (set_link_led_t)NULL, |
6897 | .phy_specific_func = (phy_specific_func_t)NULL | 10600 | .phy_specific_func = (phy_specific_func_t)NULL |
6898 | }; | 10601 | }; |
10602 | static struct bnx2x_phy phy_warpcore = { | ||
10603 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, | ||
10604 | .addr = 0xff, | ||
10605 | .def_md_devad = 0, | ||
10606 | .flags = FLAGS_HW_LOCK_REQUIRED, | ||
10607 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | ||
10608 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | ||
10609 | .mdio_ctrl = 0, | ||
10610 | .supported = (SUPPORTED_10baseT_Half | | ||
10611 | SUPPORTED_10baseT_Full | | ||
10612 | SUPPORTED_100baseT_Half | | ||
10613 | SUPPORTED_100baseT_Full | | ||
10614 | SUPPORTED_1000baseT_Full | | ||
10615 | SUPPORTED_10000baseT_Full | | ||
10616 | SUPPORTED_20000baseKR2_Full | | ||
10617 | SUPPORTED_20000baseMLD2_Full | | ||
10618 | SUPPORTED_FIBRE | | ||
10619 | SUPPORTED_Autoneg | | ||
10620 | SUPPORTED_Pause | | ||
10621 | SUPPORTED_Asym_Pause), | ||
10622 | .media_type = ETH_PHY_UNSPECIFIED, | ||
10623 | .ver_addr = 0, | ||
10624 | .req_flow_ctrl = 0, | ||
10625 | .req_line_speed = 0, | ||
10626 | .speed_cap_mask = 0, | ||
10627 | /* req_duplex = */0, | ||
10628 | /* rsrv = */0, | ||
10629 | .config_init = (config_init_t)bnx2x_warpcore_config_init, | ||
10630 | .read_status = (read_status_t)bnx2x_warpcore_read_status, | ||
10631 | .link_reset = (link_reset_t)bnx2x_warpcore_link_reset, | ||
10632 | .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback, | ||
10633 | .format_fw_ver = (format_fw_ver_t)NULL, | ||
10634 | .hw_reset = (hw_reset_t)bnx2x_warpcore_hw_reset, | ||
10635 | .set_link_led = (set_link_led_t)NULL, | ||
10636 | .phy_specific_func = (phy_specific_func_t)NULL | ||
10637 | }; | ||
10638 | |||
6899 | 10639 | ||
6900 | static struct bnx2x_phy phy_7101 = { | 10640 | static struct bnx2x_phy phy_7101 = { |
6901 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, | 10641 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, |
6902 | .addr = 0xff, | 10642 | .addr = 0xff, |
6903 | .flags = FLAGS_FAN_FAILURE_DET_REQ, | ||
6904 | .def_md_devad = 0, | 10643 | .def_md_devad = 0, |
6905 | .reserved = 0, | 10644 | .flags = FLAGS_FAN_FAILURE_DET_REQ, |
6906 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10645 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
6907 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10646 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
6908 | .mdio_ctrl = 0, | 10647 | .mdio_ctrl = 0, |
@@ -6930,9 +10669,8 @@ static struct bnx2x_phy phy_7101 = { | |||
6930 | static struct bnx2x_phy phy_8073 = { | 10669 | static struct bnx2x_phy phy_8073 = { |
6931 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, | 10670 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, |
6932 | .addr = 0xff, | 10671 | .addr = 0xff, |
6933 | .flags = FLAGS_HW_LOCK_REQUIRED, | ||
6934 | .def_md_devad = 0, | 10672 | .def_md_devad = 0, |
6935 | .reserved = 0, | 10673 | .flags = FLAGS_HW_LOCK_REQUIRED, |
6936 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10674 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
6937 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10675 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
6938 | .mdio_ctrl = 0, | 10676 | .mdio_ctrl = 0, |
@@ -6943,7 +10681,7 @@ static struct bnx2x_phy phy_8073 = { | |||
6943 | SUPPORTED_Autoneg | | 10681 | SUPPORTED_Autoneg | |
6944 | SUPPORTED_Pause | | 10682 | SUPPORTED_Pause | |
6945 | SUPPORTED_Asym_Pause), | 10683 | SUPPORTED_Asym_Pause), |
6946 | .media_type = ETH_PHY_UNSPECIFIED, | 10684 | .media_type = ETH_PHY_KR, |
6947 | .ver_addr = 0, | 10685 | .ver_addr = 0, |
6948 | .req_flow_ctrl = 0, | 10686 | .req_flow_ctrl = 0, |
6949 | .req_line_speed = 0, | 10687 | .req_line_speed = 0, |
@@ -6962,9 +10700,8 @@ static struct bnx2x_phy phy_8073 = { | |||
6962 | static struct bnx2x_phy phy_8705 = { | 10700 | static struct bnx2x_phy phy_8705 = { |
6963 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705, | 10701 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705, |
6964 | .addr = 0xff, | 10702 | .addr = 0xff, |
6965 | .flags = FLAGS_INIT_XGXS_FIRST, | ||
6966 | .def_md_devad = 0, | 10703 | .def_md_devad = 0, |
6967 | .reserved = 0, | 10704 | .flags = FLAGS_INIT_XGXS_FIRST, |
6968 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10705 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
6969 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10706 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
6970 | .mdio_ctrl = 0, | 10707 | .mdio_ctrl = 0, |
@@ -6991,9 +10728,8 @@ static struct bnx2x_phy phy_8705 = { | |||
6991 | static struct bnx2x_phy phy_8706 = { | 10728 | static struct bnx2x_phy phy_8706 = { |
6992 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, | 10729 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, |
6993 | .addr = 0xff, | 10730 | .addr = 0xff, |
6994 | .flags = FLAGS_INIT_XGXS_FIRST, | ||
6995 | .def_md_devad = 0, | 10731 | .def_md_devad = 0, |
6996 | .reserved = 0, | 10732 | .flags = FLAGS_INIT_XGXS_FIRST, |
6997 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10733 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
6998 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10734 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
6999 | .mdio_ctrl = 0, | 10735 | .mdio_ctrl = 0, |
@@ -7022,10 +10758,9 @@ static struct bnx2x_phy phy_8706 = { | |||
7022 | static struct bnx2x_phy phy_8726 = { | 10758 | static struct bnx2x_phy phy_8726 = { |
7023 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, | 10759 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, |
7024 | .addr = 0xff, | 10760 | .addr = 0xff, |
10761 | .def_md_devad = 0, | ||
7025 | .flags = (FLAGS_HW_LOCK_REQUIRED | | 10762 | .flags = (FLAGS_HW_LOCK_REQUIRED | |
7026 | FLAGS_INIT_XGXS_FIRST), | 10763 | FLAGS_INIT_XGXS_FIRST), |
7027 | .def_md_devad = 0, | ||
7028 | .reserved = 0, | ||
7029 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10764 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
7030 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10765 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
7031 | .mdio_ctrl = 0, | 10766 | .mdio_ctrl = 0, |
@@ -7035,7 +10770,7 @@ static struct bnx2x_phy phy_8726 = { | |||
7035 | SUPPORTED_FIBRE | | 10770 | SUPPORTED_FIBRE | |
7036 | SUPPORTED_Pause | | 10771 | SUPPORTED_Pause | |
7037 | SUPPORTED_Asym_Pause), | 10772 | SUPPORTED_Asym_Pause), |
7038 | .media_type = ETH_PHY_SFP_FIBER, | 10773 | .media_type = ETH_PHY_NOT_PRESENT, |
7039 | .ver_addr = 0, | 10774 | .ver_addr = 0, |
7040 | .req_flow_ctrl = 0, | 10775 | .req_flow_ctrl = 0, |
7041 | .req_line_speed = 0, | 10776 | .req_line_speed = 0, |
@@ -7055,9 +10790,8 @@ static struct bnx2x_phy phy_8726 = { | |||
7055 | static struct bnx2x_phy phy_8727 = { | 10790 | static struct bnx2x_phy phy_8727 = { |
7056 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, | 10791 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, |
7057 | .addr = 0xff, | 10792 | .addr = 0xff, |
7058 | .flags = FLAGS_FAN_FAILURE_DET_REQ, | ||
7059 | .def_md_devad = 0, | 10793 | .def_md_devad = 0, |
7060 | .reserved = 0, | 10794 | .flags = FLAGS_FAN_FAILURE_DET_REQ, |
7061 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10795 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
7062 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10796 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
7063 | .mdio_ctrl = 0, | 10797 | .mdio_ctrl = 0, |
@@ -7066,7 +10800,7 @@ static struct bnx2x_phy phy_8727 = { | |||
7066 | SUPPORTED_FIBRE | | 10800 | SUPPORTED_FIBRE | |
7067 | SUPPORTED_Pause | | 10801 | SUPPORTED_Pause | |
7068 | SUPPORTED_Asym_Pause), | 10802 | SUPPORTED_Asym_Pause), |
7069 | .media_type = ETH_PHY_SFP_FIBER, | 10803 | .media_type = ETH_PHY_NOT_PRESENT, |
7070 | .ver_addr = 0, | 10804 | .ver_addr = 0, |
7071 | .req_flow_ctrl = 0, | 10805 | .req_flow_ctrl = 0, |
7072 | .req_line_speed = 0, | 10806 | .req_line_speed = 0, |
@@ -7085,10 +10819,9 @@ static struct bnx2x_phy phy_8727 = { | |||
7085 | static struct bnx2x_phy phy_8481 = { | 10819 | static struct bnx2x_phy phy_8481 = { |
7086 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, | 10820 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, |
7087 | .addr = 0xff, | 10821 | .addr = 0xff, |
10822 | .def_md_devad = 0, | ||
7088 | .flags = FLAGS_FAN_FAILURE_DET_REQ | | 10823 | .flags = FLAGS_FAN_FAILURE_DET_REQ | |
7089 | FLAGS_REARM_LATCH_SIGNAL, | 10824 | FLAGS_REARM_LATCH_SIGNAL, |
7090 | .def_md_devad = 0, | ||
7091 | .reserved = 0, | ||
7092 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10825 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
7093 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10826 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
7094 | .mdio_ctrl = 0, | 10827 | .mdio_ctrl = 0, |
@@ -7122,10 +10855,9 @@ static struct bnx2x_phy phy_8481 = { | |||
7122 | static struct bnx2x_phy phy_84823 = { | 10855 | static struct bnx2x_phy phy_84823 = { |
7123 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823, | 10856 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823, |
7124 | .addr = 0xff, | 10857 | .addr = 0xff, |
10858 | .def_md_devad = 0, | ||
7125 | .flags = FLAGS_FAN_FAILURE_DET_REQ | | 10859 | .flags = FLAGS_FAN_FAILURE_DET_REQ | |
7126 | FLAGS_REARM_LATCH_SIGNAL, | 10860 | FLAGS_REARM_LATCH_SIGNAL, |
7127 | .def_md_devad = 0, | ||
7128 | .reserved = 0, | ||
7129 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10861 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
7130 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10862 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
7131 | .mdio_ctrl = 0, | 10863 | .mdio_ctrl = 0, |
@@ -7159,16 +10891,13 @@ static struct bnx2x_phy phy_84823 = { | |||
7159 | static struct bnx2x_phy phy_84833 = { | 10891 | static struct bnx2x_phy phy_84833 = { |
7160 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, | 10892 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, |
7161 | .addr = 0xff, | 10893 | .addr = 0xff, |
10894 | .def_md_devad = 0, | ||
7162 | .flags = FLAGS_FAN_FAILURE_DET_REQ | | 10895 | .flags = FLAGS_FAN_FAILURE_DET_REQ | |
7163 | FLAGS_REARM_LATCH_SIGNAL, | 10896 | FLAGS_REARM_LATCH_SIGNAL, |
7164 | .def_md_devad = 0, | ||
7165 | .reserved = 0, | ||
7166 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10897 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
7167 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10898 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
7168 | .mdio_ctrl = 0, | 10899 | .mdio_ctrl = 0, |
7169 | .supported = (SUPPORTED_10baseT_Half | | 10900 | .supported = (SUPPORTED_100baseT_Half | |
7170 | SUPPORTED_10baseT_Full | | ||
7171 | SUPPORTED_100baseT_Half | | ||
7172 | SUPPORTED_100baseT_Full | | 10901 | SUPPORTED_100baseT_Full | |
7173 | SUPPORTED_1000baseT_Full | | 10902 | SUPPORTED_1000baseT_Full | |
7174 | SUPPORTED_10000baseT_Full | | 10903 | SUPPORTED_10000baseT_Full | |
@@ -7188,11 +10917,44 @@ static struct bnx2x_phy phy_84833 = { | |||
7188 | .link_reset = (link_reset_t)bnx2x_848x3_link_reset, | 10917 | .link_reset = (link_reset_t)bnx2x_848x3_link_reset, |
7189 | .config_loopback = (config_loopback_t)NULL, | 10918 | .config_loopback = (config_loopback_t)NULL, |
7190 | .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, | 10919 | .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, |
7191 | .hw_reset = (hw_reset_t)NULL, | 10920 | .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, |
7192 | .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, | 10921 | .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, |
7193 | .phy_specific_func = (phy_specific_func_t)NULL | 10922 | .phy_specific_func = (phy_specific_func_t)NULL |
7194 | }; | 10923 | }; |
7195 | 10924 | ||
10925 | static struct bnx2x_phy phy_54618se = { | ||
10926 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE, | ||
10927 | .addr = 0xff, | ||
10928 | .def_md_devad = 0, | ||
10929 | .flags = FLAGS_INIT_XGXS_FIRST, | ||
10930 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | ||
10931 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | ||
10932 | .mdio_ctrl = 0, | ||
10933 | .supported = (SUPPORTED_10baseT_Half | | ||
10934 | SUPPORTED_10baseT_Full | | ||
10935 | SUPPORTED_100baseT_Half | | ||
10936 | SUPPORTED_100baseT_Full | | ||
10937 | SUPPORTED_1000baseT_Full | | ||
10938 | SUPPORTED_TP | | ||
10939 | SUPPORTED_Autoneg | | ||
10940 | SUPPORTED_Pause | | ||
10941 | SUPPORTED_Asym_Pause), | ||
10942 | .media_type = ETH_PHY_BASE_T, | ||
10943 | .ver_addr = 0, | ||
10944 | .req_flow_ctrl = 0, | ||
10945 | .req_line_speed = 0, | ||
10946 | .speed_cap_mask = 0, | ||
10947 | /* req_duplex = */0, | ||
10948 | /* rsrv = */0, | ||
10949 | .config_init = (config_init_t)bnx2x_54618se_config_init, | ||
10950 | .read_status = (read_status_t)bnx2x_54618se_read_status, | ||
10951 | .link_reset = (link_reset_t)bnx2x_54618se_link_reset, | ||
10952 | .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback, | ||
10953 | .format_fw_ver = (format_fw_ver_t)NULL, | ||
10954 | .hw_reset = (hw_reset_t)NULL, | ||
10955 | .set_link_led = (set_link_led_t)bnx2x_54618se_set_link_led, | ||
10956 | .phy_specific_func = (phy_specific_func_t)NULL | ||
10957 | }; | ||
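
Like every other static descriptor in this file, phy_54618se packages the per-PHY behaviour behind a fixed set of function pointers (config_init, read_status, link_reset, and friends); bnx2x_populate_phy() below copies the matching template into the link parameters, and the core link code then dispatches through it without caring which PHY is fitted. A minimal dispatch sketch under that reading; the wrapper shown is illustrative, not the driver's actual call site:

static int bnx2x_phy_config_one(struct bnx2x_phy *phy,
				struct link_params *params,
				struct link_vars *vars)
{
	/* Every descriptor fills config_init, but guard against the NULL
	 * entries used for optional hooks elsewhere in the table. */
	if (!phy->config_init)
		return 0;
	return phy->config_init(phy, params, vars);
}
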
7196 | /*****************************************************************/ | 10958 | /*****************************************************************/ |
7197 | /* */ | 10959 | /* */ |
7198 | /* Populate the phy accordingly. Main function: bnx2x_populate_phy */ | 10960 | /* Populate the phy accordingly. Main function: bnx2x_populate_phy */ |
@@ -7259,8 +11021,8 @@ static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base, | |||
7259 | 11021 | ||
7260 | return ext_phy_config; | 11022 | return ext_phy_config; |
7261 | } | 11023 | } |
7262 | static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, | 11024 | static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, |
7263 | struct bnx2x_phy *phy) | 11025 | struct bnx2x_phy *phy) |
7264 | { | 11026 | { |
7265 | u32 phy_addr; | 11027 | u32 phy_addr; |
7266 | u32 chip_id; | 11028 | u32 chip_id; |
@@ -7269,22 +11031,105 @@ static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, | |||
7269 | dev_info.port_feature_config[port].link_config)) & | 11031 | dev_info.port_feature_config[port].link_config)) & |
7270 | PORT_FEATURE_CONNECTED_SWITCH_MASK); | 11032 | PORT_FEATURE_CONNECTED_SWITCH_MASK); |
7271 | chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16; | 11033 | chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16; |
7272 | switch (switch_cfg) { | 11034 | DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id); |
7273 | case SWITCH_CFG_1G: | 11035 | if (USES_WARPCORE(bp)) { |
11036 | u32 serdes_net_if; | ||
7274 | phy_addr = REG_RD(bp, | 11037 | phy_addr = REG_RD(bp, |
7275 | NIG_REG_SERDES0_CTRL_PHY_ADDR + | 11038 | MISC_REG_WC0_CTRL_PHY_ADDR); |
7276 | port * 0x10); | 11039 | *phy = phy_warpcore; |
7277 | *phy = phy_serdes; | 11040 | if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3) |
7278 | break; | 11041 | phy->flags |= FLAGS_4_PORT_MODE; |
7279 | case SWITCH_CFG_10G: | 11042 | else |
7280 | phy_addr = REG_RD(bp, | 11043 | phy->flags &= ~FLAGS_4_PORT_MODE; |
7281 | NIG_REG_XGXS0_CTRL_PHY_ADDR + | 11044 | /* Check Dual mode */ |
7282 | port * 0x18); | 11045 | serdes_net_if = (REG_RD(bp, shmem_base + |
7283 | *phy = phy_xgxs; | 11046 | offsetof(struct shmem_region, dev_info. |
7284 | break; | 11047 | port_hw_config[port].default_cfg)) & |
7285 | default: | 11048 | PORT_HW_CFG_NET_SERDES_IF_MASK); |
7286 | DP(NETIF_MSG_LINK, "Invalid switch_cfg\n"); | 11049 | /* |
7287 | return -EINVAL; | 11050 | * Set the appropriate supported and flags indications per |
11051 | * interface type of the chip | ||
11052 | */ | ||
11053 | switch (serdes_net_if) { | ||
11054 | case PORT_HW_CFG_NET_SERDES_IF_SGMII: | ||
11055 | phy->supported &= (SUPPORTED_10baseT_Half | | ||
11056 | SUPPORTED_10baseT_Full | | ||
11057 | SUPPORTED_100baseT_Half | | ||
11058 | SUPPORTED_100baseT_Full | | ||
11059 | SUPPORTED_1000baseT_Full | | ||
11060 | SUPPORTED_FIBRE | | ||
11061 | SUPPORTED_Autoneg | | ||
11062 | SUPPORTED_Pause | | ||
11063 | SUPPORTED_Asym_Pause); | ||
11064 | phy->media_type = ETH_PHY_BASE_T; | ||
11065 | break; | ||
11066 | case PORT_HW_CFG_NET_SERDES_IF_XFI: | ||
11067 | phy->media_type = ETH_PHY_XFP_FIBER; | ||
11068 | break; | ||
11069 | case PORT_HW_CFG_NET_SERDES_IF_SFI: | ||
11070 | phy->supported &= (SUPPORTED_1000baseT_Full | | ||
11071 | SUPPORTED_10000baseT_Full | | ||
11072 | SUPPORTED_FIBRE | | ||
11073 | SUPPORTED_Pause | | ||
11074 | SUPPORTED_Asym_Pause); | ||
11075 | phy->media_type = ETH_PHY_SFP_FIBER; | ||
11076 | break; | ||
11077 | case PORT_HW_CFG_NET_SERDES_IF_KR: | ||
11078 | phy->media_type = ETH_PHY_KR; | ||
11079 | phy->supported &= (SUPPORTED_1000baseT_Full | | ||
11080 | SUPPORTED_10000baseT_Full | | ||
11081 | SUPPORTED_FIBRE | | ||
11082 | SUPPORTED_Autoneg | | ||
11083 | SUPPORTED_Pause | | ||
11084 | SUPPORTED_Asym_Pause); | ||
11085 | break; | ||
11086 | case PORT_HW_CFG_NET_SERDES_IF_DXGXS: | ||
11087 | phy->media_type = ETH_PHY_KR; | ||
11088 | phy->flags |= FLAGS_WC_DUAL_MODE; | ||
11089 | phy->supported &= (SUPPORTED_20000baseMLD2_Full | | ||
11090 | SUPPORTED_FIBRE | | ||
11091 | SUPPORTED_Pause | | ||
11092 | SUPPORTED_Asym_Pause); | ||
11093 | break; | ||
11094 | case PORT_HW_CFG_NET_SERDES_IF_KR2: | ||
11095 | phy->media_type = ETH_PHY_KR; | ||
11096 | phy->flags |= FLAGS_WC_DUAL_MODE; | ||
11097 | phy->supported &= (SUPPORTED_20000baseKR2_Full | | ||
11098 | SUPPORTED_FIBRE | | ||
11099 | SUPPORTED_Pause | | ||
11100 | SUPPORTED_Asym_Pause); | ||
11101 | break; | ||
11102 | default: | ||
11103 | DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n", | ||
11104 | serdes_net_if); | ||
11105 | break; | ||
11106 | } | ||
11107 | |||
11108 | /* | ||
11109 | * Enable MDC/MDIO work-around for E3 A0 since free running MDC | ||
11110 | * was not set as expected. For B0, ECO will be enabled so there | ||
11111 | * won't be an issue there | ||
11112 | */ | ||
11113 | if (CHIP_REV(bp) == CHIP_REV_Ax) | ||
11114 | phy->flags |= FLAGS_MDC_MDIO_WA; | ||
11115 | } else { | ||
11116 | switch (switch_cfg) { | ||
11117 | case SWITCH_CFG_1G: | ||
11118 | phy_addr = REG_RD(bp, | ||
11119 | NIG_REG_SERDES0_CTRL_PHY_ADDR + | ||
11120 | port * 0x10); | ||
11121 | *phy = phy_serdes; | ||
11122 | break; | ||
11123 | case SWITCH_CFG_10G: | ||
11124 | phy_addr = REG_RD(bp, | ||
11125 | NIG_REG_XGXS0_CTRL_PHY_ADDR + | ||
11126 | port * 0x18); | ||
11127 | *phy = phy_xgxs; | ||
11128 | break; | ||
11129 | default: | ||
11130 | DP(NETIF_MSG_LINK, "Invalid switch_cfg\n"); | ||
11131 | return -EINVAL; | ||
11132 | } | ||
7288 | } | 11133 | } |
7289 | phy->addr = (u8)phy_addr; | 11134 | phy->addr = (u8)phy_addr; |
7290 | phy->mdio_ctrl = bnx2x_get_emac_base(bp, | 11135 | phy->mdio_ctrl = bnx2x_get_emac_base(bp, |
@@ -7302,12 +11147,12 @@ static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, | |||
7302 | return 0; | 11147 | return 0; |
7303 | } | 11148 | } |
7304 | 11149 | ||
7305 | static u8 bnx2x_populate_ext_phy(struct bnx2x *bp, | 11150 | static int bnx2x_populate_ext_phy(struct bnx2x *bp, |
7306 | u8 phy_index, | 11151 | u8 phy_index, |
7307 | u32 shmem_base, | 11152 | u32 shmem_base, |
7308 | u32 shmem2_base, | 11153 | u32 shmem2_base, |
7309 | u8 port, | 11154 | u8 port, |
7310 | struct bnx2x_phy *phy) | 11155 | struct bnx2x_phy *phy) |
7311 | { | 11156 | { |
7312 | u32 ext_phy_config, phy_type, config2; | 11157 | u32 ext_phy_config, phy_type, config2; |
7313 | u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH; | 11158 | u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH; |
@@ -7336,6 +11181,7 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp, | |||
7336 | *phy = phy_8727; | 11181 | *phy = phy_8727; |
7337 | phy->flags |= FLAGS_NOC; | 11182 | phy->flags |= FLAGS_NOC; |
7338 | break; | 11183 | break; |
11184 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: | ||
7339 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: | 11185 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: |
7340 | mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; | 11186 | mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; |
7341 | *phy = phy_8727; | 11187 | *phy = phy_8727; |
@@ -7349,6 +11195,9 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp, | |||
7349 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: | 11195 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: |
7350 | *phy = phy_84833; | 11196 | *phy = phy_84833; |
7351 | break; | 11197 | break; |
11198 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE: | ||
11199 | *phy = phy_54618se; | ||
11200 | break; | ||
7352 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: | 11201 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: |
7353 | *phy = phy_7101; | 11202 | *phy = phy_7101; |
7354 | break; | 11203 | break; |
@@ -7410,10 +11259,10 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp, | |||
7410 | return 0; | 11259 | return 0; |
7411 | } | 11260 | } |
7412 | 11261 | ||
7413 | static u8 bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base, | 11262 | static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base, |
7414 | u32 shmem2_base, u8 port, struct bnx2x_phy *phy) | 11263 | u32 shmem2_base, u8 port, struct bnx2x_phy *phy) |
7415 | { | 11264 | { |
7416 | u8 status = 0; | 11265 | int status = 0; |
7417 | phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN; | 11266 | phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN; |
7418 | if (phy_index == INT_PHY) | 11267 | if (phy_index == INT_PHY) |
7419 | return bnx2x_populate_int_phy(bp, shmem_base, port, phy); | 11268 | return bnx2x_populate_int_phy(bp, shmem_base, port, phy); |
@@ -7527,10 +11376,10 @@ u32 bnx2x_phy_selection(struct link_params *params) | |||
7527 | } | 11376 | } |
7528 | 11377 | ||
7529 | 11378 | ||
7530 | u8 bnx2x_phy_probe(struct link_params *params) | 11379 | int bnx2x_phy_probe(struct link_params *params) |
7531 | { | 11380 | { |
7532 | u8 phy_index, actual_phy_idx, link_cfg_idx; | 11381 | u8 phy_index, actual_phy_idx, link_cfg_idx; |
7533 | u32 phy_config_swapped; | 11382 | u32 phy_config_swapped, sync_offset, media_types; |
7534 | struct bnx2x *bp = params->bp; | 11383 | struct bnx2x *bp = params->bp; |
7535 | struct bnx2x_phy *phy; | 11384 | struct bnx2x_phy *phy; |
7536 | params->num_phys = 0; | 11385 | params->num_phys = 0; |
@@ -7567,6 +11416,26 @@ u8 bnx2x_phy_probe(struct link_params *params) | |||
7567 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) | 11416 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) |
7568 | break; | 11417 | break; |
7569 | 11418 | ||
11419 | sync_offset = params->shmem_base + | ||
11420 | offsetof(struct shmem_region, | ||
11421 | dev_info.port_hw_config[params->port].media_type); | ||
11422 | media_types = REG_RD(bp, sync_offset); | ||
11423 | |||
11424 | /* | ||
11425 | * Update media type for non-PMF sync only for the first time | ||
11426 | * In case the media type changes afterwards, it will be updated | ||
11427 | * using the update_status function | ||
11428 | */ | ||
11429 | if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK << | ||
11430 | (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * | ||
11431 | actual_phy_idx))) == 0) { | ||
11432 | media_types |= ((phy->media_type & | ||
11433 | PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) << | ||
11434 | (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * | ||
11435 | actual_phy_idx)); | ||
11436 | } | ||
11437 | REG_WR(bp, sync_offset, media_types); | ||
11438 | |||
7570 | bnx2x_phy_def_cfg(params, phy, phy_index); | 11439 | bnx2x_phy_def_cfg(params, phy, phy_index); |
7571 | params->num_phys++; | 11440 | params->num_phys++; |
7572 | } | 11441 | } |
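Illustrative sketch, not part of the patch: the media_types read-modify-write above packs one media-type byte per PHY into the shared-memory word. Assuming the usual HSI layout (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK == 0xff, PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT == 8), the update is equivalent to this hypothetical helper, which fills a PHY's byte only on the first probe:

	/* Hypothetical helper, for illustration only. */
	static u32 bnx2x_sync_media_byte(u32 media_types, u32 phy_media, u8 phy_idx)
	{
		u32 mask = 0xff << (8 * phy_idx);	/* PHY0_MASK << (PHY1_SHIFT * idx) */

		if ((media_types & mask) == 0)		/* byte still empty -> first sync */
			media_types |= (phy_media & 0xff) << (8 * phy_idx);
		return media_types;			/* written back with REG_WR() */
	}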
@@ -7575,77 +11444,10 @@ u8 bnx2x_phy_probe(struct link_params *params) | |||
7575 | return 0; | 11444 | return 0; |
7576 | } | 11445 | } |
7577 | 11446 | ||
7578 | static void set_phy_vars(struct link_params *params) | 11447 | void bnx2x_init_bmac_loopback(struct link_params *params, |
7579 | { | 11448 | struct link_vars *vars) |
7580 | struct bnx2x *bp = params->bp; | ||
7581 | u8 actual_phy_idx, phy_index, link_cfg_idx; | ||
7582 | u8 phy_config_swapped = params->multi_phy_config & | ||
7583 | PORT_HW_CFG_PHY_SWAPPED_ENABLED; | ||
7584 | for (phy_index = INT_PHY; phy_index < params->num_phys; | ||
7585 | phy_index++) { | ||
7586 | link_cfg_idx = LINK_CONFIG_IDX(phy_index); | ||
7587 | actual_phy_idx = phy_index; | ||
7588 | if (phy_config_swapped) { | ||
7589 | if (phy_index == EXT_PHY1) | ||
7590 | actual_phy_idx = EXT_PHY2; | ||
7591 | else if (phy_index == EXT_PHY2) | ||
7592 | actual_phy_idx = EXT_PHY1; | ||
7593 | } | ||
7594 | params->phy[actual_phy_idx].req_flow_ctrl = | ||
7595 | params->req_flow_ctrl[link_cfg_idx]; | ||
7596 | |||
7597 | params->phy[actual_phy_idx].req_line_speed = | ||
7598 | params->req_line_speed[link_cfg_idx]; | ||
7599 | |||
7600 | params->phy[actual_phy_idx].speed_cap_mask = | ||
7601 | params->speed_cap_mask[link_cfg_idx]; | ||
7602 | |||
7603 | params->phy[actual_phy_idx].req_duplex = | ||
7604 | params->req_duplex[link_cfg_idx]; | ||
7605 | |||
7606 | DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x," | ||
7607 | " speed_cap_mask %x\n", | ||
7608 | params->phy[actual_phy_idx].req_flow_ctrl, | ||
7609 | params->phy[actual_phy_idx].req_line_speed, | ||
7610 | params->phy[actual_phy_idx].speed_cap_mask); | ||
7611 | } | ||
7612 | } | ||
7613 | |||
7614 | u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) | ||
7615 | { | 11449 | { |
7616 | struct bnx2x *bp = params->bp; | 11450 | struct bnx2x *bp = params->bp; |
7617 | DP(NETIF_MSG_LINK, "Phy Initialization started\n"); | ||
7618 | DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n", | ||
7619 | params->req_line_speed[0], params->req_flow_ctrl[0]); | ||
7620 | DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n", | ||
7621 | params->req_line_speed[1], params->req_flow_ctrl[1]); | ||
7622 | vars->link_status = 0; | ||
7623 | vars->phy_link_up = 0; | ||
7624 | vars->link_up = 0; | ||
7625 | vars->line_speed = 0; | ||
7626 | vars->duplex = DUPLEX_FULL; | ||
7627 | vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; | ||
7628 | vars->mac_type = MAC_TYPE_NONE; | ||
7629 | vars->phy_flags = 0; | ||
7630 | |||
7631 | /* disable attentions */ | ||
7632 | bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, | ||
7633 | (NIG_MASK_XGXS0_LINK_STATUS | | ||
7634 | NIG_MASK_XGXS0_LINK10G | | ||
7635 | NIG_MASK_SERDES0_LINK_STATUS | | ||
7636 | NIG_MASK_MI_INT)); | ||
7637 | |||
7638 | bnx2x_emac_init(params, vars); | ||
7639 | |||
7640 | if (params->num_phys == 0) { | ||
7641 | DP(NETIF_MSG_LINK, "No phy found for initialization !!\n"); | ||
7642 | return -EINVAL; | ||
7643 | } | ||
7644 | set_phy_vars(params); | ||
7645 | |||
7646 | DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys); | ||
7647 | if (params->loopback_mode == LOOPBACK_BMAC) { | ||
7648 | |||
7649 | vars->link_up = 1; | 11451 | vars->link_up = 1; |
7650 | vars->line_speed = SPEED_10000; | 11452 | vars->line_speed = SPEED_10000; |
7651 | vars->duplex = DUPLEX_FULL; | 11453 | vars->duplex = DUPLEX_FULL; |
@@ -7660,9 +11462,12 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) | |||
7660 | bnx2x_bmac_enable(params, vars, 1); | 11462 | bnx2x_bmac_enable(params, vars, 1); |
7661 | 11463 | ||
7662 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); | 11464 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); |
11465 | } | ||
7663 | 11466 | ||
7664 | } else if (params->loopback_mode == LOOPBACK_EMAC) { | 11467 | void bnx2x_init_emac_loopback(struct link_params *params, |
7665 | 11468 | struct link_vars *vars) | |
11469 | { | ||
11470 | struct bnx2x *bp = params->bp; | ||
7666 | vars->link_up = 1; | 11471 | vars->link_up = 1; |
7667 | vars->line_speed = SPEED_1000; | 11472 | vars->line_speed = SPEED_1000; |
7668 | vars->duplex = DUPLEX_FULL; | 11473 | vars->duplex = DUPLEX_FULL; |
@@ -7676,29 +11481,81 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) | |||
7676 | bnx2x_emac_enable(params, vars, 1); | 11481 | bnx2x_emac_enable(params, vars, 1); |
7677 | bnx2x_emac_program(params, vars); | 11482 | bnx2x_emac_program(params, vars); |
7678 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); | 11483 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); |
11484 | } | ||
7679 | 11485 | ||
7680 | } else if ((params->loopback_mode == LOOPBACK_XGXS) || | 11486 | void bnx2x_init_xmac_loopback(struct link_params *params, |
7681 | (params->loopback_mode == LOOPBACK_EXT_PHY)) { | 11487 | struct link_vars *vars) |
11488 | { | ||
11489 | struct bnx2x *bp = params->bp; | ||
11490 | vars->link_up = 1; | ||
11491 | if (!params->req_line_speed[0]) | ||
11492 | vars->line_speed = SPEED_10000; | ||
11493 | else | ||
11494 | vars->line_speed = params->req_line_speed[0]; | ||
11495 | vars->duplex = DUPLEX_FULL; | ||
11496 | vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; | ||
11497 | vars->mac_type = MAC_TYPE_XMAC; | ||
11498 | vars->phy_flags = PHY_XGXS_FLAG; | ||
11499 | /* | ||
11500 | * Set WC to loopback mode since link is required to provide clock | ||
11501 | * to the XMAC in 20G mode | ||
11502 | */ | ||
11503 | if (vars->line_speed == SPEED_20000) { | ||
11504 | bnx2x_set_aer_mmd(params, ¶ms->phy[0]); | ||
11505 | bnx2x_warpcore_reset_lane(bp, ¶ms->phy[0], 0); | ||
11506 | params->phy[INT_PHY].config_loopback( | ||
11507 | ¶ms->phy[INT_PHY], | ||
11508 | params); | ||
11509 | } | ||
11510 | bnx2x_xmac_enable(params, vars, 1); | ||
11511 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); | ||
11512 | } | ||
11513 | |||
11514 | void bnx2x_init_umac_loopback(struct link_params *params, | ||
11515 | struct link_vars *vars) | ||
11516 | { | ||
11517 | struct bnx2x *bp = params->bp; | ||
11518 | vars->link_up = 1; | ||
11519 | vars->line_speed = SPEED_1000; | ||
11520 | vars->duplex = DUPLEX_FULL; | ||
11521 | vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; | ||
11522 | vars->mac_type = MAC_TYPE_UMAC; | ||
11523 | vars->phy_flags = PHY_XGXS_FLAG; | ||
11524 | bnx2x_umac_enable(params, vars, 1); | ||
7682 | 11525 | ||
11526 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); | ||
11527 | } | ||
11528 | |||
11529 | void bnx2x_init_xgxs_loopback(struct link_params *params, | ||
11530 | struct link_vars *vars) | ||
11531 | { | ||
11532 | struct bnx2x *bp = params->bp; | ||
7683 | vars->link_up = 1; | 11533 | vars->link_up = 1; |
7684 | vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; | 11534 | vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; |
7685 | vars->duplex = DUPLEX_FULL; | 11535 | vars->duplex = DUPLEX_FULL; |
7686 | if (params->req_line_speed[0] == SPEED_1000) { | 11536 | if (params->req_line_speed[0] == SPEED_1000) |
7687 | vars->line_speed = SPEED_1000; | 11537 | vars->line_speed = SPEED_1000; |
7688 | vars->mac_type = MAC_TYPE_EMAC; | 11538 | else |
7689 | } else { | ||
7690 | vars->line_speed = SPEED_10000; | 11539 | vars->line_speed = SPEED_10000; |
7691 | vars->mac_type = MAC_TYPE_BMAC; | ||
7692 | } | ||
7693 | 11540 | ||
11541 | if (!USES_WARPCORE(bp)) | ||
7694 | bnx2x_xgxs_deassert(params); | 11542 | bnx2x_xgxs_deassert(params); |
7695 | bnx2x_link_initialize(params, vars); | 11543 | bnx2x_link_initialize(params, vars); |
7696 | 11544 | ||
7697 | if (params->req_line_speed[0] == SPEED_1000) { | 11545 | if (params->req_line_speed[0] == SPEED_1000) { |
11546 | if (USES_WARPCORE(bp)) | ||
11547 | bnx2x_umac_enable(params, vars, 0); | ||
11548 | else { | ||
7698 | bnx2x_emac_program(params, vars); | 11549 | bnx2x_emac_program(params, vars); |
7699 | bnx2x_emac_enable(params, vars, 0); | 11550 | bnx2x_emac_enable(params, vars, 0); |
7700 | } else | 11551 | } |
11552 | } else { | ||
11553 | if (USES_WARPCORE(bp)) | ||
11554 | bnx2x_xmac_enable(params, vars, 0); | ||
11555 | else | ||
7701 | bnx2x_bmac_enable(params, vars, 0); | 11556 | bnx2x_bmac_enable(params, vars, 0); |
11557 | } | ||
11558 | |||
7702 | if (params->loopback_mode == LOOPBACK_XGXS) { | 11559 | if (params->loopback_mode == LOOPBACK_XGXS) { |
7703 | /* set 10G XGXS loopback */ | 11560 | /* set 10G XGXS loopback */ |
7704 | params->phy[INT_PHY].config_loopback( | 11561 | params->phy[INT_PHY].config_loopback( |
@@ -7718,24 +11575,76 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) | |||
7718 | } | 11575 | } |
7719 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); | 11576 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); |
7720 | 11577 | ||
7721 | bnx2x_set_led(params, vars, | 11578 | bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); |
7722 | LED_MODE_OPER, vars->line_speed); | 11579 | } |
7723 | } else | ||
7724 | /* No loopback */ | ||
7725 | { | ||
7726 | if (params->switch_cfg == SWITCH_CFG_10G) | ||
7727 | bnx2x_xgxs_deassert(params); | ||
7728 | else | ||
7729 | bnx2x_serdes_deassert(bp, params->port); | ||
7730 | 11580 | ||
11581 | int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) | ||
11582 | { | ||
11583 | struct bnx2x *bp = params->bp; | ||
11584 | DP(NETIF_MSG_LINK, "Phy Initialization started\n"); | ||
11585 | DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n", | ||
11586 | params->req_line_speed[0], params->req_flow_ctrl[0]); | ||
11587 | DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n", | ||
11588 | params->req_line_speed[1], params->req_flow_ctrl[1]); | ||
11589 | vars->link_status = 0; | ||
11590 | vars->phy_link_up = 0; | ||
11591 | vars->link_up = 0; | ||
11592 | vars->line_speed = 0; | ||
11593 | vars->duplex = DUPLEX_FULL; | ||
11594 | vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; | ||
11595 | vars->mac_type = MAC_TYPE_NONE; | ||
11596 | vars->phy_flags = 0; | ||
11597 | |||
11598 | /* disable attentions */ | ||
11599 | bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, | ||
11600 | (NIG_MASK_XGXS0_LINK_STATUS | | ||
11601 | NIG_MASK_XGXS0_LINK10G | | ||
11602 | NIG_MASK_SERDES0_LINK_STATUS | | ||
11603 | NIG_MASK_MI_INT)); | ||
11604 | |||
11605 | bnx2x_emac_init(params, vars); | ||
11606 | |||
11607 | if (params->num_phys == 0) { | ||
11608 | DP(NETIF_MSG_LINK, "No phy found for initialization !!\n"); | ||
11609 | return -EINVAL; | ||
11610 | } | ||
11611 | set_phy_vars(params, vars); | ||
11612 | |||
11613 | DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys); | ||
11614 | switch (params->loopback_mode) { | ||
11615 | case LOOPBACK_BMAC: | ||
11616 | bnx2x_init_bmac_loopback(params, vars); | ||
11617 | break; | ||
11618 | case LOOPBACK_EMAC: | ||
11619 | bnx2x_init_emac_loopback(params, vars); | ||
11620 | break; | ||
11621 | case LOOPBACK_XMAC: | ||
11622 | bnx2x_init_xmac_loopback(params, vars); | ||
11623 | break; | ||
11624 | case LOOPBACK_UMAC: | ||
11625 | bnx2x_init_umac_loopback(params, vars); | ||
11626 | break; | ||
11627 | case LOOPBACK_XGXS: | ||
11628 | case LOOPBACK_EXT_PHY: | ||
11629 | bnx2x_init_xgxs_loopback(params, vars); | ||
11630 | break; | ||
11631 | default: | ||
11632 | if (!CHIP_IS_E3(bp)) { | ||
11633 | if (params->switch_cfg == SWITCH_CFG_10G) | ||
11634 | bnx2x_xgxs_deassert(params); | ||
11635 | else | ||
11636 | bnx2x_serdes_deassert(bp, params->port); | ||
11637 | } | ||
7731 | bnx2x_link_initialize(params, vars); | 11638 | bnx2x_link_initialize(params, vars); |
7732 | msleep(30); | 11639 | msleep(30); |
7733 | bnx2x_link_int_enable(params); | 11640 | bnx2x_link_int_enable(params); |
11641 | break; | ||
7734 | } | 11642 | } |
7735 | return 0; | 11643 | return 0; |
7736 | } | 11644 | } |
7737 | u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, | 11645 | |
7738 | u8 reset_ext_phy) | 11646 | int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, |
11647 | u8 reset_ext_phy) | ||
7739 | { | 11648 | { |
7740 | struct bnx2x *bp = params->bp; | 11649 | struct bnx2x *bp = params->bp; |
7741 | u8 phy_index, port = params->port, clear_latch_ind = 0; | 11650 | u8 phy_index, port = params->port, clear_latch_ind = 0; |
@@ -7753,14 +11662,19 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, | |||
7753 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); | 11662 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); |
7754 | 11663 | ||
7755 | /* disable nig egress interface */ | 11664 | /* disable nig egress interface */ |
7756 | REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0); | 11665 | if (!CHIP_IS_E3(bp)) { |
7757 | REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); | 11666 | REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0); |
11667 | REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); | ||
11668 | } | ||
7758 | 11669 | ||
7759 | /* Stop BigMac rx */ | 11670 | /* Stop BigMac rx */ |
7760 | bnx2x_bmac_rx_disable(bp, port); | 11671 | if (!CHIP_IS_E3(bp)) |
7761 | 11672 | bnx2x_bmac_rx_disable(bp, port); | |
11673 | else | ||
11674 | bnx2x_xmac_disable(params); | ||
7762 | /* disable emac */ | 11675 | /* disable emac */ |
7763 | REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); | 11676 | if (!CHIP_IS_E3(bp)) |
11677 | REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); | ||
7764 | 11678 | ||
7765 | msleep(10); | 11679 | msleep(10); |
7766 | /* The PHY reset is controlled by GPIO 1 | 11680 | /* The PHY reset is controlled by GPIO 1 |
@@ -7796,21 +11710,22 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, | |||
7796 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); | 11710 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); |
7797 | 11711 | ||
7798 | /* disable nig ingress interface */ | 11712 | /* disable nig ingress interface */ |
7799 | REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0); | 11713 | if (!CHIP_IS_E3(bp)) { |
7800 | REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0); | 11714 | REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0); |
7801 | REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0); | 11715 | REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0); |
7802 | REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); | 11716 | } |
7803 | vars->link_up = 0; | 11717 | vars->link_up = 0; |
11718 | vars->phy_flags = 0; | ||
7804 | return 0; | 11719 | return 0; |
7805 | } | 11720 | } |
7806 | 11721 | ||
7807 | /****************************************************************************/ | 11722 | /****************************************************************************/ |
7808 | /* Common function */ | 11723 | /* Common function */ |
7809 | /****************************************************************************/ | 11724 | /****************************************************************************/ |
7810 | static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, | 11725 | static int bnx2x_8073_common_init_phy(struct bnx2x *bp, |
7811 | u32 shmem_base_path[], | 11726 | u32 shmem_base_path[], |
7812 | u32 shmem2_base_path[], u8 phy_index, | 11727 | u32 shmem2_base_path[], u8 phy_index, |
7813 | u32 chip_id) | 11728 | u32 chip_id) |
7814 | { | 11729 | { |
7815 | struct bnx2x_phy phy[PORT_MAX]; | 11730 | struct bnx2x_phy phy[PORT_MAX]; |
7816 | struct bnx2x_phy *phy_blk[PORT_MAX]; | 11731 | struct bnx2x_phy *phy_blk[PORT_MAX]; |
@@ -7826,14 +11741,14 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, | |||
7826 | for (port = PORT_MAX - 1; port >= PORT_0; port--) { | 11741 | for (port = PORT_MAX - 1; port >= PORT_0; port--) { |
7827 | u32 shmem_base, shmem2_base; | 11742 | u32 shmem_base, shmem2_base; |
7828 | /* In E2, the same phy is used for port0 of the two paths */ | 11743 | /* In E2, the same phy is used for port0 of the two paths */
7829 | if (CHIP_IS_E2(bp)) { | 11744 | if (CHIP_IS_E1x(bp)) { |
7830 | shmem_base = shmem_base_path[port]; | ||
7831 | shmem2_base = shmem2_base_path[port]; | ||
7832 | port_of_path = 0; | ||
7833 | } else { | ||
7834 | shmem_base = shmem_base_path[0]; | 11745 | shmem_base = shmem_base_path[0]; |
7835 | shmem2_base = shmem2_base_path[0]; | 11746 | shmem2_base = shmem2_base_path[0]; |
7836 | port_of_path = port; | 11747 | port_of_path = port; |
11748 | } else { | ||
11749 | shmem_base = shmem_base_path[port]; | ||
11750 | shmem2_base = shmem2_base_path[port]; | ||
11751 | port_of_path = 0; | ||
7837 | } | 11752 | } |
7838 | 11753 | ||
7839 | /* Extract the ext phy address for the port */ | 11754 | /* Extract the ext phy address for the port */ |
@@ -7877,10 +11792,10 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, | |||
7877 | 11792 | ||
7878 | /* PART2 - Download firmware to both phys */ | 11793 | /* PART2 - Download firmware to both phys */ |
7879 | for (port = PORT_MAX - 1; port >= PORT_0; port--) { | 11794 | for (port = PORT_MAX - 1; port >= PORT_0; port--) { |
7880 | if (CHIP_IS_E2(bp)) | 11795 | if (CHIP_IS_E1x(bp)) |
7881 | port_of_path = 0; | ||
7882 | else | ||
7883 | port_of_path = port; | 11796 | port_of_path = port; |
11797 | else | ||
11798 | port_of_path = 0; | ||
7884 | 11799 | ||
7885 | DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", | 11800 | DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", |
7886 | phy_blk[port]->addr); | 11801 | phy_blk[port]->addr); |
@@ -7933,10 +11848,10 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, | |||
7933 | } | 11848 | } |
7934 | return 0; | 11849 | return 0; |
7935 | } | 11850 | } |
7936 | static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, | 11851 | static int bnx2x_8726_common_init_phy(struct bnx2x *bp, |
7937 | u32 shmem_base_path[], | 11852 | u32 shmem_base_path[], |
7938 | u32 shmem2_base_path[], u8 phy_index, | 11853 | u32 shmem2_base_path[], u8 phy_index, |
7939 | u32 chip_id) | 11854 | u32 chip_id) |
7940 | { | 11855 | { |
7941 | u32 val; | 11856 | u32 val; |
7942 | s8 port; | 11857 | s8 port; |
@@ -7954,12 +11869,12 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, | |||
7954 | u32 shmem_base, shmem2_base; | 11869 | u32 shmem_base, shmem2_base; |
7955 | 11870 | ||
7956 | /* In E2, the same phy is used for port0 of the two paths */ | 11871 | /* In E2, the same phy is used for port0 of the two paths */
7957 | if (CHIP_IS_E2(bp)) { | 11872 | if (CHIP_IS_E1x(bp)) { |
7958 | shmem_base = shmem_base_path[port]; | ||
7959 | shmem2_base = shmem2_base_path[port]; | ||
7960 | } else { | ||
7961 | shmem_base = shmem_base_path[0]; | 11873 | shmem_base = shmem_base_path[0]; |
7962 | shmem2_base = shmem2_base_path[0]; | 11874 | shmem2_base = shmem2_base_path[0]; |
11875 | } else { | ||
11876 | shmem_base = shmem_base_path[port]; | ||
11877 | shmem2_base = shmem2_base_path[port]; | ||
7963 | } | 11878 | } |
7964 | /* Extract the ext phy address for the port */ | 11879 | /* Extract the ext phy address for the port */ |
7965 | if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, | 11880 | if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, |
@@ -8027,10 +11942,11 @@ static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base, | |||
8027 | break; | 11942 | break; |
8028 | } | 11943 | } |
8029 | } | 11944 | } |
8030 | static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, | 11945 | |
8031 | u32 shmem_base_path[], | 11946 | static int bnx2x_8727_common_init_phy(struct bnx2x *bp, |
8032 | u32 shmem2_base_path[], u8 phy_index, | 11947 | u32 shmem_base_path[], |
8033 | u32 chip_id) | 11948 | u32 shmem2_base_path[], u8 phy_index, |
11949 | u32 chip_id) | ||
8034 | { | 11950 | { |
8035 | s8 port, reset_gpio; | 11951 | s8 port, reset_gpio; |
8036 | u32 swap_val, swap_override; | 11952 | u32 swap_val, swap_override; |
@@ -8067,14 +11983,14 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, | |||
8067 | u32 shmem_base, shmem2_base; | 11983 | u32 shmem_base, shmem2_base; |
8068 | 11984 | ||
8069 | /* In E2, the same phy is used for port0 of the two paths */ | 11985 | /* In E2, the same phy is used for port0 of the two paths */
8070 | if (CHIP_IS_E2(bp)) { | 11986 | if (CHIP_IS_E1x(bp)) { |
8071 | shmem_base = shmem_base_path[port]; | ||
8072 | shmem2_base = shmem2_base_path[port]; | ||
8073 | port_of_path = 0; | ||
8074 | } else { | ||
8075 | shmem_base = shmem_base_path[0]; | 11987 | shmem_base = shmem_base_path[0]; |
8076 | shmem2_base = shmem2_base_path[0]; | 11988 | shmem2_base = shmem2_base_path[0]; |
8077 | port_of_path = port; | 11989 | port_of_path = port; |
11990 | } else { | ||
11991 | shmem_base = shmem_base_path[port]; | ||
11992 | shmem2_base = shmem2_base_path[port]; | ||
11993 | port_of_path = 0; | ||
8078 | } | 11994 | } |
8079 | 11995 | ||
8080 | /* Extract the ext phy address for the port */ | 11996 | /* Extract the ext phy address for the port */ |
@@ -8109,25 +12025,29 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, | |||
8109 | } | 12025 | } |
8110 | /* PART2 - Download firmware to both phys */ | 12026 | /* PART2 - Download firmware to both phys */ |
8111 | for (port = PORT_MAX - 1; port >= PORT_0; port--) { | 12027 | for (port = PORT_MAX - 1; port >= PORT_0; port--) { |
8112 | if (CHIP_IS_E2(bp)) | 12028 | if (CHIP_IS_E1x(bp)) |
8113 | port_of_path = 0; | ||
8114 | else | ||
8115 | port_of_path = port; | 12029 | port_of_path = port; |
12030 | else | ||
12031 | port_of_path = 0; | ||
8116 | DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", | 12032 | DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", |
8117 | phy_blk[port]->addr); | 12033 | phy_blk[port]->addr); |
8118 | if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], | 12034 | if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], |
8119 | port_of_path)) | 12035 | port_of_path)) |
8120 | return -EINVAL; | 12036 | return -EINVAL; |
12037 | /* Disable PHY transmitter output */ | ||
12038 | bnx2x_cl45_write(bp, phy_blk[port], | ||
12039 | MDIO_PMA_DEVAD, | ||
12040 | MDIO_PMA_REG_TX_DISABLE, 1); | ||
8121 | 12041 | ||
8122 | } | 12042 | } |
8123 | return 0; | 12043 | return 0; |
8124 | } | 12044 | } |
8125 | 12045 | ||
8126 | static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], | 12046 | static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], |
8127 | u32 shmem2_base_path[], u8 phy_index, | 12047 | u32 shmem2_base_path[], u8 phy_index, |
8128 | u32 ext_phy_type, u32 chip_id) | 12048 | u32 ext_phy_type, u32 chip_id) |
8129 | { | 12049 | { |
8130 | u8 rc = 0; | 12050 | int rc = 0; |
8131 | 12051 | ||
8132 | switch (ext_phy_type) { | 12052 | switch (ext_phy_type) { |
8133 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: | 12053 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: |
@@ -8135,7 +12055,7 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], | |||
8135 | shmem2_base_path, | 12055 | shmem2_base_path, |
8136 | phy_index, chip_id); | 12056 | phy_index, chip_id); |
8137 | break; | 12057 | break; |
8138 | 12058 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: | |
8139 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: | 12059 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: |
8140 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC: | 12060 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC: |
8141 | rc = bnx2x_8727_common_init_phy(bp, shmem_base_path, | 12061 | rc = bnx2x_8727_common_init_phy(bp, shmem_base_path, |
@@ -8152,6 +12072,13 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], | |||
8152 | shmem2_base_path, | 12072 | shmem2_base_path, |
8153 | phy_index, chip_id); | 12073 | phy_index, chip_id); |
8154 | break; | 12074 | break; |
12075 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: | ||
12076 | /* | ||
12077 | * GPIO3's are linked, and so both need to be toggled | ||
12078 | * to obtain required 2us pulse. | ||
12079 | */ | ||
12080 | rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id); | ||
12081 | break; | ||
8155 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: | 12082 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: |
8156 | rc = -EINVAL; | 12083 | rc = -EINVAL; |
8157 | break; | 12084 | break; |
@@ -8169,15 +12096,21 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], | |||
8169 | return rc; | 12096 | return rc; |
8170 | } | 12097 | } |
8171 | 12098 | ||
8172 | u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], | 12099 | int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], |
8173 | u32 shmem2_base_path[], u32 chip_id) | 12100 | u32 shmem2_base_path[], u32 chip_id) |
8174 | { | 12101 | { |
8175 | u8 rc = 0; | 12102 | int rc = 0; |
8176 | u32 phy_ver; | 12103 | u32 phy_ver, val; |
8177 | u8 phy_index; | 12104 | u8 phy_index = 0; |
8178 | u32 ext_phy_type, ext_phy_config; | 12105 | u32 ext_phy_type, ext_phy_config; |
12106 | bnx2x_set_mdio_clk(bp, chip_id, PORT_0); | ||
12107 | bnx2x_set_mdio_clk(bp, chip_id, PORT_1); | ||
8179 | DP(NETIF_MSG_LINK, "Begin common phy init\n"); | 12108 | DP(NETIF_MSG_LINK, "Begin common phy init\n"); |
8180 | 12109 | if (CHIP_IS_E3(bp)) { | |
12110 | /* Enable EPIO */ | ||
12111 | val = REG_RD(bp, MISC_REG_GEN_PURP_HWG); | ||
12112 | REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1); | ||
12113 | } | ||
8181 | /* Check if common init was already done */ | 12114 | /* Check if common init was already done */ |
8182 | phy_ver = REG_RD(bp, shmem_base_path[0] + | 12115 | phy_ver = REG_RD(bp, shmem_base_path[0] + |
8183 | offsetof(struct shmem_region, | 12116 | offsetof(struct shmem_region, |
@@ -8203,6 +12136,135 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], | |||
8203 | return rc; | 12136 | return rc; |
8204 | } | 12137 | } |
8205 | 12138 | ||
12139 | static void bnx2x_check_over_curr(struct link_params *params, | ||
12140 | struct link_vars *vars) | ||
12141 | { | ||
12142 | struct bnx2x *bp = params->bp; | ||
12143 | u32 cfg_pin; | ||
12144 | u8 port = params->port; | ||
12145 | u32 pin_val; | ||
12146 | |||
12147 | cfg_pin = (REG_RD(bp, params->shmem_base + | ||
12148 | offsetof(struct shmem_region, | ||
12149 | dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) & | ||
12150 | PORT_HW_CFG_E3_OVER_CURRENT_MASK) >> | ||
12151 | PORT_HW_CFG_E3_OVER_CURRENT_SHIFT; | ||
12152 | |||
12153 | /* Ignore check if no external input PIN available */ | ||
12154 | if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0) | ||
12155 | return; | ||
12156 | |||
12157 | if (!pin_val) { | ||
12158 | if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) { | ||
12159 | netdev_err(bp->dev, "Error: Power fault on Port %d has" | ||
12160 | " been detected and the power to " | ||
12161 | "that SFP+ module has been removed" | ||
12162 | " to prevent failure of the card." | ||
12163 | " Please remove the SFP+ module and" | ||
12164 | " restart the system to clear this" | ||
12165 | " error.\n", | ||
12166 | params->port); | ||
12167 | vars->phy_flags |= PHY_OVER_CURRENT_FLAG; | ||
12168 | } | ||
12169 | } else | ||
12170 | vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG; | ||
12171 | } | ||
12172 | |||
12173 | static void bnx2x_analyze_link_error(struct link_params *params, | ||
12174 | struct link_vars *vars, u32 lss_status) | ||
12175 | { | ||
12176 | struct bnx2x *bp = params->bp; | ||
12177 | /* Compare new value with previous value */ | ||
12178 | u8 led_mode; | ||
12179 | u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0; | ||
12180 | |||
12181 | /*DP(NETIF_MSG_LINK, "CHECK LINK: %x half_open:%x-> lss:%x\n", | ||
12182 | vars->link_up, | ||
12183 | half_open_conn, lss_status);*/ | ||
12184 | |||
12185 | if ((lss_status ^ half_open_conn) == 0) | ||
12186 | return; | ||
12187 | |||
12188 | /* If values differ */ | ||
12189 | DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up, | ||
12190 | half_open_conn, lss_status); | ||
12191 | |||
12192 | /* | ||
12193 | * a. Update shmem->link_status accordingly | ||
12194 | * b. Update link_vars->link_up | ||
12195 | */ | ||
12196 | if (lss_status) { | ||
12197 | vars->link_status &= ~LINK_STATUS_LINK_UP; | ||
12198 | vars->link_up = 0; | ||
12199 | vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; | ||
12200 | /* | ||
12201 | * Set LED mode to off since the PHY doesn't know about these | ||
12202 | * errors | ||
12203 | */ | ||
12204 | led_mode = LED_MODE_OFF; | ||
12205 | } else { | ||
12206 | vars->link_status |= LINK_STATUS_LINK_UP; | ||
12207 | vars->link_up = 1; | ||
12208 | vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; | ||
12209 | led_mode = LED_MODE_OPER; | ||
12210 | } | ||
12211 | /* Update the LED according to the link state */ | ||
12212 | bnx2x_set_led(params, vars, led_mode, SPEED_10000); | ||
12213 | |||
12214 | /* Update link status in the shared memory */ | ||
12215 | bnx2x_update_mng(params, vars->link_status); | ||
12216 | |||
12217 | /* C. Trigger General Attention */ | ||
12218 | vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT; | ||
12219 | bnx2x_notify_link_changed(bp); | ||
12220 | } | ||
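Aside (illustration only): lss_status and half_open_conn are both normalized to 0/1 before the comparison, so the XOR at the top of bnx2x_analyze_link_error() is simply an "unchanged?" test; an equivalent, more explicit form would be:

	/* Sketch of the equivalent check. */
	static bool lss_state_changed(u32 lss_status, u32 half_open_conn)
	{
		return (lss_status != 0) != (half_open_conn != 0);
	}

The function therefore acts only when the MAC-reported loss-of-sync state differs from the cached PHY_HALF_OPEN_CONN_FLAG.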
12221 | |||
12222 | static void bnx2x_check_half_open_conn(struct link_params *params, | ||
12223 | struct link_vars *vars) | ||
12224 | { | ||
12225 | struct bnx2x *bp = params->bp; | ||
12226 | u32 lss_status = 0; | ||
12227 | u32 mac_base; | ||
12228 | /* Run this check only while the physical link is up @ 10G */ | ||
12229 | if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) | ||
12230 | return; | ||
12231 | |||
12232 | if (!CHIP_IS_E3(bp) && | ||
12233 | (REG_RD(bp, MISC_REG_RESET_REG_2) & | ||
12234 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))) { | ||
12235 | /* Check E1X / E2 BMAC */ | ||
12236 | u32 lss_status_reg; | ||
12237 | u32 wb_data[2]; | ||
12238 | mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM : | ||
12239 | NIG_REG_INGRESS_BMAC0_MEM; | ||
12240 | /* Read BIGMAC_REGISTER_RX_LSS_STATUS */ | ||
12241 | if (CHIP_IS_E2(bp)) | ||
12242 | lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT; | ||
12243 | else | ||
12244 | lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS; | ||
12245 | |||
12246 | REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2); | ||
12247 | lss_status = (wb_data[0] > 0); | ||
12248 | |||
12249 | bnx2x_analyze_link_error(params, vars, lss_status); | ||
12250 | } | ||
12251 | } | ||
12252 | |||
12253 | void bnx2x_period_func(struct link_params *params, struct link_vars *vars) | ||
12254 | { | ||
12255 | struct bnx2x *bp = params->bp; | ||
12256 | if (!params) { | ||
12257 | DP(NETIF_MSG_LINK, "Ininitliazed params !\n"); | ||
12258 | return; | ||
12259 | } | ||
12260 | /* DP(NETIF_MSG_LINK, "Periodic called vars->phy_flags 0x%x speed 0x%x | ||
12261 | RESET_REG_2 0x%x\n", vars->phy_flags, vars->line_speed, | ||
12262 | REG_RD(bp, MISC_REG_RESET_REG_2)); */ | ||
12263 | bnx2x_check_half_open_conn(params, vars); | ||
12264 | if (CHIP_IS_E3(bp)) | ||
12265 | bnx2x_check_over_curr(params, vars); | ||
12266 | } | ||
12267 | |||
8206 | u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base) | 12268 | u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base) |
8207 | { | 12269 | { |
8208 | u8 phy_index; | 12270 | u8 phy_index; |
@@ -8245,7 +12307,15 @@ u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, | |||
8245 | void bnx2x_hw_reset_phy(struct link_params *params) | 12307 | void bnx2x_hw_reset_phy(struct link_params *params) |
8246 | { | 12308 | { |
8247 | u8 phy_index; | 12309 | u8 phy_index; |
8248 | for (phy_index = EXT_PHY1; phy_index < MAX_PHYS; | 12310 | struct bnx2x *bp = params->bp; |
12311 | bnx2x_update_mng(params, 0); | ||
12312 | bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, | ||
12313 | (NIG_MASK_XGXS0_LINK_STATUS | | ||
12314 | NIG_MASK_XGXS0_LINK10G | | ||
12315 | NIG_MASK_SERDES0_LINK_STATUS | | ||
12316 | NIG_MASK_MI_INT)); | ||
12317 | |||
12318 | for (phy_index = INT_PHY; phy_index < MAX_PHYS; | ||
8249 | phy_index++) { | 12319 | phy_index++) { |
8250 | if (params->phy[phy_index].hw_reset) { | 12320 | if (params->phy[phy_index].hw_reset) { |
8251 | params->phy[phy_index].hw_reset( | 12321 | params->phy[phy_index].hw_reset( |
@@ -8255,3 +12325,72 @@ void bnx2x_hw_reset_phy(struct link_params *params) | |||
8255 | } | 12325 | } |
8256 | } | 12326 | } |
8257 | } | 12327 | } |
12328 | |||
12329 | void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars, | ||
12330 | u32 chip_id, u32 shmem_base, u32 shmem2_base, | ||
12331 | u8 port) | ||
12332 | { | ||
12333 | u8 gpio_num = 0xff, gpio_port = 0xff, phy_index; | ||
12334 | u32 val; | ||
12335 | u32 offset, aeu_mask, swap_val, swap_override, sync_offset; | ||
12336 | if (CHIP_IS_E3(bp)) { | ||
12337 | if (bnx2x_get_mod_abs_int_cfg(bp, chip_id, | ||
12338 | shmem_base, | ||
12339 | port, | ||
12340 | &gpio_num, | ||
12341 | &gpio_port) != 0) | ||
12342 | return; | ||
12343 | } else { | ||
12344 | struct bnx2x_phy phy; | ||
12345 | for (phy_index = EXT_PHY1; phy_index < MAX_PHYS; | ||
12346 | phy_index++) { | ||
12347 | if (bnx2x_populate_phy(bp, phy_index, shmem_base, | ||
12348 | shmem2_base, port, &phy) | ||
12349 | != 0) { | ||
12350 | DP(NETIF_MSG_LINK, "populate phy failed\n"); | ||
12351 | return; | ||
12352 | } | ||
12353 | if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) { | ||
12354 | gpio_num = MISC_REGISTERS_GPIO_3; | ||
12355 | gpio_port = port; | ||
12356 | break; | ||
12357 | } | ||
12358 | } | ||
12359 | } | ||
12360 | |||
12361 | if (gpio_num == 0xff) | ||
12362 | return; | ||
12363 | |||
12364 | /* Set GPIO3 to trigger SFP+ module insertion/removal */ | ||
12365 | bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port); | ||
12366 | |||
12367 | swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); | ||
12368 | swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); | ||
12369 | gpio_port ^= (swap_val && swap_override); | ||
12370 | |||
12371 | vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 << | ||
12372 | (gpio_num + (gpio_port << 2)); | ||
12373 | |||
12374 | sync_offset = shmem_base + | ||
12375 | offsetof(struct shmem_region, | ||
12376 | dev_info.port_hw_config[port].aeu_int_mask); | ||
12377 | REG_WR(bp, sync_offset, vars->aeu_int_mask); | ||
12378 | |||
12379 | DP(NETIF_MSG_LINK, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x\n", | ||
12380 | gpio_num, gpio_port, vars->aeu_int_mask); | ||
12381 | |||
12382 | if (port == 0) | ||
12383 | offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; | ||
12384 | else | ||
12385 | offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0; | ||
12386 | |||
12387 | /* Open appropriate AEU for interrupts */ | ||
12388 | aeu_mask = REG_RD(bp, offset); | ||
12389 | aeu_mask |= vars->aeu_int_mask; | ||
12390 | REG_WR(bp, offset, aeu_mask); | ||
12391 | |||
12392 | /* Enable the GPIO to trigger interrupt */ | ||
12393 | val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN); | ||
12394 | val |= 1 << (gpio_num + (gpio_port << 2)); | ||
12395 | REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); | ||
12396 | } | ||
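Worked example for the AEU/GPIO arithmetic above (numbers assumed for illustration): in the BCM8726 branch gpio_num is MISC_REGISTERS_GPIO_3, so with gpio_port == 1 the bit index is gpio_num + (gpio_port << 2) = 3 + 4 = 7, and the same index selects both the AEU input bit and the GPIO event-enable bit:

	/* Illustration only; the port value is an assumption. */
	u8  gpio_num  = 3;				/* MISC_REGISTERS_GPIO_3             */
	u8  gpio_port = 1;				/* example port after swap handling  */
	u32 idx       = gpio_num + (gpio_port << 2);	/* = 7                               */
	u32 aeu_bit   = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 << idx;
	u32 event_en  = 1 << idx;			/* OR'ed into MISC_REG_GPIO_EVENT_EN */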
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h index 92f36b6950dc..6a7708d5da37 100644 --- a/drivers/net/bnx2x/bnx2x_link.h +++ b/drivers/net/bnx2x/bnx2x_link.h | |||
@@ -33,12 +33,13 @@ | |||
33 | #define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH | 33 | #define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH |
34 | #define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE | 34 | #define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE |
35 | 35 | ||
36 | #define NET_SERDES_IF_XFI 1 | ||
37 | #define NET_SERDES_IF_SFI 2 | ||
38 | #define NET_SERDES_IF_KR 3 | ||
39 | #define NET_SERDES_IF_DXGXS 4 | ||
40 | |||
36 | #define SPEED_AUTO_NEG 0 | 41 | #define SPEED_AUTO_NEG 0 |
37 | #define SPEED_12000 12000 | 42 | #define SPEED_20000 20000 |
38 | #define SPEED_12500 12500 | ||
39 | #define SPEED_13000 13000 | ||
40 | #define SPEED_15000 15000 | ||
41 | #define SPEED_16000 16000 | ||
42 | 43 | ||
43 | #define SFP_EEPROM_VENDOR_NAME_ADDR 0x14 | 44 | #define SFP_EEPROM_VENDOR_NAME_ADDR 0x14 |
44 | #define SFP_EEPROM_VENDOR_NAME_SIZE 16 | 45 | #define SFP_EEPROM_VENDOR_NAME_SIZE 16 |
@@ -46,6 +47,12 @@ | |||
46 | #define SFP_EEPROM_VENDOR_OUI_SIZE 3 | 47 | #define SFP_EEPROM_VENDOR_OUI_SIZE 3 |
47 | #define SFP_EEPROM_PART_NO_ADDR 0x28 | 48 | #define SFP_EEPROM_PART_NO_ADDR 0x28 |
48 | #define SFP_EEPROM_PART_NO_SIZE 16 | 49 | #define SFP_EEPROM_PART_NO_SIZE 16 |
50 | #define SFP_EEPROM_REVISION_ADDR 0x38 | ||
51 | #define SFP_EEPROM_REVISION_SIZE 4 | ||
52 | #define SFP_EEPROM_SERIAL_ADDR 0x44 | ||
53 | #define SFP_EEPROM_SERIAL_SIZE 16 | ||
54 | #define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */ | ||
55 | #define SFP_EEPROM_DATE_SIZE 6 | ||
49 | #define PWR_FLT_ERR_MSG_LEN 250 | 56 | #define PWR_FLT_ERR_MSG_LEN 250 |
50 | 57 | ||
51 | #define XGXS_EXT_PHY_TYPE(ext_phy_config) \ | 58 | #define XGXS_EXT_PHY_TYPE(ext_phy_config) \ |
@@ -62,25 +69,26 @@ | |||
62 | #define SINGLE_MEDIA(params) (params->num_phys == 2) | 69 | #define SINGLE_MEDIA(params) (params->num_phys == 2) |
63 | /* Dual Media board contains two external phy with different media */ | 70 | /* Dual Media board contains two external phy with different media */ |
64 | #define DUAL_MEDIA(params) (params->num_phys == 3) | 71 | #define DUAL_MEDIA(params) (params->num_phys == 3) |
72 | |||
73 | #define FW_PARAM_PHY_ADDR_MASK 0x000000FF | ||
74 | #define FW_PARAM_PHY_TYPE_MASK 0x0000FF00 | ||
75 | #define FW_PARAM_MDIO_CTRL_MASK 0xFFFF0000 | ||
65 | #define FW_PARAM_MDIO_CTRL_OFFSET 16 | 76 | #define FW_PARAM_MDIO_CTRL_OFFSET 16 |
77 | #define FW_PARAM_PHY_ADDR(fw_param) (fw_param & \ | ||
78 | FW_PARAM_PHY_ADDR_MASK) | ||
79 | #define FW_PARAM_PHY_TYPE(fw_param) (fw_param & \ | ||
80 | FW_PARAM_PHY_TYPE_MASK) | ||
81 | #define FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \ | ||
82 | FW_PARAM_MDIO_CTRL_MASK) >> \ | ||
83 | FW_PARAM_MDIO_CTRL_OFFSET) | ||
66 | #define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \ | 84 | #define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \ |
67 | (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET) | 85 | (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET) |
68 | 86 | ||
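A quick sketch of the bit layout these masks describe (illustration only; the numeric values are made up, and phy_type is passed already positioned in bits 8..15, as it is stored in ext_phy_config):

	u32 fw_param = FW_PARAM_SET(0x11,	/* phy address -> bits 0..7   */
				    0x0300,	/* phy type    -> bits 8..15  */
				    0x1f);	/* mdio ctrl   -> bits 16..31 */
	/* FW_PARAM_PHY_ADDR(fw_param)  == 0x11   */
	/* FW_PARAM_PHY_TYPE(fw_param)  == 0x0300 */
	/* FW_PARAM_MDIO_CTRL(fw_param) == 0x1f   */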
69 | #define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE 170 | ||
70 | #define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE 0 | ||
71 | |||
72 | #define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE 250 | ||
73 | #define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE 0 | ||
74 | |||
75 | #define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE 10 | ||
76 | #define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE 90 | ||
77 | |||
78 | #define PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE 50 | ||
79 | #define PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE 250 | ||
80 | 87 | ||
81 | #define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170 | 88 | #define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170 |
82 | #define PFC_BRB_FULL_LB_XON_THRESHOLD 250 | 89 | #define PFC_BRB_FULL_LB_XON_THRESHOLD 250 |
83 | 90 | ||
91 | #define MAXVAL(a, b) (((a) > (b)) ? (a) : (b)) | ||
84 | /***********************************************************/ | 92 | /***********************************************************/ |
85 | /* Structs */ | 93 | /* Structs */ |
86 | /***********************************************************/ | 94 | /***********************************************************/ |
@@ -121,8 +129,8 @@ struct bnx2x_phy { | |||
121 | 129 | ||
122 | /* Loaded during init */ | 130 | /* Loaded during init */ |
123 | u8 addr; | 131 | u8 addr; |
124 | 132 | u8 def_md_devad; | |
125 | u8 flags; | 133 | u16 flags; |
126 | /* Require HW lock */ | 134 | /* Require HW lock */ |
127 | #define FLAGS_HW_LOCK_REQUIRED (1<<0) | 135 | #define FLAGS_HW_LOCK_REQUIRED (1<<0) |
128 | /* No Over-Current detection */ | 136 | /* No Over-Current detection */ |
@@ -131,11 +139,13 @@ struct bnx2x_phy { | |||
131 | #define FLAGS_FAN_FAILURE_DET_REQ (1<<2) | 139 | #define FLAGS_FAN_FAILURE_DET_REQ (1<<2) |
132 | /* Initialize first the XGXS and only then the phy itself */ | 140 | /* Initialize first the XGXS and only then the phy itself */ |
133 | #define FLAGS_INIT_XGXS_FIRST (1<<3) | 141 | #define FLAGS_INIT_XGXS_FIRST (1<<3) |
142 | #define FLAGS_WC_DUAL_MODE (1<<4) | ||
143 | #define FLAGS_4_PORT_MODE (1<<5) | ||
134 | #define FLAGS_REARM_LATCH_SIGNAL (1<<6) | 144 | #define FLAGS_REARM_LATCH_SIGNAL (1<<6) |
135 | #define FLAGS_SFP_NOT_APPROVED (1<<7) | 145 | #define FLAGS_SFP_NOT_APPROVED (1<<7) |
146 | #define FLAGS_MDC_MDIO_WA (1<<8) | ||
147 | #define FLAGS_DUMMY_READ (1<<9) | ||
136 | 148 | ||
137 | u8 def_md_devad; | ||
138 | u8 reserved; | ||
139 | /* preemphasis values for the rx side */ | 149 | /* preemphasis values for the rx side */ |
140 | u16 rx_preemphasis[4]; | 150 | u16 rx_preemphasis[4]; |
141 | 151 | ||
@@ -153,6 +163,8 @@ struct bnx2x_phy { | |||
153 | #define ETH_PHY_XFP_FIBER 0x2 | 163 | #define ETH_PHY_XFP_FIBER 0x2 |
154 | #define ETH_PHY_DA_TWINAX 0x3 | 164 | #define ETH_PHY_DA_TWINAX 0x3 |
155 | #define ETH_PHY_BASE_T 0x4 | 165 | #define ETH_PHY_BASE_T 0x4 |
166 | #define ETH_PHY_KR 0xf0 | ||
167 | #define ETH_PHY_CX4 0xf1 | ||
156 | #define ETH_PHY_NOT_PRESENT 0xff | 168 | #define ETH_PHY_NOT_PRESENT 0xff |
157 | 169 | ||
158 | /* The address in which version is located*/ | 170 | /* The address in which version is located*/ |
@@ -238,6 +250,8 @@ struct link_params { | |||
238 | #define FEATURE_CONFIG_PFC_ENABLED (1<<1) | 250 | #define FEATURE_CONFIG_PFC_ENABLED (1<<1) |
239 | #define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) | 251 | #define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) |
240 | #define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) | 252 | #define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) |
253 | #define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9) | ||
254 | #define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10) | ||
241 | /* Will be populated during common init */ | 255 | /* Will be populated during common init */ |
242 | struct bnx2x_phy phy[MAX_PHYS]; | 256 | struct bnx2x_phy phy[MAX_PHYS]; |
243 | 257 | ||
@@ -257,11 +271,19 @@ struct link_params { | |||
257 | /* Output parameters */ | 271 | /* Output parameters */ |
258 | struct link_vars { | 272 | struct link_vars { |
259 | u8 phy_flags; | 273 | u8 phy_flags; |
274 | #define PHY_XGXS_FLAG (1<<0) | ||
275 | #define PHY_SGMII_FLAG (1<<1) | ||
276 | #define PHY_PHYSICAL_LINK_FLAG (1<<2) | ||
277 | #define PHY_HALF_OPEN_CONN_FLAG (1<<3) | ||
278 | #define PHY_OVER_CURRENT_FLAG (1<<4) | ||
279 | #define PHY_TX_ERROR_CHECK_FLAG (1<<5) | ||
260 | 280 | ||
261 | u8 mac_type; | 281 | u8 mac_type; |
262 | #define MAC_TYPE_NONE 0 | 282 | #define MAC_TYPE_NONE 0 |
263 | #define MAC_TYPE_EMAC 1 | 283 | #define MAC_TYPE_EMAC 1 |
264 | #define MAC_TYPE_BMAC 2 | 284 | #define MAC_TYPE_BMAC 2 |
285 | #define MAC_TYPE_UMAC 3 | ||
286 | #define MAC_TYPE_XMAC 4 | ||
265 | 287 | ||
266 | u8 phy_link_up; /* internal phy link indication */ | 288 | u8 phy_link_up; /* internal phy link indication */ |
267 | u8 link_up; | 289 | u8 link_up; |
@@ -274,45 +296,52 @@ struct link_vars { | |||
274 | 296 | ||
275 | /* The same definitions as the shmem parameter */ | 297 | /* The same definitions as the shmem parameter */ |
276 | u32 link_status; | 298 | u32 link_status; |
299 | u8 fault_detected; | ||
300 | u8 rsrv1; | ||
301 | u16 periodic_flags; | ||
302 | #define PERIODIC_FLAGS_LINK_EVENT 0x0001 | ||
303 | |||
304 | u32 aeu_int_mask; | ||
277 | }; | 305 | }; |
278 | 306 | ||
279 | /***********************************************************/ | 307 | /***********************************************************/ |
280 | /* Functions */ | 308 | /* Functions */ |
281 | /***********************************************************/ | 309 | /***********************************************************/ |
282 | u8 bnx2x_phy_init(struct link_params *input, struct link_vars *output); | 310 | int bnx2x_phy_init(struct link_params *params, struct link_vars *vars); |
283 | 311 | ||
284 | /* Reset the link. Should be called when driver or interface goes down | 312 | /* Reset the link. Should be called when driver or interface goes down |
285 | Before calling phy firmware upgrade, the reset_ext_phy should be set | 313 | Before calling phy firmware upgrade, the reset_ext_phy should be set |
286 | to 0 */ | 314 | to 0 */ |
287 | u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, | 315 | int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, |
288 | u8 reset_ext_phy); | 316 | u8 reset_ext_phy); |
289 | 317 | ||
290 | /* bnx2x_link_update should be called upon link interrupt */ | 318 | /* bnx2x_link_update should be called upon link interrupt */ |
291 | u8 bnx2x_link_update(struct link_params *input, struct link_vars *output); | 319 | int bnx2x_link_update(struct link_params *params, struct link_vars *vars); |
292 | 320 | ||
293 | /* use the following phy functions to read/write from external_phy | 321 | /* use the following phy functions to read/write from external_phy |
294 | In order to use it to read/write internal phy registers, use | 322 | In order to use it to read/write internal phy registers, use |
295 | DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as | 323 | DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as |
296 | the register */ | 324 | the register */ |
297 | u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr, | 325 | int bnx2x_phy_read(struct link_params *params, u8 phy_addr, |
298 | u8 devad, u16 reg, u16 *ret_val); | 326 | u8 devad, u16 reg, u16 *ret_val); |
327 | |||
328 | int bnx2x_phy_write(struct link_params *params, u8 phy_addr, | ||
329 | u8 devad, u16 reg, u16 val); | ||
299 | 330 | ||
300 | u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr, | ||
301 | u8 devad, u16 reg, u16 val); | ||
302 | /* Reads the link_status from the shmem, | 331 | /* Reads the link_status from the shmem, |
303 | and update the link vars accordingly */ | 332 | and update the link vars accordingly */ |
304 | void bnx2x_link_status_update(struct link_params *input, | 333 | void bnx2x_link_status_update(struct link_params *input, |
305 | struct link_vars *output); | 334 | struct link_vars *output); |
306 | /* returns string representing the fw_version of the external phy */ | 335 | /* returns string representing the fw_version of the external phy */ |
307 | u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, | 336 | int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, |
308 | u8 *version, u16 len); | 337 | u8 *version, u16 len); |
309 | 338 | ||
310 | /* Set/Unset the led | 339 | /* Set/Unset the led |
311 | Basically, the CLC takes care of the led for the link, but in case one needs | 340 | Basically, the CLC takes care of the led for the link, but in case one needs |
312 | to set/unset the led manually, set the "mode" to LED_MODE_OPER to | 341 | to set/unset the led manually, set the "mode" to LED_MODE_OPER to
313 | blink the led, and LED_MODE_OFF to set the led off.*/ | 342 | blink the led, and LED_MODE_OFF to set the led off.*/ |
314 | u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars, | 343 | int bnx2x_set_led(struct link_params *params, |
315 | u8 mode, u32 speed); | 344 | struct link_vars *vars, u8 mode, u32 speed); |
316 | #define LED_MODE_OFF 0 | 345 | #define LED_MODE_OFF 0 |
317 | #define LED_MODE_ON 1 | 346 | #define LED_MODE_ON 1 |
318 | #define LED_MODE_OPER 2 | 347 | #define LED_MODE_OPER 2 |
@@ -324,12 +353,12 @@ void bnx2x_handle_module_detect_int(struct link_params *params); | |||
324 | 353 | ||
325 | /* Get the actual link status. In case it returns 0, link is up, | 354 | /* Get the actual link status. In case it returns 0, link is up, |
326 | otherwise link is down*/ | 355 | otherwise link is down*/ |
327 | u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars, | 356 | int bnx2x_test_link(struct link_params *params, struct link_vars *vars, |
328 | u8 is_serdes); | 357 | u8 is_serdes); |
329 | 358 | ||
330 | /* One-time initialization for external phy after power up */ | 359 | /* One-time initialization for external phy after power up */ |
331 | u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], | 360 | int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], |
332 | u32 shmem2_base_path[], u32 chip_id); | 361 | u32 shmem2_base_path[], u32 chip_id); |
333 | 362 | ||
334 | /* Reset the external PHY using GPIO */ | 363 | /* Reset the external PHY using GPIO */ |
335 | void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port); | 364 | void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port); |
@@ -338,9 +367,9 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port); | |||
338 | void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy); | 367 | void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy); |
339 | 368 | ||
340 | /* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */ | 369 | /* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */ |
341 | u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, | 370 | int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, |
342 | struct link_params *params, u16 addr, | 371 | struct link_params *params, u16 addr, |
343 | u8 byte_cnt, u8 *o_buf); | 372 | u8 byte_cnt, u8 *o_buf); |
344 | 373 | ||
345 | void bnx2x_hw_reset_phy(struct link_params *params); | 374 | void bnx2x_hw_reset_phy(struct link_params *params); |
346 | 375 | ||
@@ -352,11 +381,28 @@ u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, | |||
352 | u32 bnx2x_phy_selection(struct link_params *params); | 381 | u32 bnx2x_phy_selection(struct link_params *params); |
353 | 382 | ||
354 | /* Probe the phys on board, and populate them in "params" */ | 383 | /* Probe the phys on board, and populate them in "params" */ |
355 | u8 bnx2x_phy_probe(struct link_params *params); | 384 | int bnx2x_phy_probe(struct link_params *params); |
385 | |||
356 | /* Checks if fan failure detection is required on one of the phys on board */ | 386 | /* Checks if fan failure detection is required on one of the phys on board */ |
357 | u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base, | 387 | u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base, |
358 | u32 shmem2_base, u8 port); | 388 | u32 shmem2_base, u8 port); |
359 | 389 | ||
390 | |||
391 | |||
392 | /* DCBX structs */ | ||
393 | |||
394 | /* Number of maximum COS per chip */ | ||
395 | #define DCBX_E2E3_MAX_NUM_COS (2) | ||
396 | #define DCBX_E3B0_MAX_NUM_COS_PORT0 (6) | ||
397 | #define DCBX_E3B0_MAX_NUM_COS_PORT1 (3) | ||
398 | #define DCBX_E3B0_MAX_NUM_COS ( \ | ||
399 | MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \ | ||
400 | DCBX_E3B0_MAX_NUM_COS_PORT1)) | ||
401 | |||
402 | #define DCBX_MAX_NUM_COS ( \ | ||
403 | MAXVAL(DCBX_E3B0_MAX_NUM_COS, \ | ||
404 | DCBX_E2E3_MAX_NUM_COS)) | ||
405 | |||
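The limits above reduce to small constants: 2 COS on E2/E3, 6 and 3 on the two E3B0 ports, and an overall DCBX_MAX_NUM_COS of 6 that sizes the per-port arrays below. A standalone sketch that evaluates the same arithmetic, assuming MAXVAL() is the usual two-argument max macro (its definition is not part of this hunk):

#include <stdio.h>

#define MAXVAL(a, b)			(((a) > (b)) ? (a) : (b))

#define DCBX_E2E3_MAX_NUM_COS		(2)
#define DCBX_E3B0_MAX_NUM_COS_PORT0	(6)
#define DCBX_E3B0_MAX_NUM_COS_PORT1	(3)
#define DCBX_E3B0_MAX_NUM_COS \
	(MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, DCBX_E3B0_MAX_NUM_COS_PORT1))
#define DCBX_MAX_NUM_COS \
	(MAXVAL(DCBX_E3B0_MAX_NUM_COS, DCBX_E2E3_MAX_NUM_COS))

int main(void)
{
	/* prints 6: the widest COS set any supported chip exposes */
	printf("DCBX_MAX_NUM_COS = %d\n", DCBX_MAX_NUM_COS);
	return 0;
}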
360 | /* PFC port configuration params */ | 406 | /* PFC port configuration params */ |
361 | struct bnx2x_nig_brb_pfc_port_params { | 407 | struct bnx2x_nig_brb_pfc_port_params { |
362 | /* NIG */ | 408 | /* NIG */ |
@@ -364,8 +410,8 @@ struct bnx2x_nig_brb_pfc_port_params { | |||
364 | u32 llfc_out_en; | 410 | u32 llfc_out_en; |
365 | u32 llfc_enable; | 411 | u32 llfc_enable; |
366 | u32 pkt_priority_to_cos; | 412 | u32 pkt_priority_to_cos; |
367 | u32 rx_cos0_priority_mask; | 413 | u8 num_of_rx_cos_priority_mask; |
368 | u32 rx_cos1_priority_mask; | 414 | u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS]; |
369 | u32 llfc_high_priority_classes; | 415 | u32 llfc_high_priority_classes; |
370 | u32 llfc_low_priority_classes; | 416 | u32 llfc_low_priority_classes; |
371 | /* BRB */ | 417 | /* BRB */ |
@@ -373,27 +419,74 @@ struct bnx2x_nig_brb_pfc_port_params { | |||
373 | u32 cos1_pauseable; | 419 | u32 cos1_pauseable; |
374 | }; | 420 | }; |
375 | 421 | ||
422 | |||
423 | /* ETS port configuration params */ | ||
424 | struct bnx2x_ets_bw_params { | ||
425 | u8 bw; | ||
426 | }; | ||
427 | |||
428 | struct bnx2x_ets_sp_params { | ||
429 | /** | ||
430 | * valid values are 0 - 5. 0 is highest strict priority. | ||
431 | * There can't be two COS's with the same pri. | ||
432 | */ | ||
433 | u8 pri; | ||
434 | }; | ||
435 | |||
436 | enum bnx2x_cos_state { | ||
437 | bnx2x_cos_state_strict = 0, | ||
438 | bnx2x_cos_state_bw = 1, | ||
439 | }; | ||
440 | |||
441 | struct bnx2x_ets_cos_params { | ||
442 | enum bnx2x_cos_state state; | ||
443 | union { | ||
444 | struct bnx2x_ets_bw_params bw_params; | ||
445 | struct bnx2x_ets_sp_params sp_params; | ||
446 | } params; | ||
447 | }; | ||
448 | |||
449 | struct bnx2x_ets_params { | ||
450 | u8 num_of_cos; /* Number of valid COS entries*/ | ||
451 | struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS]; | ||
452 | }; | ||
453 | |||
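Each COS entry is either strict-priority or bandwidth-shared, which is what the state/union pair encodes. Below is a standalone sketch of filling a bnx2x_ets_params the way a caller of the new bnx2x_ets_e3b0_config() (declared further down in this hunk) presumably would: one strict class plus two BW classes whose weights sum to 100. The structs are repeated verbatim so the sketch compiles on its own.

#include <stdio.h>

typedef unsigned char u8;

#define DCBX_MAX_NUM_COS 6

struct bnx2x_ets_bw_params { u8 bw; };
struct bnx2x_ets_sp_params { u8 pri; };	/* 0 is the highest strict priority */

enum bnx2x_cos_state {
	bnx2x_cos_state_strict = 0,
	bnx2x_cos_state_bw = 1,
};

struct bnx2x_ets_cos_params {
	enum bnx2x_cos_state state;
	union {
		struct bnx2x_ets_bw_params bw_params;
		struct bnx2x_ets_sp_params sp_params;
	} params;
};

struct bnx2x_ets_params {
	u8 num_of_cos;				/* number of valid COS entries */
	struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
};

int main(void)
{
	struct bnx2x_ets_params ets = { .num_of_cos = 3 };

	/* COS0 is strict (highest priority); COS1/COS2 share bandwidth 60/40 */
	ets.cos[0].state = bnx2x_cos_state_strict;
	ets.cos[0].params.sp_params.pri = 0;
	ets.cos[1].state = bnx2x_cos_state_bw;
	ets.cos[1].params.bw_params.bw = 60;
	ets.cos[2].state = bnx2x_cos_state_bw;
	ets.cos[2].params.bw_params.bw = 40;

	printf("configured %u COS entries\n", ets.num_of_cos);
	return 0;
}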
376 | /** | 454 | /** |
377 | * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB | 455 | * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB |
378 | * when link is already up | 456 | * when link is already up |
379 | */ | 457 | */ |
380 | void bnx2x_update_pfc(struct link_params *params, | 458 | int bnx2x_update_pfc(struct link_params *params, |
381 | struct link_vars *vars, | 459 | struct link_vars *vars, |
382 | struct bnx2x_nig_brb_pfc_port_params *pfc_params); | 460 | struct bnx2x_nig_brb_pfc_port_params *pfc_params); |
383 | 461 | ||
384 | 462 | ||
385 | /* Used to configure the ETS to disable */ | 463 | /* Used to configure the ETS to disable */ |
386 | void bnx2x_ets_disabled(struct link_params *params); | 464 | int bnx2x_ets_disabled(struct link_params *params, |
465 | struct link_vars *vars); | ||
387 | 466 | ||
388 | /* Used to configure the ETS to BW limited */ | 467 | /* Used to configure the ETS to BW limited */ |
389 | void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, | 468 | void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, |
390 | const u32 cos1_bw); | 469 | const u32 cos1_bw); |
391 | 470 | ||
392 | /* Used to configure the ETS to strict */ | 471 | /* Used to configure the ETS to strict */ |
393 | u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); | 472 | int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); |
473 | |||
394 | 474 | ||
475 | /* Configure the COS to ETS according to BW and SP settings.*/ | ||
476 | int bnx2x_ets_e3b0_config(const struct link_params *params, | ||
477 | const struct link_vars *vars, | ||
478 | const struct bnx2x_ets_params *ets_params); | ||
395 | /* Read pfc statistic*/ | 479 | /* Read pfc statistic*/ |
396 | void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, | 480 | void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, |
397 | u32 pfc_frames_sent[2], | 481 | u32 pfc_frames_sent[2], |
398 | u32 pfc_frames_received[2]); | 482 | u32 pfc_frames_received[2]); |
483 | void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars, | ||
484 | u32 chip_id, u32 shmem_base, u32 shmem2_base, | ||
485 | u8 port); | ||
486 | |||
487 | int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, | ||
488 | struct link_params *params); | ||
489 | |||
490 | void bnx2x_period_func(struct link_params *params, struct link_vars *vars); | ||
491 | |||
399 | #endif /* BNX2X_LINK_H */ | 492 | #endif /* BNX2X_LINK_H */ |
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index 74be989f51c5..5b4a8f34b13c 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/mii.h> | 39 | #include <linux/mii.h> |
40 | #include <linux/if_vlan.h> | 40 | #include <linux/if_vlan.h> |
41 | #include <net/ip.h> | 41 | #include <net/ip.h> |
42 | #include <net/ipv6.h> | ||
42 | #include <net/tcp.h> | 43 | #include <net/tcp.h> |
43 | #include <net/checksum.h> | 44 | #include <net/checksum.h> |
44 | #include <net/ip6_checksum.h> | 45 | #include <net/ip6_checksum.h> |
@@ -51,12 +52,12 @@ | |||
51 | #include <linux/stringify.h> | 52 | #include <linux/stringify.h> |
52 | #include <linux/vmalloc.h> | 53 | #include <linux/vmalloc.h> |
53 | 54 | ||
54 | #define BNX2X_MAIN | ||
55 | #include "bnx2x.h" | 55 | #include "bnx2x.h" |
56 | #include "bnx2x_init.h" | 56 | #include "bnx2x_init.h" |
57 | #include "bnx2x_init_ops.h" | 57 | #include "bnx2x_init_ops.h" |
58 | #include "bnx2x_cmn.h" | 58 | #include "bnx2x_cmn.h" |
59 | #include "bnx2x_dcb.h" | 59 | #include "bnx2x_dcb.h" |
60 | #include "bnx2x_sp.h" | ||
60 | 61 | ||
61 | #include <linux/firmware.h> | 62 | #include <linux/firmware.h> |
62 | #include "bnx2x_fw_file_hdr.h" | 63 | #include "bnx2x_fw_file_hdr.h" |
@@ -74,12 +75,14 @@ | |||
74 | #define TX_TIMEOUT (5*HZ) | 75 | #define TX_TIMEOUT (5*HZ) |
75 | 76 | ||
76 | static char version[] __devinitdata = | 77 | static char version[] __devinitdata = |
77 | "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver " | 78 | "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver " |
78 | DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | 79 | DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; |
79 | 80 | ||
80 | MODULE_AUTHOR("Eliezer Tamir"); | 81 | MODULE_AUTHOR("Eliezer Tamir"); |
81 | MODULE_DESCRIPTION("Broadcom NetXtreme II " | 82 | MODULE_DESCRIPTION("Broadcom NetXtreme II " |
82 | "BCM57710/57711/57711E/57712/57712E Driver"); | 83 | "BCM57710/57711/57711E/" |
84 | "57712/57712_MF/57800/57800_MF/57810/57810_MF/" | ||
85 | "57840/57840_MF Driver"); | ||
83 | MODULE_LICENSE("GPL"); | 86 | MODULE_LICENSE("GPL"); |
84 | MODULE_VERSION(DRV_MODULE_VERSION); | 87 | MODULE_VERSION(DRV_MODULE_VERSION); |
85 | MODULE_FIRMWARE(FW_FILE_NAME_E1); | 88 | MODULE_FIRMWARE(FW_FILE_NAME_E1); |
@@ -100,9 +103,11 @@ static int disable_tpa; | |||
100 | module_param(disable_tpa, int, 0); | 103 | module_param(disable_tpa, int, 0); |
101 | MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); | 104 | MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); |
102 | 105 | ||
106 | #define INT_MODE_INTx 1 | ||
107 | #define INT_MODE_MSI 2 | ||
103 | static int int_mode; | 108 | static int int_mode; |
104 | module_param(int_mode, int, 0); | 109 | module_param(int_mode, int, 0); |
105 | MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X " | 110 | MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " |
106 | "(1 INT#x; 2 MSI)"); | 111 | "(1 INT#x; 2 MSI)"); |
107 | 112 | ||
108 | static int dropless_fc; | 113 | static int dropless_fc; |
@@ -121,37 +126,87 @@ static int debug; | |||
121 | module_param(debug, int, 0); | 126 | module_param(debug, int, 0); |
122 | MODULE_PARM_DESC(debug, " Default debug msglevel"); | 127 | MODULE_PARM_DESC(debug, " Default debug msglevel"); |
123 | 128 | ||
124 | static struct workqueue_struct *bnx2x_wq; | ||
125 | 129 | ||
126 | #ifdef BCM_CNIC | 130 | |
127 | static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01}; | 131 | struct workqueue_struct *bnx2x_wq; |
128 | #endif | ||
129 | 132 | ||
130 | enum bnx2x_board_type { | 133 | enum bnx2x_board_type { |
131 | BCM57710 = 0, | 134 | BCM57710 = 0, |
132 | BCM57711 = 1, | 135 | BCM57711, |
133 | BCM57711E = 2, | 136 | BCM57711E, |
134 | BCM57712 = 3, | 137 | BCM57712, |
135 | BCM57712E = 4 | 138 | BCM57712_MF, |
139 | BCM57800, | ||
140 | BCM57800_MF, | ||
141 | BCM57810, | ||
142 | BCM57810_MF, | ||
143 | BCM57840, | ||
144 | BCM57840_MF | ||
136 | }; | 145 | }; |
137 | 146 | ||
138 | /* indexed by board_type, above */ | 147 | /* indexed by board_type, above */ |
139 | static struct { | 148 | static struct { |
140 | char *name; | 149 | char *name; |
141 | } board_info[] __devinitdata = { | 150 | } board_info[] __devinitdata = { |
142 | { "Broadcom NetXtreme II BCM57710 XGb" }, | 151 | { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, |
143 | { "Broadcom NetXtreme II BCM57711 XGb" }, | 152 | { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, |
144 | { "Broadcom NetXtreme II BCM57711E XGb" }, | 153 | { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, |
145 | { "Broadcom NetXtreme II BCM57712 XGb" }, | 154 | { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" }, |
146 | { "Broadcom NetXtreme II BCM57712E XGb" } | 155 | { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, |
156 | { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, | ||
157 | { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, | ||
158 | { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, | ||
159 | { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, | ||
160 | { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, | ||
161 | { "Broadcom NetXtreme II BCM57840 10/20 Gigabit " | ||
162 | "Ethernet Multi Function"} | ||
147 | }; | 163 | }; |
148 | 164 | ||
165 | #ifndef PCI_DEVICE_ID_NX2_57710 | ||
166 | #define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710 | ||
167 | #endif | ||
168 | #ifndef PCI_DEVICE_ID_NX2_57711 | ||
169 | #define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711 | ||
170 | #endif | ||
171 | #ifndef PCI_DEVICE_ID_NX2_57711E | ||
172 | #define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E | ||
173 | #endif | ||
174 | #ifndef PCI_DEVICE_ID_NX2_57712 | ||
175 | #define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712 | ||
176 | #endif | ||
177 | #ifndef PCI_DEVICE_ID_NX2_57712_MF | ||
178 | #define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF | ||
179 | #endif | ||
180 | #ifndef PCI_DEVICE_ID_NX2_57800 | ||
181 | #define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800 | ||
182 | #endif | ||
183 | #ifndef PCI_DEVICE_ID_NX2_57800_MF | ||
184 | #define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF | ||
185 | #endif | ||
186 | #ifndef PCI_DEVICE_ID_NX2_57810 | ||
187 | #define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810 | ||
188 | #endif | ||
189 | #ifndef PCI_DEVICE_ID_NX2_57810_MF | ||
190 | #define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF | ||
191 | #endif | ||
192 | #ifndef PCI_DEVICE_ID_NX2_57840 | ||
193 | #define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840 | ||
194 | #endif | ||
195 | #ifndef PCI_DEVICE_ID_NX2_57840_MF | ||
196 | #define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF | ||
197 | #endif | ||
149 | static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { | 198 | static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { |
150 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, | 199 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, |
151 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, | 200 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, |
152 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, | 201 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, |
153 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 }, | 202 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 }, |
154 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E }, | 203 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF }, |
204 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 }, | ||
205 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF }, | ||
206 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 }, | ||
207 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, | ||
208 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 }, | ||
209 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, | ||
155 | { 0 } | 210 | { 0 } |
156 | }; | 211 | }; |
157 | 212 | ||
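Each PCI table entry carries its enum bnx2x_board_type as driver_data, and board_info[] is indexed by the same enum, so probe can turn a matched entry straight into a printable name. A standalone sketch of that indexing relationship, with a trimmed copy of the tables (the PCI glue itself is left out):

#include <stdio.h>

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	/* ... the 578xx entries are elided here ... */
};

static const char * const board_name[] = {
	[BCM57710]    = "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]",
	[BCM57711]    = "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe",
	[BCM57711E]   = "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe",
	[BCM57712]    = "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet",
	[BCM57712_MF] = "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function",
};

int main(void)
{
	/* what probe would receive as ent->driver_data for a matched device */
	unsigned long driver_data = BCM57712_MF;

	printf("%s\n", board_name[driver_data]);
	return 0;
}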
@@ -168,48 +223,6 @@ static inline void __storm_memset_dma_mapping(struct bnx2x *bp, | |||
168 | REG_WR(bp, addr + 4, U64_HI(mapping)); | 223 | REG_WR(bp, addr + 4, U64_HI(mapping)); |
169 | } | 224 | } |
170 | 225 | ||
171 | static inline void __storm_memset_fill(struct bnx2x *bp, | ||
172 | u32 addr, size_t size, u32 val) | ||
173 | { | ||
174 | int i; | ||
175 | for (i = 0; i < size/4; i++) | ||
176 | REG_WR(bp, addr + (i * 4), val); | ||
177 | } | ||
178 | |||
179 | static inline void storm_memset_ustats_zero(struct bnx2x *bp, | ||
180 | u8 port, u16 stat_id) | ||
181 | { | ||
182 | size_t size = sizeof(struct ustorm_per_client_stats); | ||
183 | |||
184 | u32 addr = BAR_USTRORM_INTMEM + | ||
185 | USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id); | ||
186 | |||
187 | __storm_memset_fill(bp, addr, size, 0); | ||
188 | } | ||
189 | |||
190 | static inline void storm_memset_tstats_zero(struct bnx2x *bp, | ||
191 | u8 port, u16 stat_id) | ||
192 | { | ||
193 | size_t size = sizeof(struct tstorm_per_client_stats); | ||
194 | |||
195 | u32 addr = BAR_TSTRORM_INTMEM + | ||
196 | TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id); | ||
197 | |||
198 | __storm_memset_fill(bp, addr, size, 0); | ||
199 | } | ||
200 | |||
201 | static inline void storm_memset_xstats_zero(struct bnx2x *bp, | ||
202 | u8 port, u16 stat_id) | ||
203 | { | ||
204 | size_t size = sizeof(struct xstorm_per_client_stats); | ||
205 | |||
206 | u32 addr = BAR_XSTRORM_INTMEM + | ||
207 | XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id); | ||
208 | |||
209 | __storm_memset_fill(bp, addr, size, 0); | ||
210 | } | ||
211 | |||
212 | |||
213 | static inline void storm_memset_spq_addr(struct bnx2x *bp, | 226 | static inline void storm_memset_spq_addr(struct bnx2x *bp, |
214 | dma_addr_t mapping, u16 abs_fid) | 227 | dma_addr_t mapping, u16 abs_fid) |
215 | { | 228 | { |
@@ -219,103 +232,6 @@ static inline void storm_memset_spq_addr(struct bnx2x *bp, | |||
219 | __storm_memset_dma_mapping(bp, addr, mapping); | 232 | __storm_memset_dma_mapping(bp, addr, mapping); |
220 | } | 233 | } |
221 | 234 | ||
222 | static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid) | ||
223 | { | ||
224 | REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov); | ||
225 | } | ||
226 | |||
227 | static inline void storm_memset_func_cfg(struct bnx2x *bp, | ||
228 | struct tstorm_eth_function_common_config *tcfg, | ||
229 | u16 abs_fid) | ||
230 | { | ||
231 | size_t size = sizeof(struct tstorm_eth_function_common_config); | ||
232 | |||
233 | u32 addr = BAR_TSTRORM_INTMEM + | ||
234 | TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); | ||
235 | |||
236 | __storm_memset_struct(bp, addr, size, (u32 *)tcfg); | ||
237 | } | ||
238 | |||
239 | static inline void storm_memset_xstats_flags(struct bnx2x *bp, | ||
240 | struct stats_indication_flags *flags, | ||
241 | u16 abs_fid) | ||
242 | { | ||
243 | size_t size = sizeof(struct stats_indication_flags); | ||
244 | |||
245 | u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid); | ||
246 | |||
247 | __storm_memset_struct(bp, addr, size, (u32 *)flags); | ||
248 | } | ||
249 | |||
250 | static inline void storm_memset_tstats_flags(struct bnx2x *bp, | ||
251 | struct stats_indication_flags *flags, | ||
252 | u16 abs_fid) | ||
253 | { | ||
254 | size_t size = sizeof(struct stats_indication_flags); | ||
255 | |||
256 | u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid); | ||
257 | |||
258 | __storm_memset_struct(bp, addr, size, (u32 *)flags); | ||
259 | } | ||
260 | |||
261 | static inline void storm_memset_ustats_flags(struct bnx2x *bp, | ||
262 | struct stats_indication_flags *flags, | ||
263 | u16 abs_fid) | ||
264 | { | ||
265 | size_t size = sizeof(struct stats_indication_flags); | ||
266 | |||
267 | u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid); | ||
268 | |||
269 | __storm_memset_struct(bp, addr, size, (u32 *)flags); | ||
270 | } | ||
271 | |||
272 | static inline void storm_memset_cstats_flags(struct bnx2x *bp, | ||
273 | struct stats_indication_flags *flags, | ||
274 | u16 abs_fid) | ||
275 | { | ||
276 | size_t size = sizeof(struct stats_indication_flags); | ||
277 | |||
278 | u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid); | ||
279 | |||
280 | __storm_memset_struct(bp, addr, size, (u32 *)flags); | ||
281 | } | ||
282 | |||
283 | static inline void storm_memset_xstats_addr(struct bnx2x *bp, | ||
284 | dma_addr_t mapping, u16 abs_fid) | ||
285 | { | ||
286 | u32 addr = BAR_XSTRORM_INTMEM + | ||
287 | XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid); | ||
288 | |||
289 | __storm_memset_dma_mapping(bp, addr, mapping); | ||
290 | } | ||
291 | |||
292 | static inline void storm_memset_tstats_addr(struct bnx2x *bp, | ||
293 | dma_addr_t mapping, u16 abs_fid) | ||
294 | { | ||
295 | u32 addr = BAR_TSTRORM_INTMEM + | ||
296 | TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid); | ||
297 | |||
298 | __storm_memset_dma_mapping(bp, addr, mapping); | ||
299 | } | ||
300 | |||
301 | static inline void storm_memset_ustats_addr(struct bnx2x *bp, | ||
302 | dma_addr_t mapping, u16 abs_fid) | ||
303 | { | ||
304 | u32 addr = BAR_USTRORM_INTMEM + | ||
305 | USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid); | ||
306 | |||
307 | __storm_memset_dma_mapping(bp, addr, mapping); | ||
308 | } | ||
309 | |||
310 | static inline void storm_memset_cstats_addr(struct bnx2x *bp, | ||
311 | dma_addr_t mapping, u16 abs_fid) | ||
312 | { | ||
313 | u32 addr = BAR_CSTRORM_INTMEM + | ||
314 | CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid); | ||
315 | |||
316 | __storm_memset_dma_mapping(bp, addr, mapping); | ||
317 | } | ||
318 | |||
319 | static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, | 235 | static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, |
320 | u16 pf_id) | 236 | u16 pf_id) |
321 | { | 237 | { |
@@ -360,45 +276,6 @@ static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, | |||
360 | REG_WR16(bp, addr, eq_prod); | 276 | REG_WR16(bp, addr, eq_prod); |
361 | } | 277 | } |
362 | 278 | ||
363 | static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, | ||
364 | u16 fw_sb_id, u8 sb_index, | ||
365 | u8 ticks) | ||
366 | { | ||
367 | |||
368 | int index_offset = CHIP_IS_E2(bp) ? | ||
369 | offsetof(struct hc_status_block_data_e2, index_data) : | ||
370 | offsetof(struct hc_status_block_data_e1x, index_data); | ||
371 | u32 addr = BAR_CSTRORM_INTMEM + | ||
372 | CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + | ||
373 | index_offset + | ||
374 | sizeof(struct hc_index_data)*sb_index + | ||
375 | offsetof(struct hc_index_data, timeout); | ||
376 | REG_WR8(bp, addr, ticks); | ||
377 | DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n", | ||
378 | port, fw_sb_id, sb_index, ticks); | ||
379 | } | ||
380 | static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, | ||
381 | u16 fw_sb_id, u8 sb_index, | ||
382 | u8 disable) | ||
383 | { | ||
384 | u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); | ||
385 | int index_offset = CHIP_IS_E2(bp) ? | ||
386 | offsetof(struct hc_status_block_data_e2, index_data) : | ||
387 | offsetof(struct hc_status_block_data_e1x, index_data); | ||
388 | u32 addr = BAR_CSTRORM_INTMEM + | ||
389 | CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + | ||
390 | index_offset + | ||
391 | sizeof(struct hc_index_data)*sb_index + | ||
392 | offsetof(struct hc_index_data, flags); | ||
393 | u16 flags = REG_RD16(bp, addr); | ||
394 | /* clear and set */ | ||
395 | flags &= ~HC_INDEX_DATA_HC_ENABLED; | ||
396 | flags |= enable_flag; | ||
397 | REG_WR16(bp, addr, flags); | ||
398 | DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n", | ||
399 | port, fw_sb_id, sb_index, disable); | ||
400 | } | ||
401 | |||
402 | /* used only at init | 279 | /* used only at init |
403 | * locking is done by mcp | 280 | * locking is done by mcp |
404 | */ | 281 | */ |
@@ -492,13 +369,6 @@ static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, | |||
492 | 369 | ||
493 | } | 370 | } |
494 | 371 | ||
495 | const u32 dmae_reg_go_c[] = { | ||
496 | DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, | ||
497 | DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, | ||
498 | DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, | ||
499 | DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 | ||
500 | }; | ||
501 | |||
502 | /* copy command into DMAE command memory and set DMAE command go */ | 372 | /* copy command into DMAE command memory and set DMAE command go */ |
503 | void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) | 373 | void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) |
504 | { | 374 | { |
@@ -579,7 +449,11 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, | |||
579 | bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], | 449 | bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], |
580 | bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); | 450 | bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); |
581 | 451 | ||
582 | /* lock the dmae channel */ | 452 | /* |
453 | * Lock the dmae channel. Disable BHs to prevent a deadlock, | ||
454 | * since this code is called both from syscall context and | ||
455 | * from the ndo_set_rx_mode() flow, which may run in BH context. | ||
456 | */ | ||
583 | spin_lock_bh(&bp->dmae_lock); | 457 | spin_lock_bh(&bp->dmae_lock); |
584 | 458 | ||
585 | /* reset completion */ | 459 | /* reset completion */ |
@@ -834,9 +708,9 @@ static int bnx2x_mc_assert(struct bnx2x *bp) | |||
834 | return rc; | 708 | return rc; |
835 | } | 709 | } |
836 | 710 | ||
837 | static void bnx2x_fw_dump(struct bnx2x *bp) | 711 | void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) |
838 | { | 712 | { |
839 | u32 addr; | 713 | u32 addr, val; |
840 | u32 mark, offset; | 714 | u32 mark, offset; |
841 | __be32 data[9]; | 715 | __be32 data[9]; |
842 | int word; | 716 | int word; |
@@ -845,6 +719,14 @@ static void bnx2x_fw_dump(struct bnx2x *bp) | |||
845 | BNX2X_ERR("NO MCP - can not dump\n"); | 719 | BNX2X_ERR("NO MCP - can not dump\n"); |
846 | return; | 720 | return; |
847 | } | 721 | } |
722 | netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n", | ||
723 | (bp->common.bc_ver & 0xff0000) >> 16, | ||
724 | (bp->common.bc_ver & 0xff00) >> 8, | ||
725 | (bp->common.bc_ver & 0xff)); | ||
726 | |||
727 | val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); | ||
728 | if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) | ||
729 | printk("%s" "MCP PC at 0x%x\n", lvl, val); | ||
848 | 730 | ||
849 | if (BP_PATH(bp) == 0) | 731 | if (BP_PATH(bp) == 0) |
850 | trace_shmem_base = bp->common.shmem_base; | 732 | trace_shmem_base = bp->common.shmem_base; |
@@ -854,9 +736,9 @@ static void bnx2x_fw_dump(struct bnx2x *bp) | |||
854 | mark = REG_RD(bp, addr); | 736 | mark = REG_RD(bp, addr); |
855 | mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) | 737 | mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) |
856 | + ((mark + 0x3) & ~0x3) - 0x08000000; | 738 | + ((mark + 0x3) & ~0x3) - 0x08000000; |
857 | pr_err("begin fw dump (mark 0x%x)\n", mark); | 739 | printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); |
858 | 740 | ||
859 | pr_err(""); | 741 | printk("%s", lvl); |
860 | for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) { | 742 | for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) { |
861 | for (word = 0; word < 8; word++) | 743 | for (word = 0; word < 8; word++) |
862 | data[word] = htonl(REG_RD(bp, offset + 4*word)); | 744 | data[word] = htonl(REG_RD(bp, offset + 4*word)); |
@@ -869,7 +751,12 @@ static void bnx2x_fw_dump(struct bnx2x *bp) | |||
869 | data[8] = 0x0; | 751 | data[8] = 0x0; |
870 | pr_cont("%s", (char *)data); | 752 | pr_cont("%s", (char *)data); |
871 | } | 753 | } |
872 | pr_err("end of fw dump\n"); | 754 | printk("%s" "end of fw dump\n", lvl); |
755 | } | ||
756 | |||
757 | static inline void bnx2x_fw_dump(struct bnx2x *bp) | ||
758 | { | ||
759 | bnx2x_fw_dump_lvl(bp, KERN_ERR); | ||
873 | } | 760 | } |
874 | 761 | ||
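bnx2x_fw_dump_lvl() above unpacks the bootcode version one byte per field, and only prints the MCP program counter when two consecutive reads return the same value, as a cheap sanity check that the MCP is actually running. A standalone sketch of the version unpacking; the sample word 0x060208 is made up for illustration.

#include <stdio.h>

typedef unsigned int u32;

int main(void)
{
	u32 bc_ver = 0x060208;	/* hypothetical packed bootcode version */

	/* same field extraction as the netdev_printk() call above: 6.2.8 */
	printf("bc %d.%d.%d\n",
	       (bc_ver & 0xff0000) >> 16,
	       (bc_ver & 0xff00) >> 8,
	       (bc_ver & 0xff));
	return 0;
}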
875 | void bnx2x_panic_dump(struct bnx2x *bp) | 762 | void bnx2x_panic_dump(struct bnx2x *bp) |
@@ -890,9 +777,9 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
890 | /* Indices */ | 777 | /* Indices */ |
891 | /* Common */ | 778 | /* Common */ |
892 | BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)" | 779 | BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)" |
893 | " spq_prod_idx(0x%x)\n", | 780 | " spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", |
894 | bp->def_idx, bp->def_att_idx, | 781 | bp->def_idx, bp->def_att_idx, bp->attn_state, |
895 | bp->attn_state, bp->spq_prod_idx); | 782 | bp->spq_prod_idx, bp->stats_counter); |
896 | BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", | 783 | BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", |
897 | bp->def_status_blk->atten_status_block.attn_bits, | 784 | bp->def_status_blk->atten_status_block.attn_bits, |
898 | bp->def_status_blk->atten_status_block.attn_bits_ack, | 785 | bp->def_status_blk->atten_status_block.attn_bits_ack, |
@@ -909,15 +796,17 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
909 | CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + | 796 | CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + |
910 | i*sizeof(u32)); | 797 | i*sizeof(u32)); |
911 | 798 | ||
912 | pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) " | 799 | pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) " |
913 | "pf_id(0x%x) vnic_id(0x%x) " | 800 | "pf_id(0x%x) vnic_id(0x%x) " |
914 | "vf_id(0x%x) vf_valid (0x%x)\n", | 801 | "vf_id(0x%x) vf_valid (0x%x) " |
802 | "state(0x%x)\n", | ||
915 | sp_sb_data.igu_sb_id, | 803 | sp_sb_data.igu_sb_id, |
916 | sp_sb_data.igu_seg_id, | 804 | sp_sb_data.igu_seg_id, |
917 | sp_sb_data.p_func.pf_id, | 805 | sp_sb_data.p_func.pf_id, |
918 | sp_sb_data.p_func.vnic_id, | 806 | sp_sb_data.p_func.vnic_id, |
919 | sp_sb_data.p_func.vf_id, | 807 | sp_sb_data.p_func.vf_id, |
920 | sp_sb_data.p_func.vf_valid); | 808 | sp_sb_data.p_func.vf_valid, |
809 | sp_sb_data.state); | ||
921 | 810 | ||
922 | 811 | ||
923 | for_each_eth_queue(bp, i) { | 812 | for_each_eth_queue(bp, i) { |
@@ -926,13 +815,13 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
926 | struct hc_status_block_data_e2 sb_data_e2; | 815 | struct hc_status_block_data_e2 sb_data_e2; |
927 | struct hc_status_block_data_e1x sb_data_e1x; | 816 | struct hc_status_block_data_e1x sb_data_e1x; |
928 | struct hc_status_block_sm *hc_sm_p = | 817 | struct hc_status_block_sm *hc_sm_p = |
929 | CHIP_IS_E2(bp) ? | 818 | CHIP_IS_E1x(bp) ? |
930 | sb_data_e2.common.state_machine : | 819 | sb_data_e1x.common.state_machine : |
931 | sb_data_e1x.common.state_machine; | 820 | sb_data_e2.common.state_machine; |
932 | struct hc_index_data *hc_index_p = | 821 | struct hc_index_data *hc_index_p = |
933 | CHIP_IS_E2(bp) ? | 822 | CHIP_IS_E1x(bp) ? |
934 | sb_data_e2.index_data : | 823 | sb_data_e1x.index_data : |
935 | sb_data_e1x.index_data; | 824 | sb_data_e2.index_data; |
936 | int data_size; | 825 | int data_size; |
937 | u32 *sb_data_p; | 826 | u32 *sb_data_p; |
938 | 827 | ||
@@ -955,8 +844,8 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
955 | i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, | 844 | i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, |
956 | fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); | 845 | fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); |
957 | 846 | ||
958 | loop = CHIP_IS_E2(bp) ? | 847 | loop = CHIP_IS_E1x(bp) ? |
959 | HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X; | 848 | HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2; |
960 | 849 | ||
961 | /* host sb data */ | 850 | /* host sb data */ |
962 | 851 | ||
@@ -976,35 +865,39 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
976 | fp->sb_index_values[j], | 865 | fp->sb_index_values[j], |
977 | (j == loop - 1) ? ")" : " "); | 866 | (j == loop - 1) ? ")" : " "); |
978 | /* fw sb data */ | 867 | /* fw sb data */ |
979 | data_size = CHIP_IS_E2(bp) ? | 868 | data_size = CHIP_IS_E1x(bp) ? |
980 | sizeof(struct hc_status_block_data_e2) : | 869 | sizeof(struct hc_status_block_data_e1x) : |
981 | sizeof(struct hc_status_block_data_e1x); | 870 | sizeof(struct hc_status_block_data_e2); |
982 | data_size /= sizeof(u32); | 871 | data_size /= sizeof(u32); |
983 | sb_data_p = CHIP_IS_E2(bp) ? | 872 | sb_data_p = CHIP_IS_E1x(bp) ? |
984 | (u32 *)&sb_data_e2 : | 873 | (u32 *)&sb_data_e1x : |
985 | (u32 *)&sb_data_e1x; | 874 | (u32 *)&sb_data_e2; |
986 | /* copy sb data in here */ | 875 | /* copy sb data in here */ |
987 | for (j = 0; j < data_size; j++) | 876 | for (j = 0; j < data_size; j++) |
988 | *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM + | 877 | *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM + |
989 | CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + | 878 | CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + |
990 | j * sizeof(u32)); | 879 | j * sizeof(u32)); |
991 | 880 | ||
992 | if (CHIP_IS_E2(bp)) { | 881 | if (!CHIP_IS_E1x(bp)) { |
993 | pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) " | 882 | pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " |
994 | "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n", | 883 | "vnic_id(0x%x) same_igu_sb_1b(0x%x) " |
884 | "state(0x%x)\n", | ||
995 | sb_data_e2.common.p_func.pf_id, | 885 | sb_data_e2.common.p_func.pf_id, |
996 | sb_data_e2.common.p_func.vf_id, | 886 | sb_data_e2.common.p_func.vf_id, |
997 | sb_data_e2.common.p_func.vf_valid, | 887 | sb_data_e2.common.p_func.vf_valid, |
998 | sb_data_e2.common.p_func.vnic_id, | 888 | sb_data_e2.common.p_func.vnic_id, |
999 | sb_data_e2.common.same_igu_sb_1b); | 889 | sb_data_e2.common.same_igu_sb_1b, |
890 | sb_data_e2.common.state); | ||
1000 | } else { | 891 | } else { |
1001 | pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) " | 892 | pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " |
1002 | "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n", | 893 | "vnic_id(0x%x) same_igu_sb_1b(0x%x) " |
894 | "state(0x%x)\n", | ||
1003 | sb_data_e1x.common.p_func.pf_id, | 895 | sb_data_e1x.common.p_func.pf_id, |
1004 | sb_data_e1x.common.p_func.vf_id, | 896 | sb_data_e1x.common.p_func.vf_id, |
1005 | sb_data_e1x.common.p_func.vf_valid, | 897 | sb_data_e1x.common.p_func.vf_valid, |
1006 | sb_data_e1x.common.p_func.vnic_id, | 898 | sb_data_e1x.common.p_func.vnic_id, |
1007 | sb_data_e1x.common.same_igu_sb_1b); | 899 | sb_data_e1x.common.same_igu_sb_1b, |
900 | sb_data_e1x.common.state); | ||
1008 | } | 901 | } |
1009 | 902 | ||
1010 | /* SB_SMs data */ | 903 | /* SB_SMs data */ |
@@ -1093,6 +986,373 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
1093 | BNX2X_ERR("end crash dump -----------------\n"); | 986 | BNX2X_ERR("end crash dump -----------------\n"); |
1094 | } | 987 | } |
1095 | 988 | ||
989 | /* | ||
990 | * FLR Support for E2 | ||
991 | * | ||
992 | * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW | ||
993 | * initialization. | ||
994 | */ | ||
995 | #define FLR_WAIT_USEC 10000 /* 10 milliseconds */ | ||
996 | #define FLR_WAIT_INTERAVAL 50 /* usec */ | ||
997 | #define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */ | ||
998 | |||
999 | struct pbf_pN_buf_regs { | ||
1000 | int pN; | ||
1001 | u32 init_crd; | ||
1002 | u32 crd; | ||
1003 | u32 crd_freed; | ||
1004 | }; | ||
1005 | |||
1006 | struct pbf_pN_cmd_regs { | ||
1007 | int pN; | ||
1008 | u32 lines_occup; | ||
1009 | u32 lines_freed; | ||
1010 | }; | ||
1011 | |||
1012 | static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp, | ||
1013 | struct pbf_pN_buf_regs *regs, | ||
1014 | u32 poll_count) | ||
1015 | { | ||
1016 | u32 init_crd, crd, crd_start, crd_freed, crd_freed_start; | ||
1017 | u32 cur_cnt = poll_count; | ||
1018 | |||
1019 | crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed); | ||
1020 | crd = crd_start = REG_RD(bp, regs->crd); | ||
1021 | init_crd = REG_RD(bp, regs->init_crd); | ||
1022 | |||
1023 | DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); | ||
1024 | DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd); | ||
1025 | DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); | ||
1026 | |||
1027 | while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) < | ||
1028 | (init_crd - crd_start))) { | ||
1029 | if (cur_cnt--) { | ||
1030 | udelay(FLR_WAIT_INTERAVAL); | ||
1031 | crd = REG_RD(bp, regs->crd); | ||
1032 | crd_freed = REG_RD(bp, regs->crd_freed); | ||
1033 | } else { | ||
1034 | DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n", | ||
1035 | regs->pN); | ||
1036 | DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n", | ||
1037 | regs->pN, crd); | ||
1038 | DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n", | ||
1039 | regs->pN, crd_freed); | ||
1040 | break; | ||
1041 | } | ||
1042 | } | ||
1043 | DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n", | ||
1044 | poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); | ||
1045 | } | ||
1046 | |||
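Both flush helpers measure progress as a delta, (u32)SUB_S32(freed, freed_start), rather than comparing against an absolute counter value, so the wait stays correct when the hardware's free-running counters wrap past 2^32 mid-poll. SUB_S32() is not part of this hunk; the sketch below assumes the usual signed-difference definition from bnx2x.h.

#include <stdio.h>

typedef unsigned int u32;
typedef int s32;

/* assumed definition: signed distance between two wrapping u32 counters */
#define SUB_S32(a, b)	((s32)((s32)(a) - (s32)(b)))

int main(void)
{
	u32 freed_start = 0xffffffe0;	/* sampled just before the wrap */
	u32 freed       = 0x00000008;	/* counter wrapped; 0x28 lines freed */
	u32 to_free     = 0x10;

	/* delta form: 0x28 >= 0x10, so the wait is over (correct).
	 * A naive absolute check, freed >= freed_start + to_free
	 * (0x8 >= 0xfffffff0), would wrongly keep waiting.
	 */
	printf("freed so far: 0x%x, done: %s\n",
	       (u32)SUB_S32(freed, freed_start),
	       ((u32)SUB_S32(freed, freed_start) < to_free) ? "no" : "yes");
	return 0;
}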
1047 | static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, | ||
1048 | struct pbf_pN_cmd_regs *regs, | ||
1049 | u32 poll_count) | ||
1050 | { | ||
1051 | u32 occup, to_free, freed, freed_start; | ||
1052 | u32 cur_cnt = poll_count; | ||
1053 | |||
1054 | occup = to_free = REG_RD(bp, regs->lines_occup); | ||
1055 | freed = freed_start = REG_RD(bp, regs->lines_freed); | ||
1056 | |||
1057 | DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); | ||
1058 | DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); | ||
1059 | |||
1060 | while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) { | ||
1061 | if (cur_cnt--) { | ||
1062 | udelay(FLR_WAIT_INTERAVAL); | ||
1063 | occup = REG_RD(bp, regs->lines_occup); | ||
1064 | freed = REG_RD(bp, regs->lines_freed); | ||
1065 | } else { | ||
1066 | DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n", | ||
1067 | regs->pN); | ||
1068 | DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", | ||
1069 | regs->pN, occup); | ||
1070 | DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", | ||
1071 | regs->pN, freed); | ||
1072 | break; | ||
1073 | } | ||
1074 | } | ||
1075 | DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n", | ||
1076 | poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); | ||
1077 | } | ||
1078 | |||
1079 | static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, | ||
1080 | u32 expected, u32 poll_count) | ||
1081 | { | ||
1082 | u32 cur_cnt = poll_count; | ||
1083 | u32 val; | ||
1084 | |||
1085 | while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) | ||
1086 | udelay(FLR_WAIT_INTERAVAL); | ||
1087 | |||
1088 | return val; | ||
1089 | } | ||
1090 | |||
1091 | static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, | ||
1092 | char *msg, u32 poll_cnt) | ||
1093 | { | ||
1094 | u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); | ||
1095 | if (val != 0) { | ||
1096 | BNX2X_ERR("%s usage count=%d\n", msg, val); | ||
1097 | return 1; | ||
1098 | } | ||
1099 | return 0; | ||
1100 | } | ||
1101 | |||
1102 | static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) | ||
1103 | { | ||
1104 | /* adjust polling timeout */ | ||
1105 | if (CHIP_REV_IS_EMUL(bp)) | ||
1106 | return FLR_POLL_CNT * 2000; | ||
1107 | |||
1108 | if (CHIP_REV_IS_FPGA(bp)) | ||
1109 | return FLR_POLL_CNT * 120; | ||
1110 | |||
1111 | return FLR_POLL_CNT; | ||
1112 | } | ||
1113 | |||
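The poll budget works out to FLR_WAIT_USEC / FLR_WAIT_INTERAVAL = 200 iterations of 50 us, roughly 10 ms on real silicon, and bnx2x_flr_clnup_poll_count() above stretches that by 120x on FPGA and 2000x on emulation, where everything runs far slower. A standalone sketch of the same arithmetic:

#include <stdio.h>

#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
#define FLR_WAIT_INTERAVAL	50	/* usec between polls */
#define FLR_POLL_CNT		(FLR_WAIT_USEC / FLR_WAIT_INTERAVAL)	/* 200 */

int main(void)
{
	printf("ASIC:      %u polls (~%u ms)\n", FLR_POLL_CNT,
	       FLR_POLL_CNT * FLR_WAIT_INTERAVAL / 1000);
	printf("FPGA:      %u polls\n", FLR_POLL_CNT * 120);
	printf("emulation: %u polls\n", FLR_POLL_CNT * 2000);
	return 0;
}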
1114 | static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) | ||
1115 | { | ||
1116 | struct pbf_pN_cmd_regs cmd_regs[] = { | ||
1117 | {0, (CHIP_IS_E3B0(bp)) ? | ||
1118 | PBF_REG_TQ_OCCUPANCY_Q0 : | ||
1119 | PBF_REG_P0_TQ_OCCUPANCY, | ||
1120 | (CHIP_IS_E3B0(bp)) ? | ||
1121 | PBF_REG_TQ_LINES_FREED_CNT_Q0 : | ||
1122 | PBF_REG_P0_TQ_LINES_FREED_CNT}, | ||
1123 | {1, (CHIP_IS_E3B0(bp)) ? | ||
1124 | PBF_REG_TQ_OCCUPANCY_Q1 : | ||
1125 | PBF_REG_P1_TQ_OCCUPANCY, | ||
1126 | (CHIP_IS_E3B0(bp)) ? | ||
1127 | PBF_REG_TQ_LINES_FREED_CNT_Q1 : | ||
1128 | PBF_REG_P1_TQ_LINES_FREED_CNT}, | ||
1129 | {4, (CHIP_IS_E3B0(bp)) ? | ||
1130 | PBF_REG_TQ_OCCUPANCY_LB_Q : | ||
1131 | PBF_REG_P4_TQ_OCCUPANCY, | ||
1132 | (CHIP_IS_E3B0(bp)) ? | ||
1133 | PBF_REG_TQ_LINES_FREED_CNT_LB_Q : | ||
1134 | PBF_REG_P4_TQ_LINES_FREED_CNT} | ||
1135 | }; | ||
1136 | |||
1137 | struct pbf_pN_buf_regs buf_regs[] = { | ||
1138 | {0, (CHIP_IS_E3B0(bp)) ? | ||
1139 | PBF_REG_INIT_CRD_Q0 : | ||
1140 | PBF_REG_P0_INIT_CRD , | ||
1141 | (CHIP_IS_E3B0(bp)) ? | ||
1142 | PBF_REG_CREDIT_Q0 : | ||
1143 | PBF_REG_P0_CREDIT, | ||
1144 | (CHIP_IS_E3B0(bp)) ? | ||
1145 | PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : | ||
1146 | PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, | ||
1147 | {1, (CHIP_IS_E3B0(bp)) ? | ||
1148 | PBF_REG_INIT_CRD_Q1 : | ||
1149 | PBF_REG_P1_INIT_CRD, | ||
1150 | (CHIP_IS_E3B0(bp)) ? | ||
1151 | PBF_REG_CREDIT_Q1 : | ||
1152 | PBF_REG_P1_CREDIT, | ||
1153 | (CHIP_IS_E3B0(bp)) ? | ||
1154 | PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : | ||
1155 | PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, | ||
1156 | {4, (CHIP_IS_E3B0(bp)) ? | ||
1157 | PBF_REG_INIT_CRD_LB_Q : | ||
1158 | PBF_REG_P4_INIT_CRD, | ||
1159 | (CHIP_IS_E3B0(bp)) ? | ||
1160 | PBF_REG_CREDIT_LB_Q : | ||
1161 | PBF_REG_P4_CREDIT, | ||
1162 | (CHIP_IS_E3B0(bp)) ? | ||
1163 | PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : | ||
1164 | PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, | ||
1165 | }; | ||
1166 | |||
1167 | int i; | ||
1168 | |||
1169 | /* Verify the command queues are flushed P0, P1, P4 */ | ||
1170 | for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) | ||
1171 | bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); | ||
1172 | |||
1173 | |||
1174 | /* Verify the transmission buffers are flushed P0, P1, P4 */ | ||
1175 | for (i = 0; i < ARRAY_SIZE(buf_regs); i++) | ||
1176 | bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); | ||
1177 | } | ||
1178 | |||
1179 | #define OP_GEN_PARAM(param) \ | ||
1180 | (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) | ||
1181 | |||
1182 | #define OP_GEN_TYPE(type) \ | ||
1183 | (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) | ||
1184 | |||
1185 | #define OP_GEN_AGG_VECT(index) \ | ||
1186 | (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) | ||
1187 | |||
1188 | |||
1189 | static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, | ||
1190 | u32 poll_cnt) | ||
1191 | { | ||
1192 | struct sdm_op_gen op_gen = {0}; | ||
1193 | |||
1194 | u32 comp_addr = BAR_CSTRORM_INTMEM + | ||
1195 | CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); | ||
1196 | int ret = 0; | ||
1197 | |||
1198 | if (REG_RD(bp, comp_addr)) { | ||
1199 | BNX2X_ERR("Cleanup complete is not 0\n"); | ||
1200 | return 1; | ||
1201 | } | ||
1202 | |||
1203 | op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); | ||
1204 | op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); | ||
1205 | op_gen.command |= OP_GEN_AGG_VECT(clnup_func); | ||
1206 | op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; | ||
1207 | |||
1208 | DP(BNX2X_MSG_SP, "FW Final cleanup\n"); | ||
1209 | REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command); | ||
1210 | |||
1211 | if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) { | ||
1212 | BNX2X_ERR("FW final cleanup did not succeed\n"); | ||
1213 | ret = 1; | ||
1214 | } | ||
1215 | /* Zero completion for next FLR */ | ||
1216 | REG_WR(bp, comp_addr, 0); | ||
1217 | |||
1218 | return ret; | ||
1219 | } | ||
1220 | |||
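bnx2x_send_final_clnup() composes the single XSDM "operation generator" word by masking each field into its bit range with the OP_GEN_* helpers before writing it to XSDM_REG_OPERATION_GEN. The real field layout lives in the SDM_OP_GEN_* shift/mask definitions elsewhere in the driver headers; the sketch below uses made-up shifts and masks purely to show how the composition works, so the resulting number is illustrative only.

#include <stdio.h>

typedef unsigned int u32;

/* Hypothetical field layout -- NOT the real SDM_OP_GEN_* definitions */
#define EX_COMP_PARAM_SHIFT		0
#define EX_COMP_PARAM			0x0000ffff
#define EX_COMP_TYPE_SHIFT		16
#define EX_COMP_TYPE			0x000f0000
#define EX_AGG_VECT_IDX_SHIFT		20
#define EX_AGG_VECT_IDX			0x0ff00000
#define EX_AGG_VECT_IDX_VALID_SHIFT	28

#define OP_GEN_PARAM(param) \
	(((param) << EX_COMP_PARAM_SHIFT) & EX_COMP_PARAM)
#define OP_GEN_TYPE(type) \
	(((type) << EX_COMP_TYPE_SHIFT) & EX_COMP_TYPE)
#define OP_GEN_AGG_VECT(index) \
	(((index) << EX_AGG_VECT_IDX_SHIFT) & EX_AGG_VECT_IDX)

int main(void)
{
	u32 command = 0;
	u32 clnup_func = 3;	/* illustrative function id */

	command |= OP_GEN_PARAM(1);	/* final-cleanup aggregation index */
	command |= OP_GEN_TYPE(2);	/* final-cleanup completion type */
	command |= OP_GEN_AGG_VECT(clnup_func);
	command |= 1 << EX_AGG_VECT_IDX_VALID_SHIFT;

	printf("op_gen.command = 0x%08x\n", command);
	return 0;
}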
1221 | static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev) | ||
1222 | { | ||
1223 | int pos; | ||
1224 | u16 status; | ||
1225 | |||
1226 | pos = pci_pcie_cap(dev); | ||
1227 | if (!pos) | ||
1228 | return false; | ||
1229 | |||
1230 | pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); | ||
1231 | return status & PCI_EXP_DEVSTA_TRPND; | ||
1232 | } | ||
1233 | |||
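bnx2x_is_pcie_pending() reads the PCIe Device Status register and tests the Transactions Pending bit, which stays set while any non-posted request issued by the function is still outstanding; the FLR cleanup flow below checks it once after the 100 ms settle time rather than polling. A standalone sketch of the bit test on a raw DEVSTA value; 0x20 is the Transactions Pending mask in the PCIe spec and is hard-coded here (as EX_DEVSTA_TRPND) only so the sketch does not depend on kernel headers.

#include <stdio.h>

typedef unsigned short u16;

#define EX_DEVSTA_TRPND	0x20	/* Device Status: Transactions Pending */

static int example_pcie_pending(u16 devsta)
{
	return !!(devsta & EX_DEVSTA_TRPND);
}

int main(void)
{
	printf("devsta 0x0021 -> pending=%d\n", example_pcie_pending(0x0021));
	printf("devsta 0x0001 -> pending=%d\n", example_pcie_pending(0x0001));
	return 0;
}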
1234 | /* PF FLR specific routines | ||
1235 | */ | ||
1236 | static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) | ||
1237 | { | ||
1238 | |||
1239 | /* wait for CFC PF usage-counter to zero (includes all the VFs) */ | ||
1240 | if (bnx2x_flr_clnup_poll_hw_counter(bp, | ||
1241 | CFC_REG_NUM_LCIDS_INSIDE_PF, | ||
1242 | "CFC PF usage counter timed out", | ||
1243 | poll_cnt)) | ||
1244 | return 1; | ||
1245 | |||
1246 | |||
1247 | /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ | ||
1248 | if (bnx2x_flr_clnup_poll_hw_counter(bp, | ||
1249 | DORQ_REG_PF_USAGE_CNT, | ||
1250 | "DQ PF usage counter timed out", | ||
1251 | poll_cnt)) | ||
1252 | return 1; | ||
1253 | |||
1254 | /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ | ||
1255 | if (bnx2x_flr_clnup_poll_hw_counter(bp, | ||
1256 | QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp), | ||
1257 | "QM PF usage counter timed out", | ||
1258 | poll_cnt)) | ||
1259 | return 1; | ||
1260 | |||
1261 | /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ | ||
1262 | if (bnx2x_flr_clnup_poll_hw_counter(bp, | ||
1263 | TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp), | ||
1264 | "Timers VNIC usage counter timed out", | ||
1265 | poll_cnt)) | ||
1266 | return 1; | ||
1267 | if (bnx2x_flr_clnup_poll_hw_counter(bp, | ||
1268 | TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp), | ||
1269 | "Timers NUM_SCANS usage counter timed out", | ||
1270 | poll_cnt)) | ||
1271 | return 1; | ||
1272 | |||
1273 | /* Wait DMAE PF usage counter to zero */ | ||
1274 | if (bnx2x_flr_clnup_poll_hw_counter(bp, | ||
1275 | dmae_reg_go_c[INIT_DMAE_C(bp)], | ||
1276 | "DMAE dommand register timed out", | ||
1277 | poll_cnt)) | ||
1278 | return 1; | ||
1279 | |||
1280 | return 0; | ||
1281 | } | ||
1282 | |||
1283 | static void bnx2x_hw_enable_status(struct bnx2x *bp) | ||
1284 | { | ||
1285 | u32 val; | ||
1286 | |||
1287 | val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF); | ||
1288 | DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); | ||
1289 | |||
1290 | val = REG_RD(bp, PBF_REG_DISABLE_PF); | ||
1291 | DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val); | ||
1292 | |||
1293 | val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN); | ||
1294 | DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); | ||
1295 | |||
1296 | val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN); | ||
1297 | DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); | ||
1298 | |||
1299 | val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK); | ||
1300 | DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); | ||
1301 | |||
1302 | val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); | ||
1303 | DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); | ||
1304 | |||
1305 | val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); | ||
1306 | DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); | ||
1307 | |||
1308 | val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); | ||
1309 | DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", | ||
1310 | val); | ||
1311 | } | ||
1312 | |||
1313 | static int bnx2x_pf_flr_clnup(struct bnx2x *bp) | ||
1314 | { | ||
1315 | u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); | ||
1316 | |||
1317 | DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp)); | ||
1318 | |||
1319 | /* Re-enable PF target read access */ | ||
1320 | REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); | ||
1321 | |||
1322 | /* Poll HW usage counters */ | ||
1323 | if (bnx2x_poll_hw_usage_counters(bp, poll_cnt)) | ||
1324 | return -EBUSY; | ||
1325 | |||
1326 | /* Zero the igu 'trailing edge' and 'leading edge' */ | ||
1327 | |||
1328 | /* Send the FW cleanup command */ | ||
1329 | if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt)) | ||
1330 | return -EBUSY; | ||
1331 | |||
1332 | /* ATC cleanup */ | ||
1333 | |||
1334 | /* Verify TX hw is flushed */ | ||
1335 | bnx2x_tx_hw_flushed(bp, poll_cnt); | ||
1336 | |||
1337 | /* Wait 100ms (not adjusted according to platform) */ | ||
1338 | msleep(100); | ||
1339 | |||
1340 | /* Verify no pending pci transactions */ | ||
1341 | if (bnx2x_is_pcie_pending(bp->pdev)) | ||
1342 | BNX2X_ERR("PCIE Transactions still pending\n"); | ||
1343 | |||
1344 | /* Debug */ | ||
1345 | bnx2x_hw_enable_status(bp); | ||
1346 | |||
1347 | /* | ||
1348 | * Master enable - Due to WB DMAE writes performed before this | ||
1349 | * register is re-initialized as part of the regular function init | ||
1350 | */ | ||
1351 | REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); | ||
1352 | |||
1353 | return 0; | ||
1354 | } | ||
1355 | |||
1096 | static void bnx2x_hc_int_enable(struct bnx2x *bp) | 1356 | static void bnx2x_hc_int_enable(struct bnx2x *bp) |
1097 | { | 1357 | { |
1098 | int port = BP_PORT(bp); | 1358 | int port = BP_PORT(bp); |
@@ -1286,10 +1546,6 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) | |||
1286 | int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; | 1546 | int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; |
1287 | int i, offset; | 1547 | int i, offset; |
1288 | 1548 | ||
1289 | /* disable interrupt handling */ | ||
1290 | atomic_inc(&bp->intr_sem); | ||
1291 | smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ | ||
1292 | |||
1293 | if (disable_hw) | 1549 | if (disable_hw) |
1294 | /* prevent the HW from sending interrupts */ | 1550 | /* prevent the HW from sending interrupts */ |
1295 | bnx2x_int_disable(bp); | 1551 | bnx2x_int_disable(bp); |
@@ -1302,12 +1558,13 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) | |||
1302 | offset++; | 1558 | offset++; |
1303 | #endif | 1559 | #endif |
1304 | for_each_eth_queue(bp, i) | 1560 | for_each_eth_queue(bp, i) |
1305 | synchronize_irq(bp->msix_table[i + offset].vector); | 1561 | synchronize_irq(bp->msix_table[offset++].vector); |
1306 | } else | 1562 | } else |
1307 | synchronize_irq(bp->pdev->irq); | 1563 | synchronize_irq(bp->pdev->irq); |
1308 | 1564 | ||
1309 | /* make sure sp_task is not running */ | 1565 | /* make sure sp_task is not running */ |
1310 | cancel_delayed_work(&bp->sp_task); | 1566 | cancel_delayed_work(&bp->sp_task); |
1567 | cancel_delayed_work(&bp->period_task); | ||
1311 | flush_workqueue(bnx2x_wq); | 1568 | flush_workqueue(bnx2x_wq); |
1312 | } | 1569 | } |
1313 | 1570 | ||
@@ -1351,53 +1608,114 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) | |||
1351 | return false; | 1608 | return false; |
1352 | } | 1609 | } |
1353 | 1610 | ||
1611 | /** | ||
1612 | * bnx2x_get_leader_lock_resource - get the recovery leader resource id | ||
1613 | * | ||
1614 | * @bp: driver handle | ||
1615 | * | ||
1616 | * Returns the recovery leader resource id according to the engine this function | ||
1617 | * belongs to. Currently only 2 engines are supported. | ||
1618 | */ | ||
1619 | static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp) | ||
1620 | { | ||
1621 | if (BP_PATH(bp)) | ||
1622 | return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; | ||
1623 | else | ||
1624 | return HW_LOCK_RESOURCE_RECOVERY_LEADER_0; | ||
1625 | } | ||
1626 | |||
1627 | /** | ||
1628 | * bnx2x_trylock_leader_lock - try to acquire a leader lock. | ||
1629 | * | ||
1630 | * @bp: driver handle | ||
1631 | * | ||
1632 | * Tries to acquire a leader lock for the current engine. | ||
1633 | */ | ||
1634 | static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp) | ||
1635 | { | ||
1636 | return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); | ||
1637 | } | ||
1638 | |||
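The recovery "leader" is elected per engine: each path gets its own HW_LOCK_RESOURCE_RECOVERY_LEADER_* lock, and whichever function wins the trylock drives recovery for that engine. A hedged usage sketch of the election step; example_recovery_attempt and its body are illustrative, not the driver's actual recovery code.

/* Sketch only: try to become the recovery leader for this engine. */
static void example_recovery_attempt(struct bnx2x *bp)
{
	if (bnx2x_trylock_leader_lock(bp)) {
		/* we own the per-path recovery-leader lock; drive recovery
		 * and release it with bnx2x_release_leader_lock() when done
		 */
		;
	}
	/* otherwise another function on this engine is already the leader */
}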
1354 | #ifdef BCM_CNIC | 1639 | #ifdef BCM_CNIC |
1355 | static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid); | 1640 | static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err); |
1356 | #endif | 1641 | #endif |
1357 | 1642 | ||
1358 | void bnx2x_sp_event(struct bnx2x_fastpath *fp, | 1643 | void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) |
1359 | union eth_rx_cqe *rr_cqe) | ||
1360 | { | 1644 | { |
1361 | struct bnx2x *bp = fp->bp; | 1645 | struct bnx2x *bp = fp->bp; |
1362 | int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); | 1646 | int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); |
1363 | int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); | 1647 | int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); |
1648 | enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX; | ||
1649 | struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj; | ||
1364 | 1650 | ||
1365 | DP(BNX2X_MSG_SP, | 1651 | DP(BNX2X_MSG_SP, |
1366 | "fp %d cid %d got ramrod #%d state is %x type is %d\n", | 1652 | "fp %d cid %d got ramrod #%d state is %x type is %d\n", |
1367 | fp->index, cid, command, bp->state, | 1653 | fp->index, cid, command, bp->state, |
1368 | rr_cqe->ramrod_cqe.ramrod_type); | 1654 | rr_cqe->ramrod_cqe.ramrod_type); |
1369 | 1655 | ||
1370 | switch (command | fp->state) { | 1656 | switch (command) { |
1371 | case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING): | 1657 | case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): |
1658 | DP(NETIF_MSG_IFUP, "got UPDATE ramrod. CID %d\n", cid); | ||
1659 | drv_cmd = BNX2X_Q_CMD_UPDATE; | ||
1660 | break; | ||
1661 | case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): | ||
1372 | DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid); | 1662 | DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid); |
1373 | fp->state = BNX2X_FP_STATE_OPEN; | 1663 | drv_cmd = BNX2X_Q_CMD_SETUP; |
1374 | break; | 1664 | break; |
1375 | 1665 | ||
1376 | case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING): | 1666 | case (RAMROD_CMD_ID_ETH_HALT): |
1377 | DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid); | 1667 | DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid); |
1378 | fp->state = BNX2X_FP_STATE_HALTED; | 1668 | drv_cmd = BNX2X_Q_CMD_HALT; |
1379 | break; | 1669 | break; |
1380 | 1670 | ||
1381 | case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING): | 1671 | case (RAMROD_CMD_ID_ETH_TERMINATE): |
1382 | DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid); | 1672 | DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid); |
1383 | fp->state = BNX2X_FP_STATE_TERMINATED; | 1673 | drv_cmd = BNX2X_Q_CMD_TERMINATE; |
1384 | break; | 1674 | break; |
1385 | 1675 | ||
1386 | default: | 1676 | case (RAMROD_CMD_ID_ETH_EMPTY): |
1387 | BNX2X_ERR("unexpected MC reply (%d) " | 1677 | DP(NETIF_MSG_IFDOWN, "got MULTI[%d] empty ramrod\n", cid); |
1388 | "fp[%d] state is %x\n", | 1678 | drv_cmd = BNX2X_Q_CMD_EMPTY; |
1389 | command, fp->index, fp->state); | ||
1390 | break; | 1679 | break; |
1680 | |||
1681 | default: | ||
1682 | BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n", | ||
1683 | command, fp->index); | ||
1684 | return; | ||
1391 | } | 1685 | } |
1392 | 1686 | ||
1687 | if ((drv_cmd != BNX2X_Q_CMD_MAX) && | ||
1688 | q_obj->complete_cmd(bp, q_obj, drv_cmd)) | ||
1689 | /* q_obj->complete_cmd() failure means that this was | ||
1690 | * an unexpected completion. | ||
1691 | * | ||
1692 | * In this case we don't want to increase the bp->spq_left | ||
1693 | * because apparently we haven't sent this command the first | ||
1694 | * place. | ||
1695 | */ | ||
1696 | #ifdef BNX2X_STOP_ON_ERROR | ||
1697 | bnx2x_panic(); | ||
1698 | #else | ||
1699 | return; | ||
1700 | #endif | ||
1701 | |||
1393 | smp_mb__before_atomic_inc(); | 1702 | smp_mb__before_atomic_inc(); |
1394 | atomic_inc(&bp->cq_spq_left); | 1703 | atomic_inc(&bp->cq_spq_left); |
1395 | /* push the change in fp->state and towards the memory */ | 1704 | /* push the change in bp->spq_left and towards the memory */ |
1396 | smp_wmb(); | 1705 | smp_mb__after_atomic_inc(); |
1397 | 1706 | ||
1398 | return; | 1707 | return; |
1399 | } | 1708 | } |
1400 | 1709 | ||
1710 | void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
1711 | u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod) | ||
1712 | { | ||
1713 | u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset; | ||
1714 | |||
1715 | bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod, | ||
1716 | start); | ||
1717 | } | ||
1718 | |||
1401 | irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | 1719 | irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) |
1402 | { | 1720 | { |
1403 | struct bnx2x *bp = netdev_priv(dev_instance); | 1721 | struct bnx2x *bp = netdev_priv(dev_instance); |
@@ -1412,12 +1730,6 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1412 | } | 1730 | } |
1413 | DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status); | 1731 | DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status); |
1414 | 1732 | ||
1415 | /* Return here if interrupt is disabled */ | ||
1416 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | ||
1417 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); | ||
1418 | return IRQ_HANDLED; | ||
1419 | } | ||
1420 | |||
1421 | #ifdef BNX2X_STOP_ON_ERROR | 1733 | #ifdef BNX2X_STOP_ON_ERROR |
1422 | if (unlikely(bp->panic)) | 1734 | if (unlikely(bp->panic)) |
1423 | return IRQ_HANDLED; | 1735 | return IRQ_HANDLED; |
@@ -1428,7 +1740,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1428 | 1740 | ||
1429 | mask = 0x2 << (fp->index + CNIC_CONTEXT_USE); | 1741 | mask = 0x2 << (fp->index + CNIC_CONTEXT_USE); |
1430 | if (status & mask) { | 1742 | if (status & mask) { |
1431 | /* Handle Rx and Tx according to SB id */ | 1743 | /* Handle Rx or Tx according to SB id */ |
1432 | prefetch(fp->rx_cons_sb); | 1744 | prefetch(fp->rx_cons_sb); |
1433 | prefetch(fp->tx_cons_sb); | 1745 | prefetch(fp->tx_cons_sb); |
1434 | prefetch(&fp->sb_running_index[SM_RX_ID]); | 1746 | prefetch(&fp->sb_running_index[SM_RX_ID]); |
@@ -1442,11 +1754,13 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1442 | if (status & (mask | 0x1)) { | 1754 | if (status & (mask | 0x1)) { |
1443 | struct cnic_ops *c_ops = NULL; | 1755 | struct cnic_ops *c_ops = NULL; |
1444 | 1756 | ||
1445 | rcu_read_lock(); | 1757 | if (likely(bp->state == BNX2X_STATE_OPEN)) { |
1446 | c_ops = rcu_dereference(bp->cnic_ops); | 1758 | rcu_read_lock(); |
1447 | if (c_ops) | 1759 | c_ops = rcu_dereference(bp->cnic_ops); |
1448 | c_ops->cnic_handler(bp->cnic_data, NULL); | 1760 | if (c_ops) |
1449 | rcu_read_unlock(); | 1761 | c_ops->cnic_handler(bp->cnic_data, NULL); |
1762 | rcu_read_unlock(); | ||
1763 | } | ||
1450 | 1764 | ||
1451 | status &= ~mask; | 1765 | status &= ~mask; |
1452 | } | 1766 | } |
@@ -1467,9 +1781,6 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1467 | return IRQ_HANDLED; | 1781 | return IRQ_HANDLED; |
1468 | } | 1782 | } |
1469 | 1783 | ||
1470 | /* end of fast path */ | ||
1471 | |||
1472 | |||
1473 | /* Link */ | 1784 | /* Link */ |
1474 | 1785 | ||
1475 | /* | 1786 | /* |
@@ -1521,6 +1832,11 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) | |||
1521 | return -EAGAIN; | 1832 | return -EAGAIN; |
1522 | } | 1833 | } |
1523 | 1834 | ||
1835 | int bnx2x_release_leader_lock(struct bnx2x *bp) | ||
1836 | { | ||
1837 | return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); | ||
1838 | } | ||
1839 | |||
1524 | int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) | 1840 | int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) |
1525 | { | 1841 | { |
1526 | u32 lock_status; | 1842 | u32 lock_status; |
@@ -1641,6 +1957,53 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) | |||
1641 | return 0; | 1957 | return 0; |
1642 | } | 1958 | } |
1643 | 1959 | ||
1960 | int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode) | ||
1961 | { | ||
1962 | u32 gpio_reg = 0; | ||
1963 | int rc = 0; | ||
1964 | |||
1965 | /* Any port swapping should be handled by caller. */ | ||
1966 | |||
1967 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); | ||
1968 | /* read GPIO and clear the FLOAT, CLR and SET controls for the given pins */ ||
1969 | gpio_reg = REG_RD(bp, MISC_REG_GPIO); | ||
1970 | gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); | ||
1971 | gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); | ||
1972 | gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); | ||
1973 | |||
1974 | switch (mode) { | ||
1975 | case MISC_REGISTERS_GPIO_OUTPUT_LOW: | ||
1976 | DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins); | ||
1977 | /* set CLR */ | ||
1978 | gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); | ||
1979 | break; | ||
1980 | |||
1981 | case MISC_REGISTERS_GPIO_OUTPUT_HIGH: | ||
1982 | DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins); | ||
1983 | /* set SET */ | ||
1984 | gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); | ||
1985 | break; | ||
1986 | |||
1987 | case MISC_REGISTERS_GPIO_INPUT_HI_Z: | ||
1988 | DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins); | ||
1989 | /* set FLOAT */ | ||
1990 | gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); | ||
1991 | break; | ||
1992 | |||
1993 | default: | ||
1994 | BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode); | ||
1995 | rc = -EINVAL; | ||
1996 | break; | ||
1997 | } | ||
1998 | |||
1999 | if (rc == 0) | ||
2000 | REG_WR(bp, MISC_REG_GPIO, gpio_reg); | ||
2001 | |||
2002 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); | ||
2003 | |||
2004 | return rc; | ||
2005 | } | ||
2006 | |||
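
bnx2x_set_mult_gpio above is a read-modify-write of a single register holding three per-pin control fields. The sketch below shows the same shape in isolation; the bit positions and mode names are assumptions picked for illustration, not the MISC_REGISTERS_GPIO_* values from bnx2x_reg.h, and a plain variable stands in for REG_RD/REG_WR:

#include <stdint.h>
#include <stdio.h>

/* Illustrative positions only -- the real values live in bnx2x_reg.h */
#define GPIO_SET_POS    8
#define GPIO_CLR_POS    16
#define GPIO_FLOAT_POS  24

enum gpio_mode { GPIO_OUTPUT_LOW, GPIO_OUTPUT_HIGH, GPIO_INPUT_HI_Z };

/* Clear all three control fields for the selected pins, then raise the
 * one field that matches the requested mode -- same shape as the driver. */
static uint32_t set_mult_gpio(uint32_t gpio_reg, uint8_t pins, enum gpio_mode mode)
{
	gpio_reg &= ~((uint32_t)pins << GPIO_FLOAT_POS);
	gpio_reg &= ~((uint32_t)pins << GPIO_CLR_POS);
	gpio_reg &= ~((uint32_t)pins << GPIO_SET_POS);

	switch (mode) {
	case GPIO_OUTPUT_LOW:
		gpio_reg |= (uint32_t)pins << GPIO_CLR_POS;
		break;
	case GPIO_OUTPUT_HIGH:
		gpio_reg |= (uint32_t)pins << GPIO_SET_POS;
		break;
	case GPIO_INPUT_HI_Z:
		gpio_reg |= (uint32_t)pins << GPIO_FLOAT_POS;
		break;
	}
	return gpio_reg;
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_mult_gpio(reg, 0x3, GPIO_OUTPUT_HIGH);   /* pins 0-1 high */
	printf("gpio_reg = 0x%08x\n", (unsigned)reg);      /* 0x00000300   */
	return 0;
}
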
1644 | int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) | 2007 | int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) |
1645 | { | 2008 | { |
1646 | /* The GPIO should be swapped if swap register is set and active */ | 2009 | /* The GPIO should be swapped if swap register is set and active */ |
@@ -1733,45 +2096,6 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) | |||
1733 | return 0; | 2096 | return 0; |
1734 | } | 2097 | } |
1735 | 2098 | ||
1736 | int bnx2x_get_link_cfg_idx(struct bnx2x *bp) | ||
1737 | { | ||
1738 | u32 sel_phy_idx = 0; | ||
1739 | if (bp->link_vars.link_up) { | ||
1740 | sel_phy_idx = EXT_PHY1; | ||
1741 | /* In case link is SERDES, check if the EXT_PHY2 is the one */ | ||
1742 | if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && | ||
1743 | (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) | ||
1744 | sel_phy_idx = EXT_PHY2; | ||
1745 | } else { | ||
1746 | |||
1747 | switch (bnx2x_phy_selection(&bp->link_params)) { | ||
1748 | case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: | ||
1749 | case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: | ||
1750 | case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: | ||
1751 | sel_phy_idx = EXT_PHY1; | ||
1752 | break; | ||
1753 | case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: | ||
1754 | case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: | ||
1755 | sel_phy_idx = EXT_PHY2; | ||
1756 | break; | ||
1757 | } | ||
1758 | } | ||
1759 | /* | ||
1760 | * The selected actived PHY is always after swapping (in case PHY | ||
1761 | * swapping is enabled). So when swapping is enabled, we need to reverse | ||
1762 | * the configuration | ||
1763 | */ | ||
1764 | |||
1765 | if (bp->link_params.multi_phy_config & | ||
1766 | PORT_HW_CFG_PHY_SWAPPED_ENABLED) { | ||
1767 | if (sel_phy_idx == EXT_PHY1) | ||
1768 | sel_phy_idx = EXT_PHY2; | ||
1769 | else if (sel_phy_idx == EXT_PHY2) | ||
1770 | sel_phy_idx = EXT_PHY1; | ||
1771 | } | ||
1772 | return LINK_CONFIG_IDX(sel_phy_idx); | ||
1773 | } | ||
1774 | |||
1775 | void bnx2x_calc_fc_adv(struct bnx2x *bp) | 2099 | void bnx2x_calc_fc_adv(struct bnx2x *bp) |
1776 | { | 2100 | { |
1777 | u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); | 2101 | u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); |
@@ -1828,7 +2152,8 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) | |||
1828 | if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) { | 2152 | if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) { |
1829 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); | 2153 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); |
1830 | bnx2x_link_report(bp); | 2154 | bnx2x_link_report(bp); |
1831 | } | 2155 | } else |
2156 | queue_delayed_work(bnx2x_wq, &bp->period_task, 0); | ||
1832 | bp->link_params.req_line_speed[cfx_idx] = req_line_speed; | 2157 | bp->link_params.req_line_speed[cfx_idx] = req_line_speed; |
1833 | return rc; | 2158 | return rc; |
1834 | } | 2159 | } |
@@ -1942,8 +2267,12 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) | |||
1942 | bp->vn_weight_sum += vn_min_rate; | 2267 | bp->vn_weight_sum += vn_min_rate; |
1943 | } | 2268 | } |
1944 | 2269 | ||
1945 | /* ... only if all min rates are zeros - disable fairness */ | 2270 | /* if ETS or all min rates are zeros - disable fairness */ |
1946 | if (all_zero) { | 2271 | if (BNX2X_IS_ETS_ENABLED(bp)) { |
2272 | bp->cmng.flags.cmng_enables &= | ||
2273 | ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; | ||
2274 | DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); | ||
2275 | } else if (all_zero) { | ||
1947 | bp->cmng.flags.cmng_enables &= | 2276 | bp->cmng.flags.cmng_enables &= |
1948 | ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; | 2277 | ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; |
1949 | DP(NETIF_MSG_IFUP, "All MIN values are zeroes" | 2278 | DP(NETIF_MSG_IFUP, "All MIN values are zeroes" |
@@ -2144,11 +2473,11 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2144 | pause_enabled); | 2473 | pause_enabled); |
2145 | } | 2474 | } |
2146 | 2475 | ||
2147 | if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { | 2476 | if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { |
2148 | struct host_port_stats *pstats; | 2477 | struct host_port_stats *pstats; |
2149 | 2478 | ||
2150 | pstats = bnx2x_sp(bp, port_stats); | 2479 | pstats = bnx2x_sp(bp, port_stats); |
2151 | /* reset old bmac stats */ | 2480 | /* reset old mac stats */ |
2152 | memset(&(pstats->mac_stx[0]), 0, | 2481 | memset(&(pstats->mac_stx[0]), 0, |
2153 | sizeof(struct mac_stx)); | 2482 | sizeof(struct mac_stx)); |
2154 | } | 2483 | } |
@@ -2198,12 +2527,23 @@ static void bnx2x_pmf_update(struct bnx2x *bp) | |||
2198 | bp->port.pmf = 1; | 2527 | bp->port.pmf = 1; |
2199 | DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); | 2528 | DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); |
2200 | 2529 | ||
2530 | /* | ||
2531 | * We need the mb() to ensure the ordering between the writing to | ||
2532 | * bp->port.pmf here and reading it from the bnx2x_periodic_task(). | ||
2533 | */ | ||
2534 | smp_mb(); | ||
2535 | |||
2536 | /* queue a periodic task */ | ||
2537 | queue_delayed_work(bnx2x_wq, &bp->period_task, 0); | ||
2538 | |||
2539 | bnx2x_dcbx_pmf_update(bp); | ||
2540 | |||
2201 | /* enable nig attention */ | 2541 | /* enable nig attention */ |
2202 | val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); | 2542 | val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); |
2203 | if (bp->common.int_block == INT_BLOCK_HC) { | 2543 | if (bp->common.int_block == INT_BLOCK_HC) { |
2204 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); | 2544 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); |
2205 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); | 2545 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); |
2206 | } else if (CHIP_IS_E2(bp)) { | 2546 | } else if (!CHIP_IS_E1x(bp)) { |
2207 | REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); | 2547 | REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); |
2208 | REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); | 2548 | REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); |
2209 | } | 2549 | } |
@@ -2233,7 +2573,8 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) | |||
2233 | SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param); | 2573 | SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param); |
2234 | SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq)); | 2574 | SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq)); |
2235 | 2575 | ||
2236 | DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); | 2576 | DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n", |
2577 | (command | seq), param); | ||
2237 | 2578 | ||
2238 | do { | 2579 | do { |
2239 | /* let the FW do it's magic ... */ | 2580 | /* let the FW do it's magic ... */ |
@@ -2264,141 +2605,25 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) | |||
2264 | static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp) | 2605 | static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp) |
2265 | { | 2606 | { |
2266 | #ifdef BCM_CNIC | 2607 | #ifdef BCM_CNIC |
2267 | if (IS_FCOE_FP(fp) && IS_MF(bp)) | 2608 | /* Statistics are not supported for CNIC Clients at the moment */ |
2609 | if (IS_FCOE_FP(fp)) | ||
2268 | return false; | 2610 | return false; |
2269 | #endif | 2611 | #endif |
2270 | return true; | 2612 | return true; |
2271 | } | 2613 | } |
2272 | 2614 | ||
2273 | /* must be called under rtnl_lock */ | 2615 | void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) |
2274 | static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters) | ||
2275 | { | 2616 | { |
2276 | u32 mask = (1 << cl_id); | 2617 | if (CHIP_IS_E1x(bp)) { |
2277 | 2618 | struct tstorm_eth_function_common_config tcfg = {0}; | |
2278 | /* initial seeting is BNX2X_ACCEPT_NONE */ | ||
2279 | u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1; | ||
2280 | u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; | ||
2281 | u8 unmatched_unicast = 0; | ||
2282 | |||
2283 | if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST) | ||
2284 | unmatched_unicast = 1; | ||
2285 | 2619 | ||
2286 | if (filters & BNX2X_PROMISCUOUS_MODE) { | 2620 | storm_memset_func_cfg(bp, &tcfg, p->func_id); |
2287 | /* promiscious - accept all, drop none */ | ||
2288 | drop_all_ucast = drop_all_bcast = drop_all_mcast = 0; | ||
2289 | accp_all_ucast = accp_all_bcast = accp_all_mcast = 1; | ||
2290 | if (IS_MF_SI(bp)) { | ||
2291 | /* | ||
2292 | * SI mode defines to accept in promiscuos mode | ||
2293 | * only unmatched packets | ||
2294 | */ | ||
2295 | unmatched_unicast = 1; | ||
2296 | accp_all_ucast = 0; | ||
2297 | } | ||
2298 | } | ||
2299 | if (filters & BNX2X_ACCEPT_UNICAST) { | ||
2300 | /* accept matched ucast */ | ||
2301 | drop_all_ucast = 0; | ||
2302 | } | 2621 | } |
2303 | if (filters & BNX2X_ACCEPT_MULTICAST) | ||
2304 | /* accept matched mcast */ | ||
2305 | drop_all_mcast = 0; | ||
2306 | |||
2307 | if (filters & BNX2X_ACCEPT_ALL_UNICAST) { | ||
2308 | /* accept all mcast */ | ||
2309 | drop_all_ucast = 0; | ||
2310 | accp_all_ucast = 1; | ||
2311 | } | ||
2312 | if (filters & BNX2X_ACCEPT_ALL_MULTICAST) { | ||
2313 | /* accept all mcast */ | ||
2314 | drop_all_mcast = 0; | ||
2315 | accp_all_mcast = 1; | ||
2316 | } | ||
2317 | if (filters & BNX2X_ACCEPT_BROADCAST) { | ||
2318 | /* accept (all) bcast */ | ||
2319 | drop_all_bcast = 0; | ||
2320 | accp_all_bcast = 1; | ||
2321 | } | ||
2322 | |||
2323 | bp->mac_filters.ucast_drop_all = drop_all_ucast ? | ||
2324 | bp->mac_filters.ucast_drop_all | mask : | ||
2325 | bp->mac_filters.ucast_drop_all & ~mask; | ||
2326 | |||
2327 | bp->mac_filters.mcast_drop_all = drop_all_mcast ? | ||
2328 | bp->mac_filters.mcast_drop_all | mask : | ||
2329 | bp->mac_filters.mcast_drop_all & ~mask; | ||
2330 | |||
2331 | bp->mac_filters.bcast_drop_all = drop_all_bcast ? | ||
2332 | bp->mac_filters.bcast_drop_all | mask : | ||
2333 | bp->mac_filters.bcast_drop_all & ~mask; | ||
2334 | |||
2335 | bp->mac_filters.ucast_accept_all = accp_all_ucast ? | ||
2336 | bp->mac_filters.ucast_accept_all | mask : | ||
2337 | bp->mac_filters.ucast_accept_all & ~mask; | ||
2338 | |||
2339 | bp->mac_filters.mcast_accept_all = accp_all_mcast ? | ||
2340 | bp->mac_filters.mcast_accept_all | mask : | ||
2341 | bp->mac_filters.mcast_accept_all & ~mask; | ||
2342 | |||
2343 | bp->mac_filters.bcast_accept_all = accp_all_bcast ? | ||
2344 | bp->mac_filters.bcast_accept_all | mask : | ||
2345 | bp->mac_filters.bcast_accept_all & ~mask; | ||
2346 | |||
2347 | bp->mac_filters.unmatched_unicast = unmatched_unicast ? | ||
2348 | bp->mac_filters.unmatched_unicast | mask : | ||
2349 | bp->mac_filters.unmatched_unicast & ~mask; | ||
2350 | } | ||
2351 | |||
2352 | static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) | ||
2353 | { | ||
2354 | struct tstorm_eth_function_common_config tcfg = {0}; | ||
2355 | u16 rss_flgs; | ||
2356 | |||
2357 | /* tpa */ | ||
2358 | if (p->func_flgs & FUNC_FLG_TPA) | ||
2359 | tcfg.config_flags |= | ||
2360 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA; | ||
2361 | |||
2362 | /* set rss flags */ | ||
2363 | rss_flgs = (p->rss->mode << | ||
2364 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT); | ||
2365 | |||
2366 | if (p->rss->cap & RSS_IPV4_CAP) | ||
2367 | rss_flgs |= RSS_IPV4_CAP_MASK; | ||
2368 | if (p->rss->cap & RSS_IPV4_TCP_CAP) | ||
2369 | rss_flgs |= RSS_IPV4_TCP_CAP_MASK; | ||
2370 | if (p->rss->cap & RSS_IPV6_CAP) | ||
2371 | rss_flgs |= RSS_IPV6_CAP_MASK; | ||
2372 | if (p->rss->cap & RSS_IPV6_TCP_CAP) | ||
2373 | rss_flgs |= RSS_IPV6_TCP_CAP_MASK; | ||
2374 | |||
2375 | tcfg.config_flags |= rss_flgs; | ||
2376 | tcfg.rss_result_mask = p->rss->result_mask; | ||
2377 | |||
2378 | storm_memset_func_cfg(bp, &tcfg, p->func_id); | ||
2379 | 2622 | ||
2380 | /* Enable the function in the FW */ | 2623 | /* Enable the function in the FW */ |
2381 | storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); | 2624 | storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); |
2382 | storm_memset_func_en(bp, p->func_id, 1); | 2625 | storm_memset_func_en(bp, p->func_id, 1); |
2383 | 2626 | ||
2384 | /* statistics */ | ||
2385 | if (p->func_flgs & FUNC_FLG_STATS) { | ||
2386 | struct stats_indication_flags stats_flags = {0}; | ||
2387 | stats_flags.collect_eth = 1; | ||
2388 | |||
2389 | storm_memset_xstats_flags(bp, &stats_flags, p->func_id); | ||
2390 | storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id); | ||
2391 | |||
2392 | storm_memset_tstats_flags(bp, &stats_flags, p->func_id); | ||
2393 | storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id); | ||
2394 | |||
2395 | storm_memset_ustats_flags(bp, &stats_flags, p->func_id); | ||
2396 | storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id); | ||
2397 | |||
2398 | storm_memset_cstats_flags(bp, &stats_flags, p->func_id); | ||
2399 | storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id); | ||
2400 | } | ||
2401 | |||
2402 | /* spq */ | 2627 | /* spq */ |
2403 | if (p->func_flgs & FUNC_FLG_SPQ) { | 2628 | if (p->func_flgs & FUNC_FLG_SPQ) { |
2404 | storm_memset_spq_addr(bp, p->spq_map, p->func_id); | 2629 | storm_memset_spq_addr(bp, p->spq_map, p->func_id); |
@@ -2407,39 +2632,62 @@ static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) | |||
2407 | } | 2632 | } |
2408 | } | 2633 | } |
2409 | 2634 | ||
2410 | static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp, | 2635 | static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp, |
2411 | struct bnx2x_fastpath *fp) | 2636 | struct bnx2x_fastpath *fp, |
2637 | bool leading) | ||
2412 | { | 2638 | { |
2413 | u16 flags = 0; | 2639 | unsigned long flags = 0; |
2414 | 2640 | ||
2415 | /* calculate queue flags */ | 2641 | /* PF driver will always initialize the Queue to an ACTIVE state */ |
2416 | flags |= QUEUE_FLG_CACHE_ALIGN; | 2642 | __set_bit(BNX2X_Q_FLG_ACTIVE, &flags); |
2417 | flags |= QUEUE_FLG_HC; | ||
2418 | flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0; | ||
2419 | 2643 | ||
2420 | flags |= QUEUE_FLG_VLAN; | 2644 | /* calculate other queue flags */ |
2421 | DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); | 2645 | if (IS_MF_SD(bp)) |
2646 | __set_bit(BNX2X_Q_FLG_OV, &flags); | ||
2647 | |||
2648 | if (IS_FCOE_FP(fp)) | ||
2649 | __set_bit(BNX2X_Q_FLG_FCOE, &flags); | ||
2422 | 2650 | ||
2423 | if (!fp->disable_tpa) | 2651 | if (!fp->disable_tpa) |
2424 | flags |= QUEUE_FLG_TPA; | 2652 | __set_bit(BNX2X_Q_FLG_TPA, &flags); |
2425 | 2653 | ||
2426 | flags = stat_counter_valid(bp, fp) ? | 2654 | if (stat_counter_valid(bp, fp)) { |
2427 | (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS); | 2655 | __set_bit(BNX2X_Q_FLG_STATS, &flags); |
2656 | __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); | ||
2657 | } | ||
2658 | |||
2659 | if (leading) { | ||
2660 | __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags); | ||
2661 | __set_bit(BNX2X_Q_FLG_MCAST, &flags); | ||
2662 | } | ||
2663 | |||
2664 | /* Always set HW VLAN stripping */ | ||
2665 | __set_bit(BNX2X_Q_FLG_VLAN, &flags); | ||
2428 | 2666 | ||
2429 | return flags; | 2667 | return flags; |
2430 | } | 2668 | } |
2431 | 2669 | ||
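
The queue flag word built above is a plain bitmap that later stages of queue setup test bit by bit. A small sketch of composing and checking such a word (the flag positions are illustrative, not the real BNX2X_Q_FLG_* definitions, and explicit bit operations stand in for the kernel's __set_bit/test_bit helpers):

#include <stdio.h>

/* Illustrative flag bits -- not the real BNX2X_Q_FLG_* values */
enum {
	Q_FLG_ACTIVE = 0,
	Q_FLG_TPA    = 1,
	Q_FLG_STATS  = 2,
	Q_FLG_VLAN   = 3,
};

int main(void)
{
	unsigned long flags = 0;
	int tpa_disabled = 0;

	/* the PF driver always starts the queue ACTIVE; the rest is conditional */
	flags |= 1UL << Q_FLG_ACTIVE;
	flags |= 1UL << Q_FLG_VLAN;            /* HW VLAN stripping always on */
	if (!tpa_disabled)
		flags |= 1UL << Q_FLG_TPA;

	if (flags & (1UL << Q_FLG_TPA))
		printf("TPA aggregation will be configured for this queue\n");
	return 0;
}
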
2432 | static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp, | 2670 | static void bnx2x_pf_q_prep_general(struct bnx2x *bp, |
2671 | struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init) | ||
2672 | { | ||
2673 | gen_init->stat_id = bnx2x_stats_id(fp); | ||
2674 | gen_init->spcl_id = fp->cl_id; | ||
2675 | |||
2676 | /* Always use mini-jumbo MTU for FCoE L2 ring */ | ||
2677 | if (IS_FCOE_FP(fp)) | ||
2678 | gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; | ||
2679 | else | ||
2680 | gen_init->mtu = bp->dev->mtu; | ||
2681 | } | ||
2682 | |||
2683 | static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, | ||
2433 | struct bnx2x_fastpath *fp, struct rxq_pause_params *pause, | 2684 | struct bnx2x_fastpath *fp, struct rxq_pause_params *pause, |
2434 | struct bnx2x_rxq_init_params *rxq_init) | 2685 | struct bnx2x_rxq_setup_params *rxq_init) |
2435 | { | 2686 | { |
2436 | u16 max_sge = 0; | 2687 | u8 max_sge = 0; |
2437 | u16 sge_sz = 0; | 2688 | u16 sge_sz = 0; |
2438 | u16 tpa_agg_size = 0; | 2689 | u16 tpa_agg_size = 0; |
2439 | 2690 | ||
2440 | /* calculate queue flags */ | ||
2441 | u16 flags = bnx2x_get_cl_flags(bp, fp); | ||
2442 | |||
2443 | if (!fp->disable_tpa) { | 2691 | if (!fp->disable_tpa) { |
2444 | pause->sge_th_hi = 250; | 2692 | pause->sge_th_hi = 250; |
2445 | pause->sge_th_lo = 150; | 2693 | pause->sge_th_lo = 150; |
@@ -2460,33 +2708,37 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp, | |||
2460 | pause->bd_th_lo = 250; | 2708 | pause->bd_th_lo = 250; |
2461 | pause->rcq_th_hi = 350; | 2709 | pause->rcq_th_hi = 350; |
2462 | pause->rcq_th_lo = 250; | 2710 | pause->rcq_th_lo = 250; |
2463 | pause->sge_th_hi = 0; | 2711 | |
2464 | pause->sge_th_lo = 0; | ||
2465 | pause->pri_map = 1; | 2712 | pause->pri_map = 1; |
2466 | } | 2713 | } |
2467 | 2714 | ||
2468 | /* rxq setup */ | 2715 | /* rxq setup */ |
2469 | rxq_init->flags = flags; | ||
2470 | rxq_init->cxt = &bp->context.vcxt[fp->cid].eth; | ||
2471 | rxq_init->dscr_map = fp->rx_desc_mapping; | 2716 | rxq_init->dscr_map = fp->rx_desc_mapping; |
2472 | rxq_init->sge_map = fp->rx_sge_mapping; | 2717 | rxq_init->sge_map = fp->rx_sge_mapping; |
2473 | rxq_init->rcq_map = fp->rx_comp_mapping; | 2718 | rxq_init->rcq_map = fp->rx_comp_mapping; |
2474 | rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; | 2719 | rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; |
2475 | 2720 | ||
2476 | /* Always use mini-jumbo MTU for FCoE L2 ring */ | 2721 | /* This should be the maximum number of data bytes that may be |
2477 | if (IS_FCOE_FP(fp)) | 2722 | * placed on the BD (not including padding). |
2478 | rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; | 2723 | */ |
2479 | else | 2724 | rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN - |
2480 | rxq_init->mtu = bp->dev->mtu; | 2725 | IP_HEADER_ALIGNMENT_PADDING; |
2481 | 2726 | ||
2482 | rxq_init->buf_sz = fp->rx_buf_size; | ||
2483 | rxq_init->cl_qzone_id = fp->cl_qzone_id; | 2727 | rxq_init->cl_qzone_id = fp->cl_qzone_id; |
2484 | rxq_init->cl_id = fp->cl_id; | ||
2485 | rxq_init->spcl_id = fp->cl_id; | ||
2486 | rxq_init->stat_id = fp->cl_id; | ||
2487 | rxq_init->tpa_agg_sz = tpa_agg_size; | 2728 | rxq_init->tpa_agg_sz = tpa_agg_size; |
2488 | rxq_init->sge_buf_sz = sge_sz; | 2729 | rxq_init->sge_buf_sz = sge_sz; |
2489 | rxq_init->max_sges_pkt = max_sge; | 2730 | rxq_init->max_sges_pkt = max_sge; |
2731 | rxq_init->rss_engine_id = BP_FUNC(bp); | ||
2732 | |||
2733 | /* Maximum number of simultaneous TPA aggregations for this Queue. ||
2734 | * | ||
2735 | * For PF Clients it should be the maximum available number. ||
2736 | * VF driver(s) may want to define it to a smaller value. | ||
2737 | */ | ||
2738 | rxq_init->max_tpa_queues = | ||
2739 | (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
2740 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2); | ||
2741 | |||
2490 | rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; | 2742 | rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; |
2491 | rxq_init->fw_sb_id = fp->fw_sb_id; | 2743 | rxq_init->fw_sb_id = fp->fw_sb_id; |
2492 | 2744 | ||
@@ -2494,46 +2746,35 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp, | |||
2494 | rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; | 2746 | rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; |
2495 | else | 2747 | else |
2496 | rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX; | 2748 | rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX; |
2497 | |||
2498 | rxq_init->cid = HW_CID(bp, fp->cid); | ||
2499 | |||
2500 | rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0; | ||
2501 | } | 2749 | } |
2502 | 2750 | ||
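
The buf_sz assignment in the Rx queue preparation above is simple arithmetic: the usable payload is the buffer size minus the firmware alignment reserve and the IP header alignment padding. With made-up constant values (the real BNX2X_FW_RX_ALIGN and IP_HEADER_ALIGNMENT_PADDING come from the driver headers), the calculation looks like this:

#include <stdio.h>

int main(void)
{
	/* Illustrative numbers only -- not the driver's real constants */
	unsigned int rx_buf_size = 1600;   /* full receive buffer          */
	unsigned int fw_rx_align = 64;     /* firmware alignment reserve   */
	unsigned int ip_hdr_pad  = 2;      /* IP header alignment padding  */

	/* buf_sz is the data-only portion the firmware may place on a BD */
	unsigned int buf_sz = rx_buf_size - fw_rx_align - ip_hdr_pad;

	printf("buf_sz = %u bytes\n", buf_sz);   /* 1534 */
	return 0;
}
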
2503 | static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp, | 2751 | static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, |
2504 | struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init) | 2752 | struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init) |
2505 | { | 2753 | { |
2506 | u16 flags = bnx2x_get_cl_flags(bp, fp); | ||
2507 | |||
2508 | txq_init->flags = flags; | ||
2509 | txq_init->cxt = &bp->context.vcxt[fp->cid].eth; | ||
2510 | txq_init->dscr_map = fp->tx_desc_mapping; | 2754 | txq_init->dscr_map = fp->tx_desc_mapping; |
2511 | txq_init->stat_id = fp->cl_id; | ||
2512 | txq_init->cid = HW_CID(bp, fp->cid); | ||
2513 | txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX; | 2755 | txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX; |
2514 | txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; | 2756 | txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; |
2515 | txq_init->fw_sb_id = fp->fw_sb_id; | 2757 | txq_init->fw_sb_id = fp->fw_sb_id; |
2516 | 2758 | ||
2759 | /* | ||
2760 | * set the tss leading client id for TX classification == ||
2761 | * leading RSS client id | ||
2762 | */ | ||
2763 | txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); | ||
2764 | |||
2517 | if (IS_FCOE_FP(fp)) { | 2765 | if (IS_FCOE_FP(fp)) { |
2518 | txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; | 2766 | txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; |
2519 | txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; | 2767 | txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; |
2520 | } | 2768 | } |
2521 | |||
2522 | txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0; | ||
2523 | } | 2769 | } |
2524 | 2770 | ||
2525 | static void bnx2x_pf_init(struct bnx2x *bp) | 2771 | static void bnx2x_pf_init(struct bnx2x *bp) |
2526 | { | 2772 | { |
2527 | struct bnx2x_func_init_params func_init = {0}; | 2773 | struct bnx2x_func_init_params func_init = {0}; |
2528 | struct bnx2x_rss_params rss = {0}; | ||
2529 | struct event_ring_data eq_data = { {0} }; | 2774 | struct event_ring_data eq_data = { {0} }; |
2530 | u16 flags; | 2775 | u16 flags; |
2531 | 2776 | ||
2532 | /* pf specific setups */ | 2777 | if (!CHIP_IS_E1x(bp)) { |
2533 | if (!CHIP_IS_E1(bp)) | ||
2534 | storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp)); | ||
2535 | |||
2536 | if (CHIP_IS_E2(bp)) { | ||
2537 | /* reset IGU PF statistics: MSIX + ATTN */ | 2778 | /* reset IGU PF statistics: MSIX + ATTN */ |
2538 | /* PF */ | 2779 | /* PF */ |
2539 | REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + | 2780 | REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + |
@@ -2551,27 +2792,14 @@ static void bnx2x_pf_init(struct bnx2x *bp) | |||
2551 | /* function setup flags */ | 2792 | /* function setup flags */ |
2552 | flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); | 2793 | flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); |
2553 | 2794 | ||
2554 | if (CHIP_IS_E1x(bp)) | 2795 | /* This flag is relevant for E1x only. |
2555 | flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0; | 2796 | * E2 doesn't have a TPA configuration at the function level. |
2556 | else | ||
2557 | flags |= FUNC_FLG_TPA; | ||
2558 | |||
2559 | /* function setup */ | ||
2560 | |||
2561 | /** | ||
2562 | * Although RSS is meaningless when there is a single HW queue we | ||
2563 | * still need it enabled in order to have HW Rx hash generated. | ||
2564 | */ | 2797 | */ |
2565 | rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP | | 2798 | flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0; |
2566 | RSS_IPV6_CAP | RSS_IPV6_TCP_CAP); | ||
2567 | rss.mode = bp->multi_mode; | ||
2568 | rss.result_mask = MULTI_MASK; | ||
2569 | func_init.rss = &rss; | ||
2570 | 2799 | ||
2571 | func_init.func_flgs = flags; | 2800 | func_init.func_flgs = flags; |
2572 | func_init.pf_id = BP_FUNC(bp); | 2801 | func_init.pf_id = BP_FUNC(bp); |
2573 | func_init.func_id = BP_FUNC(bp); | 2802 | func_init.func_id = BP_FUNC(bp); |
2574 | func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats); | ||
2575 | func_init.spq_map = bp->spq_mapping; | 2803 | func_init.spq_map = bp->spq_mapping; |
2576 | func_init.spq_prod = bp->spq_prod_idx; | 2804 | func_init.spq_prod = bp->spq_prod_idx; |
2577 | 2805 | ||
@@ -2580,11 +2808,11 @@ static void bnx2x_pf_init(struct bnx2x *bp) | |||
2580 | memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); | 2808 | memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); |
2581 | 2809 | ||
2582 | /* | 2810 | /* |
2583 | Congestion management values depend on the link rate | 2811 | * Congestion management values depend on the link rate |
2584 | There is no active link so initial link rate is set to 10 Gbps. | 2812 | * There is no active link so initial link rate is set to 10 Gbps. |
2585 | When the link comes up, the congestion management values are | 2813 | * When the link comes up, the congestion management values are |
2586 | re-calculated according to the actual link rate. | 2814 | * re-calculated according to the actual link rate. |
2587 | */ | 2815 | */ |
2588 | bp->link_vars.line_speed = SPEED_10000; | 2816 | bp->link_vars.line_speed = SPEED_10000; |
2589 | bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); | 2817 | bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); |
2590 | 2818 | ||
@@ -2592,10 +2820,6 @@ static void bnx2x_pf_init(struct bnx2x *bp) | |||
2592 | if (bp->port.pmf) | 2820 | if (bp->port.pmf) |
2593 | storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); | 2821 | storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); |
2594 | 2822 | ||
2595 | /* no rx until link is up */ | ||
2596 | bp->rx_mode = BNX2X_RX_MODE_NONE; | ||
2597 | bnx2x_set_storm_rx_mode(bp); | ||
2598 | |||
2599 | /* init Event Queue */ | 2823 | /* init Event Queue */ |
2600 | eq_data.base_addr.hi = U64_HI(bp->eq_mapping); | 2824 | eq_data.base_addr.hi = U64_HI(bp->eq_mapping); |
2601 | eq_data.base_addr.lo = U64_LO(bp->eq_mapping); | 2825 | eq_data.base_addr.lo = U64_LO(bp->eq_mapping); |
@@ -2610,11 +2834,9 @@ static void bnx2x_e1h_disable(struct bnx2x *bp) | |||
2610 | { | 2834 | { |
2611 | int port = BP_PORT(bp); | 2835 | int port = BP_PORT(bp); |
2612 | 2836 | ||
2613 | netif_tx_disable(bp->dev); | 2837 | bnx2x_tx_disable(bp); |
2614 | 2838 | ||
2615 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); | 2839 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); |
2616 | |||
2617 | netif_carrier_off(bp->dev); | ||
2618 | } | 2840 | } |
2619 | 2841 | ||
2620 | static void bnx2x_e1h_enable(struct bnx2x *bp) | 2842 | static void bnx2x_e1h_enable(struct bnx2x *bp) |
@@ -2717,12 +2939,47 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp) | |||
2717 | mmiowb(); | 2939 | mmiowb(); |
2718 | } | 2940 | } |
2719 | 2941 | ||
2720 | /* the slow path queue is odd since completions arrive on the fastpath ring */ | 2942 | /** |
2943 | * bnx2x_is_contextless_ramrod - check if the current command ends on EQ | ||
2944 | * | ||
2945 | * @cmd: command to check | ||
2946 | * @cmd_type: command type | ||
2947 | */ | ||
2948 | static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) | ||
2949 | { | ||
2950 | if ((cmd_type == NONE_CONNECTION_TYPE) || | ||
2951 | (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || | ||
2952 | (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || | ||
2953 | (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || | ||
2954 | (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || | ||
2955 | (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) | ||
2956 | return true; | ||
2957 | else | ||
2958 | return false; | ||
2959 | |||
2960 | } | ||
2961 | |||
2962 | |||
2963 | /** | ||
2964 | * bnx2x_sp_post - place a single command on an SP ring | ||
2965 | * | ||
2966 | * @bp: driver handle | ||
2967 | * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) | ||
2968 | * @cid: SW CID the command is related to | ||
2969 | * @data_hi: command private data address (high 32 bits) | ||
2970 | * @data_lo: command private data address (low 32 bits) | ||
2971 | * @cmd_type: command type (e.g. NONE, ETH) | ||
2972 | * | ||
2973 | * SP data is handled as if it's always an address pair, thus data fields are | ||
2974 | * not swapped to little endian in upper functions. Instead this function swaps | ||
2975 | * data as if it's two u32 fields. | ||
2976 | */ | ||
2721 | int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | 2977 | int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, |
2722 | u32 data_hi, u32 data_lo, int common) | 2978 | u32 data_hi, u32 data_lo, int cmd_type) |
2723 | { | 2979 | { |
2724 | struct eth_spe *spe; | 2980 | struct eth_spe *spe; |
2725 | u16 type; | 2981 | u16 type; |
2982 | bool common = bnx2x_is_contextless_ramrod(command, cmd_type); | ||
2726 | 2983 | ||
2727 | #ifdef BNX2X_STOP_ON_ERROR | 2984 | #ifdef BNX2X_STOP_ON_ERROR |
2728 | if (unlikely(bp->panic)) | 2985 | if (unlikely(bp->panic)) |
@@ -2752,17 +3009,7 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | |||
2752 | cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | | 3009 | cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | |
2753 | HW_CID(bp, cid)); | 3010 | HW_CID(bp, cid)); |
2754 | 3011 | ||
2755 | if (common) | 3012 | type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; |
2756 | /* Common ramrods: | ||
2757 | * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC | ||
2758 | * TRAFFIC_STOP, TRAFFIC_START | ||
2759 | */ | ||
2760 | type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT) | ||
2761 | & SPE_HDR_CONN_TYPE; | ||
2762 | else | ||
2763 | /* ETH ramrods: SETUP, HALT */ | ||
2764 | type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT) | ||
2765 | & SPE_HDR_CONN_TYPE; | ||
2766 | 3013 | ||
2767 | type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & | 3014 | type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & |
2768 | SPE_HDR_FUNCTION_ID); | 3015 | SPE_HDR_FUNCTION_ID); |
@@ -2774,7 +3021,8 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | |||
2774 | 3021 | ||
2775 | /* stats ramrod has its own slot on the spq */ | 3022 | /* stats ramrod has its own slot on the spq */ |
2776 | if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) { | 3023 | if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) { |
2777 | /* It's ok if the actual decrement is issued towards the memory | 3024 | /* |
3025 | * It's ok if the actual decrement is issued towards the memory | ||
2778 | * somewhere between the spin_lock and spin_unlock. Thus no | 3026 | * somewhere between the spin_lock and spin_unlock. Thus no |
2779 | * more explicit memory barrier is needed. | 3027 | * more explicit memory barrier is needed. |
2780 | */ | 3028 | */ |
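
The kernel-doc for bnx2x_sp_post above notes that the command's private data is treated as an address pair and byte-swapped by this function alone. A standalone sketch of that split, assuming a generic userspace setting (U64_HI/U64_LO are local stand-ins mirroring the driver's helpers, and glibc's htole32 plays the role of cpu_to_le32):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the driver's U64_HI/U64_LO helpers */
#define U64_HI(x)  ((uint32_t)(((uint64_t)(x)) >> 32))
#define U64_LO(x)  ((uint32_t)((x) & 0xffffffffULL))

int main(void)
{
	uint64_t dma_addr = 0x0000001234abcd00ULL;   /* example bus address */

	/* The slow-path element stores the data pointer as two 32-bit
	 * halves, each written in little-endian byte order. */
	uint32_t data_hi = htole32(U64_HI(dma_addr));
	uint32_t data_lo = htole32(U64_LO(dma_addr));

	printf("hi=0x%08x lo=0x%08x\n", (unsigned)data_hi, (unsigned)data_lo);
	return 0;
}
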
@@ -2893,9 +3141,15 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) | |||
2893 | 3141 | ||
2894 | /* save nig interrupt mask */ | 3142 | /* save nig interrupt mask */ |
2895 | nig_mask = REG_RD(bp, nig_int_mask_addr); | 3143 | nig_mask = REG_RD(bp, nig_int_mask_addr); |
2896 | REG_WR(bp, nig_int_mask_addr, 0); | ||
2897 | 3144 | ||
2898 | bnx2x_link_attn(bp); | 3145 | /* If nig_mask is not set, no need to call the update |
3146 | * function. | ||
3147 | */ | ||
3148 | if (nig_mask) { | ||
3149 | REG_WR(bp, nig_int_mask_addr, 0); | ||
3150 | |||
3151 | bnx2x_link_attn(bp); | ||
3152 | } | ||
2899 | 3153 | ||
2900 | /* handle unicore attn? */ | 3154 | /* handle unicore attn? */ |
2901 | } | 3155 | } |
@@ -3000,8 +3254,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) | |||
3000 | bnx2x_fan_failure(bp); | 3254 | bnx2x_fan_failure(bp); |
3001 | } | 3255 | } |
3002 | 3256 | ||
3003 | if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 | | 3257 | if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { |
3004 | AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) { | ||
3005 | bnx2x_acquire_phy_lock(bp); | 3258 | bnx2x_acquire_phy_lock(bp); |
3006 | bnx2x_handle_module_detect_int(&bp->link_params); | 3259 | bnx2x_handle_module_detect_int(&bp->link_params); |
3007 | bnx2x_release_phy_lock(bp); | 3260 | bnx2x_release_phy_lock(bp); |
@@ -3064,13 +3317,13 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) | |||
3064 | } | 3317 | } |
3065 | 3318 | ||
3066 | if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { | 3319 | if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { |
3067 | |||
3068 | val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); | 3320 | val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); |
3069 | BNX2X_ERR("PXP hw attention 0x%x\n", val); | 3321 | BNX2X_ERR("PXP hw attention-0 0x%x\n", val); |
3070 | /* RQ_USDMDP_FIFO_OVERFLOW */ | 3322 | /* RQ_USDMDP_FIFO_OVERFLOW */ |
3071 | if (val & 0x18000) | 3323 | if (val & 0x18000) |
3072 | BNX2X_ERR("FATAL error from PXP\n"); | 3324 | BNX2X_ERR("FATAL error from PXP\n"); |
3073 | if (CHIP_IS_E2(bp)) { | 3325 | |
3326 | if (!CHIP_IS_E1x(bp)) { | ||
3074 | val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); | 3327 | val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); |
3075 | BNX2X_ERR("PXP hw attention-1 0x%x\n", val); | 3328 | BNX2X_ERR("PXP hw attention-1 0x%x\n", val); |
3076 | } | 3329 | } |
@@ -3118,17 +3371,27 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) | |||
3118 | if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) | 3371 | if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) |
3119 | bnx2x_pmf_update(bp); | 3372 | bnx2x_pmf_update(bp); |
3120 | 3373 | ||
3121 | /* Always call it here: bnx2x_link_report() will | ||
3122 | * prevent the link indication duplication. | ||
3123 | */ | ||
3124 | bnx2x__link_status_update(bp); | ||
3125 | |||
3126 | if (bp->port.pmf && | 3374 | if (bp->port.pmf && |
3127 | (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && | 3375 | (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && |
3128 | bp->dcbx_enabled > 0) | 3376 | bp->dcbx_enabled > 0) |
3129 | /* start dcbx state machine */ | 3377 | /* start dcbx state machine */ |
3130 | bnx2x_dcbx_set_params(bp, | 3378 | bnx2x_dcbx_set_params(bp, |
3131 | BNX2X_DCBX_STATE_NEG_RECEIVED); | 3379 | BNX2X_DCBX_STATE_NEG_RECEIVED); |
3380 | if (bp->link_vars.periodic_flags & | ||
3381 | PERIODIC_FLAGS_LINK_EVENT) { | ||
3382 | /* sync with link */ | ||
3383 | bnx2x_acquire_phy_lock(bp); | ||
3384 | bp->link_vars.periodic_flags &= | ||
3385 | ~PERIODIC_FLAGS_LINK_EVENT; | ||
3386 | bnx2x_release_phy_lock(bp); | ||
3387 | if (IS_MF(bp)) | ||
3388 | bnx2x_link_sync_notify(bp); | ||
3389 | bnx2x_link_report(bp); | ||
3390 | } | ||
3391 | /* Always call it here: bnx2x_link_report() will | ||
3392 | * prevent the link indication duplication. | ||
3393 | */ | ||
3394 | bnx2x__link_status_update(bp); | ||
3132 | } else if (attn & BNX2X_MC_ASSERT_BITS) { | 3395 | } else if (attn & BNX2X_MC_ASSERT_BITS) { |
3133 | 3396 | ||
3134 | BNX2X_ERR("MC assert!\n"); | 3397 | BNX2X_ERR("MC assert!\n"); |
@@ -3164,72 +3427,185 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) | |||
3164 | } | 3427 | } |
3165 | } | 3428 | } |
3166 | 3429 | ||
3167 | #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1 | 3430 | /* |
3168 | #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */ | 3431 | * Bits map: |
3169 | #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) | 3432 | * 0-7 - Engine0 load counter. |
3170 | #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) | 3433 | * 8-15 - Engine1 load counter. |
3171 | #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS | 3434 | * 16 - Engine0 RESET_IN_PROGRESS bit. |
3435 | * 17 - Engine1 RESET_IN_PROGRESS bit. | ||
3436 | * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function | ||
3437 | * on the engine | ||
3438 | * 19 - Engine1 ONE_IS_LOADED. | ||
3439 | * 20 - Chip reset flow bit. When set, non-leaders must wait for both ||
3440 | * engines' leaders to complete (check both RESET_IN_PROGRESS bits, not ||
3441 | * just the one belonging to their own engine). ||
3442 | * | ||
3443 | */ | ||
3444 | #define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 | ||
3445 | |||
3446 | #define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff | ||
3447 | #define BNX2X_PATH0_LOAD_CNT_SHIFT 0 | ||
3448 | #define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00 | ||
3449 | #define BNX2X_PATH1_LOAD_CNT_SHIFT 8 | ||
3450 | #define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000 | ||
3451 | #define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000 | ||
3452 | #define BNX2X_GLOBAL_RESET_BIT 0x00040000 | ||
3172 | 3453 | ||
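
Given the bit map and masks defined above, a raw value of the recovery register can be decoded with no driver context at all. This sketch reuses the same mask/shift constants (renamed locally) to pull the per-engine load counters and reset bits out of a sample value:

#include <stdint.h>
#include <stdio.h>

#define PATH0_LOAD_CNT_MASK   0x000000ff
#define PATH0_LOAD_CNT_SHIFT  0
#define PATH1_LOAD_CNT_MASK   0x0000ff00
#define PATH1_LOAD_CNT_SHIFT  8
#define PATH0_RST_IN_PROG_BIT 0x00010000
#define PATH1_RST_IN_PROG_BIT 0x00020000
#define GLOBAL_RESET_BIT      0x00040000

int main(void)
{
	/* Example register value: two functions loaded on engine 0,
	 * one on engine 1, and engine 1 currently in reset. */
	uint32_t val = 0x00020102;

	printf("engine0 load cnt: %u\n",
	       (unsigned)((val & PATH0_LOAD_CNT_MASK) >> PATH0_LOAD_CNT_SHIFT));
	printf("engine1 load cnt: %u\n",
	       (unsigned)((val & PATH1_LOAD_CNT_MASK) >> PATH1_LOAD_CNT_SHIFT));
	printf("engine0 reset in progress: %s\n",
	       (val & PATH0_RST_IN_PROG_BIT) ? "yes" : "no");
	printf("engine1 reset in progress: %s\n",
	       (val & PATH1_RST_IN_PROG_BIT) ? "yes" : "no");
	printf("global reset flagged: %s\n",
	       (val & GLOBAL_RESET_BIT) ? "yes" : "no");
	return 0;
}
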
3173 | /* | 3454 | /* |
3455 | * Set the GLOBAL_RESET bit. | ||
3456 | * | ||
3457 | * Should be run under rtnl lock | ||
3458 | */ | ||
3459 | void bnx2x_set_reset_global(struct bnx2x *bp) | ||
3460 | { | ||
3461 | u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); | ||
3462 | |||
3463 | REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); | ||
3464 | barrier(); | ||
3465 | mmiowb(); | ||
3466 | } | ||
3467 | |||
3468 | /* | ||
3469 | * Clear the GLOBAL_RESET bit. | ||
3470 | * | ||
3471 | * Should be run under rtnl lock | ||
3472 | */ | ||
3473 | static inline void bnx2x_clear_reset_global(struct bnx2x *bp) | ||
3474 | { | ||
3475 | u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); | ||
3476 | |||
3477 | REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); | ||
3478 | barrier(); | ||
3479 | mmiowb(); | ||
3480 | } | ||
3481 | |||
3482 | /* | ||
3483 | * Checks the GLOBAL_RESET bit. | ||
3484 | * | ||
3174 | * should be run under rtnl lock | 3485 | * should be run under rtnl lock |
3175 | */ | 3486 | */ |
3487 | static inline bool bnx2x_reset_is_global(struct bnx2x *bp) | ||
3488 | { | ||
3489 | u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); | ||
3490 | |||
3491 | DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); | ||
3492 | return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false; | ||
3493 | } | ||
3494 | |||
3495 | /* | ||
3496 | * Clear RESET_IN_PROGRESS bit for the current engine. | ||
3497 | * | ||
3498 | * Should be run under rtnl lock | ||
3499 | */ | ||
3176 | static inline void bnx2x_set_reset_done(struct bnx2x *bp) | 3500 | static inline void bnx2x_set_reset_done(struct bnx2x *bp) |
3177 | { | 3501 | { |
3178 | u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); | 3502 | u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); |
3179 | val &= ~(1 << RESET_DONE_FLAG_SHIFT); | 3503 | u32 bit = BP_PATH(bp) ? |
3180 | REG_WR(bp, BNX2X_MISC_GEN_REG, val); | 3504 | BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; |
3505 | |||
3506 | /* Clear the bit */ | ||
3507 | val &= ~bit; | ||
3508 | REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); | ||
3181 | barrier(); | 3509 | barrier(); |
3182 | mmiowb(); | 3510 | mmiowb(); |
3183 | } | 3511 | } |
3184 | 3512 | ||
3185 | /* | 3513 | /* |
3514 | * Set RESET_IN_PROGRESS for the current engine. | ||
3515 | * | ||
3186 | * should be run under rtnl lock | 3516 | * should be run under rtnl lock |
3187 | */ | 3517 | */ |
3188 | static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp) | 3518 | void bnx2x_set_reset_in_progress(struct bnx2x *bp) |
3189 | { | 3519 | { |
3190 | u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); | 3520 | u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); |
3191 | val |= (1 << 16); | 3521 | u32 bit = BP_PATH(bp) ? |
3192 | REG_WR(bp, BNX2X_MISC_GEN_REG, val); | 3522 | BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; |
3523 | |||
3524 | /* Set the bit */ | ||
3525 | val |= bit; | ||
3526 | REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); | ||
3193 | barrier(); | 3527 | barrier(); |
3194 | mmiowb(); | 3528 | mmiowb(); |
3195 | } | 3529 | } |
3196 | 3530 | ||
3197 | /* | 3531 | /* |
3532 | * Checks the RESET_IN_PROGRESS bit for the given engine. | ||
3198 | * should be run under rtnl lock | 3533 | * should be run under rtnl lock |
3199 | */ | 3534 | */ |
3200 | bool bnx2x_reset_is_done(struct bnx2x *bp) | 3535 | bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) |
3201 | { | 3536 | { |
3202 | u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); | 3537 | u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); |
3203 | DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); | 3538 | u32 bit = engine ? |
3204 | return (val & RESET_DONE_FLAG_MASK) ? false : true; | 3539 | BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; |
3540 | |||
3541 | /* return false if bit is set */ | ||
3542 | return (val & bit) ? false : true; | ||
3205 | } | 3543 | } |
3206 | 3544 | ||
3207 | /* | 3545 | /* |
3546 | * Increment the load counter for the current engine. | ||
3547 | * | ||
3208 | * should be run under rtnl lock | 3548 | * should be run under rtnl lock |
3209 | */ | 3549 | */ |
3210 | inline void bnx2x_inc_load_cnt(struct bnx2x *bp) | 3550 | void bnx2x_inc_load_cnt(struct bnx2x *bp) |
3211 | { | 3551 | { |
3212 | u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); | 3552 | u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); |
3553 | u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : | ||
3554 | BNX2X_PATH0_LOAD_CNT_MASK; | ||
3555 | u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : | ||
3556 | BNX2X_PATH0_LOAD_CNT_SHIFT; | ||
3213 | 3557 | ||
3214 | DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); | 3558 | DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); |
3215 | 3559 | ||
3216 | val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK; | 3560 | /* get the current counter value */ |
3217 | REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1); | 3561 | val1 = (val & mask) >> shift; |
3562 | |||
3563 | /* increment... */ | ||
3564 | val1++; | ||
3565 | |||
3566 | /* clear the old value */ | ||
3567 | val &= ~mask; | ||
3568 | |||
3569 | /* set the new one */ | ||
3570 | val |= ((val1 << shift) & mask); | ||
3571 | |||
3572 | REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); | ||
3218 | barrier(); | 3573 | barrier(); |
3219 | mmiowb(); | 3574 | mmiowb(); |
3220 | } | 3575 | } |
3221 | 3576 | ||
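
bnx2x_inc_load_cnt above, and the matching decrement that follows, apply the same read-modify-write pattern to a masked counter field. A generic helper capturing that pattern (plain C with the same mask/shift semantics; the mask used in main() is the engine-0 load counter mask from the definitions earlier):

#include <stdint.h>
#include <stdio.h>

/* Update a bit-field within a register image: extract the current counter,
 * apply the delta, clear the field and merge the new value back in. */
static uint32_t update_field(uint32_t reg, uint32_t mask, unsigned shift, int delta)
{
	uint32_t field = (reg & mask) >> shift;

	field += delta;
	reg &= ~mask;
	reg |= (field << shift) & mask;
	return reg;
}

int main(void)
{
	uint32_t reg = 0x00000003;                    /* engine0 load counter = 3 */

	reg = update_field(reg, 0x000000ff, 0, +1);   /* increment -> 4 */
	reg = update_field(reg, 0x000000ff, 0, -1);   /* decrement -> 3 */
	printf("reg = 0x%08x\n", (unsigned)reg);
	return 0;
}
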
3222 | /* | 3577 | /** |
3223 | * should be run under rtnl lock | 3578 | * bnx2x_dec_load_cnt - decrement the load counter |
3579 | * | ||
3580 | * @bp: driver handle | ||
3581 | * | ||
3582 | * Should be run under rtnl lock. | ||
3583 | * Decrements the load counter for the current engine. Returns | ||
3584 | * the new counter value. | ||
3224 | */ | 3585 | */ |
3225 | u32 bnx2x_dec_load_cnt(struct bnx2x *bp) | 3586 | u32 bnx2x_dec_load_cnt(struct bnx2x *bp) |
3226 | { | 3587 | { |
3227 | u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); | 3588 | u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); |
3589 | u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : | ||
3590 | BNX2X_PATH0_LOAD_CNT_MASK; | ||
3591 | u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : | ||
3592 | BNX2X_PATH0_LOAD_CNT_SHIFT; | ||
3228 | 3593 | ||
3229 | DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); | 3594 | DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); |
3230 | 3595 | ||
3231 | val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK; | 3596 | /* get the current counter value */ |
3232 | REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1); | 3597 | val1 = (val & mask) >> shift; |
3598 | |||
3599 | /* decrement... */ | ||
3600 | val1--; | ||
3601 | |||
3602 | /* clear the old value */ | ||
3603 | val &= ~mask; | ||
3604 | |||
3605 | /* set the new one */ | ||
3606 | val |= ((val1 << shift) & mask); | ||
3607 | |||
3608 | REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); | ||
3233 | barrier(); | 3609 | barrier(); |
3234 | mmiowb(); | 3610 | mmiowb(); |
3235 | 3611 | ||
@@ -3237,17 +3613,39 @@ u32 bnx2x_dec_load_cnt(struct bnx2x *bp) | |||
3237 | } | 3613 | } |
3238 | 3614 | ||
3239 | /* | 3615 | /* |
3616 | * Read the load counter for the current engine. | ||
3617 | * | ||
3240 | * should be run under rtnl lock | 3618 | * should be run under rtnl lock |
3241 | */ | 3619 | */ |
3242 | static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp) | 3620 | static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine) |
3243 | { | 3621 | { |
3244 | return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK; | 3622 | u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : |
3623 | BNX2X_PATH0_LOAD_CNT_MASK); | ||
3624 | u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : | ||
3625 | BNX2X_PATH0_LOAD_CNT_SHIFT); | ||
3626 | u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); | ||
3627 | |||
3628 | DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val); | ||
3629 | |||
3630 | val = (val & mask) >> shift; | ||
3631 | |||
3632 | DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val); | ||
3633 | |||
3634 | return val; | ||
3245 | } | 3635 | } |
3246 | 3636 | ||
3637 | /* | ||
3638 | * Reset the load counter for the current engine. | ||
3639 | * | ||
3640 | * should be run under rtnl lock | ||
3641 | */ | ||
3247 | static inline void bnx2x_clear_load_cnt(struct bnx2x *bp) | 3642 | static inline void bnx2x_clear_load_cnt(struct bnx2x *bp) |
3248 | { | 3643 | { |
3249 | u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); | 3644 | u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); |
3250 | REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK)); | 3645 | u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : |
3646 | BNX2X_PATH0_LOAD_CNT_MASK); | ||
3647 | |||
3648 | REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask)); | ||
3251 | } | 3649 | } |
3252 | 3650 | ||
3253 | static inline void _print_next_block(int idx, const char *blk) | 3651 | static inline void _print_next_block(int idx, const char *blk) |
@@ -3257,7 +3655,8 @@ static inline void _print_next_block(int idx, const char *blk) | |||
3257 | pr_cont("%s", blk); | 3655 | pr_cont("%s", blk); |
3258 | } | 3656 | } |
3259 | 3657 | ||
3260 | static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num) | 3658 | static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, |
3659 | bool print) | ||
3261 | { | 3660 | { |
3262 | int i = 0; | 3661 | int i = 0; |
3263 | u32 cur_bit = 0; | 3662 | u32 cur_bit = 0; |
@@ -3266,19 +3665,33 @@ static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num) | |||
3266 | if (sig & cur_bit) { | 3665 | if (sig & cur_bit) { |
3267 | switch (cur_bit) { | 3666 | switch (cur_bit) { |
3268 | case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: | 3667 | case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: |
3269 | _print_next_block(par_num++, "BRB"); | 3668 | if (print) |
3669 | _print_next_block(par_num++, "BRB"); | ||
3270 | break; | 3670 | break; |
3271 | case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: | 3671 | case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: |
3272 | _print_next_block(par_num++, "PARSER"); | 3672 | if (print) |
3673 | _print_next_block(par_num++, "PARSER"); | ||
3273 | break; | 3674 | break; |
3274 | case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: | 3675 | case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: |
3275 | _print_next_block(par_num++, "TSDM"); | 3676 | if (print) |
3677 | _print_next_block(par_num++, "TSDM"); | ||
3276 | break; | 3678 | break; |
3277 | case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: | 3679 | case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: |
3278 | _print_next_block(par_num++, "SEARCHER"); | 3680 | if (print) |
3681 | _print_next_block(par_num++, | ||
3682 | "SEARCHER"); | ||
3683 | break; | ||
3684 | case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: | ||
3685 | if (print) | ||
3686 | _print_next_block(par_num++, "TCM"); | ||
3279 | break; | 3687 | break; |
3280 | case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: | 3688 | case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: |
3281 | _print_next_block(par_num++, "TSEMI"); | 3689 | if (print) |
3690 | _print_next_block(par_num++, "TSEMI"); | ||
3691 | break; | ||
3692 | case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: | ||
3693 | if (print) | ||
3694 | _print_next_block(par_num++, "XPB"); | ||
3282 | break; | 3695 | break; |
3283 | } | 3696 | } |
3284 | 3697 | ||
@@ -3290,7 +3703,8 @@ static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num) | |||
3290 | return par_num; | 3703 | return par_num; |
3291 | } | 3704 | } |
3292 | 3705 | ||
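
All of the parity helpers in this hunk share one loop shape: scan the 32 attention bits of a signature word and, for every set bit, report or merely count the block behind it. A compact sketch of that pattern (the block-name table here is illustrative, not the AEU_INPUTS_ATTN_BITS_* mapping used by the driver):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative block names only -- not the real attention bit assignments */
static const char *block_name[32] = {
	[0] = "BRB", [1] = "PARSER", [2] = "TSDM", [3] = "SEARCHER",
};

/* Walk all 32 attention bits; for each set bit that maps to a known block,
 * optionally print it and bump the running parity counter. */
static int check_parity_sig(uint32_t sig, int par_num, bool print)
{
	for (int i = 0; i < 32; i++) {
		uint32_t cur_bit = (uint32_t)1 << i;

		if (!(sig & cur_bit) || !block_name[i])
			continue;
		if (print)
			printf("parity error reported by block %s\n",
			       block_name[i]);
		par_num++;
	}
	return par_num;
}

int main(void)
{
	int n = check_parity_sig(0x5 /* bits 0 and 2 set */, 0, true);

	printf("blocks with parity errors: %d\n", n);
	return 0;
}
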
3293 | static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num) | 3706 | static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, |
3707 | bool *global, bool print) | ||
3294 | { | 3708 | { |
3295 | int i = 0; | 3709 | int i = 0; |
3296 | u32 cur_bit = 0; | 3710 | u32 cur_bit = 0; |
@@ -3298,38 +3712,64 @@ static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num) | |||
3298 | cur_bit = ((u32)0x1 << i); | 3712 | cur_bit = ((u32)0x1 << i); |
3299 | if (sig & cur_bit) { | 3713 | if (sig & cur_bit) { |
3300 | switch (cur_bit) { | 3714 | switch (cur_bit) { |
3301 | case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: | 3715 | case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: |
3302 | _print_next_block(par_num++, "PBCLIENT"); | 3716 | if (print) |
3717 | _print_next_block(par_num++, "PBF"); | ||
3303 | break; | 3718 | break; |
3304 | case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: | 3719 | case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: |
3305 | _print_next_block(par_num++, "QM"); | 3720 | if (print) |
3721 | _print_next_block(par_num++, "QM"); | ||
3722 | break; | ||
3723 | case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: | ||
3724 | if (print) | ||
3725 | _print_next_block(par_num++, "TM"); | ||
3306 | break; | 3726 | break; |
3307 | case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: | 3727 | case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: |
3308 | _print_next_block(par_num++, "XSDM"); | 3728 | if (print) |
3729 | _print_next_block(par_num++, "XSDM"); | ||
3730 | break; | ||
3731 | case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: | ||
3732 | if (print) | ||
3733 | _print_next_block(par_num++, "XCM"); | ||
3309 | break; | 3734 | break; |
3310 | case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: | 3735 | case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: |
3311 | _print_next_block(par_num++, "XSEMI"); | 3736 | if (print) |
3737 | _print_next_block(par_num++, "XSEMI"); | ||
3312 | break; | 3738 | break; |
3313 | case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: | 3739 | case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: |
3314 | _print_next_block(par_num++, "DOORBELLQ"); | 3740 | if (print) |
3741 | _print_next_block(par_num++, | ||
3742 | "DOORBELLQ"); | ||
3743 | break; | ||
3744 | case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: | ||
3745 | if (print) | ||
3746 | _print_next_block(par_num++, "NIG"); | ||
3315 | break; | 3747 | break; |
3316 | case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: | 3748 | case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: |
3317 | _print_next_block(par_num++, "VAUX PCI CORE"); | 3749 | if (print) |
3750 | _print_next_block(par_num++, | ||
3751 | "VAUX PCI CORE"); | ||
3752 | *global = true; | ||
3318 | break; | 3753 | break; |
3319 | case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: | 3754 | case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: |
3320 | _print_next_block(par_num++, "DEBUG"); | 3755 | if (print) |
3756 | _print_next_block(par_num++, "DEBUG"); | ||
3321 | break; | 3757 | break; |
3322 | case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: | 3758 | case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: |
3323 | _print_next_block(par_num++, "USDM"); | 3759 | if (print) |
3760 | _print_next_block(par_num++, "USDM"); | ||
3324 | break; | 3761 | break; |
3325 | case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: | 3762 | case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: |
3326 | _print_next_block(par_num++, "USEMI"); | 3763 | if (print) |
3764 | _print_next_block(par_num++, "USEMI"); | ||
3327 | break; | 3765 | break; |
3328 | case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: | 3766 | case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: |
3329 | _print_next_block(par_num++, "UPB"); | 3767 | if (print) |
3768 | _print_next_block(par_num++, "UPB"); | ||
3330 | break; | 3769 | break; |
3331 | case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: | 3770 | case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: |
3332 | _print_next_block(par_num++, "CSDM"); | 3771 | if (print) |
3772 | _print_next_block(par_num++, "CSDM"); | ||
3333 | break; | 3773 | break; |
3334 | } | 3774 | } |
3335 | 3775 | ||
@@ -3341,7 +3781,8 @@ static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num) | |||
3341 | return par_num; | 3781 | return par_num; |
3342 | } | 3782 | } |
3343 | 3783 | ||
3344 | static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num) | 3784 | static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, |
3785 | bool print) | ||
3345 | { | 3786 | { |
3346 | int i = 0; | 3787 | int i = 0; |
3347 | u32 cur_bit = 0; | 3788 | u32 cur_bit = 0; |
@@ -3350,26 +3791,37 @@ static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num) | |||
3350 | if (sig & cur_bit) { | 3791 | if (sig & cur_bit) { |
3351 | switch (cur_bit) { | 3792 | switch (cur_bit) { |
3352 | case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: | 3793 | case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: |
3353 | _print_next_block(par_num++, "CSEMI"); | 3794 | if (print) |
3795 | _print_next_block(par_num++, "CSEMI"); | ||
3354 | break; | 3796 | break; |
3355 | case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: | 3797 | case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: |
3356 | _print_next_block(par_num++, "PXP"); | 3798 | if (print) |
3799 | _print_next_block(par_num++, "PXP"); | ||
3357 | break; | 3800 | break; |
3358 | case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: | 3801 | case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: |
3359 | _print_next_block(par_num++, | 3802 | if (print) |
3803 | _print_next_block(par_num++, | ||
3360 | "PXPPCICLOCKCLIENT"); | 3804 | "PXPPCICLOCKCLIENT"); |
3361 | break; | 3805 | break; |
3362 | case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: | 3806 | case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: |
3363 | _print_next_block(par_num++, "CFC"); | 3807 | if (print) |
3808 | _print_next_block(par_num++, "CFC"); | ||
3364 | break; | 3809 | break; |
3365 | case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: | 3810 | case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: |
3366 | _print_next_block(par_num++, "CDU"); | 3811 | if (print) |
3812 | _print_next_block(par_num++, "CDU"); | ||
3813 | break; | ||
3814 | case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: | ||
3815 | if (print) | ||
3816 | _print_next_block(par_num++, "DMAE"); | ||
3367 | break; | 3817 | break; |
3368 | case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: | 3818 | case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: |
3369 | _print_next_block(par_num++, "IGU"); | 3819 | if (print) |
3820 | _print_next_block(par_num++, "IGU"); | ||
3370 | break; | 3821 | break; |
3371 | case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: | 3822 | case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: |
3372 | _print_next_block(par_num++, "MISC"); | 3823 | if (print) |
3824 | _print_next_block(par_num++, "MISC"); | ||
3373 | break; | 3825 | break; |
3374 | } | 3826 | } |
3375 | 3827 | ||
@@ -3381,7 +3833,8 @@ static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num) | |||
3381 | return par_num; | 3833 | return par_num; |
3382 | } | 3834 | } |
3383 | 3835 | ||
3384 | static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num) | 3836 | static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, |
3837 | bool *global, bool print) | ||
3385 | { | 3838 | { |
3386 | int i = 0; | 3839 | int i = 0; |
3387 | u32 cur_bit = 0; | 3840 | u32 cur_bit = 0; |
@@ -3390,16 +3843,27 @@ static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num) | |||
3390 | if (sig & cur_bit) { | 3843 | if (sig & cur_bit) { |
3391 | switch (cur_bit) { | 3844 | switch (cur_bit) { |
3392 | case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: | 3845 | case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: |
3393 | _print_next_block(par_num++, "MCP ROM"); | 3846 | if (print) |
3847 | _print_next_block(par_num++, "MCP ROM"); | ||
3848 | *global = true; | ||
3394 | break; | 3849 | break; |
3395 | case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: | 3850 | case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: |
3396 | _print_next_block(par_num++, "MCP UMP RX"); | 3851 | if (print) |
3852 | _print_next_block(par_num++, | ||
3853 | "MCP UMP RX"); | ||
3854 | *global = true; | ||
3397 | break; | 3855 | break; |
3398 | case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: | 3856 | case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: |
3399 | _print_next_block(par_num++, "MCP UMP TX"); | 3857 | if (print) |
3858 | _print_next_block(par_num++, | ||
3859 | "MCP UMP TX"); | ||
3860 | *global = true; | ||
3400 | break; | 3861 | break; |
3401 | case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: | 3862 | case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: |
3402 | _print_next_block(par_num++, "MCP SCPAD"); | 3863 | if (print) |
3864 | _print_next_block(par_num++, | ||
3865 | "MCP SCPAD"); | ||
3866 | *global = true; | ||
3403 | break; | 3867 | break; |
3404 | } | 3868 | } |
3405 | 3869 | ||
@@ -3411,8 +3875,8 @@ static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num) | |||
3411 | return par_num; | 3875 | return par_num; |
3412 | } | 3876 | } |
3413 | 3877 | ||
3414 | static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1, | 3878 | static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, |
3415 | u32 sig2, u32 sig3) | 3879 | u32 sig0, u32 sig1, u32 sig2, u32 sig3) |
3416 | { | 3880 | { |
3417 | if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) || | 3881 | if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) || |
3418 | (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) { | 3882 | (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) { |
@@ -3424,23 +3888,32 @@ static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1, | |||
3424 | sig1 & HW_PRTY_ASSERT_SET_1, | 3888 | sig1 & HW_PRTY_ASSERT_SET_1, |
3425 | sig2 & HW_PRTY_ASSERT_SET_2, | 3889 | sig2 & HW_PRTY_ASSERT_SET_2, |
3426 | sig3 & HW_PRTY_ASSERT_SET_3); | 3890 | sig3 & HW_PRTY_ASSERT_SET_3); |
3427 | printk(KERN_ERR"%s: Parity errors detected in blocks: ", | 3891 | if (print) |
3428 | bp->dev->name); | 3892 | netdev_err(bp->dev, |
3429 | par_num = bnx2x_print_blocks_with_parity0( | 3893 | "Parity errors detected in blocks: "); |
3430 | sig0 & HW_PRTY_ASSERT_SET_0, par_num); | 3894 | par_num = bnx2x_check_blocks_with_parity0( |
3431 | par_num = bnx2x_print_blocks_with_parity1( | 3895 | sig0 & HW_PRTY_ASSERT_SET_0, par_num, print); |
3432 | sig1 & HW_PRTY_ASSERT_SET_1, par_num); | 3896 | par_num = bnx2x_check_blocks_with_parity1( |
3433 | par_num = bnx2x_print_blocks_with_parity2( | 3897 | sig1 & HW_PRTY_ASSERT_SET_1, par_num, global, print); |
3434 | sig2 & HW_PRTY_ASSERT_SET_2, par_num); | 3898 | par_num = bnx2x_check_blocks_with_parity2( |
3435 | par_num = bnx2x_print_blocks_with_parity3( | 3899 | sig2 & HW_PRTY_ASSERT_SET_2, par_num, print); |
3436 | sig3 & HW_PRTY_ASSERT_SET_3, par_num); | 3900 | par_num = bnx2x_check_blocks_with_parity3( |
3437 | printk("\n"); | 3901 | sig3 & HW_PRTY_ASSERT_SET_3, par_num, global, print); |
3902 | if (print) | ||
3903 | pr_cont("\n"); | ||
3438 | return true; | 3904 | return true; |
3439 | } else | 3905 | } else |
3440 | return false; | 3906 | return false; |
3441 | } | 3907 | } |
3442 | 3908 | ||
3443 | bool bnx2x_chk_parity_attn(struct bnx2x *bp) | 3909 | /** |
3910 | * bnx2x_chk_parity_attn - checks for parity attentions. | ||
3911 | * | ||
3912 | * @bp: driver handle | ||
3913 | * @global: true if there was a global attention | ||
3914 | * @print: show parity attention in syslog | ||
3915 | */ | ||
3916 | bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) | ||
3444 | { | 3917 | { |
3445 | struct attn_route attn; | 3918 | struct attn_route attn; |
3446 | int port = BP_PORT(bp); | 3919 | int port = BP_PORT(bp); |
@@ -3458,8 +3931,8 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp) | |||
3458 | MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + | 3931 | MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + |
3459 | port*4); | 3932 | port*4); |
3460 | 3933 | ||
3461 | return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2], | 3934 | return bnx2x_parity_attn(bp, global, print, attn.sig[0], attn.sig[1], |
3462 | attn.sig[3]); | 3935 | attn.sig[2], attn.sig[3]); |
3463 | } | 3936 | } |
3464 | 3937 | ||
3465 | 3938 | ||
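The reworked helpers above separate detecting latched parity from reporting it: the same walk over the AEU after-invert registers can run silently (print == false), and *global is raised only for blocks such as the MCP or VAUX PCI core whose parity needs a chip-wide rather than per-function recovery. A minimal sketch of a silent caller, assuming only the bnx2x_chk_parity_attn() signature introduced in this hunk and the usual driver context from bnx2x.h:

	/* Illustrative sketch: poll for latched parity without logging.
	 * Assumes the caller handles any recovery locking itself.
	 */
	static bool bnx2x_parity_pending_quiet(struct bnx2x *bp)
	{
		bool global = false;

		/* print == false: classify the attention, keep syslog quiet */
		return bnx2x_chk_parity_attn(bp, &global, false);
	}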
@@ -3538,21 +4011,25 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
3538 | u32 reg_addr; | 4011 | u32 reg_addr; |
3539 | u32 val; | 4012 | u32 val; |
3540 | u32 aeu_mask; | 4013 | u32 aeu_mask; |
4014 | bool global = false; | ||
3541 | 4015 | ||
3542 | /* need to take HW lock because MCP or other port might also | 4016 | /* need to take HW lock because MCP or other port might also |
3543 | try to handle this event */ | 4017 | try to handle this event */ |
3544 | bnx2x_acquire_alr(bp); | 4018 | bnx2x_acquire_alr(bp); |
3545 | 4019 | ||
3546 | if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) { | 4020 | if (bnx2x_chk_parity_attn(bp, &global, true)) { |
4021 | #ifndef BNX2X_STOP_ON_ERROR | ||
3547 | bp->recovery_state = BNX2X_RECOVERY_INIT; | 4022 | bp->recovery_state = BNX2X_RECOVERY_INIT; |
3548 | bnx2x_set_reset_in_progress(bp); | ||
3549 | schedule_delayed_work(&bp->reset_task, 0); | 4023 | schedule_delayed_work(&bp->reset_task, 0); |
3550 | /* Disable HW interrupts */ | 4024 | /* Disable HW interrupts */ |
3551 | bnx2x_int_disable(bp); | 4025 | bnx2x_int_disable(bp); |
3552 | bnx2x_release_alr(bp); | ||
3553 | /* In case of parity errors don't handle attentions so that | 4026 | /* In case of parity errors don't handle attentions so that |
3554 | * other functions would "see" parity errors. | 4027 | * other functions would "see" parity errors. |
3555 | */ | 4028 | */ |
4029 | #else | ||
4030 | bnx2x_panic(); | ||
4031 | #endif | ||
4032 | bnx2x_release_alr(bp); | ||
3556 | return; | 4033 | return; |
3557 | } | 4034 | } |
3558 | 4035 | ||
@@ -3560,7 +4037,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
3560 | attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); | 4037 | attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); |
3561 | attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); | 4038 | attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); |
3562 | attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); | 4039 | attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); |
3563 | if (CHIP_IS_E2(bp)) | 4040 | if (!CHIP_IS_E1x(bp)) |
3564 | attn.sig[4] = | 4041 | attn.sig[4] = |
3565 | REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); | 4042 | REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); |
3566 | else | 4043 | else |
@@ -3656,6 +4133,15 @@ static void bnx2x_attn_int(struct bnx2x *bp) | |||
3656 | bnx2x_attn_int_deasserted(bp, deasserted); | 4133 | bnx2x_attn_int_deasserted(bp, deasserted); |
3657 | } | 4134 | } |
3658 | 4135 | ||
4136 | void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, | ||
4137 | u16 index, u8 op, u8 update) | ||
4138 | { | ||
4139 | u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; | ||
4140 | |||
4141 | bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, | ||
4142 | igu_addr); | ||
4143 | } | ||
4144 | |||
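bnx2x_igu_ack_sb() above is a thin wrapper that derives the per-status-block ack command address inside the IGU BAR and hands it to bnx2x_igu_ack_sb_gen(). The address arithmetic is the only part specific to this helper; a one-line sketch of it, using exactly the constants shown in the hunk:

	/* Illustrative: ack command address for a given IGU status block id */
	static u32 bnx2x_igu_ack_addr(u8 igu_sb_id)
	{
		return BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
	}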
3659 | static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) | 4145 | static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) |
3660 | { | 4146 | { |
3661 | /* No memory barriers */ | 4147 | /* No memory barriers */ |
@@ -3667,6 +4153,8 @@ static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) | |||
3667 | static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, | 4153 | static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, |
3668 | union event_ring_elem *elem) | 4154 | union event_ring_elem *elem) |
3669 | { | 4155 | { |
4156 | u8 err = elem->message.error; | ||
4157 | |||
3670 | if (!bp->cnic_eth_dev.starting_cid || | 4158 | if (!bp->cnic_eth_dev.starting_cid || |
3671 | (cid < bp->cnic_eth_dev.starting_cid && | 4159 | (cid < bp->cnic_eth_dev.starting_cid && |
3672 | cid != bp->cnic_eth_dev.iscsi_l2_cid)) | 4160 | cid != bp->cnic_eth_dev.iscsi_l2_cid)) |
@@ -3674,16 +4162,122 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, | |||
3674 | 4162 | ||
3675 | DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); | 4163 | DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); |
3676 | 4164 | ||
3677 | if (unlikely(elem->message.data.cfc_del_event.error)) { | 4165 | if (unlikely(err)) { |
4166 | |||
3678 | BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", | 4167 | BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", |
3679 | cid); | 4168 | cid); |
3680 | bnx2x_panic_dump(bp); | 4169 | bnx2x_panic_dump(bp); |
3681 | } | 4170 | } |
3682 | bnx2x_cnic_cfc_comp(bp, cid); | 4171 | bnx2x_cnic_cfc_comp(bp, cid, err); |
3683 | return 0; | 4172 | return 0; |
3684 | } | 4173 | } |
3685 | #endif | 4174 | #endif |
3686 | 4175 | ||
4176 | static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp) | ||
4177 | { | ||
4178 | struct bnx2x_mcast_ramrod_params rparam; | ||
4179 | int rc; | ||
4180 | |||
4181 | memset(&rparam, 0, sizeof(rparam)); | ||
4182 | |||
4183 | rparam.mcast_obj = &bp->mcast_obj; | ||
4184 | |||
4185 | netif_addr_lock_bh(bp->dev); | ||
4186 | |||
4187 | /* Clear pending state for the last command */ | ||
4188 | bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); | ||
4189 | |||
4190 | /* If there are pending mcast commands - send them */ | ||
4191 | if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { | ||
4192 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); | ||
4193 | if (rc < 0) | ||
4194 | BNX2X_ERR("Failed to send pending mcast commands: %d\n", | ||
4195 | rc); | ||
4196 | } | ||
4197 | |||
4198 | netif_addr_unlock_bh(bp->dev); | ||
4199 | } | ||
4200 | |||
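bnx2x_handle_mcast_eqe() runs on the multicast ramrod completion: under the device address lock it clears the object's pending bit and, if further commands were queued while the previous one was in flight, pushes them with BNX2X_MCAST_CMD_CONT. A hedged sketch of the submit side follows; BNX2X_MCAST_CMD_ADD is assumed to be the command used to enqueue a fresh configuration (only CMD_CONT appears in this hunk), and the caller is assumed to hold netif_addr_lock_bh():

	/* Illustrative only: queue a multicast configuration and let the
	 * completion handler above drive any follow-up commands.
	 */
	static int bnx2x_mcast_submit_sketch(struct bnx2x *bp)
	{
		struct bnx2x_mcast_ramrod_params rparam;

		memset(&rparam, 0, sizeof(rparam));
		rparam.mcast_obj = &bp->mcast_obj;

		/* BNX2X_MCAST_CMD_ADD is an assumption, see note above */
		return bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
	}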
4201 | static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp, | ||
4202 | union event_ring_elem *elem) | ||
4203 | { | ||
4204 | unsigned long ramrod_flags = 0; | ||
4205 | int rc = 0; | ||
4206 | u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; | ||
4207 | struct bnx2x_vlan_mac_obj *vlan_mac_obj; | ||
4208 | |||
4209 | /* Always push next commands out, don't wait here */ | ||
4210 | __set_bit(RAMROD_CONT, &ramrod_flags); | ||
4211 | |||
4212 | switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { | ||
4213 | case BNX2X_FILTER_MAC_PENDING: | ||
4214 | #ifdef BCM_CNIC | ||
4215 | if (cid == BNX2X_ISCSI_ETH_CID) | ||
4216 | vlan_mac_obj = &bp->iscsi_l2_mac_obj; | ||
4217 | else | ||
4218 | #endif | ||
4219 | vlan_mac_obj = &bp->fp[cid].mac_obj; | ||
4220 | |||
4221 | break; | ||
4222 | vlan_mac_obj = &bp->fp[cid].mac_obj; | ||
4223 | |||
4224 | case BNX2X_FILTER_MCAST_PENDING: | ||
4225 | /* This is only relevant for 57710 where multicast MACs are | ||
4226 | * configured as unicast MACs using the same ramrod. | ||
4227 | */ | ||
4228 | bnx2x_handle_mcast_eqe(bp); | ||
4229 | return; | ||
4230 | default: | ||
4231 | BNX2X_ERR("Unsupported classification command: %d\n", | ||
4232 | elem->message.data.eth_event.echo); | ||
4233 | return; | ||
4234 | } | ||
4235 | |||
4236 | rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); | ||
4237 | |||
4238 | if (rc < 0) | ||
4239 | BNX2X_ERR("Failed to schedule new commands: %d\n", rc); | ||
4240 | else if (rc > 0) | ||
4241 | DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); | ||
4242 | |||
4243 | } | ||
4244 | |||
4245 | #ifdef BCM_CNIC | ||
4246 | static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); | ||
4247 | #endif | ||
4248 | |||
4249 | static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) | ||
4250 | { | ||
4251 | netif_addr_lock_bh(bp->dev); | ||
4252 | |||
4253 | clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); | ||
4254 | |||
4255 | /* Send rx_mode command again if was requested */ | ||
4256 | if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) | ||
4257 | bnx2x_set_storm_rx_mode(bp); | ||
4258 | #ifdef BCM_CNIC | ||
4259 | else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, | ||
4260 | &bp->sp_state)) | ||
4261 | bnx2x_set_iscsi_eth_rx_mode(bp, true); | ||
4262 | else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, | ||
4263 | &bp->sp_state)) | ||
4264 | bnx2x_set_iscsi_eth_rx_mode(bp, false); | ||
4265 | #endif | ||
4266 | |||
4267 | netif_addr_unlock_bh(bp->dev); | ||
4268 | } | ||
4269 | |||
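bnx2x_handle_rx_mode_eqe() is the completion half of a simple deferral scheme: the rx_mode ramrod owner sets BNX2X_FILTER_RX_MODE_PENDING, and anyone who wants a new rx_mode while one is in flight sets BNX2X_FILTER_RX_MODE_SCHED instead; the handler then re-issues the configuration once the pending bit clears. A sketch of that request side, assuming the same sp_state bits shown in the handler (the real request path is not part of this hunk):

	/* Illustrative request-side counterpart of the handler above. */
	static void bnx2x_request_rx_mode_sketch(struct bnx2x *bp)
	{
		netif_addr_lock_bh(bp->dev);

		if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
			/* a ramrod is in flight - defer to the EQE handler */
			set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
		else
			bnx2x_set_storm_rx_mode(bp);

		netif_addr_unlock_bh(bp->dev);
	}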
4270 | static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( | ||
4271 | struct bnx2x *bp, u32 cid) | ||
4272 | { | ||
4273 | #ifdef BCM_CNIC | ||
4274 | if (cid == BNX2X_FCOE_ETH_CID) | ||
4275 | return &bnx2x_fcoe(bp, q_obj); | ||
4276 | else | ||
4277 | #endif | ||
4278 | return &bnx2x_fp(bp, cid, q_obj); | ||
4279 | } | ||
4280 | |||
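The classification and CFC-delete completions recover everything they need from the event itself: the low bits of the SPE echo field carry the software client ID, the bits above BNX2X_SWCID_SHIFT carry the pending-filter type, and bnx2x_cid_to_q_obj() maps the CID back to a queue state object. A small sketch of the echo decode, using only the masks already used above:

	/* Illustrative decode of the echo field consumed by the EQ handlers */
	static void bnx2x_decode_echo(u32 echo, u32 *cid, u32 *type)
	{
		*cid  = echo & BNX2X_SWCID_MASK;	/* software client id */
		*type = echo >> BNX2X_SWCID_SHIFT;	/* e.g. BNX2X_FILTER_MAC_PENDING */
	}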
3687 | static void bnx2x_eq_int(struct bnx2x *bp) | 4281 | static void bnx2x_eq_int(struct bnx2x *bp) |
3688 | { | 4282 | { |
3689 | u16 hw_cons, sw_cons, sw_prod; | 4283 | u16 hw_cons, sw_cons, sw_prod; |
@@ -3691,6 +4285,9 @@ static void bnx2x_eq_int(struct bnx2x *bp) | |||
3691 | u32 cid; | 4285 | u32 cid; |
3692 | u8 opcode; | 4286 | u8 opcode; |
3693 | int spqe_cnt = 0; | 4287 | int spqe_cnt = 0; |
4288 | struct bnx2x_queue_sp_obj *q_obj; | ||
4289 | struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; | ||
4290 | struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; | ||
3694 | 4291 | ||
3695 | hw_cons = le16_to_cpu(*bp->eq_cons_sb); | 4292 | hw_cons = le16_to_cpu(*bp->eq_cons_sb); |
3696 | 4293 | ||
@@ -3725,7 +4322,8 @@ static void bnx2x_eq_int(struct bnx2x *bp) | |||
3725 | /* handle eq element */ | 4322 | /* handle eq element */ |
3726 | switch (opcode) { | 4323 | switch (opcode) { |
3727 | case EVENT_RING_OPCODE_STAT_QUERY: | 4324 | case EVENT_RING_OPCODE_STAT_QUERY: |
3728 | DP(NETIF_MSG_TIMER, "got statistics comp event\n"); | 4325 | DP(NETIF_MSG_TIMER, "got statistics comp event %d\n", |
4326 | bp->stats_comp++); | ||
3729 | /* nothing to do with stats comp */ | 4327 | /* nothing to do with stats comp */ |
3730 | continue; | 4328 | continue; |
3731 | 4329 | ||
@@ -3740,12 +4338,13 @@ static void bnx2x_eq_int(struct bnx2x *bp) | |||
3740 | #ifdef BCM_CNIC | 4338 | #ifdef BCM_CNIC |
3741 | if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) | 4339 | if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) |
3742 | goto next_spqe; | 4340 | goto next_spqe; |
3743 | if (cid == BNX2X_FCOE_ETH_CID) | ||
3744 | bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED; | ||
3745 | else | ||
3746 | #endif | 4341 | #endif |
3747 | bnx2x_fp(bp, cid, state) = | 4342 | q_obj = bnx2x_cid_to_q_obj(bp, cid); |
3748 | BNX2X_FP_STATE_CLOSED; | 4343 | |
4344 | if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) | ||
4345 | break; | ||
4346 | |||
4347 | |||
3749 | 4348 | ||
3750 | goto next_spqe; | 4349 | goto next_spqe; |
3751 | 4350 | ||
@@ -3753,42 +4352,75 @@ static void bnx2x_eq_int(struct bnx2x *bp) | |||
3753 | DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n"); | 4352 | DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n"); |
3754 | bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); | 4353 | bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); |
3755 | goto next_spqe; | 4354 | goto next_spqe; |
4355 | |||
3756 | case EVENT_RING_OPCODE_START_TRAFFIC: | 4356 | case EVENT_RING_OPCODE_START_TRAFFIC: |
3757 | DP(NETIF_MSG_IFUP, "got START TRAFFIC\n"); | 4357 | DP(NETIF_MSG_IFUP, "got START TRAFFIC\n"); |
3758 | bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); | 4358 | bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); |
3759 | goto next_spqe; | 4359 | goto next_spqe; |
4360 | case EVENT_RING_OPCODE_FUNCTION_START: | ||
4361 | DP(NETIF_MSG_IFUP, "got FUNC_START ramrod\n"); | ||
4362 | if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) | ||
4363 | break; | ||
4364 | |||
4365 | goto next_spqe; | ||
4366 | |||
4367 | case EVENT_RING_OPCODE_FUNCTION_STOP: | ||
4368 | DP(NETIF_MSG_IFDOWN, "got FUNC_STOP ramrod\n"); | ||
4369 | if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) | ||
4370 | break; | ||
4371 | |||
4372 | goto next_spqe; | ||
3760 | } | 4373 | } |
3761 | 4374 | ||
3762 | switch (opcode | bp->state) { | 4375 | switch (opcode | bp->state) { |
3763 | case (EVENT_RING_OPCODE_FUNCTION_START | | 4376 | case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | |
4377 | BNX2X_STATE_OPEN): | ||
4378 | case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | | ||
3764 | BNX2X_STATE_OPENING_WAIT4_PORT): | 4379 | BNX2X_STATE_OPENING_WAIT4_PORT): |
3765 | DP(NETIF_MSG_IFUP, "got setup ramrod\n"); | 4380 | cid = elem->message.data.eth_event.echo & |
3766 | bp->state = BNX2X_STATE_FUNC_STARTED; | 4381 | BNX2X_SWCID_MASK; |
4382 | DP(NETIF_MSG_IFUP, "got RSS_UPDATE ramrod. CID %d\n", | ||
4383 | cid); | ||
4384 | rss_raw->clear_pending(rss_raw); | ||
3767 | break; | 4385 | break; |
3768 | 4386 | ||
3769 | case (EVENT_RING_OPCODE_FUNCTION_STOP | | 4387 | case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): |
4388 | case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): | ||
4389 | case (EVENT_RING_OPCODE_SET_MAC | | ||
4390 | BNX2X_STATE_CLOSING_WAIT4_HALT): | ||
4391 | case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | | ||
4392 | BNX2X_STATE_OPEN): | ||
4393 | case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | | ||
4394 | BNX2X_STATE_DIAG): | ||
4395 | case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | | ||
3770 | BNX2X_STATE_CLOSING_WAIT4_HALT): | 4396 | BNX2X_STATE_CLOSING_WAIT4_HALT): |
3771 | DP(NETIF_MSG_IFDOWN, "got halt ramrod\n"); | 4397 | DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n"); |
3772 | bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; | 4398 | bnx2x_handle_classification_eqe(bp, elem); |
3773 | break; | 4399 | break; |
3774 | 4400 | ||
3775 | case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): | 4401 | case (EVENT_RING_OPCODE_MULTICAST_RULES | |
3776 | case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): | 4402 | BNX2X_STATE_OPEN): |
3777 | DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); | 4403 | case (EVENT_RING_OPCODE_MULTICAST_RULES | |
3778 | if (elem->message.data.set_mac_event.echo) | 4404 | BNX2X_STATE_DIAG): |
3779 | bp->set_mac_pending = 0; | 4405 | case (EVENT_RING_OPCODE_MULTICAST_RULES | |
4406 | BNX2X_STATE_CLOSING_WAIT4_HALT): | ||
4407 | DP(NETIF_MSG_IFUP, "got mcast ramrod\n"); | ||
4408 | bnx2x_handle_mcast_eqe(bp); | ||
3780 | break; | 4409 | break; |
3781 | 4410 | ||
3782 | case (EVENT_RING_OPCODE_SET_MAC | | 4411 | case (EVENT_RING_OPCODE_FILTERS_RULES | |
4412 | BNX2X_STATE_OPEN): | ||
4413 | case (EVENT_RING_OPCODE_FILTERS_RULES | | ||
4414 | BNX2X_STATE_DIAG): | ||
4415 | case (EVENT_RING_OPCODE_FILTERS_RULES | | ||
3783 | BNX2X_STATE_CLOSING_WAIT4_HALT): | 4416 | BNX2X_STATE_CLOSING_WAIT4_HALT): |
3784 | DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); | 4417 | DP(NETIF_MSG_IFUP, "got rx_mode ramrod\n"); |
3785 | if (elem->message.data.set_mac_event.echo) | 4418 | bnx2x_handle_rx_mode_eqe(bp); |
3786 | bp->set_mac_pending = 0; | ||
3787 | break; | 4419 | break; |
3788 | default: | 4420 | default: |
3789 | /* unknown event log error and continue */ | 4421 | /* unknown event log error and continue */ |
3790 | BNX2X_ERR("Unknown EQ event %d\n", | 4422 | BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", |
3791 | elem->message.opcode); | 4423 | elem->message.opcode, bp->state); |
3792 | } | 4424 | } |
3793 | next_spqe: | 4425 | next_spqe: |
3794 | spqe_cnt++; | 4426 | spqe_cnt++; |
@@ -3811,12 +4443,6 @@ static void bnx2x_sp_task(struct work_struct *work) | |||
3811 | struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); | 4443 | struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); |
3812 | u16 status; | 4444 | u16 status; |
3813 | 4445 | ||
3814 | /* Return here if interrupt is disabled */ | ||
3815 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | ||
3816 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); | ||
3817 | return; | ||
3818 | } | ||
3819 | |||
3820 | status = bnx2x_update_dsb_idx(bp); | 4446 | status = bnx2x_update_dsb_idx(bp); |
3821 | /* if (status == 0) */ | 4447 | /* if (status == 0) */ |
3822 | /* BNX2X_ERR("spurious slowpath interrupt!\n"); */ | 4448 | /* BNX2X_ERR("spurious slowpath interrupt!\n"); */ |
@@ -3860,12 +4486,6 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) | |||
3860 | struct net_device *dev = dev_instance; | 4486 | struct net_device *dev = dev_instance; |
3861 | struct bnx2x *bp = netdev_priv(dev); | 4487 | struct bnx2x *bp = netdev_priv(dev); |
3862 | 4488 | ||
3863 | /* Return here if interrupt is disabled */ | ||
3864 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | ||
3865 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); | ||
3866 | return IRQ_HANDLED; | ||
3867 | } | ||
3868 | |||
3869 | bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, | 4489 | bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, |
3870 | IGU_INT_DISABLE, 0); | 4490 | IGU_INT_DISABLE, 0); |
3871 | 4491 | ||
@@ -3892,6 +4512,14 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) | |||
3892 | 4512 | ||
3893 | /* end of slow path */ | 4513 | /* end of slow path */ |
3894 | 4514 | ||
4515 | |||
4516 | void bnx2x_drv_pulse(struct bnx2x *bp) | ||
4517 | { | ||
4518 | SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, | ||
4519 | bp->fw_drv_pulse_wr_seq); | ||
4520 | } | ||
4521 | |||
4522 | |||
3895 | static void bnx2x_timer(unsigned long data) | 4523 | static void bnx2x_timer(unsigned long data) |
3896 | { | 4524 | { |
3897 | struct bnx2x *bp = (struct bnx2x *) data; | 4525 | struct bnx2x *bp = (struct bnx2x *) data; |
@@ -3899,9 +4527,6 @@ static void bnx2x_timer(unsigned long data) | |||
3899 | if (!netif_running(bp->dev)) | 4527 | if (!netif_running(bp->dev)) |
3900 | return; | 4528 | return; |
3901 | 4529 | ||
3902 | if (atomic_read(&bp->intr_sem) != 0) | ||
3903 | goto timer_restart; | ||
3904 | |||
3905 | if (poll) { | 4530 | if (poll) { |
3906 | struct bnx2x_fastpath *fp = &bp->fp[0]; | 4531 | struct bnx2x_fastpath *fp = &bp->fp[0]; |
3907 | 4532 | ||
@@ -3918,7 +4543,7 @@ static void bnx2x_timer(unsigned long data) | |||
3918 | bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; | 4543 | bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; |
3919 | /* TBD - add SYSTEM_TIME */ | 4544 | /* TBD - add SYSTEM_TIME */ |
3920 | drv_pulse = bp->fw_drv_pulse_wr_seq; | 4545 | drv_pulse = bp->fw_drv_pulse_wr_seq; |
3921 | SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse); | 4546 | bnx2x_drv_pulse(bp); |
3922 | 4547 | ||
3923 | mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & | 4548 | mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & |
3924 | MCP_PULSE_SEQ_MASK); | 4549 | MCP_PULSE_SEQ_MASK); |
@@ -3936,7 +4561,6 @@ static void bnx2x_timer(unsigned long data) | |||
3936 | if (bp->state == BNX2X_STATE_OPEN) | 4561 | if (bp->state == BNX2X_STATE_OPEN) |
3937 | bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); | 4562 | bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); |
3938 | 4563 | ||
3939 | timer_restart: | ||
3940 | mod_timer(&bp->timer, jiffies + bp->current_interval); | 4564 | mod_timer(&bp->timer, jiffies + bp->current_interval); |
3941 | } | 4565 | } |
3942 | 4566 | ||
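The timer keeps the driver/MCP heartbeat alive: fw_drv_pulse_wr_seq is advanced and written to the function mailbox through the new bnx2x_drv_pulse() helper, and the MCP's own pulse is read back so the two sequences can be compared. A sketch of that comparison; the "at most one step apart" tolerance is an assumption of the sketch, since this hunk only shows the mailbox write and read:

	/* Illustrative heartbeat check between driver and MCP pulses. */
	static void bnx2x_check_pulse_sketch(struct bnx2x *bp,
					     u16 drv_pulse, u16 mcp_pulse)
	{
		if (drv_pulse != mcp_pulse &&
		    drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
	}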
@@ -3982,18 +4606,16 @@ static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) | |||
3982 | struct hc_status_block_data_e1x sb_data_e1x; | 4606 | struct hc_status_block_data_e1x sb_data_e1x; |
3983 | 4607 | ||
3984 | /* disable the function first */ | 4608 | /* disable the function first */ |
3985 | if (CHIP_IS_E2(bp)) { | 4609 | if (!CHIP_IS_E1x(bp)) { |
3986 | memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); | 4610 | memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); |
3987 | sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED; | 4611 | sb_data_e2.common.state = SB_DISABLED; |
3988 | sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED; | ||
3989 | sb_data_e2.common.p_func.vf_valid = false; | 4612 | sb_data_e2.common.p_func.vf_valid = false; |
3990 | sb_data_p = (u32 *)&sb_data_e2; | 4613 | sb_data_p = (u32 *)&sb_data_e2; |
3991 | data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); | 4614 | data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); |
3992 | } else { | 4615 | } else { |
3993 | memset(&sb_data_e1x, 0, | 4616 | memset(&sb_data_e1x, 0, |
3994 | sizeof(struct hc_status_block_data_e1x)); | 4617 | sizeof(struct hc_status_block_data_e1x)); |
3995 | sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED; | 4618 | sb_data_e1x.common.state = SB_DISABLED; |
3996 | sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED; | ||
3997 | sb_data_e1x.common.p_func.vf_valid = false; | 4619 | sb_data_e1x.common.p_func.vf_valid = false; |
3998 | sb_data_p = (u32 *)&sb_data_e1x; | 4620 | sb_data_p = (u32 *)&sb_data_e1x; |
3999 | data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); | 4621 | data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); |
@@ -4027,8 +4649,7 @@ static inline void bnx2x_zero_sp_sb(struct bnx2x *bp) | |||
4027 | struct hc_sp_status_block_data sp_sb_data; | 4649 | struct hc_sp_status_block_data sp_sb_data; |
4028 | memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); | 4650 | memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); |
4029 | 4651 | ||
4030 | sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED; | 4652 | sp_sb_data.state = SB_DISABLED; |
4031 | sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED; | ||
4032 | sp_sb_data.p_func.vf_valid = false; | 4653 | sp_sb_data.p_func.vf_valid = false; |
4033 | 4654 | ||
4034 | bnx2x_wr_sp_sb_data(bp, &sp_sb_data); | 4655 | bnx2x_wr_sp_sb_data(bp, &sp_sb_data); |
@@ -4071,8 +4692,9 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | |||
4071 | 4692 | ||
4072 | bnx2x_zero_fp_sb(bp, fw_sb_id); | 4693 | bnx2x_zero_fp_sb(bp, fw_sb_id); |
4073 | 4694 | ||
4074 | if (CHIP_IS_E2(bp)) { | 4695 | if (!CHIP_IS_E1x(bp)) { |
4075 | memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); | 4696 | memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); |
4697 | sb_data_e2.common.state = SB_ENABLED; | ||
4076 | sb_data_e2.common.p_func.pf_id = BP_FUNC(bp); | 4698 | sb_data_e2.common.p_func.pf_id = BP_FUNC(bp); |
4077 | sb_data_e2.common.p_func.vf_id = vfid; | 4699 | sb_data_e2.common.p_func.vf_id = vfid; |
4078 | sb_data_e2.common.p_func.vf_valid = vf_valid; | 4700 | sb_data_e2.common.p_func.vf_valid = vf_valid; |
@@ -4086,6 +4708,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | |||
4086 | } else { | 4708 | } else { |
4087 | memset(&sb_data_e1x, 0, | 4709 | memset(&sb_data_e1x, 0, |
4088 | sizeof(struct hc_status_block_data_e1x)); | 4710 | sizeof(struct hc_status_block_data_e1x)); |
4711 | sb_data_e1x.common.state = SB_ENABLED; | ||
4089 | sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp); | 4712 | sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp); |
4090 | sb_data_e1x.common.p_func.vf_id = 0xff; | 4713 | sb_data_e1x.common.p_func.vf_id = 0xff; |
4091 | sb_data_e1x.common.p_func.vf_valid = false; | 4714 | sb_data_e1x.common.p_func.vf_valid = false; |
@@ -4109,19 +4732,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | |||
4109 | bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); | 4732 | bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); |
4110 | } | 4733 | } |
4111 | 4734 | ||
4112 | static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id, | 4735 | static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id, |
4113 | u8 sb_index, u8 disable, u16 usec) | ||
4114 | { | ||
4115 | int port = BP_PORT(bp); | ||
4116 | u8 ticks = usec / BNX2X_BTR; | ||
4117 | |||
4118 | storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); | ||
4119 | |||
4120 | disable = disable ? 1 : (usec ? 0 : 1); | ||
4121 | storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); | ||
4122 | } | ||
4123 | |||
4124 | static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id, | ||
4125 | u16 tx_usec, u16 rx_usec) | 4736 | u16 tx_usec, u16 rx_usec) |
4126 | { | 4737 | { |
4127 | bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX, | 4738 | bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX, |
@@ -4168,7 +4779,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4168 | bp->attn_group[index].sig[sindex] = | 4779 | bp->attn_group[index].sig[sindex] = |
4169 | REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); | 4780 | REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); |
4170 | 4781 | ||
4171 | if (CHIP_IS_E2(bp)) | 4782 | if (!CHIP_IS_E1x(bp)) |
4172 | /* | 4783 | /* |
4173 | * enable5 is separate from the rest of the registers, | 4784 | * enable5 is separate from the rest of the registers, |
4174 | * and therefore the address skip is 4 | 4785 | * and therefore the address skip is 4 |
@@ -4186,7 +4797,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4186 | 4797 | ||
4187 | REG_WR(bp, reg_offset, U64_LO(section)); | 4798 | REG_WR(bp, reg_offset, U64_LO(section)); |
4188 | REG_WR(bp, reg_offset + 4, U64_HI(section)); | 4799 | REG_WR(bp, reg_offset + 4, U64_HI(section)); |
4189 | } else if (CHIP_IS_E2(bp)) { | 4800 | } else if (!CHIP_IS_E1x(bp)) { |
4190 | REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); | 4801 | REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); |
4191 | REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); | 4802 | REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); |
4192 | } | 4803 | } |
@@ -4196,6 +4807,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4196 | 4807 | ||
4197 | bnx2x_zero_sp_sb(bp); | 4808 | bnx2x_zero_sp_sb(bp); |
4198 | 4809 | ||
4810 | sp_sb_data.state = SB_ENABLED; | ||
4199 | sp_sb_data.host_sb_addr.lo = U64_LO(section); | 4811 | sp_sb_data.host_sb_addr.lo = U64_LO(section); |
4200 | sp_sb_data.host_sb_addr.hi = U64_HI(section); | 4812 | sp_sb_data.host_sb_addr.hi = U64_HI(section); |
4201 | sp_sb_data.igu_sb_id = igu_sp_sb_index; | 4813 | sp_sb_data.igu_sb_id = igu_sp_sb_index; |
@@ -4206,9 +4818,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4206 | 4818 | ||
4207 | bnx2x_wr_sp_sb_data(bp, &sp_sb_data); | 4819 | bnx2x_wr_sp_sb_data(bp, &sp_sb_data); |
4208 | 4820 | ||
4209 | bp->stats_pending = 0; | ||
4210 | bp->set_mac_pending = 0; | ||
4211 | |||
4212 | bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); | 4821 | bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); |
4213 | } | 4822 | } |
4214 | 4823 | ||
@@ -4254,146 +4863,129 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp) | |||
4254 | min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); | 4863 | min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); |
4255 | } | 4864 | } |
4256 | 4865 | ||
4257 | void bnx2x_push_indir_table(struct bnx2x *bp) | 4866 | |
4867 | /* called with netif_addr_lock_bh() */ | ||
4868 | void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, | ||
4869 | unsigned long rx_mode_flags, | ||
4870 | unsigned long rx_accept_flags, | ||
4871 | unsigned long tx_accept_flags, | ||
4872 | unsigned long ramrod_flags) | ||
4258 | { | 4873 | { |
4259 | int func = BP_FUNC(bp); | 4874 | struct bnx2x_rx_mode_ramrod_params ramrod_param; |
4260 | int i; | 4875 | int rc; |
4261 | 4876 | ||
4262 | if (bp->multi_mode == ETH_RSS_MODE_DISABLED) | 4877 | memset(&ramrod_param, 0, sizeof(ramrod_param)); |
4263 | return; | ||
4264 | 4878 | ||
4265 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) | 4879 | /* Prepare ramrod parameters */ |
4266 | REG_WR8(bp, BAR_TSTRORM_INTMEM + | 4880 | ramrod_param.cid = 0; |
4267 | TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, | 4881 | ramrod_param.cl_id = cl_id; |
4268 | bp->fp->cl_id + bp->rx_indir_table[i]); | 4882 | ramrod_param.rx_mode_obj = &bp->rx_mode_obj; |
4269 | } | 4883 | ramrod_param.func_id = BP_FUNC(bp); |
4270 | 4884 | ||
4271 | static void bnx2x_init_ind_table(struct bnx2x *bp) | 4885 | ramrod_param.pstate = &bp->sp_state; |
4272 | { | 4886 | ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING; |
4273 | int i; | 4887 | |
4888 | ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata); | ||
4889 | ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata); | ||
4274 | 4890 | ||
4275 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) | 4891 | set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); |
4276 | bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp); | ||
4277 | 4892 | ||
4278 | bnx2x_push_indir_table(bp); | 4893 | ramrod_param.ramrod_flags = ramrod_flags; |
4894 | ramrod_param.rx_mode_flags = rx_mode_flags; | ||
4895 | |||
4896 | ramrod_param.rx_accept_flags = rx_accept_flags; | ||
4897 | ramrod_param.tx_accept_flags = tx_accept_flags; | ||
4898 | |||
4899 | rc = bnx2x_config_rx_mode(bp, &ramrod_param); | ||
4900 | if (rc < 0) { | ||
4901 | BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); | ||
4902 | return; | ||
4903 | } | ||
4279 | } | 4904 | } |
4280 | 4905 | ||
4906 | /* called with netif_addr_lock_bh() */ | ||
4281 | void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | 4907 | void bnx2x_set_storm_rx_mode(struct bnx2x *bp) |
4282 | { | 4908 | { |
4283 | int mode = bp->rx_mode; | 4909 | unsigned long rx_mode_flags = 0, ramrod_flags = 0; |
4284 | int port = BP_PORT(bp); | 4910 | unsigned long rx_accept_flags = 0, tx_accept_flags = 0; |
4285 | u16 cl_id; | ||
4286 | u32 def_q_filters = 0; | ||
4287 | 4911 | ||
4288 | /* All but management unicast packets should pass to the host as well */ | ||
4289 | u32 llh_mask = | ||
4290 | NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST | | ||
4291 | NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST | | ||
4292 | NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN | | ||
4293 | NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN; | ||
4294 | |||
4295 | switch (mode) { | ||
4296 | case BNX2X_RX_MODE_NONE: /* no Rx */ | ||
4297 | def_q_filters = BNX2X_ACCEPT_NONE; | ||
4298 | #ifdef BCM_CNIC | 4912 | #ifdef BCM_CNIC |
4299 | if (!NO_FCOE(bp)) { | 4913 | if (!NO_FCOE(bp)) |
4300 | cl_id = bnx2x_fcoe(bp, cl_id); | ||
4301 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE); | ||
4302 | } | ||
4303 | #endif | ||
4304 | break; | ||
4305 | 4914 | ||
4306 | case BNX2X_RX_MODE_NORMAL: | 4915 | /* Configure rx_mode of FCoE Queue */ |
4307 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | | 4916 | __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); |
4308 | BNX2X_ACCEPT_MULTICAST; | ||
4309 | #ifdef BCM_CNIC | ||
4310 | if (!NO_FCOE(bp)) { | ||
4311 | cl_id = bnx2x_fcoe(bp, cl_id); | ||
4312 | bnx2x_rxq_set_mac_filters(bp, cl_id, | ||
4313 | BNX2X_ACCEPT_UNICAST | | ||
4314 | BNX2X_ACCEPT_MULTICAST); | ||
4315 | } | ||
4316 | #endif | 4917 | #endif |
4317 | break; | ||
4318 | 4918 | ||
4319 | case BNX2X_RX_MODE_ALLMULTI: | 4919 | switch (bp->rx_mode) { |
4320 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | | 4920 | case BNX2X_RX_MODE_NONE: |
4321 | BNX2X_ACCEPT_ALL_MULTICAST; | ||
4322 | #ifdef BCM_CNIC | ||
4323 | /* | 4921 | /* |
4324 | * Prevent duplication of multicast packets by configuring FCoE | 4922 | * 'drop all' supersedes any accept flags that may have been |
4325 | * L2 Client to receive only matched unicast frames. | 4923 | * passed to the function. |
4326 | */ | 4924 | */ |
4327 | if (!NO_FCOE(bp)) { | ||
4328 | cl_id = bnx2x_fcoe(bp, cl_id); | ||
4329 | bnx2x_rxq_set_mac_filters(bp, cl_id, | ||
4330 | BNX2X_ACCEPT_UNICAST); | ||
4331 | } | ||
4332 | #endif | ||
4333 | break; | 4925 | break; |
4926 | case BNX2X_RX_MODE_NORMAL: | ||
4927 | __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); | ||
4928 | __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags); | ||
4929 | __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); | ||
4930 | |||
4931 | /* internal switching mode */ | ||
4932 | __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); | ||
4933 | __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags); | ||
4934 | __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); | ||
4935 | |||
4936 | break; | ||
4937 | case BNX2X_RX_MODE_ALLMULTI: | ||
4938 | __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); | ||
4939 | __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); | ||
4940 | __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); | ||
4941 | |||
4942 | /* internal switching mode */ | ||
4943 | __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); | ||
4944 | __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); | ||
4945 | __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); | ||
4334 | 4946 | ||
4947 | break; | ||
4335 | case BNX2X_RX_MODE_PROMISC: | 4948 | case BNX2X_RX_MODE_PROMISC: |
4336 | def_q_filters |= BNX2X_PROMISCUOUS_MODE; | 4949 | /* According to definition of SI mode, iface in promisc mode |
4337 | #ifdef BCM_CNIC | 4950 | * should receive matched and unmatched (in resolution of port) |
4338 | /* | 4951 | * unicast packets. |
4339 | * Prevent packets duplication by configuring DROP_ALL for FCoE | ||
4340 | * L2 Client. | ||
4341 | */ | 4952 | */ |
4342 | if (!NO_FCOE(bp)) { | 4953 | __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags); |
4343 | cl_id = bnx2x_fcoe(bp, cl_id); | 4954 | __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); |
4344 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE); | 4955 | __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); |
4345 | } | 4956 | __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); |
4346 | #endif | 4957 | |
4347 | /* pass management unicast packets as well */ | 4958 | /* internal switching mode */ |
4348 | llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; | 4959 | __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); |
4349 | break; | 4960 | __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); |
4961 | |||
4962 | if (IS_MF_SI(bp)) | ||
4963 | __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags); | ||
4964 | else | ||
4965 | __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); | ||
4350 | 4966 | ||
4351 | default: | ||
4352 | BNX2X_ERR("BAD rx mode (%d)\n", mode); | ||
4353 | break; | 4967 | break; |
4968 | default: | ||
4969 | BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode); | ||
4970 | return; | ||
4354 | } | 4971 | } |
4355 | 4972 | ||
4356 | cl_id = BP_L_ID(bp); | 4973 | if (bp->rx_mode != BNX2X_RX_MODE_NONE) { |
4357 | bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters); | 4974 | __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags); |
4358 | 4975 | __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags); | |
4359 | REG_WR(bp, | 4976 | } |
4360 | (port ? NIG_REG_LLH1_BRB1_DRV_MASK : | ||
4361 | NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask); | ||
4362 | 4977 | ||
4363 | DP(NETIF_MSG_IFUP, "rx mode %d\n" | 4978 | __set_bit(RAMROD_RX, &ramrod_flags); |
4364 | "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n" | 4979 | __set_bit(RAMROD_TX, &ramrod_flags); |
4365 | "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n" | ||
4366 | "unmatched_ucast 0x%x\n", mode, | ||
4367 | bp->mac_filters.ucast_drop_all, | ||
4368 | bp->mac_filters.mcast_drop_all, | ||
4369 | bp->mac_filters.bcast_drop_all, | ||
4370 | bp->mac_filters.ucast_accept_all, | ||
4371 | bp->mac_filters.mcast_accept_all, | ||
4372 | bp->mac_filters.bcast_accept_all, | ||
4373 | bp->mac_filters.unmatched_unicast | ||
4374 | ); | ||
4375 | 4980 | ||
4376 | storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp)); | 4981 | bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags, |
4982 | tx_accept_flags, ramrod_flags); | ||
4377 | } | 4983 | } |
4378 | 4984 | ||
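After this rework bnx2x_set_storm_rx_mode() only translates bp->rx_mode into accept-flag bitmaps and hands them, together with RAMROD_RX/RAMROD_TX, to bnx2x_set_q_rx_mode() for the leading client. The same helper can be driven directly; a sketch that puts a single client into an accept-nothing state, using only the signature and flags visible in this hunk (the caller is assumed to hold netif_addr_lock_bh()):

	/* Illustrative: drop all rx/tx traffic on one client. Empty accept
	 * bitmaps mean "accept nothing".
	 */
	static void bnx2x_q_drop_all_sketch(struct bnx2x *bp, u8 cl_id)
	{
		unsigned long rx_mode_flags = 0, ramrod_flags = 0;
		unsigned long rx_accept_flags = 0, tx_accept_flags = 0;

		__set_bit(RAMROD_RX, &ramrod_flags);
		__set_bit(RAMROD_TX, &ramrod_flags);

		bnx2x_set_q_rx_mode(bp, cl_id, rx_mode_flags, rx_accept_flags,
				    tx_accept_flags, ramrod_flags);
	}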
4379 | static void bnx2x_init_internal_common(struct bnx2x *bp) | 4985 | static void bnx2x_init_internal_common(struct bnx2x *bp) |
4380 | { | 4986 | { |
4381 | int i; | 4987 | int i; |
4382 | 4988 | ||
4383 | if (!CHIP_IS_E1(bp)) { | ||
4384 | |||
4385 | /* xstorm needs to know whether to add ovlan to packets or not, | ||
4386 | * in switch-independent we'll write 0 to here... */ | ||
4387 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, | ||
4388 | bp->mf_mode); | ||
4389 | REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET, | ||
4390 | bp->mf_mode); | ||
4391 | REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET, | ||
4392 | bp->mf_mode); | ||
4393 | REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET, | ||
4394 | bp->mf_mode); | ||
4395 | } | ||
4396 | |||
4397 | if (IS_MF_SI(bp)) | 4989 | if (IS_MF_SI(bp)) |
4398 | /* | 4990 | /* |
4399 | * In switch independent mode, the TSTORM needs to accept | 4991 | * In switch independent mode, the TSTORM needs to accept |
@@ -4402,25 +4994,22 @@ static void bnx2x_init_internal_common(struct bnx2x *bp) | |||
4402 | */ | 4994 | */ |
4403 | REG_WR8(bp, BAR_TSTRORM_INTMEM + | 4995 | REG_WR8(bp, BAR_TSTRORM_INTMEM + |
4404 | TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2); | 4996 | TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2); |
4997 | else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */ | ||
4998 | REG_WR8(bp, BAR_TSTRORM_INTMEM + | ||
4999 | TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0); | ||
4405 | 5000 | ||
4406 | /* Zero this manually as its initialization is | 5001 | /* Zero this manually as its initialization is |
4407 | currently missing in the initTool */ | 5002 | currently missing in the initTool */ |
4408 | for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) | 5003 | for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) |
4409 | REG_WR(bp, BAR_USTRORM_INTMEM + | 5004 | REG_WR(bp, BAR_USTRORM_INTMEM + |
4410 | USTORM_AGG_DATA_OFFSET + i * 4, 0); | 5005 | USTORM_AGG_DATA_OFFSET + i * 4, 0); |
4411 | if (CHIP_IS_E2(bp)) { | 5006 | if (!CHIP_IS_E1x(bp)) { |
4412 | REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET, | 5007 | REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET, |
4413 | CHIP_INT_MODE_IS_BC(bp) ? | 5008 | CHIP_INT_MODE_IS_BC(bp) ? |
4414 | HC_IGU_BC_MODE : HC_IGU_NBC_MODE); | 5009 | HC_IGU_BC_MODE : HC_IGU_NBC_MODE); |
4415 | } | 5010 | } |
4416 | } | 5011 | } |
4417 | 5012 | ||
4418 | static void bnx2x_init_internal_port(struct bnx2x *bp) | ||
4419 | { | ||
4420 | /* port */ | ||
4421 | bnx2x_dcb_init_intmem_pfc(bp); | ||
4422 | } | ||
4423 | |||
4424 | static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) | 5013 | static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) |
4425 | { | 5014 | { |
4426 | switch (load_code) { | 5015 | switch (load_code) { |
@@ -4430,7 +5019,7 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) | |||
4430 | /* no break */ | 5019 | /* no break */ |
4431 | 5020 | ||
4432 | case FW_MSG_CODE_DRV_LOAD_PORT: | 5021 | case FW_MSG_CODE_DRV_LOAD_PORT: |
4433 | bnx2x_init_internal_port(bp); | 5022 | /* nothing to do */ |
4434 | /* no break */ | 5023 | /* no break */ |
4435 | 5024 | ||
4436 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: | 5025 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: |
@@ -4444,31 +5033,57 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) | |||
4444 | } | 5033 | } |
4445 | } | 5034 | } |
4446 | 5035 | ||
4447 | static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx) | 5036 | static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp) |
4448 | { | 5037 | { |
4449 | struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; | 5038 | return fp->bp->igu_base_sb + fp->index + CNIC_CONTEXT_USE; |
5039 | } | ||
5040 | |||
5041 | static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) | ||
5042 | { | ||
5043 | return fp->bp->base_fw_ndsb + fp->index + CNIC_CONTEXT_USE; | ||
5044 | } | ||
5045 | |||
5046 | static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) | ||
5047 | { | ||
5048 | if (CHIP_IS_E1x(fp->bp)) | ||
5049 | return BP_L_ID(fp->bp) + fp->index; | ||
5050 | else /* We want Client ID to be the same as IGU SB ID for 57712 */ | ||
5051 | return bnx2x_fp_igu_sb_id(fp); | ||
5052 | } | ||
4450 | 5053 | ||
4451 | fp->state = BNX2X_FP_STATE_CLOSED; | 5054 | static void bnx2x_init_fp(struct bnx2x *bp, int fp_idx) |
5055 | { | ||
5056 | struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; | ||
5057 | unsigned long q_type = 0; | ||
4452 | 5058 | ||
4453 | fp->cid = fp_idx; | 5059 | fp->cid = fp_idx; |
4454 | fp->cl_id = BP_L_ID(bp) + fp_idx; | 5060 | fp->cl_id = bnx2x_fp_cl_id(fp); |
4455 | fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE; | 5061 | fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp); |
4456 | fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE; | 5062 | fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp); |
4457 | /* qZone id equals to FW (per path) client id */ | 5063 | /* qZone id equals to FW (per path) client id */ |
4458 | fp->cl_qzone_id = fp->cl_id + | 5064 | fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); |
4459 | BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 : | 5065 | |
4460 | ETH_MAX_RX_CLIENTS_E1H); | ||
4461 | /* init shortcut */ | 5066 | /* init shortcut */ |
4462 | fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ? | 5067 | fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); |
4463 | USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) : | ||
4464 | USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); | ||
4465 | /* Setup SB indices */ | 5068 | /* Setup SB indices */ |
4466 | fp->rx_cons_sb = BNX2X_RX_SB_INDEX; | 5069 | fp->rx_cons_sb = BNX2X_RX_SB_INDEX; |
4467 | fp->tx_cons_sb = BNX2X_TX_SB_INDEX; | 5070 | fp->tx_cons_sb = BNX2X_TX_SB_INDEX; |
4468 | 5071 | ||
5072 | /* Configure Queue State object */ | ||
5073 | __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); | ||
5074 | __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); | ||
5075 | bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, fp->cid, BP_FUNC(bp), | ||
5076 | bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata), | ||
5077 | q_type); | ||
5078 | |||
5079 | /** | ||
5080 | * Configure classification DBs: Always enable Tx switching | ||
5081 | */ | ||
5082 | bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX); | ||
5083 | |||
4469 | DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) " | 5084 | DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) " |
4470 | "cl_id %d fw_sb %d igu_sb %d\n", | 5085 | "cl_id %d fw_sb %d igu_sb %d\n", |
4471 | fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id, | 5086 | fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, |
4472 | fp->igu_sb_id); | 5087 | fp->igu_sb_id); |
4473 | bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, | 5088 | bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, |
4474 | fp->fw_sb_id, fp->igu_sb_id); | 5089 | fp->fw_sb_id, fp->igu_sb_id); |
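bnx2x_init_fp() now also binds each fastpath to a queue state object: q_type is a bitmap of BNX2X_Q_TYPE_HAS_RX/BNX2X_Q_TYPE_HAS_TX, and bnx2x_init_queue_obj() ties the client ID, CID and slow-path ramrod data together. A hypothetical variant for a tx-only client, shown purely to illustrate the q_type bitmap; no such client is created in this hunk:

	/* Illustrative: a tx-only queue object would simply omit the RX bit. */
	static void bnx2x_init_txonly_q_obj_sketch(struct bnx2x *bp,
						   struct bnx2x_fastpath *fp)
	{
		unsigned long q_type = 0;

		__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

		bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, fp->cid,
				     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
				     bnx2x_sp_mapping(bp, q_rdata), q_type);
	}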
@@ -4481,17 +5096,21 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) | |||
4481 | int i; | 5096 | int i; |
4482 | 5097 | ||
4483 | for_each_eth_queue(bp, i) | 5098 | for_each_eth_queue(bp, i) |
4484 | bnx2x_init_fp_sb(bp, i); | 5099 | bnx2x_init_fp(bp, i); |
4485 | #ifdef BCM_CNIC | 5100 | #ifdef BCM_CNIC |
4486 | if (!NO_FCOE(bp)) | 5101 | if (!NO_FCOE(bp)) |
4487 | bnx2x_init_fcoe_fp(bp); | 5102 | bnx2x_init_fcoe_fp(bp); |
4488 | 5103 | ||
4489 | bnx2x_init_sb(bp, bp->cnic_sb_mapping, | 5104 | bnx2x_init_sb(bp, bp->cnic_sb_mapping, |
4490 | BNX2X_VF_ID_INVALID, false, | 5105 | BNX2X_VF_ID_INVALID, false, |
4491 | CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp)); | 5106 | bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); |
4492 | 5107 | ||
4493 | #endif | 5108 | #endif |
4494 | 5109 | ||
5110 | /* Initialize MOD_ABS interrupts */ | ||
5111 | bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, | ||
5112 | bp->common.shmem_base, bp->common.shmem2_base, | ||
5113 | BP_PORT(bp)); | ||
4495 | /* ensure status block indices were read */ | 5114 | /* ensure status block indices were read */ |
4496 | rmb(); | 5115 | rmb(); |
4497 | 5116 | ||
@@ -4503,12 +5122,8 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) | |||
4503 | bnx2x_init_eq_ring(bp); | 5122 | bnx2x_init_eq_ring(bp); |
4504 | bnx2x_init_internal(bp, load_code); | 5123 | bnx2x_init_internal(bp, load_code); |
4505 | bnx2x_pf_init(bp); | 5124 | bnx2x_pf_init(bp); |
4506 | bnx2x_init_ind_table(bp); | ||
4507 | bnx2x_stats_init(bp); | 5125 | bnx2x_stats_init(bp); |
4508 | 5126 | ||
4509 | /* At this point, we are ready for interrupts */ | ||
4510 | atomic_set(&bp->intr_sem, 0); | ||
4511 | |||
4512 | /* flush all before enabling interrupts */ | 5127 | /* flush all before enabling interrupts */ |
4513 | mb(); | 5128 | mb(); |
4514 | mmiowb(); | 5129 | mmiowb(); |
@@ -4711,8 +5326,8 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
4711 | msleep(50); | 5326 | msleep(50); |
4712 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); | 5327 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); |
4713 | msleep(50); | 5328 | msleep(50); |
4714 | bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); | 5329 | bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); |
4715 | bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); | 5330 | bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); |
4716 | 5331 | ||
4717 | DP(NETIF_MSG_HW, "part2\n"); | 5332 | DP(NETIF_MSG_HW, "part2\n"); |
4718 | 5333 | ||
@@ -4776,8 +5391,8 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
4776 | msleep(50); | 5391 | msleep(50); |
4777 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); | 5392 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); |
4778 | msleep(50); | 5393 | msleep(50); |
4779 | bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); | 5394 | bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); |
4780 | bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); | 5395 | bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); |
4781 | #ifndef BCM_CNIC | 5396 | #ifndef BCM_CNIC |
4782 | /* set NIC mode */ | 5397 | /* set NIC mode */ |
4783 | REG_WR(bp, PRS_REG_NIC_MODE, 1); | 5398 | REG_WR(bp, PRS_REG_NIC_MODE, 1); |
@@ -4797,7 +5412,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
4797 | static void bnx2x_enable_blocks_attention(struct bnx2x *bp) | 5412 | static void bnx2x_enable_blocks_attention(struct bnx2x *bp) |
4798 | { | 5413 | { |
4799 | REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); | 5414 | REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); |
4800 | if (CHIP_IS_E2(bp)) | 5415 | if (!CHIP_IS_E1x(bp)) |
4801 | REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); | 5416 | REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); |
4802 | else | 5417 | else |
4803 | REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); | 5418 | REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); |
@@ -4831,7 +5446,7 @@ static void bnx2x_enable_blocks_attention(struct bnx2x *bp) | |||
4831 | 5446 | ||
4832 | if (CHIP_REV_IS_FPGA(bp)) | 5447 | if (CHIP_REV_IS_FPGA(bp)) |
4833 | REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); | 5448 | REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); |
4834 | else if (CHIP_IS_E2(bp)) | 5449 | else if (!CHIP_IS_E1x(bp)) |
4835 | REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, | 5450 | REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, |
4836 | (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 5451 | (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
4837 | | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 5452 | | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
@@ -4844,7 +5459,11 @@ static void bnx2x_enable_blocks_attention(struct bnx2x *bp) | |||
4844 | REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); | 5459 | REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); |
4845 | REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); | 5460 | REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); |
4846 | /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ | 5461 | /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ |
4847 | /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */ | 5462 | |
5463 | if (!CHIP_IS_E1x(bp)) | ||
5464 | /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ | ||
5465 | REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); | ||
5466 | |||
4848 | REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); | 5467 | REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); |
4849 | REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); | 5468 | REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); |
4850 | /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ | 5469 | /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ |
@@ -4853,10 +5472,24 @@ static void bnx2x_enable_blocks_attention(struct bnx2x *bp) | |||
4853 | 5472 | ||
4854 | static void bnx2x_reset_common(struct bnx2x *bp) | 5473 | static void bnx2x_reset_common(struct bnx2x *bp) |
4855 | { | 5474 | { |
5475 | u32 val = 0x1400; | ||
5476 | |||
4856 | /* reset_common */ | 5477 | /* reset_common */ |
4857 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | 5478 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, |
4858 | 0xd3ffff7f); | 5479 | 0xd3ffff7f); |
4859 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403); | 5480 | |
5481 | if (CHIP_IS_E3(bp)) { | ||
5482 | val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; | ||
5483 | val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; | ||
5484 | } | ||
5485 | |||
5486 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val); | ||
5487 | } | ||
5488 | |||
5489 | static void bnx2x_setup_dmae(struct bnx2x *bp) | ||
5490 | { | ||
5491 | bp->dmae_ready = 0; | ||
5492 | spin_lock_init(&bp->dmae_lock); | ||
4860 | } | 5493 | } |
4861 | 5494 | ||
4862 | static void bnx2x_init_pxp(struct bnx2x *bp) | 5495 | static void bnx2x_init_pxp(struct bnx2x *bp) |
@@ -4865,7 +5498,7 @@ static void bnx2x_init_pxp(struct bnx2x *bp) | |||
4865 | int r_order, w_order; | 5498 | int r_order, w_order; |
4866 | 5499 | ||
4867 | pci_read_config_word(bp->pdev, | 5500 | pci_read_config_word(bp->pdev, |
4868 | bp->pcie_cap + PCI_EXP_DEVCTL, &devctl); | 5501 | bp->pdev->pcie_cap + PCI_EXP_DEVCTL, &devctl); |
4869 | DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); | 5502 | DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); |
4870 | w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); | 5503 | w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); |
4871 | if (bp->mrrs == -1) | 5504 | if (bp->mrrs == -1) |
@@ -4973,7 +5606,7 @@ static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num) | |||
4973 | DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num); | 5606 | DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num); |
4974 | } | 5607 | } |
4975 | 5608 | ||
4976 | static void bnx2x_pf_disable(struct bnx2x *bp) | 5609 | void bnx2x_pf_disable(struct bnx2x *bp) |
4977 | { | 5610 | { |
4978 | u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); | 5611 | u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); |
4979 | val &= ~IGU_PF_CONF_FUNC_EN; | 5612 | val &= ~IGU_PF_CONF_FUNC_EN; |
@@ -4983,22 +5616,48 @@ static void bnx2x_pf_disable(struct bnx2x *bp) | |||
4983 | REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); | 5616 | REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); |
4984 | } | 5617 | } |
4985 | 5618 | ||
4986 | static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | 5619 | static inline void bnx2x__common_init_phy(struct bnx2x *bp) |
4987 | { | 5620 | { |
4988 | u32 val, i; | 5621 | u32 shmem_base[2], shmem2_base[2]; |
5622 | shmem_base[0] = bp->common.shmem_base; | ||
5623 | shmem2_base[0] = bp->common.shmem2_base; | ||
5624 | if (!CHIP_IS_E1x(bp)) { | ||
5625 | shmem_base[1] = | ||
5626 | SHMEM2_RD(bp, other_shmem_base_addr); | ||
5627 | shmem2_base[1] = | ||
5628 | SHMEM2_RD(bp, other_shmem2_base_addr); | ||
5629 | } | ||
5630 | bnx2x_acquire_phy_lock(bp); | ||
5631 | bnx2x_common_init_phy(bp, shmem_base, shmem2_base, | ||
5632 | bp->common.chip_id); | ||
5633 | bnx2x_release_phy_lock(bp); | ||
5634 | } | ||
5635 | |||
5636 | /** | ||
5637 | * bnx2x_init_hw_common - initialize the HW at the COMMON phase. | ||
5638 | * | ||
5639 | * @bp: driver handle | ||
5640 | */ | ||
5641 | static int bnx2x_init_hw_common(struct bnx2x *bp) | ||
5642 | { | ||
5643 | u32 val; | ||
4989 | 5644 | ||
4990 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); | 5645 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); |
4991 | 5646 | ||
4992 | bnx2x_reset_common(bp); | 5647 | bnx2x_reset_common(bp); |
4993 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); | 5648 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); |
4994 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); | ||
4995 | 5649 | ||
4996 | bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE); | 5650 | val = 0xfffc; |
4997 | if (!CHIP_IS_E1(bp)) | 5651 | if (CHIP_IS_E3(bp)) { |
4998 | REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp)); | 5652 | val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; |
5653 | val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; | ||
5654 | } | ||
5655 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); | ||
4999 | 5656 | ||
5000 | if (CHIP_IS_E2(bp)) { | 5657 | bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); |
5001 | u8 fid; | 5658 | |
5659 | if (!CHIP_IS_E1x(bp)) { | ||
5660 | u8 abs_func_id; | ||
5002 | 5661 | ||
5003 | /** | 5662 | /** |
5004 | * 4-port mode or 2-port mode we need to turn of master-enable | 5663 | * 4-port mode or 2-port mode we need to turn of master-enable |
@@ -5007,29 +5666,30 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
5007 | * for all functions on the given path, this means 0,2,4,6 for | 5666 | * for all functions on the given path, this means 0,2,4,6 for |
5008 | * path 0 and 1,3,5,7 for path 1 | 5667 | * path 0 and 1,3,5,7 for path 1 |
5009 | */ | 5668 | */ |
5010 | for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) { | 5669 | for (abs_func_id = BP_PATH(bp); |
5011 | if (fid == BP_ABS_FUNC(bp)) { | 5670 | abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) { |
5671 | if (abs_func_id == BP_ABS_FUNC(bp)) { | ||
5012 | REG_WR(bp, | 5672 | REG_WR(bp, |
5013 | PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, | 5673 | PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, |
5014 | 1); | 5674 | 1); |
5015 | continue; | 5675 | continue; |
5016 | } | 5676 | } |
5017 | 5677 | ||
5018 | bnx2x_pretend_func(bp, fid); | 5678 | bnx2x_pretend_func(bp, abs_func_id); |
5019 | /* clear pf enable */ | 5679 | /* clear pf enable */ |
5020 | bnx2x_pf_disable(bp); | 5680 | bnx2x_pf_disable(bp); |
5021 | bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); | 5681 | bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); |
5022 | } | 5682 | } |
5023 | } | 5683 | } |
5024 | 5684 | ||
5025 | bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE); | 5685 | bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON); |
5026 | if (CHIP_IS_E1(bp)) { | 5686 | if (CHIP_IS_E1(bp)) { |
5027 | /* enable HW interrupt from PXP on USDM overflow | 5687 | /* enable HW interrupt from PXP on USDM overflow |
5028 | bit 16 on INT_MASK_0 */ | 5688 | bit 16 on INT_MASK_0 */ |
5029 | REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); | 5689 | REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); |
5030 | } | 5690 | } |
5031 | 5691 | ||
5032 | bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE); | 5692 | bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); |
5033 | bnx2x_init_pxp(bp); | 5693 | bnx2x_init_pxp(bp); |
5034 | 5694 | ||
5035 | #ifdef __BIG_ENDIAN | 5695 | #ifdef __BIG_ENDIAN |
@@ -5072,7 +5732,69 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
5072 | * This needs to be done by the first PF that is loaded in a path | 5732 | * This needs to be done by the first PF that is loaded in a path |
5073 | * (i.e. common phase) | 5733 | * (i.e. common phase) |
5074 | */ | 5734 | */ |
5075 | if (CHIP_IS_E2(bp)) { | 5735 | if (!CHIP_IS_E1x(bp)) { |
5736 | /* In E2 there is a bug in the timers block that can cause function 6 / 7 | ||
5737 | * (i.e. vnic3) to start even if it is marked as "scan-off". | ||
5738 | * This occurs when a different function (func2,3) is being marked | ||
5739 | * as "scan-off". Real-life scenario for example: if a driver is being | ||
5740 | * load-unloaded while func6,7 are down. This will cause the timer to access | ||
5741 | * the ilt, translate to a logical address and send a request to read/write. | ||
5742 | * Since the ilt for the function that is down is not valid, this will cause | ||
5743 | * a translation error which is unrecoverable. | ||
5744 | * The Workaround is intended to make sure that when this happens nothing fatal | ||
5745 | * will occur. The workaround: | ||
5746 | * 1. First PF driver which loads on a path will: | ||
5747 | * a. After taking the chip out of reset, by using pretend, | ||
5748 | * it will write "0" to the following registers of | ||
5749 | * the other vnics. | ||
5750 | * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); | ||
5751 | * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); | ||
5752 | * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); | ||
5753 | * And for itself it will write '1' to | ||
5754 | * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable | ||
5755 | * dmae-operations (writing to pram for example.) | ||
5756 | * note: can be done for only function 6,7 but cleaner this | ||
5757 | * way. | ||
5758 | * b. Write zero+valid to the entire ILT. | ||
5759 | * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of | ||
5760 | * VNIC3 (of that port). The range allocated will be the | ||
5761 | * entire ILT. This is needed to prevent ILT range error. | ||
5762 | * 2. Any PF driver load flow: | ||
5763 | * a. ILT update with the physical addresses of the allocated | ||
5764 | * logical pages. | ||
5765 | * b. Wait 20msec. - note that this timeout is needed to make | ||
5766 | * sure there are no requests in one of the PXP internal | ||
5767 | * queues with "old" ILT addresses. | ||
5768 | * c. PF enable in the PGLC. | ||
5769 | * d. Clear the was_error of the PF in the PGLC. (could have | ||
5770 | * occurred while driver was down) | ||
5771 | * e. PF enable in the CFC (WEAK + STRONG) | ||
5772 | * f. Timers scan enable | ||
5773 | * 3. PF driver unload flow: | ||
5774 | * a. Clear the Timers scan_en. | ||
5775 | * b. Polling for scan_on=0 for that PF. | ||
5776 | * c. Clear the PF enable bit in the PXP. | ||
5777 | * d. Clear the PF enable in the CFC (WEAK + STRONG) | ||
5778 | * e. Write zero+valid to all ILT entries (The valid bit must | ||
5779 | * stay set) | ||
5780 | * f. If this is VNIC 3 of a port then also init | ||
5781 | * first_timers_ilt_entry to zero and last_timers_ilt_entry | ||
5782 | * to the last entry in the ILT. | ||
5783 | * | ||
5784 | * Notes: | ||
5785 | * Currently the PF error in the PGLC is non recoverable. | ||
5786 | * In the future there will be a recovery routine for this error. | ||
5787 | * Currently attention is masked. | ||
5788 | * Having an MCP lock on the load/unload process does not guarantee that | ||
5789 | * there is no Timer disable during Func6/7 enable. This is because the | ||
5790 | * Timers scan is currently being cleared by the MCP on FLR. | ||
5791 | * Step 2.d can be done only for PF6/7 and the driver can also check if | ||
5792 | * there is error before clearing it. But the flow above is simpler and | ||
5793 | * more general. | ||
5794 | * All ILT entries are written by zero+valid and not just PF6/7 | ||
5795 | * ILT entries since in the future the ILT entries allocation for | ||
5796 | * PF-s might be dynamic. | ||
5797 | */ | ||
5076 | struct ilt_client_info ilt_cli; | 5798 | struct ilt_client_info ilt_cli; |
5077 | struct bnx2x_ilt ilt; | 5799 | struct bnx2x_ilt ilt; |
5078 | memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); | 5800 | memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); |
@@ -5086,7 +5808,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
5086 | /* Step 1: set zeroes to all ilt page entries with valid bit on | 5808 | /* Step 1: set zeroes to all ilt page entries with valid bit on |
5087 | * Step 2: set the timers first/last ilt entry to point | 5809 | * Step 2: set the timers first/last ilt entry to point |
5088 | * to the entire range to prevent ILT range error for 3rd/4th | 5810 | * to the entire range to prevent ILT range error for 3rd/4th |
5089 | * vnic (this code assumes existence of the vnic) | 5811 | * vnic (this code assumes existence of the vnic) |
5090 | * | 5812 | * |
5091 | * both steps performed by call to bnx2x_ilt_client_init_op() | 5813 | * both steps performed by call to bnx2x_ilt_client_init_op() |
5092 | * with dummy TM client | 5814 | * with dummy TM client |
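For orientation, step 2 of the comment above (the per-PF load flow) condenses to roughly the sequence below. This is only an illustration assembled from the steps as written; the real writes are spread across the port and function init phases, and the timers-scan enable goes through its own TM register, which is not shown in this hunk:

    /* 2.a: ILT already updated with the real physical addresses */
    msleep(20);                                               /* 2.b: drain stale ILT requests */
    REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);   /* 2.c: PF enable in PGLC */
    REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);       /* 2.d: clear stale was_error */
    REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);                    /* 2.e: CFC weak enable */
    REG_WR(bp, CFC_REG_STRONG_ENABLE_PF, 1);                  /* 2.e: CFC strong enable */
    /* 2.f: timers scan enable follows last */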
@@ -5107,12 +5829,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
5107 | REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); | 5829 | REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); |
5108 | REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); | 5830 | REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); |
5109 | 5831 | ||
5110 | if (CHIP_IS_E2(bp)) { | 5832 | if (!CHIP_IS_E1x(bp)) { |
5111 | int factor = CHIP_REV_IS_EMUL(bp) ? 1000 : | 5833 | int factor = CHIP_REV_IS_EMUL(bp) ? 1000 : |
5112 | (CHIP_REV_IS_FPGA(bp) ? 400 : 0); | 5834 | (CHIP_REV_IS_FPGA(bp) ? 400 : 0); |
5113 | bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE); | 5835 | bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON); |
5114 | 5836 | ||
5115 | bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE); | 5837 | bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON); |
5116 | 5838 | ||
5117 | /* let the HW do its magic ... */ | 5839 | /* let the HW do its magic ... */ |
5118 | do { | 5840 | do { |
@@ -5126,26 +5848,27 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
5126 | } | 5848 | } |
5127 | } | 5849 | } |
5128 | 5850 | ||
5129 | bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE); | 5851 | bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); |
5130 | 5852 | ||
5131 | /* clean the DMAE memory */ | 5853 | /* clean the DMAE memory */ |
5132 | bp->dmae_ready = 1; | 5854 | bp->dmae_ready = 1; |
5133 | bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8); | 5855 | bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); |
5856 | |||
5857 | bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON); | ||
5134 | 5858 | ||
5135 | bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE); | 5859 | bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON); |
5136 | bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE); | 5860 | |
5137 | bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE); | 5861 | bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON); |
5138 | bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE); | 5862 | |
5863 | bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON); | ||
5139 | 5864 | ||
5140 | bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); | 5865 | bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); |
5141 | bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); | 5866 | bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); |
5142 | bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); | 5867 | bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); |
5143 | bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); | 5868 | bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); |
5144 | 5869 | ||
5145 | bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); | 5870 | bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); |
5146 | 5871 | ||
5147 | if (CHIP_MODE_IS_4_PORT(bp)) | ||
5148 | bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE); | ||
5149 | 5872 | ||
5150 | /* QM queues pointers table */ | 5873 | /* QM queues pointers table */ |
5151 | bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); | 5874 | bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); |
@@ -5155,57 +5878,51 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
5155 | REG_WR(bp, QM_REG_SOFT_RESET, 0); | 5878 | REG_WR(bp, QM_REG_SOFT_RESET, 0); |
5156 | 5879 | ||
5157 | #ifdef BCM_CNIC | 5880 | #ifdef BCM_CNIC |
5158 | bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE); | 5881 | bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); |
5159 | #endif | 5882 | #endif |
5160 | 5883 | ||
5161 | bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE); | 5884 | bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); |
5162 | REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); | 5885 | REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); |
5163 | 5886 | if (!CHIP_REV_IS_SLOW(bp)) | |
5164 | if (!CHIP_REV_IS_SLOW(bp)) { | ||
5165 | /* enable hw interrupt from doorbell Q */ | 5887 | /* enable hw interrupt from doorbell Q */ |
5166 | REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); | 5888 | REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); |
5167 | } | ||
5168 | 5889 | ||
5169 | bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); | 5890 | bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); |
5170 | if (CHIP_MODE_IS_4_PORT(bp)) { | ||
5171 | REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248); | ||
5172 | REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328); | ||
5173 | } | ||
5174 | 5891 | ||
5175 | bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); | 5892 | bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); |
5176 | REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); | 5893 | REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); |
5177 | #ifndef BCM_CNIC | 5894 | |
5178 | /* set NIC mode */ | ||
5179 | REG_WR(bp, PRS_REG_NIC_MODE, 1); | ||
5180 | #endif | ||
5181 | if (!CHIP_IS_E1(bp)) | 5895 | if (!CHIP_IS_E1(bp)) |
5182 | REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp)); | 5896 | REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); |
5183 | 5897 | ||
5184 | if (CHIP_IS_E2(bp)) { | 5898 | if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) |
5185 | /* Bit-map indicating which L2 hdrs may appear after the | 5899 | /* Bit-map indicating which L2 hdrs may appear |
5186 | basic Ethernet header */ | 5900 | * after the basic Ethernet header |
5187 | int has_ovlan = IS_MF_SD(bp); | 5901 | */ |
5188 | REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6)); | 5902 | REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, |
5189 | REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0)); | 5903 | bp->path_has_ovlan ? 7 : 6); |
5190 | } | ||
5191 | 5904 | ||
5192 | bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE); | 5905 | bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); |
5193 | bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE); | 5906 | bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); |
5194 | bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE); | 5907 | bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON); |
5195 | bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE); | 5908 | bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON); |
5196 | 5909 | ||
5197 | bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); | 5910 | if (!CHIP_IS_E1x(bp)) { |
5198 | bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); | 5911 | /* reset VFC memories */ |
5199 | bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); | 5912 | REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, |
5200 | bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); | 5913 | VFC_MEMORIES_RST_REG_CAM_RST | |
5914 | VFC_MEMORIES_RST_REG_RAM_RST); | ||
5915 | REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, | ||
5916 | VFC_MEMORIES_RST_REG_CAM_RST | | ||
5917 | VFC_MEMORIES_RST_REG_RAM_RST); | ||
5201 | 5918 | ||
5202 | bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE); | 5919 | msleep(20); |
5203 | bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE); | 5920 | } |
5204 | bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE); | ||
5205 | bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE); | ||
5206 | 5921 | ||
5207 | if (CHIP_MODE_IS_4_PORT(bp)) | 5922 | bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON); |
5208 | bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE); | 5923 | bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON); |
5924 | bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON); | ||
5925 | bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON); | ||
5209 | 5926 | ||
5210 | /* sync semi rtc */ | 5927 | /* sync semi rtc */ |
5211 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | 5928 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, |
@@ -5213,21 +5930,18 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
5213 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, | 5930 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, |
5214 | 0x80000000); | 5931 | 0x80000000); |
5215 | 5932 | ||
5216 | bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE); | 5933 | bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON); |
5217 | bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE); | 5934 | bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); |
5218 | bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE); | 5935 | bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); |
5219 | 5936 | ||
5220 | if (CHIP_IS_E2(bp)) { | 5937 | if (!CHIP_IS_E1x(bp)) |
5221 | int has_ovlan = IS_MF_SD(bp); | 5938 | REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, |
5222 | REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6)); | 5939 | bp->path_has_ovlan ? 7 : 6); |
5223 | REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0)); | ||
5224 | } | ||
5225 | 5940 | ||
5226 | REG_WR(bp, SRC_REG_SOFT_RST, 1); | 5941 | REG_WR(bp, SRC_REG_SOFT_RST, 1); |
5227 | for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) | ||
5228 | REG_WR(bp, i, random32()); | ||
5229 | 5942 | ||
5230 | bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE); | 5943 | bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); |
5944 | |||
5231 | #ifdef BCM_CNIC | 5945 | #ifdef BCM_CNIC |
5232 | REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); | 5946 | REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); |
5233 | REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); | 5947 | REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); |
@@ -5248,11 +5962,11 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
5248 | "of cdu_context(%ld)\n", | 5962 | "of cdu_context(%ld)\n", |
5249 | (long)sizeof(union cdu_context)); | 5963 | (long)sizeof(union cdu_context)); |
5250 | 5964 | ||
5251 | bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE); | 5965 | bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); |
5252 | val = (4 << 24) + (0 << 12) + 1024; | 5966 | val = (4 << 24) + (0 << 12) + 1024; |
5253 | REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); | 5967 | REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); |
5254 | 5968 | ||
5255 | bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE); | 5969 | bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); |
5256 | REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); | 5970 | REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); |
5257 | /* enable context validation interrupt from CFC */ | 5971 | /* enable context validation interrupt from CFC */ |
5258 | REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); | 5972 | REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); |
@@ -5260,20 +5974,19 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
5260 | /* set the thresholds to prevent CFC/CDU race */ | 5974 | /* set the thresholds to prevent CFC/CDU race */ |
5261 | REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); | 5975 | REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); |
5262 | 5976 | ||
5263 | bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE); | 5977 | bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); |
5264 | 5978 | ||
5265 | if (CHIP_IS_E2(bp) && BP_NOMCP(bp)) | 5979 | if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) |
5266 | REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); | 5980 | REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); |
5267 | 5981 | ||
5268 | bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE); | 5982 | bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); |
5269 | bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE); | 5983 | bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); |
5270 | 5984 | ||
5271 | bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE); | ||
5272 | /* Reset PCIE errors for debug */ | 5985 | /* Reset PCIE errors for debug */ |
5273 | REG_WR(bp, 0x2814, 0xffffffff); | 5986 | REG_WR(bp, 0x2814, 0xffffffff); |
5274 | REG_WR(bp, 0x3820, 0xffffffff); | 5987 | REG_WR(bp, 0x3820, 0xffffffff); |
5275 | 5988 | ||
5276 | if (CHIP_IS_E2(bp)) { | 5989 | if (!CHIP_IS_E1x(bp)) { |
5277 | REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, | 5990 | REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, |
5278 | (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | | 5991 | (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | |
5279 | PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); | 5992 | PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); |
@@ -5287,21 +6000,15 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
5287 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); | 6000 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); |
5288 | } | 6001 | } |
5289 | 6002 | ||
5290 | bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE); | 6003 | bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON); |
5291 | bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE); | ||
5292 | bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE); | ||
5293 | bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE); | ||
5294 | |||
5295 | bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE); | ||
5296 | if (!CHIP_IS_E1(bp)) { | 6004 | if (!CHIP_IS_E1(bp)) { |
5297 | REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); | 6005 | /* in E3 this is done in per-port section */ |
5298 | REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); | 6006 | if (!CHIP_IS_E3(bp)) |
5299 | } | 6007 | REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); |
5300 | if (CHIP_IS_E2(bp)) { | ||
5301 | /* Bit-map indicating which L2 hdrs may appear after the | ||
5302 | basic Ethernet header */ | ||
5303 | REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6)); | ||
5304 | } | 6008 | } |
6009 | if (CHIP_IS_E1H(bp)) | ||
6010 | /* not applicable for E2 (and above ...) */ | ||
6011 | REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); | ||
5305 | 6012 | ||
5306 | if (CHIP_REV_IS_SLOW(bp)) | 6013 | if (CHIP_REV_IS_SLOW(bp)) |
5307 | msleep(200); | 6014 | msleep(200); |
@@ -5343,127 +6050,136 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
5343 | REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); | 6050 | REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); |
5344 | 6051 | ||
5345 | bnx2x_enable_blocks_attention(bp); | 6052 | bnx2x_enable_blocks_attention(bp); |
5346 | if (CHIP_PARITY_ENABLED(bp)) | 6053 | bnx2x_enable_blocks_parity(bp); |
5347 | bnx2x_enable_blocks_parity(bp); | ||
5348 | 6054 | ||
5349 | if (!BP_NOMCP(bp)) { | 6055 | if (!BP_NOMCP(bp)) { |
5350 | /* In E2 2-PORT mode, same ext phy is used for the two paths */ | 6056 | if (CHIP_IS_E1x(bp)) |
5351 | if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || | 6057 | bnx2x__common_init_phy(bp); |
5352 | CHIP_IS_E1x(bp)) { | ||
5353 | u32 shmem_base[2], shmem2_base[2]; | ||
5354 | shmem_base[0] = bp->common.shmem_base; | ||
5355 | shmem2_base[0] = bp->common.shmem2_base; | ||
5356 | if (CHIP_IS_E2(bp)) { | ||
5357 | shmem_base[1] = | ||
5358 | SHMEM2_RD(bp, other_shmem_base_addr); | ||
5359 | shmem2_base[1] = | ||
5360 | SHMEM2_RD(bp, other_shmem2_base_addr); | ||
5361 | } | ||
5362 | bnx2x_acquire_phy_lock(bp); | ||
5363 | bnx2x_common_init_phy(bp, shmem_base, shmem2_base, | ||
5364 | bp->common.chip_id); | ||
5365 | bnx2x_release_phy_lock(bp); | ||
5366 | } | ||
5367 | } else | 6058 | } else |
5368 | BNX2X_ERR("Bootcode is missing - can not initialize link\n"); | 6059 | BNX2X_ERR("Bootcode is missing - can not initialize link\n"); |
5369 | 6060 | ||
5370 | return 0; | 6061 | return 0; |
5371 | } | 6062 | } |
5372 | 6063 | ||
6064 | /** | ||
6065 | * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. | ||
6066 | * | ||
6067 | * @bp: driver handle | ||
6068 | */ | ||
6069 | static int bnx2x_init_hw_common_chip(struct bnx2x *bp) | ||
6070 | { | ||
6071 | int rc = bnx2x_init_hw_common(bp); | ||
6072 | |||
6073 | if (rc) | ||
6074 | return rc; | ||
6075 | |||
6076 | /* In E2 2-PORT mode, same ext phy is used for the two paths */ | ||
6077 | if (!BP_NOMCP(bp)) | ||
6078 | bnx2x__common_init_phy(bp); | ||
6079 | |||
6080 | return 0; | ||
6081 | } | ||
6082 | |||
5373 | static int bnx2x_init_hw_port(struct bnx2x *bp) | 6083 | static int bnx2x_init_hw_port(struct bnx2x *bp) |
5374 | { | 6084 | { |
5375 | int port = BP_PORT(bp); | 6085 | int port = BP_PORT(bp); |
5376 | int init_stage = port ? PORT1_STAGE : PORT0_STAGE; | 6086 | int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; |
5377 | u32 low, high; | 6087 | u32 low, high; |
5378 | u32 val; | 6088 | u32 val; |
5379 | 6089 | ||
6090 | bnx2x__link_reset(bp); | ||
6091 | |||
5380 | DP(BNX2X_MSG_MCP, "starting port init port %d\n", port); | 6092 | DP(BNX2X_MSG_MCP, "starting port init port %d\n", port); |
5381 | 6093 | ||
5382 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); | 6094 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); |
5383 | 6095 | ||
5384 | bnx2x_init_block(bp, PXP_BLOCK, init_stage); | 6096 | bnx2x_init_block(bp, BLOCK_MISC, init_phase); |
5385 | bnx2x_init_block(bp, PXP2_BLOCK, init_stage); | 6097 | bnx2x_init_block(bp, BLOCK_PXP, init_phase); |
6098 | bnx2x_init_block(bp, BLOCK_PXP2, init_phase); | ||
5386 | 6099 | ||
5387 | /* Timers bug workaround: disables the pf_master bit in pglue at | 6100 | /* Timers bug workaround: disables the pf_master bit in pglue at |
5388 | * common phase, we need to enable it here before any dmae access are | 6101 | * common phase, we need to enable it here before any dmae access are |
5389 | * attempted. Therefore we manually added the enable-master to the | 6102 | * attempted. Therefore we manually added the enable-master to the |
5390 | * port phase (it also happens in the function phase) | 6103 | * port phase (it also happens in the function phase) |
5391 | */ | 6104 | */ |
5392 | if (CHIP_IS_E2(bp)) | 6105 | if (!CHIP_IS_E1x(bp)) |
5393 | REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); | 6106 | REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); |
5394 | 6107 | ||
5395 | bnx2x_init_block(bp, TCM_BLOCK, init_stage); | 6108 | bnx2x_init_block(bp, BLOCK_ATC, init_phase); |
5396 | bnx2x_init_block(bp, UCM_BLOCK, init_stage); | 6109 | bnx2x_init_block(bp, BLOCK_DMAE, init_phase); |
5397 | bnx2x_init_block(bp, CCM_BLOCK, init_stage); | 6110 | bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); |
5398 | bnx2x_init_block(bp, XCM_BLOCK, init_stage); | 6111 | bnx2x_init_block(bp, BLOCK_QM, init_phase); |
6112 | |||
6113 | bnx2x_init_block(bp, BLOCK_TCM, init_phase); | ||
6114 | bnx2x_init_block(bp, BLOCK_UCM, init_phase); | ||
6115 | bnx2x_init_block(bp, BLOCK_CCM, init_phase); | ||
6116 | bnx2x_init_block(bp, BLOCK_XCM, init_phase); | ||
5399 | 6117 | ||
5400 | /* QM cid (connection) count */ | 6118 | /* QM cid (connection) count */ |
5401 | bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); | 6119 | bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); |
5402 | 6120 | ||
5403 | #ifdef BCM_CNIC | 6121 | #ifdef BCM_CNIC |
5404 | bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); | 6122 | bnx2x_init_block(bp, BLOCK_TM, init_phase); |
5405 | REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); | 6123 | REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); |
5406 | REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); | 6124 | REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); |
5407 | #endif | 6125 | #endif |
5408 | 6126 | ||
5409 | bnx2x_init_block(bp, DQ_BLOCK, init_stage); | 6127 | bnx2x_init_block(bp, BLOCK_DORQ, init_phase); |
5410 | |||
5411 | if (CHIP_MODE_IS_4_PORT(bp)) | ||
5412 | bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage); | ||
5413 | 6128 | ||
5414 | if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { | 6129 | if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { |
5415 | bnx2x_init_block(bp, BRB1_BLOCK, init_stage); | 6130 | bnx2x_init_block(bp, BLOCK_BRB1, init_phase); |
5416 | if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) { | 6131 | |
5417 | /* no pause for emulation and FPGA */ | 6132 | if (IS_MF(bp)) |
5418 | low = 0; | 6133 | low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); |
5419 | high = 513; | 6134 | else if (bp->dev->mtu > 4096) { |
5420 | } else { | 6135 | if (bp->flags & ONE_PORT_FLAG) |
5421 | if (IS_MF(bp)) | 6136 | low = 160; |
5422 | low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); | 6137 | else { |
5423 | else if (bp->dev->mtu > 4096) { | 6138 | val = bp->dev->mtu; |
5424 | if (bp->flags & ONE_PORT_FLAG) | 6139 | /* (24*1024 + val*4)/256 */ |
5425 | low = 160; | 6140 | low = 96 + (val/64) + |
5426 | else { | 6141 | ((val % 64) ? 1 : 0); |
5427 | val = bp->dev->mtu; | 6142 | } |
5428 | /* (24*1024 + val*4)/256 */ | 6143 | } else |
5429 | low = 96 + (val/64) + | 6144 | low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); |
5430 | ((val % 64) ? 1 : 0); | 6145 | high = low + 56; /* 14*1024/256 */ |
5431 | } | ||
5432 | } else | ||
5433 | low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); | ||
5434 | high = low + 56; /* 14*1024/256 */ | ||
5435 | } | ||
5436 | REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); | 6146 | REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); |
5437 | REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); | 6147 | REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); |
5438 | } | 6148 | } |
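As a quick sanity check of the low-threshold formula above (illustrative numbers only): for mtu = 9000, (24*1024 + 9000*4)/256 = 60576/256 = 236.6, and the integer form gives 96 + 9000/64 + 1 = 96 + 140 + 1 = 237, i.e. the same value rounded up; high then becomes 237 + 56 = 293, the 56 being 14*1024/256.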
5439 | 6149 | ||
5440 | if (CHIP_MODE_IS_4_PORT(bp)) { | 6150 | if (CHIP_MODE_IS_4_PORT(bp)) |
5441 | REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248); | 6151 | REG_WR(bp, (BP_PORT(bp) ? |
5442 | REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328); | 6152 | BRB1_REG_MAC_GUARANTIED_1 : |
5443 | REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 : | 6153 | BRB1_REG_MAC_GUARANTIED_0), 40); |
5444 | BRB1_REG_MAC_GUARANTIED_0), 40); | ||
5445 | } | ||
5446 | 6154 | ||
5447 | bnx2x_init_block(bp, PRS_BLOCK, init_stage); | ||
5448 | 6155 | ||
5449 | bnx2x_init_block(bp, TSDM_BLOCK, init_stage); | 6156 | bnx2x_init_block(bp, BLOCK_PRS, init_phase); |
5450 | bnx2x_init_block(bp, CSDM_BLOCK, init_stage); | 6157 | if (CHIP_IS_E3B0(bp)) |
5451 | bnx2x_init_block(bp, USDM_BLOCK, init_stage); | 6158 | /* Ovlan exists only if we are in multi-function + |
5452 | bnx2x_init_block(bp, XSDM_BLOCK, init_stage); | 6159 | * switch-dependent mode, in switch-independent there |
6160 | * is no ovlan headers | ||
6161 | */ | ||
6162 | REG_WR(bp, BP_PORT(bp) ? | ||
6163 | PRS_REG_HDRS_AFTER_BASIC_PORT_1 : | ||
6164 | PRS_REG_HDRS_AFTER_BASIC_PORT_0, | ||
6165 | (bp->path_has_ovlan ? 7 : 6)); | ||
5453 | 6166 | ||
5454 | bnx2x_init_block(bp, TSEM_BLOCK, init_stage); | 6167 | bnx2x_init_block(bp, BLOCK_TSDM, init_phase); |
5455 | bnx2x_init_block(bp, USEM_BLOCK, init_stage); | 6168 | bnx2x_init_block(bp, BLOCK_CSDM, init_phase); |
5456 | bnx2x_init_block(bp, CSEM_BLOCK, init_stage); | 6169 | bnx2x_init_block(bp, BLOCK_USDM, init_phase); |
5457 | bnx2x_init_block(bp, XSEM_BLOCK, init_stage); | 6170 | bnx2x_init_block(bp, BLOCK_XSDM, init_phase); |
5458 | if (CHIP_MODE_IS_4_PORT(bp)) | 6171 | |
5459 | bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage); | 6172 | bnx2x_init_block(bp, BLOCK_TSEM, init_phase); |
6173 | bnx2x_init_block(bp, BLOCK_USEM, init_phase); | ||
6174 | bnx2x_init_block(bp, BLOCK_CSEM, init_phase); | ||
6175 | bnx2x_init_block(bp, BLOCK_XSEM, init_phase); | ||
5460 | 6176 | ||
5461 | bnx2x_init_block(bp, UPB_BLOCK, init_stage); | 6177 | bnx2x_init_block(bp, BLOCK_UPB, init_phase); |
5462 | bnx2x_init_block(bp, XPB_BLOCK, init_stage); | 6178 | bnx2x_init_block(bp, BLOCK_XPB, init_phase); |
5463 | 6179 | ||
5464 | bnx2x_init_block(bp, PBF_BLOCK, init_stage); | 6180 | bnx2x_init_block(bp, BLOCK_PBF, init_phase); |
5465 | 6181 | ||
5466 | if (!CHIP_IS_E2(bp)) { | 6182 | if (CHIP_IS_E1x(bp)) { |
5467 | /* configure PBF to work without PAUSE mtu 9000 */ | 6183 | /* configure PBF to work without PAUSE mtu 9000 */ |
5468 | REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); | 6184 | REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); |
5469 | 6185 | ||
@@ -5479,20 +6195,20 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) | |||
5479 | } | 6195 | } |
5480 | 6196 | ||
5481 | #ifdef BCM_CNIC | 6197 | #ifdef BCM_CNIC |
5482 | bnx2x_init_block(bp, SRCH_BLOCK, init_stage); | 6198 | bnx2x_init_block(bp, BLOCK_SRC, init_phase); |
5483 | #endif | 6199 | #endif |
5484 | bnx2x_init_block(bp, CDU_BLOCK, init_stage); | 6200 | bnx2x_init_block(bp, BLOCK_CDU, init_phase); |
5485 | bnx2x_init_block(bp, CFC_BLOCK, init_stage); | 6201 | bnx2x_init_block(bp, BLOCK_CFC, init_phase); |
5486 | 6202 | ||
5487 | if (CHIP_IS_E1(bp)) { | 6203 | if (CHIP_IS_E1(bp)) { |
5488 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); | 6204 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); |
5489 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); | 6205 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); |
5490 | } | 6206 | } |
5491 | bnx2x_init_block(bp, HC_BLOCK, init_stage); | 6207 | bnx2x_init_block(bp, BLOCK_HC, init_phase); |
5492 | 6208 | ||
5493 | bnx2x_init_block(bp, IGU_BLOCK, init_stage); | 6209 | bnx2x_init_block(bp, BLOCK_IGU, init_phase); |
5494 | 6210 | ||
5495 | bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage); | 6211 | bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); |
5496 | /* init aeu_mask_attn_func_0/1: | 6212 | /* init aeu_mask_attn_func_0/1: |
5497 | * - SF mode: bits 3-7 are masked. only bits 0-2 are in use | 6213 | * - SF mode: bits 3-7 are masked. only bits 0-2 are in use |
5498 | * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF | 6214 | * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF |
@@ -5502,22 +6218,31 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) | |||
5502 | val |= CHIP_IS_E1(bp) ? 0 : 0x10; | 6218 | val |= CHIP_IS_E1(bp) ? 0 : 0x10; |
5503 | REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); | 6219 | REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); |
5504 | 6220 | ||
5505 | bnx2x_init_block(bp, PXPCS_BLOCK, init_stage); | 6221 | bnx2x_init_block(bp, BLOCK_NIG, init_phase); |
5506 | bnx2x_init_block(bp, EMAC0_BLOCK, init_stage); | ||
5507 | bnx2x_init_block(bp, EMAC1_BLOCK, init_stage); | ||
5508 | bnx2x_init_block(bp, DBU_BLOCK, init_stage); | ||
5509 | bnx2x_init_block(bp, DBG_BLOCK, init_stage); | ||
5510 | 6222 | ||
5511 | bnx2x_init_block(bp, NIG_BLOCK, init_stage); | 6223 | if (!CHIP_IS_E1x(bp)) { |
6224 | /* Bit-map indicating which L2 hdrs may appear after the | ||
6225 | * basic Ethernet header | ||
6226 | */ | ||
6227 | REG_WR(bp, BP_PORT(bp) ? | ||
6228 | NIG_REG_P1_HDRS_AFTER_BASIC : | ||
6229 | NIG_REG_P0_HDRS_AFTER_BASIC, | ||
6230 | IS_MF_SD(bp) ? 7 : 6); | ||
5512 | 6231 | ||
5513 | REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); | 6232 | if (CHIP_IS_E3(bp)) |
6233 | REG_WR(bp, BP_PORT(bp) ? | ||
6234 | NIG_REG_LLH1_MF_MODE : | ||
6235 | NIG_REG_LLH_MF_MODE, IS_MF(bp)); | ||
6236 | } | ||
6237 | if (!CHIP_IS_E3(bp)) | ||
6238 | REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); | ||
5514 | 6239 | ||
5515 | if (!CHIP_IS_E1(bp)) { | 6240 | if (!CHIP_IS_E1(bp)) { |
5516 | /* 0x2 disable mf_ov, 0x1 enable */ | 6241 | /* 0x2 disable mf_ov, 0x1 enable */ |
5517 | REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, | 6242 | REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, |
5518 | (IS_MF_SD(bp) ? 0x1 : 0x2)); | 6243 | (IS_MF_SD(bp) ? 0x1 : 0x2)); |
5519 | 6244 | ||
5520 | if (CHIP_IS_E2(bp)) { | 6245 | if (!CHIP_IS_E1x(bp)) { |
5521 | val = 0; | 6246 | val = 0; |
5522 | switch (bp->mf_mode) { | 6247 | switch (bp->mf_mode) { |
5523 | case MULTI_FUNCTION_SD: | 6248 | case MULTI_FUNCTION_SD: |
@@ -5538,17 +6263,16 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) | |||
5538 | } | 6263 | } |
5539 | } | 6264 | } |
5540 | 6265 | ||
5541 | bnx2x_init_block(bp, MCP_BLOCK, init_stage); | 6266 | |
5542 | bnx2x_init_block(bp, DMAE_BLOCK, init_stage); | 6267 | /* If SPIO5 is set to generate interrupts, enable it for this port */ |
5543 | if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base, | 6268 | val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); |
5544 | bp->common.shmem2_base, port)) { | 6269 | if (val & (1 << MISC_REGISTERS_SPIO_5)) { |
5545 | u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 6270 | u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
5546 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | 6271 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); |
5547 | val = REG_RD(bp, reg_addr); | 6272 | val = REG_RD(bp, reg_addr); |
5548 | val |= AEU_INPUTS_ATTN_BITS_SPIO5; | 6273 | val |= AEU_INPUTS_ATTN_BITS_SPIO5; |
5549 | REG_WR(bp, reg_addr, val); | 6274 | REG_WR(bp, reg_addr, val); |
5550 | } | 6275 | } |
5551 | bnx2x__link_reset(bp); | ||
5552 | 6276 | ||
5553 | return 0; | 6277 | return 0; |
5554 | } | 6278 | } |
@@ -5567,7 +6291,7 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) | |||
5567 | 6291 | ||
5568 | static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) | 6292 | static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) |
5569 | { | 6293 | { |
5570 | bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/); | 6294 | bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); |
5571 | } | 6295 | } |
5572 | 6296 | ||
5573 | static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) | 6297 | static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) |
@@ -5581,6 +6305,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5581 | { | 6305 | { |
5582 | int port = BP_PORT(bp); | 6306 | int port = BP_PORT(bp); |
5583 | int func = BP_FUNC(bp); | 6307 | int func = BP_FUNC(bp); |
6308 | int init_phase = PHASE_PF0 + func; | ||
5584 | struct bnx2x_ilt *ilt = BP_ILT(bp); | 6309 | struct bnx2x_ilt *ilt = BP_ILT(bp); |
5585 | u16 cdu_ilt_start; | 6310 | u16 cdu_ilt_start; |
5586 | u32 addr, val; | 6311 | u32 addr, val; |
@@ -5589,6 +6314,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5589 | 6314 | ||
5590 | DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); | 6315 | DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); |
5591 | 6316 | ||
6317 | /* FLR cleanup - hmmm */ | ||
6318 | if (!CHIP_IS_E1x(bp)) | ||
6319 | bnx2x_pf_flr_clnup(bp); | ||
6320 | |||
5592 | /* set MSI reconfigure capability */ | 6321 | /* set MSI reconfigure capability */ |
5593 | if (bp->common.int_block == INT_BLOCK_HC) { | 6322 | if (bp->common.int_block == INT_BLOCK_HC) { |
5594 | addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); | 6323 | addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); |
@@ -5597,6 +6326,9 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5597 | REG_WR(bp, addr, val); | 6326 | REG_WR(bp, addr, val); |
5598 | } | 6327 | } |
5599 | 6328 | ||
6329 | bnx2x_init_block(bp, BLOCK_PXP, init_phase); | ||
6330 | bnx2x_init_block(bp, BLOCK_PXP2, init_phase); | ||
6331 | |||
5600 | ilt = BP_ILT(bp); | 6332 | ilt = BP_ILT(bp); |
5601 | cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; | 6333 | cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; |
5602 | 6334 | ||
@@ -5622,7 +6354,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5622 | REG_WR(bp, PRS_REG_NIC_MODE, 1); | 6354 | REG_WR(bp, PRS_REG_NIC_MODE, 1); |
5623 | #endif /* BCM_CNIC */ | 6355 | #endif /* BCM_CNIC */ |
5624 | 6356 | ||
5625 | if (CHIP_IS_E2(bp)) { | 6357 | if (!CHIP_IS_E1x(bp)) { |
5626 | u32 pf_conf = IGU_PF_CONF_FUNC_EN; | 6358 | u32 pf_conf = IGU_PF_CONF_FUNC_EN; |
5627 | 6359 | ||
5628 | /* Turn on a single ISR mode in IGU if driver is going to use | 6360 | /* Turn on a single ISR mode in IGU if driver is going to use |
@@ -5649,58 +6381,55 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5649 | 6381 | ||
5650 | bp->dmae_ready = 1; | 6382 | bp->dmae_ready = 1; |
5651 | 6383 | ||
5652 | bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func); | 6384 | bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); |
5653 | 6385 | ||
5654 | if (CHIP_IS_E2(bp)) | 6386 | if (!CHIP_IS_E1x(bp)) |
5655 | REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); | 6387 | REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); |
5656 | 6388 | ||
5657 | bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func); | 6389 | bnx2x_init_block(bp, BLOCK_ATC, init_phase); |
5658 | bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func); | 6390 | bnx2x_init_block(bp, BLOCK_DMAE, init_phase); |
5659 | bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func); | 6391 | bnx2x_init_block(bp, BLOCK_NIG, init_phase); |
5660 | bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func); | 6392 | bnx2x_init_block(bp, BLOCK_SRC, init_phase); |
5661 | bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func); | 6393 | bnx2x_init_block(bp, BLOCK_MISC, init_phase); |
5662 | bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func); | 6394 | bnx2x_init_block(bp, BLOCK_TCM, init_phase); |
5663 | bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func); | 6395 | bnx2x_init_block(bp, BLOCK_UCM, init_phase); |
5664 | bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func); | 6396 | bnx2x_init_block(bp, BLOCK_CCM, init_phase); |
5665 | bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func); | 6397 | bnx2x_init_block(bp, BLOCK_XCM, init_phase); |
5666 | 6398 | bnx2x_init_block(bp, BLOCK_TSEM, init_phase); | |
5667 | if (CHIP_IS_E2(bp)) { | 6399 | bnx2x_init_block(bp, BLOCK_USEM, init_phase); |
5668 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET, | 6400 | bnx2x_init_block(bp, BLOCK_CSEM, init_phase); |
5669 | BP_PATH(bp)); | 6401 | bnx2x_init_block(bp, BLOCK_XSEM, init_phase); |
5670 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET, | 6402 | |
5671 | BP_PATH(bp)); | 6403 | if (!CHIP_IS_E1x(bp)) |
5672 | } | ||
5673 | |||
5674 | if (CHIP_MODE_IS_4_PORT(bp)) | ||
5675 | bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func); | ||
5676 | |||
5677 | if (CHIP_IS_E2(bp)) | ||
5678 | REG_WR(bp, QM_REG_PF_EN, 1); | 6404 | REG_WR(bp, QM_REG_PF_EN, 1); |
5679 | 6405 | ||
5680 | bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func); | 6406 | if (!CHIP_IS_E1x(bp)) { |
5681 | 6407 | REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); | |
5682 | if (CHIP_MODE_IS_4_PORT(bp)) | 6408 | REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); |
5683 | bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func); | 6409 | REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); |
5684 | 6410 | REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); | |
5685 | bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func); | 6411 | } |
5686 | bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func); | 6412 | bnx2x_init_block(bp, BLOCK_QM, init_phase); |
5687 | bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func); | 6413 | |
5688 | bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func); | 6414 | bnx2x_init_block(bp, BLOCK_TM, init_phase); |
5689 | bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func); | 6415 | bnx2x_init_block(bp, BLOCK_DORQ, init_phase); |
5690 | bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func); | 6416 | bnx2x_init_block(bp, BLOCK_BRB1, init_phase); |
5691 | bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func); | 6417 | bnx2x_init_block(bp, BLOCK_PRS, init_phase); |
5692 | bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func); | 6418 | bnx2x_init_block(bp, BLOCK_TSDM, init_phase); |
5693 | bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func); | 6419 | bnx2x_init_block(bp, BLOCK_CSDM, init_phase); |
5694 | bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func); | 6420 | bnx2x_init_block(bp, BLOCK_USDM, init_phase); |
5695 | bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func); | 6421 | bnx2x_init_block(bp, BLOCK_XSDM, init_phase); |
5696 | if (CHIP_IS_E2(bp)) | 6422 | bnx2x_init_block(bp, BLOCK_UPB, init_phase); |
6423 | bnx2x_init_block(bp, BLOCK_XPB, init_phase); | ||
6424 | bnx2x_init_block(bp, BLOCK_PBF, init_phase); | ||
6425 | if (!CHIP_IS_E1x(bp)) | ||
5697 | REG_WR(bp, PBF_REG_DISABLE_PF, 0); | 6426 | REG_WR(bp, PBF_REG_DISABLE_PF, 0); |
5698 | 6427 | ||
5699 | bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func); | 6428 | bnx2x_init_block(bp, BLOCK_CDU, init_phase); |
5700 | 6429 | ||
5701 | bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func); | 6430 | bnx2x_init_block(bp, BLOCK_CFC, init_phase); |
5702 | 6431 | ||
5703 | if (CHIP_IS_E2(bp)) | 6432 | if (!CHIP_IS_E1x(bp)) |
5704 | REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); | 6433 | REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); |
5705 | 6434 | ||
5706 | if (IS_MF(bp)) { | 6435 | if (IS_MF(bp)) { |
@@ -5708,7 +6437,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5708 | REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); | 6437 | REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); |
5709 | } | 6438 | } |
5710 | 6439 | ||
5711 | bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func); | 6440 | bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); |
5712 | 6441 | ||
5713 | /* HC init per function */ | 6442 | /* HC init per function */ |
5714 | if (bp->common.int_block == INT_BLOCK_HC) { | 6443 | if (bp->common.int_block == INT_BLOCK_HC) { |
@@ -5718,21 +6447,21 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5718 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); | 6447 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); |
5719 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); | 6448 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); |
5720 | } | 6449 | } |
5721 | bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func); | 6450 | bnx2x_init_block(bp, BLOCK_HC, init_phase); |
5722 | 6451 | ||
5723 | } else { | 6452 | } else { |
5724 | int num_segs, sb_idx, prod_offset; | 6453 | int num_segs, sb_idx, prod_offset; |
5725 | 6454 | ||
5726 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); | 6455 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); |
5727 | 6456 | ||
5728 | if (CHIP_IS_E2(bp)) { | 6457 | if (!CHIP_IS_E1x(bp)) { |
5729 | REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); | 6458 | REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); |
5730 | REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); | 6459 | REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); |
5731 | } | 6460 | } |
5732 | 6461 | ||
5733 | bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func); | 6462 | bnx2x_init_block(bp, BLOCK_IGU, init_phase); |
5734 | 6463 | ||
5735 | if (CHIP_IS_E2(bp)) { | 6464 | if (!CHIP_IS_E1x(bp)) { |
5736 | int dsb_idx = 0; | 6465 | int dsb_idx = 0; |
5737 | /** | 6466 | /** |
5738 | * Producer memory: | 6467 | * Producer memory: |
@@ -5827,13 +6556,6 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5827 | REG_WR(bp, 0x2114, 0xffffffff); | 6556 | REG_WR(bp, 0x2114, 0xffffffff); |
5828 | REG_WR(bp, 0x2120, 0xffffffff); | 6557 | REG_WR(bp, 0x2120, 0xffffffff); |
5829 | 6558 | ||
5830 | bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func); | ||
5831 | bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func); | ||
5832 | bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func); | ||
5833 | bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func); | ||
5834 | bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func); | ||
5835 | bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func); | ||
5836 | |||
5837 | if (CHIP_IS_E1x(bp)) { | 6559 | if (CHIP_IS_E1x(bp)) { |
5838 | main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ | 6560 | main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ |
5839 | main_mem_base = HC_REG_MAIN_MEMORY + | 6561 | main_mem_base = HC_REG_MAIN_MEMORY + |
@@ -5859,65 +6581,26 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5859 | REG_RD(bp, main_mem_prty_clr); | 6581 | REG_RD(bp, main_mem_prty_clr); |
5860 | } | 6582 | } |
5861 | 6583 | ||
6584 | #ifdef BNX2X_STOP_ON_ERROR | ||
6585 | /* Enable STORMs SP logging */ | ||
6586 | REG_WR8(bp, BAR_USTRORM_INTMEM + | ||
6587 | USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); | ||
6588 | REG_WR8(bp, BAR_TSTRORM_INTMEM + | ||
6589 | TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); | ||
6590 | REG_WR8(bp, BAR_CSTRORM_INTMEM + | ||
6591 | CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); | ||
6592 | REG_WR8(bp, BAR_XSTRORM_INTMEM + | ||
6593 | XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); | ||
6594 | #endif | ||
6595 | |||
5862 | bnx2x_phy_probe(&bp->link_params); | 6596 | bnx2x_phy_probe(&bp->link_params); |
5863 | 6597 | ||
5864 | return 0; | 6598 | return 0; |
5865 | } | 6599 | } |
5866 | 6600 | ||
5867 | int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) | ||
5868 | { | ||
5869 | int rc = 0; | ||
5870 | |||
5871 | DP(BNX2X_MSG_MCP, "function %d load_code %x\n", | ||
5872 | BP_ABS_FUNC(bp), load_code); | ||
5873 | |||
5874 | bp->dmae_ready = 0; | ||
5875 | spin_lock_init(&bp->dmae_lock); | ||
5876 | |||
5877 | switch (load_code) { | ||
5878 | case FW_MSG_CODE_DRV_LOAD_COMMON: | ||
5879 | case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: | ||
5880 | rc = bnx2x_init_hw_common(bp, load_code); | ||
5881 | if (rc) | ||
5882 | goto init_hw_err; | ||
5883 | /* no break */ | ||
5884 | |||
5885 | case FW_MSG_CODE_DRV_LOAD_PORT: | ||
5886 | rc = bnx2x_init_hw_port(bp); | ||
5887 | if (rc) | ||
5888 | goto init_hw_err; | ||
5889 | /* no break */ | ||
5890 | |||
5891 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: | ||
5892 | rc = bnx2x_init_hw_func(bp); | ||
5893 | if (rc) | ||
5894 | goto init_hw_err; | ||
5895 | break; | ||
5896 | |||
5897 | default: | ||
5898 | BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); | ||
5899 | break; | ||
5900 | } | ||
5901 | |||
5902 | if (!BP_NOMCP(bp)) { | ||
5903 | int mb_idx = BP_FW_MB_IDX(bp); | ||
5904 | |||
5905 | bp->fw_drv_pulse_wr_seq = | ||
5906 | (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) & | ||
5907 | DRV_PULSE_SEQ_MASK); | ||
5908 | DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); | ||
5909 | } | ||
5910 | |||
5911 | init_hw_err: | ||
5912 | bnx2x_gunzip_end(bp); | ||
5913 | |||
5914 | return rc; | ||
5915 | } | ||
5916 | 6601 | ||
5917 | void bnx2x_free_mem(struct bnx2x *bp) | 6602 | void bnx2x_free_mem(struct bnx2x *bp) |
5918 | { | 6603 | { |
5919 | bnx2x_gunzip_end(bp); | ||
5920 | |||
5921 | /* fastpath */ | 6604 | /* fastpath */ |
5922 | bnx2x_free_fp_mem(bp); | 6605 | bnx2x_free_fp_mem(bp); |
5923 | /* end of fastpath */ | 6606 | /* end of fastpath */ |
@@ -5925,6 +6608,9 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
5925 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, | 6608 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, |
5926 | sizeof(struct host_sp_status_block)); | 6609 | sizeof(struct host_sp_status_block)); |
5927 | 6610 | ||
6611 | BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, | ||
6612 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); | ||
6613 | |||
5928 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, | 6614 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, |
5929 | sizeof(struct bnx2x_slowpath)); | 6615 | sizeof(struct bnx2x_slowpath)); |
5930 | 6616 | ||
@@ -5936,7 +6622,7 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
5936 | BNX2X_FREE(bp->ilt->lines); | 6622 | BNX2X_FREE(bp->ilt->lines); |
5937 | 6623 | ||
5938 | #ifdef BCM_CNIC | 6624 | #ifdef BCM_CNIC |
5939 | if (CHIP_IS_E2(bp)) | 6625 | if (!CHIP_IS_E1x(bp)) |
5940 | BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, | 6626 | BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, |
5941 | sizeof(struct host_hc_status_block_e2)); | 6627 | sizeof(struct host_hc_status_block_e2)); |
5942 | else | 6628 | else |
@@ -5950,18 +6636,67 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
5950 | 6636 | ||
5951 | BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, | 6637 | BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, |
5952 | BCM_PAGE_SIZE * NUM_EQ_PAGES); | 6638 | BCM_PAGE_SIZE * NUM_EQ_PAGES); |
6639 | } | ||
6640 | |||
6641 | static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) | ||
6642 | { | ||
6643 | int num_groups; | ||
6644 | |||
6645 | /* number of eth_queues */ | ||
6646 | u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp); | ||
6647 | |||
6648 | /* Total number of FW statistics requests = | ||
6649 | * 1 for port stats + 1 for PF stats + num_eth_queues */ | ||
6650 | bp->fw_stats_num = 2 + num_queue_stats; | ||
6651 | |||
5953 | 6652 | ||
5954 | BNX2X_FREE(bp->rx_indir_table); | 6653 | /* Request is built from stats_query_header and an array of |
6654 | * stats_query_cmd_group each of which contains | ||
6655 | * STATS_QUERY_CMD_COUNT rules. The real number of requests is | ||
6656 | * configured in the stats_query_header. | ||
6657 | */ | ||
6658 | num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT + | ||
6659 | (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0); | ||
6660 | |||
6661 | bp->fw_stats_req_sz = sizeof(struct stats_query_header) + | ||
6662 | num_groups * sizeof(struct stats_query_cmd_group); | ||
6663 | |||
6664 | /* Data for statistics requests + stats_counter | ||
6665 | * | ||
6666 | * stats_counter holds per-STORM counters that are incremented | ||
6667 | * when STORM has finished with the current request. | ||
6668 | */ | ||
6669 | bp->fw_stats_data_sz = sizeof(struct per_port_stats) + | ||
6670 | sizeof(struct per_pf_stats) + | ||
6671 | sizeof(struct per_queue_stats) * num_queue_stats + | ||
6672 | sizeof(struct stats_counter); | ||
6673 | |||
6674 | BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, | ||
6675 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); | ||
6676 | |||
6677 | /* Set shortcuts */ | ||
6678 | bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; | ||
6679 | bp->fw_stats_req_mapping = bp->fw_stats_mapping; | ||
6680 | |||
6681 | bp->fw_stats_data = (struct bnx2x_fw_stats_data *) | ||
6682 | ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); | ||
6683 | |||
6684 | bp->fw_stats_data_mapping = bp->fw_stats_mapping + | ||
6685 | bp->fw_stats_req_sz; | ||
6686 | return 0; | ||
6687 | |||
6688 | alloc_mem_err: | ||
6689 | BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, | ||
6690 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); | ||
6691 | return -ENOMEM; | ||
5955 | } | 6692 | } |
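The open-coded group count above is simply a ceiling division, and the single coherent allocation is carved into a request part followed by a data part. A compact restatement (illustrative; the struct sizes come from the FW HSI headers and STATS_QUERY_CMD_COUNT from the firmware interface, so no concrete byte counts are implied here):

    num_groups = DIV_ROUND_UP(2 + num_queue_stats, STATS_QUERY_CMD_COUNT);

    /* Layout of the buffer at bp->fw_stats / bp->fw_stats_mapping:
     *
     *  offset 0                  stats_query_header                 <- bp->fw_stats_req
     *                            stats_query_cmd_group[num_groups]
     *  offset fw_stats_req_sz    per_port_stats                     <- bp->fw_stats_data
     *                            per_pf_stats
     *                            per_queue_stats[num_queue_stats]
     *                            stats_counter
     */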
5956 | 6693 | ||
5957 | 6694 | ||
5958 | int bnx2x_alloc_mem(struct bnx2x *bp) | 6695 | int bnx2x_alloc_mem(struct bnx2x *bp) |
5959 | { | 6696 | { |
5960 | if (bnx2x_gunzip_init(bp)) | ||
5961 | return -ENOMEM; | ||
5962 | |||
5963 | #ifdef BCM_CNIC | 6697 | #ifdef BCM_CNIC |
5964 | if (CHIP_IS_E2(bp)) | 6698 | if (!CHIP_IS_E1x(bp)) |
6699 | /* size = the status block + ramrod buffers */ | ||
5965 | BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, | 6700 | BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, |
5966 | sizeof(struct host_hc_status_block_e2)); | 6701 | sizeof(struct host_hc_status_block_e2)); |
5967 | else | 6702 | else |
@@ -5979,6 +6714,10 @@ int bnx2x_alloc_mem(struct bnx2x *bp) | |||
5979 | BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, | 6714 | BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, |
5980 | sizeof(struct bnx2x_slowpath)); | 6715 | sizeof(struct bnx2x_slowpath)); |
5981 | 6716 | ||
6717 | /* Allocate memory for FW statistics */ | ||
6718 | if (bnx2x_alloc_fw_stats_mem(bp)) | ||
6719 | goto alloc_mem_err; | ||
6720 | |||
5982 | bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count; | 6721 | bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count; |
5983 | 6722 | ||
5984 | BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, | 6723 | BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, |
@@ -5996,8 +6735,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp) | |||
5996 | BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, | 6735 | BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, |
5997 | BCM_PAGE_SIZE * NUM_EQ_PAGES); | 6736 | BCM_PAGE_SIZE * NUM_EQ_PAGES); |
5998 | 6737 | ||
5999 | BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) * | ||
6000 | TSTORM_INDIRECTION_TABLE_SIZE); | ||
6001 | 6738 | ||
6002 | /* fastpath */ | 6739 | /* fastpath */ |
6003 | /* need to be done at the end, since it's self adjusting to amount | 6740 | /* need to be done at the end, since it's self adjusting to amount |
@@ -6015,629 +6752,75 @@ alloc_mem_err: | |||
6015 | /* | 6752 | /* |
6016 | * Init service functions | 6753 | * Init service functions |
6017 | */ | 6754 | */ |
6018 | static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | ||
6019 | int *state_p, int flags); | ||
6020 | |||
6021 | int bnx2x_func_start(struct bnx2x *bp) | ||
6022 | { | ||
6023 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1); | ||
6024 | |||
6025 | /* Wait for completion */ | ||
6026 | return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state), | ||
6027 | WAIT_RAMROD_COMMON); | ||
6028 | } | ||
6029 | |||
6030 | static int bnx2x_func_stop(struct bnx2x *bp) | ||
6031 | { | ||
6032 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1); | ||
6033 | |||
6034 | /* Wait for completion */ | ||
6035 | return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD, | ||
6036 | 0, &(bp->state), WAIT_RAMROD_COMMON); | ||
6037 | } | ||
6038 | |||
6039 | /** | ||
6040 | * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips | ||
6041 | * | ||
6042 | * @bp: driver handle | ||
6043 | * @set: set or clear an entry (1 or 0) | ||
6044 | * @mac: pointer to a buffer containing a MAC | ||
6045 | * @cl_bit_vec: bit vector of clients to register a MAC for | ||
6046 | * @cam_offset: offset in a CAM to use | ||
6047 | * @is_bcast: is the set MAC a broadcast address (for E1 only) | ||
6048 | */ | ||
6049 | static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac, | ||
6050 | u32 cl_bit_vec, u8 cam_offset, | ||
6051 | u8 is_bcast) | ||
6052 | { | ||
6053 | struct mac_configuration_cmd *config = | ||
6054 | (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config); | ||
6055 | int ramrod_flags = WAIT_RAMROD_COMMON; | ||
6056 | |||
6057 | bp->set_mac_pending = 1; | ||
6058 | |||
6059 | config->hdr.length = 1; | ||
6060 | config->hdr.offset = cam_offset; | ||
6061 | config->hdr.client_id = 0xff; | ||
6062 | /* Mark the single MAC configuration ramrod as opposed to a | ||
6063 | * UC/MC list configuration). | ||
6064 | */ | ||
6065 | config->hdr.echo = 1; | ||
6066 | |||
6067 | /* primary MAC */ | ||
6068 | config->config_table[0].msb_mac_addr = | ||
6069 | swab16(*(u16 *)&mac[0]); | ||
6070 | config->config_table[0].middle_mac_addr = | ||
6071 | swab16(*(u16 *)&mac[2]); | ||
6072 | config->config_table[0].lsb_mac_addr = | ||
6073 | swab16(*(u16 *)&mac[4]); | ||
6074 | config->config_table[0].clients_bit_vector = | ||
6075 | cpu_to_le32(cl_bit_vec); | ||
6076 | config->config_table[0].vlan_id = 0; | ||
6077 | config->config_table[0].pf_id = BP_FUNC(bp); | ||
6078 | if (set) | ||
6079 | SET_FLAG(config->config_table[0].flags, | ||
6080 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
6081 | T_ETH_MAC_COMMAND_SET); | ||
6082 | else | ||
6083 | SET_FLAG(config->config_table[0].flags, | ||
6084 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
6085 | T_ETH_MAC_COMMAND_INVALIDATE); | ||
6086 | |||
6087 | if (is_bcast) | ||
6088 | SET_FLAG(config->config_table[0].flags, | ||
6089 | MAC_CONFIGURATION_ENTRY_BROADCAST, 1); | ||
6090 | |||
6091 | DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n", | ||
6092 | (set ? "setting" : "clearing"), | ||
6093 | config->config_table[0].msb_mac_addr, | ||
6094 | config->config_table[0].middle_mac_addr, | ||
6095 | config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec); | ||
6096 | |||
6097 | mb(); | ||
6098 | |||
6099 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, | ||
6100 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), | ||
6101 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1); | ||
6102 | |||
6103 | /* Wait for a completion */ | ||
6104 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags); | ||
6105 | } | ||
6106 | |||
6107 | static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | ||
6108 | int *state_p, int flags) | ||
6109 | { | ||
6110 | /* can take a while if any port is running */ | ||
6111 | int cnt = 5000; | ||
6112 | u8 poll = flags & WAIT_RAMROD_POLL; | ||
6113 | u8 common = flags & WAIT_RAMROD_COMMON; | ||
6114 | |||
6115 | DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n", | ||
6116 | poll ? "polling" : "waiting", state, idx); | ||
6117 | |||
6118 | might_sleep(); | ||
6119 | while (cnt--) { | ||
6120 | if (poll) { | ||
6121 | if (common) | ||
6122 | bnx2x_eq_int(bp); | ||
6123 | else { | ||
6124 | bnx2x_rx_int(bp->fp, 10); | ||
6125 | /* if index is different from 0 | ||
6126 | * the reply for some commands will | ||
6127 | * be on the non default queue | ||
6128 | */ | ||
6129 | if (idx) | ||
6130 | bnx2x_rx_int(&bp->fp[idx], 10); | ||
6131 | } | ||
6132 | } | ||
6133 | |||
6134 | mb(); /* state is changed by bnx2x_sp_event() */ | ||
6135 | if (*state_p == state) { | ||
6136 | #ifdef BNX2X_STOP_ON_ERROR | ||
6137 | DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt); | ||
6138 | #endif | ||
6139 | return 0; | ||
6140 | } | ||
6141 | |||
6142 | msleep(1); | ||
6143 | |||
6144 | if (bp->panic) | ||
6145 | return -EIO; | ||
6146 | } | ||
6147 | |||
6148 | /* timeout! */ | ||
6149 | BNX2X_ERR("timeout %s for state %x on IDX [%d]\n", | ||
6150 | poll ? "polling" : "waiting", state, idx); | ||
6151 | #ifdef BNX2X_STOP_ON_ERROR | ||
6152 | bnx2x_panic(); | ||
6153 | #endif | ||
6154 | |||
6155 | return -EBUSY; | ||
6156 | } | ||
6157 | |||
6158 | static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset) | ||
6159 | { | ||
6160 | if (CHIP_IS_E1H(bp)) | ||
6161 | return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp); | ||
6162 | else if (CHIP_MODE_IS_4_PORT(bp)) | ||
6163 | return E2_FUNC_MAX * rel_offset + BP_FUNC(bp); | ||
6164 | else | ||
6165 | return E2_FUNC_MAX * rel_offset + BP_VN(bp); | ||
6166 | } | ||
6167 | |||
6168 | /** | ||
6169 | * LLH CAM line allocations: currently only iSCSI and ETH macs are | ||
6170 | * relevant. In addition, current implementation is tuned for a | ||
6171 | * single ETH MAC. | ||
6172 | */ | ||
6173 | enum { | ||
6174 | LLH_CAM_ISCSI_ETH_LINE = 0, | ||
6175 | LLH_CAM_ETH_LINE, | ||
6176 | LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE | ||
6177 | }; | ||
6178 | 6755 | ||
6179 | static void bnx2x_set_mac_in_nig(struct bnx2x *bp, | 6756 | int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, |
6180 | int set, | 6757 | struct bnx2x_vlan_mac_obj *obj, bool set, |
6181 | unsigned char *dev_addr, | 6758 | int mac_type, unsigned long *ramrod_flags) |
6182 | int index) | ||
6183 | { | 6759 | { |
6184 | u32 wb_data[2]; | 6760 | int rc; |
6185 | u32 mem_offset, ena_offset, mem_index; | 6761 | struct bnx2x_vlan_mac_ramrod_params ramrod_param; |
6186 | /** | ||
6187 | * indexes mapping: | ||
6188 | * 0..7 - goes to MEM | ||
6189 | * 8..15 - goes to MEM2 | ||
6190 | */ | ||
6191 | |||
6192 | if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE) | ||
6193 | return; | ||
6194 | |||
6195 | /* calculate memory start offset according to the mapping | ||
6196 | * and index in the memory */ | ||
6197 | if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) { | ||
6198 | mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : | ||
6199 | NIG_REG_LLH0_FUNC_MEM; | ||
6200 | ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : | ||
6201 | NIG_REG_LLH0_FUNC_MEM_ENABLE; | ||
6202 | mem_index = index; | ||
6203 | } else { | ||
6204 | mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 : | ||
6205 | NIG_REG_P0_LLH_FUNC_MEM2; | ||
6206 | ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE : | ||
6207 | NIG_REG_P0_LLH_FUNC_MEM2_ENABLE; | ||
6208 | mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET; | ||
6209 | } | ||
6210 | |||
6211 | if (set) { | ||
6212 | /* LLH_FUNC_MEM is a u64 WB register */ | ||
6213 | mem_offset += 8*mem_index; | ||
6214 | |||
6215 | wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | | ||
6216 | (dev_addr[4] << 8) | dev_addr[5]); | ||
6217 | wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); | ||
6218 | |||
6219 | REG_WR_DMAE(bp, mem_offset, wb_data, 2); | ||
6220 | } | ||
6221 | |||
6222 | /* enable/disable the entry */ | ||
6223 | REG_WR(bp, ena_offset + 4*mem_index, set); | ||
6224 | |||
6225 | } | ||
6226 | 6762 | ||
6227 | void bnx2x_set_eth_mac(struct bnx2x *bp, int set) | 6763 | memset(&ramrod_param, 0, sizeof(ramrod_param)); |
6228 | { | ||
6229 | u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) : | ||
6230 | bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE)); | ||
6231 | 6764 | ||
6232 | /* networking MAC */ | 6765 | /* Fill general parameters */ |
6233 | bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr, | 6766 | ramrod_param.vlan_mac_obj = obj; |
6234 | (1 << bp->fp->cl_id), cam_offset , 0); | 6767 | ramrod_param.ramrod_flags = *ramrod_flags; |
6235 | 6768 | ||
6236 | bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE); | 6769 | /* Fill a user request section if needed */ |
6770 | if (!test_bit(RAMROD_CONT, ramrod_flags)) { | ||
6771 | memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); | ||
6237 | 6772 | ||
6238 | if (CHIP_IS_E1(bp)) { | 6773 | __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); |
6239 | /* broadcast MAC */ | ||
6240 | static const u8 bcast[ETH_ALEN] = { | ||
6241 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff | ||
6242 | }; | ||
6243 | bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1); | ||
6244 | } | ||
6245 | } | ||
6246 | 6774 | ||
6247 | static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp) | 6775 | /* Set the command: ADD or DEL */ |
6248 | { | 6776 | if (set) |
6249 | return CHIP_REV_IS_SLOW(bp) ? | 6777 | ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; |
6250 | (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) : | 6778 | else |
6251 | (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp))); | 6779 | ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; |
6252 | } | ||
6253 | |||
6254 | /* set mc list, do not wait as wait implies sleep and | ||
6255 | * set_rx_mode can be invoked from non-sleepable context. | ||
6256 | * | ||
6257 | * Instead we use the same ramrod data buffer each time we need | ||
6258 | * to configure a list of addresses, and use the fact that the | ||
6259 | * list of MACs is changed in an incremental way and that the | ||
6260 | * function is called under the netif_addr_lock. A temporary | ||
6261 | * inconsistent CAM configuration (possible in case of a very fast | ||
6262 | * sequence of add/del/add on the host side) will shortly be | ||
6263 | * restored by the handler of the last ramrod. | ||
6264 | */ | ||
6265 | static int bnx2x_set_e1_mc_list(struct bnx2x *bp) | ||
6266 | { | ||
6267 | int i = 0, old; | ||
6268 | struct net_device *dev = bp->dev; | ||
6269 | u8 offset = bnx2x_e1_cam_mc_offset(bp); | ||
6270 | struct netdev_hw_addr *ha; | ||
6271 | struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); | ||
6272 | dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); | ||
6273 | |||
6274 | if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) | ||
6275 | return -EINVAL; | ||
6276 | |||
6277 | netdev_for_each_mc_addr(ha, dev) { | ||
6278 | /* copy mac */ | ||
6279 | config_cmd->config_table[i].msb_mac_addr = | ||
6280 | swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]); | ||
6281 | config_cmd->config_table[i].middle_mac_addr = | ||
6282 | swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]); | ||
6283 | config_cmd->config_table[i].lsb_mac_addr = | ||
6284 | swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]); | ||
6285 | |||
6286 | config_cmd->config_table[i].vlan_id = 0; | ||
6287 | config_cmd->config_table[i].pf_id = BP_FUNC(bp); | ||
6288 | config_cmd->config_table[i].clients_bit_vector = | ||
6289 | cpu_to_le32(1 << BP_L_ID(bp)); | ||
6290 | |||
6291 | SET_FLAG(config_cmd->config_table[i].flags, | ||
6292 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
6293 | T_ETH_MAC_COMMAND_SET); | ||
6294 | |||
6295 | DP(NETIF_MSG_IFUP, | ||
6296 | "setting MCAST[%d] (%04x:%04x:%04x)\n", i, | ||
6297 | config_cmd->config_table[i].msb_mac_addr, | ||
6298 | config_cmd->config_table[i].middle_mac_addr, | ||
6299 | config_cmd->config_table[i].lsb_mac_addr); | ||
6300 | i++; | ||
6301 | } | ||
6302 | old = config_cmd->hdr.length; | ||
6303 | if (old > i) { | ||
6304 | for (; i < old; i++) { | ||
6305 | if (CAM_IS_INVALID(config_cmd-> | ||
6306 | config_table[i])) { | ||
6307 | /* already invalidated */ | ||
6308 | break; | ||
6309 | } | ||
6310 | /* invalidate */ | ||
6311 | SET_FLAG(config_cmd->config_table[i].flags, | ||
6312 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
6313 | T_ETH_MAC_COMMAND_INVALIDATE); | ||
6314 | } | ||
6315 | } | 6780 | } |
6316 | 6781 | ||
6317 | wmb(); | 6782 | rc = bnx2x_config_vlan_mac(bp, &ramrod_param); |
6318 | 6783 | if (rc < 0) | |
6319 | config_cmd->hdr.length = i; | 6784 | BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del")); |
6320 | config_cmd->hdr.offset = offset; | 6785 | return rc; |
6321 | config_cmd->hdr.client_id = 0xff; | ||
6322 | /* Mark that this ramrod doesn't use bp->set_mac_pending for | ||
6323 | * synchronization. | ||
6324 | */ | ||
6325 | config_cmd->hdr.echo = 0; | ||
6326 | |||
6327 | mb(); | ||
6328 | |||
6329 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, | ||
6330 | U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); | ||
6331 | } | 6786 | } |
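The new bnx2x_set_mac_one() builds a vlan_mac ramrod by combining long-word flag bits (RAMROD_CONT, a MAC-class bit) with an ADD or DEL command. A self-contained sketch of that flag-and-command composition follows; mac_ramrod, MY_RAMROD_*, MY_FLG_* and MAC_CMD_* are invented stand-ins, not the driver's types.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Invented stand-ins for the flag bits and commands used above */
enum { MY_RAMROD_CONT = 0, MY_RAMROD_COMP_WAIT = 1 };
enum { MY_FLG_ETH_MAC = 0, MY_FLG_ISCSI_MAC = 1 };
enum mac_cmd { MAC_CMD_ADD, MAC_CMD_DEL };

struct mac_ramrod {
	unsigned long ramrod_flags;   /* execution flags */
	unsigned long vlan_mac_flags; /* classifies the MAC entry */
	enum mac_cmd  cmd;
	unsigned char mac[6];
};

static void prep_mac_ramrod(struct mac_ramrod *p, const unsigned char *mac,
			    bool set, int mac_type, unsigned long ramrod_flags)
{
	memset(p, 0, sizeof(*p));
	p->ramrod_flags = ramrod_flags;

	/* Only fill the user request when not continuing a pending one */
	if (!(p->ramrod_flags & (1UL << MY_RAMROD_CONT))) {
		memcpy(p->mac, mac, sizeof(p->mac));
		p->vlan_mac_flags |= 1UL << mac_type;
	}
	p->cmd = set ? MAC_CMD_ADD : MAC_CMD_DEL;
}

int main(void)
{
	const unsigned char mac[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
	struct mac_ramrod p;

	prep_mac_ramrod(&p, mac, true, MY_FLG_ETH_MAC,
			1UL << MY_RAMROD_COMP_WAIT);
	printf("cmd=%s flags=%#lx class=%#lx\n",
	       p.cmd == MAC_CMD_ADD ? "ADD" : "DEL",
	       p.ramrod_flags, p.vlan_mac_flags);
	return 0;
}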
6332 | 6787 | ||
6333 | void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp) | 6788 | int bnx2x_del_all_macs(struct bnx2x *bp, |
6789 | struct bnx2x_vlan_mac_obj *mac_obj, | ||
6790 | int mac_type, bool wait_for_comp) | ||
6334 | { | 6791 | { |
6335 | int i; | 6792 | int rc; |
6336 | struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); | 6793 | unsigned long ramrod_flags = 0, vlan_mac_flags = 0; |
6337 | dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); | ||
6338 | int ramrod_flags = WAIT_RAMROD_COMMON; | ||
6339 | u8 offset = bnx2x_e1_cam_mc_offset(bp); | ||
6340 | |||
6341 | for (i = 0; i < BNX2X_MAX_MULTICAST; i++) | ||
6342 | SET_FLAG(config_cmd->config_table[i].flags, | ||
6343 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
6344 | T_ETH_MAC_COMMAND_INVALIDATE); | ||
6345 | |||
6346 | wmb(); | ||
6347 | |||
6348 | config_cmd->hdr.length = BNX2X_MAX_MULTICAST; | ||
6349 | config_cmd->hdr.offset = offset; | ||
6350 | config_cmd->hdr.client_id = 0xff; | ||
6351 | /* We'll wait for a completion this time... */ | ||
6352 | config_cmd->hdr.echo = 1; | ||
6353 | |||
6354 | bp->set_mac_pending = 1; | ||
6355 | 6794 | ||
6356 | mb(); | 6795 | /* Wait for completion of requested */ |
6796 | if (wait_for_comp) | ||
6797 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); | ||
6357 | 6798 | ||
6358 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, | 6799 | /* Set the mac type of addresses we want to clear */ |
6359 | U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); | 6800 | __set_bit(mac_type, &vlan_mac_flags); |
6360 | 6801 | ||
6361 | /* Wait for a completion */ | 6802 | rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); |
6362 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, | 6803 | if (rc < 0) |
6363 | ramrod_flags); | 6804 | BNX2X_ERR("Failed to delete MACs: %d\n", rc); |
6364 | 6805 | ||
6806 | return rc; | ||
6365 | } | 6807 | } |
6366 | 6808 | ||
6367 | /* Accept one or more multicasts */ | 6809 | int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) |
6368 | static int bnx2x_set_e1h_mc_list(struct bnx2x *bp) | ||
6369 | { | 6810 | { |
6370 | struct net_device *dev = bp->dev; | 6811 | unsigned long ramrod_flags = 0; |
6371 | struct netdev_hw_addr *ha; | ||
6372 | u32 mc_filter[MC_HASH_SIZE]; | ||
6373 | u32 crc, bit, regidx; | ||
6374 | int i; | ||
6375 | 6812 | ||
6376 | memset(mc_filter, 0, 4 * MC_HASH_SIZE); | 6813 | DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); |
6377 | 6814 | ||
6378 | netdev_for_each_mc_addr(ha, dev) { | 6815 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); |
6379 | DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", | 6816 | /* Eth MAC is set on RSS leading client (fp[0]) */ |
6380 | bnx2x_mc_addr(ha)); | 6817 | return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set, |
6381 | 6818 | BNX2X_ETH_MAC, &ramrod_flags); | |
6382 | crc = crc32c_le(0, bnx2x_mc_addr(ha), | ||
6383 | ETH_ALEN); | ||
6384 | bit = (crc >> 24) & 0xff; | ||
6385 | regidx = bit >> 5; | ||
6386 | bit &= 0x1f; | ||
6387 | mc_filter[regidx] |= (1 << bit); | ||
6388 | } | ||
6389 | |||
6390 | for (i = 0; i < MC_HASH_SIZE; i++) | ||
6391 | REG_WR(bp, MC_HASH_OFFSET(bp, i), | ||
6392 | mc_filter[i]); | ||
6393 | |||
6394 | return 0; | ||
6395 | } | 6819 | } |
6396 | 6820 | ||
6397 | void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp) | 6821 | int bnx2x_setup_leading(struct bnx2x *bp) |
6398 | { | 6822 | { |
6399 | int i; | 6823 | return bnx2x_setup_queue(bp, &bp->fp[0], 1); |
6400 | |||
6401 | for (i = 0; i < MC_HASH_SIZE; i++) | ||
6402 | REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); | ||
6403 | } | ||
6404 | |||
6405 | #ifdef BCM_CNIC | ||
6406 | /** | ||
6407 | * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). | ||
6408 | * | ||
6409 | * @bp: driver handle | ||
6410 | * @set: set or clear the CAM entry | ||
6411 | * | ||
6412 | * This function will wait until the ramrod completion returns. | ||
6413 | * Return 0 if success, -ENODEV if ramrod doesn't return. | ||
6414 | */ | ||
6415 | static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) | ||
6416 | { | ||
6417 | u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) : | ||
6418 | bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE)); | ||
6419 | u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID + | ||
6420 | BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; | ||
6421 | u32 cl_bit_vec = (1 << iscsi_l2_cl_id); | ||
6422 | u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; | ||
6423 | |||
6424 | /* Send a SET_MAC ramrod */ | ||
6425 | bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec, | ||
6426 | cam_offset, 0); | ||
6427 | |||
6428 | bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE); | ||
6429 | |||
6430 | return 0; | ||
6431 | } | ||
6432 | |||
6433 | /** | ||
6434 | * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s) | ||
6435 | * | ||
6436 | * @bp: driver handle | ||
6437 | * @set: set or clear the CAM entry | ||
6438 | * | ||
6439 | * This function will wait until the ramrod completion returns. | ||
6440 | * Returns 0 if success, -ENODEV if ramrod doesn't return. | ||
6441 | */ | ||
6442 | int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set) | ||
6443 | { | ||
6444 | u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id)); | ||
6445 | /** | ||
6446 | * CAM allocation for E1H | ||
6447 | * eth unicasts: by func number | ||
6448 | * iscsi: by func number | ||
6449 | * fip unicast: by func number | ||
6450 | * fip multicast: by func number | ||
6451 | */ | ||
6452 | bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac, | ||
6453 | cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0); | ||
6454 | |||
6455 | return 0; | ||
6456 | } | ||
6457 | |||
6458 | int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set) | ||
6459 | { | ||
6460 | u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id)); | ||
6461 | |||
6462 | /** | ||
6463 | * CAM allocation for E1H | ||
6464 | * eth unicasts: by func number | ||
6465 | * iscsi: by func number | ||
6466 | * fip unicast: by func number | ||
6467 | * fip multicast: by func number | ||
6468 | */ | ||
6469 | bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec, | ||
6470 | bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0); | ||
6471 | |||
6472 | return 0; | ||
6473 | } | ||
6474 | #endif | ||
6475 | |||
6476 | static void bnx2x_fill_cl_init_data(struct bnx2x *bp, | ||
6477 | struct bnx2x_client_init_params *params, | ||
6478 | u8 activate, | ||
6479 | struct client_init_ramrod_data *data) | ||
6480 | { | ||
6481 | /* Clear the buffer */ | ||
6482 | memset(data, 0, sizeof(*data)); | ||
6483 | |||
6484 | /* general */ | ||
6485 | data->general.client_id = params->rxq_params.cl_id; | ||
6486 | data->general.statistics_counter_id = params->rxq_params.stat_id; | ||
6487 | data->general.statistics_en_flg = | ||
6488 | (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0; | ||
6489 | data->general.is_fcoe_flg = | ||
6490 | (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0; | ||
6491 | data->general.activate_flg = activate; | ||
6492 | data->general.sp_client_id = params->rxq_params.spcl_id; | ||
6493 | |||
6494 | /* Rx data */ | ||
6495 | data->rx.tpa_en_flg = | ||
6496 | (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0; | ||
6497 | data->rx.vmqueue_mode_en_flg = 0; | ||
6498 | data->rx.cache_line_alignment_log_size = | ||
6499 | params->rxq_params.cache_line_log; | ||
6500 | data->rx.enable_dynamic_hc = | ||
6501 | (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0; | ||
6502 | data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt; | ||
6503 | data->rx.client_qzone_id = params->rxq_params.cl_qzone_id; | ||
6504 | data->rx.max_agg_size = params->rxq_params.tpa_agg_sz; | ||
6505 | |||
6506 | /* We don't set drop flags */ | ||
6507 | data->rx.drop_ip_cs_err_flg = 0; | ||
6508 | data->rx.drop_tcp_cs_err_flg = 0; | ||
6509 | data->rx.drop_ttl0_flg = 0; | ||
6510 | data->rx.drop_udp_cs_err_flg = 0; | ||
6511 | |||
6512 | data->rx.inner_vlan_removal_enable_flg = | ||
6513 | (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0; | ||
6514 | data->rx.outer_vlan_removal_enable_flg = | ||
6515 | (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0; | ||
6516 | data->rx.status_block_id = params->rxq_params.fw_sb_id; | ||
6517 | data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index; | ||
6518 | data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz); | ||
6519 | data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz); | ||
6520 | data->rx.mtu = cpu_to_le16(params->rxq_params.mtu); | ||
6521 | data->rx.bd_page_base.lo = | ||
6522 | cpu_to_le32(U64_LO(params->rxq_params.dscr_map)); | ||
6523 | data->rx.bd_page_base.hi = | ||
6524 | cpu_to_le32(U64_HI(params->rxq_params.dscr_map)); | ||
6525 | data->rx.sge_page_base.lo = | ||
6526 | cpu_to_le32(U64_LO(params->rxq_params.sge_map)); | ||
6527 | data->rx.sge_page_base.hi = | ||
6528 | cpu_to_le32(U64_HI(params->rxq_params.sge_map)); | ||
6529 | data->rx.cqe_page_base.lo = | ||
6530 | cpu_to_le32(U64_LO(params->rxq_params.rcq_map)); | ||
6531 | data->rx.cqe_page_base.hi = | ||
6532 | cpu_to_le32(U64_HI(params->rxq_params.rcq_map)); | ||
6533 | data->rx.is_leading_rss = | ||
6534 | (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0; | ||
6535 | data->rx.is_approx_mcast = data->rx.is_leading_rss; | ||
6536 | |||
6537 | /* Tx data */ | ||
6538 | data->tx.enforce_security_flg = 0; /* VF specific */ | ||
6539 | data->tx.tx_status_block_id = params->txq_params.fw_sb_id; | ||
6540 | data->tx.tx_sb_index_number = params->txq_params.sb_cq_index; | ||
6541 | data->tx.mtu = 0; /* VF specific */ | ||
6542 | data->tx.tx_bd_page_base.lo = | ||
6543 | cpu_to_le32(U64_LO(params->txq_params.dscr_map)); | ||
6544 | data->tx.tx_bd_page_base.hi = | ||
6545 | cpu_to_le32(U64_HI(params->txq_params.dscr_map)); | ||
6546 | |||
6547 | /* flow control data */ | ||
6548 | data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo); | ||
6549 | data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi); | ||
6550 | data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo); | ||
6551 | data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi); | ||
6552 | data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo); | ||
6553 | data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi); | ||
6554 | data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map); | ||
6555 | |||
6556 | data->fc.safc_group_num = params->txq_params.cos; | ||
6557 | data->fc.safc_group_en_flg = | ||
6558 | (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0; | ||
6559 | data->fc.traffic_type = | ||
6560 | (params->ramrod_params.flags & CLIENT_IS_FCOE) ? | ||
6561 | LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; | ||
6562 | } | ||
6563 | |||
6564 | static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid) | ||
6565 | { | ||
6566 | /* ustorm cxt validation */ | ||
6567 | cxt->ustorm_ag_context.cdu_usage = | ||
6568 | CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG, | ||
6569 | ETH_CONNECTION_TYPE); | ||
6570 | /* xcontext validation */ | ||
6571 | cxt->xstorm_ag_context.cdu_reserved = | ||
6572 | CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG, | ||
6573 | ETH_CONNECTION_TYPE); | ||
6574 | } | ||
6575 | |||
6576 | static int bnx2x_setup_fw_client(struct bnx2x *bp, | ||
6577 | struct bnx2x_client_init_params *params, | ||
6578 | u8 activate, | ||
6579 | struct client_init_ramrod_data *data, | ||
6580 | dma_addr_t data_mapping) | ||
6581 | { | ||
6582 | u16 hc_usec; | ||
6583 | int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; | ||
6584 | int ramrod_flags = 0, rc; | ||
6585 | |||
6586 | /* HC and context validation values */ | ||
6587 | hc_usec = params->txq_params.hc_rate ? | ||
6588 | 1000000 / params->txq_params.hc_rate : 0; | ||
6589 | bnx2x_update_coalesce_sb_index(bp, | ||
6590 | params->txq_params.fw_sb_id, | ||
6591 | params->txq_params.sb_cq_index, | ||
6592 | !(params->txq_params.flags & QUEUE_FLG_HC), | ||
6593 | hc_usec); | ||
6594 | |||
6595 | *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING; | ||
6596 | |||
6597 | hc_usec = params->rxq_params.hc_rate ? | ||
6598 | 1000000 / params->rxq_params.hc_rate : 0; | ||
6599 | bnx2x_update_coalesce_sb_index(bp, | ||
6600 | params->rxq_params.fw_sb_id, | ||
6601 | params->rxq_params.sb_cq_index, | ||
6602 | !(params->rxq_params.flags & QUEUE_FLG_HC), | ||
6603 | hc_usec); | ||
6604 | |||
6605 | bnx2x_set_ctx_validation(params->rxq_params.cxt, | ||
6606 | params->rxq_params.cid); | ||
6607 | |||
6608 | /* zero stats */ | ||
6609 | if (params->txq_params.flags & QUEUE_FLG_STATS) | ||
6610 | storm_memset_xstats_zero(bp, BP_PORT(bp), | ||
6611 | params->txq_params.stat_id); | ||
6612 | |||
6613 | if (params->rxq_params.flags & QUEUE_FLG_STATS) { | ||
6614 | storm_memset_ustats_zero(bp, BP_PORT(bp), | ||
6615 | params->rxq_params.stat_id); | ||
6616 | storm_memset_tstats_zero(bp, BP_PORT(bp), | ||
6617 | params->rxq_params.stat_id); | ||
6618 | } | ||
6619 | |||
6620 | /* Fill the ramrod data */ | ||
6621 | bnx2x_fill_cl_init_data(bp, params, activate, data); | ||
6622 | |||
6623 | /* SETUP ramrod. | ||
6624 | * | ||
6625 | * bnx2x_sp_post() takes a spin_lock thus no other explicit memory | ||
6626 | * barrier except from mmiowb() is needed to impose a | ||
6627 | * proper ordering of memory operations. | ||
6628 | */ | ||
6629 | mmiowb(); | ||
6630 | |||
6631 | |||
6632 | bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid, | ||
6633 | U64_HI(data_mapping), U64_LO(data_mapping), 0); | ||
6634 | |||
6635 | /* Wait for completion */ | ||
6636 | rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state, | ||
6637 | params->ramrod_params.index, | ||
6638 | params->ramrod_params.pstate, | ||
6639 | ramrod_flags); | ||
6640 | return rc; | ||
6641 | } | 6824 | } |
6642 | 6825 | ||
6643 | /** | 6826 | /** |
@@ -6647,11 +6830,9 @@ static int bnx2x_setup_fw_client(struct bnx2x *bp, | |||
6647 | * | 6830 | * |
6648 | * In case of MSI-X it will also try to enable MSI-X. | 6831 | * In case of MSI-X it will also try to enable MSI-X. |
6649 | */ | 6832 | */ |
6650 | static int __devinit bnx2x_set_int_mode(struct bnx2x *bp) | 6833 | static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) |
6651 | { | 6834 | { |
6652 | int rc = 0; | 6835 | switch (int_mode) { |
6653 | |||
6654 | switch (bp->int_mode) { | ||
6655 | case INT_MODE_MSI: | 6836 | case INT_MODE_MSI: |
6656 | bnx2x_enable_msi(bp); | 6837 | bnx2x_enable_msi(bp); |
6657 | /* falling through... */ | 6838 | /* falling through... */ |
@@ -6670,8 +6851,7 @@ static int __devinit bnx2x_set_int_mode(struct bnx2x *bp) | |||
6670 | * so try to enable MSI-X with the requested number of fp's | 6851 | * so try to enable MSI-X with the requested number of fp's |
6671 | * and fallback to MSI or legacy INTx with one fp | 6852 | * and fallback to MSI or legacy INTx with one fp |
6672 | */ | 6853 | */ |
6673 | rc = bnx2x_enable_msix(bp); | 6854 | if (bnx2x_enable_msix(bp)) { |
6674 | if (rc) { | ||
6675 | /* failed to enable MSI-X */ | 6855 | /* failed to enable MSI-X */ |
6676 | if (bp->multi_mode) | 6856 | if (bp->multi_mode) |
6677 | DP(NETIF_MSG_IFUP, | 6857 | DP(NETIF_MSG_IFUP, |
@@ -6682,14 +6862,12 @@ static int __devinit bnx2x_set_int_mode(struct bnx2x *bp) | |||
6682 | 1 + NONE_ETH_CONTEXT_USE); | 6862 | 1 + NONE_ETH_CONTEXT_USE); |
6683 | bp->num_queues = 1 + NONE_ETH_CONTEXT_USE; | 6863 | bp->num_queues = 1 + NONE_ETH_CONTEXT_USE; |
6684 | 6864 | ||
6865 | /* Try to enable MSI */ | ||
6685 | if (!(bp->flags & DISABLE_MSI_FLAG)) | 6866 | if (!(bp->flags & DISABLE_MSI_FLAG)) |
6686 | bnx2x_enable_msi(bp); | 6867 | bnx2x_enable_msi(bp); |
6687 | } | 6868 | } |
6688 | |||
6689 | break; | 6869 | break; |
6690 | } | 6870 | } |
6691 | |||
6692 | return rc; | ||
6693 | } | 6871 | } |
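The reworked bnx2x_set_int_mode() no longer returns a status: it tries MSI-X with the requested queue count and, if that fails, drops to a single queue and falls back to MSI (unless disabled) or legacy INTx. A minimal sketch of that fallback ladder, with invented try_msix()/try_msi() helpers, is:

#include <stdbool.h>
#include <stdio.h>

enum int_mode { MODE_INTX, MODE_MSI, MODE_MSIX };

/* Invented stand-ins: pretend MSI-X fails when too many vectors are asked */
static bool try_msix(int nvec) { return nvec <= 4; }
static bool try_msi(void)      { return true; }

static enum int_mode pick_int_mode(int *num_queues, bool msi_disabled)
{
	if (try_msix(*num_queues))
		return MODE_MSIX;

	/* Fall back to a single queue, then MSI, then legacy INTx */
	*num_queues = 1;
	if (!msi_disabled && try_msi())
		return MODE_MSI;
	return MODE_INTX;
}

int main(void)
{
	int queues = 8;
	enum int_mode m = pick_int_mode(&queues, false);

	printf("queues=%d mode=%s\n", queues,
	       m == MODE_MSIX ? "MSI-X" : m == MODE_MSI ? "MSI" : "INTx");
	return 0;
}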
6694 | 6872 | ||
6695 | /* must be called prior to any HW initializations */ | 6873 | /* must be called prior to any HW initializations */
@@ -6713,7 +6891,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) | |||
6713 | ilt_client->page_size = CDU_ILT_PAGE_SZ; | 6891 | ilt_client->page_size = CDU_ILT_PAGE_SZ; |
6714 | ilt_client->flags = ILT_CLIENT_SKIP_MEM; | 6892 | ilt_client->flags = ILT_CLIENT_SKIP_MEM; |
6715 | ilt_client->start = line; | 6893 | ilt_client->start = line; |
6716 | line += L2_ILT_LINES(bp); | 6894 | line += bnx2x_cid_ilt_lines(bp); |
6717 | #ifdef BCM_CNIC | 6895 | #ifdef BCM_CNIC |
6718 | line += CNIC_ILT_LINES; | 6896 | line += CNIC_ILT_LINES; |
6719 | #endif | 6897 | #endif |
@@ -6793,12 +6971,72 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) | |||
6793 | #else | 6971 | #else |
6794 | ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); | 6972 | ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); |
6795 | #endif | 6973 | #endif |
6974 | BUG_ON(line > ILT_MAX_LINES); | ||
6796 | } | 6975 | } |
6797 | 6976 | ||
6798 | int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp, | 6977 | /** |
6799 | int is_leading) | 6978 | * bnx2x_pf_q_prep_init - prepare INIT transition parameters |
6979 | * | ||
6980 | * @bp: driver handle | ||
6981 | * @fp: pointer to fastpath | ||
6982 | * @init_params: pointer to parameters structure | ||
6983 | * | ||
6984 | * parameters configured: | ||
6985 | * - HC configuration | ||
6986 | * - Queue's CDU context | ||
6987 | */ | ||
6988 | static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp, | ||
6989 | struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) | ||
6800 | { | 6990 | { |
6801 | struct bnx2x_client_init_params params = { {0} }; | 6991 | /* FCoE Queue uses Default SB, thus has no HC capabilities */ |
6992 | if (!IS_FCOE_FP(fp)) { | ||
6993 | __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); | ||
6994 | __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); | ||
6995 | |||
6996 | /* If HC is supporterd, enable host coalescing in the transition | ||
6997 | * to INIT state. | ||
6998 | */ | ||
6999 | __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); | ||
7000 | __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags); | ||
7001 | |||
7002 | /* HC rate */ | ||
7003 | init_params->rx.hc_rate = bp->rx_ticks ? | ||
7004 | (1000000 / bp->rx_ticks) : 0; | ||
7005 | init_params->tx.hc_rate = bp->tx_ticks ? | ||
7006 | (1000000 / bp->tx_ticks) : 0; | ||
7007 | |||
7008 | /* FW SB ID */ | ||
7009 | init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = | ||
7010 | fp->fw_sb_id; | ||
7011 | |||
7012 | /* | ||
7013 | * CQ index among the SB indices: the FCoE client uses the default | ||
7014 | * SB, therefore it's different. | ||
7015 | */ | ||
7016 | init_params->rx.sb_cq_index = U_SB_ETH_RX_CQ_INDEX; | ||
7017 | init_params->tx.sb_cq_index = C_SB_ETH_TX_CQ_INDEX; | ||
7018 | } | ||
7019 | |||
7020 | init_params->cxt = &bp->context.vcxt[fp->cid].eth; | ||
7021 | } | ||
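bnx2x_pf_q_prep_init() converts the coalescing interval in microseconds (bp->rx_ticks, bp->tx_ticks) into an interrupt rate of 1000000/ticks, treating a zero interval as "coalescing off". The same conversion in isolation (the function name and sample values are illustrative only):

#include <stdio.h>

/* Interrupts per second for a host-coalescing interval given in usec;
 * 0 ticks means host coalescing is off, reported as rate 0. */
static unsigned int hc_rate_from_ticks(unsigned int ticks_usec)
{
	return ticks_usec ? 1000000u / ticks_usec : 0u;
}

int main(void)
{
	unsigned int samples[] = { 0, 25, 50, 200 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%3u usec -> %u irq/s\n",
		       samples[i], hc_rate_from_ticks(samples[i]));
	return 0;
}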
7022 | |||
7023 | /** | ||
7024 | * bnx2x_setup_queue - setup queue | ||
7025 | * | ||
7026 | * @bp: driver handle | ||
7027 | * @fp: pointer to fastpath | ||
7028 | * @leading: true for the leading (RSS) queue | ||
7029 | * | ||
7030 | * This function performs two steps in the Queue state machine: | ||
7031 | * 1) RESET->INIT, 2) INIT->SETUP | ||
7032 | */ | ||
7033 | |||
7034 | int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
7035 | bool leading) | ||
7036 | { | ||
7037 | struct bnx2x_queue_state_params q_params = {0}; | ||
7038 | struct bnx2x_queue_setup_params *setup_params = | ||
7039 | &q_params.params.setup; | ||
6802 | int rc; | 7040 | int rc; |
6803 | 7041 | ||
6804 | /* reset IGU state skip FCoE L2 queue */ | 7042 | /* reset IGU state skip FCoE L2 queue */ |
@@ -6806,79 +7044,73 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
6806 | bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, | 7044 | bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, |
6807 | IGU_INT_ENABLE, 0); | 7045 | IGU_INT_ENABLE, 0); |
6808 | 7046 | ||
6809 | params.ramrod_params.pstate = &fp->state; | 7047 | q_params.q_obj = &fp->q_obj; |
6810 | params.ramrod_params.state = BNX2X_FP_STATE_OPEN; | 7048 | /* We want to wait for completion in this context */ |
6811 | params.ramrod_params.index = fp->index; | 7049 | __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); |
6812 | params.ramrod_params.cid = fp->cid; | ||
6813 | 7050 | ||
6814 | #ifdef BCM_CNIC | 7051 | /* Prepare the INIT parameters */ |
6815 | if (IS_FCOE_FP(fp)) | 7052 | bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init); |
6816 | params.ramrod_params.flags |= CLIENT_IS_FCOE; | ||
6817 | 7053 | ||
6818 | #endif | 7054 | /* Set the command */ |
7055 | q_params.cmd = BNX2X_Q_CMD_INIT; | ||
6819 | 7056 | ||
6820 | if (is_leading) | 7057 | /* Change the state to INIT */ |
6821 | params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS; | 7058 | rc = bnx2x_queue_state_change(bp, &q_params); |
7059 | if (rc) { | ||
7060 | BNX2X_ERR("Queue INIT failed\n"); | ||
7061 | return rc; | ||
7062 | } | ||
7063 | |||
7064 | /* Now move the Queue to the SETUP state... */ | ||
7065 | memset(setup_params, 0, sizeof(*setup_params)); | ||
7066 | |||
7067 | /* Set QUEUE flags */ | ||
7068 | setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); | ||
7069 | |||
7070 | /* Set general SETUP parameters */ | ||
7071 | bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params); | ||
7072 | |||
7073 | bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause, | ||
7074 | &setup_params->rxq_params); | ||
6822 | 7075 | ||
6823 | bnx2x_pf_rx_cl_prep(bp, fp, ¶ms.pause, ¶ms.rxq_params); | 7076 | bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params); |
6824 | 7077 | ||
6825 | bnx2x_pf_tx_cl_prep(bp, fp, ¶ms.txq_params); | 7078 | /* Set the command */ |
7079 | q_params.cmd = BNX2X_Q_CMD_SETUP; | ||
7080 | |||
7081 | /* Change the state to SETUP */ | ||
7082 | rc = bnx2x_queue_state_change(bp, &q_params); | ||
7083 | if (rc) | ||
7084 | BNX2X_ERR("Queue SETUP failed\n"); | ||
6826 | 7085 | ||
6827 | rc = bnx2x_setup_fw_client(bp, ¶ms, 1, | ||
6828 | bnx2x_sp(bp, client_init_data), | ||
6829 | bnx2x_sp_mapping(bp, client_init_data)); | ||
6830 | return rc; | 7086 | return rc; |
6831 | } | 7087 | } |
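The new bnx2x_setup_queue() drives the queue object through two explicit transitions, RESET->INIT and then INIT->SETUP, and fails fast if either state change is rejected. A toy state machine with the same shape is sketched below; the states, commands and queue_state_change() helper are invented simplifications of bnx2x_queue_state_change() in bnx2x_sp.c.

#include <stdio.h>

enum q_state { Q_RESET, Q_INIT, Q_SETUP };
enum q_cmd   { Q_CMD_INIT, Q_CMD_SETUP };

/* Accept a command only from the state it is legal in */
static int queue_state_change(enum q_state *state, enum q_cmd cmd)
{
	if (cmd == Q_CMD_INIT && *state == Q_RESET) {
		*state = Q_INIT;
		return 0;
	}
	if (cmd == Q_CMD_SETUP && *state == Q_INIT) {
		*state = Q_SETUP;
		return 0;
	}
	return -1;	/* illegal transition */
}

static int setup_queue(enum q_state *state)
{
	if (queue_state_change(state, Q_CMD_INIT)) {
		fprintf(stderr, "Queue INIT failed\n");
		return -1;
	}
	if (queue_state_change(state, Q_CMD_SETUP)) {
		fprintf(stderr, "Queue SETUP failed\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	enum q_state s = Q_RESET;

	printf("setup_queue: %s\n", setup_queue(&s) ? "error" : "ok");
	return 0;
}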
6832 | 7088 | ||
6833 | static int bnx2x_stop_fw_client(struct bnx2x *bp, | 7089 | static int bnx2x_stop_queue(struct bnx2x *bp, int index) |
6834 | struct bnx2x_client_ramrod_params *p) | ||
6835 | { | 7090 | { |
7091 | struct bnx2x_fastpath *fp = &bp->fp[index]; | ||
7092 | struct bnx2x_queue_state_params q_params = {0}; | ||
6836 | int rc; | 7093 | int rc; |
6837 | 7094 | ||
6838 | int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0; | 7095 | q_params.q_obj = &fp->q_obj; |
7096 | /* We want to wait for completion in this context */ | ||
7097 | __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); | ||
6839 | 7098 | ||
6840 | /* halt the connection */ | 7099 | /* halt the connection */ |
6841 | *p->pstate = BNX2X_FP_STATE_HALTING; | 7100 | q_params.cmd = BNX2X_Q_CMD_HALT; |
6842 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0, | 7101 | rc = bnx2x_queue_state_change(bp, &q_params); |
6843 | p->cl_id, 0); | 7102 | if (rc) |
6844 | |||
6845 | /* Wait for completion */ | ||
6846 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index, | ||
6847 | p->pstate, poll_flag); | ||
6848 | if (rc) /* timeout */ | ||
6849 | return rc; | 7103 | return rc; |
6850 | 7104 | ||
6851 | *p->pstate = BNX2X_FP_STATE_TERMINATING; | 7105 | /* terminate the connection */ |
6852 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0, | 7106 | q_params.cmd = BNX2X_Q_CMD_TERMINATE; |
6853 | p->cl_id, 0); | 7107 | rc = bnx2x_queue_state_change(bp, &q_params); |
6854 | /* Wait for completion */ | 7108 | if (rc) |
6855 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index, | ||
6856 | p->pstate, poll_flag); | ||
6857 | if (rc) /* timeout */ | ||
6858 | return rc; | 7109 | return rc; |
6859 | 7110 | ||
6860 | |||
6861 | /* delete cfc entry */ | 7111 | /* delete cfc entry */ |
6862 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1); | 7112 | q_params.cmd = BNX2X_Q_CMD_CFC_DEL; |
6863 | 7113 | return bnx2x_queue_state_change(bp, &q_params); | |
6864 | /* Wait for completion */ | ||
6865 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index, | ||
6866 | p->pstate, WAIT_RAMROD_COMMON); | ||
6867 | return rc; | ||
6868 | } | ||
6869 | |||
6870 | static int bnx2x_stop_client(struct bnx2x *bp, int index) | ||
6871 | { | ||
6872 | struct bnx2x_client_ramrod_params client_stop = {0}; | ||
6873 | struct bnx2x_fastpath *fp = &bp->fp[index]; | ||
6874 | |||
6875 | client_stop.index = index; | ||
6876 | client_stop.cid = fp->cid; | ||
6877 | client_stop.cl_id = fp->cl_id; | ||
6878 | client_stop.pstate = &(fp->state); | ||
6879 | client_stop.poll = 0; | ||
6880 | |||
6881 | return bnx2x_stop_fw_client(bp, &client_stop); | ||
6882 | } | 7114 | } |
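bnx2x_stop_queue() issues three ordered commands -- HALT, then TERMINATE, then CFC_DEL -- and returns on the first failure so later commands are never sent for a connection that did not halt. The "stop at the first error" ordering in isolation, with an invented issue_cmd() stand-in for posting a ramrod and waiting for its completion:

#include <stdio.h>

enum stop_cmd { CMD_HALT, CMD_TERMINATE, CMD_CFC_DEL };

/* Invented stand-in for posting a command and waiting for completion */
static int issue_cmd(enum stop_cmd cmd)
{
	printf("issuing command %d\n", (int)cmd);
	return 0;	/* pretend every step completes */
}

static int stop_queue(void)
{
	static const enum stop_cmd order[] = {
		CMD_HALT, CMD_TERMINATE, CMD_CFC_DEL
	};
	int rc;

	for (unsigned int i = 0; i < sizeof(order) / sizeof(order[0]); i++) {
		rc = issue_cmd(order[i]);
		if (rc)
			return rc;	/* never skip ahead past a failure */
	}
	return 0;
}

int main(void)
{
	return stop_queue() ? 1 : 0;
}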
6883 | 7115 | ||
6884 | 7116 | ||
@@ -6887,12 +7119,6 @@ static void bnx2x_reset_func(struct bnx2x *bp) | |||
6887 | int port = BP_PORT(bp); | 7119 | int port = BP_PORT(bp); |
6888 | int func = BP_FUNC(bp); | 7120 | int func = BP_FUNC(bp); |
6889 | int i; | 7121 | int i; |
6890 | int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) + | ||
6891 | (CHIP_IS_E2(bp) ? | ||
6892 | offsetof(struct hc_status_block_data_e2, common) : | ||
6893 | offsetof(struct hc_status_block_data_e1x, common)); | ||
6894 | int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func); | ||
6895 | int pfid_offset = offsetof(struct pci_entity, pf_id); | ||
6896 | 7122 | ||
6897 | /* Disable the function in the FW */ | 7123 | /* Disable the function in the FW */ |
6898 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); | 7124 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); |
@@ -6903,20 +7129,21 @@ static void bnx2x_reset_func(struct bnx2x *bp) | |||
6903 | /* FP SBs */ | 7129 | /* FP SBs */ |
6904 | for_each_eth_queue(bp, i) { | 7130 | for_each_eth_queue(bp, i) { |
6905 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 7131 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
6906 | REG_WR8(bp, | 7132 | REG_WR8(bp, BAR_CSTRORM_INTMEM + |
6907 | BAR_CSTRORM_INTMEM + | 7133 | CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), |
6908 | CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) | 7134 | SB_DISABLED); |
6909 | + pfunc_offset_fp + pfid_offset, | ||
6910 | HC_FUNCTION_DISABLED); | ||
6911 | } | 7135 | } |
6912 | 7136 | ||
7137 | #ifdef BCM_CNIC | ||
7138 | /* CNIC SB */ | ||
7139 | REG_WR8(bp, BAR_CSTRORM_INTMEM + | ||
7140 | CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)), | ||
7141 | SB_DISABLED); | ||
7142 | #endif | ||
6913 | /* SP SB */ | 7143 | /* SP SB */ |
6914 | REG_WR8(bp, | 7144 | REG_WR8(bp, BAR_CSTRORM_INTMEM + |
6915 | BAR_CSTRORM_INTMEM + | 7145 | CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), |
6916 | CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + | 7146 | SB_DISABLED); |
6917 | pfunc_offset_sp + pfid_offset, | ||
6918 | HC_FUNCTION_DISABLED); | ||
6919 | |||
6920 | 7147 | ||
6921 | for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) | 7148 | for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) |
6922 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), | 7149 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), |
@@ -6950,7 +7177,7 @@ static void bnx2x_reset_func(struct bnx2x *bp) | |||
6950 | /* Timers workaround bug for E2: if this is vnic-3, | 7177 | /* Timers workaround bug for E2: if this is vnic-3, |
6951 | * we need to set the entire ilt range for this timers. | 7178 | * we need to set the entire ilt range for this timers. |
6952 | */ | 7179 | */ |
6953 | if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) { | 7180 | if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) { |
6954 | struct ilt_client_info ilt_cli; | 7181 | struct ilt_client_info ilt_cli; |
6955 | /* use dummy TM client */ | 7182 | /* use dummy TM client */ |
6956 | memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); | 7183 | memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); |
@@ -6962,7 +7189,7 @@ static void bnx2x_reset_func(struct bnx2x *bp) | |||
6962 | } | 7189 | } |
6963 | 7190 | ||
6964 | /* this assumes that reset_port() called before reset_func()*/ | 7191 | /* this assumes that reset_port() called before reset_func()*/ |
6965 | if (CHIP_IS_E2(bp)) | 7192 | if (!CHIP_IS_E1x(bp)) |
6966 | bnx2x_pf_disable(bp); | 7193 | bnx2x_pf_disable(bp); |
6967 | 7194 | ||
6968 | bp->dmae_ready = 0; | 7195 | bp->dmae_ready = 0; |
@@ -6973,6 +7200,9 @@ static void bnx2x_reset_port(struct bnx2x *bp) | |||
6973 | int port = BP_PORT(bp); | 7200 | int port = BP_PORT(bp); |
6974 | u32 val; | 7201 | u32 val; |
6975 | 7202 | ||
7203 | /* Reset physical Link */ | ||
7204 | bnx2x__link_reset(bp); | ||
7205 | |||
6976 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); | 7206 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); |
6977 | 7207 | ||
6978 | /* Do not rcv packets to BRB */ | 7208 | /* Do not rcv packets to BRB */ |
@@ -6994,92 +7224,66 @@ static void bnx2x_reset_port(struct bnx2x *bp) | |||
6994 | /* TODO: Close Doorbell port? */ | 7224 | /* TODO: Close Doorbell port? */ |
6995 | } | 7225 | } |
6996 | 7226 | ||
6997 | static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) | 7227 | static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) |
6998 | { | 7228 | { |
6999 | DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", | 7229 | struct bnx2x_func_state_params func_params = {0}; |
7000 | BP_ABS_FUNC(bp), reset_code); | ||
7001 | 7230 | ||
7002 | switch (reset_code) { | 7231 | /* Prepare parameters for function state transitions */ |
7003 | case FW_MSG_CODE_DRV_UNLOAD_COMMON: | 7232 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); |
7004 | bnx2x_reset_port(bp); | ||
7005 | bnx2x_reset_func(bp); | ||
7006 | bnx2x_reset_common(bp); | ||
7007 | break; | ||
7008 | 7233 | ||
7009 | case FW_MSG_CODE_DRV_UNLOAD_PORT: | 7234 | func_params.f_obj = &bp->func_obj; |
7010 | bnx2x_reset_port(bp); | 7235 | func_params.cmd = BNX2X_F_CMD_HW_RESET; |
7011 | bnx2x_reset_func(bp); | ||
7012 | break; | ||
7013 | 7236 | ||
7014 | case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: | 7237 | func_params.params.hw_init.load_phase = load_code; |
7015 | bnx2x_reset_func(bp); | ||
7016 | break; | ||
7017 | 7238 | ||
7018 | default: | 7239 | return bnx2x_func_state_change(bp, &func_params); |
7019 | BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code); | ||
7020 | break; | ||
7021 | } | ||
7022 | } | 7240 | } |
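The old switch on the MCP reset code is replaced by a single bnx2x_func_state_change() entry point that takes a parameter block carrying the command (BNX2X_F_CMD_HW_RESET here, BNX2X_F_CMD_STOP just below) plus per-command data. A minimal sketch of that "one params struct, one dispatcher" shape, with invented names (func_params, FUNC_CMD_*):

#include <stdio.h>

enum func_cmd { FUNC_CMD_HW_RESET, FUNC_CMD_STOP };

struct func_params {
	enum func_cmd cmd;
	union {
		struct { int load_phase; } hw_reset;  /* used by HW_RESET */
	} params;
};

/* Single entry point that dispatches on the requested command */
static int func_state_change(const struct func_params *p)
{
	switch (p->cmd) {
	case FUNC_CMD_HW_RESET:
		printf("HW reset, load phase %d\n", p->params.hw_reset.load_phase);
		return 0;
	case FUNC_CMD_STOP:
		printf("function stop\n");
		return 0;
	}
	return -1;
}

int main(void)
{
	struct func_params p = { .cmd = FUNC_CMD_HW_RESET };

	p.params.hw_reset.load_phase = 2;	/* arbitrary example value */
	return func_state_change(&p) ? 1 : 0;
}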
7023 | 7241 | ||
7024 | #ifdef BCM_CNIC | 7242 | static inline int bnx2x_func_stop(struct bnx2x *bp) |
7025 | static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp) | ||
7026 | { | 7243 | { |
7027 | if (bp->flags & FCOE_MACS_SET) { | 7244 | struct bnx2x_func_state_params func_params = {0}; |
7028 | if (!IS_MF_SD(bp)) | 7245 | int rc; |
7029 | bnx2x_set_fip_eth_mac_addr(bp, 0); | ||
7030 | |||
7031 | bnx2x_set_all_enode_macs(bp, 0); | ||
7032 | |||
7033 | bp->flags &= ~FCOE_MACS_SET; | ||
7034 | } | ||
7035 | } | ||
7036 | #endif | ||
7037 | |||
7038 | void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) | ||
7039 | { | ||
7040 | int port = BP_PORT(bp); | ||
7041 | u32 reset_code = 0; | ||
7042 | int i, cnt, rc; | ||
7043 | |||
7044 | /* Wait until tx fastpath tasks complete */ | ||
7045 | for_each_tx_queue(bp, i) { | ||
7046 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
7047 | 7246 | ||
7048 | cnt = 1000; | 7247 | /* Prepare parameters for function state transitions */ |
7049 | while (bnx2x_has_tx_work_unload(fp)) { | 7248 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); |
7249 | func_params.f_obj = &bp->func_obj; | ||
7250 | func_params.cmd = BNX2X_F_CMD_STOP; | ||
7050 | 7251 | ||
7051 | if (!cnt) { | 7252 | /* |
7052 | BNX2X_ERR("timeout waiting for queue[%d]\n", | 7253 | * Try to stop the function the 'good way'. If it fails (in case
7053 | i); | 7254 | * of a parity error during bnx2x_chip_cleanup()) and we are |
7255 | * not in a debug mode, perform a state transaction in order to | ||
7256 | * enable further HW_RESET transaction. | ||
7257 | */ | ||
7258 | rc = bnx2x_func_state_change(bp, &func_params); | ||
7259 | if (rc) { | ||
7054 | #ifdef BNX2X_STOP_ON_ERROR | 7260 | #ifdef BNX2X_STOP_ON_ERROR |
7055 | bnx2x_panic(); | 7261 | return rc; |
7056 | return -EBUSY; | ||
7057 | #else | 7262 | #else |
7058 | break; | 7263 | BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry " |
7264 | "transaction\n"); | ||
7265 | __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); | ||
7266 | return bnx2x_func_state_change(bp, &func_params); | ||
7059 | #endif | 7267 | #endif |
7060 | } | ||
7061 | cnt--; | ||
7062 | msleep(1); | ||
7063 | } | ||
7064 | } | 7268 | } |
7065 | /* Give HW time to discard old tx messages */ | ||
7066 | msleep(1); | ||
7067 | |||
7068 | bnx2x_set_eth_mac(bp, 0); | ||
7069 | 7269 | ||
7070 | bnx2x_invalidate_uc_list(bp); | 7270 | return 0; |
7071 | 7271 | } | |
7072 | if (CHIP_IS_E1(bp)) | ||
7073 | bnx2x_invalidate_e1_mc_list(bp); | ||
7074 | else { | ||
7075 | bnx2x_invalidate_e1h_mc_list(bp); | ||
7076 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); | ||
7077 | } | ||
7078 | 7272 | ||
7079 | #ifdef BCM_CNIC | 7273 | /** |
7080 | bnx2x_del_fcoe_eth_macs(bp); | 7274 | * bnx2x_send_unload_req - request unload mode from the MCP. |
7081 | #endif | 7275 | * |
7276 | * @bp: driver handle | ||
7277 | * @unload_mode: requested function's unload mode | ||
7278 | * | ||
7279 | * Return unload mode returned by the MCP: COMMON, PORT or FUNC. | ||
7280 | */ | ||
7281 | u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) | ||
7282 | { | ||
7283 | u32 reset_code = 0; | ||
7284 | int port = BP_PORT(bp); | ||
7082 | 7285 | ||
7286 | /* Select the UNLOAD request mode */ | ||
7083 | if (unload_mode == UNLOAD_NORMAL) | 7287 | if (unload_mode == UNLOAD_NORMAL) |
7084 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | 7288 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
7085 | 7289 | ||
@@ -7106,54 +7310,135 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) | |||
7106 | } else | 7310 | } else |
7107 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | 7311 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
7108 | 7312 | ||
7313 | /* Send the request to the MCP */ | ||
7314 | if (!BP_NOMCP(bp)) | ||
7315 | reset_code = bnx2x_fw_command(bp, reset_code, 0); | ||
7316 | else { | ||
7317 | int path = BP_PATH(bp); | ||
7318 | |||
7319 | DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] " | ||
7320 | "%d, %d, %d\n", | ||
7321 | path, load_count[path][0], load_count[path][1], | ||
7322 | load_count[path][2]); | ||
7323 | load_count[path][0]--; | ||
7324 | load_count[path][1 + port]--; | ||
7325 | DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] " | ||
7326 | "%d, %d, %d\n", | ||
7327 | path, load_count[path][0], load_count[path][1], | ||
7328 | load_count[path][2]); | ||
7329 | if (load_count[path][0] == 0) | ||
7330 | reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; | ||
7331 | else if (load_count[path][1 + port] == 0) | ||
7332 | reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; | ||
7333 | else | ||
7334 | reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; | ||
7335 | } | ||
7336 | |||
7337 | return reset_code; | ||
7338 | } | ||
7339 | |||
7340 | /** | ||
7341 | * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. | ||
7342 | * | ||
7343 | * @bp: driver handle | ||
7344 | */ | ||
7345 | void bnx2x_send_unload_done(struct bnx2x *bp) | ||
7346 | { | ||
7347 | /* Report UNLOAD_DONE to MCP */ | ||
7348 | if (!BP_NOMCP(bp)) | ||
7349 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); | ||
7350 | } | ||
7351 | |||
7352 | void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) | ||
7353 | { | ||
7354 | int port = BP_PORT(bp); | ||
7355 | int i, rc; | ||
7356 | struct bnx2x_mcast_ramrod_params rparam = {0}; | ||
7357 | u32 reset_code; | ||
7358 | |||
7359 | /* Wait until tx fastpath tasks complete */ | ||
7360 | for_each_tx_queue(bp, i) { | ||
7361 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
7362 | |||
7363 | rc = bnx2x_clean_tx_queue(bp, fp); | ||
7364 | #ifdef BNX2X_STOP_ON_ERROR | ||
7365 | if (rc) | ||
7366 | return; | ||
7367 | #endif | ||
7368 | } | ||
7369 | |||
7370 | /* Give HW time to discard old tx messages */ | ||
7371 | usleep_range(1000, 1000); | ||
7372 | |||
7373 | /* Clean all ETH MACs */ | ||
7374 | rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false); | ||
7375 | if (rc < 0) | ||
7376 | BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); | ||
7377 | |||
7378 | /* Clean up UC list */ | ||
7379 | rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC, | ||
7380 | true); | ||
7381 | if (rc < 0) | ||
7382 | BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: " | ||
7383 | "%d\n", rc); | ||
7384 | |||
7385 | /* Disable LLH */ | ||
7386 | if (!CHIP_IS_E1(bp)) | ||
7387 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); | ||
7388 | |||
7389 | /* Set "drop all" (stop Rx). | ||
7390 | * We need to take a netif_addr_lock() here in order to prevent | ||
7391 | * a race between the completion code and this code. | ||
7392 | */ | ||
7393 | netif_addr_lock_bh(bp->dev); | ||
7394 | /* Schedule the rx_mode command */ | ||
7395 | if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) | ||
7396 | set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); | ||
7397 | else | ||
7398 | bnx2x_set_storm_rx_mode(bp); | ||
7399 | |||
7400 | /* Cleanup multicast configuration */ | ||
7401 | rparam.mcast_obj = &bp->mcast_obj; | ||
7402 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); | ||
7403 | if (rc < 0) | ||
7404 | BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc); | ||
7405 | |||
7406 | netif_addr_unlock_bh(bp->dev); | ||
7407 | |||
7408 | |||
7109 | /* Close multi and leading connections | 7409 | /* Close multi and leading connections |
7110 | Completions for ramrods are collected in a synchronous way */ | 7410 | * Completions for ramrods are collected in a synchronous way |
7411 | */ | ||
7111 | for_each_queue(bp, i) | 7412 | for_each_queue(bp, i) |
7112 | 7413 | if (bnx2x_stop_queue(bp, i)) | |
7113 | if (bnx2x_stop_client(bp, i)) | ||
7114 | #ifdef BNX2X_STOP_ON_ERROR | 7414 | #ifdef BNX2X_STOP_ON_ERROR |
7115 | return; | 7415 | return; |
7116 | #else | 7416 | #else |
7117 | goto unload_error; | 7417 | goto unload_error; |
7118 | #endif | 7418 | #endif |
7419 | /* If SP settings didn't get completed so far - something | ||
7420 | * very wrong has happened. | ||
7421 | */ | ||
7422 | if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) | ||
7423 | BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n"); | ||
7119 | 7424 | ||
7425 | #ifndef BNX2X_STOP_ON_ERROR | ||
7426 | unload_error: | ||
7427 | #endif | ||
7120 | rc = bnx2x_func_stop(bp); | 7428 | rc = bnx2x_func_stop(bp); |
7121 | if (rc) { | 7429 | if (rc) { |
7122 | BNX2X_ERR("Function stop failed!\n"); | 7430 | BNX2X_ERR("Function stop failed!\n"); |
7123 | #ifdef BNX2X_STOP_ON_ERROR | 7431 | #ifdef BNX2X_STOP_ON_ERROR |
7124 | return; | 7432 | return; |
7125 | #else | ||
7126 | goto unload_error; | ||
7127 | #endif | 7433 | #endif |
7128 | } | 7434 | } |
7129 | #ifndef BNX2X_STOP_ON_ERROR | ||
7130 | unload_error: | ||
7131 | #endif | ||
7132 | if (!BP_NOMCP(bp)) | ||
7133 | reset_code = bnx2x_fw_command(bp, reset_code, 0); | ||
7134 | else { | ||
7135 | DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] " | ||
7136 | "%d, %d, %d\n", BP_PATH(bp), | ||
7137 | load_count[BP_PATH(bp)][0], | ||
7138 | load_count[BP_PATH(bp)][1], | ||
7139 | load_count[BP_PATH(bp)][2]); | ||
7140 | load_count[BP_PATH(bp)][0]--; | ||
7141 | load_count[BP_PATH(bp)][1 + port]--; | ||
7142 | DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] " | ||
7143 | "%d, %d, %d\n", BP_PATH(bp), | ||
7144 | load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1], | ||
7145 | load_count[BP_PATH(bp)][2]); | ||
7146 | if (load_count[BP_PATH(bp)][0] == 0) | ||
7147 | reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; | ||
7148 | else if (load_count[BP_PATH(bp)][1 + port] == 0) | ||
7149 | reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; | ||
7150 | else | ||
7151 | reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; | ||
7152 | } | ||
7153 | 7435 | ||
7154 | if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) || | 7436 | /* |
7155 | (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT)) | 7437 | * Send the UNLOAD_REQUEST to the MCP. This will return if |
7156 | bnx2x__link_reset(bp); | 7438 | * this function should perform FUNC, PORT or COMMON HW |
7439 | * reset. | ||
7440 | */ | ||
7441 | reset_code = bnx2x_send_unload_req(bp, unload_mode); | ||
7157 | 7442 | ||
7158 | /* Disable HW interrupts, NAPI */ | 7443 | /* Disable HW interrupts, NAPI */ |
7159 | bnx2x_netif_stop(bp, 1); | 7444 | bnx2x_netif_stop(bp, 1); |
@@ -7162,12 +7447,13 @@ unload_error: | |||
7162 | bnx2x_free_irq(bp); | 7447 | bnx2x_free_irq(bp); |
7163 | 7448 | ||
7164 | /* Reset the chip */ | 7449 | /* Reset the chip */ |
7165 | bnx2x_reset_chip(bp, reset_code); | 7450 | rc = bnx2x_reset_hw(bp, reset_code); |
7451 | if (rc) | ||
7452 | BNX2X_ERR("HW_RESET failed\n"); | ||
7166 | 7453 | ||
7167 | /* Report UNLOAD_DONE to MCP */ | ||
7168 | if (!BP_NOMCP(bp)) | ||
7169 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); | ||
7170 | 7454 | ||
7455 | /* Report UNLOAD_DONE to MCP */ | ||
7456 | bnx2x_send_unload_done(bp); | ||
7171 | } | 7457 | } |
7172 | 7458 | ||
7173 | void bnx2x_disable_close_the_gate(struct bnx2x *bp) | 7459 | void bnx2x_disable_close_the_gate(struct bnx2x *bp) |
@@ -7184,7 +7470,7 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp) | |||
7184 | val = REG_RD(bp, addr); | 7470 | val = REG_RD(bp, addr); |
7185 | val &= ~(0x300); | 7471 | val &= ~(0x300); |
7186 | REG_WR(bp, addr, val); | 7472 | REG_WR(bp, addr, val); |
7187 | } else if (CHIP_IS_E1H(bp)) { | 7473 | } else { |
7188 | val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); | 7474 | val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); |
7189 | val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | | 7475 | val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | |
7190 | MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); | 7476 | MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); |
@@ -7195,24 +7481,37 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp) | |||
7195 | /* Close gates #2, #3 and #4: */ | 7481 | /* Close gates #2, #3 and #4: */ |
7196 | static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) | 7482 | static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) |
7197 | { | 7483 | { |
7198 | u32 val, addr; | 7484 | u32 val; |
7199 | 7485 | ||
7200 | /* Gates #2 and #4a are closed/opened for "not E1" only */ | 7486 | /* Gates #2 and #4a are closed/opened for "not E1" only */ |
7201 | if (!CHIP_IS_E1(bp)) { | 7487 | if (!CHIP_IS_E1(bp)) { |
7202 | /* #4 */ | 7488 | /* #4 */ |
7203 | val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS); | 7489 | REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close); |
7204 | REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, | ||
7205 | close ? (val | 0x1) : (val & (~(u32)1))); | ||
7206 | /* #2 */ | 7490 | /* #2 */ |
7207 | val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES); | 7491 | REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); |
7208 | REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, | ||
7209 | close ? (val | 0x1) : (val & (~(u32)1))); | ||
7210 | } | 7492 | } |
7211 | 7493 | ||
7212 | /* #3 */ | 7494 | /* #3 */ |
7213 | addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; | 7495 | if (CHIP_IS_E1x(bp)) { |
7214 | val = REG_RD(bp, addr); | 7496 | /* Prevent interrupts from HC on both ports */ |
7215 | REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1))); | 7497 | val = REG_RD(bp, HC_REG_CONFIG_1); |
7498 | REG_WR(bp, HC_REG_CONFIG_1, | ||
7499 | (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : | ||
7500 | (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); | ||
7501 | |||
7502 | val = REG_RD(bp, HC_REG_CONFIG_0); | ||
7503 | REG_WR(bp, HC_REG_CONFIG_0, | ||
7504 | (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : | ||
7505 | (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); | ||
7506 | } else { | ||
7507 | /* Prevent incoming interrupts in IGU */ | ||
7508 | val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); | ||
7509 | |||
7510 | REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, | ||
7511 | (!close) ? | ||
7512 | (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : | ||
7513 | (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); | ||
7514 | } | ||
7216 | 7515 | ||
7217 | DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n", | 7516 | DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n", |
7218 | close ? "closing" : "opening"); | 7517 | close ? "closing" : "opening"); |
@@ -7330,7 +7629,6 @@ static void bnx2x_pxp_prep(struct bnx2x *bp) | |||
7330 | if (!CHIP_IS_E1(bp)) { | 7629 | if (!CHIP_IS_E1(bp)) { |
7331 | REG_WR(bp, PXP2_REG_RD_START_INIT, 0); | 7630 | REG_WR(bp, PXP2_REG_RD_START_INIT, 0); |
7332 | REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); | 7631 | REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); |
7333 | REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0); | ||
7334 | mmiowb(); | 7632 | mmiowb(); |
7335 | } | 7633 | } |
7336 | } | 7634 | } |
@@ -7345,9 +7643,18 @@ static void bnx2x_pxp_prep(struct bnx2x *bp) | |||
7345 | * - GRC | 7643 | * - GRC |
7346 | * - RBCN, RBCP | 7644 | * - RBCN, RBCP |
7347 | */ | 7645 | */ |
7348 | static void bnx2x_process_kill_chip_reset(struct bnx2x *bp) | 7646 | static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) |
7349 | { | 7647 | { |
7350 | u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; | 7648 | u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; |
7649 | u32 global_bits2; | ||
7650 | |||
7651 | /* | ||
7652 | * Bits that have to be set in reset_mask2 if we want to reset 'global' | ||
7653 | * (per chip) blocks. | ||
7654 | */ | ||
7655 | global_bits2 = | ||
7656 | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | | ||
7657 | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; | ||
7351 | 7658 | ||
7352 | not_reset_mask1 = | 7659 | not_reset_mask1 = |
7353 | MISC_REGISTERS_RESET_REG_1_RST_HC | | 7660 | MISC_REGISTERS_RESET_REG_1_RST_HC | |
@@ -7355,7 +7662,7 @@ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp) | |||
7355 | MISC_REGISTERS_RESET_REG_1_RST_PXP; | 7662 | MISC_REGISTERS_RESET_REG_1_RST_PXP; |
7356 | 7663 | ||
7357 | not_reset_mask2 = | 7664 | not_reset_mask2 = |
7358 | MISC_REGISTERS_RESET_REG_2_RST_MDIO | | 7665 | MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | |
7359 | MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | | 7666 | MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | |
7360 | MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | | 7667 | MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | |
7361 | MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | | 7668 | MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | |
@@ -7371,20 +7678,76 @@ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp) | |||
7371 | else | 7678 | else |
7372 | reset_mask2 = 0x1ffff; | 7679 | reset_mask2 = 0x1ffff; |
7373 | 7680 | ||
7374 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | 7681 | if (CHIP_IS_E3(bp)) { |
7375 | reset_mask1 & (~not_reset_mask1)); | 7682 | reset_mask2 |= MISC_REGISTERS_RESET_REG_2_MSTAT0; |
7683 | reset_mask2 |= MISC_REGISTERS_RESET_REG_2_MSTAT1; | ||
7684 | } | ||
7685 | |||
7686 | /* Don't reset global blocks unless we need to */ | ||
7687 | if (!global) | ||
7688 | reset_mask2 &= ~global_bits2; | ||
7689 | |||
7690 | /* | ||
7691 | * In case of attention in the QM, we need to reset PXP | ||
7692 | * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM | ||
7693 | * because otherwise QM reset would release 'close the gates' shortly | ||
7694 | * before resetting the PXP, then the PSWRQ would send a write | ||
7695 | * request to PGLUE. Then when PXP is reset, PGLUE would try to | ||
7696 | * read the payload data from PSWWR, but PSWWR would not | ||
7697 | * respond. The write queue in PGLUE would get stuck, dmae commands | ||
7698 | * would not return. Therefore it's important to reset the second | ||
7699 | * reset register (containing the | ||
7700 | * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the | ||
7701 | * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM | ||
7702 | * bit). | ||
7703 | */ | ||
7376 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | 7704 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, |
7377 | reset_mask2 & (~not_reset_mask2)); | 7705 | reset_mask2 & (~not_reset_mask2)); |
7378 | 7706 | ||
7707 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | ||
7708 | reset_mask1 & (~not_reset_mask1)); | ||
7709 | |||
7379 | barrier(); | 7710 | barrier(); |
7380 | mmiowb(); | 7711 | mmiowb(); |
7381 | 7712 | ||
7382 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); | ||
7383 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2); | 7713 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2); |
7714 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); | ||
7384 | mmiowb(); | 7715 | mmiowb(); |
7385 | } | 7716 | } |
7386 | 7717 | ||
7387 | static int bnx2x_process_kill(struct bnx2x *bp) | 7718 | /** |
7719 | * bnx2x_er_poll_igu_vq - poll for pending writes bit. | ||
7720 | * It should get cleared in no more than 1s. | ||
7721 | * | ||
7722 | * @bp: driver handle | ||
7723 | * | ||
7724 | * Returns 0 if the pending writes bit gets cleared within | ||
7725 | * that time, -EBUSY otherwise. | ||
7726 | */ | ||
7727 | static int bnx2x_er_poll_igu_vq(struct bnx2x *bp) | ||
7728 | { | ||
7729 | u32 cnt = 1000; | ||
7730 | u32 pend_bits = 0; | ||
7731 | |||
7732 | do { | ||
7733 | pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS); | ||
7734 | |||
7735 | if (pend_bits == 0) | ||
7736 | break; | ||
7737 | |||
7738 | usleep_range(1000, 1000); | ||
7739 | } while (cnt-- > 0); | ||
7740 | |||
7741 | if (cnt <= 0) { | ||
7742 | BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n", | ||
7743 | pend_bits); | ||
7744 | return -EBUSY; | ||
7745 | } | ||
7746 | |||
7747 | return 0; | ||
7748 | } | ||
7749 | |||
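bnx2x_er_poll_igu_vq() above bounds its wait to roughly one second: it samples IGU_REG_PENDING_BITS_STATUS about once per millisecond for at most 1000 iterations and returns -EBUSY if the pending bits never clear. A minimal user-space sketch of the same bounded-poll pattern follows; the helper names and the fake register are invented for the illustration and are not part of the driver.

    /*
     * Illustrative sketch only: poll a condition about once per
     * millisecond and give up after ~1 second, mirroring the loop in
     * bnx2x_er_poll_igu_vq().  fake_pending_reg stands in for a
     * hardware register that drains over time.
     */
    #include <stdio.h>
    #include <unistd.h>

    static unsigned int fake_pending_reg = 3;

    static unsigned int read_pending(void)
    {
            if (fake_pending_reg)
                    fake_pending_reg--;
            return fake_pending_reg;
    }

    static int poll_cleared(void)
    {
            int cnt = 1000;

            do {
                    if (read_pending() == 0)
                            return 0;               /* cleared in time */
                    usleep(1000);                   /* ~1 ms per iteration */
            } while (--cnt > 0);

            return -1;                              /* still pending, like -EBUSY */
    }

    int main(void)
    {
            printf("poll result: %d\n", poll_cleared());
            return 0;
    }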
7750 | static int bnx2x_process_kill(struct bnx2x *bp, bool global) | ||
7388 | { | 7751 | { |
7389 | int cnt = 1000; | 7752 | int cnt = 1000; |
7390 | u32 val = 0; | 7753 | u32 val = 0; |
@@ -7403,7 +7766,7 @@ static int bnx2x_process_kill(struct bnx2x *bp) | |||
7403 | ((port_is_idle_1 & 0x1) == 0x1) && | 7766 | ((port_is_idle_1 & 0x1) == 0x1) && |
7404 | (pgl_exp_rom2 == 0xffffffff)) | 7767 | (pgl_exp_rom2 == 0xffffffff)) |
7405 | break; | 7768 | break; |
7406 | msleep(1); | 7769 | usleep_range(1000, 1000); |
7407 | } while (cnt-- > 0); | 7770 | } while (cnt-- > 0); |
7408 | 7771 | ||
7409 | if (cnt <= 0) { | 7772 | if (cnt <= 0) { |
@@ -7423,6 +7786,11 @@ static int bnx2x_process_kill(struct bnx2x *bp) | |||
7423 | /* Close gates #2, #3 and #4 */ | 7786 | /* Close gates #2, #3 and #4 */ |
7424 | bnx2x_set_234_gates(bp, true); | 7787 | bnx2x_set_234_gates(bp, true); |
7425 | 7788 | ||
7789 | /* Poll for IGU VQs for 57712 and newer chips */ | ||
7790 | if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) | ||
7791 | return -EAGAIN; | ||
7792 | |||
7793 | |||
7426 | /* TBD: Indicate that "process kill" is in progress to MCP */ | 7794 | /* TBD: Indicate that "process kill" is in progress to MCP */ |
7427 | 7795 | ||
7428 | /* Clear "unprepared" bit */ | 7796 | /* Clear "unprepared" bit */ |
@@ -7435,25 +7803,28 @@ static int bnx2x_process_kill(struct bnx2x *bp) | |||
7435 | /* Wait for 1ms to empty GLUE and PCI-E core queues, | 7803 | /* Wait for 1ms to empty GLUE and PCI-E core queues, |
7436 | * PSWHST, GRC and PSWRD Tetris buffer. | 7804 | * PSWHST, GRC and PSWRD Tetris buffer. |
7437 | */ | 7805 | */ |
7438 | msleep(1); | 7806 | usleep_range(1000, 1000); |
7439 | 7807 | ||
7440 | /* Prepare to chip reset: */ | 7808 | /* Prepare to chip reset: */ |
7441 | /* MCP */ | 7809 | /* MCP */ |
7442 | bnx2x_reset_mcp_prep(bp, &val); | 7810 | if (global) |
7811 | bnx2x_reset_mcp_prep(bp, &val); | ||
7443 | 7812 | ||
7444 | /* PXP */ | 7813 | /* PXP */ |
7445 | bnx2x_pxp_prep(bp); | 7814 | bnx2x_pxp_prep(bp); |
7446 | barrier(); | 7815 | barrier(); |
7447 | 7816 | ||
7448 | /* reset the chip */ | 7817 | /* reset the chip */ |
7449 | bnx2x_process_kill_chip_reset(bp); | 7818 | bnx2x_process_kill_chip_reset(bp, global); |
7450 | barrier(); | 7819 | barrier(); |
7451 | 7820 | ||
7452 | /* Recover after reset: */ | 7821 | /* Recover after reset: */ |
7453 | /* MCP */ | 7822 | /* MCP */ |
7454 | if (bnx2x_reset_mcp_comp(bp, val)) | 7823 | if (global && bnx2x_reset_mcp_comp(bp, val)) |
7455 | return -EAGAIN; | 7824 | return -EAGAIN; |
7456 | 7825 | ||
7826 | /* TBD: Add resetting the NO_MCP mode DB here */ | ||
7827 | |||
7457 | /* PXP */ | 7828 | /* PXP */ |
7458 | bnx2x_pxp_prep(bp); | 7829 | bnx2x_pxp_prep(bp); |
7459 | 7830 | ||
@@ -7466,43 +7837,85 @@ static int bnx2x_process_kill(struct bnx2x *bp) | |||
7466 | return 0; | 7837 | return 0; |
7467 | } | 7838 | } |
7468 | 7839 | ||
7469 | static int bnx2x_leader_reset(struct bnx2x *bp) | 7840 | int bnx2x_leader_reset(struct bnx2x *bp) |
7470 | { | 7841 | { |
7471 | int rc = 0; | 7842 | int rc = 0; |
7843 | bool global = bnx2x_reset_is_global(bp); | ||
7844 | |||
7472 | /* Try to recover after the failure */ | 7845 | /* Try to recover after the failure */ |
7473 | if (bnx2x_process_kill(bp)) { | 7846 | if (bnx2x_process_kill(bp, global)) { |
7474 | printk(KERN_ERR "%s: Something bad had happen! Aii!\n", | 7847 | netdev_err(bp->dev, "Something bad has happened on engine %d! " |
7475 | bp->dev->name); | 7848 | "Aii!\n", BP_PATH(bp)); |
7476 | rc = -EAGAIN; | 7849 | rc = -EAGAIN; |
7477 | goto exit_leader_reset; | 7850 | goto exit_leader_reset; |
7478 | } | 7851 | } |
7479 | 7852 | ||
7480 | /* Clear "reset is in progress" bit and update the driver state */ | 7853 | /* |
7854 | * Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver | ||
7855 | * state. | ||
7856 | */ | ||
7481 | bnx2x_set_reset_done(bp); | 7857 | bnx2x_set_reset_done(bp); |
7482 | bp->recovery_state = BNX2X_RECOVERY_DONE; | 7858 | if (global) |
7859 | bnx2x_clear_reset_global(bp); | ||
7483 | 7860 | ||
7484 | exit_leader_reset: | 7861 | exit_leader_reset: |
7485 | bp->is_leader = 0; | 7862 | bp->is_leader = 0; |
7486 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08); | 7863 | bnx2x_release_leader_lock(bp); |
7487 | smp_wmb(); | 7864 | smp_mb(); |
7488 | return rc; | 7865 | return rc; |
7489 | } | 7866 | } |
7490 | 7867 | ||
7491 | /* Assumption: runs under rtnl lock. This together with the fact | 7868 | static inline void bnx2x_recovery_failed(struct bnx2x *bp) |
7869 | { | ||
7870 | netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); | ||
7871 | |||
7872 | /* Disconnect this device */ | ||
7873 | netif_device_detach(bp->dev); | ||
7874 | |||
7875 | /* | ||
7876 | * Block ifup for all functions on this engine until "process kill" | ||
7877 | * or power cycle. | ||
7878 | */ | ||
7879 | bnx2x_set_reset_in_progress(bp); | ||
7880 | |||
7881 | /* Shut down the power */ | ||
7882 | bnx2x_set_power_state(bp, PCI_D3hot); | ||
7883 | |||
7884 | bp->recovery_state = BNX2X_RECOVERY_FAILED; | ||
7885 | |||
7886 | smp_mb(); | ||
7887 | } | ||
7888 | |||
7889 | /* | ||
7890 | * Assumption: runs under rtnl lock. This together with the fact | ||
7492 | * that it's called only from bnx2x_reset_task() ensure that it | 7891 | * that it's called only from bnx2x_reset_task() ensure that it |
7493 | * will never be called when netif_running(bp->dev) is false. | 7892 | * will never be called when netif_running(bp->dev) is false. |
7494 | */ | 7893 | */ |
7495 | static void bnx2x_parity_recover(struct bnx2x *bp) | 7894 | static void bnx2x_parity_recover(struct bnx2x *bp) |
7496 | { | 7895 | { |
7896 | bool global = false; | ||
7897 | |||
7497 | DP(NETIF_MSG_HW, "Handling parity\n"); | 7898 | DP(NETIF_MSG_HW, "Handling parity\n"); |
7498 | while (1) { | 7899 | while (1) { |
7499 | switch (bp->recovery_state) { | 7900 | switch (bp->recovery_state) { |
7500 | case BNX2X_RECOVERY_INIT: | 7901 | case BNX2X_RECOVERY_INIT: |
7501 | DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); | 7902 | DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); |
7903 | bnx2x_chk_parity_attn(bp, &global, false); | ||
7904 | |||
7502 | /* Try to get a LEADER_LOCK HW lock */ | 7905 | /* Try to get a LEADER_LOCK HW lock */ |
7503 | if (bnx2x_trylock_hw_lock(bp, | 7906 | if (bnx2x_trylock_leader_lock(bp)) { |
7504 | HW_LOCK_RESOURCE_RESERVED_08)) | 7907 | bnx2x_set_reset_in_progress(bp); |
7908 | /* | ||
7909 | * Check if there is a global attention and if | ||
7910 | * there was a global attention, set the global | ||
7911 | * reset bit. | ||
7912 | */ | ||
7913 | |||
7914 | if (global) | ||
7915 | bnx2x_set_reset_global(bp); | ||
7916 | |||
7505 | bp->is_leader = 1; | 7917 | bp->is_leader = 1; |
7918 | } | ||
7506 | 7919 | ||
7507 | /* Stop the driver */ | 7920 | /* Stop the driver */ |
7508 | /* If interface has been removed - break */ | 7921 | /* If interface has been removed - break */ |
@@ -7510,17 +7923,43 @@ static void bnx2x_parity_recover(struct bnx2x *bp) | |||
7510 | return; | 7923 | return; |
7511 | 7924 | ||
7512 | bp->recovery_state = BNX2X_RECOVERY_WAIT; | 7925 | bp->recovery_state = BNX2X_RECOVERY_WAIT; |
7513 | /* Ensure "is_leader" and "recovery_state" | 7926 | |
7514 | * update values are seen on other CPUs | 7927 | /* |
7928 | * Reset MCP command sequence number and MCP mail box | ||
7929 | * sequence as we are going to reset the MCP. | ||
7515 | */ | 7930 | */ |
7516 | smp_wmb(); | 7931 | if (global) { |
7932 | bp->fw_seq = 0; | ||
7933 | bp->fw_drv_pulse_wr_seq = 0; | ||
7934 | } | ||
7935 | |||
7936 | /* Ensure "is_leader", MCP command sequence and | ||
7937 | * "recovery_state" update values are seen on other | ||
7938 | * CPUs. | ||
7939 | */ | ||
7940 | smp_mb(); | ||
7517 | break; | 7941 | break; |
7518 | 7942 | ||
7519 | case BNX2X_RECOVERY_WAIT: | 7943 | case BNX2X_RECOVERY_WAIT: |
7520 | DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); | 7944 | DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); |
7521 | if (bp->is_leader) { | 7945 | if (bp->is_leader) { |
7522 | u32 load_counter = bnx2x_get_load_cnt(bp); | 7946 | int other_engine = BP_PATH(bp) ? 0 : 1; |
7523 | if (load_counter) { | 7947 | u32 other_load_counter = |
7948 | bnx2x_get_load_cnt(bp, other_engine); | ||
7949 | u32 load_counter = | ||
7950 | bnx2x_get_load_cnt(bp, BP_PATH(bp)); | ||
7951 | global = bnx2x_reset_is_global(bp); | ||
7952 | |||
7953 | /* | ||
7954 | * In case of a parity in a global block, let | ||
7955 | * the first leader that performs a | ||
7956 | * leader_reset() reset the global blocks in | ||
7957 | * order to clear global attentions. Otherwise | ||
7958 | * the gates will remain closed for that | ||
7959 | * engine. | ||
7960 | */ | ||
7961 | if (load_counter || | ||
7962 | (global && other_load_counter)) { | ||
7524 | /* Wait until all other functions get | 7963 | /* Wait until all other functions get |
7525 | * down. | 7964 | * down. |
7526 | */ | 7965 | */ |
@@ -7533,37 +7972,27 @@ static void bnx2x_parity_recover(struct bnx2x *bp) | |||
7533 | * normal. In any case it's an exit | 7972 | * normal. In any case it's an exit |
7534 | * point for a leader. | 7973 | * point for a leader. |
7535 | */ | 7974 | */ |
7536 | if (bnx2x_leader_reset(bp) || | 7975 | if (bnx2x_leader_reset(bp)) { |
7537 | bnx2x_nic_load(bp, LOAD_NORMAL)) { | 7976 | bnx2x_recovery_failed(bp); |
7538 | printk(KERN_ERR"%s: Recovery " | ||
7539 | "has failed. Power cycle is " | ||
7540 | "needed.\n", bp->dev->name); | ||
7541 | /* Disconnect this device */ | ||
7542 | netif_device_detach(bp->dev); | ||
7543 | /* Block ifup for all function | ||
7544 | * of this ASIC until | ||
7545 | * "process kill" or power | ||
7546 | * cycle. | ||
7547 | */ | ||
7548 | bnx2x_set_reset_in_progress(bp); | ||
7549 | /* Shut down the power */ | ||
7550 | bnx2x_set_power_state(bp, | ||
7551 | PCI_D3hot); | ||
7552 | return; | 7977 | return; |
7553 | } | 7978 | } |
7554 | 7979 | ||
7555 | return; | 7980 | /* If we are here, it means that the |
7981 | * leader has succeeded and doesn't | ||
7982 | * want to be a leader any more. Try | ||
7983 | * to continue as a non-leader. | ||
7984 | */ | ||
7985 | break; | ||
7556 | } | 7986 | } |
7557 | } else { /* non-leader */ | 7987 | } else { /* non-leader */ |
7558 | if (!bnx2x_reset_is_done(bp)) { | 7988 | if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) { |
7559 | /* Try to get a LEADER_LOCK HW lock as | 7989 | /* Try to get a LEADER_LOCK HW lock as |
7560 | * long as a former leader may have | 7990 | * long as a former leader may have |
7561 | * been unloaded by the user or | 7991 | * been unloaded by the user or |
7562 | * released a leadership by another | 7992 | * released a leadership by another |
7563 | * reason. | 7993 | * reason. |
7564 | */ | 7994 | */ |
7565 | if (bnx2x_trylock_hw_lock(bp, | 7995 | if (bnx2x_trylock_leader_lock(bp)) { |
7566 | HW_LOCK_RESOURCE_RESERVED_08)) { | ||
7567 | /* I'm a leader now! Restart a | 7996 | /* I'm a leader now! Restart a |
7568 | * switch case. | 7997 | * switch case. |
7569 | */ | 7998 | */ |
@@ -7575,14 +8004,25 @@ static void bnx2x_parity_recover(struct bnx2x *bp) | |||
7575 | HZ/10); | 8004 | HZ/10); |
7576 | return; | 8005 | return; |
7577 | 8006 | ||
7578 | } else { /* A leader has completed | 8007 | } else { |
7579 | * the "process kill". It's an exit | 8008 | /* |
7580 | * point for a non-leader. | 8009 | * If there was a global attention, wait |
7581 | */ | 8010 | * for it to be cleared. |
7582 | bnx2x_nic_load(bp, LOAD_NORMAL); | 8011 | */ |
7583 | bp->recovery_state = | 8012 | if (bnx2x_reset_is_global(bp)) { |
7584 | BNX2X_RECOVERY_DONE; | 8013 | schedule_delayed_work( |
7585 | smp_wmb(); | 8014 | &bp->reset_task, HZ/10); |
8015 | return; | ||
8016 | } | ||
8017 | |||
8018 | if (bnx2x_nic_load(bp, LOAD_NORMAL)) | ||
8019 | bnx2x_recovery_failed(bp); | ||
8020 | else { | ||
8021 | bp->recovery_state = | ||
8022 | BNX2X_RECOVERY_DONE; | ||
8023 | smp_mb(); | ||
8024 | } | ||
8025 | |||
7586 | return; | 8026 | return; |
7587 | } | 8027 | } |
7588 | } | 8028 | } |
@@ -7624,6 +8064,37 @@ reset_task_exit: | |||
7624 | 8064 | ||
7625 | /* end of nic load/unload */ | 8065 | /* end of nic load/unload */ |
7626 | 8066 | ||
8067 | static void bnx2x_period_task(struct work_struct *work) | ||
8068 | { | ||
8069 | struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work); | ||
8070 | |||
8071 | if (!netif_running(bp->dev)) | ||
8072 | goto period_task_exit; | ||
8073 | |||
8074 | if (CHIP_REV_IS_SLOW(bp)) { | ||
8075 | BNX2X_ERR("period task called on emulation, ignoring\n"); | ||
8076 | goto period_task_exit; | ||
8077 | } | ||
8078 | |||
8079 | bnx2x_acquire_phy_lock(bp); | ||
8080 | /* | ||
8081 | * The barrier is needed to ensure the ordering between the writing to | ||
8082 | * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and | ||
8083 | * the reading here. | ||
8084 | */ | ||
8085 | smp_mb(); | ||
8086 | if (bp->port.pmf) { | ||
8087 | bnx2x_period_func(&bp->link_params, &bp->link_vars); | ||
8088 | |||
8089 | /* Re-queue task in 1 sec */ | ||
8090 | queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); | ||
8091 | } | ||
8092 | |||
8093 | bnx2x_release_phy_lock(bp); | ||
8094 | period_task_exit: | ||
8095 | return; | ||
8096 | } | ||
8097 | |||
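bnx2x_period_task() above is self re-arming: as long as bp->port.pmf is set it runs the periodic link maintenance and queues itself again one second later, and it simply stops re-queuing once the condition drops. A toy user-space sketch of that pattern follows; queue_after_1s(), pmf and remaining are stand-ins invented for the example, and the synchronous call is only a crude stand-in for queue_delayed_work().

    /*
     * Illustrative sketch only: a periodic task that re-arms itself
     * while an enabling condition holds, like bnx2x_period_task().
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static bool pmf = true;         /* stands in for bp->port.pmf */
    static int remaining = 3;       /* stop the toy example after a few ticks */

    static void period_task(void);

    static void queue_after_1s(void (*fn)(void))
    {
            sleep(1);               /* crude stand-in for a delayed work queue */
            fn();
    }

    static void period_task(void)
    {
            if (!pmf)
                    return;         /* condition dropped: do not re-arm */

            printf("periodic link maintenance tick\n");

            if (--remaining == 0)
                    pmf = false;

            queue_after_1s(period_task);    /* re-arm for the next second */
    }

    int main(void)
    {
            period_task();
            return 0;
    }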
7627 | /* | 8098 | /* |
7628 | * Init service functions | 8099 | * Init service functions |
7629 | */ | 8100 | */ |
@@ -7681,8 +8152,8 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
7681 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | 8152 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
7682 | /* save our pf_num */ | 8153 | /* save our pf_num */ |
7683 | int orig_pf_num = bp->pf_num; | 8154 | int orig_pf_num = bp->pf_num; |
7684 | u32 swap_en; | 8155 | int port; |
7685 | u32 swap_val; | 8156 | u32 swap_en, swap_val, value; |
7686 | 8157 | ||
7687 | /* clear the UNDI indication */ | 8158 | /* clear the UNDI indication */ |
7688 | REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); | 8159 | REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); |
@@ -7717,21 +8188,19 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
7717 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | 8188 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); |
7718 | 8189 | ||
7719 | bnx2x_undi_int_disable(bp); | 8190 | bnx2x_undi_int_disable(bp); |
8191 | port = BP_PORT(bp); | ||
7720 | 8192 | ||
7721 | /* close input traffic and wait for it */ | 8193 | /* close input traffic and wait for it */ |
7722 | /* Do not rcv packets to BRB */ | 8194 | /* Do not rcv packets to BRB */ |
7723 | REG_WR(bp, | 8195 | REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK : |
7724 | (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK : | 8196 | NIG_REG_LLH0_BRB1_DRV_MASK), 0x0); |
7725 | NIG_REG_LLH0_BRB1_DRV_MASK), 0x0); | ||
7726 | /* Do not direct rcv packets that are not for MCP to | 8197 | /* Do not direct rcv packets that are not for MCP to |
7727 | * the BRB */ | 8198 | * the BRB */ |
7728 | REG_WR(bp, | 8199 | REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : |
7729 | (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP : | 8200 | NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); |
7730 | NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); | ||
7731 | /* clear AEU */ | 8201 | /* clear AEU */ |
7732 | REG_WR(bp, | 8202 | REG_WR(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : |
7733 | (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : | 8203 | MISC_REG_AEU_MASK_ATTN_FUNC_0), 0); |
7734 | MISC_REG_AEU_MASK_ATTN_FUNC_0), 0); | ||
7735 | msleep(10); | 8204 | msleep(10); |
7736 | 8205 | ||
7737 | /* save NIG port swap info */ | 8206 | /* save NIG port swap info */ |
@@ -7741,9 +8210,17 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
7741 | REG_WR(bp, | 8210 | REG_WR(bp, |
7742 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | 8211 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, |
7743 | 0xd3ffffff); | 8212 | 0xd3ffffff); |
8213 | |||
8214 | value = 0x1400; | ||
8215 | if (CHIP_IS_E3(bp)) { | ||
8216 | value |= MISC_REGISTERS_RESET_REG_2_MSTAT0; | ||
8217 | value |= MISC_REGISTERS_RESET_REG_2_MSTAT1; | ||
8218 | } | ||
8219 | |||
7744 | REG_WR(bp, | 8220 | REG_WR(bp, |
7745 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | 8221 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, |
7746 | 0x1403); | 8222 | value); |
8223 | |||
7747 | /* take the NIG out of reset and restore swap values */ | 8224 | /* take the NIG out of reset and restore swap values */ |
7748 | REG_WR(bp, | 8225 | REG_WR(bp, |
7749 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, | 8226 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, |
@@ -7784,7 +8261,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
7784 | /* Set doorbell size */ | 8261 | /* Set doorbell size */ |
7785 | bp->db_size = (1 << BNX2X_DB_SHIFT); | 8262 | bp->db_size = (1 << BNX2X_DB_SHIFT); |
7786 | 8263 | ||
7787 | if (CHIP_IS_E2(bp)) { | 8264 | if (!CHIP_IS_E1x(bp)) { |
7788 | val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); | 8265 | val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); |
7789 | if ((val & 1) == 0) | 8266 | if ((val & 1) == 0) |
7790 | val = REG_RD(bp, MISC_REG_PORT4MODE_EN); | 8267 | val = REG_RD(bp, MISC_REG_PORT4MODE_EN); |
@@ -7804,16 +8281,6 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
7804 | bp->pfid = bp->pf_num; /* 0..7 */ | 8281 | bp->pfid = bp->pf_num; /* 0..7 */ |
7805 | } | 8282 | } |
7806 | 8283 | ||
7807 | /* | ||
7808 | * set base FW non-default (fast path) status block id, this value is | ||
7809 | * used to initialize the fw_sb_id saved on the fp/queue structure to | ||
7810 | * determine the id used by the FW. | ||
7811 | */ | ||
7812 | if (CHIP_IS_E1x(bp)) | ||
7813 | bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x; | ||
7814 | else /* E2 */ | ||
7815 | bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2; | ||
7816 | |||
7817 | bp->link_params.chip_id = bp->common.chip_id; | 8284 | bp->link_params.chip_id = bp->common.chip_id; |
7818 | BNX2X_DEV_INFO("chip ID is 0x%x\n", id); | 8285 | BNX2X_DEV_INFO("chip ID is 0x%x\n", id); |
7819 | 8286 | ||
@@ -7825,13 +8292,15 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
7825 | } | 8292 | } |
7826 | 8293 | ||
7827 | val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); | 8294 | val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); |
7828 | bp->common.flash_size = (NVRAM_1MB_SIZE << | 8295 | bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << |
7829 | (val & MCPR_NVM_CFG4_FLASH_SIZE)); | 8296 | (val & MCPR_NVM_CFG4_FLASH_SIZE)); |
7830 | BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", | 8297 | BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", |
7831 | bp->common.flash_size, bp->common.flash_size); | 8298 | bp->common.flash_size, bp->common.flash_size); |
7832 | 8299 | ||
7833 | bnx2x_init_shmem(bp); | 8300 | bnx2x_init_shmem(bp); |
7834 | 8301 | ||
8302 | |||
8303 | |||
7835 | bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? | 8304 | bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? |
7836 | MISC_REG_GENERIC_CR_1 : | 8305 | MISC_REG_GENERIC_CR_1 : |
7837 | MISC_REG_GENERIC_CR_0)); | 8306 | MISC_REG_GENERIC_CR_0)); |
@@ -7880,6 +8349,10 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
7880 | (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? | 8349 | (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? |
7881 | FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; | 8350 | FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; |
7882 | 8351 | ||
8352 | bp->link_params.feature_config_flags |= | ||
8353 | (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? | ||
8354 | FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; | ||
8355 | |||
7883 | pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); | 8356 | pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); |
7884 | bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; | 8357 | bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; |
7885 | 8358 | ||
@@ -7941,8 +8414,14 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) | |||
7941 | } | 8414 | } |
7942 | } | 8415 | } |
7943 | } | 8416 | } |
7944 | bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, | 8417 | |
7945 | NUM_IGU_SB_REQUIRED(bp->l2_cid_count)); | 8418 | /* It's expected that number of CAM entries for this |
8419 | * functions is equal to the MSI-X table size (which was a | ||
8420 | * used during bp->l2_cid_count value calculation. | ||
8421 | * We want a harsh warning if these values are different! | ||
8422 | */ | ||
8423 | WARN_ON(bp->igu_sb_cnt != NUM_IGU_SB_REQUIRED(bp->l2_cid_count)); | ||
8424 | |||
7946 | if (bp->igu_sb_cnt == 0) | 8425 | if (bp->igu_sb_cnt == 0) |
7947 | BNX2X_ERR("CAM configuration error\n"); | 8426 | BNX2X_ERR("CAM configuration error\n"); |
7948 | } | 8427 | } |
@@ -7991,24 +8470,25 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, | |||
7991 | return; | 8470 | return; |
7992 | } | 8471 | } |
7993 | 8472 | ||
7994 | switch (switch_cfg) { | 8473 | if (CHIP_IS_E3(bp)) |
7995 | case SWITCH_CFG_1G: | 8474 | bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); |
7996 | bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + | 8475 | else { |
7997 | port*0x10); | 8476 | switch (switch_cfg) { |
7998 | BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); | 8477 | case SWITCH_CFG_1G: |
7999 | break; | 8478 | bp->port.phy_addr = REG_RD( |
8000 | 8479 | bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); | |
8001 | case SWITCH_CFG_10G: | 8480 | break; |
8002 | bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + | 8481 | case SWITCH_CFG_10G: |
8003 | port*0x18); | 8482 | bp->port.phy_addr = REG_RD( |
8004 | BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); | 8483 | bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); |
8005 | break; | 8484 | break; |
8006 | 8485 | default: | |
8007 | default: | 8486 | BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", |
8008 | BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", | 8487 | bp->port.link_config[0]); |
8009 | bp->port.link_config[0]); | 8488 | return; |
8010 | return; | 8489 | } |
8011 | } | 8490 | } |
8491 | BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); | ||
8012 | /* mask what we support according to speed_cap_mask per configuration */ | 8492 | /* mask what we support according to speed_cap_mask per configuration */ |
8013 | for (idx = 0; idx < cfg_size; idx++) { | 8493 | for (idx = 0; idx < cfg_size; idx++) { |
8014 | if (!(bp->link_params.speed_cap_mask[idx] & | 8494 | if (!(bp->link_params.speed_cap_mask[idx] & |
@@ -8089,7 +8569,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) | |||
8089 | (ADVERTISED_10baseT_Full | | 8569 | (ADVERTISED_10baseT_Full | |
8090 | ADVERTISED_TP); | 8570 | ADVERTISED_TP); |
8091 | } else { | 8571 | } else { |
8092 | BNX2X_ERROR("NVRAM config error. " | 8572 | BNX2X_ERR("NVRAM config error. " |
8093 | "Invalid link_config 0x%x" | 8573 | "Invalid link_config 0x%x" |
8094 | " speed_cap_mask 0x%x\n", | 8574 | " speed_cap_mask 0x%x\n", |
8095 | link_config, | 8575 | link_config, |
@@ -8108,7 +8588,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) | |||
8108 | (ADVERTISED_10baseT_Half | | 8588 | (ADVERTISED_10baseT_Half | |
8109 | ADVERTISED_TP); | 8589 | ADVERTISED_TP); |
8110 | } else { | 8590 | } else { |
8111 | BNX2X_ERROR("NVRAM config error. " | 8591 | BNX2X_ERR("NVRAM config error. " |
8112 | "Invalid link_config 0x%x" | 8592 | "Invalid link_config 0x%x" |
8113 | " speed_cap_mask 0x%x\n", | 8593 | " speed_cap_mask 0x%x\n", |
8114 | link_config, | 8594 | link_config, |
@@ -8126,7 +8606,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) | |||
8126 | (ADVERTISED_100baseT_Full | | 8606 | (ADVERTISED_100baseT_Full | |
8127 | ADVERTISED_TP); | 8607 | ADVERTISED_TP); |
8128 | } else { | 8608 | } else { |
8129 | BNX2X_ERROR("NVRAM config error. " | 8609 | BNX2X_ERR("NVRAM config error. " |
8130 | "Invalid link_config 0x%x" | 8610 | "Invalid link_config 0x%x" |
8131 | " speed_cap_mask 0x%x\n", | 8611 | " speed_cap_mask 0x%x\n", |
8132 | link_config, | 8612 | link_config, |
@@ -8146,7 +8626,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) | |||
8146 | (ADVERTISED_100baseT_Half | | 8626 | (ADVERTISED_100baseT_Half | |
8147 | ADVERTISED_TP); | 8627 | ADVERTISED_TP); |
8148 | } else { | 8628 | } else { |
8149 | BNX2X_ERROR("NVRAM config error. " | 8629 | BNX2X_ERR("NVRAM config error. " |
8150 | "Invalid link_config 0x%x" | 8630 | "Invalid link_config 0x%x" |
8151 | " speed_cap_mask 0x%x\n", | 8631 | " speed_cap_mask 0x%x\n", |
8152 | link_config, | 8632 | link_config, |
@@ -8164,7 +8644,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) | |||
8164 | (ADVERTISED_1000baseT_Full | | 8644 | (ADVERTISED_1000baseT_Full | |
8165 | ADVERTISED_TP); | 8645 | ADVERTISED_TP); |
8166 | } else { | 8646 | } else { |
8167 | BNX2X_ERROR("NVRAM config error. " | 8647 | BNX2X_ERR("NVRAM config error. " |
8168 | "Invalid link_config 0x%x" | 8648 | "Invalid link_config 0x%x" |
8169 | " speed_cap_mask 0x%x\n", | 8649 | " speed_cap_mask 0x%x\n", |
8170 | link_config, | 8650 | link_config, |
@@ -8182,7 +8662,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) | |||
8182 | (ADVERTISED_2500baseX_Full | | 8662 | (ADVERTISED_2500baseX_Full | |
8183 | ADVERTISED_TP); | 8663 | ADVERTISED_TP); |
8184 | } else { | 8664 | } else { |
8185 | BNX2X_ERROR("NVRAM config error. " | 8665 | BNX2X_ERR("NVRAM config error. " |
8186 | "Invalid link_config 0x%x" | 8666 | "Invalid link_config 0x%x" |
8187 | " speed_cap_mask 0x%x\n", | 8667 | " speed_cap_mask 0x%x\n", |
8188 | link_config, | 8668 | link_config, |
@@ -8192,8 +8672,6 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) | |||
8192 | break; | 8672 | break; |
8193 | 8673 | ||
8194 | case PORT_FEATURE_LINK_SPEED_10G_CX4: | 8674 | case PORT_FEATURE_LINK_SPEED_10G_CX4: |
8195 | case PORT_FEATURE_LINK_SPEED_10G_KX4: | ||
8196 | case PORT_FEATURE_LINK_SPEED_10G_KR: | ||
8197 | if (bp->port.supported[idx] & | 8675 | if (bp->port.supported[idx] & |
8198 | SUPPORTED_10000baseT_Full) { | 8676 | SUPPORTED_10000baseT_Full) { |
8199 | bp->link_params.req_line_speed[idx] = | 8677 | bp->link_params.req_line_speed[idx] = |
@@ -8202,7 +8680,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) | |||
8202 | (ADVERTISED_10000baseT_Full | | 8680 | (ADVERTISED_10000baseT_Full | |
8203 | ADVERTISED_FIBRE); | 8681 | ADVERTISED_FIBRE); |
8204 | } else { | 8682 | } else { |
8205 | BNX2X_ERROR("NVRAM config error. " | 8683 | BNX2X_ERR("NVRAM config error. " |
8206 | "Invalid link_config 0x%x" | 8684 | "Invalid link_config 0x%x" |
8207 | " speed_cap_mask 0x%x\n", | 8685 | " speed_cap_mask 0x%x\n", |
8208 | link_config, | 8686 | link_config, |
@@ -8210,11 +8688,14 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) | |||
8210 | return; | 8688 | return; |
8211 | } | 8689 | } |
8212 | break; | 8690 | break; |
8691 | case PORT_FEATURE_LINK_SPEED_20G: | ||
8692 | bp->link_params.req_line_speed[idx] = SPEED_20000; | ||
8213 | 8693 | ||
8694 | break; | ||
8214 | default: | 8695 | default: |
8215 | BNX2X_ERROR("NVRAM config error. " | 8696 | BNX2X_ERR("NVRAM config error. " |
8216 | "BAD link speed link_config 0x%x\n", | 8697 | "BAD link speed link_config 0x%x\n", |
8217 | link_config); | 8698 | link_config); |
8218 | bp->link_params.req_line_speed[idx] = | 8699 | bp->link_params.req_line_speed[idx] = |
8219 | SPEED_AUTO_NEG; | 8700 | SPEED_AUTO_NEG; |
8220 | bp->port.advertising[idx] = | 8701 | bp->port.advertising[idx] = |
@@ -8364,6 +8845,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) | |||
8364 | u8 *fip_mac = bp->fip_mac; | 8845 | u8 *fip_mac = bp->fip_mac; |
8365 | #endif | 8846 | #endif |
8366 | 8847 | ||
8848 | /* Zero primary MAC configuration */ | ||
8849 | memset(bp->dev->dev_addr, 0, ETH_ALEN); | ||
8850 | |||
8367 | if (BP_NOMCP(bp)) { | 8851 | if (BP_NOMCP(bp)) { |
8368 | BNX2X_ERROR("warning: random MAC workaround active\n"); | 8852 | BNX2X_ERROR("warning: random MAC workaround active\n"); |
8369 | random_ether_addr(bp->dev->dev_addr); | 8853 | random_ether_addr(bp->dev->dev_addr); |
@@ -8385,9 +8869,10 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) | |||
8385 | iscsi_mac_addr_upper); | 8869 | iscsi_mac_addr_upper); |
8386 | val = MF_CFG_RD(bp, func_ext_config[func]. | 8870 | val = MF_CFG_RD(bp, func_ext_config[func]. |
8387 | iscsi_mac_addr_lower); | 8871 | iscsi_mac_addr_lower); |
8388 | BNX2X_DEV_INFO("Read iSCSI MAC: " | ||
8389 | "0x%x:0x%04x\n", val2, val); | ||
8390 | bnx2x_set_mac_buf(iscsi_mac, val, val2); | 8872 | bnx2x_set_mac_buf(iscsi_mac, val, val2); |
8873 | BNX2X_DEV_INFO("Read iSCSI MAC: " | ||
8874 | BNX2X_MAC_FMT"\n", | ||
8875 | BNX2X_MAC_PRN_LIST(iscsi_mac)); | ||
8391 | } else | 8876 | } else |
8392 | bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; | 8877 | bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; |
8393 | 8878 | ||
@@ -8396,9 +8881,10 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) | |||
8396 | fcoe_mac_addr_upper); | 8881 | fcoe_mac_addr_upper); |
8397 | val = MF_CFG_RD(bp, func_ext_config[func]. | 8882 | val = MF_CFG_RD(bp, func_ext_config[func]. |
8398 | fcoe_mac_addr_lower); | 8883 | fcoe_mac_addr_lower); |
8399 | BNX2X_DEV_INFO("Read FCoE MAC to " | ||
8400 | "0x%x:0x%04x\n", val2, val); | ||
8401 | bnx2x_set_mac_buf(fip_mac, val, val2); | 8884 | bnx2x_set_mac_buf(fip_mac, val, val2); |
8885 | BNX2X_DEV_INFO("Read FCoE L2 MAC to " | ||
8886 | BNX2X_MAC_FMT"\n", | ||
8887 | BNX2X_MAC_PRN_LIST(fip_mac)); | ||
8402 | 8888 | ||
8403 | } else | 8889 | } else |
8404 | bp->flags |= NO_FCOE_FLAG; | 8890 | bp->flags |= NO_FCOE_FLAG; |
@@ -8447,6 +8933,13 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) | |||
8447 | memset(bp->fip_mac, 0, ETH_ALEN); | 8933 | memset(bp->fip_mac, 0, ETH_ALEN); |
8448 | } | 8934 | } |
8449 | #endif | 8935 | #endif |
8936 | |||
8937 | if (!is_valid_ether_addr(bp->dev->dev_addr)) | ||
8938 | dev_err(&bp->pdev->dev, | ||
8939 | "bad Ethernet MAC address configuration: " | ||
8940 | BNX2X_MAC_FMT", change it manually before bringing up " | ||
8941 | "the appropriate network interface\n", | ||
8942 | BNX2X_MAC_PRN_LIST(bp->dev->dev_addr)); | ||
8450 | } | 8943 | } |
8451 | 8944 | ||
8452 | static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | 8945 | static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) |
@@ -8468,17 +8961,55 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
8468 | } else { | 8961 | } else { |
8469 | bp->common.int_block = INT_BLOCK_IGU; | 8962 | bp->common.int_block = INT_BLOCK_IGU; |
8470 | val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); | 8963 | val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); |
8964 | |||
8965 | if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { | ||
8966 | int tout = 5000; | ||
8967 | |||
8968 | BNX2X_DEV_INFO("FORCING Normal Mode\n"); | ||
8969 | |||
8970 | val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); | ||
8971 | REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); | ||
8972 | REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); | ||
8973 | |||
8974 | while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { | ||
8975 | tout--; | ||
8976 | usleep_range(1000, 1000); | ||
8977 | } | ||
8978 | |||
8979 | if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { | ||
8980 | dev_err(&bp->pdev->dev, | ||
8981 | "FORCING Normal Mode failed!!!\n"); | ||
8982 | return -EPERM; | ||
8983 | } | ||
8984 | } | ||
8985 | |||
8471 | if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { | 8986 | if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { |
8472 | DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n"); | 8987 | BNX2X_DEV_INFO("IGU Backward Compatible Mode\n"); |
8473 | bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; | 8988 | bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; |
8474 | } else | 8989 | } else |
8475 | DP(NETIF_MSG_PROBE, "IGU Normal Mode\n"); | 8990 | BNX2X_DEV_INFO("IGU Normal Mode\n"); |
8476 | 8991 | ||
8477 | bnx2x_get_igu_cam_info(bp); | 8992 | bnx2x_get_igu_cam_info(bp); |
8478 | 8993 | ||
8479 | } | 8994 | } |
8480 | DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n", | 8995 | |
8481 | bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt); | 8996 | /* |
8997 | * set base FW non-default (fast path) status block id, this value is | ||
8998 | * used to initialize the fw_sb_id saved on the fp/queue structure to | ||
8999 | * determine the id used by the FW. | ||
9000 | */ | ||
9001 | if (CHIP_IS_E1x(bp)) | ||
9002 | bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); | ||
9003 | else /* | ||
9004 | * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of | ||
9005 | * the same queue are indicated on the same IGU SB). So we prefer | ||
9006 | * FW and IGU SBs to be the same value. | ||
9007 | */ | ||
9008 | bp->base_fw_ndsb = bp->igu_base_sb; | ||
9009 | |||
9010 | BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n" | ||
9011 | "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb, | ||
9012 | bp->igu_sb_cnt, bp->base_fw_ndsb); | ||
8482 | 9013 | ||
8483 | /* | 9014 | /* |
8484 | * Initialize MF configuration | 9015 | * Initialize MF configuration |
@@ -8489,10 +9020,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
8489 | vn = BP_E1HVN(bp); | 9020 | vn = BP_E1HVN(bp); |
8490 | 9021 | ||
8491 | if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { | 9022 | if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { |
8492 | DP(NETIF_MSG_PROBE, | 9023 | BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", |
8493 | "shmem2base 0x%x, size %d, mfcfg offset %d\n", | 9024 | bp->common.shmem2_base, SHMEM2_RD(bp, size), |
8494 | bp->common.shmem2_base, SHMEM2_RD(bp, size), | 9025 | (u32)offsetof(struct shmem2_region, mf_cfg_addr)); |
8495 | (u32)offsetof(struct shmem2_region, mf_cfg_addr)); | 9026 | |
8496 | if (SHMEM2_HAS(bp, mf_cfg_addr)) | 9027 | if (SHMEM2_HAS(bp, mf_cfg_addr)) |
8497 | bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); | 9028 | bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); |
8498 | else | 9029 | else |
@@ -8523,8 +9054,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
8523 | bp->mf_config[vn] = MF_CFG_RD(bp, | 9054 | bp->mf_config[vn] = MF_CFG_RD(bp, |
8524 | func_mf_config[func].config); | 9055 | func_mf_config[func].config); |
8525 | } else | 9056 | } else |
8526 | DP(NETIF_MSG_PROBE, "illegal MAC " | 9057 | BNX2X_DEV_INFO("illegal MAC address " |
8527 | "address for SI\n"); | 9058 | "for SI\n"); |
8528 | break; | 9059 | break; |
8529 | case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: | 9060 | case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: |
8530 | /* get OV configuration */ | 9061 | /* get OV configuration */ |
@@ -8537,14 +9068,12 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
8537 | bp->mf_config[vn] = MF_CFG_RD(bp, | 9068 | bp->mf_config[vn] = MF_CFG_RD(bp, |
8538 | func_mf_config[func].config); | 9069 | func_mf_config[func].config); |
8539 | } else | 9070 | } else |
8540 | DP(NETIF_MSG_PROBE, "illegal OV for " | 9071 | BNX2X_DEV_INFO("illegal OV for SD\n"); |
8541 | "SD\n"); | ||
8542 | break; | 9072 | break; |
8543 | default: | 9073 | default: |
8544 | /* Unknown configuration: reset mf_config */ | 9074 | /* Unknown configuration: reset mf_config */ |
8545 | bp->mf_config[vn] = 0; | 9075 | bp->mf_config[vn] = 0; |
8546 | DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n", | 9076 | BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val); |
8547 | val); | ||
8548 | } | 9077 | } |
8549 | } | 9078 | } |
8550 | 9079 | ||
@@ -8557,13 +9086,16 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
8557 | FUNC_MF_CFG_E1HOV_TAG_MASK; | 9086 | FUNC_MF_CFG_E1HOV_TAG_MASK; |
8558 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { | 9087 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { |
8559 | bp->mf_ov = val; | 9088 | bp->mf_ov = val; |
8560 | BNX2X_DEV_INFO("MF OV for func %d is %d" | 9089 | bp->path_has_ovlan = true; |
8561 | " (0x%04x)\n", func, | 9090 | |
8562 | bp->mf_ov, bp->mf_ov); | 9091 | BNX2X_DEV_INFO("MF OV for func %d is %d " |
9092 | "(0x%04x)\n", func, bp->mf_ov, | ||
9093 | bp->mf_ov); | ||
8563 | } else { | 9094 | } else { |
8564 | BNX2X_ERR("No valid MF OV for func %d," | 9095 | dev_err(&bp->pdev->dev, |
8565 | " aborting\n", func); | 9096 | "No valid MF OV for func %d, " |
8566 | rc = -EPERM; | 9097 | "aborting\n", func); |
9098 | return -EPERM; | ||
8567 | } | 9099 | } |
8568 | break; | 9100 | break; |
8569 | case MULTI_FUNCTION_SI: | 9101 | case MULTI_FUNCTION_SI: |
@@ -8572,31 +9104,40 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
8572 | break; | 9104 | break; |
8573 | default: | 9105 | default: |
8574 | if (vn) { | 9106 | if (vn) { |
8575 | BNX2X_ERR("VN %d in single function mode," | 9107 | dev_err(&bp->pdev->dev, |
8576 | " aborting\n", vn); | 9108 | "VN %d is in a single function mode, " |
8577 | rc = -EPERM; | 9109 | "aborting\n", vn); |
9110 | return -EPERM; | ||
8578 | } | 9111 | } |
8579 | break; | 9112 | break; |
8580 | } | 9113 | } |
8581 | 9114 | ||
9115 | /* check if other port on the path needs ovlan: | ||
9116 | * Since the MF configuration is shared between ports, | ||
9117 | * the only possible mixed modes are | ||
9118 | * {SF, SI} {SF, SD} {SD, SF} {SI, SF} | ||
9119 | */ | ||
9120 | if (CHIP_MODE_IS_4_PORT(bp) && | ||
9121 | !bp->path_has_ovlan && | ||
9122 | !IS_MF(bp) && | ||
9123 | bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { | ||
9124 | u8 other_port = !BP_PORT(bp); | ||
9125 | u8 other_func = BP_PATH(bp) + 2*other_port; | ||
9126 | val = MF_CFG_RD(bp, | ||
9127 | func_mf_config[other_func].e1hov_tag); | ||
9128 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) | ||
9129 | bp->path_has_ovlan = true; | ||
9130 | } | ||
8582 | } | 9131 | } |
8583 | 9132 | ||
8584 | /* adjust igu_sb_cnt to MF for E1x */ | 9133 | /* adjust igu_sb_cnt to MF for E1x */ |
8585 | if (CHIP_IS_E1x(bp) && IS_MF(bp)) | 9134 | if (CHIP_IS_E1x(bp) && IS_MF(bp)) |
8586 | bp->igu_sb_cnt /= E1HVN_MAX; | 9135 | bp->igu_sb_cnt /= E1HVN_MAX; |
8587 | 9136 | ||
8588 | /* | 9137 | /* port info */ |
8589 | * adjust E2 sb count: to be removed when FW will support | 9138 | bnx2x_get_port_hwinfo(bp); |
8590 | * more then 16 L2 clients | ||
8591 | */ | ||
8592 | #define MAX_L2_CLIENTS 16 | ||
8593 | if (CHIP_IS_E2(bp)) | ||
8594 | bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, | ||
8595 | MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1)); | ||
8596 | 9139 | ||
8597 | if (!BP_NOMCP(bp)) { | 9140 | if (!BP_NOMCP(bp)) { |
8598 | bnx2x_get_port_hwinfo(bp); | ||
8599 | |||
8600 | bp->fw_seq = | 9141 | bp->fw_seq = |
8601 | (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & | 9142 | (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & |
8602 | DRV_MSG_SEQ_NUMBER_MASK); | 9143 | DRV_MSG_SEQ_NUMBER_MASK); |
@@ -8610,6 +9151,16 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
8610 | bnx2x_get_cnic_info(bp); | 9151 | bnx2x_get_cnic_info(bp); |
8611 | #endif | 9152 | #endif |
8612 | 9153 | ||
9154 | /* Get current FW pulse sequence */ | ||
9155 | if (!BP_NOMCP(bp)) { | ||
9156 | int mb_idx = BP_FW_MB_IDX(bp); | ||
9157 | |||
9158 | bp->fw_drv_pulse_wr_seq = | ||
9159 | (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) & | ||
9160 | DRV_PULSE_SEQ_MASK); | ||
9161 | BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); | ||
9162 | } | ||
9163 | |||
8613 | return rc; | 9164 | return rc; |
8614 | } | 9165 | } |
8615 | 9166 | ||
@@ -8677,16 +9228,61 @@ out_not_found: | |||
8677 | return; | 9228 | return; |
8678 | } | 9229 | } |
8679 | 9230 | ||
9231 | static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp) | ||
9232 | { | ||
9233 | u32 flags = 0; | ||
9234 | |||
9235 | if (CHIP_REV_IS_FPGA(bp)) | ||
9236 | SET_FLAGS(flags, MODE_FPGA); | ||
9237 | else if (CHIP_REV_IS_EMUL(bp)) | ||
9238 | SET_FLAGS(flags, MODE_EMUL); | ||
9239 | else | ||
9240 | SET_FLAGS(flags, MODE_ASIC); | ||
9241 | |||
9242 | if (CHIP_MODE_IS_4_PORT(bp)) | ||
9243 | SET_FLAGS(flags, MODE_PORT4); | ||
9244 | else | ||
9245 | SET_FLAGS(flags, MODE_PORT2); | ||
9246 | |||
9247 | if (CHIP_IS_E2(bp)) | ||
9248 | SET_FLAGS(flags, MODE_E2); | ||
9249 | else if (CHIP_IS_E3(bp)) { | ||
9250 | SET_FLAGS(flags, MODE_E3); | ||
9251 | if (CHIP_REV(bp) == CHIP_REV_Ax) | ||
9252 | SET_FLAGS(flags, MODE_E3_A0); | ||
9253 | else {/*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ | ||
9254 | SET_FLAGS(flags, MODE_E3_B0); | ||
9255 | SET_FLAGS(flags, MODE_COS_BC); | ||
9256 | } | ||
9257 | } | ||
9258 | |||
9259 | if (IS_MF(bp)) { | ||
9260 | SET_FLAGS(flags, MODE_MF); | ||
9261 | switch (bp->mf_mode) { | ||
9262 | case MULTI_FUNCTION_SD: | ||
9263 | SET_FLAGS(flags, MODE_MF_SD); | ||
9264 | break; | ||
9265 | case MULTI_FUNCTION_SI: | ||
9266 | SET_FLAGS(flags, MODE_MF_SI); | ||
9267 | break; | ||
9268 | } | ||
9269 | } else | ||
9270 | SET_FLAGS(flags, MODE_SF); | ||
9271 | |||
9272 | #if defined(__LITTLE_ENDIAN) | ||
9273 | SET_FLAGS(flags, MODE_LITTLE_ENDIAN); | ||
9274 | #else /*(__BIG_ENDIAN)*/ | ||
9275 | SET_FLAGS(flags, MODE_BIG_ENDIAN); | ||
9276 | #endif | ||
9277 | INIT_MODE_FLAGS(bp) = flags; | ||
9278 | } | ||
9279 | |||
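bnx2x_set_modes_bitmap() above folds the chip flavour (ASIC/FPGA/emulation), port count, E2/E3 revision, multi-function mode and host endianness into one flags word via SET_FLAGS() and stores it with INIT_MODE_FLAGS(), so later init code can select the right tables by mode. A minimal sketch of that flag-accumulation idiom follows; the MODE_* values and the SET_FLAGS() definition are placeholders, the real definitions and encodings live in the driver's init headers.

    /*
     * Illustrative sketch only: accumulate configuration bits into a
     * single mode word, as bnx2x_set_modes_bitmap() does.
     */
    #include <stdio.h>

    #define MODE_ASIC       (1u << 0)
    #define MODE_PORT2      (1u << 1)
    #define MODE_PORT4      (1u << 2)
    #define MODE_SF         (1u << 3)
    #define MODE_MF         (1u << 4)

    #define SET_FLAGS(flags, bits)  ((flags) |= (bits))

    int main(void)
    {
            unsigned int flags = 0;
            int four_port = 0, multi_function = 1;

            SET_FLAGS(flags, MODE_ASIC);
            SET_FLAGS(flags, four_port ? MODE_PORT4 : MODE_PORT2);
            SET_FLAGS(flags, multi_function ? MODE_MF : MODE_SF);

            printf("mode flags: 0x%x\n", flags);
            return 0;
    }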
8680 | static int __devinit bnx2x_init_bp(struct bnx2x *bp) | 9280 | static int __devinit bnx2x_init_bp(struct bnx2x *bp) |
8681 | { | 9281 | { |
8682 | int func; | 9282 | int func; |
8683 | int timer_interval; | 9283 | int timer_interval; |
8684 | int rc; | 9284 | int rc; |
8685 | 9285 | ||
8686 | /* Disable interrupt handling until HW is initialized */ | ||
8687 | atomic_set(&bp->intr_sem, 1); | ||
8688 | smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ | ||
8689 | |||
8690 | mutex_init(&bp->port.phy_mutex); | 9286 | mutex_init(&bp->port.phy_mutex); |
8691 | mutex_init(&bp->fw_mb_mutex); | 9287 | mutex_init(&bp->fw_mb_mutex); |
8692 | spin_lock_init(&bp->stats_lock); | 9288 | spin_lock_init(&bp->stats_lock); |
@@ -8696,11 +9292,16 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
8696 | 9292 | ||
8697 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); | 9293 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); |
8698 | INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task); | 9294 | INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task); |
8699 | 9295 | INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); | |
8700 | rc = bnx2x_get_hwinfo(bp); | 9296 | rc = bnx2x_get_hwinfo(bp); |
9297 | if (rc) | ||
9298 | return rc; | ||
8701 | 9299 | ||
8702 | if (!rc) | 9300 | bnx2x_set_modes_bitmap(bp); |
8703 | rc = bnx2x_alloc_mem_bp(bp); | 9301 | |
9302 | rc = bnx2x_alloc_mem_bp(bp); | ||
9303 | if (rc) | ||
9304 | return rc; | ||
8704 | 9305 | ||
8705 | bnx2x_read_fwinfo(bp); | 9306 | bnx2x_read_fwinfo(bp); |
8706 | 9307 | ||
@@ -8718,7 +9319,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
8718 | "must load devices in order!\n"); | 9319 | "must load devices in order!\n"); |
8719 | 9320 | ||
8720 | bp->multi_mode = multi_mode; | 9321 | bp->multi_mode = multi_mode; |
8721 | bp->int_mode = int_mode; | ||
8722 | 9322 | ||
8723 | /* Set TPA flags */ | 9323 | /* Set TPA flags */ |
8724 | if (disable_tpa) { | 9324 | if (disable_tpa) { |
@@ -8754,6 +9354,13 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
8754 | bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); | 9354 | bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); |
8755 | bnx2x_dcbx_init_params(bp); | 9355 | bnx2x_dcbx_init_params(bp); |
8756 | 9356 | ||
9357 | #ifdef BCM_CNIC | ||
9358 | if (CHIP_IS_E1x(bp)) | ||
9359 | bp->cnic_base_cl_id = FP_SB_MAX_E1x; | ||
9360 | else | ||
9361 | bp->cnic_base_cl_id = FP_SB_MAX_E2; | ||
9362 | #endif | ||
9363 | |||
8757 | return rc; | 9364 | return rc; |
8758 | } | 9365 | } |
8759 | 9366 | ||
@@ -8762,49 +9369,70 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
8762 | * General service functions | 9369 | * General service functions |
8763 | ****************************************************************************/ | 9370 | ****************************************************************************/ |
8764 | 9371 | ||
9372 | /* | ||
9373 | * net_device service functions | ||
9374 | */ | ||
9375 | |||
8765 | /* called with rtnl_lock */ | 9376 | /* called with rtnl_lock */ |
8766 | static int bnx2x_open(struct net_device *dev) | 9377 | static int bnx2x_open(struct net_device *dev) |
8767 | { | 9378 | { |
8768 | struct bnx2x *bp = netdev_priv(dev); | 9379 | struct bnx2x *bp = netdev_priv(dev); |
9380 | bool global = false; | ||
9381 | int other_engine = BP_PATH(bp) ? 0 : 1; | ||
9382 | u32 other_load_counter, load_counter; | ||
8769 | 9383 | ||
8770 | netif_carrier_off(dev); | 9384 | netif_carrier_off(dev); |
8771 | 9385 | ||
8772 | bnx2x_set_power_state(bp, PCI_D0); | 9386 | bnx2x_set_power_state(bp, PCI_D0); |
8773 | 9387 | ||
8774 | if (!bnx2x_reset_is_done(bp)) { | 9388 | other_load_counter = bnx2x_get_load_cnt(bp, other_engine); |
9389 | load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp)); | ||
9390 | |||
9391 | /* | ||
9392 | * If parity had happened during the unload, then attentions | ||
9393 | * and/or RECOVERY_IN_PROGRESS may still be set. In this case we | ||
9394 | * want the first function loaded on the current engine to | ||
9395 | * complete the recovery. | ||
9396 | */ | ||
9397 | if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || | ||
9398 | bnx2x_chk_parity_attn(bp, &global, true)) | ||
8775 | do { | 9399 | do { |
8776 | /* Reset MCP mail box sequence if there is on going | 9400 | /* |
8777 | * recovery | 9401 | * If there are attentions and they are in a global |
9402 | * blocks, set the GLOBAL_RESET bit regardless whether | ||
9403 | * it will be this function that will complete the | ||
9404 | * recovery or not. | ||
8778 | */ | 9405 | */ |
8779 | bp->fw_seq = 0; | 9406 | if (global) |
9407 | bnx2x_set_reset_global(bp); | ||
8780 | 9408 | ||
8781 | /* If it's the first function to load and reset done | 9409 | /* |
8782 | * is still not cleared it may mean that. We don't | 9410 | * Only the first function on the current engine should |
8783 | * check the attention state here because it may have | 9411 | * try to recover in open. In case of attentions in |
8784 | * already been cleared by a "common" reset but we | 9412 | * global blocks only the first in the chip should try |
8785 | * shell proceed with "process kill" anyway. | 9413 | * to recover. |
8786 | */ | 9414 | */ |
8787 | if ((bnx2x_get_load_cnt(bp) == 0) && | 9415 | if ((!load_counter && |
8788 | bnx2x_trylock_hw_lock(bp, | 9416 | (!global || !other_load_counter)) && |
8789 | HW_LOCK_RESOURCE_RESERVED_08) && | 9417 | bnx2x_trylock_leader_lock(bp) && |
8790 | (!bnx2x_leader_reset(bp))) { | 9418 | !bnx2x_leader_reset(bp)) { |
8791 | DP(NETIF_MSG_HW, "Recovered in open\n"); | 9419 | netdev_info(bp->dev, "Recovered in open\n"); |
8792 | break; | 9420 | break; |
8793 | } | 9421 | } |
8794 | 9422 | ||
9423 | /* recovery has failed... */ | ||
8795 | bnx2x_set_power_state(bp, PCI_D3hot); | 9424 | bnx2x_set_power_state(bp, PCI_D3hot); |
9425 | bp->recovery_state = BNX2X_RECOVERY_FAILED; | ||
8796 | 9426 | ||
8797 | printk(KERN_ERR"%s: Recovery flow hasn't been properly" | 9427 | netdev_err(bp->dev, "Recovery flow hasn't been properly" |
8798 | " completed yet. Try again later. If u still see this" | 9428 | " completed yet. Try again later. If u still see this" |
8799 | " message after a few retries then power cycle is" | 9429 | " message after a few retries then power cycle is" |
8800 | " required.\n", bp->dev->name); | 9430 | " required.\n"); |
8801 | 9431 | ||
8802 | return -EAGAIN; | 9432 | return -EAGAIN; |
8803 | } while (0); | 9433 | } while (0); |
8804 | } | ||
8805 | 9434 | ||
8806 | bp->recovery_state = BNX2X_RECOVERY_DONE; | 9435 | bp->recovery_state = BNX2X_RECOVERY_DONE; |
8807 | |||
8808 | return bnx2x_nic_load(bp, LOAD_OPEN); | 9436 | return bnx2x_nic_load(bp, LOAD_OPEN); |
8809 | } | 9437 | } |
8810 | 9438 | ||
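The recovery attempt in bnx2x_open() above sits in a do { ... } while (0) block, a common C idiom that lets the code leave a multi-step sequence early with a plain break (here, as soon as recovery succeeds) without introducing goto labels, while the failure path simply returns. A tiny sketch of the idiom follows; try_recover() is an invented name for the example.

    /*
     * Illustrative sketch only: structured early exit with
     * do { ... } while (0) and break.
     */
    #include <stdio.h>

    static int try_recover(void)
    {
            return 1;       /* pretend recovery succeeded */
    }

    int main(void)
    {
            int rc = 0;

            do {
                    if (try_recover()) {
                            printf("recovered\n");
                            break;  /* success: leave the block early */
                    }

                    /* failure path: report and bail out */
                    printf("recovery failed, power cycle needed\n");
                    rc = -1;
            } while (0);

            printf("rc = %d\n", rc);
            return rc;
    }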
@@ -8815,198 +9443,126 @@ static int bnx2x_close(struct net_device *dev) | |||
8815 | 9443 | ||
8816 | /* Unload the driver, release IRQs */ | 9444 | /* Unload the driver, release IRQs */ |
8817 | bnx2x_nic_unload(bp, UNLOAD_CLOSE); | 9445 | bnx2x_nic_unload(bp, UNLOAD_CLOSE); |
9446 | |||
9447 | /* Power off */ | ||
8818 | bnx2x_set_power_state(bp, PCI_D3hot); | 9448 | bnx2x_set_power_state(bp, PCI_D3hot); |
8819 | 9449 | ||
8820 | return 0; | 9450 | return 0; |
8821 | } | 9451 | } |
8822 | 9452 | ||
8823 | #define E1_MAX_UC_LIST 29 | 9453 | static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp, |
8824 | #define E1H_MAX_UC_LIST 30 | 9454 | struct bnx2x_mcast_ramrod_params *p) |
8825 | #define E2_MAX_UC_LIST 14 | ||
8826 | static inline u8 bnx2x_max_uc_list(struct bnx2x *bp) | ||
8827 | { | 9455 | { |
8828 | if (CHIP_IS_E1(bp)) | 9456 | int mc_count = netdev_mc_count(bp->dev); |
8829 | return E1_MAX_UC_LIST; | 9457 | struct bnx2x_mcast_list_elem *mc_mac = |
8830 | else if (CHIP_IS_E1H(bp)) | 9458 | kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC); |
8831 | return E1H_MAX_UC_LIST; | 9459 | struct netdev_hw_addr *ha; |
8832 | else | ||
8833 | return E2_MAX_UC_LIST; | ||
8834 | } | ||
8835 | 9460 | ||
9461 | if (!mc_mac) | ||
9462 | return -ENOMEM; | ||
8836 | 9463 | ||
8837 | static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp) | 9464 | INIT_LIST_HEAD(&p->mcast_list); |
8838 | { | ||
8839 | if (CHIP_IS_E1(bp)) | ||
8840 | /* CAM Entries for Port0: | ||
8841 | * 0 - prim ETH MAC | ||
8842 | * 1 - BCAST MAC | ||
8843 | * 2 - iSCSI L2 ring ETH MAC | ||
8844 | * 3-31 - UC MACs | ||
8845 | * | ||
8846 | * Port1 entries are allocated the same way starting from | ||
8847 | * entry 32. | ||
8848 | */ | ||
8849 | return 3 + 32 * BP_PORT(bp); | ||
8850 | else if (CHIP_IS_E1H(bp)) { | ||
8851 | /* CAM Entries: | ||
8852 | * 0-7 - prim ETH MAC for each function | ||
8853 | * 8-15 - iSCSI L2 ring ETH MAC for each function | ||
8854 | * 16 till 255 UC MAC lists for each function | ||
8855 | * | ||
8856 | * Remark: There is no FCoE support for E1H, thus FCoE related | ||
8857 | * MACs are not considered. | ||
8858 | */ | ||
8859 | return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) + | ||
8860 | bnx2x_max_uc_list(bp) * BP_FUNC(bp); | ||
8861 | } else { | ||
8862 | /* CAM Entries (there is a separate CAM per engine): | ||
8863 | * 0-4 - prim ETH MAC for each function | ||
8864 | * 4-7 - iSCSI L2 ring ETH MAC for each function | ||
8865 | * 8-11 - FIP ucast L2 MAC for each function | ||
8866 | * 12-15 - ALL_ENODE_MACS mcast MAC for each function | ||
8867 | * 16 till 71 UC MAC lists for each function | ||
8868 | */ | ||
8869 | u8 func_idx = | ||
8870 | (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp)); | ||
8871 | 9465 | ||
8872 | return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) + | 9466 | netdev_for_each_mc_addr(ha, bp->dev) { |
8873 | bnx2x_max_uc_list(bp) * func_idx; | 9467 | mc_mac->mac = bnx2x_mc_addr(ha); |
9468 | list_add_tail(&mc_mac->link, &p->mcast_list); | ||
9469 | mc_mac++; | ||
8874 | } | 9470 | } |
9471 | |||
9472 | p->mcast_list_len = mc_count; | ||
9473 | |||
9474 | return 0; | ||
8875 | } | 9475 | } |
8876 | 9476 | ||
8877 | /* set uc list, do not wait as wait implies sleep and | 9477 | static inline void bnx2x_free_mcast_macs_list( |
8878 | * set_rx_mode can be invoked from non-sleepable context. | 9478 | struct bnx2x_mcast_ramrod_params *p) |
9479 | { | ||
9480 | struct bnx2x_mcast_list_elem *mc_mac = | ||
9481 | list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem, | ||
9482 | link); | ||
9483 | |||
9484 | WARN_ON(!mc_mac); | ||
9485 | kfree(mc_mac); | ||
9486 | } | ||
9487 | |||
9488 | /** | ||
9489 | * bnx2x_set_uc_list - configure a new unicast MACs list. | ||
8879 | * | 9490 | * |
8880 | * Instead we use the same ramrod data buffer each time we need | 9491 | * @bp: driver handle |
8881 | * to configure a list of addresses, and use the fact that the | 9492 | * |
8882 | * list of MACs is changed in an incremental way and that the | 9493 | * We will use zero (0) as a MAC type for these MACs. |
8883 | * function is called under the netif_addr_lock. A temporary | ||
8884 | * inconsistent CAM configuration (possible in case of very fast | ||
8885 | * sequence of add/del/add on the host side) will shortly be | ||
8886 | * restored by the handler of the last ramrod. | ||
8887 | */ | 9494 | */ |
8888 | static int bnx2x_set_uc_list(struct bnx2x *bp) | 9495 | static inline int bnx2x_set_uc_list(struct bnx2x *bp) |
8889 | { | 9496 | { |
8890 | int i = 0, old; | 9497 | int rc; |
8891 | struct net_device *dev = bp->dev; | 9498 | struct net_device *dev = bp->dev; |
8892 | u8 offset = bnx2x_uc_list_cam_offset(bp); | ||
8893 | struct netdev_hw_addr *ha; | 9499 | struct netdev_hw_addr *ha; |
8894 | struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config); | 9500 | struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; |
8895 | dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config); | 9501 | unsigned long ramrod_flags = 0; |
8896 | 9502 | ||
8897 | if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp)) | 9503 | /* First schedule a cleanup up of old configuration */ |
8898 | return -EINVAL; | 9504 | rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false); |
9505 | if (rc < 0) { | ||
9506 | BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc); | ||
9507 | return rc; | ||
9508 | } | ||
8899 | 9509 | ||
8900 | netdev_for_each_uc_addr(ha, dev) { | 9510 | netdev_for_each_uc_addr(ha, dev) { |
8901 | /* copy mac */ | 9511 | rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true, |
8902 | config_cmd->config_table[i].msb_mac_addr = | 9512 | BNX2X_UC_LIST_MAC, &ramrod_flags); |
8903 | swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]); | 9513 | if (rc < 0) { |
8904 | config_cmd->config_table[i].middle_mac_addr = | 9514 | BNX2X_ERR("Failed to schedule ADD operations: %d\n", |
8905 | swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]); | 9515 | rc); |
8906 | config_cmd->config_table[i].lsb_mac_addr = | 9516 | return rc; |
8907 | swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]); | ||
8908 | |||
8909 | config_cmd->config_table[i].vlan_id = 0; | ||
8910 | config_cmd->config_table[i].pf_id = BP_FUNC(bp); | ||
8911 | config_cmd->config_table[i].clients_bit_vector = | ||
8912 | cpu_to_le32(1 << BP_L_ID(bp)); | ||
8913 | |||
8914 | SET_FLAG(config_cmd->config_table[i].flags, | ||
8915 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
8916 | T_ETH_MAC_COMMAND_SET); | ||
8917 | |||
8918 | DP(NETIF_MSG_IFUP, | ||
8919 | "setting UCAST[%d] (%04x:%04x:%04x)\n", i, | ||
8920 | config_cmd->config_table[i].msb_mac_addr, | ||
8921 | config_cmd->config_table[i].middle_mac_addr, | ||
8922 | config_cmd->config_table[i].lsb_mac_addr); | ||
8923 | |||
8924 | i++; | ||
8925 | |||
8926 | /* Set uc MAC in NIG */ | ||
8927 | bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha), | ||
8928 | LLH_CAM_ETH_LINE + i); | ||
8929 | } | ||
8930 | old = config_cmd->hdr.length; | ||
8931 | if (old > i) { | ||
8932 | for (; i < old; i++) { | ||
8933 | if (CAM_IS_INVALID(config_cmd-> | ||
8934 | config_table[i])) { | ||
8935 | /* already invalidated */ | ||
8936 | break; | ||
8937 | } | ||
8938 | /* invalidate */ | ||
8939 | SET_FLAG(config_cmd->config_table[i].flags, | ||
8940 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
8941 | T_ETH_MAC_COMMAND_INVALIDATE); | ||
8942 | } | 9517 | } |
8943 | } | 9518 | } |
8944 | 9519 | ||
8945 | wmb(); | 9520 | /* Execute the pending commands */ |
8946 | 9521 | __set_bit(RAMROD_CONT, &ramrod_flags); | |
8947 | config_cmd->hdr.length = i; | 9522 | return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */, |
8948 | config_cmd->hdr.offset = offset; | 9523 | BNX2X_UC_LIST_MAC, &ramrod_flags); |
8949 | config_cmd->hdr.client_id = 0xff; | ||
8950 | /* Mark that this ramrod doesn't use bp->set_mac_pending for | ||
8951 | * synchronization. | ||
8952 | */ | ||
8953 | config_cmd->hdr.echo = 0; | ||
8954 | |||
8955 | mb(); | ||
8956 | |||
8957 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, | ||
8958 | U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); | ||
8959 | |||
8960 | } | 9524 | } |
8961 | 9525 | ||
8962 | void bnx2x_invalidate_uc_list(struct bnx2x *bp) | 9526 | static inline int bnx2x_set_mc_list(struct bnx2x *bp) |
8963 | { | 9527 | { |
8964 | int i; | 9528 | struct net_device *dev = bp->dev; |
8965 | struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config); | 9529 | struct bnx2x_mcast_ramrod_params rparam = {0}; |
8966 | dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config); | 9530 | int rc = 0; |
8967 | int ramrod_flags = WAIT_RAMROD_COMMON; | ||
8968 | u8 offset = bnx2x_uc_list_cam_offset(bp); | ||
8969 | u8 max_list_size = bnx2x_max_uc_list(bp); | ||
8970 | |||
8971 | for (i = 0; i < max_list_size; i++) { | ||
8972 | SET_FLAG(config_cmd->config_table[i].flags, | ||
8973 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
8974 | T_ETH_MAC_COMMAND_INVALIDATE); | ||
8975 | bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i); | ||
8976 | } | ||
8977 | |||
8978 | wmb(); | ||
8979 | 9531 | ||
8980 | config_cmd->hdr.length = max_list_size; | 9532 | rparam.mcast_obj = &bp->mcast_obj; |
8981 | config_cmd->hdr.offset = offset; | ||
8982 | config_cmd->hdr.client_id = 0xff; | ||
8983 | /* We'll wait for a completion this time... */ | ||
8984 | config_cmd->hdr.echo = 1; | ||
8985 | 9533 | ||
8986 | bp->set_mac_pending = 1; | 9534 | /* first, clear all configured multicast MACs */ |
9535 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); | ||
9536 | if (rc < 0) { | ||
9537 | BNX2X_ERR("Failed to clear multicast " | ||
9538 | "configuration: %d\n", rc); | ||
9539 | return rc; | ||
9540 | } | ||
8987 | 9541 | ||
8988 | mb(); | 9542 | /* then, configure a new MACs list */ |
9543 | if (netdev_mc_count(dev)) { | ||
9544 | rc = bnx2x_init_mcast_macs_list(bp, &rparam); | ||
9545 | if (rc) { | ||
9546 | BNX2X_ERR("Failed to create multicast MACs " | ||
9547 | "list: %d\n", rc); | ||
9548 | return rc; | ||
9549 | } | ||
8989 | 9550 | ||
8990 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, | 9551 | /* Now add the new MACs */ |
8991 | U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); | 9552 | rc = bnx2x_config_mcast(bp, &rparam, |
9553 | BNX2X_MCAST_CMD_ADD); | ||
9554 | if (rc < 0) | ||
9555 | BNX2X_ERR("Failed to set a new multicast " | ||
9556 | "configuration: %d\n", rc); | ||
8992 | 9557 | ||
8993 | /* Wait for a completion */ | 9558 | bnx2x_free_mcast_macs_list(&rparam); |
8994 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, | 9559 | } |
8995 | ramrod_flags); | ||
8996 | 9560 | ||
9561 | return rc; | ||
8997 | } | 9562 | } |
8998 | 9563 | ||
8999 | static inline int bnx2x_set_mc_list(struct bnx2x *bp) | ||
9000 | { | ||
9001 | /* some multicasts */ | ||
9002 | if (CHIP_IS_E1(bp)) { | ||
9003 | return bnx2x_set_e1_mc_list(bp); | ||
9004 | } else { /* E1H and newer */ | ||
9005 | return bnx2x_set_e1h_mc_list(bp); | ||
9006 | } | ||
9007 | } | ||
9008 | 9564 | ||
9009 | /* called with netif_tx_lock from dev_mcast.c */ | 9565 | /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ |
9010 | void bnx2x_set_rx_mode(struct net_device *dev) | 9566 | void bnx2x_set_rx_mode(struct net_device *dev) |
9011 | { | 9567 | { |
9012 | struct bnx2x *bp = netdev_priv(dev); | 9568 | struct bnx2x *bp = netdev_priv(dev); |
@@ -9017,23 +9573,31 @@ void bnx2x_set_rx_mode(struct net_device *dev) | |||
9017 | return; | 9573 | return; |
9018 | } | 9574 | } |
9019 | 9575 | ||
9020 | DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags); | 9576 | DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); |
9021 | 9577 | ||
9022 | if (dev->flags & IFF_PROMISC) | 9578 | if (dev->flags & IFF_PROMISC) |
9023 | rx_mode = BNX2X_RX_MODE_PROMISC; | 9579 | rx_mode = BNX2X_RX_MODE_PROMISC; |
9024 | else if (dev->flags & IFF_ALLMULTI) | 9580 | else if ((dev->flags & IFF_ALLMULTI) || |
9581 | ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && | ||
9582 | CHIP_IS_E1(bp))) | ||
9025 | rx_mode = BNX2X_RX_MODE_ALLMULTI; | 9583 | rx_mode = BNX2X_RX_MODE_ALLMULTI; |
9026 | else { | 9584 | else { |
9027 | /* some multicasts */ | 9585 | /* some multicasts */ |
9028 | if (bnx2x_set_mc_list(bp)) | 9586 | if (bnx2x_set_mc_list(bp) < 0) |
9029 | rx_mode = BNX2X_RX_MODE_ALLMULTI; | 9587 | rx_mode = BNX2X_RX_MODE_ALLMULTI; |
9030 | 9588 | ||
9031 | /* some unicasts */ | 9589 | if (bnx2x_set_uc_list(bp) < 0) |
9032 | if (bnx2x_set_uc_list(bp)) | ||
9033 | rx_mode = BNX2X_RX_MODE_PROMISC; | 9590 | rx_mode = BNX2X_RX_MODE_PROMISC; |
9034 | } | 9591 | } |
9035 | 9592 | ||
9036 | bp->rx_mode = rx_mode; | 9593 | bp->rx_mode = rx_mode; |
9594 | |||
9595 | /* Schedule the rx_mode command */ | ||
9596 | if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { | ||
9597 | set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); | ||
9598 | return; | ||
9599 | } | ||
9600 | |||
9037 | bnx2x_set_storm_rx_mode(bp); | 9601 | bnx2x_set_storm_rx_mode(bp); |
9038 | } | 9602 | } |
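The tail of bnx2x_set_rx_mode() above introduces the deferral idiom used throughout this patch: if a previous rx_mode ramrod is still pending, only a "scheduled" bit is recorded and the completion handler re-issues the command later. Schematically (generic names, not driver code):

	if (test_bit(WORK_PENDING, &state))
		set_bit(WORK_SCHEDULED, &state);	/* replayed when the pending command completes */
	else
		issue_command();			/* hardware path is idle, issue immediately */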
9039 | 9603 | ||
@@ -9124,8 +9688,28 @@ static const struct net_device_ops bnx2x_netdev_ops = { | |||
9124 | #endif | 9688 | #endif |
9125 | }; | 9689 | }; |
9126 | 9690 | ||
9691 | static inline int bnx2x_set_coherency_mask(struct bnx2x *bp) | ||
9692 | { | ||
9693 | struct device *dev = &bp->pdev->dev; | ||
9694 | |||
9695 | if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { | ||
9696 | bp->flags |= USING_DAC_FLAG; | ||
9697 | if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { | ||
9698 | dev_err(dev, "dma_set_coherent_mask failed, " | ||
9699 | "aborting\n"); | ||
9700 | return -EIO; | ||
9701 | } | ||
9702 | } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { | ||
9703 | dev_err(dev, "System does not support DMA, aborting\n"); | ||
9704 | return -EIO; | ||
9705 | } | ||
9706 | |||
9707 | return 0; | ||
9708 | } | ||
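bnx2x_set_coherency_mask() follows the usual PCI DMA setup: try a 64-bit streaming mask (with a matching coherent mask), otherwise fall back to 32-bit. A generic sketch of the same fall-back for an arbitrary PCI device, assuming <linux/pci.h> and <linux/dma-mapping.h> (not bnx2x code; dma_set_mask()/dma_set_coherent_mask() return 0 on success):

	static int example_set_dma_masks(struct pci_dev *pdev)
	{
		/* prefer 64-bit DMA addressing */
		if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
		    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))
			return 0;

		/* fall back to a 32-bit mask */
		if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) &&
		    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
			return 0;

		return -EIO;	/* platform cannot do DMA for this device */
	}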
9709 | |||
9127 | static int __devinit bnx2x_init_dev(struct pci_dev *pdev, | 9710 | static int __devinit bnx2x_init_dev(struct pci_dev *pdev, |
9128 | struct net_device *dev) | 9711 | struct net_device *dev, |
9712 | unsigned long board_type) | ||
9129 | { | 9713 | { |
9130 | struct bnx2x *bp; | 9714 | struct bnx2x *bp; |
9131 | int rc; | 9715 | int rc; |
@@ -9179,29 +9763,15 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, | |||
9179 | goto err_out_release; | 9763 | goto err_out_release; |
9180 | } | 9764 | } |
9181 | 9765 | ||
9182 | bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 9766 | if (!pci_is_pcie(pdev)) { |
9183 | if (bp->pcie_cap == 0) { | 9767 | dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); |
9184 | dev_err(&bp->pdev->dev, | ||
9185 | "Cannot find PCI Express capability, aborting\n"); | ||
9186 | rc = -EIO; | 9768 | rc = -EIO; |
9187 | goto err_out_release; | 9769 | goto err_out_release; |
9188 | } | 9770 | } |
9189 | 9771 | ||
9190 | if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) { | 9772 | rc = bnx2x_set_coherency_mask(bp); |
9191 | bp->flags |= USING_DAC_FLAG; | 9773 | if (rc) |
9192 | if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) { | ||
9193 | dev_err(&bp->pdev->dev, "dma_set_coherent_mask" | ||
9194 | " failed, aborting\n"); | ||
9195 | rc = -EIO; | ||
9196 | goto err_out_release; | ||
9197 | } | ||
9198 | |||
9199 | } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) { | ||
9200 | dev_err(&bp->pdev->dev, | ||
9201 | "System does not support DMA, aborting\n"); | ||
9202 | rc = -EIO; | ||
9203 | goto err_out_release; | 9774 | goto err_out_release; |
9204 | } | ||
9205 | 9775 | ||
9206 | dev->mem_start = pci_resource_start(pdev, 0); | 9776 | dev->mem_start = pci_resource_start(pdev, 0); |
9207 | dev->base_addr = dev->mem_start; | 9777 | dev->base_addr = dev->mem_start; |
@@ -9237,6 +9807,12 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, | |||
9237 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); | 9807 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); |
9238 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); | 9808 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); |
9239 | 9809 | ||
9810 | /** | ||
9811 | * Enable internal target-read (in case we are probed after PF FLR). | ||
9812 | * Must be done prior to any BAR read access | ||
9813 | */ | ||
9814 | REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); | ||
9815 | |||
9240 | /* Reset the load counter */ | 9816 | /* Reset the load counter */ |
9241 | bnx2x_clear_load_cnt(bp); | 9817 | bnx2x_clear_load_cnt(bp); |
9242 | 9818 | ||
@@ -9451,7 +10027,7 @@ int bnx2x_init_firmware(struct bnx2x *bp) | |||
9451 | fw_file_name = FW_FILE_NAME_E1; | 10027 | fw_file_name = FW_FILE_NAME_E1; |
9452 | else if (CHIP_IS_E1H(bp)) | 10028 | else if (CHIP_IS_E1H(bp)) |
9453 | fw_file_name = FW_FILE_NAME_E1H; | 10029 | fw_file_name = FW_FILE_NAME_E1H; |
9454 | else if (CHIP_IS_E2(bp)) | 10030 | else if (!CHIP_IS_E1x(bp)) |
9455 | fw_file_name = FW_FILE_NAME_E2; | 10031 | fw_file_name = FW_FILE_NAME_E2; |
9456 | else { | 10032 | else { |
9457 | BNX2X_ERR("Unsupported chip revision\n"); | 10033 | BNX2X_ERR("Unsupported chip revision\n"); |
@@ -9519,6 +10095,44 @@ request_firmware_exit: | |||
9519 | return rc; | 10095 | return rc; |
9520 | } | 10096 | } |
9521 | 10097 | ||
10098 | static void bnx2x_release_firmware(struct bnx2x *bp) | ||
10099 | { | ||
10100 | kfree(bp->init_ops_offsets); | ||
10101 | kfree(bp->init_ops); | ||
10102 | kfree(bp->init_data); | ||
10103 | release_firmware(bp->firmware); | ||
10104 | } | ||
10105 | |||
10106 | |||
10107 | static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = { | ||
10108 | .init_hw_cmn_chip = bnx2x_init_hw_common_chip, | ||
10109 | .init_hw_cmn = bnx2x_init_hw_common, | ||
10110 | .init_hw_port = bnx2x_init_hw_port, | ||
10111 | .init_hw_func = bnx2x_init_hw_func, | ||
10112 | |||
10113 | .reset_hw_cmn = bnx2x_reset_common, | ||
10114 | .reset_hw_port = bnx2x_reset_port, | ||
10115 | .reset_hw_func = bnx2x_reset_func, | ||
10116 | |||
10117 | .gunzip_init = bnx2x_gunzip_init, | ||
10118 | .gunzip_end = bnx2x_gunzip_end, | ||
10119 | |||
10120 | .init_fw = bnx2x_init_firmware, | ||
10121 | .release_fw = bnx2x_release_firmware, | ||
10122 | }; | ||
10123 | |||
10124 | void bnx2x__init_func_obj(struct bnx2x *bp) | ||
10125 | { | ||
10126 | /* Prepare DMAE related driver resources */ | ||
10127 | bnx2x_setup_dmae(bp); | ||
10128 | |||
10129 | bnx2x_init_func_obj(bp, &bp->func_obj, | ||
10130 | bnx2x_sp(bp, func_rdata), | ||
10131 | bnx2x_sp_mapping(bp, func_rdata), | ||
10132 | &bnx2x_func_sp_drv); | ||
10133 | } | ||
10134 | |||
10135 | /* must be called after sriov-enable */ | ||
9522 | static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count) | 10136 | static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count) |
9523 | { | 10137 | { |
9524 | int cid_count = L2_FP_COUNT(l2_cid_count); | 10138 | int cid_count = L2_FP_COUNT(l2_cid_count); |
@@ -9529,6 +10143,25 @@ static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count) | |||
9529 | return roundup(cid_count, QM_CID_ROUND); | 10143 | return roundup(cid_count, QM_CID_ROUND); |
9530 | } | 10144 | } |
9531 | 10145 | ||
10146 | /** | ||
10147 | * bnx2x_pci_msix_table_size - get the size of the MSI-X table. | ||
10148 | * | ||
10149 | * @pdev: pci device | ||
10150 | * | ||
10151 | */ | ||
10152 | static inline int bnx2x_pci_msix_table_size(struct pci_dev *pdev) | ||
10153 | { | ||
10154 | int pos; | ||
10155 | u16 control; | ||
10156 | |||
10157 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); | ||
10158 | if (!pos) | ||
10159 | return 0; | ||
10160 | |||
10161 | pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control); | ||
10162 | return (control & PCI_MSIX_FLAGS_QSIZE) + 1; | ||
10163 | } | ||
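The helper above relies on the MSI-X capability layout: the low 11 bits of the Message Control word (PCI_MSIX_FLAGS_QSIZE) hold the table size encoded as N-1, hence the '+ 1'. A hypothetical reading, with the register value invented purely for illustration:

	/* e.g. Message Control = 0x0010 -> QSIZE field = 0x010 */
	int vectors = bnx2x_pci_msix_table_size(pdev);	/* 0x010 + 1 = 17 vectors */
	int non_default_sbs = vectors - 1;		/* 16 non-default status blocks */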
10164 | |||
9532 | static int __devinit bnx2x_init_one(struct pci_dev *pdev, | 10165 | static int __devinit bnx2x_init_one(struct pci_dev *pdev, |
9533 | const struct pci_device_id *ent) | 10166 | const struct pci_device_id *ent) |
9534 | { | 10167 | { |
@@ -9541,12 +10174,28 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
9541 | case BCM57710: | 10174 | case BCM57710: |
9542 | case BCM57711: | 10175 | case BCM57711: |
9543 | case BCM57711E: | 10176 | case BCM57711E: |
9544 | cid_count = FP_SB_MAX_E1x; | ||
9545 | break; | ||
9546 | |||
9547 | case BCM57712: | 10177 | case BCM57712: |
9548 | case BCM57712E: | 10178 | case BCM57712_MF: |
9549 | cid_count = FP_SB_MAX_E2; | 10179 | case BCM57800: |
10180 | case BCM57800_MF: | ||
10181 | case BCM57810: | ||
10182 | case BCM57810_MF: | ||
10183 | case BCM57840: | ||
10184 | case BCM57840_MF: | ||
10185 | /* The size requested for the MSI-X table corresponds to the | ||
10186 | * actual number of available IGU/HC status blocks. It includes | ||
10187 | * the default SB vector, but we want cid_count to hold only the | ||
10188 | * non-default SBs; that is what the '-1' stands for. | ||
10189 | */ | ||
10190 | cid_count = bnx2x_pci_msix_table_size(pdev) - 1; | ||
10191 | |||
10192 | /* do not allow initial cid_count grow above 16 | ||
10193 | * since Special CIDs starts from this number | ||
10194 | * use old FP_SB_MAX_E1x define for this matter | ||
10195 | */ | ||
10196 | cid_count = min_t(int, FP_SB_MAX_E1x, cid_count); | ||
10197 | |||
10198 | WARN_ON(!cid_count); | ||
9550 | break; | 10199 | break; |
9551 | 10200 | ||
9552 | default: | 10201 | default: |
@@ -9555,7 +10204,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
9555 | return -ENODEV; | 10204 | return -ENODEV; |
9556 | } | 10205 | } |
9557 | 10206 | ||
9558 | cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE; | 10207 | cid_count += FCOE_CONTEXT_USE; |
9559 | 10208 | ||
9560 | /* dev zeroed in init_etherdev */ | 10209 | /* dev zeroed in init_etherdev */ |
9561 | dev = alloc_etherdev_mq(sizeof(*bp), cid_count); | 10210 | dev = alloc_etherdev_mq(sizeof(*bp), cid_count); |
@@ -9564,6 +10213,11 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
9564 | return -ENOMEM; | 10213 | return -ENOMEM; |
9565 | } | 10214 | } |
9566 | 10215 | ||
10216 | /* We don't need a Tx queue for a CNIC and an OOO Rx-only ring, | ||
10217 | * so update a cid_count after a netdev allocation. | ||
10218 | */ | ||
10219 | cid_count += CNIC_CONTEXT_USE; | ||
10220 | |||
9567 | bp = netdev_priv(dev); | 10221 | bp = netdev_priv(dev); |
9568 | bp->msg_enable = debug; | 10222 | bp->msg_enable = debug; |
9569 | 10223 | ||
@@ -9571,12 +10225,14 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
9571 | 10225 | ||
9572 | bp->l2_cid_count = cid_count; | 10226 | bp->l2_cid_count = cid_count; |
9573 | 10227 | ||
9574 | rc = bnx2x_init_dev(pdev, dev); | 10228 | rc = bnx2x_init_dev(pdev, dev, ent->driver_data); |
9575 | if (rc < 0) { | 10229 | if (rc < 0) { |
9576 | free_netdev(dev); | 10230 | free_netdev(dev); |
9577 | return rc; | 10231 | return rc; |
9578 | } | 10232 | } |
9579 | 10233 | ||
10234 | BNX2X_DEV_INFO("cid_count=%d\n", cid_count); | ||
10235 | |||
9580 | rc = bnx2x_init_bp(bp); | 10236 | rc = bnx2x_init_bp(bp); |
9581 | if (rc) | 10237 | if (rc) |
9582 | goto init_one_exit; | 10238 | goto init_one_exit; |
@@ -9713,12 +10369,17 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) | |||
9713 | 10369 | ||
9714 | bp->rx_mode = BNX2X_RX_MODE_NONE; | 10370 | bp->rx_mode = BNX2X_RX_MODE_NONE; |
9715 | 10371 | ||
10372 | #ifdef BCM_CNIC | ||
10373 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); | ||
10374 | #endif | ||
10375 | /* Stop Tx */ | ||
10376 | bnx2x_tx_disable(bp); | ||
10377 | |||
9716 | bnx2x_netif_stop(bp, 0); | 10378 | bnx2x_netif_stop(bp, 0); |
9717 | netif_carrier_off(bp->dev); | ||
9718 | 10379 | ||
9719 | del_timer_sync(&bp->timer); | 10380 | del_timer_sync(&bp->timer); |
9720 | bp->stats_state = STATS_STATE_DISABLED; | 10381 | |
9721 | DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); | 10382 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); |
9722 | 10383 | ||
9723 | /* Release IRQs */ | 10384 | /* Release IRQs */ |
9724 | bnx2x_free_irq(bp); | 10385 | bnx2x_free_irq(bp); |
@@ -9733,6 +10394,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) | |||
9733 | 10394 | ||
9734 | bp->state = BNX2X_STATE_CLOSED; | 10395 | bp->state = BNX2X_STATE_CLOSED; |
9735 | 10396 | ||
10397 | netif_carrier_off(bp->dev); | ||
10398 | |||
9736 | return 0; | 10399 | return 0; |
9737 | } | 10400 | } |
9738 | 10401 | ||
@@ -9845,8 +10508,8 @@ static void bnx2x_io_resume(struct pci_dev *pdev) | |||
9845 | struct bnx2x *bp = netdev_priv(dev); | 10508 | struct bnx2x *bp = netdev_priv(dev); |
9846 | 10509 | ||
9847 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | 10510 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { |
9848 | printk(KERN_ERR "Handling parity error recovery. " | 10511 | netdev_err(bp->dev, "Handling parity error recovery. " |
9849 | "Try again later\n"); | 10512 | "Try again later\n"); |
9850 | return; | 10513 | return; |
9851 | } | 10514 | } |
9852 | 10515 | ||
@@ -9905,10 +10568,33 @@ static void __exit bnx2x_cleanup(void) | |||
9905 | destroy_workqueue(bnx2x_wq); | 10568 | destroy_workqueue(bnx2x_wq); |
9906 | } | 10569 | } |
9907 | 10570 | ||
10571 | void bnx2x_notify_link_changed(struct bnx2x *bp) | ||
10572 | { | ||
10573 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1); | ||
10574 | } | ||
10575 | |||
9908 | module_init(bnx2x_init); | 10576 | module_init(bnx2x_init); |
9909 | module_exit(bnx2x_cleanup); | 10577 | module_exit(bnx2x_cleanup); |
9910 | 10578 | ||
9911 | #ifdef BCM_CNIC | 10579 | #ifdef BCM_CNIC |
10580 | /** | ||
10581 | * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). | ||
10582 | * | ||
10583 | * @bp: driver handle | ||
10585 | * | ||
10586 | * This function will wait until the ramrod completion returns. | ||
10587 | * Return 0 if success, -ENODEV if ramrod doesn't return. | ||
10588 | */ | ||
10589 | static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) | ||
10590 | { | ||
10591 | unsigned long ramrod_flags = 0; | ||
10592 | |||
10593 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); | ||
10594 | return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac, | ||
10595 | &bp->iscsi_l2_mac_obj, true, | ||
10596 | BNX2X_ISCSI_ETH_MAC, &ramrod_flags); | ||
10597 | } | ||
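bnx2x_set_iscsi_eth_mac_addr() differs from the unicast-list path earlier in this patch only in its ramrod_flags: RAMROD_COMP_WAIT makes the call block until the classification ramrod completes, whereas the UC path queues commands and fires them with RAMROD_CONT. A schematic contrast (flag and type names from this patch; bp, obj and mac are placeholders):

	unsigned long flags = 0;

	/* synchronous: wait for the firmware completion before returning */
	__set_bit(RAMROD_COMP_WAIT, &flags);
	rc = bnx2x_set_mac_one(bp, mac, obj, true, BNX2X_ISCSI_ETH_MAC, &flags);

	/* batched: queue ADD/DEL commands with flags == 0, then issue one
	 * final call with RAMROD_CONT set (and a NULL MAC) to execute them */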
9912 | 10598 | ||
9913 | /* count denotes the number of new completions we have seen */ | 10599 | /* count denotes the number of new completions we have seen */ |
9914 | static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) | 10600 | static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) |
@@ -9929,23 +10615,22 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) | |||
9929 | u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) | 10615 | u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) |
9930 | & SPE_HDR_CONN_TYPE) >> | 10616 | & SPE_HDR_CONN_TYPE) >> |
9931 | SPE_HDR_CONN_TYPE_SHIFT; | 10617 | SPE_HDR_CONN_TYPE_SHIFT; |
10618 | u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data) | ||
10619 | >> SPE_HDR_CMD_ID_SHIFT) & 0xff; | ||
9932 | 10620 | ||
9933 | /* Set validation for iSCSI L2 client before sending SETUP | 10621 | /* Set validation for iSCSI L2 client before sending SETUP |
9934 | * ramrod | 10622 | * ramrod |
9935 | */ | 10623 | */ |
9936 | if (type == ETH_CONNECTION_TYPE) { | 10624 | if (type == ETH_CONNECTION_TYPE) { |
9937 | u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons-> | ||
9938 | hdr.conn_and_cmd_data) >> | ||
9939 | SPE_HDR_CMD_ID_SHIFT) & 0xff; | ||
9940 | |||
9941 | if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) | 10625 | if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) |
9942 | bnx2x_set_ctx_validation(&bp->context. | 10626 | bnx2x_set_ctx_validation(bp, &bp->context. |
9943 | vcxt[BNX2X_ISCSI_ETH_CID].eth, | 10627 | vcxt[BNX2X_ISCSI_ETH_CID].eth, |
9944 | HW_CID(bp, BNX2X_ISCSI_ETH_CID)); | 10628 | BNX2X_ISCSI_ETH_CID); |
9945 | } | 10629 | } |
9946 | 10630 | ||
9947 | /* There may be not more than 8 L2 and not more than 8 L5 SPEs | 10631 | /* |
9948 | * We also check that the number of outstanding | 10632 | * There may be not more than 8 L2, not more than 8 L5 SPEs |
10633 | * in the air. We also check that the number of outstanding | ||
9949 | * COMMON ramrods is not more than the EQ and SPQ can | 10634 | * COMMON ramrods is not more than the EQ and SPQ can |
9950 | * accommodate. | 10635 | * accommodate. |
9951 | */ | 10636 | */ |
@@ -10071,18 +10756,61 @@ int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) | |||
10071 | return bnx2x_cnic_ctl_send(bp, &ctl); | 10756 | return bnx2x_cnic_ctl_send(bp, &ctl); |
10072 | } | 10757 | } |
10073 | 10758 | ||
10074 | static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid) | 10759 | static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) |
10075 | { | 10760 | { |
10076 | struct cnic_ctl_info ctl; | 10761 | struct cnic_ctl_info ctl = {0}; |
10077 | 10762 | ||
10078 | /* first we tell CNIC and only then we count this as a completion */ | 10763 | /* first we tell CNIC and only then we count this as a completion */ |
10079 | ctl.cmd = CNIC_CTL_COMPLETION_CMD; | 10764 | ctl.cmd = CNIC_CTL_COMPLETION_CMD; |
10080 | ctl.data.comp.cid = cid; | 10765 | ctl.data.comp.cid = cid; |
10766 | ctl.data.comp.error = err; | ||
10081 | 10767 | ||
10082 | bnx2x_cnic_ctl_send_bh(bp, &ctl); | 10768 | bnx2x_cnic_ctl_send_bh(bp, &ctl); |
10083 | bnx2x_cnic_sp_post(bp, 0); | 10769 | bnx2x_cnic_sp_post(bp, 0); |
10084 | } | 10770 | } |
10085 | 10771 | ||
10772 | |||
10773 | /* Called with netif_addr_lock_bh() taken. | ||
10774 | * Sets an rx_mode config for an iSCSI ETH client. | ||
10775 | * Doesn't block. | ||
10776 | * Completion should be checked outside. | ||
10777 | */ | ||
10778 | static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) | ||
10779 | { | ||
10780 | unsigned long accept_flags = 0, ramrod_flags = 0; | ||
10781 | u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); | ||
10782 | int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED; | ||
10783 | |||
10784 | if (start) { | ||
10785 | /* Start accepting on iSCSI L2 ring. Accept all multicasts | ||
10786 | * because it's the only way for UIO Queue to accept | ||
10787 | * multicasts (in non-promiscuous mode only one Queue per | ||
10788 | * function will receive multicast packets (leading in our | ||
10789 | * case)). | ||
10790 | */ | ||
10791 | __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags); | ||
10792 | __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags); | ||
10793 | __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags); | ||
10794 | __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); | ||
10795 | |||
10796 | /* Clear STOP_PENDING bit if START is requested */ | ||
10797 | clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); | ||
10798 | |||
10799 | sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED; | ||
10800 | } else | ||
10801 | /* Clear START_PENDING bit if STOP is requested */ | ||
10802 | clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); | ||
10803 | |||
10804 | if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) | ||
10805 | set_bit(sched_state, &bp->sp_state); | ||
10806 | else { | ||
10807 | __set_bit(RAMROD_RX, &ramrod_flags); | ||
10808 | bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0, | ||
10809 | ramrod_flags); | ||
10810 | } | ||
10811 | } | ||
10812 | |||
10813 | |||
10086 | static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) | 10814 | static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) |
10087 | { | 10815 | { |
10088 | struct bnx2x *bp = netdev_priv(dev); | 10816 | struct bnx2x *bp = netdev_priv(dev); |
@@ -10106,45 +10834,65 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) | |||
10106 | 10834 | ||
10107 | /* rtnl_lock is held. */ | 10835 | /* rtnl_lock is held. */ |
10108 | case DRV_CTL_START_L2_CMD: { | 10836 | case DRV_CTL_START_L2_CMD: { |
10109 | u32 cli = ctl->data.ring.client_id; | 10837 | struct cnic_eth_dev *cp = &bp->cnic_eth_dev; |
10110 | 10838 | unsigned long sp_bits = 0; | |
10111 | /* Clear FCoE FIP and ALL ENODE MACs addresses first */ | 10839 | |
10112 | bnx2x_del_fcoe_eth_macs(bp); | 10840 | /* Configure the iSCSI classification object */ |
10841 | bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, | ||
10842 | cp->iscsi_l2_client_id, | ||
10843 | cp->iscsi_l2_cid, BP_FUNC(bp), | ||
10844 | bnx2x_sp(bp, mac_rdata), | ||
10845 | bnx2x_sp_mapping(bp, mac_rdata), | ||
10846 | BNX2X_FILTER_MAC_PENDING, | ||
10847 | &bp->sp_state, BNX2X_OBJ_TYPE_RX, | ||
10848 | &bp->macs_pool); | ||
10113 | 10849 | ||
10114 | /* Set iSCSI MAC address */ | 10850 | /* Set iSCSI MAC address */ |
10115 | bnx2x_set_iscsi_eth_mac_addr(bp, 1); | 10851 | rc = bnx2x_set_iscsi_eth_mac_addr(bp); |
10852 | if (rc) | ||
10853 | break; | ||
10116 | 10854 | ||
10117 | mmiowb(); | 10855 | mmiowb(); |
10118 | barrier(); | 10856 | barrier(); |
10119 | 10857 | ||
10120 | /* Start accepting on iSCSI L2 ring. Accept all multicasts | 10858 | /* Start accepting on iSCSI L2 ring */ |
10121 | * because it's the only way for UIO Client to accept | 10859 | |
10122 | * multicasts (in non-promiscuous mode only one Client per | 10860 | netif_addr_lock_bh(dev); |
10123 | * function will receive multicast packets (leading in our | 10861 | bnx2x_set_iscsi_eth_rx_mode(bp, true); |
10124 | * case). | 10862 | netif_addr_unlock_bh(dev); |
10125 | */ | 10863 | |
10126 | bnx2x_rxq_set_mac_filters(bp, cli, | 10864 | /* bits to wait on */ |
10127 | BNX2X_ACCEPT_UNICAST | | 10865 | __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); |
10128 | BNX2X_ACCEPT_BROADCAST | | 10866 | __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits); |
10129 | BNX2X_ACCEPT_ALL_MULTICAST); | 10867 | |
10130 | storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp)); | 10868 | if (!bnx2x_wait_sp_comp(bp, sp_bits)) |
10869 | BNX2X_ERR("rx_mode completion timed out!\n"); | ||
10131 | 10870 | ||
10132 | break; | 10871 | break; |
10133 | } | 10872 | } |
10134 | 10873 | ||
10135 | /* rtnl_lock is held. */ | 10874 | /* rtnl_lock is held. */ |
10136 | case DRV_CTL_STOP_L2_CMD: { | 10875 | case DRV_CTL_STOP_L2_CMD: { |
10137 | u32 cli = ctl->data.ring.client_id; | 10876 | unsigned long sp_bits = 0; |
10138 | 10877 | ||
10139 | /* Stop accepting on iSCSI L2 ring */ | 10878 | /* Stop accepting on iSCSI L2 ring */ |
10140 | bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE); | 10879 | netif_addr_lock_bh(dev); |
10141 | storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp)); | 10880 | bnx2x_set_iscsi_eth_rx_mode(bp, false); |
10881 | netif_addr_unlock_bh(dev); | ||
10882 | |||
10883 | /* bits to wait on */ | ||
10884 | __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); | ||
10885 | __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits); | ||
10886 | |||
10887 | if (!bnx2x_wait_sp_comp(bp, sp_bits)) | ||
10888 | BNX2X_ERR("rx_mode completion timed out!\n"); | ||
10142 | 10889 | ||
10143 | mmiowb(); | 10890 | mmiowb(); |
10144 | barrier(); | 10891 | barrier(); |
10145 | 10892 | ||
10146 | /* Unset iSCSI L2 MAC */ | 10893 | /* Unset iSCSI L2 MAC */ |
10147 | bnx2x_set_iscsi_eth_mac_addr(bp, 0); | 10894 | rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, |
10895 | BNX2X_ISCSI_ETH_MAC, true); | ||
10148 | break; | 10896 | break; |
10149 | } | 10897 | } |
10150 | case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { | 10898 | case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { |
@@ -10156,11 +10904,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) | |||
10156 | break; | 10904 | break; |
10157 | } | 10905 | } |
10158 | 10906 | ||
10159 | case DRV_CTL_ISCSI_STOPPED_CMD: { | ||
10160 | bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED); | ||
10161 | break; | ||
10162 | } | ||
10163 | |||
10164 | default: | 10907 | default: |
10165 | BNX2X_ERR("unknown command %x\n", ctl->cmd); | 10908 | BNX2X_ERR("unknown command %x\n", ctl->cmd); |
10166 | rc = -EINVAL; | 10909 | rc = -EINVAL; |
@@ -10181,13 +10924,13 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) | |||
10181 | cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; | 10924 | cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; |
10182 | cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; | 10925 | cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; |
10183 | } | 10926 | } |
10184 | if (CHIP_IS_E2(bp)) | 10927 | if (!CHIP_IS_E1x(bp)) |
10185 | cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; | 10928 | cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; |
10186 | else | 10929 | else |
10187 | cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; | 10930 | cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; |
10188 | 10931 | ||
10189 | cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp); | 10932 | cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); |
10190 | cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp); | 10933 | cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); |
10191 | cp->irq_arr[1].status_blk = bp->def_status_blk; | 10934 | cp->irq_arr[1].status_blk = bp->def_status_blk; |
10192 | cp->irq_arr[1].status_blk_num = DEF_SB_ID; | 10935 | cp->irq_arr[1].status_blk_num = DEF_SB_ID; |
10193 | cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; | 10936 | cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; |
@@ -10204,9 +10947,6 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, | |||
10204 | if (ops == NULL) | 10947 | if (ops == NULL) |
10205 | return -EINVAL; | 10948 | return -EINVAL; |
10206 | 10949 | ||
10207 | if (atomic_read(&bp->intr_sem) != 0) | ||
10208 | return -EBUSY; | ||
10209 | |||
10210 | bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); | 10950 | bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); |
10211 | if (!bp->cnic_kwq) | 10951 | if (!bp->cnic_kwq) |
10212 | return -ENOMEM; | 10952 | return -ENOMEM; |
@@ -10221,7 +10961,7 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, | |||
10221 | bp->cnic_data = data; | 10961 | bp->cnic_data = data; |
10222 | 10962 | ||
10223 | cp->num_irq = 0; | 10963 | cp->num_irq = 0; |
10224 | cp->drv_state = CNIC_DRV_STATE_REGD; | 10964 | cp->drv_state |= CNIC_DRV_STATE_REGD; |
10225 | cp->iro_arr = bp->iro_arr; | 10965 | cp->iro_arr = bp->iro_arr; |
10226 | 10966 | ||
10227 | bnx2x_setup_cnic_irq_info(bp); | 10967 | bnx2x_setup_cnic_irq_info(bp); |
@@ -10275,8 +11015,8 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) | |||
10275 | cp->drv_register_cnic = bnx2x_register_cnic; | 11015 | cp->drv_register_cnic = bnx2x_register_cnic; |
10276 | cp->drv_unregister_cnic = bnx2x_unregister_cnic; | 11016 | cp->drv_unregister_cnic = bnx2x_unregister_cnic; |
10277 | cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID; | 11017 | cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID; |
10278 | cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID + | 11018 | cp->iscsi_l2_client_id = |
10279 | BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; | 11019 | bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); |
10280 | cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; | 11020 | cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; |
10281 | 11021 | ||
10282 | if (NO_ISCSI_OOO(bp)) | 11022 | if (NO_ISCSI_OOO(bp)) |
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 86bba25d2d3f..005c05af0905 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h | |||
@@ -54,16 +54,20 @@ | |||
54 | /* [RW 10] The number of free blocks below which the full signal to class 0 | 54 | /* [RW 10] The number of free blocks below which the full signal to class 0 |
55 | * is asserted */ | 55 | * is asserted */ |
56 | #define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0 | 56 | #define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0 |
57 | /* [RW 10] The number of free blocks above which the full signal to class 0 | 57 | #define BRB1_REG_FULL_0_XOFF_THRESHOLD_1 0x60230 |
58 | /* [RW 11] The number of free blocks above which the full signal to class 0 | ||
58 | * is de-asserted */ | 59 | * is de-asserted */ |
59 | #define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4 | 60 | #define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4 |
60 | /* [RW 10] The number of free blocks below which the full signal to class 1 | 61 | #define BRB1_REG_FULL_0_XON_THRESHOLD_1 0x60234 |
62 | /* [RW 11] The number of free blocks below which the full signal to class 1 | ||
61 | * is asserted */ | 63 | * is asserted */ |
62 | #define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8 | 64 | #define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8 |
63 | /* [RW 10] The number of free blocks above which the full signal to class 1 | 65 | #define BRB1_REG_FULL_1_XOFF_THRESHOLD_1 0x60238 |
66 | /* [RW 11] The number of free blocks above which the full signal to class 1 | ||
64 | * is de-asserted */ | 67 | * is de-asserted */ |
65 | #define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc | 68 | #define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc |
66 | /* [RW 10] The number of free blocks below which the full signal to the LB | 69 | #define BRB1_REG_FULL_1_XON_THRESHOLD_1 0x6023c |
70 | /* [RW 11] The number of free blocks below which the full signal to the LB | ||
67 | * port is asserted */ | 71 | * port is asserted */ |
68 | #define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0 | 72 | #define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0 |
69 | /* [RW 10] The number of free blocks above which the full signal to the LB | 73 | /* [RW 10] The number of free blocks above which the full signal to the LB |
@@ -75,15 +79,49 @@ | |||
75 | /* [RW 10] The number of free blocks below which the High_llfc signal to | 79 | /* [RW 10] The number of free blocks below which the High_llfc signal to |
76 | interface #n is asserted. */ | 80 | interface #n is asserted. */ |
77 | #define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 0x6013c | 81 | #define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 0x6013c |
78 | /* [RW 23] LL RAM data. */ | 82 | /* [RW 11] The number of blocks guarantied for the LB port */ |
79 | #define BRB1_REG_LL_RAM 0x61000 | 83 | #define BRB1_REG_LB_GUARANTIED 0x601ec |
84 | /* [RW 11] The hysteresis on the guarantied buffer space for the LB port | ||
85 | * before signaling XON. */ | ||
86 | #define BRB1_REG_LB_GUARANTIED_HYST 0x60264 | ||
87 | /* [RW 24] LL RAM data. */ | ||
88 | #define BRB1_REG_LL_RAM 0x61000 | ||
80 | /* [RW 10] The number of free blocks above which the Low_llfc signal to | 89 | /* [RW 10] The number of free blocks above which the Low_llfc signal to |
81 | interface #n is de-asserted. */ | 90 | interface #n is de-asserted. */ |
82 | #define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 0x6016c | 91 | #define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 0x6016c |
83 | /* [RW 10] The number of free blocks below which the Low_llfc signal to | 92 | /* [RW 10] The number of free blocks below which the Low_llfc signal to |
84 | interface #n is asserted. */ | 93 | interface #n is asserted. */ |
85 | #define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c | 94 | #define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c |
86 | /* [RW 10] The number of blocks guarantied for the MAC port */ | 95 | /* [RW 11] The number of blocks guarantied for class 0 in MAC 0. The |
96 | * register is applicable only when per_class_guaranty_mode is set. */ | ||
97 | #define BRB1_REG_MAC_0_CLASS_0_GUARANTIED 0x60244 | ||
98 | /* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC | ||
99 | * 0 before signaling XON. The register is applicable only when | ||
100 | * per_class_guaranty_mode is set. */ | ||
101 | #define BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST 0x60254 | ||
102 | /* [RW 11] The number of blocks guarantied for class 1 in MAC 0. The | ||
103 | * register is applicable only when per_class_guaranty_mode is set. */ | ||
104 | #define BRB1_REG_MAC_0_CLASS_1_GUARANTIED 0x60248 | ||
105 | /* [RW 11] The hysteresis on the guarantied buffer space for class 1 in MAC 0 | ||
106 | * before signaling XON. The register is applicable only when | ||
107 | * per_class_guaranty_mode is set. */ | ||
108 | #define BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST 0x60258 | ||
109 | /* [RW 11] The number of blocks guarantied for class 0 in MAC 1. The register | ||
110 | * is applicable only when per_class_guaranty_mode is set. */ | ||
111 | #define BRB1_REG_MAC_1_CLASS_0_GUARANTIED 0x6024c | ||
112 | /* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC | ||
113 | * 1 before signaling XON. The register is applicable only when | ||
114 | * per_class_guaranty_mode is set. */ | ||
115 | #define BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST 0x6025c | ||
116 | /* [RW 11] The number of blocks guarantied for class 1 in MAC 1. The | ||
117 | * register is applicable only when per_class_guaranty_mode is set. */ | ||
118 | #define BRB1_REG_MAC_1_CLASS_1_GUARANTIED 0x60250 | ||
119 | /* [RW 11] The hysteresis on the guarantied buffer space for class 1 in MAC | ||
120 | * 1 before signaling XON. The register is applicable only when | ||
121 | * per_class_guaranty_mode is set. */ | ||
122 | #define BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST 0x60260 | ||
123 | /* [RW 11] The number of blocks guarantied for the MAC port. The register is | ||
124 | * applicable only when per_class_guaranty_mode is reset. */ | ||
87 | #define BRB1_REG_MAC_GUARANTIED_0 0x601e8 | 125 | #define BRB1_REG_MAC_GUARANTIED_0 0x601e8 |
88 | #define BRB1_REG_MAC_GUARANTIED_1 0x60240 | 126 | #define BRB1_REG_MAC_GUARANTIED_1 0x60240 |
89 | /* [R 24] The number of full blocks. */ | 127 | /* [R 24] The number of full blocks. */ |
@@ -100,15 +138,19 @@ | |||
100 | /* [RW 10] The number of free blocks below which the pause signal to class 0 | 138 | /* [RW 10] The number of free blocks below which the pause signal to class 0 |
101 | * is asserted */ | 139 | * is asserted */ |
102 | #define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0 | 140 | #define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0 |
103 | /* [RW 10] The number of free blocks above which the pause signal to class 0 | 141 | #define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 0x60220 |
142 | /* [RW 11] The number of free blocks above which the pause signal to class 0 | ||
104 | * is de-asserted */ | 143 | * is de-asserted */ |
105 | #define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4 | 144 | #define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4 |
106 | /* [RW 10] The number of free blocks below which the pause signal to class 1 | 145 | #define BRB1_REG_PAUSE_0_XON_THRESHOLD_1 0x60224 |
146 | /* [RW 11] The number of free blocks below which the pause signal to class 1 | ||
107 | * is asserted */ | 147 | * is asserted */ |
108 | #define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8 | 148 | #define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8 |
109 | /* [RW 10] The number of free blocks above which the pause signal to class 1 | 149 | #define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 0x60228 |
150 | /* [RW 11] The number of free blocks above which the pause signal to class 1 | ||
110 | * is de-asserted */ | 151 | * is de-asserted */ |
111 | #define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc | 152 | #define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc |
153 | #define BRB1_REG_PAUSE_1_XON_THRESHOLD_1 0x6022c | ||
112 | /* [RW 10] Write client 0: De-assert pause threshold. Not Functional */ | 154 | /* [RW 10] Write client 0: De-assert pause threshold. Not Functional */ |
113 | #define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078 | 155 | #define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078 |
114 | #define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c | 156 | #define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c |
@@ -422,6 +464,7 @@ | |||
422 | #define CFC_REG_NUM_LCIDS_ALLOC 0x104020 | 464 | #define CFC_REG_NUM_LCIDS_ALLOC 0x104020 |
423 | /* [R 9] Number of Arriving LCIDs in Link List Block */ | 465 | /* [R 9] Number of Arriving LCIDs in Link List Block */ |
424 | #define CFC_REG_NUM_LCIDS_ARRIVING 0x104004 | 466 | #define CFC_REG_NUM_LCIDS_ARRIVING 0x104004 |
467 | #define CFC_REG_NUM_LCIDS_INSIDE_PF 0x104120 | ||
425 | /* [R 9] Number of Leaving LCIDs in Link List Block */ | 468 | /* [R 9] Number of Leaving LCIDs in Link List Block */ |
426 | #define CFC_REG_NUM_LCIDS_LEAVING 0x104018 | 469 | #define CFC_REG_NUM_LCIDS_LEAVING 0x104018 |
427 | #define CFC_REG_WEAK_ENABLE_PF 0x104124 | 470 | #define CFC_REG_WEAK_ENABLE_PF 0x104124 |
@@ -783,6 +826,7 @@ | |||
783 | /* [RW 3] The number of simultaneous outstanding requests to Context Fetch | 826 | /* [RW 3] The number of simultaneous outstanding requests to Context Fetch |
784 | Interface. */ | 827 | Interface. */ |
785 | #define DORQ_REG_OUTST_REQ 0x17003c | 828 | #define DORQ_REG_OUTST_REQ 0x17003c |
829 | #define DORQ_REG_PF_USAGE_CNT 0x1701d0 | ||
786 | #define DORQ_REG_REGN 0x170038 | 830 | #define DORQ_REG_REGN 0x170038 |
787 | /* [R 4] Current value of response A counter credit. Initial credit is | 831 | /* [R 4] Current value of response A counter credit. Initial credit is |
788 | configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd | 832 | configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd |
@@ -802,10 +846,12 @@ | |||
802 | /* [RW 28] TCM Header when both ULP and TCP context is loaded. */ | 846 | /* [RW 28] TCM Header when both ULP and TCP context is loaded. */ |
803 | #define DORQ_REG_SHRT_CMHEAD 0x170054 | 847 | #define DORQ_REG_SHRT_CMHEAD 0x170054 |
804 | #define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1<<4) | 848 | #define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1<<4) |
849 | #define HC_CONFIG_0_REG_BLOCK_DISABLE_0 (0x1<<0) | ||
805 | #define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1<<3) | 850 | #define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1<<3) |
806 | #define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1<<7) | 851 | #define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1<<7) |
807 | #define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2) | 852 | #define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2) |
808 | #define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1) | 853 | #define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1) |
854 | #define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0) | ||
809 | #define HC_REG_AGG_INT_0 0x108050 | 855 | #define HC_REG_AGG_INT_0 0x108050 |
810 | #define HC_REG_AGG_INT_1 0x108054 | 856 | #define HC_REG_AGG_INT_1 0x108054 |
811 | #define HC_REG_ATTN_BIT 0x108120 | 857 | #define HC_REG_ATTN_BIT 0x108120 |
@@ -844,6 +890,7 @@ | |||
844 | #define HC_REG_VQID_0 0x108008 | 890 | #define HC_REG_VQID_0 0x108008 |
845 | #define HC_REG_VQID_1 0x10800c | 891 | #define HC_REG_VQID_1 0x10800c |
846 | #define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1) | 892 | #define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1) |
893 | #define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE (0x1<<0) | ||
847 | #define IGU_REG_ATTENTION_ACK_BITS 0x130108 | 894 | #define IGU_REG_ATTENTION_ACK_BITS 0x130108 |
848 | /* [R 4] Debug: attn_fsm */ | 895 | /* [R 4] Debug: attn_fsm */ |
849 | #define IGU_REG_ATTN_FSM 0x130054 | 896 | #define IGU_REG_ATTN_FSM 0x130054 |
@@ -933,6 +980,14 @@ | |||
933 | * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */ | 980 | * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */ |
934 | #define IGU_REG_WRITE_DONE_PENDING 0x130480 | 981 | #define IGU_REG_WRITE_DONE_PENDING 0x130480 |
935 | #define MCP_A_REG_MCPR_SCRATCH 0x3a0000 | 982 | #define MCP_A_REG_MCPR_SCRATCH 0x3a0000 |
983 | #define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c | ||
984 | #define MCP_REG_MCPR_GP_INPUTS 0x800c0 | ||
985 | #define MCP_REG_MCPR_GP_OENABLE 0x800c8 | ||
986 | #define MCP_REG_MCPR_GP_OUTPUTS 0x800c4 | ||
987 | #define MCP_REG_MCPR_IMC_COMMAND 0x85900 | ||
988 | #define MCP_REG_MCPR_IMC_DATAREG0 0x85920 | ||
989 | #define MCP_REG_MCPR_IMC_SLAVE_CONTROL 0x85904 | ||
990 | #define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c | ||
936 | #define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424 | 991 | #define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424 |
937 | #define MCP_REG_MCPR_NVM_ADDR 0x8640c | 992 | #define MCP_REG_MCPR_NVM_ADDR 0x8640c |
938 | #define MCP_REG_MCPR_NVM_CFG4 0x8642c | 993 | #define MCP_REG_MCPR_NVM_CFG4 0x8642c |
@@ -1429,11 +1484,37 @@ | |||
1429 | /* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0 | 1484 | /* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0 |
1430 | only. */ | 1485 | only. */ |
1431 | #define MISC_REG_E1HMF_MODE 0xa5f8 | 1486 | #define MISC_REG_E1HMF_MODE 0xa5f8 |
1487 | /* [R 1] Status of four port mode path swap input pin. */ | ||
1488 | #define MISC_REG_FOUR_PORT_PATH_SWAP 0xa75c | ||
1489 | /* [RW 2] 4 port path swap overwrite.[0] - Overwrite control; if it is 0 - | ||
1490 | the path_swap output is equal to 4 port mode path swap input pin; if it | ||
1491 | is 1 - the path_swap output is equal to bit[1] of this register; [1] - | ||
1492 | Overwrite value. If bit[0] of this register is 1 this is the value that | ||
1493 | receives the path_swap output. Reset on Hard reset. */ | ||
1494 | #define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR 0xa738 | ||
1495 | /* [R 1] Status of 4 port mode port swap input pin. */ | ||
1496 | #define MISC_REG_FOUR_PORT_PORT_SWAP 0xa754 | ||
1497 | /* [RW 2] 4 port port swap overwrite.[0] - Overwrite control; if it is 0 - | ||
1498 | the port_swap output is equal to 4 port mode port swap input pin; if it | ||
1499 | is 1 - the port_swap output is equal to bit[1] of this register; [1] - | ||
1500 | Overwrite value. If bit[0] of this register is 1 this is the value that | ||
1501 | receives the port_swap output. Reset on Hard reset. */ | ||
1502 | #define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR 0xa734 | ||
1432 | /* [RW 32] Debug only: spare RW register reset by core reset */ | 1503 | /* [RW 32] Debug only: spare RW register reset by core reset */ |
1433 | #define MISC_REG_GENERIC_CR_0 0xa460 | 1504 | #define MISC_REG_GENERIC_CR_0 0xa460 |
1434 | #define MISC_REG_GENERIC_CR_1 0xa464 | 1505 | #define MISC_REG_GENERIC_CR_1 0xa464 |
1435 | /* [RW 32] Debug only: spare RW register reset by por reset */ | 1506 | /* [RW 32] Debug only: spare RW register reset by por reset */ |
1436 | #define MISC_REG_GENERIC_POR_1 0xa474 | 1507 | #define MISC_REG_GENERIC_POR_1 0xa474 |
1508 | /* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to | ||
1509 | use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO | ||
1510 | can not be configured as an output. Each output has its output enable in | ||
1511 | the MCP register space; but this bit needs to be set to make use of that. | ||
1512 | Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When | ||
1513 | set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON. | ||
1514 | When set to 0 - vTMON is in reset. Bit[6]: setting this bit will change | ||
1515 | the i/o to an output and will drive the TimeSync output. Bit[31:7]: | ||
1516 | spare. Global register. Reset by hard reset. */ | ||
1517 | #define MISC_REG_GEN_PURP_HWG 0xa9a0 | ||
1437 | /* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of | 1518 | /* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of |
1438 | these bits is written as a '1'; the corresponding SPIO bit will turn off | 1519 | these bits is written as a '1'; the corresponding SPIO bit will turn off |
1439 | it's drivers and become an input. This is the reset state of all GPIO | 1520 | it's drivers and become an input. This is the reset state of all GPIO |
@@ -1636,6 +1717,14 @@ | |||
1636 | in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 - | 1717 | in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 - |
1637 | timer 8 */ | 1718 | timer 8 */ |
1638 | #define MISC_REG_SW_TIMER_VAL 0xa5c0 | 1719 | #define MISC_REG_SW_TIMER_VAL 0xa5c0 |
1720 | /* [R 1] Status of two port mode path swap input pin. */ | ||
1721 | #define MISC_REG_TWO_PORT_PATH_SWAP 0xa758 | ||
1722 | /* [RW 2] 2 port swap overwrite.[0] - Overwrite control; if it is 0 - the | ||
1723 | path_swap output is equal to 2 port mode path swap input pin; if it is 1 | ||
1724 | - the path_swap output is equal to bit[1] of this register; [1] - | ||
1725 | Overwrite value. If bit[0] of this register is 1 this is the value that | ||
1726 | receives the path_swap output. Reset on Hard reset. */ | ||
1727 | #define MISC_REG_TWO_PORT_PATH_SWAP_OVWR 0xa72c | ||
1639 | /* [RW 1] Set by the MCP to remember if one or more of the drivers is/are | 1728 | /* [RW 1] Set by the MCP to remember if one or more of the drivers is/are |
1640 | loaded; 0-prepare; 1-unprepare */ | 1729 | loaded; 0-prepare; 1-unprepare */ |
1641 | #define MISC_REG_UNPREPARED 0xa424 | 1730 | #define MISC_REG_UNPREPARED 0xa424 |
@@ -1644,6 +1733,36 @@ | |||
1644 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4) | 1733 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4) |
1645 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2) | 1734 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2) |
1646 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3) | 1735 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3) |
1736 | /* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or | ||
1737 | * not it is the recipient of the message on the MDIO interface. The value | ||
1738 | * is compared to the value on ctrl_md_devad. Drives output | ||
1739 | * misc_xgxs0_phy_addr. Global register. */ | ||
1740 | #define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc | ||
1741 | /* [RW 2] XMAC Core port mode. Indicates the number of ports on the system | ||
1742 | side. This should be less than or equal to phy_port_mode; if some of the | ||
1743 | ports are not used. This enables reduction of frequency on the core side. | ||
1744 | This is a strap input for the XMAC_MP core. 00 - Single Port Mode; 01 - | ||
1745 | Dual Port Mode; 10 - Tri Port Mode; 11 - Quad Port Mode. This is a strap | ||
1746 | input for the XMAC_MP core; and should be changed only while reset is | ||
1747 | held low. Reset on Hard reset. */ | ||
1748 | #define MISC_REG_XMAC_CORE_PORT_MODE 0xa964 | ||
1749 | /* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp | ||
1750 | Core. This is a strap input for the XMAC_MP core. 00 - Single Port Mode; | ||
1751 | 01 - Dual Port Mode; 1x - Quad Port Mode; This is a strap input for the | ||
1752 | XMAC_MP core; and should be changed only while reset is held low. Reset | ||
1753 | on Hard reset. */ | ||
1754 | #define MISC_REG_XMAC_PHY_PORT_MODE 0xa960 | ||
1755 | /* [RW 32] 1 [47] Packet Size = 64 Write to this register write bits 31:0. | ||
1756 | * Reads from this register will clear bits 31:0. */ | ||
1757 | #define MSTAT_REG_RX_STAT_GR64_LO 0x200 | ||
1758 | /* [RW 32] 1 [00] Tx Good Packet Count Write to this register write bits | ||
1759 | * 31:0. Reads from this register will clear bits 31:0. */ | ||
1760 | #define MSTAT_REG_TX_STAT_GTXPOK_LO 0 | ||
1761 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST (0x1<<0) | ||
1762 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST (0x1<<1) | ||
1763 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4) | ||
1764 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2) | ||
1765 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3) | ||
1647 | #define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0) | 1766 | #define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0) |
1648 | #define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0) | 1767 | #define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0) |
1649 | #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0) | 1768 | #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0) |
@@ -1837,6 +1956,10 @@ | |||
1837 | #define NIG_REG_LLH1_FUNC_MEM 0x161c0 | 1956 | #define NIG_REG_LLH1_FUNC_MEM 0x161c0 |
1838 | #define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160 | 1957 | #define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160 |
1839 | #define NIG_REG_LLH1_FUNC_MEM_SIZE 16 | 1958 | #define NIG_REG_LLH1_FUNC_MEM_SIZE 16 |
1959 | /* [RW 1] When this bit is set; the LLH will classify the packet before | ||
1960 | * sending it to the BRB or calculating WoL on it. This bit controls port 1 | ||
1961 | * only. The legacy llh_multi_function_mode bit controls port 0. */ | ||
1962 | #define NIG_REG_LLH1_MF_MODE 0x18614 | ||
1840 | /* [RW 8] init credit counter for port1 in LLH */ | 1963 | /* [RW 8] init credit counter for port1 in LLH */ |
1841 | #define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564 | 1964 | #define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564 |
1842 | #define NIG_REG_LLH1_XCM_MASK 0x10134 | 1965 | #define NIG_REG_LLH1_XCM_MASK 0x10134 |
@@ -1858,11 +1981,25 @@ | |||
1858 | /* [R 32] Interrupt register #0 read */ | 1981 | /* [R 32] Interrupt register #0 read */ |
1859 | #define NIG_REG_NIG_INT_STS_0 0x103b0 | 1982 | #define NIG_REG_NIG_INT_STS_0 0x103b0 |
1860 | #define NIG_REG_NIG_INT_STS_1 0x103c0 | 1983 | #define NIG_REG_NIG_INT_STS_1 0x103c0 |
1984 | /* [R 32] Legacy E1 and E1H location for parity error mask register. */ | ||
1985 | #define NIG_REG_NIG_PRTY_MASK 0x103dc | ||
1986 | /* [RW 32] Parity mask register #0 read/write */ | ||
1987 | #define NIG_REG_NIG_PRTY_MASK_0 0x183c8 | ||
1988 | #define NIG_REG_NIG_PRTY_MASK_1 0x183d8 | ||
1861 | /* [R 32] Legacy E1 and E1H location for parity error status register. */ | 1989 | /* [R 32] Legacy E1 and E1H location for parity error status register. */ |
1862 | #define NIG_REG_NIG_PRTY_STS 0x103d0 | 1990 | #define NIG_REG_NIG_PRTY_STS 0x103d0 |
1863 | /* [R 32] Parity register #0 read */ | 1991 | /* [R 32] Parity register #0 read */ |
1864 | #define NIG_REG_NIG_PRTY_STS_0 0x183bc | 1992 | #define NIG_REG_NIG_PRTY_STS_0 0x183bc |
1865 | #define NIG_REG_NIG_PRTY_STS_1 0x183cc | 1993 | #define NIG_REG_NIG_PRTY_STS_1 0x183cc |
1994 | /* [R 32] Legacy E1 and E1H location for parity error status clear register. */ | ||
1995 | #define NIG_REG_NIG_PRTY_STS_CLR 0x103d4 | ||
1996 | /* [RC 32] Parity register #0 read clear */ | ||
1997 | #define NIG_REG_NIG_PRTY_STS_CLR_0 0x183c0 | ||
1998 | #define NIG_REG_NIG_PRTY_STS_CLR_1 0x183d0 | ||
1999 | #define MCPR_IMC_COMMAND_ENABLE (1L<<31) | ||
2000 | #define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16 | ||
2001 | #define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28 | ||
2002 | #define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8 | ||
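
These MCPR_IMC_COMMAND_* values describe a single command word: an enable bit plus operation, transfer-address and status fields at fixed shifts. A small composition sketch follows; the actual operation codes are not defined in this hunk, so the 'op' argument below is purely illustrative.

#include <stdint.h>

#define MCPR_IMC_COMMAND_ENABLE			(1u << 31)
#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT	16
#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT	28
#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8

/* Build a command word from an operation code and a transfer address. */
static uint32_t imc_command(uint32_t op, uint32_t xfer_addr)
{
	return MCPR_IMC_COMMAND_ENABLE |
	       (op << MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
	       (xfer_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT);
}

/* The completion status sits above bit 16; its width is not given in this
 * hunk, so no mask is applied here. */
static uint32_t imc_status(uint32_t cmd_word)
{
	return cmd_word >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT;
}
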
1866 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic | 2003 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic |
1867 | * Ethernet header. */ | 2004 | * Ethernet header. */ |
1868 | #define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038 | 2005 | #define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038 |
@@ -1872,6 +2009,12 @@ | |||
1872 | #define NIG_REG_P0_HWPFC_ENABLE 0x18078 | 2009 | #define NIG_REG_P0_HWPFC_ENABLE 0x18078 |
1873 | #define NIG_REG_P0_LLH_FUNC_MEM2 0x18480 | 2010 | #define NIG_REG_P0_LLH_FUNC_MEM2 0x18480 |
1874 | #define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440 | 2011 | #define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440 |
2012 | /* [RW 1] Input enable for RX MAC interface. */ | ||
2013 | #define NIG_REG_P0_MAC_IN_EN 0x185ac | ||
2014 | /* [RW 1] Output enable for TX MAC interface */ | ||
2015 | #define NIG_REG_P0_MAC_OUT_EN 0x185b0 | ||
2016 | /* [RW 1] Output enable for TX PAUSE signal to the MAC. */ | ||
2017 | #define NIG_REG_P0_MAC_PAUSE_OUT_EN 0x185b4 | ||
1875 | /* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for | 2018 | /* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for |
1876 | * future expansion) each priority is to be mapped to. Bits 3:0 specify the | 2019 | * future expansion) each priority is to be mapped to. Bits 3:0 specify the
1877 | * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit | 2020 | * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit |
@@ -1888,11 +2031,52 @@ | |||
1888 | * than one bit may be set; allowing multiple priorities to be mapped to one | 2031 | * than one bit may be set; allowing multiple priorities to be mapped to one |
1889 | * COS. */ | 2032 | * COS. */ |
1890 | #define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c | 2033 | #define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c |
2034 | /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A | ||
2035 | * priority is mapped to COS 2 when the corresponding mask bit is 1. More | ||
2036 | * than one bit may be set; allowing multiple priorities to be mapped to one | ||
2037 | * COS. */ | ||
2038 | #define NIG_REG_P0_RX_COS2_PRIORITY_MASK 0x186b0 | ||
2039 | /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A | ||
2040 | * priority is mapped to COS 3 when the corresponding mask bit is 1. More | ||
2041 | * than one bit may be set; allowing multiple priorities to be mapped to one | ||
2042 | * COS. */ | ||
2043 | #define NIG_REG_P0_RX_COS3_PRIORITY_MASK 0x186b4 | ||
2044 | /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A | ||
2045 | * priority is mapped to COS 4 when the corresponding mask bit is 1. More | ||
2046 | * than one bit may be set; allowing multiple priorities to be mapped to one | ||
2047 | * COS. */ | ||
2048 | #define NIG_REG_P0_RX_COS4_PRIORITY_MASK 0x186b8 | ||
2049 | /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A | ||
2050 | * priority is mapped to COS 5 when the corresponding mask bit is 1. More | ||
2051 | * than one bit may be set; allowing multiple priorities to be mapped to one | ||
2052 | * COS. */ | ||
2053 | #define NIG_REG_P0_RX_COS5_PRIORITY_MASK 0x186bc | ||
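
Each of these masks holds one bit per SAFC/PFC priority; a set bit steers that priority to the named COS, and several priorities may share one COS. A hypothetical port-0 mapping, just to show the bit usage; reg_wr32() is a placeholder for the driver's write helper and the chosen priorities are arbitrary.

#include <stdint.h>
#include <stdio.h>

#define NIG_REG_P0_RX_COS1_PRIORITY_MASK	0x1805c
#define NIG_REG_P0_RX_COS2_PRIORITY_MASK	0x186b0

static void reg_wr32(uint32_t off, uint32_t val)	/* placeholder write helper */
{
	printf("REG_WR 0x%05x <- 0x%04x\n", off, val);
}

static void nig_p0_map_pfc_priorities(void)
{
	reg_wr32(NIG_REG_P0_RX_COS1_PRIORITY_MASK, 1u << 3);	/* priority 3 -> COS 1 */
	reg_wr32(NIG_REG_P0_RX_COS2_PRIORITY_MASK, 1u << 4);	/* priority 4 -> COS 2 */
}
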
2054 | /* [R 1] RX FIFO for receiving data from MAC is empty. */ | ||
1891 | /* [RW 15] Specify which of the credit registers the client is to be mapped | 2055 | /* [RW 15] Specify which of the credit registers the client is to be mapped |
1892 | * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For | 2056 | * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For |
1893 | * clients that are not subject to WFQ credit blocking - their | 2057 | * clients that are not subject to WFQ credit blocking - their |
1894 | * specifications here are not used. */ | 2058 | * specifications here are not used. */ |
1895 | #define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0 | 2059 | #define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0 |
2060 | /* [RW 32] Specify which of the credit registers the client is to be mapped | ||
2061 | * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are | ||
2062 | * for client 0; bits [35:32] are for client 8. For clients that are not | ||
2063 | * subject to WFQ credit blocking - their specifications here are not used. | ||
2064 | * This is a new register (with 2_) added in E3 B0 to accommodate the 9 | ||
2065 | * input clients to ETS arbiter. The reset default is set for management and | ||
2066 | * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to | ||
2067 | * use credit registers 0-5 respectively (0x543210876). Note that credit | ||
2068 | * registers can not be shared between clients. */ | ||
2069 | #define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x18688 | ||
2070 | /* [RW 4] Specify which of the credit registers the client is to be mapped | ||
2071 | * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are | ||
2072 | * for client 0; bits [35:32] are for client 8. For clients that are not | ||
2073 | * subject to WFQ credit blocking - their specifications here are not used. | ||
2074 | * This is a new register (with 2_) added in E3 B0 to accommodate the 9 | ||
2075 | * input clients to ETS arbiter. The reset default is set for management and | ||
2076 | * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to | ||
2077 | * use credit registers 0-5 respectively (0x543210876). Note that credit | ||
2078 | * registers can not be shared between clients. */ | ||
2079 | #define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x1868c | ||
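
The client-to-credit-register mapping is one 36-bit value, four bits per client (client 0 in bits 3:0 up to client 8 in bits 35:32), written through the 32-bit _LSB register plus the 4-bit _MSB register. The quoted reset default 0x543210876 is exactly the packing of {6, 7, 8, 0, 1, 2, 3, 4, 5}: management and the two debug clients on credit registers 6-8, COS0-COS5 on 0-5. A sketch of packing and splitting such a value; the helper and reg_wr32() are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB	0x18688
#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB	0x1868c

static void reg_wr32(uint32_t off, uint32_t val)	/* placeholder write helper */
{
	printf("REG_WR 0x%05x <- 0x%08x\n", off, val);
}

/* crd_reg[i] = credit register (0..8) assigned to client i. */
static void nig_p0_write_client_credit_map(const uint8_t crd_reg[9])
{
	uint64_t map = 0;
	int client;

	for (client = 0; client < 9; client++)
		map |= (uint64_t)(crd_reg[client] & 0xf) << (4 * client);

	reg_wr32(NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB, (uint32_t)map);
	reg_wr32(NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, (uint32_t)(map >> 32));
}
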
1896 | /* [RW 5] Specify whether the client competes directly in the strict | 2080 | /* [RW 5] Specify whether the client competes directly in the strict |
1897 | * priority arbiter. The bits are mapped according to client ID (client IDs | 2081 | * priority arbiter. The bits are mapped according to client ID (client IDs |
1898 | * are defined in tx_arb_priority_client). Default value is set to enable | 2082 | * are defined in tx_arb_priority_client). Default value is set to enable |
@@ -1907,10 +2091,24 @@ | |||
1907 | * reach. */ | 2091 | * reach. */ |
1908 | #define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c | 2092 | #define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c |
1909 | #define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110 | 2093 | #define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110 |
2094 | #define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 0x18114 | ||
2095 | #define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 0x18118 | ||
2096 | #define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 0x1811c | ||
2097 | #define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 0x186a0 | ||
2098 | #define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 0x186a4 | ||
2099 | #define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 0x186a8 | ||
2100 | #define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 0x186ac | ||
1910 | /* [RW 32] Specify the weight (in bytes) to be added to credit register 0 | 2101 | /* [RW 32] Specify the weight (in bytes) to be added to credit register 0 |
1911 | * when it is time to increment. */ | 2102 | * when it is time to increment. */ |
1912 | #define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8 | 2103 | #define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8 |
1913 | #define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc | 2104 | #define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc |
2105 | #define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 0x18100 | ||
2106 | #define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 0x18104 | ||
2107 | #define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 0x18108 | ||
2108 | #define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 0x18690 | ||
2109 | #define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 0x18694 | ||
2110 | #define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 0x18698 | ||
2111 | #define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 0x1869c | ||
1914 | /* [RW 12] Specify the number of strict priority arbitration slots between | 2112 | /* [RW 12] Specify the number of strict priority arbitration slots between |
1915 | * two round-robin arbitration slots to avoid starvation. A value of 0 means | 2113 | * two round-robin arbitration slots to avoid starvation. A value of 0 means |
1916 | * no strict priority cycles - the strict priority with anti-starvation | 2114 | * no strict priority cycles - the strict priority with anti-starvation |
@@ -1925,8 +2123,36 @@ | |||
1925 | * for management at priority 0; debug traffic at priorities 1 and 2; COS0 | 2123 | * for management at priority 0; debug traffic at priorities 1 and 2; COS0 |
1926 | * traffic at priority 3; and COS1 traffic at priority 4. */ | 2124 | * traffic at priority 3; and COS1 traffic at priority 4. */ |
1927 | #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4 | 2125 | #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4 |
2126 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic | ||
2127 | * Ethernet header. */ | ||
2128 | #define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c | ||
1928 | #define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0 | 2129 | #define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0 |
1929 | #define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460 | 2130 | #define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460 |
2131 | /* [RW 32] Specify the client number to be assigned to each priority of the | ||
2132 | * strict priority arbiter. This register specifies bits 31:0 of the 36-bit | ||
2133 | * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 | ||
2134 | * client; bits [35-32] are for priority 8 client. The clients are assigned | ||
2135 | * the following IDs: 0-management; 1-debug traffic from this port; 2-debug | ||
2136 | * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; | ||
2137 | * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is | ||
2138 | * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to | ||
2139 | * accommodate the 9 input clients to ETS arbiter. */ | ||
2140 | #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB 0x18680 | ||
2141 | /* [RW 4] Specify the client number to be assigned to each priority of the | ||
2142 | * strict priority arbiter. This register specifies bits 35:32 of the 36-bit | ||
2143 | * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 | ||
2144 | * client; bits [35-32] are for priority 8 client. The clients are assigned | ||
2145 | * the following IDs: 0-management; 1-debug traffic from this port; 2-debug | ||
2146 | * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; | ||
2147 | * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is | ||
2148 | * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to | ||
2149 | * accommodate the 9 input clients to ETS arbiter. */ | ||
2150 | #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684 | ||
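
The strict-priority client assignment uses the same 36-bit LSB/MSB split, four bits per arbitration priority with priority 0 in bits 3:0. Decoding the quoted reset value 0x345678021 with the client IDs listed above gives debug and management on the three highest priorities, then COS5 down to COS0. A short stand-alone decoder, purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Client IDs as listed in the comment above. */
static const char * const client_name[9] = {
	"management", "debug (this port)", "debug (other port)",
	"COS0", "COS1", "COS2", "COS3", "COS4", "COS5",
};

int main(void)
{
	uint64_t map = 0x345678021ULL;	/* 36-bit reset value */
	int prio;

	for (prio = 0; prio < 9; prio++) {
		unsigned int client = (map >> (4 * prio)) & 0xf;

		printf("priority %d -> client %u (%s)\n", prio, client,
		       client < 9 ? client_name[client] : "reserved");
	}
	return 0;
}
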
2151 | #define NIG_REG_P1_MAC_IN_EN 0x185c0 | ||
2152 | /* [RW 1] Output enable for TX MAC interface */ | ||
2153 | #define NIG_REG_P1_MAC_OUT_EN 0x185c4 | ||
2154 | /* [RW 1] Output enable for TX PAUSE signal to the MAC. */ | ||
2155 | #define NIG_REG_P1_MAC_PAUSE_OUT_EN 0x185c8 | ||
1930 | /* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for | 2156 | /* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for |
1931 | * future expansion) each priority is to be mapped to. Bits 3:0 specify the | 2157 | * future expansion) each priority is to be mapped to. Bits 3:0 specify the
1932 | * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit | 2158 | * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit |
@@ -1943,6 +2169,105 @@ | |||
1943 | * than one bit may be set; allowing multiple priorities to be mapped to one | 2169 | * than one bit may be set; allowing multiple priorities to be mapped to one |
1944 | * COS. */ | 2170 | * COS. */ |
1945 | #define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0 | 2171 | #define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0 |
2172 | /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A | ||
2173 | * priority is mapped to COS 2 when the corresponding mask bit is 1. More | ||
2174 | * than one bit may be set; allowing multiple priorities to be mapped to one | ||
2175 | * COS. */ | ||
2176 | #define NIG_REG_P1_RX_COS2_PRIORITY_MASK 0x186f8 | ||
2177 | /* [R 1] RX FIFO for receiving data from MAC is empty. */ | ||
2178 | #define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c | ||
2179 | /* [R 1] TLLH FIFO is empty. */ | ||
2180 | #define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338 | ||
2181 | /* [RW 32] Specify which of the credit registers the client is to be mapped | ||
2182 | * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are | ||
2183 | * for client 0; bits [35:32] are for client 8. For clients that are not | ||
2184 | * subject to WFQ credit blocking - their specifications here are not used. | ||
2185 | * This is a new register (with 2_) added in E3 B0 to accommodate the 9 | ||
2186 | * input clients to ETS arbiter. The reset default is set for management and | ||
2187 | * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to | ||
2188 | * use credit registers 0-5 respectively (0x543210876). Note that credit | ||
2189 | * registers can not be shared between clients. Note also that there are | ||
2190 | * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only | ||
2191 | * credit registers 0-5 are valid. This register should be configured | ||
2192 | * appropriately before enabling WFQ. */ | ||
2193 | #define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x186e8 | ||
2194 | /* [RW 4] Specify which of the credit registers the client is to be mapped | ||
2195 | * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are | ||
2196 | * for client 0; bits [35:32] are for client 8. For clients that are not | ||
2197 | * subject to WFQ credit blocking - their specifications here are not used. | ||
2198 | * This is a new register (with 2_) added in E3 B0 to accommodate the 9 | ||
2199 | * input clients to ETS arbiter. The reset default is set for management and | ||
2200 | * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to | ||
2201 | * use credit registers 0-5 respectively (0x543210876). Note that credit | ||
2202 | * registers can not be shared between clients. Note also that there are | ||
2203 | * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only | ||
2204 | * credit registers 0-5 are valid. This register should be configured | ||
2205 | * appropriately before enabling WFQ. */ | ||
2206 | #define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x186ec | ||
2207 | /* [RW 9] Specify whether the client competes directly in the strict | ||
2208 | * priority arbiter. The bits are mapped according to client ID (client IDs | ||
2209 | * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic | ||
2210 | * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 | ||
2211 | * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. | ||
2212 | * Default value is set to enable strict priorities for all clients. */ | ||
2213 | #define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT 0x18234 | ||
2214 | /* [RW 9] Specify whether the client is subject to WFQ credit blocking. The | ||
2215 | * bits are mapped according to client ID (client IDs are defined in | ||
2216 | * tx_arb_priority_client2): 0-management; 1-debug traffic from this port; | ||
2217 | * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 | ||
2218 | * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is | ||
2219 | * 0 for not using WFQ credit blocking. */ | ||
2220 | #define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x18238 | ||
2221 | #define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 0x18258 | ||
2222 | #define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 0x1825c | ||
2223 | #define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 0x18260 | ||
2224 | #define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 0x18264 | ||
2225 | #define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 0x18268 | ||
2226 | #define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 0x186f4 | ||
2227 | /* [RW 32] Specify the weight (in bytes) to be added to credit register 0 | ||
2228 | * when it is time to increment. */ | ||
2229 | #define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 0x18244 | ||
2230 | #define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 0x18248 | ||
2231 | #define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 0x1824c | ||
2232 | #define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 0x18250 | ||
2233 | #define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 0x18254 | ||
2234 | #define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 0x186f0 | ||
2235 | /* [RW 12] Specify the number of strict priority arbitration slots between | ||
2236 | two round-robin arbitration slots to avoid starvation. A value of 0 means | ||
2237 | no strict priority cycles - the strict priority with anti-starvation | ||
2238 | arbiter becomes a round-robin arbiter. */ | ||
2239 | #define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS 0x18240 | ||
2240 | /* [RW 32] Specify the client number to be assigned to each priority of the | ||
2241 | strict priority arbiter. This register specifies bits 31:0 of the 36-bit | ||
2242 | value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 | ||
2243 | client; bits [35-32] are for priority 8 client. The clients are assigned | ||
2244 | the following IDs: 0-management; 1-debug traffic from this port; 2-debug | ||
2245 | traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; | ||
2246 | 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is | ||
2247 | set to 0x345678021. This is a new register (with 2_) added in E3 B0 to | ||
2248 | accommodate the 9 input clients to ETS arbiter. Note that this register | ||
2249 | is the same as the one for port 0, except that port 1 only has COS 0-2 | ||
2250 | traffic. There is no traffic for COS 3-5 of port 1. */ | ||
2251 | #define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB 0x186e0 | ||
2252 | /* [RW 4] Specify the client number to be assigned to each priority of the | ||
2253 | strict priority arbiter. This register specifies bits 35:32 of the 36-bit | ||
2254 | value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 | ||
2255 | client; bits [35-32] are for priority 8 client. The clients are assigned | ||
2256 | the following IDs: 0-management; 1-debug traffic from this port; 2-debug | ||
2257 | traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; | ||
2258 | 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is | ||
2259 | set to 0x345678021. This is a new register (with 2_) added in E3 B0 to | ||
2260 | accommodate the 9 input clients to ETS arbiter. Note that this register | ||
2261 | is the same as the one for port 0, except that port 1 only has COS 0-2 | ||
2262 | traffic. There is no traffic for COS 3-5 of port 1. */ | ||
2263 | #define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4 | ||
2264 | /* [R 1] TX FIFO for transmitting data to MAC is empty. */ | ||
2265 | #define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594 | ||
2266 | /* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets | ||
2267 | forwarded to the host. */ | ||
2268 | #define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8 | ||
2269 | /* [RW 32] Specify the upper bound that credit register 0 is allowed to | ||
2270 | * reach. */ | ||
1946 | /* [RW 1] Pause enable for port0. This register may get 1 only when | 2271 | /* [RW 1] Pause enable for port0. This register may get 1 only when |
1947 | ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same | 2272 | ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same |
1948 | port */ | 2273 | port */ |
@@ -2026,12 +2351,45 @@ | |||
2026 | #define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18 | 2351 | #define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18 |
2027 | /* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */ | 2352 | /* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */ |
2028 | #define PBF_REG_COS0_UPPER_BOUND 0x15c05c | 2353 | #define PBF_REG_COS0_UPPER_BOUND 0x15c05c |
2354 | /* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter | ||
2355 | * of port 0. */ | ||
2356 | #define PBF_REG_COS0_UPPER_BOUND_P0 0x15c2cc | ||
2357 | /* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter | ||
2358 | * of port 1. */ | ||
2359 | #define PBF_REG_COS0_UPPER_BOUND_P1 0x15c2e4 | ||
2029 | /* [RW 31] The weight of COS0 in the ETS command arbiter. */ | 2360 | /* [RW 31] The weight of COS0 in the ETS command arbiter. */ |
2030 | #define PBF_REG_COS0_WEIGHT 0x15c054 | 2361 | #define PBF_REG_COS0_WEIGHT 0x15c054 |
2362 | /* [RW 31] The weight of COS0 in port 0 ETS command arbiter. */ | ||
2363 | #define PBF_REG_COS0_WEIGHT_P0 0x15c2a8 | ||
2364 | /* [RW 31] The weight of COS0 in port 1 ETS command arbiter. */ | ||
2365 | #define PBF_REG_COS0_WEIGHT_P1 0x15c2c0 | ||
2031 | /* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */ | 2366 | /* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */ |
2032 | #define PBF_REG_COS1_UPPER_BOUND 0x15c060 | 2367 | #define PBF_REG_COS1_UPPER_BOUND 0x15c060 |
2033 | /* [RW 31] The weight of COS1 in the ETS command arbiter. */ | 2368 | /* [RW 31] The weight of COS1 in the ETS command arbiter. */ |
2034 | #define PBF_REG_COS1_WEIGHT 0x15c058 | 2369 | #define PBF_REG_COS1_WEIGHT 0x15c058 |
2370 | /* [RW 31] The weight of COS1 in port 0 ETS command arbiter. */ | ||
2371 | #define PBF_REG_COS1_WEIGHT_P0 0x15c2ac | ||
2372 | /* [RW 31] The weight of COS1 in port 1 ETS command arbiter. */ | ||
2373 | #define PBF_REG_COS1_WEIGHT_P1 0x15c2c4 | ||
2374 | /* [RW 31] The weight of COS2 in port 0 ETS command arbiter. */ | ||
2375 | #define PBF_REG_COS2_WEIGHT_P0 0x15c2b0 | ||
2376 | /* [RW 31] The weight of COS2 in port 1 ETS command arbiter. */ | ||
2377 | #define PBF_REG_COS2_WEIGHT_P1 0x15c2c8 | ||
2378 | /* [RW 31] The weight of COS3 in port 0 ETS command arbiter. */ | ||
2379 | #define PBF_REG_COS3_WEIGHT_P0 0x15c2b4 | ||
2380 | /* [RW 31] The weight of COS4 in port 0 ETS command arbiter. */ | ||
2381 | #define PBF_REG_COS4_WEIGHT_P0 0x15c2b8 | ||
2382 | /* [RW 31] The weight of COS5 in port 0 ETS command arbiter. */ | ||
2383 | #define PBF_REG_COS5_WEIGHT_P0 0x15c2bc | ||
2384 | /* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte | ||
2385 | * lines. */ | ||
2386 | #define PBF_REG_CREDIT_LB_Q 0x140338 | ||
2387 | /* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte | ||
2388 | * lines. */ | ||
2389 | #define PBF_REG_CREDIT_Q0 0x14033c | ||
2390 | /* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte | ||
2391 | * lines. */ | ||
2392 | #define PBF_REG_CREDIT_Q1 0x140340 | ||
2035 | /* [RW 1] Disable processing further tasks from port 0 (after ending the | 2393 | /* [RW 1] Disable processing further tasks from port 0 (after ending the |
2036 | current task in process). */ | 2394 | current task in process). */ |
2037 | #define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c | 2395 | #define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c |
@@ -2042,6 +2400,52 @@ | |||
2042 | current task in process). */ | 2400 | current task in process). */ |
2043 | #define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c | 2401 | #define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c |
2044 | #define PBF_REG_DISABLE_PF 0x1402e8 | 2402 | #define PBF_REG_DISABLE_PF 0x1402e8 |
2403 | /* [RW 18] For port 0: For each client that is subject to WFQ (the | ||
2404 | * corresponding bit is 1); indicates to which of the credit registers this | ||
2405 | * client is mapped. For clients which are not credit blocked; their mapping | ||
2406 | * is don't care. */ | ||
2407 | #define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 0x15c288 | ||
2408 | /* [RW 9] For port 1: For each client that is subject to WFQ (the | ||
2409 | * corresponding bit is 1); indicates to which of the credit registers this | ||
2410 | * client is mapped. For clients which are not credit blocked; their mapping | ||
2411 | * is don't care. */ | ||
2412 | #define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 0x15c28c | ||
2413 | /* [RW 6] For port 0: Bit per client to indicate if the client competes in | ||
2414 | * the strict priority arbiter directly (corresponding bit = 1); or first | ||
2415 | * goes to the RR arbiter (corresponding bit = 0); and then competes in the | ||
2416 | * lowest priority in the strict-priority arbiter. */ | ||
2417 | #define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 0x15c278 | ||
2418 | /* [RW 3] For port 1: Bit per client to indicate if the client competes in | ||
2419 | * the strict priority arbiter directly (corresponding bit = 1); or first | ||
2420 | * goes to the RR arbiter (corresponding bit = 0); and then competes in the | ||
2421 | * lowest priority in the strict-priority arbiter. */ | ||
2422 | #define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 0x15c27c | ||
2423 | /* [RW 6] For port 0: Bit per client to indicate if the client is subject to | ||
2424 | * WFQ credit blocking (corresponding bit = 1). */ | ||
2425 | #define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 0x15c280 | ||
2426 | /* [RW 3] For port 1: Bit per client to indicate if the client is subject to | ||
2427 | * WFQ credit blocking (corresponding bit = 1). */ | ||
2428 | #define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 0x15c284 | ||
2429 | /* [RW 16] For port 0: The number of strict priority arbitration slots | ||
2430 | * between 2 RR arbitration slots. A value of 0 means no strict priority | ||
2431 | * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR | ||
2432 | * arbiter. */ | ||
2433 | #define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 0x15c2a0 | ||
2434 | /* [RW 16] For port 1: The number of strict priority arbitration slots | ||
2435 | * between 2 RR arbitration slots. A value of 0 means no strict priority | ||
2436 | * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR | ||
2437 | * arbiter. */ | ||
2438 | #define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 0x15c2a4 | ||
2439 | /* [RW 18] For port 0: Indicates which client is connected to each priority | ||
2440 | * in the strict-priority arbiter. Priority 0 is the highest priority, and | ||
2441 | * priority 5 is the lowest; to which the RR output is connected (this is | ||
2442 | * not configurable). */ | ||
2443 | #define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 0x15c270 | ||
2444 | /* [RW 9] For port 1: Indicates which client is connected to each priority | ||
2445 | * in the strict-priority arbiter. Priority 0 is the highest priority, and | ||
2446 | * priority 5 is the lowest; to which the RR output is connected (this is | ||
2447 | * not configurable). */ | ||
2448 | #define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 0x15c274 | ||
2045 | /* [RW 1] Indicates that ETS is performed between the COSes in the command | 2449 | /* [RW 1] Indicates that ETS is performed between the COSes in the command |
2046 | * arbiter. If reset strict priority w/ anti-starvation will be performed | 2450 | * arbiter. If reset strict priority w/ anti-starvation will be performed |
2047 | * w/o WFQ. */ | 2451 | * w/o WFQ. */ |
@@ -2049,14 +2453,25 @@ | |||
2049 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic | 2453 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic |
2050 | * Ethernet header. */ | 2454 | * Ethernet header. */ |
2051 | #define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8 | 2455 | #define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8 |
2052 | /* [RW 1] Indicates which COS is connected to the highest priority in the | 2456 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
2053 | * command arbiter. */ | 2457 | #define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8 |
2458 | /* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest | ||
2459 | * priority in the command arbiter. */ | ||
2054 | #define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c | 2460 | #define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c |
2055 | #define PBF_REG_IF_ENABLE_REG 0x140044 | 2461 | #define PBF_REG_IF_ENABLE_REG 0x140044 |
2056 | /* [RW 1] Init bit. When set the initial credits are copied to the credit | 2462 | /* [RW 1] Init bit. When set the initial credits are copied to the credit |
2057 | registers (except the port credits). Should be set and then reset after | 2463 | registers (except the port credits). Should be set and then reset after |
2058 | the configuration of the block has ended. */ | 2464 | the configuration of the block has ended. */ |
2059 | #define PBF_REG_INIT 0x140000 | 2465 | #define PBF_REG_INIT 0x140000 |
2466 | /* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte | ||
2467 | * lines. */ | ||
2468 | #define PBF_REG_INIT_CRD_LB_Q 0x15c248 | ||
2469 | /* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte | ||
2470 | * lines. */ | ||
2471 | #define PBF_REG_INIT_CRD_Q0 0x15c230 | ||
2472 | /* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte | ||
2473 | * lines. */ | ||
2474 | #define PBF_REG_INIT_CRD_Q1 0x15c234 | ||
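
These per-queue credits are counted in 16-byte lines, and PBF_REG_INIT (defined earlier in this block) says the initial values are only copied into the working credit registers when its init bit is set and then cleared. A sketch of programming queue 0 from a byte budget under that reading; reg_wr32() is a placeholder.

#include <stdint.h>
#include <stdio.h>

#define PBF_REG_INIT		0x140000
#define PBF_REG_INIT_CRD_Q0	0x15c230

static void reg_wr32(uint32_t off, uint32_t val)	/* placeholder write helper */
{
	printf("REG_WR 0x%06x <- 0x%08x\n", off, val);
}

static void pbf_set_q0_init_credit(uint32_t buffer_bytes)
{
	uint32_t lines = buffer_bytes / 16;	/* credits are in 16-byte lines */

	reg_wr32(PBF_REG_INIT_CRD_Q0, lines & 0x7ff);	/* 11-bit field */

	/* Pulse the init bit (set, then clear) so the initial credits are
	 * copied into the working credit registers. */
	reg_wr32(PBF_REG_INIT, 1);
	reg_wr32(PBF_REG_INIT, 0);
}
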
2060 | /* [RW 1] Init bit for port 0. When set the initial credit of port 0 is | 2475 | /* [RW 1] Init bit for port 0. When set the initial credit of port 0 is |
2061 | copied to the credit register. Should be set and then reset after the | 2476 | copied to the credit register. Should be set and then reset after the |
2062 | configuration of the port has ended. */ | 2477 | configuration of the port has ended. */ |
@@ -2069,6 +2484,15 @@ | |||
2069 | copied to the credit register. Should be set and then reset after the | 2484 | copied to the credit register. Should be set and then reset after the |
2070 | configuration of the port has ended. */ | 2485 | configuration of the port has ended. */ |
2071 | #define PBF_REG_INIT_P4 0x14000c | 2486 | #define PBF_REG_INIT_P4 0x14000c |
2487 | /* [R 32] Cyclic counter for the amount of credits in 16-byte lines added for | ||
2488 | * the LB queue. Reset upon init. */ | ||
2489 | #define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q 0x140354 | ||
2490 | /* [R 32] Cyclic counter for the amount of credits in 16-byte lines added for | ||
2491 | * queue 0. Reset upon init. */ | ||
2492 | #define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 0x140358 | ||
2493 | /* [R 32] Cyclic counter for the amount of credits in 16-byte lines added for | ||
2494 | * queue 1. Reset upon init. */ | ||
2495 | #define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 0x14035c | ||
2072 | /* [RW 1] Enable for mac interface 0. */ | 2496 | /* [RW 1] Enable for mac interface 0. */ |
2073 | #define PBF_REG_MAC_IF0_ENABLE 0x140030 | 2497 | #define PBF_REG_MAC_IF0_ENABLE 0x140030 |
2074 | /* [RW 1] Enable for mac interface 1. */ | 2498 | /* [RW 1] Enable for mac interface 1. */ |
@@ -2089,24 +2513,49 @@ | |||
2089 | /* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte | 2513 | /* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte |
2090 | lines. */ | 2514 | lines. */ |
2091 | #define PBF_REG_P0_INIT_CRD 0x1400d0 | 2515 | #define PBF_REG_P0_INIT_CRD 0x1400d0 |
2092 | /* [RW 1] Indication that pause is enabled for port 0. */ | 2516 | /* [R 32] Cyclic counter for the amount of credits in 16-byte lines added for
2093 | #define PBF_REG_P0_PAUSE_ENABLE 0x140014 | 2517 | * port 0. Reset upon init. */ |
2094 | /* [R 8] Number of tasks in port 0 task queue. */ | 2518 | #define PBF_REG_P0_INTERNAL_CRD_FREED_CNT 0x140308 |
2519 | /* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */ | ||
2520 | #define PBF_REG_P0_PAUSE_ENABLE 0x140014 | ||
2521 | /* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */ | ||
2095 | #define PBF_REG_P0_TASK_CNT 0x140204 | 2522 | #define PBF_REG_P0_TASK_CNT 0x140204 |
2096 | /* [R 11] Current credit for port 1 in the tx port buffers in 16 byte lines. */ | 2523 | /* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines |
2524 | * freed from the task queue of port 0. Reset upon init. */ | ||
2525 | #define PBF_REG_P0_TQ_LINES_FREED_CNT 0x1402f0 | ||
2526 | /* [R 12] Number of 8-byte lines occupied in the task queue of port 0. */ | ||
2527 | #define PBF_REG_P0_TQ_OCCUPANCY 0x1402fc | ||
2528 | /* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port | ||
2529 | * buffers in 16 byte lines. */ | ||
2097 | #define PBF_REG_P1_CREDIT 0x140208 | 2530 | #define PBF_REG_P1_CREDIT 0x140208 |
2098 | /* [RW 11] Initial credit for port 1 in the tx port buffers in 16 byte | 2531 | /* [R 11] Removed for E3 B0 - Initial credit for port 1 in the tx port
2099 | lines. */ | 2532 | * buffers in 16 byte lines. */ |
2100 | #define PBF_REG_P1_INIT_CRD 0x1400d4 | 2533 | #define PBF_REG_P1_INIT_CRD 0x1400d4 |
2101 | /* [R 8] Number of tasks in port 1 task queue. */ | 2534 | /* [R 32] Cyclic counter for the amount of credits in 16-byte lines added for
2535 | * port 1. Reset upon init. */ | ||
2536 | #define PBF_REG_P1_INTERNAL_CRD_FREED_CNT 0x14030c | ||
2537 | /* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */ | ||
2102 | #define PBF_REG_P1_TASK_CNT 0x14020c | 2538 | #define PBF_REG_P1_TASK_CNT 0x14020c |
2539 | /* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines | ||
2540 | * freed from the task queue of port 1. Reset upon init. */ | ||
2541 | #define PBF_REG_P1_TQ_LINES_FREED_CNT 0x1402f4 | ||
2542 | /* [R 12] Number of 8-byte lines occupied in the task queue of port 1. */ | ||
2543 | #define PBF_REG_P1_TQ_OCCUPANCY 0x140300 | ||
2103 | /* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */ | 2544 | /* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */ |
2104 | #define PBF_REG_P4_CREDIT 0x140210 | 2545 | #define PBF_REG_P4_CREDIT 0x140210 |
2105 | /* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte | 2546 | /* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte |
2106 | lines. */ | 2547 | lines. */ |
2107 | #define PBF_REG_P4_INIT_CRD 0x1400e0 | 2548 | #define PBF_REG_P4_INIT_CRD 0x1400e0 |
2108 | /* [R 8] Number of tasks in port 4 task queue. */ | 2549 | /* [R 32] Cyclic counter for the amount of credits in 16-byte lines added for
2550 | * port 4. Reset upon init. */ | ||
2551 | #define PBF_REG_P4_INTERNAL_CRD_FREED_CNT 0x140310 | ||
2552 | /* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */ | ||
2109 | #define PBF_REG_P4_TASK_CNT 0x140214 | 2553 | #define PBF_REG_P4_TASK_CNT 0x140214 |
2554 | /* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines | ||
2555 | * freed from the task queue of port 4. Reset upon init. */ | ||
2556 | #define PBF_REG_P4_TQ_LINES_FREED_CNT 0x1402f8 | ||
2557 | /* [R 12] Number of 8-byte lines occupied in the task queue of port 4. */ | ||
2558 | #define PBF_REG_P4_TQ_OCCUPANCY 0x140304 | ||
2110 | /* [RW 5] Interrupt mask register #0 read/write */ | 2559 | /* [RW 5] Interrupt mask register #0 read/write */ |
2111 | #define PBF_REG_PBF_INT_MASK 0x1401d4 | 2560 | #define PBF_REG_PBF_INT_MASK 0x1401d4 |
2112 | /* [R 5] Interrupt register #0 read */ | 2561 | /* [R 5] Interrupt register #0 read */ |
@@ -2115,6 +2564,27 @@ | |||
2115 | #define PBF_REG_PBF_PRTY_MASK 0x1401e4 | 2564 | #define PBF_REG_PBF_PRTY_MASK 0x1401e4 |
2116 | /* [RC 20] Parity register #0 read clear */ | 2565 | /* [RC 20] Parity register #0 read clear */ |
2117 | #define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc | 2566 | #define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc |
2567 | /* [RW 16] The Ethernet type value for L2 tag 0 */ | ||
2568 | #define PBF_REG_TAG_ETHERTYPE_0 0x15c090 | ||
2569 | /* [RW 4] The length of the info field for L2 tag 0. The length is between | ||
2570 | * 2B and 14B; in 2B granularity */ | ||
2571 | #define PBF_REG_TAG_LEN_0 0x15c09c | ||
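
TAG_ETHERTYPE_0 and TAG_LEN_0 together describe one extra L2 tag for the PBF to step over: the Ethertype that introduces it and the size of the info field behind it (2 to 14 bytes, in 2-byte steps). A hedged sketch follows; the assumption that the 4-bit length field carries the byte count directly, and the 0x8100/2-byte example, are illustrative rather than taken from this patch.

#include <stdint.h>
#include <stdio.h>

#define PBF_REG_TAG_ETHERTYPE_0	0x15c090
#define PBF_REG_TAG_LEN_0	0x15c09c

static void reg_wr32(uint32_t off, uint32_t val)	/* placeholder write helper */
{
	printf("REG_WR 0x%06x <- 0x%04x\n", off, val);
}

/* e.g. pbf_describe_tag0(0x8100, 2) for a VLAN-style tag with a 2-byte TCI. */
static void pbf_describe_tag0(uint16_t ethertype, uint32_t info_len_bytes)
{
	reg_wr32(PBF_REG_TAG_ETHERTYPE_0, ethertype);
	reg_wr32(PBF_REG_TAG_LEN_0, info_len_bytes & 0xf);	/* 2..14, even */
}
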
2572 | /* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task | ||
2573 | * queue. Reset upon init. */ | ||
2574 | #define PBF_REG_TQ_LINES_FREED_CNT_LB_Q 0x14038c | ||
2575 | /* [R 32] Cyclic counter for number of 8 byte lines freed from the task | ||
2576 | * queue 0. Reset upon init. */ | ||
2577 | #define PBF_REG_TQ_LINES_FREED_CNT_Q0 0x140390 | ||
2578 | /* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1. | ||
2579 | * Reset upon init. */ | ||
2580 | #define PBF_REG_TQ_LINES_FREED_CNT_Q1 0x140394 | ||
2581 | /* [R 13] Number of 8-byte lines occupied in the task queue of the LB | ||
2582 | * queue. */ | ||
2583 | #define PBF_REG_TQ_OCCUPANCY_LB_Q 0x1403a8 | ||
2584 | /* [R 13] Number of 8-byte lines occupied in the task queue of queue 0. */ | ||
2585 | #define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac | ||
2586 | /* [R 13] Number of 8-byte lines occupied in the task queue of queue 1. */ | ||
2587 | #define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0 | ||
2118 | #define PB_REG_CONTROL 0 | 2588 | #define PB_REG_CONTROL 0 |
2119 | /* [RW 2] Interrupt mask register #0 read/write */ | 2589 | /* [RW 2] Interrupt mask register #0 read/write */ |
2120 | #define PB_REG_PB_INT_MASK 0x28 | 2590 | #define PB_REG_PB_INT_MASK 0x28 |
@@ -2444,10 +2914,24 @@ | |||
2444 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic | 2914 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic |
2445 | * Ethernet header. */ | 2915 | * Ethernet header. */ |
2446 | #define PRS_REG_HDRS_AFTER_BASIC 0x40238 | 2916 | #define PRS_REG_HDRS_AFTER_BASIC 0x40238 |
2917 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic | ||
2918 | * Ethernet header for port 0 packets. */ | ||
2919 | #define PRS_REG_HDRS_AFTER_BASIC_PORT_0 0x40270 | ||
2920 | #define PRS_REG_HDRS_AFTER_BASIC_PORT_1 0x40290 | ||
2921 | /* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */ | ||
2922 | #define PRS_REG_HDRS_AFTER_TAG_0 0x40248 | ||
2923 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for | ||
2924 | * port 0 packets */ | ||
2925 | #define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 0x40280 | ||
2926 | #define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 0x402a0 | ||
2447 | /* [RW 4] The increment value to send in the CFC load request message */ | 2927 | /* [RW 4] The increment value to send in the CFC load request message */ |
2448 | #define PRS_REG_INC_VALUE 0x40048 | 2928 | #define PRS_REG_INC_VALUE 0x40048 |
2449 | /* [RW 6] Bit-map indicating which headers must appear in the packet */ | 2929 | /* [RW 6] Bit-map indicating which headers must appear in the packet */ |
2450 | #define PRS_REG_MUST_HAVE_HDRS 0x40254 | 2930 | #define PRS_REG_MUST_HAVE_HDRS 0x40254 |
2931 | /* [RW 6] Bit-map indicating which headers must appear in the packet for | ||
2932 | * port 0 packets */ | ||
2933 | #define PRS_REG_MUST_HAVE_HDRS_PORT_0 0x4028c | ||
2934 | #define PRS_REG_MUST_HAVE_HDRS_PORT_1 0x402ac | ||
2451 | #define PRS_REG_NIC_MODE 0x40138 | 2935 | #define PRS_REG_NIC_MODE 0x40138 |
2452 | /* [RW 8] The 8-bit event ID for cases where there is no match on the | 2936 | /* [RW 8] The 8-bit event ID for cases where there is no match on the |
2453 | connection. Used in packet start message to TCM. */ | 2937 | connection. Used in packet start message to TCM. */ |
@@ -2496,6 +2980,11 @@ | |||
2496 | #define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158 | 2980 | #define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158 |
2497 | /* [R 4] debug only: SRC current credit. Transaction based. */ | 2981 | /* [R 4] debug only: SRC current credit. Transaction based. */ |
2498 | #define PRS_REG_SRC_CURRENT_CREDIT 0x4016c | 2982 | #define PRS_REG_SRC_CURRENT_CREDIT 0x4016c |
2983 | /* [RW 16] The Ethernet type value for L2 tag 0 */ | ||
2984 | #define PRS_REG_TAG_ETHERTYPE_0 0x401d4 | ||
2985 | /* [RW 4] The length of the info field for L2 tag 0. The length is between | ||
2986 | * 2B and 14B; in 2B granularity */ | ||
2987 | #define PRS_REG_TAG_LEN_0 0x4022c | ||
2499 | /* [R 8] debug only: TCM current credit. Cycle based. */ | 2988 | /* [R 8] debug only: TCM current credit. Cycle based. */ |
2500 | #define PRS_REG_TCM_CURRENT_CREDIT 0x40160 | 2989 | #define PRS_REG_TCM_CURRENT_CREDIT 0x40160 |
2501 | /* [R 8] debug only: TSDM current credit. Transaction based. */ | 2990 | /* [R 8] debug only: TSDM current credit. Transaction based. */ |
@@ -3080,6 +3569,7 @@ | |||
3080 | #define QM_REG_BYTECREDITAFULLTHR 0x168094 | 3569 | #define QM_REG_BYTECREDITAFULLTHR 0x168094 |
3081 | /* [RW 4] The initial credit for interface */ | 3570 | /* [RW 4] The initial credit for interface */ |
3082 | #define QM_REG_CMINITCRD_0 0x1680cc | 3571 | #define QM_REG_CMINITCRD_0 0x1680cc |
3572 | #define QM_REG_BYTECRDCMDQ_0 0x16e6e8 | ||
3083 | #define QM_REG_CMINITCRD_1 0x1680d0 | 3573 | #define QM_REG_CMINITCRD_1 0x1680d0 |
3084 | #define QM_REG_CMINITCRD_2 0x1680d4 | 3574 | #define QM_REG_CMINITCRD_2 0x1680d4 |
3085 | #define QM_REG_CMINITCRD_3 0x1680d8 | 3575 | #define QM_REG_CMINITCRD_3 0x1680d8 |
@@ -3170,7 +3660,10 @@ | |||
3170 | /* [RW 2] The PCI attributes field used in the PCI request. */ | 3660 | /* [RW 2] The PCI attributes field used in the PCI request. */ |
3171 | #define QM_REG_PCIREQAT 0x168054 | 3661 | #define QM_REG_PCIREQAT 0x168054 |
3172 | #define QM_REG_PF_EN 0x16e70c | 3662 | #define QM_REG_PF_EN 0x16e70c |
3173 | /* [R 16] The byte credit of port 0 */ | 3663 | /* [R 24] The number of tasks stored in the QM for the PF. only even |
3664 | * functions are valid in E2 (odd I registers will be hard wired to 0) */ | ||
3665 | #define QM_REG_PF_USG_CNT_0 0x16e040 | ||
3666 | /* [R 16] NOT USED */ | ||
3174 | #define QM_REG_PORT0BYTECRD 0x168300 | 3667 | #define QM_REG_PORT0BYTECRD 0x168300 |
3175 | /* [R 16] The byte credit of port 1 */ | 3668 | /* [R 16] The byte credit of port 1 */ |
3176 | #define QM_REG_PORT1BYTECRD 0x168304 | 3669 | #define QM_REG_PORT1BYTECRD 0x168304 |
@@ -3782,6 +4275,8 @@ | |||
3782 | #define TM_REG_LIN0_LOGIC_ADDR 0x164240 | 4275 | #define TM_REG_LIN0_LOGIC_ADDR 0x164240 |
3783 | /* [RW 18] Linear0 Max active cid (in banks of 32 entries). */ | 4276 | /* [RW 18] Linear0 Max active cid (in banks of 32 entries). */ |
3784 | #define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048 | 4277 | #define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048 |
4278 | /* [ST 16] Linear0 Number of scans counter. */ | ||
4279 | #define TM_REG_LIN0_NUM_SCANS 0x1640a0 | ||
3785 | /* [WB 64] Linear0 phy address. */ | 4280 | /* [WB 64] Linear0 phy address. */ |
3786 | #define TM_REG_LIN0_PHY_ADDR 0x164270 | 4281 | #define TM_REG_LIN0_PHY_ADDR 0x164270 |
3787 | /* [RW 1] Linear0 physical address valid. */ | 4282 | /* [RW 1] Linear0 physical address valid. */ |
@@ -3789,6 +4284,7 @@ | |||
3789 | #define TM_REG_LIN0_SCAN_ON 0x1640d0 | 4284 | #define TM_REG_LIN0_SCAN_ON 0x1640d0 |
3790 | /* [RW 24] Linear0 array scan timeout. */ | 4285 | /* [RW 24] Linear0 array scan timeout. */ |
3791 | #define TM_REG_LIN0_SCAN_TIME 0x16403c | 4286 | #define TM_REG_LIN0_SCAN_TIME 0x16403c |
4287 | #define TM_REG_LIN0_VNIC_UC 0x164128 | ||
3792 | /* [RW 32] Linear1 logic address. */ | 4288 | /* [RW 32] Linear1 logic address. */ |
3793 | #define TM_REG_LIN1_LOGIC_ADDR 0x164250 | 4289 | #define TM_REG_LIN1_LOGIC_ADDR 0x164250 |
3794 | /* [WB 64] Linear1 phy address. */ | 4290 | /* [WB 64] Linear1 phy address. */ |
@@ -4175,6 +4671,8 @@ | |||
4175 | #define UCM_REG_UCM_INT_MASK 0xe01d4 | 4671 | #define UCM_REG_UCM_INT_MASK 0xe01d4 |
4176 | /* [R 11] Interrupt register #0 read */ | 4672 | /* [R 11] Interrupt register #0 read */ |
4177 | #define UCM_REG_UCM_INT_STS 0xe01c8 | 4673 | #define UCM_REG_UCM_INT_STS 0xe01c8 |
4674 | /* [RW 27] Parity mask register #0 read/write */ | ||
4675 | #define UCM_REG_UCM_PRTY_MASK 0xe01e4 | ||
4178 | /* [R 27] Parity register #0 read */ | 4676 | /* [R 27] Parity register #0 read */ |
4179 | #define UCM_REG_UCM_PRTY_STS 0xe01d8 | 4677 | #define UCM_REG_UCM_PRTY_STS 0xe01d8 |
4180 | /* [RC 27] Parity register #0 read clear */ | 4678 | /* [RC 27] Parity register #0 read clear */ |
@@ -4265,6 +4763,23 @@ | |||
4265 | The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] - | 4763 | The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] - |
4266 | header pointer. */ | 4764 | header pointer. */ |
4267 | #define UCM_REG_XX_TABLE 0xe0300 | 4765 | #define UCM_REG_XX_TABLE 0xe0300 |
4766 | #define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15) | ||
4767 | #define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24) | ||
4768 | #define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5) | ||
4769 | #define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4) | ||
4770 | #define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1) | ||
4771 | #define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13) | ||
4772 | #define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0) | ||
4773 | #define UMAC_REG_COMMAND_CONFIG 0x8 | ||
4774 | /* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers | ||
4775 | * to bit 17 of the MAC address etc. */ | ||
4776 | #define UMAC_REG_MAC_ADDR0 0xc | ||
4777 | /* [RW 16] Register Bit 0 refers to Bit 0 of the MAC address; Register Bit 1 | ||
4778 | * refers to Bit 1 of the MAC address etc. Bits 16 to 31 are reserved. */ | ||
4779 | #define UMAC_REG_MAC_ADDR1 0x10 | ||
4780 | /* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive | ||
4781 | * logic to check frames. */ | ||
4782 | #define UMAC_REG_MAXFR 0x14 | ||
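
Per the comments above, the station address is split across the two registers: MAC_ADDR0 carries MAC bits 16-47 and MAC_ADDR1 carries bits 0-15 (its upper half is reserved). One plausible packing of a six-octet address consistent with that split; umac_base and reg_wr32() are placeholders, since the UMAC block's base offset is not part of this hunk.

#include <stdint.h>
#include <stdio.h>

#define UMAC_REG_MAC_ADDR0	0xc
#define UMAC_REG_MAC_ADDR1	0x10

static void reg_wr32(uint32_t off, uint32_t val)	/* placeholder write helper */
{
	printf("REG_WR 0x%05x <- 0x%08x\n", off, val);
}

static void umac_set_mac(uint32_t umac_base, const uint8_t mac[6])
{
	/* First four octets into MAC_ADDR0, last two into MAC_ADDR1. */
	reg_wr32(umac_base + UMAC_REG_MAC_ADDR0,
		 ((uint32_t)mac[0] << 24) | ((uint32_t)mac[1] << 16) |
		 ((uint32_t)mac[2] << 8) | mac[3]);
	reg_wr32(umac_base + UMAC_REG_MAC_ADDR1,
		 ((uint32_t)mac[4] << 8) | mac[5]);
}
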
4268 | /* [RW 8] The event id for aggregated interrupt 0 */ | 4783 | /* [RW 8] The event id for aggregated interrupt 0 */ |
4269 | #define USDM_REG_AGG_INT_EVENT_0 0xc4038 | 4784 | #define USDM_REG_AGG_INT_EVENT_0 0xc4038 |
4270 | #define USDM_REG_AGG_INT_EVENT_1 0xc403c | 4785 | #define USDM_REG_AGG_INT_EVENT_1 0xc403c |
@@ -4696,8 +5211,13 @@ | |||
4696 | #define XCM_REG_XCM_INT_MASK 0x202b4 | 5211 | #define XCM_REG_XCM_INT_MASK 0x202b4 |
4697 | /* [R 14] Interrupt register #0 read */ | 5212 | /* [R 14] Interrupt register #0 read */ |
4698 | #define XCM_REG_XCM_INT_STS 0x202a8 | 5213 | #define XCM_REG_XCM_INT_STS 0x202a8 |
5214 | /* [RW 30] Parity mask register #0 read/write */ | ||
5215 | #define XCM_REG_XCM_PRTY_MASK 0x202c4 | ||
4699 | /* [R 30] Parity register #0 read */ | 5216 | /* [R 30] Parity register #0 read */ |
4700 | #define XCM_REG_XCM_PRTY_STS 0x202b8 | 5217 | #define XCM_REG_XCM_PRTY_STS 0x202b8 |
5218 | /* [RC 30] Parity register #0 read clear */ | ||
5219 | #define XCM_REG_XCM_PRTY_STS_CLR 0x202bc | ||
5220 | |||
4701 | /* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS | 5221 | /* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS |
4702 | REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). | 5222 | REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). |
4703 | Is used to determine the number of the AG context REG-pairs written back; | 5223 | Is used to determine the number of the AG context REG-pairs written back; |
@@ -4772,6 +5292,34 @@ | |||
4772 | #define XCM_REG_XX_MSG_NUM 0x20428 | 5292 | #define XCM_REG_XX_MSG_NUM 0x20428 |
4773 | /* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ | 5293 | /* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ |
4774 | #define XCM_REG_XX_OVFL_EVNT_ID 0x20058 | 5294 | #define XCM_REG_XX_OVFL_EVNT_ID 0x20058 |
5295 | #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) | ||
5296 | #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) | ||
5297 | #define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3) | ||
5298 | #define XMAC_CTRL_REG_RX_EN (0x1<<1) | ||
5299 | #define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) | ||
5300 | #define XMAC_CTRL_REG_TX_EN (0x1<<0) | ||
5301 | #define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18) | ||
5302 | #define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17) | ||
5303 | #define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0) | ||
5304 | #define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3) | ||
5305 | #define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4) | ||
5306 | #define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN (0x1<<5) | ||
5307 | #define XMAC_REG_CLEAR_RX_LSS_STATUS 0x60 | ||
5308 | #define XMAC_REG_CTRL 0 | ||
5309 | /* [RW 16] Upper 16 bits of the 48-bit ctrl_sa value. Used as the SA in PAUSE/PFC | ||
5310 | * packets transmitted by the MAC */ | ||
5311 | #define XMAC_REG_CTRL_SA_HI 0x2c | ||
5312 | /* [RW 32] Lower 32 bits of the 48-bit ctrl_sa value. Used as the SA in PAUSE/PFC | ||
5313 | * packets transmitted by the MAC */ | ||
5314 | #define XMAC_REG_CTRL_SA_LO 0x28 | ||
5315 | #define XMAC_REG_PAUSE_CTRL 0x68 | ||
5316 | #define XMAC_REG_PFC_CTRL 0x70 | ||
5317 | #define XMAC_REG_PFC_CTRL_HI 0x74 | ||
5318 | #define XMAC_REG_RX_LSS_STATUS 0x58 | ||
5319 | /* [RW 14] Maximum packet size in receive direction; exclusive of preamble & | ||
5320 | * CRC in strip mode */ | ||
5321 | #define XMAC_REG_RX_MAX_SIZE 0x40 | ||
5322 | #define XMAC_REG_TX_CTRL 0x20 | ||
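
Using the XMAC offsets and bit names above, a minimal bring-up amounts to enabling the RX and TX paths in the control register (leaving SOFT_RESET clear) and, if PFC is wanted, setting the PFC enables in PFC_CTRL_HI. This is a sketch only; xmac_base and reg_wr32() are placeholders, and the driver's real ordering and additional PFC fields are not shown in this hunk.

#include <stdint.h>
#include <stdio.h>

#define XMAC_REG_CTRL				0
#define XMAC_REG_PFC_CTRL_HI			0x74
#define XMAC_CTRL_REG_TX_EN			(0x1 << 0)
#define XMAC_CTRL_REG_RX_EN			(0x1 << 1)
#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN	(0x1 << 3)
#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN		(0x1 << 4)
#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN		(0x1 << 5)

static void reg_wr32(uint32_t off, uint32_t val)	/* placeholder write helper */
{
	printf("REG_WR 0x%05x <- 0x%08x\n", off, val);
}

static void xmac_enable(uint32_t xmac_base, int with_pfc)
{
	reg_wr32(xmac_base + XMAC_REG_CTRL,
		 XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);

	if (with_pfc)
		reg_wr32(xmac_base + XMAC_REG_PFC_CTRL_HI,
			 XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
			 XMAC_PFC_CTRL_HI_REG_TX_PFC_EN |
			 XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN);
}
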
4775 | /* [RW 16] Indirect access to the XX table of the XX protection mechanism. | 5323 | /* [RW 16] Indirect access to the XX table of the XX protection mechanism. |
4776 | The fields are:[4:0] - tail pointer; 9:5] - Link List size; 14:10] - | 5324 | The fields are:[4:0] - tail pointer; 9:5] - Link List size; 14:10] - |
4777 | header pointer. */ | 5325 | header pointer. */ |
@@ -4846,6 +5394,10 @@ | |||
4846 | #define XSDM_REG_NUM_OF_Q9_CMD 0x166268 | 5394 | #define XSDM_REG_NUM_OF_Q9_CMD 0x166268 |
4847 | /* [RW 13] The start address in the internal RAM for queue counters */ | 5395 | /* [RW 13] The start address in the internal RAM for queue counters */ |
4848 | #define XSDM_REG_Q_COUNTER_START_ADDR 0x166010 | 5396 | #define XSDM_REG_Q_COUNTER_START_ADDR 0x166010 |
5397 | /* [W 17] Generate an operation after completion; bit-16 is | ||
5398 | * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and | ||
5399 | * bits 4:0 are the T124Param[4:0] */ | ||
5400 | #define XSDM_REG_OPERATION_GEN 0x1664c4 | ||
4849 | /* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ | 5401 | /* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ |
4850 | #define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548 | 5402 | #define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548 |
4851 | /* [R 1] parser fifo empty in sdm_sync block */ | 5403 | /* [R 1] parser fifo empty in sdm_sync block */ |
@@ -5019,6 +5571,7 @@ | |||
5019 | #define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3) | 5571 | #define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3) |
5020 | #define BIGMAC_REGISTER_RX_CONTROL (0x21<<3) | 5572 | #define BIGMAC_REGISTER_RX_CONTROL (0x21<<3) |
5021 | #define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3) | 5573 | #define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3) |
5574 | #define BIGMAC_REGISTER_RX_LSS_STATUS (0x43<<3) | ||
5022 | #define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3) | 5575 | #define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3) |
5023 | #define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3) | 5576 | #define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3) |
5024 | #define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3) | 5577 | #define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3) |
@@ -5034,6 +5587,7 @@ | |||
5034 | #define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3) | 5587 | #define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3) |
5035 | #define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3) | 5588 | #define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3) |
5036 | #define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3) | 5589 | #define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3) |
5590 | #define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E<<3) | ||
5037 | #define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3) | 5591 | #define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3) |
5038 | #define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3) | 5592 | #define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3) |
5039 | #define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3) | 5593 | #define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3) |
@@ -5052,7 +5606,9 @@ | |||
5052 | #define EMAC_LED_OVERRIDE (1L<<0) | 5606 | #define EMAC_LED_OVERRIDE (1L<<0) |
5053 | #define EMAC_LED_TRAFFIC (1L<<6) | 5607 | #define EMAC_LED_TRAFFIC (1L<<6) |
5054 | #define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26) | 5608 | #define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26) |
5609 | #define EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26) | ||
5055 | #define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26) | 5610 | #define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26) |
5611 | #define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26) | ||
5056 | #define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26) | 5612 | #define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26) |
5057 | #define EMAC_MDIO_COMM_DATA (0xffffL<<0) | 5613 | #define EMAC_MDIO_COMM_DATA (0xffffL<<0) |
5058 | #define EMAC_MDIO_COMM_START_BUSY (1L<<29) | 5614 | #define EMAC_MDIO_COMM_START_BUSY (1L<<29) |
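
The EMAC_MDIO_COMM_* bits above describe the command/status word behind MDIO accesses: a command field with separate clause-22 and clause-45 encodings, a 16-bit data field, and START_BUSY, which the hardware clears when the transaction finishes. The helper below only fills in the fields defined here; the PHY/device address fields of the same word, and the MDIO_COMM register offset itself, are defined elsewhere, so issuing and polling the transaction is left to the caller.

#include <stdint.h>

#define EMAC_MDIO_COMM_COMMAND_ADDRESS	(0u << 26)
#define EMAC_MDIO_COMM_COMMAND_WRITE_22	(1u << 26)
#define EMAC_MDIO_COMM_COMMAND_READ_22	(2u << 26)
#define EMAC_MDIO_COMM_COMMAND_READ_45	(3u << 26)
#define EMAC_MDIO_COMM_DATA		0xffffu
#define EMAC_MDIO_COMM_START_BUSY	(1u << 29)

/* e.g. emac_mdio_cmd(EMAC_MDIO_COMM_COMMAND_WRITE_22, val) for a clause-22
 * write, or COMMAND_ADDRESS followed by COMMAND_READ_45 for a clause-45 read. */
static uint32_t emac_mdio_cmd(uint32_t command, uint16_t data)
{
	return command | (data & EMAC_MDIO_COMM_DATA) | EMAC_MDIO_COMM_START_BUSY;
}
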
@@ -5128,16 +5684,24 @@ | |||
5128 | #define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27) | 5684 | #define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27) |
5129 | #define MISC_REGISTERS_RESET_REG_1_SET 0x584 | 5685 | #define MISC_REGISTERS_RESET_REG_1_SET 0x584 |
5130 | #define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 | 5686 | #define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 |
5687 | #define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24) | ||
5688 | #define MISC_REGISTERS_RESET_REG_2_MSTAT1 (0x1<<25) | ||
5131 | #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) | 5689 | #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) |
5132 | #define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14) | 5690 | #define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14) |
5133 | #define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15) | 5691 | #define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15) |
5134 | #define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4) | 5692 | #define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4) |
5135 | #define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6) | 5693 | #define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6) |
5694 | #define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1<<8) | ||
5695 | #define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1<<7) | ||
5136 | #define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5) | 5696 | #define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5) |
5137 | #define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13) | 5697 | #define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13) |
5138 | #define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11) | 5698 | #define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11) |
5699 | #define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO (0x1<<13) | ||
5139 | #define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9) | 5700 | #define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9) |
5140 | #define MISC_REGISTERS_RESET_REG_2_SET 0x594 | 5701 | #define MISC_REGISTERS_RESET_REG_2_SET 0x594 |
5702 | #define MISC_REGISTERS_RESET_REG_2_UMAC0 (0x1<<20) | ||
5703 | #define MISC_REGISTERS_RESET_REG_2_XMAC (0x1<<22) | ||
5704 | #define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT (0x1<<23) | ||
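
The reset registers come as SET/CLEAR address pairs, so a single block's reset bit can be flipped without a read-modify-write of the whole register. A sketch of cycling the XMAC through reset on that basis, under the assumption (not stated in this hunk) that a set bit means the block is out of reset, so writing the bit to _CLEAR asserts the reset and writing it to _SET releases it; misc_base and reg_wr32() are placeholders.

#include <stdint.h>
#include <stdio.h>

#define MISC_REGISTERS_RESET_REG_2_SET		0x594
#define MISC_REGISTERS_RESET_REG_2_CLEAR	0x598
#define MISC_REGISTERS_RESET_REG_2_XMAC		(0x1 << 22)

static void reg_wr32(uint32_t off, uint32_t val)	/* placeholder write helper */
{
	printf("REG_WR 0x%05x <- 0x%08x\n", off, val);
}

/* misc_base: GRC offset of the MISC block (not defined in this hunk). */
static void xmac_pulse_reset(uint32_t misc_base)
{
	reg_wr32(misc_base + MISC_REGISTERS_RESET_REG_2_CLEAR,
		 MISC_REGISTERS_RESET_REG_2_XMAC);	/* assert reset */
	reg_wr32(misc_base + MISC_REGISTERS_RESET_REG_2_SET,
		 MISC_REGISTERS_RESET_REG_2_XMAC);	/* release reset */
}
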
5141 | #define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8 | 5705 | #define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8 |
5142 | #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1) | 5706 | #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1) |
5143 | #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2) | 5707 | #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2) |
@@ -5160,74 +5724,86 @@ | |||
5160 | #define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1 | 5724 | #define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1 |
5161 | #define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 | 5725 | #define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 |
5162 | #define MISC_REGISTERS_SPIO_SET_POS 8 | 5726 | #define MISC_REGISTERS_SPIO_SET_POS 8 |
5727 | #define HW_LOCK_DRV_FLAGS 10 | ||
5163 | #define HW_LOCK_MAX_RESOURCE_VALUE 31 | 5728 | #define HW_LOCK_MAX_RESOURCE_VALUE 31 |
5164 | #define HW_LOCK_RESOURCE_GPIO 1 | 5729 | #define HW_LOCK_RESOURCE_GPIO 1 |
5165 | #define HW_LOCK_RESOURCE_MDIO 0 | 5730 | #define HW_LOCK_RESOURCE_MDIO 0 |
5166 | #define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 | 5731 | #define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 |
5167 | #define HW_LOCK_RESOURCE_RESERVED_08 8 | 5732 | #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 |
5733 | #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 | ||
5168 | #define HW_LOCK_RESOURCE_SPIO 2 | 5734 | #define HW_LOCK_RESOURCE_SPIO 2 |
5169 | #define HW_LOCK_RESOURCE_UNDI 5 | 5735 | #define HW_LOCK_RESOURCE_UNDI 5 |
5170 | #define PRS_FLAG_OVERETH_IPV4 1 | 5736 | #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) |
5171 | #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) | 5737 | #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) |
5172 | #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) | 5738 | #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) |
5173 | #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18) | 5739 | #define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31) |
5174 | #define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31) | 5740 | #define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30) |
5175 | #define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9) | 5741 | #define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9) |
5176 | #define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (1<<8) | 5742 | #define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8) |
5177 | #define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (1<<7) | 5743 | #define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (0x1<<7) |
5178 | #define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (1<<6) | 5744 | #define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1<<6) |
5179 | #define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (1<<29) | 5745 | #define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1<<29) |
5180 | #define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (1<<28) | 5746 | #define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1<<28) |
5181 | #define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (1<<1) | 5747 | #define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1<<1) |
5182 | #define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (1<<0) | 5748 | #define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1<<0) |
5183 | #define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (1<<18) | 5749 | #define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1<<18) |
5184 | #define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (1<<11) | 5750 | #define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1<<11) |
5185 | #define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (1<<13) | 5751 | #define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1<<10) |
5186 | #define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (1<<12) | 5752 | #define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1<<13) |
5187 | #define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (1<<5) | 5753 | #define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1<<12) |
5188 | #define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (1<<9) | 5754 | #define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2) |
5189 | #define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (1<<12) | 5755 | #define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12) |
5190 | #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (1<<28) | 5756 | #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28) |
5191 | #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (1<<31) | 5757 | #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31) |
5192 | #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (1<<29) | 5758 | #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29) |
5193 | #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (1<<30) | 5759 | #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30) |
5194 | #define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (1<<15) | 5760 | #define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15) |
5195 | #define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (1<<14) | 5761 | #define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1<<14) |
5196 | #define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20) | 5762 | #define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1<<14) |
5197 | #define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (1<<0) | 5763 | #define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1<<20) |
5198 | #define AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT (1<<31) | 5764 | #define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1<<31) |
5199 | #define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2) | 5765 | #define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1<<30) |
5200 | #define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3) | 5766 | #define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1<<0) |
5201 | #define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (1<<3) | 5767 | #define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2) |
5202 | #define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (1<<2) | 5768 | #define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3) |
5203 | #define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (1<<5) | 5769 | #define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1<<5) |
5204 | #define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (1<<4) | 5770 | #define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1<<4) |
5205 | #define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (1<<3) | 5771 | #define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1<<3) |
5206 | #define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (1<<2) | 5772 | #define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1<<2) |
5207 | #define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (1<<22) | 5773 | #define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1<<3) |
5208 | #define AEU_INPUTS_ATTN_BITS_SPIO5 (1<<15) | 5774 | #define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1<<2) |
5209 | #define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (1<<27) | 5775 | #define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1<<22) |
5210 | #define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (1<<5) | 5776 | #define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1<<15) |
5211 | #define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (1<<25) | 5777 | #define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1<<27) |
5212 | #define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (1<<24) | 5778 | #define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1<<26) |
5213 | #define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (1<<29) | 5779 | #define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1<<5) |
5214 | #define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (1<<28) | 5780 | #define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1<<4) |
5215 | #define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (1<<23) | 5781 | #define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1<<25) |
5216 | #define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (1<<27) | 5782 | #define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1<<24) |
5217 | #define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (1<<26) | 5783 | #define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1<<29) |
5218 | #define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (1<<21) | 5784 | #define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1<<28) |
5219 | #define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (1<<20) | 5785 | #define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1<<23) |
5220 | #define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (1<<25) | 5786 | #define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1<<22) |
5221 | #define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (1<<24) | 5787 | #define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1<<27) |
5222 | #define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (1<<16) | 5788 | #define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1<<26) |
5223 | #define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (1<<9) | 5789 | #define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1<<21) |
5224 | #define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (1<<7) | 5790 | #define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1<<20) |
5225 | #define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (1<<6) | 5791 | #define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1<<25) |
5226 | #define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (1<<11) | 5792 | #define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1<<24) |
5227 | #define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (1<<10) | 5793 | #define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1<<16) |
5794 | #define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1<<9) | ||
5795 | #define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1<<8) | ||
5796 | #define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1<<7) | ||
5797 | #define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1<<6) | ||
5798 | #define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1<<11) | ||
5799 | #define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1<<10) | ||
5800 | |||
5801 | #define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (0x1<<5) | ||
5802 | #define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (0x1<<9) | ||
5803 | |||
5228 | #define RESERVED_GENERAL_ATTENTION_BIT_0 0 | 5804 | #define RESERVED_GENERAL_ATTENTION_BIT_0 0 |
5229 | 5805 | ||
5230 | #define EVEREST_GEN_ATTN_IN_USE_MASK 0x3ffe0 | 5806 | #define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0 |
5231 | #define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000 | 5807 | #define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000 |
5232 | 5808 | ||
5233 | #define RESERVED_GENERAL_ATTENTION_BIT_6 6 | 5809 | #define RESERVED_GENERAL_ATTENTION_BIT_6 6 |
@@ -5317,7 +5893,13 @@ | |||
5317 | #define GRCBASE_HC 0x108000 | 5893 | #define GRCBASE_HC 0x108000 |
5318 | #define GRCBASE_PXP2 0x120000 | 5894 | #define GRCBASE_PXP2 0x120000 |
5319 | #define GRCBASE_PBF 0x140000 | 5895 | #define GRCBASE_PBF 0x140000 |
5896 | #define GRCBASE_UMAC0 0x160000 | ||
5897 | #define GRCBASE_UMAC1 0x160400 | ||
5320 | #define GRCBASE_XPB 0x161000 | 5898 | #define GRCBASE_XPB 0x161000 |
5899 | #define GRCBASE_MSTAT0 0x162000 | ||
5900 | #define GRCBASE_MSTAT1 0x162800 | ||
5901 | #define GRCBASE_XMAC0 0x163000 | ||
5902 | #define GRCBASE_XMAC1 0x163800 | ||
5321 | #define GRCBASE_TIMERS 0x164000 | 5903 | #define GRCBASE_TIMERS 0x164000 |
5322 | #define GRCBASE_XSDM 0x166000 | 5904 | #define GRCBASE_XSDM 0x166000 |
5323 | #define GRCBASE_QM 0x168000 | 5905 | #define GRCBASE_QM 0x168000 |
@@ -5883,6 +6465,10 @@ | |||
5883 | #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00 | 6465 | #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00 |
5884 | #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00 | 6466 | #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00 |
5885 | #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00 | 6467 | #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00 |
6468 | #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00 | ||
6469 | #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00 | ||
6470 | #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00 | ||
6471 | #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00 | ||
5886 | 6472 | ||
5887 | 6473 | ||
5888 | #define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130 | 6474 | #define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130 |
@@ -6032,15 +6618,11 @@ The other bits are reserved and should be zero*/ | |||
6032 | #define MDIO_PMA_REG_CTRL 0x0 | 6618 | #define MDIO_PMA_REG_CTRL 0x0 |
6033 | #define MDIO_PMA_REG_STATUS 0x1 | 6619 | #define MDIO_PMA_REG_STATUS 0x1 |
6034 | #define MDIO_PMA_REG_10G_CTRL2 0x7 | 6620 | #define MDIO_PMA_REG_10G_CTRL2 0x7 |
6621 | #define MDIO_PMA_REG_TX_DISABLE 0x0009 | ||
6035 | #define MDIO_PMA_REG_RX_SD 0xa | 6622 | #define MDIO_PMA_REG_RX_SD 0xa |
6036 | /*bcm*/ | 6623 | /*bcm*/ |
6037 | #define MDIO_PMA_REG_BCM_CTRL 0x0096 | 6624 | #define MDIO_PMA_REG_BCM_CTRL 0x0096 |
6038 | #define MDIO_PMA_REG_FEC_CTRL 0x00ab | 6625 | #define MDIO_PMA_REG_FEC_CTRL 0x00ab |
6039 | #define MDIO_PMA_REG_RX_ALARM_CTRL 0x9000 | ||
6040 | #define MDIO_PMA_REG_LASI_CTRL 0x9002 | ||
6041 | #define MDIO_PMA_REG_RX_ALARM 0x9003 | ||
6042 | #define MDIO_PMA_REG_TX_ALARM 0x9004 | ||
6043 | #define MDIO_PMA_REG_LASI_STATUS 0x9005 | ||
6044 | #define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800 | 6626 | #define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800 |
6045 | #define MDIO_PMA_REG_DIGITAL_CTRL 0xc808 | 6627 | #define MDIO_PMA_REG_DIGITAL_CTRL 0xc808 |
6046 | #define MDIO_PMA_REG_DIGITAL_STATUS 0xc809 | 6628 | #define MDIO_PMA_REG_DIGITAL_STATUS 0xc809 |
@@ -6201,6 +6783,169 @@ The other bits are reserved and should be zero*/ | |||
6201 | #define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 | 6783 | #define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 |
6202 | #define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 | 6784 | #define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 |
6203 | 6785 | ||
6786 | /* BCM84833 only */ | ||
6787 | #define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a | ||
6788 | #define MDIO_84833_SUPER_ISOLATE 0x8000 | ||
6789 | /* These are mailbox register set used by 84833. */ | ||
6790 | #define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005 | ||
6791 | #define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006 | ||
6792 | #define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007 | ||
6793 | #define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008 | ||
6794 | #define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009 | ||
6795 | #define MDIO_84833_TOP_CFG_DATA3_REG 0x4011 | ||
6796 | #define MDIO_84833_TOP_CFG_DATA4_REG 0x4012 | ||
6797 | |||
6798 | /* Mailbox command set used by 84833. */ | ||
6799 | #define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE 0x2 | ||
6800 | /* Mailbox status set used by 84833. */ | ||
6801 | #define PHY84833_CMD_RECEIVED 0x0001 | ||
6802 | #define PHY84833_CMD_IN_PROGRESS 0x0002 | ||
6803 | #define PHY84833_CMD_COMPLETE_PASS 0x0004 | ||
6804 | #define PHY84833_CMD_COMPLETE_ERROR 0x0008 | ||
6805 | #define PHY84833_CMD_OPEN_FOR_CMDS 0x0010 | ||
6806 | #define PHY84833_CMD_SYSTEM_BOOT 0x0020 | ||
6807 | #define PHY84833_CMD_NOT_OPEN_FOR_CMDS 0x0040 | ||
6808 | #define PHY84833_CMD_CLEAR_COMPLETE 0x0080 | ||
6809 | #define PHY84833_CMD_OPEN_OVERRIDE 0xa5a5 | ||
6810 | |||
6811 | |||
6812 | /* 84833 F/W Feature Commands */ | ||
6813 | #define PHY84833_DIAG_CMD_GET_EEE_MODE 0x27 | ||
6814 | #define PHY84833_DIAG_CMD_SET_EEE_MODE 0x28 | ||
6815 | |||
6816 | /* Warpcore clause 45 addressing */ | ||
6817 | #define MDIO_WC_DEVAD 0x3 | ||
6818 | #define MDIO_WC_REG_IEEE0BLK_MIICNTL 0x0 | ||
6819 | #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 | ||
6820 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 | ||
6821 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 | ||
6822 | #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96 | ||
6823 | #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 | ||
6824 | #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e | ||
6825 | #define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010 | ||
6826 | #define MDIO_WC_REG_XGXSBLK1_LANECTRL0 0x8015 | ||
6827 | #define MDIO_WC_REG_XGXSBLK1_LANECTRL1 0x8016 | ||
6828 | #define MDIO_WC_REG_XGXSBLK1_LANECTRL2 0x8017 | ||
6829 | #define MDIO_WC_REG_TX0_ANA_CTRL0 0x8061 | ||
6830 | #define MDIO_WC_REG_TX1_ANA_CTRL0 0x8071 | ||
6831 | #define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081 | ||
6832 | #define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091 | ||
6833 | #define MDIO_WC_REG_TX0_TX_DRIVER 0x8067 | ||
6834 | #define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04 | ||
6835 | #define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0 | ||
6836 | #define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08 | ||
6837 | #define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 | ||
6838 | #define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET 0x0c | ||
6839 | #define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK 0x7000 | ||
6840 | #define MDIO_WC_REG_TX1_TX_DRIVER 0x8077 | ||
6841 | #define MDIO_WC_REG_TX2_TX_DRIVER 0x8087 | ||
6842 | #define MDIO_WC_REG_TX3_TX_DRIVER 0x8097 | ||
6843 | #define MDIO_WC_REG_RX0_ANARXCONTROL1G 0x80b9 | ||
6844 | #define MDIO_WC_REG_RX2_ANARXCONTROL1G 0x80d9 | ||
6845 | #define MDIO_WC_REG_RX0_PCI_CTRL 0x80ba | ||
6846 | #define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca | ||
6847 | #define MDIO_WC_REG_RX2_PCI_CTRL 0x80da | ||
6848 | #define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea | ||
6849 | #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 | ||
6850 | #define MDIO_WC_REG_XGXS_STATUS3 0x8129 | ||
6851 | #define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 | ||
6852 | #define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131 | ||
6853 | #define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141 | ||
6854 | #define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B | ||
6855 | #define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169 | ||
6856 | #define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0 | ||
6857 | #define MDIO_WC_REG_GP2_STATUS_GP_2_1 0x81d1 | ||
6858 | #define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2 | ||
6859 | #define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3 | ||
6860 | #define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4 | ||
6861 | #define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE | ||
6862 | #define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0 | ||
6863 | #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2 | ||
6864 | #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0 | ||
6865 | #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0 | ||
6866 | #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1 | ||
6867 | #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2 | ||
6868 | #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3 | ||
6869 | #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4 | ||
6870 | #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4 | ||
6871 | #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8 | ||
6872 | #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc | ||
6873 | #define MDIO_WC_REG_UC_INFO_B1_CRC 0x81FE | ||
6874 | #define MDIO_WC_REG_DSC_SMC 0x8213 | ||
6875 | #define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0 0x821e | ||
6876 | #define MDIO_WC_REG_TX_FIR_TAP 0x82e2 | ||
6877 | #define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00 | ||
6878 | #define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f | ||
6879 | #define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04 | ||
6880 | #define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0 | ||
6881 | #define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a | ||
6882 | #define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00 | ||
6883 | #define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000 | ||
6884 | #define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3 | ||
6885 | #define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6 | ||
6886 | #define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7 | ||
6887 | #define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL 0x82e8 | ||
6888 | #define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL 0x82ec | ||
6889 | #define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1 0x8300 | ||
6890 | #define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2 0x8301 | ||
6891 | #define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3 0x8302 | ||
6892 | #define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1 0x8304 | ||
6893 | #define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308 | ||
6894 | #define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309 | ||
6895 | #define MDIO_WC_REG_DIGITAL3_UP1 0x8329 | ||
6896 | #define MDIO_WC_REG_DIGITAL4_MISC3 0x833c | ||
6897 | #define MDIO_WC_REG_DIGITAL5_MISC6 0x8345 | ||
6898 | #define MDIO_WC_REG_DIGITAL5_MISC7 0x8349 | ||
6899 | #define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e | ||
6900 | #define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350 | ||
6901 | #define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368 | ||
6902 | #define MDIO_WC_REG_TX66_CONTROL 0x83b0 | ||
6903 | #define MDIO_WC_REG_RX66_CONTROL 0x83c0 | ||
6904 | #define MDIO_WC_REG_RX66_SCW0 0x83c2 | ||
6905 | #define MDIO_WC_REG_RX66_SCW1 0x83c3 | ||
6906 | #define MDIO_WC_REG_RX66_SCW2 0x83c4 | ||
6907 | #define MDIO_WC_REG_RX66_SCW3 0x83c5 | ||
6908 | #define MDIO_WC_REG_RX66_SCW0_MASK 0x83c6 | ||
6909 | #define MDIO_WC_REG_RX66_SCW1_MASK 0x83c7 | ||
6910 | #define MDIO_WC_REG_RX66_SCW2_MASK 0x83c8 | ||
6911 | #define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9 | ||
6912 | #define MDIO_WC_REG_FX100_CTRL1 0x8400 | ||
6913 | #define MDIO_WC_REG_FX100_CTRL3 0x8402 | ||
6914 | |||
6915 | #define MDIO_WC_REG_MICROBLK_CMD 0xffc2 | ||
6916 | #define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5 | ||
6917 | #define MDIO_WC_REG_MICROBLK_CMD3 0xffcc | ||
6918 | |||
6919 | #define MDIO_WC_REG_AERBLK_AER 0xffde | ||
6920 | #define MDIO_WC_REG_COMBO_IEEE0_MIICTRL 0xffe0 | ||
6921 | #define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT 0xffe1 | ||
6922 | |||
6923 | #define MDIO_WC0_XGXS_BLK2_LANE_RESET 0x810A | ||
6924 | #define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0 | ||
6925 | #define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4 | ||
6926 | |||
6927 | #define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2 0x8141 | ||
6928 | |||
6929 | #define DIGITAL5_ACTUAL_SPEED_TX_MASK 0x003f | ||
6930 | |||
6931 | /* 54618se */ | ||
6932 | #define MDIO_REG_GPHY_PHYID_LSB 0x3 | ||
6933 | #define MDIO_REG_GPHY_ID_54618SE 0x5cd5 | ||
6934 | #define MDIO_REG_GPHY_CL45_ADDR_REG 0xd | ||
6935 | #define MDIO_REG_GPHY_CL45_DATA_REG 0xe | ||
6936 | #define MDIO_REG_GPHY_EEE_ADV 0x3c | ||
6937 | #define MDIO_REG_GPHY_EEE_1G (0x1 << 2) | ||
6938 | #define MDIO_REG_GPHY_EEE_100 (0x1 << 1) | ||
6939 | #define MDIO_REG_GPHY_EEE_RESOLVED 0x803e | ||
6940 | #define MDIO_REG_INTR_STATUS 0x1a | ||
6941 | #define MDIO_REG_INTR_MASK 0x1b | ||
6942 | #define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1) | ||
6943 | #define MDIO_REG_GPHY_SHADOW 0x1c | ||
6944 | #define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10) | ||
6945 | #define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15) | ||
6946 | #define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10) | ||
6947 | #define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8) | ||
6948 | |||
6204 | #define IGU_FUNC_BASE 0x0400 | 6949 | #define IGU_FUNC_BASE 0x0400 |
6205 | 6950 | ||
6206 | #define IGU_ADDR_MSIX 0x0000 | 6951 | #define IGU_ADDR_MSIX 0x0000 |
@@ -6217,11 +6962,6 @@ The other bits are reserved and should be zero*/ | |||
6217 | #define IGU_ADDR_MSI_ADDR_HI 0x0212 | 6962 | #define IGU_ADDR_MSI_ADDR_HI 0x0212 |
6218 | #define IGU_ADDR_MSI_DATA 0x0213 | 6963 | #define IGU_ADDR_MSI_DATA 0x0213 |
6219 | 6964 | ||
6220 | #define IGU_INT_ENABLE 0 | ||
6221 | #define IGU_INT_DISABLE 1 | ||
6222 | #define IGU_INT_NOP 2 | ||
6223 | #define IGU_INT_NOP2 3 | ||
6224 | |||
6225 | #define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0 | 6965 | #define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0 |
6226 | #define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1 | 6966 | #define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1 |
6227 | #define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2 | 6967 | #define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2 |
@@ -6292,15 +7032,6 @@ The other bits are reserved and should be zero*/ | |||
6292 | #define IGU_BC_BASE_DSB_PROD 128 | 7032 | #define IGU_BC_BASE_DSB_PROD 128 |
6293 | #define IGU_NORM_BASE_DSB_PROD 136 | 7033 | #define IGU_NORM_BASE_DSB_PROD 136 |
6294 | 7034 | ||
6295 | #define IGU_CTRL_CMD_TYPE_WR\ | ||
6296 | 1 | ||
6297 | #define IGU_CTRL_CMD_TYPE_RD\ | ||
6298 | 0 | ||
6299 | |||
6300 | #define IGU_SEG_ACCESS_NORM 0 | ||
6301 | #define IGU_SEG_ACCESS_DEF 1 | ||
6302 | #define IGU_SEG_ACCESS_ATTN 2 | ||
6303 | |||
6304 | /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \ | 7035 | /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \ |
6305 | [5:2] = 0; [1:0] = PF number) */ | 7036 | [5:2] = 0; [1:0] = PF number) */ |
6306 | #define IGU_FID_ENCODE_IS_PF (0x1<<6) | 7037 | #define IGU_FID_ENCODE_IS_PF (0x1<<6) |
diff --git a/drivers/net/bnx2x/bnx2x_sp.c b/drivers/net/bnx2x/bnx2x_sp.c new file mode 100644 index 000000000000..5bdf09459a08 --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_sp.c | |||
@@ -0,0 +1,5333 @@ | |||
1 | /* bnx2x_sp.c: Broadcom Everest network driver. | ||
2 | * | ||
3 | * Copyright 2011 Broadcom Corporation | ||
4 | * | ||
5 | * Unless you and Broadcom execute a separate written software license | ||
6 | * agreement governing use of this software, this software is licensed to you | ||
7 | * under the terms of the GNU General Public License version 2, available | ||
8 | * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). | ||
9 | * | ||
10 | * Notwithstanding the above, under no circumstances may you combine this | ||
11 | * software in any way with any other Broadcom software provided under a | ||
12 | * license other than the GPL, without Broadcom's express prior written | ||
13 | * consent. | ||
14 | * | ||
15 | * Maintained by: Eilon Greenstein <eilong@broadcom.com> | ||
16 | * Written by: Vladislav Zolotarov | ||
17 | * | ||
18 | */ | ||
19 | #include <linux/version.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/crc32.h> | ||
22 | #include <linux/netdevice.h> | ||
23 | #include <linux/etherdevice.h> | ||
24 | #include <linux/crc32c.h> | ||
25 | #include "bnx2x.h" | ||
26 | #include "bnx2x_cmn.h" | ||
27 | #include "bnx2x_sp.h" | ||
28 | |||
29 | #define BNX2X_MAX_EMUL_MULTI 16 | ||
30 | |||
31 | /**** Exe Queue interfaces ****/ | ||
32 | |||
33 | /** | ||
34 | * bnx2x_exe_queue_init - init the Exe Queue object | ||
35 | * | ||
36 | * @o: pointer to the object | ||
37 | * @exe_len: length | ||
38 | * @owner: pointer to the owner | ||
39 | * @validate: validate function pointer | ||
40 | * @optimize: optimize function pointer | ||
41 | * @exec: execute function pointer | ||
42 | * @get: get function pointer | ||
43 | */ | ||
44 | static inline void bnx2x_exe_queue_init(struct bnx2x *bp, | ||
45 | struct bnx2x_exe_queue_obj *o, | ||
46 | int exe_len, | ||
47 | union bnx2x_qable_obj *owner, | ||
48 | exe_q_validate validate, | ||
49 | exe_q_optimize optimize, | ||
50 | exe_q_execute exec, | ||
51 | exe_q_get get) | ||
52 | { | ||
53 | memset(o, 0, sizeof(*o)); | ||
54 | |||
55 | INIT_LIST_HEAD(&o->exe_queue); | ||
56 | INIT_LIST_HEAD(&o->pending_comp); | ||
57 | |||
58 | spin_lock_init(&o->lock); | ||
59 | |||
60 | o->exe_chunk_len = exe_len; | ||
61 | o->owner = owner; | ||
62 | |||
63 | /* Owner specific callbacks */ | ||
64 | o->validate = validate; | ||
65 | o->optimize = optimize; | ||
66 | o->execute = exec; | ||
67 | o->get = get; | ||
68 | |||
69 | DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk " | ||
70 | "length of %d\n", exe_len); | ||
71 | } | ||
72 | |||
73 | static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp, | ||
74 | struct bnx2x_exeq_elem *elem) | ||
75 | { | ||
76 | DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n"); | ||
77 | kfree(elem); | ||
78 | } | ||
79 | |||
80 | static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o) | ||
81 | { | ||
82 | struct bnx2x_exeq_elem *elem; | ||
83 | int cnt = 0; | ||
84 | |||
85 | spin_lock_bh(&o->lock); | ||
86 | |||
87 | list_for_each_entry(elem, &o->exe_queue, link) | ||
88 | cnt++; | ||
89 | |||
90 | spin_unlock_bh(&o->lock); | ||
91 | |||
92 | return cnt; | ||
93 | } | ||
94 | |||
95 | /** | ||
96 | * bnx2x_exe_queue_add - add a new element to the execution queue | ||
97 | * | ||
98 | * @bp: driver handle | ||
99 | * @o: queue | ||
100 | * @cmd: new command to add | ||
101 | * @restore: true - do not optimize the command | ||
102 | * | ||
103 | * If the element is optimized or is illegal, frees it. | ||
104 | */ | ||
105 | static inline int bnx2x_exe_queue_add(struct bnx2x *bp, | ||
106 | struct bnx2x_exe_queue_obj *o, | ||
107 | struct bnx2x_exeq_elem *elem, | ||
108 | bool restore) | ||
109 | { | ||
110 | int rc; | ||
111 | |||
112 | spin_lock_bh(&o->lock); | ||
113 | |||
114 | if (!restore) { | ||
115 | /* Try to cancel this element from the queue */ | ||
116 | rc = o->optimize(bp, o->owner, elem); | ||
117 | if (rc) | ||
118 | goto free_and_exit; | ||
119 | |||
120 | /* Check if this request is ok */ | ||
121 | rc = o->validate(bp, o->owner, elem); | ||
122 | if (rc) { | ||
123 | BNX2X_ERR("Preamble failed: %d\n", rc); | ||
124 | goto free_and_exit; | ||
125 | } | ||
126 | } | ||
127 | |||
128 | /* If so, add it to the execution queue */ | ||
129 | list_add_tail(&elem->link, &o->exe_queue); | ||
130 | |||
131 | spin_unlock_bh(&o->lock); | ||
132 | |||
133 | return 0; | ||
134 | |||
135 | free_and_exit: | ||
136 | bnx2x_exe_queue_free_elem(bp, elem); | ||
137 | |||
138 | spin_unlock_bh(&o->lock); | ||
139 | |||
140 | return rc; | ||
141 | |||
142 | } | ||
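/*
 * Illustrative sketch: a caller would typically allocate an element, fill in
 * its command data and queue it; the element is freed internally if it is
 * optimized away or fails validation. "exeq" is a placeholder for a
 * caller-owned struct bnx2x_exe_queue_obj.
 *
 *	struct bnx2x_exeq_elem *elem = bnx2x_exe_queue_alloc_elem(bp);
 *
 *	if (!elem)
 *		return -ENOMEM;
 *	elem->cmd_len = 1;
 *	elem->cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
 *	... fill elem->cmd_data.vlan_mac.u.mac ...
 *	rc = bnx2x_exe_queue_add(bp, exeq, elem, false);
 */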
143 | |||
144 | static inline void __bnx2x_exe_queue_reset_pending( | ||
145 | struct bnx2x *bp, | ||
146 | struct bnx2x_exe_queue_obj *o) | ||
147 | { | ||
148 | struct bnx2x_exeq_elem *elem; | ||
149 | |||
150 | while (!list_empty(&o->pending_comp)) { | ||
151 | elem = list_first_entry(&o->pending_comp, | ||
152 | struct bnx2x_exeq_elem, link); | ||
153 | |||
154 | list_del(&elem->link); | ||
155 | bnx2x_exe_queue_free_elem(bp, elem); | ||
156 | } | ||
157 | } | ||
158 | |||
159 | static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, | ||
160 | struct bnx2x_exe_queue_obj *o) | ||
161 | { | ||
162 | |||
163 | spin_lock_bh(&o->lock); | ||
164 | |||
165 | __bnx2x_exe_queue_reset_pending(bp, o); | ||
166 | |||
167 | spin_unlock_bh(&o->lock); | ||
168 | |||
169 | } | ||
170 | |||
171 | /** | ||
172 | * bnx2x_exe_queue_step - execute one execution chunk atomically | ||
173 | * | ||
174 | * @bp: driver handle | ||
175 | * @o: queue | ||
176 | * @ramrod_flags: flags | ||
177 | * | ||
178 | * (Atomicity is ensured using the exe_queue->lock). | ||
179 | */ | ||
180 | static inline int bnx2x_exe_queue_step(struct bnx2x *bp, | ||
181 | struct bnx2x_exe_queue_obj *o, | ||
182 | unsigned long *ramrod_flags) | ||
183 | { | ||
184 | struct bnx2x_exeq_elem *elem, spacer; | ||
185 | int cur_len = 0, rc; | ||
186 | |||
187 | memset(&spacer, 0, sizeof(spacer)); | ||
188 | |||
189 | spin_lock_bh(&o->lock); | ||
190 | |||
191 | /* | ||
192 | * Next step should not be performed until the current is finished, | ||
193 | * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to | ||
194 | * properly clear object internals without sending any command to the FW | ||
195 | * which also implies there won't be any completion to clear the | ||
196 | * 'pending' list. | ||
197 | */ | ||
198 | if (!list_empty(&o->pending_comp)) { | ||
199 | if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { | ||
200 | DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: " | ||
201 | "resetting pending_comp\n"); | ||
202 | __bnx2x_exe_queue_reset_pending(bp, o); | ||
203 | } else { | ||
204 | spin_unlock_bh(&o->lock); | ||
205 | return 1; | ||
206 | } | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * Run through the pending commands list and create a next | ||
211 | * execution chunk. | ||
212 | */ | ||
213 | while (!list_empty(&o->exe_queue)) { | ||
214 | elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem, | ||
215 | link); | ||
216 | WARN_ON(!elem->cmd_len); | ||
217 | |||
218 | if (cur_len + elem->cmd_len <= o->exe_chunk_len) { | ||
219 | cur_len += elem->cmd_len; | ||
220 | /* | ||
221 | * Prevent both lists from being empty when moving an | ||
222 | * element. This allows calling | ||
223 | * bnx2x_exe_queue_empty() without locking. | ||
224 | */ | ||
225 | list_add_tail(&spacer.link, &o->pending_comp); | ||
226 | mb(); | ||
227 | list_del(&elem->link); | ||
228 | list_add_tail(&elem->link, &o->pending_comp); | ||
229 | list_del(&spacer.link); | ||
230 | } else | ||
231 | break; | ||
232 | } | ||
233 | |||
234 | /* Sanity check */ | ||
235 | if (!cur_len) { | ||
236 | spin_unlock_bh(&o->lock); | ||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); | ||
241 | if (rc < 0) | ||
242 | /* | ||
243 | * In case of an error return the commands back to the queue | ||
244 | * and reset the pending_comp. | ||
245 | */ | ||
246 | list_splice_init(&o->pending_comp, &o->exe_queue); | ||
247 | else if (!rc) | ||
248 | /* | ||
249 | * If zero is returned, it means there are no outstanding pending | ||
250 | * completions and we may dismiss the pending list. | ||
251 | */ | ||
252 | __bnx2x_exe_queue_reset_pending(bp, o); | ||
253 | |||
254 | spin_unlock_bh(&o->lock); | ||
255 | return rc; | ||
256 | } | ||
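/*
 * Illustration of the return semantics visible in the body above:
 * bnx2x_exe_queue_step() returns 1 when a previous chunk is still pending
 * completion, 0 when there was nothing to execute or the chunk completed with
 * no outstanding completions, a positive value from ->execute() while
 * completions are outstanding, and a negative errno on failure (the commands
 * are then put back on the execution queue). One possible caller loop under
 * these semantics, with "exeq" as a placeholder object:
 *
 *	while (!bnx2x_exe_queue_empty(exeq)) {
 *		rc = bnx2x_exe_queue_step(bp, exeq, &ramrod_flags);
 *		if (rc < 0)
 *			return rc;
 *		if (rc > 0)
 *			break;		(wait for the FW completion first)
 *	}
 */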
257 | |||
258 | static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o) | ||
259 | { | ||
260 | bool empty = list_empty(&o->exe_queue); | ||
261 | |||
262 | /* Don't reorder!!! */ | ||
263 | mb(); | ||
264 | |||
265 | return empty && list_empty(&o->pending_comp); | ||
266 | } | ||
267 | |||
268 | static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem( | ||
269 | struct bnx2x *bp) | ||
270 | { | ||
271 | DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n"); | ||
272 | return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC); | ||
273 | } | ||
274 | |||
275 | /************************ raw_obj functions ***********************************/ | ||
276 | static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o) | ||
277 | { | ||
278 | return !!test_bit(o->state, o->pstate); | ||
279 | } | ||
280 | |||
281 | static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o) | ||
282 | { | ||
283 | smp_mb__before_clear_bit(); | ||
284 | clear_bit(o->state, o->pstate); | ||
285 | smp_mb__after_clear_bit(); | ||
286 | } | ||
287 | |||
288 | static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o) | ||
289 | { | ||
290 | smp_mb__before_clear_bit(); | ||
291 | set_bit(o->state, o->pstate); | ||
292 | smp_mb__after_clear_bit(); | ||
293 | } | ||
294 | |||
295 | /** | ||
296 | * bnx2x_state_wait - wait until the given bit(state) is cleared | ||
297 | * | ||
298 | * @bp: device handle | ||
299 | * @state: state which is to be cleared | ||
300 | * @state_p: state buffer | ||
301 | * | ||
302 | */ | ||
303 | static inline int bnx2x_state_wait(struct bnx2x *bp, int state, | ||
304 | unsigned long *pstate) | ||
305 | { | ||
306 | /* can take a while if any port is running */ | ||
307 | int cnt = 5000; | ||
308 | |||
309 | |||
310 | if (CHIP_REV_IS_EMUL(bp)) | ||
311 | cnt *= 20; | ||
312 | |||
313 | DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state); | ||
314 | |||
315 | might_sleep(); | ||
316 | while (cnt--) { | ||
317 | if (!test_bit(state, pstate)) { | ||
318 | #ifdef BNX2X_STOP_ON_ERROR | ||
319 | DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt); | ||
320 | #endif | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | usleep_range(1000, 1000); | ||
325 | |||
326 | if (bp->panic) | ||
327 | return -EIO; | ||
328 | } | ||
329 | |||
330 | /* timeout! */ | ||
331 | BNX2X_ERR("timeout waiting for state %d\n", state); | ||
332 | #ifdef BNX2X_STOP_ON_ERROR | ||
333 | bnx2x_panic(); | ||
334 | #endif | ||
335 | |||
336 | return -EBUSY; | ||
337 | } | ||
338 | |||
339 | static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw) | ||
340 | { | ||
341 | return bnx2x_state_wait(bp, raw->state, raw->pstate); | ||
342 | } | ||
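/*
 * Illustration using only the helpers defined above: the raw object tracks
 * ramrod state with a single bit in *pstate, so a synchronous sequence would
 * roughly be
 *
 *	bnx2x_raw_set_pending(raw);
 *	... post the ramrod to the FW ...
 *	rc = bnx2x_raw_wait(bp, raw);	(0 on success, -EBUSY on timeout,
 *					 -EIO on panic)
 *
 * with the completion path calling bnx2x_raw_clear_pending(raw).
 */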
343 | |||
344 | /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ | ||
345 | /* credit handling callbacks */ | ||
346 | static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset) | ||
347 | { | ||
348 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; | ||
349 | |||
350 | WARN_ON(!mp); | ||
351 | |||
352 | return mp->get_entry(mp, offset); | ||
353 | } | ||
354 | |||
355 | static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o) | ||
356 | { | ||
357 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; | ||
358 | |||
359 | WARN_ON(!mp); | ||
360 | |||
361 | return mp->get(mp, 1); | ||
362 | } | ||
363 | |||
364 | static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset) | ||
365 | { | ||
366 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; | ||
367 | |||
368 | WARN_ON(!vp); | ||
369 | |||
370 | return vp->get_entry(vp, offset); | ||
371 | } | ||
372 | |||
373 | static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o) | ||
374 | { | ||
375 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; | ||
376 | |||
377 | WARN_ON(!vp); | ||
378 | |||
379 | return vp->get(vp, 1); | ||
380 | } | ||
381 | |||
382 | static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) | ||
383 | { | ||
384 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; | ||
385 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; | ||
386 | |||
387 | if (!mp->get(mp, 1)) | ||
388 | return false; | ||
389 | |||
390 | if (!vp->get(vp, 1)) { | ||
391 | mp->put(mp, 1); | ||
392 | return false; | ||
393 | } | ||
394 | |||
395 | return true; | ||
396 | } | ||
397 | |||
398 | static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset) | ||
399 | { | ||
400 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; | ||
401 | |||
402 | return mp->put_entry(mp, offset); | ||
403 | } | ||
404 | |||
405 | static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o) | ||
406 | { | ||
407 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; | ||
408 | |||
409 | return mp->put(mp, 1); | ||
410 | } | ||
411 | |||
412 | static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset) | ||
413 | { | ||
414 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; | ||
415 | |||
416 | return vp->put_entry(vp, offset); | ||
417 | } | ||
418 | |||
419 | static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o) | ||
420 | { | ||
421 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; | ||
422 | |||
423 | return vp->put(vp, 1); | ||
424 | } | ||
425 | |||
426 | static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) | ||
427 | { | ||
428 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; | ||
429 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; | ||
430 | |||
431 | if (!mp->put(mp, 1)) | ||
432 | return false; | ||
433 | |||
434 | if (!vp->put(vp, 1)) { | ||
435 | mp->get(mp, 1); | ||
436 | return false; | ||
437 | } | ||
438 | |||
439 | return true; | ||
440 | } | ||
441 | |||
442 | /* check_add() callbacks */ | ||
443 | static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o, | ||
444 | union bnx2x_classification_ramrod_data *data) | ||
445 | { | ||
446 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
447 | |||
448 | if (!is_valid_ether_addr(data->mac.mac)) | ||
449 | return -EINVAL; | ||
450 | |||
451 | /* Check if a requested MAC already exists */ | ||
452 | list_for_each_entry(pos, &o->head, link) | ||
453 | if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) | ||
454 | return -EEXIST; | ||
455 | |||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o, | ||
460 | union bnx2x_classification_ramrod_data *data) | ||
461 | { | ||
462 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
463 | |||
464 | list_for_each_entry(pos, &o->head, link) | ||
465 | if (data->vlan.vlan == pos->u.vlan.vlan) | ||
466 | return -EEXIST; | ||
467 | |||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o, | ||
472 | union bnx2x_classification_ramrod_data *data) | ||
473 | { | ||
474 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
475 | |||
476 | list_for_each_entry(pos, &o->head, link) | ||
477 | if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && | ||
478 | (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, | ||
479 | ETH_ALEN))) | ||
480 | return -EEXIST; | ||
481 | |||
482 | return 0; | ||
483 | } | ||
484 | |||
485 | |||
486 | /* check_del() callbacks */ | ||
487 | static struct bnx2x_vlan_mac_registry_elem * | ||
488 | bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o, | ||
489 | union bnx2x_classification_ramrod_data *data) | ||
490 | { | ||
491 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
492 | |||
493 | list_for_each_entry(pos, &o->head, link) | ||
494 | if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) | ||
495 | return pos; | ||
496 | |||
497 | return NULL; | ||
498 | } | ||
499 | |||
500 | static struct bnx2x_vlan_mac_registry_elem * | ||
501 | bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o, | ||
502 | union bnx2x_classification_ramrod_data *data) | ||
503 | { | ||
504 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
505 | |||
506 | list_for_each_entry(pos, &o->head, link) | ||
507 | if (data->vlan.vlan == pos->u.vlan.vlan) | ||
508 | return pos; | ||
509 | |||
510 | return NULL; | ||
511 | } | ||
512 | |||
513 | static struct bnx2x_vlan_mac_registry_elem * | ||
514 | bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o, | ||
515 | union bnx2x_classification_ramrod_data *data) | ||
516 | { | ||
517 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
518 | |||
519 | list_for_each_entry(pos, &o->head, link) | ||
520 | if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && | ||
521 | (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, | ||
522 | ETH_ALEN))) | ||
523 | return pos; | ||
524 | |||
525 | return NULL; | ||
526 | } | ||
527 | |||
528 | /* check_move() callback */ | ||
529 | static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o, | ||
530 | struct bnx2x_vlan_mac_obj *dst_o, | ||
531 | union bnx2x_classification_ramrod_data *data) | ||
532 | { | ||
533 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
534 | int rc; | ||
535 | |||
536 | /* Check if we can delete the requested configuration from the first | ||
537 | * object. | ||
538 | */ | ||
539 | pos = src_o->check_del(src_o, data); | ||
540 | |||
541 | /* check if configuration can be added */ | ||
542 | rc = dst_o->check_add(dst_o, data); | ||
543 | |||
544 | /* If this classification can not be added (is already set) | ||
545 | * or can't be deleted - return an error. | ||
546 | */ | ||
547 | if (rc || !pos) | ||
548 | return false; | ||
549 | |||
550 | return true; | ||
551 | } | ||
552 | |||
553 | static bool bnx2x_check_move_always_err( | ||
554 | struct bnx2x_vlan_mac_obj *src_o, | ||
555 | struct bnx2x_vlan_mac_obj *dst_o, | ||
556 | union bnx2x_classification_ramrod_data *data) | ||
557 | { | ||
558 | return false; | ||
559 | } | ||
560 | |||
561 | |||
562 | static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) | ||
563 | { | ||
564 | struct bnx2x_raw_obj *raw = &o->raw; | ||
565 | u8 rx_tx_flag = 0; | ||
566 | |||
567 | if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || | ||
568 | (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) | ||
569 | rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD; | ||
570 | |||
571 | if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || | ||
572 | (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) | ||
573 | rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD; | ||
574 | |||
575 | return rx_tx_flag; | ||
576 | } | ||
577 | |||
578 | /* LLH CAM line allocations */ | ||
579 | enum { | ||
580 | LLH_CAM_ISCSI_ETH_LINE = 0, | ||
581 | LLH_CAM_ETH_LINE, | ||
582 | LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 | ||
583 | }; | ||
584 | |||
585 | static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp, | ||
586 | bool add, unsigned char *dev_addr, int index) | ||
587 | { | ||
588 | u32 wb_data[2]; | ||
589 | u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : | ||
590 | NIG_REG_LLH0_FUNC_MEM; | ||
591 | |||
592 | if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE) | ||
593 | return; | ||
594 | |||
595 | DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n", | ||
596 | (add ? "ADD" : "DELETE"), index); | ||
597 | |||
598 | if (add) { | ||
599 | /* LLH_FUNC_MEM is a u64 WB register */ | ||
600 | reg_offset += 8*index; | ||
601 | |||
602 | wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | | ||
603 | (dev_addr[4] << 8) | dev_addr[5]); | ||
604 | wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); | ||
605 | |||
606 | REG_WR_DMAE(bp, reg_offset, wb_data, 2); | ||
607 | } | ||
608 | |||
609 | REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : | ||
610 | NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add); | ||
611 | } | ||
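/*
 * Worked example of the packing above: for a MAC address of 00:11:22:33:44:55
 * the two write-back words are
 *
 *	wb_data[0] = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55 = 0x22334455
 *	wb_data[1] = (0x00 << 8) | 0x11 = 0x00000011
 *
 * and they are written as one 64-bit LLH_FUNC_MEM entry at offset 8 * index.
 */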
612 | |||
613 | /** | ||
614 | * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod | ||
615 | * | ||
616 | * @bp: device handle | ||
617 | * @o: queue for which we want to configure this rule | ||
618 | * @add: if true the command is an ADD command, DEL otherwise | ||
619 | * @opcode: CLASSIFY_RULE_OPCODE_XXX | ||
620 | * @hdr: pointer to a header to setup | ||
621 | * | ||
622 | */ | ||
623 | static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, | ||
624 | struct bnx2x_vlan_mac_obj *o, bool add, int opcode, | ||
625 | struct eth_classify_cmd_header *hdr) | ||
626 | { | ||
627 | struct bnx2x_raw_obj *raw = &o->raw; | ||
628 | |||
629 | hdr->client_id = raw->cl_id; | ||
630 | hdr->func_id = raw->func_id; | ||
631 | |||
632 | /* Rx and/or Tx (internal switching) configuration? */ | ||
633 | hdr->cmd_general_data |= | ||
634 | bnx2x_vlan_mac_get_rx_tx_flag(o); | ||
635 | |||
636 | if (add) | ||
637 | hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD; | ||
638 | |||
639 | hdr->cmd_general_data |= | ||
640 | (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT); | ||
641 | } | ||
642 | |||
643 | /** | ||
644 | * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header | ||
645 | * | ||
646 | * @cid: connection id | ||
647 | * @type: BNX2X_FILTER_XXX_PENDING | ||
648 | * @hdr: pointer to header to setup | ||
649 | * @rule_cnt: number of rules | ||
650 | * | ||
651 | * Currently we always configure one rule; the echo field is set to contain a | ||
652 | * CID and an opcode type. | ||
653 | */ | ||
654 | static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type, | ||
655 | struct eth_classify_header *hdr, int rule_cnt) | ||
656 | { | ||
657 | hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT); | ||
658 | hdr->rule_cnt = (u8)rule_cnt; | ||
659 | } | ||
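/*
 * Illustration: the echo field packs both the SW connection id and the
 * pending-command type, so a completion handler can recover them with the
 * same mask/shift (hypothetical decode, mirroring the encode above):
 *
 *	cid  = hdr->echo & BNX2X_SWCID_MASK;
 *	type = hdr->echo >> BNX2X_SWCID_SHIFT;
 */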
660 | |||
661 | |||
662 | /* hw_config() callbacks */ | ||
663 | static void bnx2x_set_one_mac_e2(struct bnx2x *bp, | ||
664 | struct bnx2x_vlan_mac_obj *o, | ||
665 | struct bnx2x_exeq_elem *elem, int rule_idx, | ||
666 | int cam_offset) | ||
667 | { | ||
668 | struct bnx2x_raw_obj *raw = &o->raw; | ||
669 | struct eth_classify_rules_ramrod_data *data = | ||
670 | (struct eth_classify_rules_ramrod_data *)(raw->rdata); | ||
671 | int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; | ||
672 | union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; | ||
673 | bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; | ||
674 | unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; | ||
675 | u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; | ||
676 | |||
677 | /* | ||
678 | * Set LLH CAM entry: currently only iSCSI and ETH macs are | ||
679 | * relevant. In addition, current implementation is tuned for a | ||
680 | * single ETH MAC. | ||
681 | * | ||
682 | * When multiple unicast ETH MACs PF configuration in switch | ||
683 | * independent mode is required (NetQ, multiple netdev MACs, | ||
684 | * etc.), consider better utilisation of 8 per function MAC | ||
685 | * entries in the LLH register. There are also | ||
686 | * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the | ||
687 | * total number of CAM entries to 16. | ||
688 | * | ||
689 | * Currently we won't configure NIG for MACs other than a primary ETH | ||
690 | * MAC and iSCSI L2 MAC. | ||
691 | * | ||
692 | * If this MAC is moving from one Queue to another, no need to change | ||
693 | * NIG configuration. | ||
694 | */ | ||
695 | if (cmd != BNX2X_VLAN_MAC_MOVE) { | ||
696 | if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags)) | ||
697 | bnx2x_set_mac_in_nig(bp, add, mac, | ||
698 | LLH_CAM_ISCSI_ETH_LINE); | ||
699 | else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags)) | ||
700 | bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE); | ||
701 | } | ||
702 | |||
703 | /* Reset the ramrod data buffer for the first rule */ | ||
704 | if (rule_idx == 0) | ||
705 | memset(data, 0, sizeof(*data)); | ||
706 | |||
707 | /* Setup a command header */ | ||
708 | bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC, | ||
709 | &rule_entry->mac.header); | ||
710 | |||
711 | DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for " | ||
712 | "Queue %d\n", (add ? "add" : "delete"), | ||
713 | BNX2X_MAC_PRN_LIST(mac), raw->cl_id); | ||
714 | |||
715 | /* Set a MAC itself */ | ||
716 | bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, | ||
717 | &rule_entry->mac.mac_mid, | ||
718 | &rule_entry->mac.mac_lsb, mac); | ||
719 | |||
720 | /* MOVE: Add a rule that will add this MAC to the target Queue */ | ||
721 | if (cmd == BNX2X_VLAN_MAC_MOVE) { | ||
722 | rule_entry++; | ||
723 | rule_cnt++; | ||
724 | |||
725 | /* Setup ramrod data */ | ||
726 | bnx2x_vlan_mac_set_cmd_hdr_e2(bp, | ||
727 | elem->cmd_data.vlan_mac.target_obj, | ||
728 | true, CLASSIFY_RULE_OPCODE_MAC, | ||
729 | &rule_entry->mac.header); | ||
730 | |||
731 | /* Set a MAC itself */ | ||
732 | bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, | ||
733 | &rule_entry->mac.mac_mid, | ||
734 | &rule_entry->mac.mac_lsb, mac); | ||
735 | } | ||
736 | |||
737 | /* Set the ramrod data header */ | ||
738 | /* TODO: take this to the higher level in order to prevent multiple | ||
739 | writing */ | ||
740 | bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, | ||
741 | rule_cnt); | ||
742 | } | ||
743 | |||
744 | /** | ||
745 | * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod | ||
746 | * | ||
747 | * @bp: device handle | ||
748 | * @o: queue | ||
749 | * @type: | ||
750 | * @cam_offset: offset in cam memory | ||
751 | * @hdr: pointer to a header to setup | ||
752 | * | ||
753 | * E1/E1H | ||
754 | */ | ||
755 | static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, | ||
756 | struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, | ||
757 | struct mac_configuration_hdr *hdr) | ||
758 | { | ||
759 | struct bnx2x_raw_obj *r = &o->raw; | ||
760 | |||
761 | hdr->length = 1; | ||
762 | hdr->offset = (u8)cam_offset; | ||
763 | hdr->client_id = 0xff; | ||
764 | hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT)); | ||
765 | } | ||
766 | |||
767 | static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp, | ||
768 | struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac, | ||
769 | u16 vlan_id, struct mac_configuration_entry *cfg_entry) | ||
770 | { | ||
771 | struct bnx2x_raw_obj *r = &o->raw; | ||
772 | u32 cl_bit_vec = (1 << r->cl_id); | ||
773 | |||
774 | cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec); | ||
775 | cfg_entry->pf_id = r->func_id; | ||
776 | cfg_entry->vlan_id = cpu_to_le16(vlan_id); | ||
777 | |||
778 | if (add) { | ||
779 | SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
780 | T_ETH_MAC_COMMAND_SET); | ||
781 | SET_FLAG(cfg_entry->flags, | ||
782 | MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode); | ||
783 | |||
784 | /* Set a MAC in a ramrod data */ | ||
785 | bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr, | ||
786 | &cfg_entry->middle_mac_addr, | ||
787 | &cfg_entry->lsb_mac_addr, mac); | ||
788 | } else | ||
789 | SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
790 | T_ETH_MAC_COMMAND_INVALIDATE); | ||
791 | } | ||
792 | |||
793 | static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp, | ||
794 | struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add, | ||
795 | u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config) | ||
796 | { | ||
797 | struct mac_configuration_entry *cfg_entry = &config->config_table[0]; | ||
798 | struct bnx2x_raw_obj *raw = &o->raw; | ||
799 | |||
800 | bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset, | ||
801 | &config->hdr); | ||
802 | bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id, | ||
803 | cfg_entry); | ||
804 | |||
805 | DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n", | ||
806 | (add ? "setting" : "clearing"), | ||
807 | BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset); | ||
808 | } | ||
809 | |||
810 | /** | ||
811 | * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data | ||
812 | * | ||
813 | * @bp: device handle | ||
814 | * @o: bnx2x_vlan_mac_obj | ||
815 | * @elem: bnx2x_exeq_elem | ||
816 | * @rule_idx: rule_idx | ||
817 | * @cam_offset: cam_offset | ||
818 | */ | ||
819 | static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, | ||
820 | struct bnx2x_vlan_mac_obj *o, | ||
821 | struct bnx2x_exeq_elem *elem, int rule_idx, | ||
822 | int cam_offset) | ||
823 | { | ||
824 | struct bnx2x_raw_obj *raw = &o->raw; | ||
825 | struct mac_configuration_cmd *config = | ||
826 | (struct mac_configuration_cmd *)(raw->rdata); | ||
827 | /* | ||
828 | * 57710 and 57711 do not support MOVE command, | ||
829 | * so it's either ADD or DEL | ||
830 | */ | ||
831 | bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? | ||
832 | true : false; | ||
833 | |||
834 | /* Reset the ramrod data buffer */ | ||
835 | memset(config, 0, sizeof(*config)); | ||
836 | |||
837 | bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING, | ||
838 | cam_offset, add, | ||
839 | elem->cmd_data.vlan_mac.u.mac.mac, 0, | ||
840 | ETH_VLAN_FILTER_ANY_VLAN, config); | ||
841 | } | ||
842 | |||
843 | static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, | ||
844 | struct bnx2x_vlan_mac_obj *o, | ||
845 | struct bnx2x_exeq_elem *elem, int rule_idx, | ||
846 | int cam_offset) | ||
847 | { | ||
848 | struct bnx2x_raw_obj *raw = &o->raw; | ||
849 | struct eth_classify_rules_ramrod_data *data = | ||
850 | (struct eth_classify_rules_ramrod_data *)(raw->rdata); | ||
851 | int rule_cnt = rule_idx + 1; | ||
852 | union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; | ||
853 | int cmd = elem->cmd_data.vlan_mac.cmd; | ||
854 | bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; | ||
855 | u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; | ||
856 | |||
857 | /* Reset the ramrod data buffer for the first rule */ | ||
858 | if (rule_idx == 0) | ||
859 | memset(data, 0, sizeof(*data)); | ||
860 | |||
861 | /* Set a rule header */ | ||
862 | bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN, | ||
863 | &rule_entry->vlan.header); | ||
864 | |||
865 | DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"), | ||
866 | vlan); | ||
867 | |||
868 | /* Set a VLAN itself */ | ||
869 | rule_entry->vlan.vlan = cpu_to_le16(vlan); | ||
870 | |||
871 | /* MOVE: Add a rule that will add this MAC to the target Queue */ | ||
872 | if (cmd == BNX2X_VLAN_MAC_MOVE) { | ||
873 | rule_entry++; | ||
874 | rule_cnt++; | ||
875 | |||
876 | /* Setup ramrod data */ | ||
877 | bnx2x_vlan_mac_set_cmd_hdr_e2(bp, | ||
878 | elem->cmd_data.vlan_mac.target_obj, | ||
879 | true, CLASSIFY_RULE_OPCODE_VLAN, | ||
880 | &rule_entry->vlan.header); | ||
881 | |||
882 | /* Set a VLAN itself */ | ||
883 | rule_entry->vlan.vlan = cpu_to_le16(vlan); | ||
884 | } | ||
885 | |||
886 | /* Set the ramrod data header */ | ||
887 | /* TODO: take this to the higher level in order to prevent multiple | ||
888 | writing */ | ||
889 | bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, | ||
890 | rule_cnt); | ||
891 | } | ||
892 | |||
893 | static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, | ||
894 | struct bnx2x_vlan_mac_obj *o, | ||
895 | struct bnx2x_exeq_elem *elem, | ||
896 | int rule_idx, int cam_offset) | ||
897 | { | ||
898 | struct bnx2x_raw_obj *raw = &o->raw; | ||
899 | struct eth_classify_rules_ramrod_data *data = | ||
900 | (struct eth_classify_rules_ramrod_data *)(raw->rdata); | ||
901 | int rule_cnt = rule_idx + 1; | ||
902 | union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; | ||
903 | int cmd = elem->cmd_data.vlan_mac.cmd; | ||
904 | bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; | ||
905 | u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; | ||
906 | u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; | ||
907 | |||
908 | |||
909 | /* Reset the ramrod data buffer for the first rule */ | ||
910 | if (rule_idx == 0) | ||
911 | memset(data, 0, sizeof(*data)); | ||
912 | |||
913 | /* Set a rule header */ | ||
914 | bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, | ||
915 | &rule_entry->pair.header); | ||
916 | |||
917 | /* Set VLAN and MAC themselves */ | ||
918 | rule_entry->pair.vlan = cpu_to_le16(vlan); | ||
919 | bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, | ||
920 | &rule_entry->pair.mac_mid, | ||
921 | &rule_entry->pair.mac_lsb, mac); | ||
922 | |||
923 | /* MOVE: Add a rule that will add this MAC to the target Queue */ | ||
924 | if (cmd == BNX2X_VLAN_MAC_MOVE) { | ||
925 | rule_entry++; | ||
926 | rule_cnt++; | ||
927 | |||
928 | /* Setup ramrod data */ | ||
929 | bnx2x_vlan_mac_set_cmd_hdr_e2(bp, | ||
930 | elem->cmd_data.vlan_mac.target_obj, | ||
931 | true, CLASSIFY_RULE_OPCODE_PAIR, | ||
932 | &rule_entry->pair.header); | ||
933 | |||
934 | /* Set a VLAN itself */ | ||
935 | rule_entry->pair.vlan = cpu_to_le16(vlan); | ||
936 | bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, | ||
937 | &rule_entry->pair.mac_mid, | ||
938 | &rule_entry->pair.mac_lsb, mac); | ||
939 | } | ||
940 | |||
941 | /* Set the ramrod data header */ | ||
942 | /* TODO: take this to the higher level in order to prevent multiple | ||
943 | writing */ | ||
944 | bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, | ||
945 | rule_cnt); | ||
946 | } | ||
947 | |||
948 | /** | ||
949 |  * bnx2x_set_one_vlan_mac_e1h - configure one VLAN-MAC pair in a 57711 CAM entry | ||
950 | * | ||
951 | * @bp: device handle | ||
952 | * @o: bnx2x_vlan_mac_obj | ||
953 | * @elem: bnx2x_exeq_elem | ||
954 | * @rule_idx: rule_idx | ||
955 | * @cam_offset: cam_offset | ||
956 | */ | ||
957 | static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, | ||
958 | struct bnx2x_vlan_mac_obj *o, | ||
959 | struct bnx2x_exeq_elem *elem, | ||
960 | int rule_idx, int cam_offset) | ||
961 | { | ||
962 | struct bnx2x_raw_obj *raw = &o->raw; | ||
963 | struct mac_configuration_cmd *config = | ||
964 | (struct mac_configuration_cmd *)(raw->rdata); | ||
965 | /* | ||
966 | * 57710 and 57711 do not support MOVE command, | ||
967 | * so it's either ADD or DEL | ||
968 | */ | ||
969 | bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? | ||
970 | true : false; | ||
971 | |||
972 | /* Reset the ramrod data buffer */ | ||
973 | memset(config, 0, sizeof(*config)); | ||
974 | |||
975 | bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING, | ||
976 | cam_offset, add, | ||
977 | elem->cmd_data.vlan_mac.u.vlan_mac.mac, | ||
978 | elem->cmd_data.vlan_mac.u.vlan_mac.vlan, | ||
979 | ETH_VLAN_FILTER_CLASSIFY, config); | ||
980 | } | ||
981 | |||
982 | #define list_next_entry(pos, member) \ | ||
983 | list_entry((pos)->member.next, typeof(*(pos)), member) | ||
984 | |||
985 | /** | ||
986 | * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element | ||
987 | * | ||
988 | * @bp: device handle | ||
989 | * @p: command parameters | ||
990 |  * @ppos: pointer to the cookie | ||
991 | * | ||
992 | * reconfigure next MAC/VLAN/VLAN-MAC element from the | ||
993 | * previously configured elements list. | ||
994 | * | ||
995 |  * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags is | ||
996 |  * taken into account. | ||
997 | * | ||
998 |  * The pointer to the cookie should be given back in the next call so that the | ||
999 |  * function handles the next element. If *ppos is set to NULL it will restart the | ||
1000 |  * iterator. If the returned *ppos == NULL, the last element has been | ||
1001 |  * handled. | ||
1002 | * | ||
1003 | */ | ||
1004 | static int bnx2x_vlan_mac_restore(struct bnx2x *bp, | ||
1005 | struct bnx2x_vlan_mac_ramrod_params *p, | ||
1006 | struct bnx2x_vlan_mac_registry_elem **ppos) | ||
1007 | { | ||
1008 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
1009 | struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; | ||
1010 | |||
1011 | /* If list is empty - there is nothing to do here */ | ||
1012 | if (list_empty(&o->head)) { | ||
1013 | *ppos = NULL; | ||
1014 | return 0; | ||
1015 | } | ||
1016 | |||
1017 | /* make a step... */ | ||
1018 | if (*ppos == NULL) | ||
1019 | *ppos = list_first_entry(&o->head, | ||
1020 | struct bnx2x_vlan_mac_registry_elem, | ||
1021 | link); | ||
1022 | else | ||
1023 | *ppos = list_next_entry(*ppos, link); | ||
1024 | |||
1025 | pos = *ppos; | ||
1026 | |||
1027 | /* If it's the last step - return NULL */ | ||
1028 | if (list_is_last(&pos->link, &o->head)) | ||
1029 | *ppos = NULL; | ||
1030 | |||
1031 | /* Prepare a 'user_req' */ | ||
1032 | memcpy(&p->user_req.u, &pos->u, sizeof(pos->u)); | ||
1033 | |||
1034 | /* Set the command */ | ||
1035 | p->user_req.cmd = BNX2X_VLAN_MAC_ADD; | ||
1036 | |||
1037 | /* Set vlan_mac_flags */ | ||
1038 | p->user_req.vlan_mac_flags = pos->vlan_mac_flags; | ||
1039 | |||
1040 | /* Set a restore bit */ | ||
1041 | __set_bit(RAMROD_RESTORE, &p->ramrod_flags); | ||
1042 | |||
1043 | return bnx2x_config_vlan_mac(bp, p); | ||
1044 | } | ||
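/*
 * A minimal caller sketch (illustrative only, not taken from this patch):
 * driving the restore iterator above with its cookie until the whole
 * registry has been replayed. The helper name is hypothetical and the
 * ramrod parameters 'p' are assumed to have been prepared by the caller.
 */
static int bnx2x_vlan_mac_restore_all_sketch(struct bnx2x *bp,
					     struct bnx2x_vlan_mac_ramrod_params *p)
{
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
	int rc;

	/* A NULL cookie restarts the iteration; it comes back NULL again
	 * once the last registered element has been re-added.
	 */
	do {
		rc = bnx2x_vlan_mac_restore(bp, p, &pos);
		if (rc < 0)
			return rc;
	} while (pos);

	return 0;
}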
1045 | |||
1046 | /* | ||
1047 | * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a | ||
1048 | * pointer to an element with a specific criteria and NULL if such an element | ||
1049 | * hasn't been found. | ||
1050 | */ | ||
1051 | static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac( | ||
1052 | struct bnx2x_exe_queue_obj *o, | ||
1053 | struct bnx2x_exeq_elem *elem) | ||
1054 | { | ||
1055 | struct bnx2x_exeq_elem *pos; | ||
1056 | struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac; | ||
1057 | |||
1058 | /* Check pending for execution commands */ | ||
1059 | list_for_each_entry(pos, &o->exe_queue, link) | ||
1060 | if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data, | ||
1061 | sizeof(*data)) && | ||
1062 | (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) | ||
1063 | return pos; | ||
1064 | |||
1065 | return NULL; | ||
1066 | } | ||
1067 | |||
1068 | static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan( | ||
1069 | struct bnx2x_exe_queue_obj *o, | ||
1070 | struct bnx2x_exeq_elem *elem) | ||
1071 | { | ||
1072 | struct bnx2x_exeq_elem *pos; | ||
1073 | struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan; | ||
1074 | |||
1075 | /* Check pending for execution commands */ | ||
1076 | list_for_each_entry(pos, &o->exe_queue, link) | ||
1077 | if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data, | ||
1078 | sizeof(*data)) && | ||
1079 | (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) | ||
1080 | return pos; | ||
1081 | |||
1082 | return NULL; | ||
1083 | } | ||
1084 | |||
1085 | static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac( | ||
1086 | struct bnx2x_exe_queue_obj *o, | ||
1087 | struct bnx2x_exeq_elem *elem) | ||
1088 | { | ||
1089 | struct bnx2x_exeq_elem *pos; | ||
1090 | struct bnx2x_vlan_mac_ramrod_data *data = | ||
1091 | &elem->cmd_data.vlan_mac.u.vlan_mac; | ||
1092 | |||
1093 | /* Check pending for execution commands */ | ||
1094 | list_for_each_entry(pos, &o->exe_queue, link) | ||
1095 | if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data, | ||
1096 | sizeof(*data)) && | ||
1097 | (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) | ||
1098 | return pos; | ||
1099 | |||
1100 | return NULL; | ||
1101 | } | ||
1102 | |||
1103 | /** | ||
1104 | * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed | ||
1105 | * | ||
1106 | * @bp: device handle | ||
1107 | * @qo: bnx2x_qable_obj | ||
1108 | * @elem: bnx2x_exeq_elem | ||
1109 | * | ||
1110 | * Checks that the requested configuration can be added. If yes and if | ||
1111 | * requested, consume CAM credit. | ||
1112 | * | ||
1113 | * The 'validate' is run after the 'optimize'. | ||
1114 | * | ||
1115 | */ | ||
1116 | static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, | ||
1117 | union bnx2x_qable_obj *qo, | ||
1118 | struct bnx2x_exeq_elem *elem) | ||
1119 | { | ||
1120 | struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; | ||
1121 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; | ||
1122 | int rc; | ||
1123 | |||
1124 | /* Check the registry */ | ||
1125 | rc = o->check_add(o, &elem->cmd_data.vlan_mac.u); | ||
1126 | if (rc) { | ||
1127 | DP(BNX2X_MSG_SP, "ADD command is not allowed considering " | ||
1128 | "current registry state\n"); | ||
1129 | return rc; | ||
1130 | } | ||
1131 | |||
1132 | /* | ||
1133 | * Check if there is a pending ADD command for this | ||
1134 | * MAC/VLAN/VLAN-MAC. Return an error if there is. | ||
1135 | */ | ||
1136 | if (exeq->get(exeq, elem)) { | ||
1137 | DP(BNX2X_MSG_SP, "There is a pending ADD command already\n"); | ||
1138 | return -EEXIST; | ||
1139 | } | ||
1140 | |||
1141 | /* | ||
1142 | * TODO: Check the pending MOVE from other objects where this | ||
1143 | * object is a destination object. | ||
1144 | */ | ||
1145 | |||
1146 | /* Consume the credit, unless explicitly asked not to */ | ||
1147 | if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, | ||
1148 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || | ||
1149 | o->get_credit(o))) | ||
1150 | return -EINVAL; | ||
1151 | |||
1152 | return 0; | ||
1153 | } | ||
1154 | |||
1155 | /** | ||
1156 | * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed | ||
1157 | * | ||
1158 | * @bp: device handle | ||
1159 |  * @qo: queueable object to check | ||
1160 | * @elem: element that needs to be deleted | ||
1161 | * | ||
1162 | * Checks that the requested configuration can be deleted. If yes and if | ||
1163 | * requested, returns a CAM credit. | ||
1164 | * | ||
1165 | * The 'validate' is run after the 'optimize'. | ||
1166 | */ | ||
1167 | static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, | ||
1168 | union bnx2x_qable_obj *qo, | ||
1169 | struct bnx2x_exeq_elem *elem) | ||
1170 | { | ||
1171 | struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; | ||
1172 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
1173 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; | ||
1174 | struct bnx2x_exeq_elem query_elem; | ||
1175 | |||
1176 | /* If this classification cannot be deleted (doesn't exist) | ||
1177 | * - return -EEXIST. | ||
1178 | */ | ||
1179 | pos = o->check_del(o, &elem->cmd_data.vlan_mac.u); | ||
1180 | if (!pos) { | ||
1181 | DP(BNX2X_MSG_SP, "DEL command is not allowed considering " | ||
1182 | "current registry state\n"); | ||
1183 | return -EEXIST; | ||
1184 | } | ||
1185 | |||
1186 | /* | ||
1187 | * Check if there are pending DEL or MOVE commands for this | ||
1188 | * MAC/VLAN/VLAN-MAC. Return an error if so. | ||
1189 | */ | ||
1190 | memcpy(&query_elem, elem, sizeof(query_elem)); | ||
1191 | |||
1192 | /* Check for MOVE commands */ | ||
1193 | query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE; | ||
1194 | if (exeq->get(exeq, &query_elem)) { | ||
1195 | BNX2X_ERR("There is a pending MOVE command already\n"); | ||
1196 | return -EINVAL; | ||
1197 | } | ||
1198 | |||
1199 | /* Check for DEL commands */ | ||
1200 | if (exeq->get(exeq, elem)) { | ||
1201 | DP(BNX2X_MSG_SP, "There is a pending DEL command already\n"); | ||
1202 | return -EEXIST; | ||
1203 | } | ||
1204 | |||
1205 | /* Return the credit to the credit pool, unless explicitly asked not to */ | ||
1206 | if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, | ||
1207 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || | ||
1208 | o->put_credit(o))) { | ||
1209 | BNX2X_ERR("Failed to return a credit\n"); | ||
1210 | return -EINVAL; | ||
1211 | } | ||
1212 | |||
1213 | return 0; | ||
1214 | } | ||
1215 | |||
1216 | /** | ||
1217 | * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed | ||
1218 | * | ||
1219 | * @bp: device handle | ||
1220 |  * @qo: queueable object to check (source) | ||
1221 | * @elem: element that needs to be moved | ||
1222 | * | ||
1223 |  * Checks that the requested configuration can be moved. If yes and if requested, | ||
1224 |  * consumes a destination CAM credit and returns a source one. | ||
1225 | * | ||
1226 | * The 'validate' is run after the 'optimize'. | ||
1227 | */ | ||
1228 | static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, | ||
1229 | union bnx2x_qable_obj *qo, | ||
1230 | struct bnx2x_exeq_elem *elem) | ||
1231 | { | ||
1232 | struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac; | ||
1233 | struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj; | ||
1234 | struct bnx2x_exeq_elem query_elem; | ||
1235 | struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; | ||
1236 | struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; | ||
1237 | |||
1238 | /* | ||
1239 | * Check if we can perform this operation based on the current registry | ||
1240 | * state. | ||
1241 | */ | ||
1242 | if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) { | ||
1243 | DP(BNX2X_MSG_SP, "MOVE command is not allowed considering " | ||
1244 | "current registry state\n"); | ||
1245 | return -EINVAL; | ||
1246 | } | ||
1247 | |||
1248 | /* | ||
1249 | * Check if there is an already pending DEL or MOVE command for the | ||
1250 | * source object or ADD command for a destination object. Return an | ||
1251 | * error if so. | ||
1252 | */ | ||
1253 | memcpy(&query_elem, elem, sizeof(query_elem)); | ||
1254 | |||
1255 | /* Check DEL on source */ | ||
1256 | query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; | ||
1257 | if (src_exeq->get(src_exeq, &query_elem)) { | ||
1258 | BNX2X_ERR("There is a pending DEL command on the source " | ||
1259 | "queue already\n"); | ||
1260 | return -EINVAL; | ||
1261 | } | ||
1262 | |||
1263 | /* Check MOVE on source */ | ||
1264 | if (src_exeq->get(src_exeq, elem)) { | ||
1265 | DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n"); | ||
1266 | return -EEXIST; | ||
1267 | } | ||
1268 | |||
1269 | /* Check ADD on destination */ | ||
1270 | query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; | ||
1271 | if (dest_exeq->get(dest_exeq, &query_elem)) { | ||
1272 | BNX2X_ERR("There is a pending ADD command on the " | ||
1273 | "destination queue already\n"); | ||
1274 | return -EINVAL; | ||
1275 | } | ||
1276 | |||
1277 | /* Consume the destination credit, unless explicitly asked not to */ | ||
1278 | if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, | ||
1279 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || | ||
1280 | dest_o->get_credit(dest_o))) | ||
1281 | return -EINVAL; | ||
1282 | |||
1283 | if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, | ||
1284 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || | ||
1285 | src_o->put_credit(src_o))) { | ||
1286 | /* return the credit taken from dest... */ | ||
1287 | dest_o->put_credit(dest_o); | ||
1288 | return -EINVAL; | ||
1289 | } | ||
1290 | |||
1291 | return 0; | ||
1292 | } | ||
1293 | |||
1294 | static int bnx2x_validate_vlan_mac(struct bnx2x *bp, | ||
1295 | union bnx2x_qable_obj *qo, | ||
1296 | struct bnx2x_exeq_elem *elem) | ||
1297 | { | ||
1298 | switch (elem->cmd_data.vlan_mac.cmd) { | ||
1299 | case BNX2X_VLAN_MAC_ADD: | ||
1300 | return bnx2x_validate_vlan_mac_add(bp, qo, elem); | ||
1301 | case BNX2X_VLAN_MAC_DEL: | ||
1302 | return bnx2x_validate_vlan_mac_del(bp, qo, elem); | ||
1303 | case BNX2X_VLAN_MAC_MOVE: | ||
1304 | return bnx2x_validate_vlan_mac_move(bp, qo, elem); | ||
1305 | default: | ||
1306 | return -EINVAL; | ||
1307 | } | ||
1308 | } | ||
1309 | |||
1310 | /** | ||
1311 |  * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes. | ||
1312 | * | ||
1313 | * @bp: device handle | ||
1314 | * @o: bnx2x_vlan_mac_obj | ||
1315 | * | ||
1316 | */ | ||
1317 | static int bnx2x_wait_vlan_mac(struct bnx2x *bp, | ||
1318 | struct bnx2x_vlan_mac_obj *o) | ||
1319 | { | ||
1320 | int cnt = 5000, rc; | ||
1321 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; | ||
1322 | struct bnx2x_raw_obj *raw = &o->raw; | ||
1323 | |||
1324 | while (cnt--) { | ||
1325 | /* Wait for the current command to complete */ | ||
1326 | rc = raw->wait_comp(bp, raw); | ||
1327 | if (rc) | ||
1328 | return rc; | ||
1329 | |||
1330 | /* Wait until there are no pending commands */ | ||
1331 | if (!bnx2x_exe_queue_empty(exeq)) | ||
1332 | usleep_range(1000, 1000); | ||
1333 | else | ||
1334 | return 0; | ||
1335 | } | ||
1336 | |||
1337 | return -EBUSY; | ||
1338 | } | ||
1339 | |||
1340 | /** | ||
1341 | * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod | ||
1342 | * | ||
1343 | * @bp: device handle | ||
1344 | * @o: bnx2x_vlan_mac_obj | ||
1345 |  * @cqe: completion element | ||
1346 |  * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk | ||
1347 | * | ||
1348 | */ | ||
1349 | static int bnx2x_complete_vlan_mac(struct bnx2x *bp, | ||
1350 | struct bnx2x_vlan_mac_obj *o, | ||
1351 | union event_ring_elem *cqe, | ||
1352 | unsigned long *ramrod_flags) | ||
1353 | { | ||
1354 | struct bnx2x_raw_obj *r = &o->raw; | ||
1355 | int rc; | ||
1356 | |||
1357 | /* Reset pending list */ | ||
1358 | bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); | ||
1359 | |||
1360 | /* Clear pending */ | ||
1361 | r->clear_pending(r); | ||
1362 | |||
1363 | /* If ramrod failed this is most likely a SW bug */ | ||
1364 | if (cqe->message.error) | ||
1365 | return -EINVAL; | ||
1366 | |||
1367 | /* Run the next bulk of pending commands if requested */ | ||
1368 | if (test_bit(RAMROD_CONT, ramrod_flags)) { | ||
1369 | rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); | ||
1370 | if (rc < 0) | ||
1371 | return rc; | ||
1372 | } | ||
1373 | |||
1374 | /* If there is more work to do return PENDING */ | ||
1375 | if (!bnx2x_exe_queue_empty(&o->exe_queue)) | ||
1376 | return 1; | ||
1377 | |||
1378 | return 0; | ||
1379 | } | ||
1380 | |||
1381 | /** | ||
1382 | * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands. | ||
1383 | * | ||
1384 | * @bp: device handle | ||
1385 | * @o: bnx2x_qable_obj | ||
1386 | * @elem: bnx2x_exeq_elem | ||
1387 | */ | ||
1388 | static int bnx2x_optimize_vlan_mac(struct bnx2x *bp, | ||
1389 | union bnx2x_qable_obj *qo, | ||
1390 | struct bnx2x_exeq_elem *elem) | ||
1391 | { | ||
1392 | struct bnx2x_exeq_elem query, *pos; | ||
1393 | struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; | ||
1394 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; | ||
1395 | |||
1396 | memcpy(&query, elem, sizeof(query)); | ||
1397 | |||
1398 | switch (elem->cmd_data.vlan_mac.cmd) { | ||
1399 | case BNX2X_VLAN_MAC_ADD: | ||
1400 | query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; | ||
1401 | break; | ||
1402 | case BNX2X_VLAN_MAC_DEL: | ||
1403 | query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; | ||
1404 | break; | ||
1405 | default: | ||
1406 | /* Don't handle anything other than ADD or DEL */ | ||
1407 | return 0; | ||
1408 | } | ||
1409 | |||
1410 | /* If we found the appropriate element - delete it */ | ||
1411 | pos = exeq->get(exeq, &query); | ||
1412 | if (pos) { | ||
1413 | |||
1414 | /* Return the credit of the optimized command */ | ||
1415 | if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, | ||
1416 | &pos->cmd_data.vlan_mac.vlan_mac_flags)) { | ||
1417 | if ((query.cmd_data.vlan_mac.cmd == | ||
1418 | BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) { | ||
1419 | BNX2X_ERR("Failed to return the credit for the " | ||
1420 | "optimized ADD command\n"); | ||
1421 | return -EINVAL; | ||
1422 | } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ | ||
1423 | BNX2X_ERR("Failed to recover the credit from " | ||
1424 | "the optimized DEL command\n"); | ||
1425 | return -EINVAL; | ||
1426 | } | ||
1427 | } | ||
1428 | |||
1429 | DP(BNX2X_MSG_SP, "Optimizing %s command\n", | ||
1430 | (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? | ||
1431 | "ADD" : "DEL"); | ||
1432 | |||
1433 | list_del(&pos->link); | ||
1434 | bnx2x_exe_queue_free_elem(bp, pos); | ||
1435 | return 1; | ||
1436 | } | ||
1437 | |||
1438 | return 0; | ||
1439 | } | ||
1440 | |||
1441 | /** | ||
1442 | * bnx2x_vlan_mac_get_registry_elem - prepare a registry element | ||
1443 | * | ||
1444 | * @bp: device handle | ||
1445 |  * @o: vlan_mac object the element belongs to | ||
1446 |  * @elem: execution queue element describing the command | ||
1447 |  * @restore: true if this is a RESTORE flow | ||
1448 |  * @re: output - the prepared registry element | ||
1449 | * | ||
1450 | * prepare a registry element according to the current command request. | ||
1451 | */ | ||
1452 | static inline int bnx2x_vlan_mac_get_registry_elem( | ||
1453 | struct bnx2x *bp, | ||
1454 | struct bnx2x_vlan_mac_obj *o, | ||
1455 | struct bnx2x_exeq_elem *elem, | ||
1456 | bool restore, | ||
1457 | struct bnx2x_vlan_mac_registry_elem **re) | ||
1458 | { | ||
1459 | int cmd = elem->cmd_data.vlan_mac.cmd; | ||
1460 | struct bnx2x_vlan_mac_registry_elem *reg_elem; | ||
1461 | |||
1462 | /* Allocate a new registry element if needed. */ | ||
1463 | if (!restore && | ||
1464 | ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) { | ||
1465 | reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC); | ||
1466 | if (!reg_elem) | ||
1467 | return -ENOMEM; | ||
1468 | |||
1469 | /* Get a new CAM offset */ | ||
1470 | if (!o->get_cam_offset(o, ®_elem->cam_offset)) { | ||
1471 | /* | ||
1472 | * This should never happen, because we have checked | ||
1473 | * CAM availability in the 'validate' step. | ||
1474 | */ | ||
1475 | WARN_ON(1); | ||
1476 | kfree(reg_elem); | ||
1477 | return -EINVAL; | ||
1478 | } | ||
1479 | |||
1480 | DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset); | ||
1481 | |||
1482 | /* Set a VLAN-MAC data */ | ||
1483 | memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u, | ||
1484 | sizeof(reg_elem->u)); | ||
1485 | |||
1486 | /* Copy the flags (needed for DEL and RESTORE flows) */ | ||
1487 | reg_elem->vlan_mac_flags = | ||
1488 | elem->cmd_data.vlan_mac.vlan_mac_flags; | ||
1489 | } else /* DEL, RESTORE */ | ||
1490 | reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u); | ||
1491 | |||
1492 | *re = reg_elem; | ||
1493 | return 0; | ||
1494 | } | ||
1495 | |||
1496 | /** | ||
1497 | * bnx2x_execute_vlan_mac - execute vlan mac command | ||
1498 | * | ||
1499 | * @bp: device handle | ||
1500 |  * @qo: queueable object (the vlan_mac object) | ||
1501 |  * @exe_chunk: chunk of execution queue elements to configure | ||
1502 |  * @ramrod_flags: execution flags (RESTORE, DRV_CLR_ONLY, ...) | ||
1503 | * | ||
1504 | * go and send a ramrod! | ||
1505 | */ | ||
1506 | static int bnx2x_execute_vlan_mac(struct bnx2x *bp, | ||
1507 | union bnx2x_qable_obj *qo, | ||
1508 | struct list_head *exe_chunk, | ||
1509 | unsigned long *ramrod_flags) | ||
1510 | { | ||
1511 | struct bnx2x_exeq_elem *elem; | ||
1512 | struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; | ||
1513 | struct bnx2x_raw_obj *r = &o->raw; | ||
1514 | int rc, idx = 0; | ||
1515 | bool restore = test_bit(RAMROD_RESTORE, ramrod_flags); | ||
1516 | bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags); | ||
1517 | struct bnx2x_vlan_mac_registry_elem *reg_elem; | ||
1518 | int cmd; | ||
1519 | |||
1520 | /* | ||
1521 | * If DRIVER_ONLY execution is requested, cleanup a registry | ||
1522 | * and exit. Otherwise send a ramrod to FW. | ||
1523 | */ | ||
1524 | if (!drv_only) { | ||
1525 | WARN_ON(r->check_pending(r)); | ||
1526 | |||
1527 | /* Set pending */ | ||
1528 | r->set_pending(r); | ||
1529 | |||
1530 | /* Fill the ramrod data */ | ||
1531 | list_for_each_entry(elem, exe_chunk, link) { | ||
1532 | cmd = elem->cmd_data.vlan_mac.cmd; | ||
1533 | /* | ||
1534 | * We will add to the target object in MOVE command, so | ||
1535 | * change the object for a CAM search. | ||
1536 | */ | ||
1537 | if (cmd == BNX2X_VLAN_MAC_MOVE) | ||
1538 | cam_obj = elem->cmd_data.vlan_mac.target_obj; | ||
1539 | else | ||
1540 | cam_obj = o; | ||
1541 | |||
1542 | rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj, | ||
1543 | elem, restore, | ||
1544 | ®_elem); | ||
1545 | if (rc) | ||
1546 | goto error_exit; | ||
1547 | |||
1548 | WARN_ON(!reg_elem); | ||
1549 | |||
1550 | /* Push a new entry into the registry */ | ||
1551 | if (!restore && | ||
1552 | ((cmd == BNX2X_VLAN_MAC_ADD) || | ||
1553 | (cmd == BNX2X_VLAN_MAC_MOVE))) | ||
1554 | list_add(®_elem->link, &cam_obj->head); | ||
1555 | |||
1556 | /* Configure a single command in a ramrod data buffer */ | ||
1557 | o->set_one_rule(bp, o, elem, idx, | ||
1558 | reg_elem->cam_offset); | ||
1559 | |||
1560 | /* MOVE command consumes 2 entries in the ramrod data */ | ||
1561 | if (cmd == BNX2X_VLAN_MAC_MOVE) | ||
1562 | idx += 2; | ||
1563 | else | ||
1564 | idx++; | ||
1565 | } | ||
1566 | |||
1567 | /* Commit the data writes towards the memory */ | ||
1568 | mb(); | ||
1569 | |||
1570 | rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, | ||
1571 | U64_HI(r->rdata_mapping), | ||
1572 | U64_LO(r->rdata_mapping), | ||
1573 | ETH_CONNECTION_TYPE); | ||
1574 | if (rc) | ||
1575 | goto error_exit; | ||
1576 | } | ||
1577 | |||
1578 | /* Now, when we are done with the ramrod - clean up the registry */ | ||
1579 | list_for_each_entry(elem, exe_chunk, link) { | ||
1580 | cmd = elem->cmd_data.vlan_mac.cmd; | ||
1581 | if ((cmd == BNX2X_VLAN_MAC_DEL) || | ||
1582 | (cmd == BNX2X_VLAN_MAC_MOVE)) { | ||
1583 | reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u); | ||
1584 | |||
1585 | WARN_ON(!reg_elem); | ||
1586 | |||
1587 | o->put_cam_offset(o, reg_elem->cam_offset); | ||
1588 | list_del(®_elem->link); | ||
1589 | kfree(reg_elem); | ||
1590 | } | ||
1591 | } | ||
1592 | |||
1593 | if (!drv_only) | ||
1594 | return 1; | ||
1595 | else | ||
1596 | return 0; | ||
1597 | |||
1598 | error_exit: | ||
1599 | r->clear_pending(r); | ||
1600 | |||
1601 | /* Cleanup a registry in case of a failure */ | ||
1602 | list_for_each_entry(elem, exe_chunk, link) { | ||
1603 | cmd = elem->cmd_data.vlan_mac.cmd; | ||
1604 | |||
1605 | if (cmd == BNX2X_VLAN_MAC_MOVE) | ||
1606 | cam_obj = elem->cmd_data.vlan_mac.target_obj; | ||
1607 | else | ||
1608 | cam_obj = o; | ||
1609 | |||
1610 | /* Delete all newly added above entries */ | ||
1611 | if (!restore && | ||
1612 | ((cmd == BNX2X_VLAN_MAC_ADD) || | ||
1613 | (cmd == BNX2X_VLAN_MAC_MOVE))) { | ||
1614 | reg_elem = o->check_del(cam_obj, | ||
1615 | &elem->cmd_data.vlan_mac.u); | ||
1616 | if (reg_elem) { | ||
1617 | list_del(®_elem->link); | ||
1618 | kfree(reg_elem); | ||
1619 | } | ||
1620 | } | ||
1621 | } | ||
1622 | |||
1623 | return rc; | ||
1624 | } | ||
1625 | |||
1626 | static inline int bnx2x_vlan_mac_push_new_cmd( | ||
1627 | struct bnx2x *bp, | ||
1628 | struct bnx2x_vlan_mac_ramrod_params *p) | ||
1629 | { | ||
1630 | struct bnx2x_exeq_elem *elem; | ||
1631 | struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; | ||
1632 | bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags); | ||
1633 | |||
1634 | /* Allocate the execution queue element */ | ||
1635 | elem = bnx2x_exe_queue_alloc_elem(bp); | ||
1636 | if (!elem) | ||
1637 | return -ENOMEM; | ||
1638 | |||
1639 | /* Set the command 'length' */ | ||
1640 | switch (p->user_req.cmd) { | ||
1641 | case BNX2X_VLAN_MAC_MOVE: | ||
1642 | elem->cmd_len = 2; | ||
1643 | break; | ||
1644 | default: | ||
1645 | elem->cmd_len = 1; | ||
1646 | } | ||
1647 | |||
1648 | /* Fill the object specific info */ | ||
1649 | memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req)); | ||
1650 | |||
1651 | /* Try to add a new command to the pending list */ | ||
1652 | return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore); | ||
1653 | } | ||
1654 | |||
1655 | /** | ||
1656 | * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules. | ||
1657 | * | ||
1658 | * @bp: device handle | ||
1659 |  * @p: ramrod parameters describing the requested command | ||
1660 | * | ||
1661 | */ | ||
1662 | int bnx2x_config_vlan_mac( | ||
1663 | struct bnx2x *bp, | ||
1664 | struct bnx2x_vlan_mac_ramrod_params *p) | ||
1665 | { | ||
1666 | int rc = 0; | ||
1667 | struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; | ||
1668 | unsigned long *ramrod_flags = &p->ramrod_flags; | ||
1669 | bool cont = test_bit(RAMROD_CONT, ramrod_flags); | ||
1670 | struct bnx2x_raw_obj *raw = &o->raw; | ||
1671 | |||
1672 | /* | ||
1673 | * Add new elements to the execution list for commands that require it. | ||
1674 | */ | ||
1675 | if (!cont) { | ||
1676 | rc = bnx2x_vlan_mac_push_new_cmd(bp, p); | ||
1677 | if (rc) | ||
1678 | return rc; | ||
1679 | } | ||
1680 | |||
1681 | /* | ||
1682 | * If nothing will be executed further in this iteration we want to | ||
1683 | * return PENDING if there are pending commands | ||
1684 | */ | ||
1685 | if (!bnx2x_exe_queue_empty(&o->exe_queue)) | ||
1686 | rc = 1; | ||
1687 | |||
1688 | /* Execute commands if required */ | ||
1689 | if (cont || test_bit(RAMROD_EXEC, ramrod_flags) || | ||
1690 | test_bit(RAMROD_COMP_WAIT, ramrod_flags)) { | ||
1691 | rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); | ||
1692 | if (rc < 0) | ||
1693 | return rc; | ||
1694 | } | ||
1695 | |||
1696 | /* | ||
1697 | * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set | ||
1698 | * the user wants to wait until the last command is done. | ||
1699 | */ | ||
1700 | if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { | ||
1701 | /* | ||
1702 | * Wait at most for the current exe_queue length number of | ||
1703 | * iterations plus one (for the currently pending command). | ||
1704 | */ | ||
1705 | int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1; | ||
1706 | |||
1707 | while (!bnx2x_exe_queue_empty(&o->exe_queue) && | ||
1708 | max_iterations--) { | ||
1709 | |||
1710 | /* Wait for the current command to complete */ | ||
1711 | rc = raw->wait_comp(bp, raw); | ||
1712 | if (rc) | ||
1713 | return rc; | ||
1714 | |||
1715 | /* Make a next step */ | ||
1716 | rc = bnx2x_exe_queue_step(bp, &o->exe_queue, | ||
1717 | ramrod_flags); | ||
1718 | if (rc < 0) | ||
1719 | return rc; | ||
1720 | } | ||
1721 | |||
1722 | return 0; | ||
1723 | } | ||
1724 | |||
1725 | return rc; | ||
1726 | } | ||
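/*
 * An illustrative sketch of a typical caller flow for the function above:
 * queue a single ADD for an Ethernet MAC and wait for its completion. The
 * helper name, the 'mac_obj'/'mac' arguments and the use of u.mac.mac for
 * the address are assumptions made for the purpose of the example.
 */
static int bnx2x_add_one_mac_sketch(struct bnx2x *bp,
				    struct bnx2x_vlan_mac_obj *mac_obj,
				    const u8 *mac)
{
	struct bnx2x_vlan_mac_ramrod_params p;

	memset(&p, 0, sizeof(p));

	p.vlan_mac_obj = mac_obj;
	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
	memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);

	/* Execute immediately and block until the ramrod(s) complete */
	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}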
1727 | |||
1728 | |||
1729 | |||
1730 | /** | ||
1731 | * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec | ||
1732 | * | ||
1733 | * @bp: device handle | ||
1734 |  * @o: vlan_mac object to delete the elements from | ||
1735 |  * @vlan_mac_flags: only elements configured with these flags are deleted | ||
1736 | * @ramrod_flags: execution flags to be used for this deletion | ||
1737 | * | ||
1738 |  * Returns 0 if the last operation has completed successfully and there are no | ||
1739 |  * more elements left, a positive value if the last operation has completed | ||
1740 |  * successfully and there are more previously configured elements, and a negative | ||
1741 |  * value if the current operation has failed. | ||
1742 | */ | ||
1743 | static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, | ||
1744 | struct bnx2x_vlan_mac_obj *o, | ||
1745 | unsigned long *vlan_mac_flags, | ||
1746 | unsigned long *ramrod_flags) | ||
1747 | { | ||
1748 | struct bnx2x_vlan_mac_registry_elem *pos = NULL; | ||
1749 | int rc = 0; | ||
1750 | struct bnx2x_vlan_mac_ramrod_params p; | ||
1751 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; | ||
1752 | struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; | ||
1753 | |||
1754 | /* Clear pending commands first */ | ||
1755 | |||
1756 | spin_lock_bh(&exeq->lock); | ||
1757 | |||
1758 | list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { | ||
1759 | if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == | ||
1760 | *vlan_mac_flags) | ||
1761 | list_del(&exeq_pos->link); | ||
1762 | } | ||
1763 | |||
1764 | spin_unlock_bh(&exeq->lock); | ||
1765 | |||
1766 | /* Prepare a command request */ | ||
1767 | memset(&p, 0, sizeof(p)); | ||
1768 | p.vlan_mac_obj = o; | ||
1769 | p.ramrod_flags = *ramrod_flags; | ||
1770 | p.user_req.cmd = BNX2X_VLAN_MAC_DEL; | ||
1771 | |||
1772 | /* | ||
1773 | * Add all but the last VLAN-MAC to the execution queue without actually | ||
1774 | * executing anything. | ||
1775 | */ | ||
1776 | __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); | ||
1777 | __clear_bit(RAMROD_EXEC, &p.ramrod_flags); | ||
1778 | __clear_bit(RAMROD_CONT, &p.ramrod_flags); | ||
1779 | |||
1780 | list_for_each_entry(pos, &o->head, link) { | ||
1781 | if (pos->vlan_mac_flags == *vlan_mac_flags) { | ||
1782 | p.user_req.vlan_mac_flags = pos->vlan_mac_flags; | ||
1783 | memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); | ||
1784 | rc = bnx2x_config_vlan_mac(bp, &p); | ||
1785 | if (rc < 0) { | ||
1786 | BNX2X_ERR("Failed to add a new DEL command\n"); | ||
1787 | return rc; | ||
1788 | } | ||
1789 | } | ||
1790 | } | ||
1791 | |||
1792 | p.ramrod_flags = *ramrod_flags; | ||
1793 | __set_bit(RAMROD_CONT, &p.ramrod_flags); | ||
1794 | |||
1795 | return bnx2x_config_vlan_mac(bp, &p); | ||
1796 | } | ||
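/*
 * An illustrative sketch of how the delete-all flow above is reached from a
 * caller, via the object's delete_all callback (assigned below in
 * bnx2x_init_vlan_mac_common()). The helper name is hypothetical, and which
 * bits belong in vlan_mac_flags depends on how the entries were originally
 * added; zero here is only a placeholder.
 */
static int bnx2x_flush_vlan_mac_sketch(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o)
{
	unsigned long vlan_mac_flags = 0;
	unsigned long ramrod_flags = 0;

	/* Wait for all of the generated DEL commands to complete */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);

	return o->delete_all(bp, o, &vlan_mac_flags, &ramrod_flags);
}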
1797 | |||
1798 | static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id, | ||
1799 | u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state, | ||
1800 | unsigned long *pstate, bnx2x_obj_type type) | ||
1801 | { | ||
1802 | raw->func_id = func_id; | ||
1803 | raw->cid = cid; | ||
1804 | raw->cl_id = cl_id; | ||
1805 | raw->rdata = rdata; | ||
1806 | raw->rdata_mapping = rdata_mapping; | ||
1807 | raw->state = state; | ||
1808 | raw->pstate = pstate; | ||
1809 | raw->obj_type = type; | ||
1810 | raw->check_pending = bnx2x_raw_check_pending; | ||
1811 | raw->clear_pending = bnx2x_raw_clear_pending; | ||
1812 | raw->set_pending = bnx2x_raw_set_pending; | ||
1813 | raw->wait_comp = bnx2x_raw_wait; | ||
1814 | } | ||
1815 | |||
1816 | static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, | ||
1817 | u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, | ||
1818 | int state, unsigned long *pstate, bnx2x_obj_type type, | ||
1819 | struct bnx2x_credit_pool_obj *macs_pool, | ||
1820 | struct bnx2x_credit_pool_obj *vlans_pool) | ||
1821 | { | ||
1822 | INIT_LIST_HEAD(&o->head); | ||
1823 | |||
1824 | o->macs_pool = macs_pool; | ||
1825 | o->vlans_pool = vlans_pool; | ||
1826 | |||
1827 | o->delete_all = bnx2x_vlan_mac_del_all; | ||
1828 | o->restore = bnx2x_vlan_mac_restore; | ||
1829 | o->complete = bnx2x_complete_vlan_mac; | ||
1830 | o->wait = bnx2x_wait_vlan_mac; | ||
1831 | |||
1832 | bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping, | ||
1833 | state, pstate, type); | ||
1834 | } | ||
1835 | |||
1836 | |||
1837 | void bnx2x_init_mac_obj(struct bnx2x *bp, | ||
1838 | struct bnx2x_vlan_mac_obj *mac_obj, | ||
1839 | u8 cl_id, u32 cid, u8 func_id, void *rdata, | ||
1840 | dma_addr_t rdata_mapping, int state, | ||
1841 | unsigned long *pstate, bnx2x_obj_type type, | ||
1842 | struct bnx2x_credit_pool_obj *macs_pool) | ||
1843 | { | ||
1844 | union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj; | ||
1845 | |||
1846 | bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata, | ||
1847 | rdata_mapping, state, pstate, type, | ||
1848 | macs_pool, NULL); | ||
1849 | |||
1850 | /* CAM credit pool handling */ | ||
1851 | mac_obj->get_credit = bnx2x_get_credit_mac; | ||
1852 | mac_obj->put_credit = bnx2x_put_credit_mac; | ||
1853 | mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; | ||
1854 | mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; | ||
1855 | |||
1856 | if (CHIP_IS_E1x(bp)) { | ||
1857 | mac_obj->set_one_rule = bnx2x_set_one_mac_e1x; | ||
1858 | mac_obj->check_del = bnx2x_check_mac_del; | ||
1859 | mac_obj->check_add = bnx2x_check_mac_add; | ||
1860 | mac_obj->check_move = bnx2x_check_move_always_err; | ||
1861 | mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; | ||
1862 | |||
1863 | /* Exe Queue */ | ||
1864 | bnx2x_exe_queue_init(bp, | ||
1865 | &mac_obj->exe_queue, 1, qable_obj, | ||
1866 | bnx2x_validate_vlan_mac, | ||
1867 | bnx2x_optimize_vlan_mac, | ||
1868 | bnx2x_execute_vlan_mac, | ||
1869 | bnx2x_exeq_get_mac); | ||
1870 | } else { | ||
1871 | mac_obj->set_one_rule = bnx2x_set_one_mac_e2; | ||
1872 | mac_obj->check_del = bnx2x_check_mac_del; | ||
1873 | mac_obj->check_add = bnx2x_check_mac_add; | ||
1874 | mac_obj->check_move = bnx2x_check_move; | ||
1875 | mac_obj->ramrod_cmd = | ||
1876 | RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; | ||
1877 | |||
1878 | /* Exe Queue */ | ||
1879 | bnx2x_exe_queue_init(bp, | ||
1880 | &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, | ||
1881 | qable_obj, bnx2x_validate_vlan_mac, | ||
1882 | bnx2x_optimize_vlan_mac, | ||
1883 | bnx2x_execute_vlan_mac, | ||
1884 | bnx2x_exeq_get_mac); | ||
1885 | } | ||
1886 | } | ||
1887 | |||
1888 | void bnx2x_init_vlan_obj(struct bnx2x *bp, | ||
1889 | struct bnx2x_vlan_mac_obj *vlan_obj, | ||
1890 | u8 cl_id, u32 cid, u8 func_id, void *rdata, | ||
1891 | dma_addr_t rdata_mapping, int state, | ||
1892 | unsigned long *pstate, bnx2x_obj_type type, | ||
1893 | struct bnx2x_credit_pool_obj *vlans_pool) | ||
1894 | { | ||
1895 | union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj; | ||
1896 | |||
1897 | bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata, | ||
1898 | rdata_mapping, state, pstate, type, NULL, | ||
1899 | vlans_pool); | ||
1900 | |||
1901 | vlan_obj->get_credit = bnx2x_get_credit_vlan; | ||
1902 | vlan_obj->put_credit = bnx2x_put_credit_vlan; | ||
1903 | vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan; | ||
1904 | vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan; | ||
1905 | |||
1906 | if (CHIP_IS_E1x(bp)) { | ||
1907 | BNX2X_ERR("Do not support chips other than E2 and newer\n"); | ||
1908 | BUG(); | ||
1909 | } else { | ||
1910 | vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2; | ||
1911 | vlan_obj->check_del = bnx2x_check_vlan_del; | ||
1912 | vlan_obj->check_add = bnx2x_check_vlan_add; | ||
1913 | vlan_obj->check_move = bnx2x_check_move; | ||
1914 | vlan_obj->ramrod_cmd = | ||
1915 | RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; | ||
1916 | |||
1917 | /* Exe Queue */ | ||
1918 | bnx2x_exe_queue_init(bp, | ||
1919 | &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT, | ||
1920 | qable_obj, bnx2x_validate_vlan_mac, | ||
1921 | bnx2x_optimize_vlan_mac, | ||
1922 | bnx2x_execute_vlan_mac, | ||
1923 | bnx2x_exeq_get_vlan); | ||
1924 | } | ||
1925 | } | ||
1926 | |||
1927 | void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, | ||
1928 | struct bnx2x_vlan_mac_obj *vlan_mac_obj, | ||
1929 | u8 cl_id, u32 cid, u8 func_id, void *rdata, | ||
1930 | dma_addr_t rdata_mapping, int state, | ||
1931 | unsigned long *pstate, bnx2x_obj_type type, | ||
1932 | struct bnx2x_credit_pool_obj *macs_pool, | ||
1933 | struct bnx2x_credit_pool_obj *vlans_pool) | ||
1934 | { | ||
1935 | union bnx2x_qable_obj *qable_obj = | ||
1936 | (union bnx2x_qable_obj *)vlan_mac_obj; | ||
1937 | |||
1938 | bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata, | ||
1939 | rdata_mapping, state, pstate, type, | ||
1940 | macs_pool, vlans_pool); | ||
1941 | |||
1942 | /* CAM pool handling */ | ||
1943 | vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; | ||
1944 | vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; | ||
1945 | /* | ||
1946 | * CAM offset is relevant for 57710 and 57711 chips only which have a | ||
1947 | * single CAM for both MACs and VLAN-MAC pairs. So the offset | ||
1948 | * will be taken from MACs' pool object only. | ||
1949 | */ | ||
1950 | vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; | ||
1951 | vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; | ||
1952 | |||
1953 | if (CHIP_IS_E1(bp)) { | ||
1954 | BNX2X_ERR("Do not support chips other than E2\n"); | ||
1955 | BUG(); | ||
1956 | } else if (CHIP_IS_E1H(bp)) { | ||
1957 | vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h; | ||
1958 | vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; | ||
1959 | vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; | ||
1960 | vlan_mac_obj->check_move = bnx2x_check_move_always_err; | ||
1961 | vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; | ||
1962 | |||
1963 | /* Exe Queue */ | ||
1964 | bnx2x_exe_queue_init(bp, | ||
1965 | &vlan_mac_obj->exe_queue, 1, qable_obj, | ||
1966 | bnx2x_validate_vlan_mac, | ||
1967 | bnx2x_optimize_vlan_mac, | ||
1968 | bnx2x_execute_vlan_mac, | ||
1969 | bnx2x_exeq_get_vlan_mac); | ||
1970 | } else { | ||
1971 | vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2; | ||
1972 | vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; | ||
1973 | vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; | ||
1974 | vlan_mac_obj->check_move = bnx2x_check_move; | ||
1975 | vlan_mac_obj->ramrod_cmd = | ||
1976 | RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; | ||
1977 | |||
1978 | /* Exe Queue */ | ||
1979 | bnx2x_exe_queue_init(bp, | ||
1980 | &vlan_mac_obj->exe_queue, | ||
1981 | CLASSIFY_RULES_COUNT, | ||
1982 | qable_obj, bnx2x_validate_vlan_mac, | ||
1983 | bnx2x_optimize_vlan_mac, | ||
1984 | bnx2x_execute_vlan_mac, | ||
1985 | bnx2x_exeq_get_vlan_mac); | ||
1986 | } | ||
1987 | |||
1988 | } | ||
1989 | |||
1990 | /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ | ||
1991 | static inline void __storm_memset_mac_filters(struct bnx2x *bp, | ||
1992 | struct tstorm_eth_mac_filter_config *mac_filters, | ||
1993 | u16 pf_id) | ||
1994 | { | ||
1995 | size_t size = sizeof(struct tstorm_eth_mac_filter_config); | ||
1996 | |||
1997 | u32 addr = BAR_TSTRORM_INTMEM + | ||
1998 | TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id); | ||
1999 | |||
2000 | __storm_memset_struct(bp, addr, size, (u32 *)mac_filters); | ||
2001 | } | ||
2002 | |||
2003 | static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, | ||
2004 | struct bnx2x_rx_mode_ramrod_params *p) | ||
2005 | { | ||
2006 | /* update the bp MAC filter structure */ | ||
2007 | u32 mask = (1 << p->cl_id); | ||
2008 | |||
2009 | struct tstorm_eth_mac_filter_config *mac_filters = | ||
2010 | (struct tstorm_eth_mac_filter_config *)p->rdata; | ||
2011 | |||
2012 | /* initial setting is drop-all */ | ||
2013 | u8 drop_all_ucast = 1, drop_all_mcast = 1; | ||
2014 | u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; | ||
2015 | u8 unmatched_unicast = 0; | ||
2016 | |||
2017 | /* In e1x we only take the rx accept flags into account since tx switching | ||
2018 | * isn't enabled. */ | ||
2019 | if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) | ||
2020 | /* accept matched ucast */ | ||
2021 | drop_all_ucast = 0; | ||
2022 | |||
2023 | if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags)) | ||
2024 | /* accept matched mcast */ | ||
2025 | drop_all_mcast = 0; | ||
2026 | |||
2027 | if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) { | ||
2028 | /* accept all ucast */ | ||
2029 | drop_all_ucast = 0; | ||
2030 | accp_all_ucast = 1; | ||
2031 | } | ||
2032 | if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) { | ||
2033 | /* accept all mcast */ | ||
2034 | drop_all_mcast = 0; | ||
2035 | accp_all_mcast = 1; | ||
2036 | } | ||
2037 | if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags)) | ||
2038 | /* accept (all) bcast */ | ||
2039 | accp_all_bcast = 1; | ||
2040 | if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags)) | ||
2041 | /* accept unmatched unicasts */ | ||
2042 | unmatched_unicast = 1; | ||
2043 | |||
2044 | mac_filters->ucast_drop_all = drop_all_ucast ? | ||
2045 | mac_filters->ucast_drop_all | mask : | ||
2046 | mac_filters->ucast_drop_all & ~mask; | ||
2047 | |||
2048 | mac_filters->mcast_drop_all = drop_all_mcast ? | ||
2049 | mac_filters->mcast_drop_all | mask : | ||
2050 | mac_filters->mcast_drop_all & ~mask; | ||
2051 | |||
2052 | mac_filters->ucast_accept_all = accp_all_ucast ? | ||
2053 | mac_filters->ucast_accept_all | mask : | ||
2054 | mac_filters->ucast_accept_all & ~mask; | ||
2055 | |||
2056 | mac_filters->mcast_accept_all = accp_all_mcast ? | ||
2057 | mac_filters->mcast_accept_all | mask : | ||
2058 | mac_filters->mcast_accept_all & ~mask; | ||
2059 | |||
2060 | mac_filters->bcast_accept_all = accp_all_bcast ? | ||
2061 | mac_filters->bcast_accept_all | mask : | ||
2062 | mac_filters->bcast_accept_all & ~mask; | ||
2063 | |||
2064 | mac_filters->unmatched_unicast = unmatched_unicast ? | ||
2065 | mac_filters->unmatched_unicast | mask : | ||
2066 | mac_filters->unmatched_unicast & ~mask; | ||
2067 | |||
2068 | DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n" | ||
2069 | "accp_mcast 0x%x\naccp_bcast 0x%x\n", | ||
2070 | mac_filters->ucast_drop_all, | ||
2071 | mac_filters->mcast_drop_all, | ||
2072 | mac_filters->ucast_accept_all, | ||
2073 | mac_filters->mcast_accept_all, | ||
2074 | mac_filters->bcast_accept_all); | ||
2075 | |||
2076 | /* write the MAC filter structure*/ | ||
2077 | __storm_memset_mac_filters(bp, mac_filters, p->func_id); | ||
2078 | |||
2079 | /* The operation is completed */ | ||
2080 | clear_bit(p->state, p->pstate); | ||
2081 | smp_mb__after_clear_bit(); | ||
2082 | |||
2083 | return 0; | ||
2084 | } | ||
2085 | |||
2086 | /* Setup ramrod data */ | ||
2087 | static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid, | ||
2088 | struct eth_classify_header *hdr, | ||
2089 | u8 rule_cnt) | ||
2090 | { | ||
2091 | hdr->echo = cid; | ||
2092 | hdr->rule_cnt = rule_cnt; | ||
2093 | } | ||
2094 | |||
2095 | static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, | ||
2096 | unsigned long accept_flags, | ||
2097 | struct eth_filter_rules_cmd *cmd, | ||
2098 | bool clear_accept_all) | ||
2099 | { | ||
2100 | u16 state; | ||
2101 | |||
2102 | /* start with 'drop-all' */ | ||
2103 | state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | | ||
2104 | ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; | ||
2105 | |||
2106 | if (accept_flags) { | ||
2107 | if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags)) | ||
2108 | state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; | ||
2109 | |||
2110 | if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags)) | ||
2111 | state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; | ||
2112 | |||
2113 | if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) { | ||
2114 | state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; | ||
2115 | state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; | ||
2116 | } | ||
2117 | |||
2118 | if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) { | ||
2119 | state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; | ||
2120 | state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; | ||
2121 | } | ||
2122 | if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags)) | ||
2123 | state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; | ||
2124 | |||
2125 | if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) { | ||
2126 | state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; | ||
2127 | state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; | ||
2128 | } | ||
2129 | if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags)) | ||
2130 | state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; | ||
2131 | } | ||
2132 | |||
2133 | /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ | ||
2134 | if (clear_accept_all) { | ||
2135 | state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; | ||
2136 | state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; | ||
2137 | state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; | ||
2138 | state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; | ||
2139 | } | ||
2140 | |||
2141 | cmd->state = cpu_to_le16(state); | ||
2142 | |||
2143 | } | ||
2144 | |||
2145 | static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, | ||
2146 | struct bnx2x_rx_mode_ramrod_params *p) | ||
2147 | { | ||
2148 | struct eth_filter_rules_ramrod_data *data = p->rdata; | ||
2149 | int rc; | ||
2150 | u8 rule_idx = 0; | ||
2151 | |||
2152 | /* Reset the ramrod data buffer */ | ||
2153 | memset(data, 0, sizeof(*data)); | ||
2154 | |||
2155 | /* Setup ramrod data */ | ||
2156 | |||
2157 | /* Tx (internal switching) */ | ||
2158 | if (test_bit(RAMROD_TX, &p->ramrod_flags)) { | ||
2159 | data->rules[rule_idx].client_id = p->cl_id; | ||
2160 | data->rules[rule_idx].func_id = p->func_id; | ||
2161 | |||
2162 | data->rules[rule_idx].cmd_general_data = | ||
2163 | ETH_FILTER_RULES_CMD_TX_CMD; | ||
2164 | |||
2165 | bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, | ||
2166 | &(data->rules[rule_idx++]), false); | ||
2167 | } | ||
2168 | |||
2169 | /* Rx */ | ||
2170 | if (test_bit(RAMROD_RX, &p->ramrod_flags)) { | ||
2171 | data->rules[rule_idx].client_id = p->cl_id; | ||
2172 | data->rules[rule_idx].func_id = p->func_id; | ||
2173 | |||
2174 | data->rules[rule_idx].cmd_general_data = | ||
2175 | ETH_FILTER_RULES_CMD_RX_CMD; | ||
2176 | |||
2177 | bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, | ||
2178 | &(data->rules[rule_idx++]), false); | ||
2179 | } | ||
2180 | |||
2181 | |||
2182 | /* | ||
2183 | * If FCoE Queue configuration has been requested configure the Rx and | ||
2184 | * internal switching modes for this queue in separate rules. | ||
2185 | * | ||
2186 | * FCoE queue shall never be set to ACCEPT_ALL packets of any sort: | ||
2187 | * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED. | ||
2188 | */ | ||
2189 | if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) { | ||
2190 | /* Tx (internal switching) */ | ||
2191 | if (test_bit(RAMROD_TX, &p->ramrod_flags)) { | ||
2192 | data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); | ||
2193 | data->rules[rule_idx].func_id = p->func_id; | ||
2194 | |||
2195 | data->rules[rule_idx].cmd_general_data = | ||
2196 | ETH_FILTER_RULES_CMD_TX_CMD; | ||
2197 | |||
2198 | bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, | ||
2199 | &(data->rules[rule_idx++]), | ||
2200 | true); | ||
2201 | } | ||
2202 | |||
2203 | /* Rx */ | ||
2204 | if (test_bit(RAMROD_RX, &p->ramrod_flags)) { | ||
2205 | data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); | ||
2206 | data->rules[rule_idx].func_id = p->func_id; | ||
2207 | |||
2208 | data->rules[rule_idx].cmd_general_data = | ||
2209 | ETH_FILTER_RULES_CMD_RX_CMD; | ||
2210 | |||
2211 | bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, | ||
2212 | &(data->rules[rule_idx++]), | ||
2213 | true); | ||
2214 | } | ||
2215 | } | ||
2216 | |||
2217 | /* | ||
2218 | * Set the ramrod header (most importantly - number of rules to | ||
2219 | * configure). | ||
2220 | */ | ||
2221 | bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); | ||
2222 | |||
2223 | DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, " | ||
2224 | "tx_accept_flags 0x%lx\n", | ||
2225 | data->header.rule_cnt, p->rx_accept_flags, | ||
2226 | p->tx_accept_flags); | ||
2227 | |||
2228 | /* Commit writes towards the memory before sending a ramrod */ | ||
2229 | mb(); | ||
2230 | |||
2231 | /* Send a ramrod */ | ||
2232 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid, | ||
2233 | U64_HI(p->rdata_mapping), | ||
2234 | U64_LO(p->rdata_mapping), | ||
2235 | ETH_CONNECTION_TYPE); | ||
2236 | if (rc) | ||
2237 | return rc; | ||
2238 | |||
2239 | /* Ramrod completion is pending */ | ||
2240 | return 1; | ||
2241 | } | ||
2242 | |||
2243 | static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp, | ||
2244 | struct bnx2x_rx_mode_ramrod_params *p) | ||
2245 | { | ||
2246 | return bnx2x_state_wait(bp, p->state, p->pstate); | ||
2247 | } | ||
2248 | |||
2249 | static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp, | ||
2250 | struct bnx2x_rx_mode_ramrod_params *p) | ||
2251 | { | ||
2252 | /* Do nothing */ | ||
2253 | return 0; | ||
2254 | } | ||
2255 | |||
2256 | int bnx2x_config_rx_mode(struct bnx2x *bp, | ||
2257 | struct bnx2x_rx_mode_ramrod_params *p) | ||
2258 | { | ||
2259 | int rc; | ||
2260 | |||
2261 | /* Configure the new classification in the chip */ | ||
2262 | rc = p->rx_mode_obj->config_rx_mode(bp, p); | ||
2263 | if (rc < 0) | ||
2264 | return rc; | ||
2265 | |||
2266 | /* Wait for a ramrod completion if was requested */ | ||
2267 | if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { | ||
2268 | rc = p->rx_mode_obj->wait_comp(bp, p); | ||
2269 | if (rc) | ||
2270 | return rc; | ||
2271 | } | ||
2272 | |||
2273 | return rc; | ||
2274 | } | ||
2275 | |||
2276 | void bnx2x_init_rx_mode_obj(struct bnx2x *bp, | ||
2277 | struct bnx2x_rx_mode_obj *o) | ||
2278 | { | ||
2279 | if (CHIP_IS_E1x(bp)) { | ||
2280 | o->wait_comp = bnx2x_empty_rx_mode_wait; | ||
2281 | o->config_rx_mode = bnx2x_set_rx_mode_e1x; | ||
2282 | } else { | ||
2283 | o->wait_comp = bnx2x_wait_rx_mode_comp_e2; | ||
2284 | o->config_rx_mode = bnx2x_set_rx_mode_e2; | ||
2285 | } | ||
2286 | } | ||
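/*
 * An illustrative sketch of a caller of bnx2x_config_rx_mode(): request a
 * "normal" Rx filtering mode (matched unicast/multicast plus broadcast) and
 * wait for the ramrod where applicable. The helper name is hypothetical;
 * filling cl_id/cid/func_id, the rdata buffer and the state/pstate tracking
 * is chip- and caller-specific and is assumed to be done in the omitted part.
 */
static int bnx2x_set_normal_rx_mode_sketch(struct bnx2x *bp,
					   struct bnx2x_rx_mode_obj *o)
{
	struct bnx2x_rx_mode_ramrod_params p;

	memset(&p, 0, sizeof(p));

	p.rx_mode_obj = o;
	/* ... cl_id, cid, func_id, rdata, rdata_mapping, state, pstate ... */

	__set_bit(BNX2X_ACCEPT_UNICAST, &p.rx_accept_flags);
	__set_bit(BNX2X_ACCEPT_MULTICAST, &p.rx_accept_flags);
	__set_bit(BNX2X_ACCEPT_BROADCAST, &p.rx_accept_flags);

	__set_bit(RAMROD_RX, &p.ramrod_flags);
	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);

	return bnx2x_config_rx_mode(bp, &p);
}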
2287 | |||
2288 | /********************* Multicast verbs: SET, CLEAR ****************************/ | ||
2289 | static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac) | ||
2290 | { | ||
2291 | return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff; | ||
2292 | } | ||
2293 | |||
2294 | struct bnx2x_mcast_mac_elem { | ||
2295 | struct list_head link; | ||
2296 | u8 mac[ETH_ALEN]; | ||
2297 | u8 pad[2]; /* For a natural alignment of the following buffer */ | ||
2298 | }; | ||
2299 | |||
2300 | struct bnx2x_pending_mcast_cmd { | ||
2301 | struct list_head link; | ||
2302 | int type; /* BNX2X_MCAST_CMD_X */ | ||
2303 | union { | ||
2304 | struct list_head macs_head; | ||
2305 | u32 macs_num; /* Needed for DEL command */ | ||
2306 | int next_bin; /* Needed for RESTORE flow with aprox match */ | ||
2307 | } data; | ||
2308 | |||
2309 | bool done; /* set to true, when the command has been handled, | ||
2310 | * practically used in 57712 handling only, where one pending | ||
2311 | * command may be handled in a few operations. As long as for | ||
2312 | * other chips every operation handling is completed in a | ||
2313 | * single ramrod, there is no need to utilize this field. | ||
2314 | */ | ||
2315 | }; | ||
2316 | |||
2317 | static int bnx2x_mcast_wait(struct bnx2x *bp, | ||
2318 | struct bnx2x_mcast_obj *o) | ||
2319 | { | ||
2320 | if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) || | ||
2321 | o->raw.wait_comp(bp, &o->raw)) | ||
2322 | return -EBUSY; | ||
2323 | |||
2324 | return 0; | ||
2325 | } | ||
2326 | |||
2327 | static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, | ||
2328 | struct bnx2x_mcast_obj *o, | ||
2329 | struct bnx2x_mcast_ramrod_params *p, | ||
2330 | int cmd) | ||
2331 | { | ||
2332 | int total_sz; | ||
2333 | struct bnx2x_pending_mcast_cmd *new_cmd; | ||
2334 | struct bnx2x_mcast_mac_elem *cur_mac = NULL; | ||
2335 | struct bnx2x_mcast_list_elem *pos; | ||
2336 | int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ? | ||
2337 | p->mcast_list_len : 0); | ||
2338 | |||
2339 | /* If the command is empty ("handle pending commands only"), break */ | ||
2340 | if (!p->mcast_list_len) | ||
2341 | return 0; | ||
2342 | |||
2343 | total_sz = sizeof(*new_cmd) + | ||
2344 | macs_list_len * sizeof(struct bnx2x_mcast_mac_elem); | ||
2345 | |||
2346 | /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ | ||
2347 | new_cmd = kzalloc(total_sz, GFP_ATOMIC); | ||
2348 | |||
2349 | if (!new_cmd) | ||
2350 | return -ENOMEM; | ||
2351 | |||
2352 | DP(BNX2X_MSG_SP, "About to enqueue a new %d command. " | ||
2353 | "macs_list_len=%d\n", cmd, macs_list_len); | ||
2354 | |||
2355 | INIT_LIST_HEAD(&new_cmd->data.macs_head); | ||
2356 | |||
2357 | new_cmd->type = cmd; | ||
2358 | new_cmd->done = false; | ||
2359 | |||
2360 | switch (cmd) { | ||
2361 | case BNX2X_MCAST_CMD_ADD: | ||
2362 | cur_mac = (struct bnx2x_mcast_mac_elem *) | ||
2363 | ((u8 *)new_cmd + sizeof(*new_cmd)); | ||
2364 | |||
2365 | /* Push the MACs of the current command into the pending command | ||
2366 | * MACs list: FIFO | ||
2367 | */ | ||
2368 | list_for_each_entry(pos, &p->mcast_list, link) { | ||
2369 | memcpy(cur_mac->mac, pos->mac, ETH_ALEN); | ||
2370 | list_add_tail(&cur_mac->link, &new_cmd->data.macs_head); | ||
2371 | cur_mac++; | ||
2372 | } | ||
2373 | |||
2374 | break; | ||
2375 | |||
2376 | case BNX2X_MCAST_CMD_DEL: | ||
2377 | new_cmd->data.macs_num = p->mcast_list_len; | ||
2378 | break; | ||
2379 | |||
2380 | case BNX2X_MCAST_CMD_RESTORE: | ||
2381 | new_cmd->data.next_bin = 0; | ||
2382 | break; | ||
2383 | |||
2384 | default: | ||
2385 | BNX2X_ERR("Unknown command: %d\n", cmd); | ||
2386 | return -EINVAL; | ||
2387 | } | ||
2388 | |||
2389 | /* Push the new pending command to the tail of the pending list: FIFO */ | ||
2390 | list_add_tail(&new_cmd->link, &o->pending_cmds_head); | ||
2391 | |||
2392 | o->set_sched(o); | ||
2393 | |||
2394 | return 1; | ||
2395 | } | ||
2396 | |||
2397 | /** | ||
2398 | * bnx2x_mcast_get_next_bin - get the next set bin (index) | ||
2399 | * | ||
2400 |  * @o: multicast object whose approximate-match registry is scanned | ||
2401 | * @last: index to start looking from (including) | ||
2402 | * | ||
2403 | * returns the next found (set) bin or a negative value if none is found. | ||
2404 | */ | ||
2405 | static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last) | ||
2406 | { | ||
2407 | int i, j, inner_start = last % BIT_VEC64_ELEM_SZ; | ||
2408 | |||
2409 | for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) { | ||
2410 | if (o->registry.aprox_match.vec[i]) | ||
2411 | for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) { | ||
2412 | int cur_bit = j + BIT_VEC64_ELEM_SZ * i; | ||
2413 | if (BIT_VEC64_TEST_BIT(o->registry.aprox_match. | ||
2414 | vec, cur_bit)) { | ||
2415 | return cur_bit; | ||
2416 | } | ||
2417 | } | ||
2418 | inner_start = 0; | ||
2419 | } | ||
2420 | |||
2421 | /* None found */ | ||
2422 | return -1; | ||
2423 | } | ||
2424 | |||
2425 | /** | ||
2426 | * bnx2x_mcast_clear_first_bin - find the first set bin and clear it | ||
2427 | * | ||
2428 |  * @o: multicast object | ||
2429 | * | ||
2430 | * returns the index of the found bin or -1 if none is found | ||
2431 | */ | ||
2432 | static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o) | ||
2433 | { | ||
2434 | int cur_bit = bnx2x_mcast_get_next_bin(o, 0); | ||
2435 | |||
2436 | if (cur_bit >= 0) | ||
2437 | BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); | ||
2438 | |||
2439 | return cur_bit; | ||
2440 | } | ||
2441 | |||
2442 | static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o) | ||
2443 | { | ||
2444 | struct bnx2x_raw_obj *raw = &o->raw; | ||
2445 | u8 rx_tx_flag = 0; | ||
2446 | |||
2447 | if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || | ||
2448 | (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) | ||
2449 | rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; | ||
2450 | |||
2451 | if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || | ||
2452 | (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) | ||
2453 | rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; | ||
2454 | |||
2455 | return rx_tx_flag; | ||
2456 | } | ||
2457 | |||
2458 | static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, | ||
2459 | struct bnx2x_mcast_obj *o, int idx, | ||
2460 | union bnx2x_mcast_config_data *cfg_data, | ||
2461 | int cmd) | ||
2462 | { | ||
2463 | struct bnx2x_raw_obj *r = &o->raw; | ||
2464 | struct eth_multicast_rules_ramrod_data *data = | ||
2465 | (struct eth_multicast_rules_ramrod_data *)(r->rdata); | ||
2466 | u8 func_id = r->func_id; | ||
2467 | u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o); | ||
2468 | int bin; | ||
2469 | |||
2470 | if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) | ||
2471 | rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; | ||
2472 | |||
2473 | data->rules[idx].cmd_general_data |= rx_tx_add_flag; | ||
2474 | |||
2475 | /* Get a bin and update a bins' vector */ | ||
2476 | switch (cmd) { | ||
2477 | case BNX2X_MCAST_CMD_ADD: | ||
2478 | bin = bnx2x_mcast_bin_from_mac(cfg_data->mac); | ||
2479 | BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); | ||
2480 | break; | ||
2481 | |||
2482 | case BNX2X_MCAST_CMD_DEL: | ||
2483 | /* If there are no more bins to clear | ||
2484 | * (bnx2x_mcast_clear_first_bin() returns -1) then we end up | ||
2485 | * clearing a dummy (0xff) bin. | ||
2486 | * See bnx2x_mcast_validate_e2() for an explanation of when this | ||
2487 | * may happen. | ||
2488 | */ | ||
2489 | bin = bnx2x_mcast_clear_first_bin(o); | ||
2490 | break; | ||
2491 | |||
2492 | case BNX2X_MCAST_CMD_RESTORE: | ||
2493 | bin = cfg_data->bin; | ||
2494 | break; | ||
2495 | |||
2496 | default: | ||
2497 | BNX2X_ERR("Unknown command: %d\n", cmd); | ||
2498 | return; | ||
2499 | } | ||
2500 | |||
2501 | DP(BNX2X_MSG_SP, "%s bin %d\n", | ||
2502 | ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ? | ||
2503 | "Setting" : "Clearing"), bin); | ||
2504 | |||
2505 | data->rules[idx].bin_id = (u8)bin; | ||
2506 | data->rules[idx].func_id = func_id; | ||
2507 | data->rules[idx].engine_id = o->engine_id; | ||
2508 | } | ||
2509 | |||
2510 | /** | ||
2511 | * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry | ||
2512 | * | ||
2513 | * @bp: device handle | ||
2514 | * @o: | ||
2515 | * @start_bin: index in the registry to start from (including) | ||
2516 | * @rdata_idx: index in the ramrod data to start from | ||
2517 | * | ||
2518 | * returns last handled bin index or -1 if all bins have been handled | ||
2519 | */ | ||
2520 | static inline int bnx2x_mcast_handle_restore_cmd_e2( | ||
2521 | struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin, | ||
2522 | int *rdata_idx) | ||
2523 | { | ||
2524 | int cur_bin, cnt = *rdata_idx; | ||
2525 | union bnx2x_mcast_config_data cfg_data = {0}; | ||
2526 | |||
2527 | /* go through the registry and configure the bins from it */ | ||
2528 | for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0; | ||
2529 | cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) { | ||
2530 | |||
2531 | cfg_data.bin = (u8)cur_bin; | ||
2532 | o->set_one_rule(bp, o, cnt, &cfg_data, | ||
2533 | BNX2X_MCAST_CMD_RESTORE); | ||
2534 | |||
2535 | cnt++; | ||
2536 | |||
2537 | DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin); | ||
2538 | |||
2539 | /* Break if we reached the maximum number | ||
2540 | * of rules. | ||
2541 | */ | ||
2542 | if (cnt >= o->max_cmd_len) | ||
2543 | break; | ||
2544 | } | ||
2545 | |||
2546 | *rdata_idx = cnt; | ||
2547 | |||
2548 | return cur_bin; | ||
2549 | } | ||
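/* Editor's note (not part of the driver): the return value makes the restore
 * resumable - when the ramrod fills up before all bins are handled, the last
 * handled bin index is stored in the pending command and the next pass
 * continues from that bin + 1 (see bnx2x_mcast_hdl_pending_restore_e2()
 * below).
 */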
2550 | |||
2551 | static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, | ||
2552 | struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, | ||
2553 | int *line_idx) | ||
2554 | { | ||
2555 | struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; | ||
2556 | int cnt = *line_idx; | ||
2557 | union bnx2x_mcast_config_data cfg_data = {0}; | ||
2558 | |||
2559 | list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, | ||
2560 | link) { | ||
2561 | |||
2562 | cfg_data.mac = &pmac_pos->mac[0]; | ||
2563 | o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); | ||
2564 | |||
2565 | cnt++; | ||
2566 | |||
2567 | DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT | ||
2568 | " mcast MAC\n", | ||
2569 | BNX2X_MAC_PRN_LIST(pmac_pos->mac)); | ||
2570 | |||
2571 | list_del(&pmac_pos->link); | ||
2572 | |||
2573 | /* Break if we reached the maximum number | ||
2574 | * of rules. | ||
2575 | */ | ||
2576 | if (cnt >= o->max_cmd_len) | ||
2577 | break; | ||
2578 | } | ||
2579 | |||
2580 | *line_idx = cnt; | ||
2581 | |||
2582 | /* if no more MACs to configure - we are done */ | ||
2583 | if (list_empty(&cmd_pos->data.macs_head)) | ||
2584 | cmd_pos->done = true; | ||
2585 | } | ||
2586 | |||
2587 | static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp, | ||
2588 | struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, | ||
2589 | int *line_idx) | ||
2590 | { | ||
2591 | int cnt = *line_idx; | ||
2592 | |||
2593 | while (cmd_pos->data.macs_num) { | ||
2594 | o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type); | ||
2595 | |||
2596 | cnt++; | ||
2597 | |||
2598 | cmd_pos->data.macs_num--; | ||
2599 | |||
2600 | DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n", | ||
2601 | cmd_pos->data.macs_num, cnt); | ||
2602 | |||
2603 | /* Break if we reached the maximum | ||
2604 | * number of rules. | ||
2605 | */ | ||
2606 | if (cnt >= o->max_cmd_len) | ||
2607 | break; | ||
2608 | } | ||
2609 | |||
2610 | *line_idx = cnt; | ||
2611 | |||
2612 | /* If we cleared all bins - we are done */ | ||
2613 | if (!cmd_pos->data.macs_num) | ||
2614 | cmd_pos->done = true; | ||
2615 | } | ||
2616 | |||
2617 | static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp, | ||
2618 | struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, | ||
2619 | int *line_idx) | ||
2620 | { | ||
2621 | cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin, | ||
2622 | line_idx); | ||
2623 | |||
2624 | if (cmd_pos->data.next_bin < 0) | ||
2625 | /* If o->set_restore returned -1 we are done */ | ||
2626 | cmd_pos->done = true; | ||
2627 | else | ||
2628 | /* Start from the next bin next time */ | ||
2629 | cmd_pos->data.next_bin++; | ||
2630 | } | ||
2631 | |||
2632 | static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, | ||
2633 | struct bnx2x_mcast_ramrod_params *p) | ||
2634 | { | ||
2635 | struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n; | ||
2636 | int cnt = 0; | ||
2637 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
2638 | |||
2639 | list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head, | ||
2640 | link) { | ||
2641 | switch (cmd_pos->type) { | ||
2642 | case BNX2X_MCAST_CMD_ADD: | ||
2643 | bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt); | ||
2644 | break; | ||
2645 | |||
2646 | case BNX2X_MCAST_CMD_DEL: | ||
2647 | bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt); | ||
2648 | break; | ||
2649 | |||
2650 | case BNX2X_MCAST_CMD_RESTORE: | ||
2651 | bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos, | ||
2652 | &cnt); | ||
2653 | break; | ||
2654 | |||
2655 | default: | ||
2656 | BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); | ||
2657 | return -EINVAL; | ||
2658 | } | ||
2659 | |||
2660 | /* If the command has been completed - remove it from the list | ||
2661 | * and free the memory | ||
2662 | */ | ||
2663 | if (cmd_pos->done) { | ||
2664 | list_del(&cmd_pos->link); | ||
2665 | kfree(cmd_pos); | ||
2666 | } | ||
2667 | |||
2668 | /* Break if we reached the maximum number of rules */ | ||
2669 | if (cnt >= o->max_cmd_len) | ||
2670 | break; | ||
2671 | } | ||
2672 | |||
2673 | return cnt; | ||
2674 | } | ||
2675 | |||
2676 | static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp, | ||
2677 | struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, | ||
2678 | int *line_idx) | ||
2679 | { | ||
2680 | struct bnx2x_mcast_list_elem *mlist_pos; | ||
2681 | union bnx2x_mcast_config_data cfg_data = {0}; | ||
2682 | int cnt = *line_idx; | ||
2683 | |||
2684 | list_for_each_entry(mlist_pos, &p->mcast_list, link) { | ||
2685 | cfg_data.mac = mlist_pos->mac; | ||
2686 | o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD); | ||
2687 | |||
2688 | cnt++; | ||
2689 | |||
2690 | DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT | ||
2691 | " mcast MAC\n", | ||
2692 | BNX2X_MAC_PRN_LIST(mlist_pos->mac)); | ||
2693 | } | ||
2694 | |||
2695 | *line_idx = cnt; | ||
2696 | } | ||
2697 | |||
2698 | static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp, | ||
2699 | struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, | ||
2700 | int *line_idx) | ||
2701 | { | ||
2702 | int cnt = *line_idx, i; | ||
2703 | |||
2704 | for (i = 0; i < p->mcast_list_len; i++) { | ||
2705 | o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL); | ||
2706 | |||
2707 | cnt++; | ||
2708 | |||
2709 | DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n", | ||
2710 | p->mcast_list_len - i - 1); | ||
2711 | } | ||
2712 | |||
2713 | *line_idx = cnt; | ||
2714 | } | ||
2715 | |||
2716 | /** | ||
2717 | * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command | ||
2718 | * | ||
2719 | * @bp: device handle | ||
2720 | * @p: | ||
2721 | * @cmd: | ||
2722 | * @start_cnt: first line in the ramrod data that may be used | ||
2723 | * | ||
2724 | * This function is called iff there is enough room for the current command in | ||
2725 | * the ramrod data. | ||
2726 | * Returns number of lines filled in the ramrod data in total. | ||
2727 | */ | ||
2728 | static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp, | ||
2729 | struct bnx2x_mcast_ramrod_params *p, int cmd, | ||
2730 | int start_cnt) | ||
2731 | { | ||
2732 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
2733 | int cnt = start_cnt; | ||
2734 | |||
2735 | DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len); | ||
2736 | |||
2737 | switch (cmd) { | ||
2738 | case BNX2X_MCAST_CMD_ADD: | ||
2739 | bnx2x_mcast_hdl_add(bp, o, p, &cnt); | ||
2740 | break; | ||
2741 | |||
2742 | case BNX2X_MCAST_CMD_DEL: | ||
2743 | bnx2x_mcast_hdl_del(bp, o, p, &cnt); | ||
2744 | break; | ||
2745 | |||
2746 | case BNX2X_MCAST_CMD_RESTORE: | ||
2747 | o->hdl_restore(bp, o, 0, &cnt); | ||
2748 | break; | ||
2749 | |||
2750 | default: | ||
2751 | BNX2X_ERR("Unknown command: %d\n", cmd); | ||
2752 | return -EINVAL; | ||
2753 | } | ||
2754 | |||
2755 | /* The current command has been handled */ | ||
2756 | p->mcast_list_len = 0; | ||
2757 | |||
2758 | return cnt; | ||
2759 | } | ||
2760 | |||
2761 | static int bnx2x_mcast_validate_e2(struct bnx2x *bp, | ||
2762 | struct bnx2x_mcast_ramrod_params *p, | ||
2763 | int cmd) | ||
2764 | { | ||
2765 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
2766 | int reg_sz = o->get_registry_size(o); | ||
2767 | |||
2768 | switch (cmd) { | ||
2769 | /* DEL command deletes all currently configured MACs */ | ||
2770 | case BNX2X_MCAST_CMD_DEL: | ||
2771 | o->set_registry_size(o, 0); | ||
2772 | /* Don't break */ | ||
2773 | |||
2774 | /* RESTORE command will restore the entire multicast configuration */ | ||
2775 | case BNX2X_MCAST_CMD_RESTORE: | ||
2776 | /* Here we set the approximate amount of work to do; in fact it | ||
2777 | * may turn out to be less, since some MACs in postponed ADD | ||
2778 | * command(s) scheduled before this command may fall into | ||
2779 | * the same bin, so the actual number of bins set in the | ||
2780 | * registry would be smaller than we estimate here. See | ||
2781 | * bnx2x_mcast_set_one_rule_e2() for further details. | ||
2782 | */ | ||
2783 | p->mcast_list_len = reg_sz; | ||
2784 | break; | ||
2785 | |||
2786 | case BNX2X_MCAST_CMD_ADD: | ||
2787 | case BNX2X_MCAST_CMD_CONT: | ||
2788 | /* Here we assume that all new MACs will fall into new bins. | ||
2789 | * However we will correct the real registry size after we | ||
2790 | * handle all pending commands. | ||
2791 | */ | ||
2792 | o->set_registry_size(o, reg_sz + p->mcast_list_len); | ||
2793 | break; | ||
2794 | |||
2795 | default: | ||
2796 | BNX2X_ERR("Unknown command: %d\n", cmd); | ||
2797 | return -EINVAL; | ||
2798 | |||
2799 | } | ||
2800 | |||
2801 | /* Increase the total number of MACs pending to be configured */ | ||
2802 | o->total_pending_num += p->mcast_list_len; | ||
2803 | |||
2804 | return 0; | ||
2805 | } | ||
2806 | |||
2807 | static void bnx2x_mcast_revert_e2(struct bnx2x *bp, | ||
2808 | struct bnx2x_mcast_ramrod_params *p, | ||
2809 | int old_num_bins) | ||
2810 | { | ||
2811 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
2812 | |||
2813 | o->set_registry_size(o, old_num_bins); | ||
2814 | o->total_pending_num -= p->mcast_list_len; | ||
2815 | } | ||
2816 | |||
2817 | /** | ||
2818 | * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values | ||
2819 | * | ||
2820 | * @bp: device handle | ||
2821 | * @p: | ||
2822 | * @len: number of rules to handle | ||
2823 | */ | ||
2824 | static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp, | ||
2825 | struct bnx2x_mcast_ramrod_params *p, | ||
2826 | u8 len) | ||
2827 | { | ||
2828 | struct bnx2x_raw_obj *r = &p->mcast_obj->raw; | ||
2829 | struct eth_multicast_rules_ramrod_data *data = | ||
2830 | (struct eth_multicast_rules_ramrod_data *)(r->rdata); | ||
2831 | |||
2832 | data->header.echo = ((r->cid & BNX2X_SWCID_MASK) | | ||
2833 | (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); | ||
2834 | data->header.rule_cnt = len; | ||
2835 | } | ||
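/* Editor's note - an illustrative sketch, not part of the driver: the echo
 * field packs the software CID in the low bits and the pending-state code in
 * the bits above BNX2X_SWCID_SHIFT, so a completion handler could
 * (hypothetically) recover both from the single value, e.g.
 * cid = echo & BNX2X_SWCID_MASK; state = echo >> BNX2X_SWCID_SHIFT;
 */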
2836 | |||
2837 | /** | ||
2838 | * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins | ||
2839 | * | ||
2840 | * @bp: device handle | ||
2841 | * @o: | ||
2842 | * | ||
2843 | * Recalculate the actual number of set bins in the registry using Brian | ||
2844 | * Kernighan's algorithm: its execution complexity is proportional to the number of set bins. | ||
2845 | * | ||
2846 | * returns 0 for compliance with bnx2x_mcast_refresh_registry_e1(). | ||
2847 | */ | ||
2848 | static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp, | ||
2849 | struct bnx2x_mcast_obj *o) | ||
2850 | { | ||
2851 | int i, cnt = 0; | ||
2852 | u64 elem; | ||
2853 | |||
2854 | for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) { | ||
2855 | elem = o->registry.aprox_match.vec[i]; | ||
2856 | for (; elem; cnt++) | ||
2857 | elem &= elem - 1; | ||
2858 | } | ||
2859 | |||
2860 | o->set_registry_size(o, cnt); | ||
2861 | |||
2862 | return 0; | ||
2863 | } | ||
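/* Editor's note - an illustrative sketch, not part of the driver: each
 * "elem &= elem - 1" step above clears the lowest set bit, so the inner loop
 * iterates once per set bit rather than once per bit position. For a
 * hypothetical elem = 0b10110 the sequence is 0b10110 -> 0b10100 -> 0b10000
 * -> 0, giving cnt += 3.
 */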
2864 | |||
2865 | static int bnx2x_mcast_setup_e2(struct bnx2x *bp, | ||
2866 | struct bnx2x_mcast_ramrod_params *p, | ||
2867 | int cmd) | ||
2868 | { | ||
2869 | struct bnx2x_raw_obj *raw = &p->mcast_obj->raw; | ||
2870 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
2871 | struct eth_multicast_rules_ramrod_data *data = | ||
2872 | (struct eth_multicast_rules_ramrod_data *)(raw->rdata); | ||
2873 | int cnt = 0, rc; | ||
2874 | |||
2875 | /* Reset the ramrod data buffer */ | ||
2876 | memset(data, 0, sizeof(*data)); | ||
2877 | |||
2878 | cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p); | ||
2879 | |||
2880 | /* If there are no more pending commands - clear SCHEDULED state */ | ||
2881 | if (list_empty(&o->pending_cmds_head)) | ||
2882 | o->clear_sched(o); | ||
2883 | |||
2884 | /* The below may be true iff there was enough room in ramrod | ||
2885 | * data for all pending commands and for the current | ||
2886 | * command. Otherwise the current command would have been added | ||
2887 | * to the pending commands and p->mcast_list_len would have been | ||
2888 | * zeroed. | ||
2889 | */ | ||
2890 | if (p->mcast_list_len > 0) | ||
2891 | cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt); | ||
2892 | |||
2893 | /* We've pulled out some MACs - update the total number of | ||
2894 | * outstanding. | ||
2895 | */ | ||
2896 | o->total_pending_num -= cnt; | ||
2897 | |||
2898 | /* send a ramrod */ | ||
2899 | WARN_ON(o->total_pending_num < 0); | ||
2900 | WARN_ON(cnt > o->max_cmd_len); | ||
2901 | |||
2902 | bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt); | ||
2903 | |||
2904 | /* Update a registry size if there are no more pending operations. | ||
2905 | * | ||
2906 | * We don't want to change the value of the registry size if there are | ||
2907 | * pending operations because we want it to always be equal to the | ||
2908 | * exact or the approximate number (see bnx2x_mcast_validate_e2()) of | ||
2909 | * set bins after the last requested operation in order to properly | ||
2910 | * evaluate the size of the next DEL/RESTORE operation. | ||
2911 | * | ||
2912 | * Note that we update the registry itself during command(s) handling | ||
2913 | * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we | ||
2914 | * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but | ||
2915 | * with a limited amount of update commands (per MAC/bin) and we don't | ||
2916 | * know in this scope what the actual state of bins configuration is | ||
2917 | * going to be after this ramrod. | ||
2918 | */ | ||
2919 | if (!o->total_pending_num) | ||
2920 | bnx2x_mcast_refresh_registry_e2(bp, o); | ||
2921 | |||
2922 | /* Commit writes towards the memory before sending a ramrod */ | ||
2923 | mb(); | ||
2924 | |||
2925 | /* If CLEAR_ONLY was requested - don't send a ramrod and clear | ||
2926 | * RAMROD_PENDING status immediately. | ||
2927 | */ | ||
2928 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { | ||
2929 | raw->clear_pending(raw); | ||
2930 | return 0; | ||
2931 | } else { | ||
2932 | /* Send a ramrod */ | ||
2933 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES, | ||
2934 | raw->cid, U64_HI(raw->rdata_mapping), | ||
2935 | U64_LO(raw->rdata_mapping), | ||
2936 | ETH_CONNECTION_TYPE); | ||
2937 | if (rc) | ||
2938 | return rc; | ||
2939 | |||
2940 | /* Ramrod completion is pending */ | ||
2941 | return 1; | ||
2942 | } | ||
2943 | } | ||
2944 | |||
2945 | static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, | ||
2946 | struct bnx2x_mcast_ramrod_params *p, | ||
2947 | int cmd) | ||
2948 | { | ||
2949 | /* Mark that there is work to do */ | ||
2950 | if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE)) | ||
2951 | p->mcast_list_len = 1; | ||
2952 | |||
2953 | return 0; | ||
2954 | } | ||
2955 | |||
2956 | static void bnx2x_mcast_revert_e1h(struct bnx2x *bp, | ||
2957 | struct bnx2x_mcast_ramrod_params *p, | ||
2958 | int old_num_bins) | ||
2959 | { | ||
2960 | /* Do nothing */ | ||
2961 | } | ||
2962 | |||
2963 | #define BNX2X_57711_SET_MC_FILTER(filter, bit) \ | ||
2964 | do { \ | ||
2965 | (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \ | ||
2966 | } while (0) | ||
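/* Editor's note - an illustrative sketch, not part of the driver: the macro
 * above treats the filter as an array of 32-bit words, so bit N maps to
 * word N >> 5 and bit N & 0x1f within it. For a hypothetical bin 69 this
 * sets bit 5 of filter[2] (69 >> 5 == 2, 69 & 0x1f == 5).
 */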
2967 | |||
2968 | static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp, | ||
2969 | struct bnx2x_mcast_obj *o, | ||
2970 | struct bnx2x_mcast_ramrod_params *p, | ||
2971 | u32 *mc_filter) | ||
2972 | { | ||
2973 | struct bnx2x_mcast_list_elem *mlist_pos; | ||
2974 | int bit; | ||
2975 | |||
2976 | list_for_each_entry(mlist_pos, &p->mcast_list, link) { | ||
2977 | bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac); | ||
2978 | BNX2X_57711_SET_MC_FILTER(mc_filter, bit); | ||
2979 | |||
2980 | DP(BNX2X_MSG_SP, "About to configure " | ||
2981 | BNX2X_MAC_FMT" mcast MAC, bin %d\n", | ||
2982 | BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit); | ||
2983 | |||
2984 | /* bookkeeping... */ | ||
2985 | BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, | ||
2986 | bit); | ||
2987 | } | ||
2988 | } | ||
2989 | |||
2990 | static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, | ||
2991 | struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, | ||
2992 | u32 *mc_filter) | ||
2993 | { | ||
2994 | int bit; | ||
2995 | |||
2996 | for (bit = bnx2x_mcast_get_next_bin(o, 0); | ||
2997 | bit >= 0; | ||
2998 | bit = bnx2x_mcast_get_next_bin(o, bit + 1)) { | ||
2999 | BNX2X_57711_SET_MC_FILTER(mc_filter, bit); | ||
3000 | DP(BNX2X_MSG_SP, "About to set bin %d\n", bit); | ||
3001 | } | ||
3002 | } | ||
3003 | |||
3004 | /* On 57711 we write the multicast MACs' approximate match | ||
3005 | * table directly into the TSTORM's internal RAM. So we don't | ||
3006 | * really need any tricks to make it work. | ||
3007 | */ | ||
3008 | static int bnx2x_mcast_setup_e1h(struct bnx2x *bp, | ||
3009 | struct bnx2x_mcast_ramrod_params *p, | ||
3010 | int cmd) | ||
3011 | { | ||
3012 | int i; | ||
3013 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
3014 | struct bnx2x_raw_obj *r = &o->raw; | ||
3015 | |||
3016 | /* If CLEAR_ONLY has been requested - only clear the registry | ||
3017 | * and the pending bit; otherwise program the mcast filter first. | ||
3018 | */ | ||
3019 | if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { | ||
3020 | u32 mc_filter[MC_HASH_SIZE] = {0}; | ||
3021 | |||
3022 | /* Set the multicast filter bits before writing it into | ||
3023 | * the internal memory. | ||
3024 | */ | ||
3025 | switch (cmd) { | ||
3026 | case BNX2X_MCAST_CMD_ADD: | ||
3027 | bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter); | ||
3028 | break; | ||
3029 | |||
3030 | case BNX2X_MCAST_CMD_DEL: | ||
3031 | DP(BNX2X_MSG_SP, "Invalidating multicast " | ||
3032 | "MACs configuration\n"); | ||
3033 | |||
3034 | /* clear the registry */ | ||
3035 | memset(o->registry.aprox_match.vec, 0, | ||
3036 | sizeof(o->registry.aprox_match.vec)); | ||
3037 | break; | ||
3038 | |||
3039 | case BNX2X_MCAST_CMD_RESTORE: | ||
3040 | bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter); | ||
3041 | break; | ||
3042 | |||
3043 | default: | ||
3044 | BNX2X_ERR("Unknown command: %d\n", cmd); | ||
3045 | return -EINVAL; | ||
3046 | } | ||
3047 | |||
3048 | /* Set the mcast filter in the internal memory */ | ||
3049 | for (i = 0; i < MC_HASH_SIZE; i++) | ||
3050 | REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]); | ||
3051 | } else | ||
3052 | /* clear the registry */ | ||
3053 | memset(o->registry.aprox_match.vec, 0, | ||
3054 | sizeof(o->registry.aprox_match.vec)); | ||
3055 | |||
3056 | /* We are done */ | ||
3057 | r->clear_pending(r); | ||
3058 | |||
3059 | return 0; | ||
3060 | } | ||
3061 | |||
3062 | static int bnx2x_mcast_validate_e1(struct bnx2x *bp, | ||
3063 | struct bnx2x_mcast_ramrod_params *p, | ||
3064 | int cmd) | ||
3065 | { | ||
3066 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
3067 | int reg_sz = o->get_registry_size(o); | ||
3068 | |||
3069 | switch (cmd) { | ||
3070 | /* DEL command deletes all currently configured MACs */ | ||
3071 | case BNX2X_MCAST_CMD_DEL: | ||
3072 | o->set_registry_size(o, 0); | ||
3073 | /* Don't break */ | ||
3074 | |||
3075 | /* RESTORE command will restore the entire multicast configuration */ | ||
3076 | case BNX2X_MCAST_CMD_RESTORE: | ||
3077 | p->mcast_list_len = reg_sz; | ||
3078 | DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n", | ||
3079 | cmd, p->mcast_list_len); | ||
3080 | break; | ||
3081 | |||
3082 | case BNX2X_MCAST_CMD_ADD: | ||
3083 | case BNX2X_MCAST_CMD_CONT: | ||
3084 | /* Multicast MACs on 57710 are configured as unicast MACs and | ||
3085 | * there is only a limited number of CAM entries for that | ||
3086 | * matter. | ||
3087 | */ | ||
3088 | if (p->mcast_list_len > o->max_cmd_len) { | ||
3089 | BNX2X_ERR("Can't configure more than %d multicast MACs" | ||
3090 | "on 57710\n", o->max_cmd_len); | ||
3091 | return -EINVAL; | ||
3092 | } | ||
3093 | /* Every configured MAC should be cleared if DEL command is | ||
3094 | * called. Only the last ADD command is relevant, since | ||
3095 | * every ADD command overrides the previous configuration. | ||
3096 | */ | ||
3097 | DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len); | ||
3098 | if (p->mcast_list_len > 0) | ||
3099 | o->set_registry_size(o, p->mcast_list_len); | ||
3100 | |||
3101 | break; | ||
3102 | |||
3103 | default: | ||
3104 | BNX2X_ERR("Unknown command: %d\n", cmd); | ||
3105 | return -EINVAL; | ||
3106 | |||
3107 | } | ||
3108 | |||
3109 | /* We want to ensure that commands are executed one by one for 57710. | ||
3110 | * Therefore each non-empty command will consume o->max_cmd_len. | ||
3111 | */ | ||
3112 | if (p->mcast_list_len) | ||
3113 | o->total_pending_num += o->max_cmd_len; | ||
3114 | |||
3115 | return 0; | ||
3116 | } | ||
3117 | |||
3118 | static void bnx2x_mcast_revert_e1(struct bnx2x *bp, | ||
3119 | struct bnx2x_mcast_ramrod_params *p, | ||
3120 | int old_num_macs) | ||
3121 | { | ||
3122 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
3123 | |||
3124 | o->set_registry_size(o, old_num_macs); | ||
3125 | |||
3126 | /* If the current command hasn't been handled yet and we are | ||
3127 | * here, it means that it is meant to be dropped and we have to | ||
3128 | * update the number of outstanding MACs accordingly. | ||
3129 | */ | ||
3130 | if (p->mcast_list_len) | ||
3131 | o->total_pending_num -= o->max_cmd_len; | ||
3132 | } | ||
3133 | |||
3134 | static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp, | ||
3135 | struct bnx2x_mcast_obj *o, int idx, | ||
3136 | union bnx2x_mcast_config_data *cfg_data, | ||
3137 | int cmd) | ||
3138 | { | ||
3139 | struct bnx2x_raw_obj *r = &o->raw; | ||
3140 | struct mac_configuration_cmd *data = | ||
3141 | (struct mac_configuration_cmd *)(r->rdata); | ||
3142 | |||
3143 | /* copy mac */ | ||
3144 | if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) { | ||
3145 | bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr, | ||
3146 | &data->config_table[idx].middle_mac_addr, | ||
3147 | &data->config_table[idx].lsb_mac_addr, | ||
3148 | cfg_data->mac); | ||
3149 | |||
3150 | data->config_table[idx].vlan_id = 0; | ||
3151 | data->config_table[idx].pf_id = r->func_id; | ||
3152 | data->config_table[idx].clients_bit_vector = | ||
3153 | cpu_to_le32(1 << r->cl_id); | ||
3154 | |||
3155 | SET_FLAG(data->config_table[idx].flags, | ||
3156 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
3157 | T_ETH_MAC_COMMAND_SET); | ||
3158 | } | ||
3159 | } | ||
3160 | |||
3161 | /** | ||
3162 | * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd | ||
3163 | * | ||
3164 | * @bp: device handle | ||
3165 | * @p: | ||
3166 | * @len: number of rules to handle | ||
3167 | */ | ||
3168 | static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp, | ||
3169 | struct bnx2x_mcast_ramrod_params *p, | ||
3170 | u8 len) | ||
3171 | { | ||
3172 | struct bnx2x_raw_obj *r = &p->mcast_obj->raw; | ||
3173 | struct mac_configuration_cmd *data = | ||
3174 | (struct mac_configuration_cmd *)(r->rdata); | ||
3175 | |||
3176 | u8 offset = (CHIP_REV_IS_SLOW(bp) ? | ||
3177 | BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) : | ||
3178 | BNX2X_MAX_MULTICAST*(1 + r->func_id)); | ||
3179 | |||
3180 | data->hdr.offset = offset; | ||
3181 | data->hdr.client_id = 0xff; | ||
3182 | data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) | | ||
3183 | (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); | ||
3184 | data->hdr.length = len; | ||
3185 | } | ||
3186 | |||
3187 | /** | ||
3188 | * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710 | ||
3189 | * | ||
3190 | * @bp: device handle | ||
3191 | * @o: | ||
3192 | * @start_idx: index in the registry to start from | ||
3193 | * @rdata_idx: index in the ramrod data to start from | ||
3194 | * | ||
3195 | * The restore command for 57710 is like all other commands - always a standalone | ||
3196 | * command - start_idx and rdata_idx will always be 0. This function will always | ||
3197 | * succeed. | ||
3198 | * returns -1 to comply with the 57712 variant. | ||
3199 | */ | ||
3200 | static inline int bnx2x_mcast_handle_restore_cmd_e1( | ||
3201 | struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx, | ||
3202 | int *rdata_idx) | ||
3203 | { | ||
3204 | struct bnx2x_mcast_mac_elem *elem; | ||
3205 | int i = 0; | ||
3206 | union bnx2x_mcast_config_data cfg_data = {0}; | ||
3207 | |||
3208 | /* go through the registry and configure the MACs from it. */ | ||
3209 | list_for_each_entry(elem, &o->registry.exact_match.macs, link) { | ||
3210 | cfg_data.mac = &elem->mac[0]; | ||
3211 | o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE); | ||
3212 | |||
3213 | i++; | ||
3214 | |||
3215 | DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT | ||
3216 | " mcast MAC\n", | ||
3217 | BNX2X_MAC_PRN_LIST(cfg_data.mac)); | ||
3218 | } | ||
3219 | |||
3220 | *rdata_idx = i; | ||
3221 | |||
3222 | return -1; | ||
3223 | } | ||
3224 | |||
3225 | |||
3226 | static inline int bnx2x_mcast_handle_pending_cmds_e1( | ||
3227 | struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) | ||
3228 | { | ||
3229 | struct bnx2x_pending_mcast_cmd *cmd_pos; | ||
3230 | struct bnx2x_mcast_mac_elem *pmac_pos; | ||
3231 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
3232 | union bnx2x_mcast_config_data cfg_data = {0}; | ||
3233 | int cnt = 0; | ||
3234 | |||
3235 | |||
3236 | /* If nothing to be done - return */ | ||
3237 | if (list_empty(&o->pending_cmds_head)) | ||
3238 | return 0; | ||
3239 | |||
3240 | /* Handle the first command */ | ||
3241 | cmd_pos = list_first_entry(&o->pending_cmds_head, | ||
3242 | struct bnx2x_pending_mcast_cmd, link); | ||
3243 | |||
3244 | switch (cmd_pos->type) { | ||
3245 | case BNX2X_MCAST_CMD_ADD: | ||
3246 | list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) { | ||
3247 | cfg_data.mac = &pmac_pos->mac[0]; | ||
3248 | o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); | ||
3249 | |||
3250 | cnt++; | ||
3251 | |||
3252 | DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT | ||
3253 | " mcast MAC\n", | ||
3254 | BNX2X_MAC_PRN_LIST(pmac_pos->mac)); | ||
3255 | } | ||
3256 | break; | ||
3257 | |||
3258 | case BNX2X_MCAST_CMD_DEL: | ||
3259 | cnt = cmd_pos->data.macs_num; | ||
3260 | DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt); | ||
3261 | break; | ||
3262 | |||
3263 | case BNX2X_MCAST_CMD_RESTORE: | ||
3264 | o->hdl_restore(bp, o, 0, &cnt); | ||
3265 | break; | ||
3266 | |||
3267 | default: | ||
3268 | BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); | ||
3269 | return -EINVAL; | ||
3270 | } | ||
3271 | |||
3272 | list_del(&cmd_pos->link); | ||
3273 | kfree(cmd_pos); | ||
3274 | |||
3275 | return cnt; | ||
3276 | } | ||
3277 | |||
3278 | /** | ||
3279 | * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr(). | ||
3280 | * | ||
3281 | * @fw_hi: | ||
3282 | * @fw_mid: | ||
3283 | * @fw_lo: | ||
3284 | * @mac: | ||
3285 | */ | ||
3286 | static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, | ||
3287 | __le16 *fw_lo, u8 *mac) | ||
3288 | { | ||
3289 | mac[1] = ((u8 *)fw_hi)[0]; | ||
3290 | mac[0] = ((u8 *)fw_hi)[1]; | ||
3291 | mac[3] = ((u8 *)fw_mid)[0]; | ||
3292 | mac[2] = ((u8 *)fw_mid)[1]; | ||
3293 | mac[5] = ((u8 *)fw_lo)[0]; | ||
3294 | mac[4] = ((u8 *)fw_lo)[1]; | ||
3295 | } | ||
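/* Editor's note - an illustrative sketch, not part of the driver: the
 * firmware stores each MAC as three 16-bit words with the bytes of every
 * word swapped. For a hypothetical MAC 00:1b:21:aa:bb:cc the raw bytes of
 * fw_hi/fw_mid/fw_lo would be {1b,00}, {aa,21}, {cc,bb}, and the helper
 * above reassembles them back into mac[0..5].
 */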
3296 | |||
3297 | /** | ||
3298 | * bnx2x_mcast_refresh_registry_e1 - update the exact match mcast registry | ||
3299 | * | ||
3300 | * @bp: device handle | ||
3301 | * @cnt: | ||
3302 | * | ||
3303 | * Check the first entry flag of the ramrod data to see if it was a DELETE or ADD command | ||
3304 | * and update the registry correspondingly: if ADD - allocate memory and add | ||
3305 | * the entries to the registry (list), if DELETE - clear the registry and free | ||
3306 | * the memory. | ||
3307 | */ | ||
3308 | static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, | ||
3309 | struct bnx2x_mcast_obj *o) | ||
3310 | { | ||
3311 | struct bnx2x_raw_obj *raw = &o->raw; | ||
3312 | struct bnx2x_mcast_mac_elem *elem; | ||
3313 | struct mac_configuration_cmd *data = | ||
3314 | (struct mac_configuration_cmd *)(raw->rdata); | ||
3315 | |||
3316 | /* If first entry contains a SET bit - the command was ADD, | ||
3317 | * otherwise - DEL_ALL | ||
3318 | */ | ||
3319 | if (GET_FLAG(data->config_table[0].flags, | ||
3320 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) { | ||
3321 | int i, len = data->hdr.length; | ||
3322 | |||
3323 | /* Bail out if it was a RESTORE command - the registry is already populated */ | ||
3324 | if (!list_empty(&o->registry.exact_match.macs)) | ||
3325 | return 0; | ||
3326 | |||
3327 | elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC); | ||
3328 | if (!elem) { | ||
3329 | BNX2X_ERR("Failed to allocate registry memory\n"); | ||
3330 | return -ENOMEM; | ||
3331 | } | ||
3332 | |||
3333 | for (i = 0; i < len; i++, elem++) { | ||
3334 | bnx2x_get_fw_mac_addr( | ||
3335 | &data->config_table[i].msb_mac_addr, | ||
3336 | &data->config_table[i].middle_mac_addr, | ||
3337 | &data->config_table[i].lsb_mac_addr, | ||
3338 | elem->mac); | ||
3339 | DP(BNX2X_MSG_SP, "Adding registry entry for [" | ||
3340 | BNX2X_MAC_FMT"]\n", | ||
3341 | BNX2X_MAC_PRN_LIST(elem->mac)); | ||
3342 | list_add_tail(&elem->link, | ||
3343 | &o->registry.exact_match.macs); | ||
3344 | } | ||
3345 | } else { | ||
3346 | elem = list_first_entry(&o->registry.exact_match.macs, | ||
3347 | struct bnx2x_mcast_mac_elem, link); | ||
3348 | DP(BNX2X_MSG_SP, "Deleting a registry\n"); | ||
3349 | kfree(elem); | ||
3350 | INIT_LIST_HEAD(&o->registry.exact_match.macs); | ||
3351 | } | ||
3352 | |||
3353 | return 0; | ||
3354 | } | ||
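/* Editor's note on the design above (not part of the driver): on an ADD the
 * whole exact-match registry is allocated as one kzalloc'ed array of
 * bnx2x_mcast_mac_elem and the elements are linked in order, so the DEL_ALL
 * path can free the entire registry by kfree()ing the first list entry,
 * which is the base of that single allocation.
 */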
3355 | |||
3356 | static int bnx2x_mcast_setup_e1(struct bnx2x *bp, | ||
3357 | struct bnx2x_mcast_ramrod_params *p, | ||
3358 | int cmd) | ||
3359 | { | ||
3360 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
3361 | struct bnx2x_raw_obj *raw = &o->raw; | ||
3362 | struct mac_configuration_cmd *data = | ||
3363 | (struct mac_configuration_cmd *)(raw->rdata); | ||
3364 | int cnt = 0, i, rc; | ||
3365 | |||
3366 | /* Reset the ramrod data buffer */ | ||
3367 | memset(data, 0, sizeof(*data)); | ||
3368 | |||
3369 | /* First set all entries as invalid */ | ||
3370 | for (i = 0; i < o->max_cmd_len ; i++) | ||
3371 | SET_FLAG(data->config_table[i].flags, | ||
3372 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
3373 | T_ETH_MAC_COMMAND_INVALIDATE); | ||
3374 | |||
3375 | /* Handle pending commands first */ | ||
3376 | cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p); | ||
3377 | |||
3378 | /* If there are no more pending commands - clear SCHEDULED state */ | ||
3379 | if (list_empty(&o->pending_cmds_head)) | ||
3380 | o->clear_sched(o); | ||
3381 | |||
3382 | /* The below may be true iff there were no pending commands */ | ||
3383 | if (!cnt) | ||
3384 | cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0); | ||
3385 | |||
3386 | /* For 57710 every command has o->max_cmd_len length to ensure that | ||
3387 | * commands are done one at a time. | ||
3388 | */ | ||
3389 | o->total_pending_num -= o->max_cmd_len; | ||
3390 | |||
3391 | /* send a ramrod */ | ||
3392 | |||
3393 | WARN_ON(cnt > o->max_cmd_len); | ||
3394 | |||
3395 | /* Set ramrod header (in particular, a number of entries to update) */ | ||
3396 | bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt); | ||
3397 | |||
3398 | /* Update the registry: we need the registry contents to always be up | ||
3399 | * to date in order to be able to execute a RESTORE opcode. Here | ||
3400 | * we use the fact that for 57710 we send one command at a time, | ||
3401 | * hence we may take the registry update out of the command handling | ||
3402 | * and do it in a simpler way here. | ||
3403 | */ | ||
3404 | rc = bnx2x_mcast_refresh_registry_e1(bp, o); | ||
3405 | if (rc) | ||
3406 | return rc; | ||
3407 | |||
3408 | /* Commit writes towards the memory before sending a ramrod */ | ||
3409 | mb(); | ||
3410 | |||
3411 | /* If CLEAR_ONLY was requested - don't send a ramrod and clear | ||
3412 | * RAMROD_PENDING status immediately. | ||
3413 | */ | ||
3414 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { | ||
3415 | raw->clear_pending(raw); | ||
3416 | return 0; | ||
3417 | } else { | ||
3418 | /* Send a ramrod */ | ||
3419 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid, | ||
3420 | U64_HI(raw->rdata_mapping), | ||
3421 | U64_LO(raw->rdata_mapping), | ||
3422 | ETH_CONNECTION_TYPE); | ||
3423 | if (rc) | ||
3424 | return rc; | ||
3425 | |||
3426 | /* Ramrod completion is pending */ | ||
3427 | return 1; | ||
3428 | } | ||
3429 | |||
3430 | } | ||
3431 | |||
3432 | static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o) | ||
3433 | { | ||
3434 | return o->registry.exact_match.num_macs_set; | ||
3435 | } | ||
3436 | |||
3437 | static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o) | ||
3438 | { | ||
3439 | return o->registry.aprox_match.num_bins_set; | ||
3440 | } | ||
3441 | |||
3442 | static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o, | ||
3443 | int n) | ||
3444 | { | ||
3445 | o->registry.exact_match.num_macs_set = n; | ||
3446 | } | ||
3447 | |||
3448 | static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o, | ||
3449 | int n) | ||
3450 | { | ||
3451 | o->registry.aprox_match.num_bins_set = n; | ||
3452 | } | ||
3453 | |||
3454 | int bnx2x_config_mcast(struct bnx2x *bp, | ||
3455 | struct bnx2x_mcast_ramrod_params *p, | ||
3456 | int cmd) | ||
3457 | { | ||
3458 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
3459 | struct bnx2x_raw_obj *r = &o->raw; | ||
3460 | int rc = 0, old_reg_size; | ||
3461 | |||
3462 | /* This is needed to recover the number of currently configured mcast MACs | ||
3463 | * in case of failure. | ||
3464 | */ | ||
3465 | old_reg_size = o->get_registry_size(o); | ||
3466 | |||
3467 | /* Do some calculations and checks */ | ||
3468 | rc = o->validate(bp, p, cmd); | ||
3469 | if (rc) | ||
3470 | return rc; | ||
3471 | |||
3472 | /* Return if there is no work to do */ | ||
3473 | if ((!p->mcast_list_len) && (!o->check_sched(o))) | ||
3474 | return 0; | ||
3475 | |||
3476 | DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d " | ||
3477 | "o->max_cmd_len=%d\n", o->total_pending_num, | ||
3478 | p->mcast_list_len, o->max_cmd_len); | ||
3479 | |||
3480 | /* Enqueue the current command to the pending list if we can't complete | ||
3481 | * it in the current iteration | ||
3482 | */ | ||
3483 | if (r->check_pending(r) || | ||
3484 | ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { | ||
3485 | rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd); | ||
3486 | if (rc < 0) | ||
3487 | goto error_exit1; | ||
3488 | |||
3489 | /* As long as the current command is in a command list we | ||
3490 | * don't need to handle it separately. | ||
3491 | */ | ||
3492 | p->mcast_list_len = 0; | ||
3493 | } | ||
3494 | |||
3495 | if (!r->check_pending(r)) { | ||
3496 | |||
3497 | /* Set 'pending' state */ | ||
3498 | r->set_pending(r); | ||
3499 | |||
3500 | /* Configure the new classification in the chip */ | ||
3501 | rc = o->config_mcast(bp, p, cmd); | ||
3502 | if (rc < 0) | ||
3503 | goto error_exit2; | ||
3504 | |||
3505 | /* Wait for a ramrod completion if was requested */ | ||
3506 | if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) | ||
3507 | rc = o->wait_comp(bp, o); | ||
3508 | } | ||
3509 | |||
3510 | return rc; | ||
3511 | |||
3512 | error_exit2: | ||
3513 | r->clear_pending(r); | ||
3514 | |||
3515 | error_exit1: | ||
3516 | o->revert(bp, p, old_reg_size); | ||
3517 | |||
3518 | return rc; | ||
3519 | } | ||
3520 | |||
3521 | static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o) | ||
3522 | { | ||
3523 | smp_mb__before_clear_bit(); | ||
3524 | clear_bit(o->sched_state, o->raw.pstate); | ||
3525 | smp_mb__after_clear_bit(); | ||
3526 | } | ||
3527 | |||
3528 | static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o) | ||
3529 | { | ||
3530 | smp_mb__before_clear_bit(); | ||
3531 | set_bit(o->sched_state, o->raw.pstate); | ||
3532 | smp_mb__after_clear_bit(); | ||
3533 | } | ||
3534 | |||
3535 | static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o) | ||
3536 | { | ||
3537 | return !!test_bit(o->sched_state, o->raw.pstate); | ||
3538 | } | ||
3539 | |||
3540 | static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o) | ||
3541 | { | ||
3542 | return o->raw.check_pending(&o->raw) || o->check_sched(o); | ||
3543 | } | ||
3544 | |||
3545 | void bnx2x_init_mcast_obj(struct bnx2x *bp, | ||
3546 | struct bnx2x_mcast_obj *mcast_obj, | ||
3547 | u8 mcast_cl_id, u32 mcast_cid, u8 func_id, | ||
3548 | u8 engine_id, void *rdata, dma_addr_t rdata_mapping, | ||
3549 | int state, unsigned long *pstate, bnx2x_obj_type type) | ||
3550 | { | ||
3551 | memset(mcast_obj, 0, sizeof(*mcast_obj)); | ||
3552 | |||
3553 | bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id, | ||
3554 | rdata, rdata_mapping, state, pstate, type); | ||
3555 | |||
3556 | mcast_obj->engine_id = engine_id; | ||
3557 | |||
3558 | INIT_LIST_HEAD(&mcast_obj->pending_cmds_head); | ||
3559 | |||
3560 | mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED; | ||
3561 | mcast_obj->check_sched = bnx2x_mcast_check_sched; | ||
3562 | mcast_obj->set_sched = bnx2x_mcast_set_sched; | ||
3563 | mcast_obj->clear_sched = bnx2x_mcast_clear_sched; | ||
3564 | |||
3565 | if (CHIP_IS_E1(bp)) { | ||
3566 | mcast_obj->config_mcast = bnx2x_mcast_setup_e1; | ||
3567 | mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; | ||
3568 | mcast_obj->hdl_restore = | ||
3569 | bnx2x_mcast_handle_restore_cmd_e1; | ||
3570 | mcast_obj->check_pending = bnx2x_mcast_check_pending; | ||
3571 | |||
3572 | if (CHIP_REV_IS_SLOW(bp)) | ||
3573 | mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI; | ||
3574 | else | ||
3575 | mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST; | ||
3576 | |||
3577 | mcast_obj->wait_comp = bnx2x_mcast_wait; | ||
3578 | mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1; | ||
3579 | mcast_obj->validate = bnx2x_mcast_validate_e1; | ||
3580 | mcast_obj->revert = bnx2x_mcast_revert_e1; | ||
3581 | mcast_obj->get_registry_size = | ||
3582 | bnx2x_mcast_get_registry_size_exact; | ||
3583 | mcast_obj->set_registry_size = | ||
3584 | bnx2x_mcast_set_registry_size_exact; | ||
3585 | |||
3586 | /* 57710 is the only chip that uses the exact match for mcast | ||
3587 | * at the moment. | ||
3588 | */ | ||
3589 | INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs); | ||
3590 | |||
3591 | } else if (CHIP_IS_E1H(bp)) { | ||
3592 | mcast_obj->config_mcast = bnx2x_mcast_setup_e1h; | ||
3593 | mcast_obj->enqueue_cmd = NULL; | ||
3594 | mcast_obj->hdl_restore = NULL; | ||
3595 | mcast_obj->check_pending = bnx2x_mcast_check_pending; | ||
3596 | |||
3597 | /* 57711 doesn't send a ramrod, so it has unlimited credit | ||
3598 | * for one command. | ||
3599 | */ | ||
3600 | mcast_obj->max_cmd_len = -1; | ||
3601 | mcast_obj->wait_comp = bnx2x_mcast_wait; | ||
3602 | mcast_obj->set_one_rule = NULL; | ||
3603 | mcast_obj->validate = bnx2x_mcast_validate_e1h; | ||
3604 | mcast_obj->revert = bnx2x_mcast_revert_e1h; | ||
3605 | mcast_obj->get_registry_size = | ||
3606 | bnx2x_mcast_get_registry_size_aprox; | ||
3607 | mcast_obj->set_registry_size = | ||
3608 | bnx2x_mcast_set_registry_size_aprox; | ||
3609 | } else { | ||
3610 | mcast_obj->config_mcast = bnx2x_mcast_setup_e2; | ||
3611 | mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; | ||
3612 | mcast_obj->hdl_restore = | ||
3613 | bnx2x_mcast_handle_restore_cmd_e2; | ||
3614 | mcast_obj->check_pending = bnx2x_mcast_check_pending; | ||
3615 | /* TODO: There should be a proper HSI define for this number!!! | ||
3616 | */ | ||
3617 | mcast_obj->max_cmd_len = 16; | ||
3618 | mcast_obj->wait_comp = bnx2x_mcast_wait; | ||
3619 | mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2; | ||
3620 | mcast_obj->validate = bnx2x_mcast_validate_e2; | ||
3621 | mcast_obj->revert = bnx2x_mcast_revert_e2; | ||
3622 | mcast_obj->get_registry_size = | ||
3623 | bnx2x_mcast_get_registry_size_aprox; | ||
3624 | mcast_obj->set_registry_size = | ||
3625 | bnx2x_mcast_set_registry_size_aprox; | ||
3626 | } | ||
3627 | } | ||
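/* Editor's note (not part of the driver): the three branches above select the
 * per-chip multicast strategy - 57710 programs exact-match CAM entries one
 * ramrod at a time, 57711 writes the approximate-match filter straight into
 * TSTORM RAM with no ramrod, and 57712+ sends multicast-rules ramrods capped
 * at max_cmd_len rules each.
 */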
3628 | |||
3629 | /*************************** Credit handling **********************************/ | ||
3630 | |||
3631 | /** | ||
3632 | * __atomic_add_ifless - add if the result is less than a given value. | ||
3633 | * | ||
3634 | * @v: pointer of type atomic_t | ||
3635 | * @a: the amount to add to v... | ||
3636 | * @u: ...if (v + a) is less than u. | ||
3637 | * | ||
3638 | * returns true if (v + a) was less than u, and false otherwise. | ||
3639 | * | ||
3640 | */ | ||
3641 | static inline bool __atomic_add_ifless(atomic_t *v, int a, int u) | ||
3642 | { | ||
3643 | int c, old; | ||
3644 | |||
3645 | c = atomic_read(v); | ||
3646 | for (;;) { | ||
3647 | if (unlikely(c + a >= u)) | ||
3648 | return false; | ||
3649 | |||
3650 | old = atomic_cmpxchg((v), c, c + a); | ||
3651 | if (likely(old == c)) | ||
3652 | break; | ||
3653 | c = old; | ||
3654 | } | ||
3655 | |||
3656 | return true; | ||
3657 | } | ||
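/* Editor's note - an illustrative sketch, not part of the driver: the helper
 * above is a cmpxchg retry loop. With a hypothetical counter of 5, a = 2 and
 * an upper bound u = 8, it atomically moves the counter to 7 and returns
 * true; with a counter of 7 it returns false since 7 + 2 >= 8.
 */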
3658 | |||
3659 | /** | ||
3660 | * __atomic_dec_ifmoe - dec if the result is greater than or equal to a given value. | ||
3661 | * | ||
3662 | * @v: pointer of type atomic_t | ||
3663 | * @a: the amount to dec from v... | ||
3664 | * @u: ...if (v - a) is greater than or equal to u. | ||
3665 | * | ||
3666 | * returns true if (v - a) was greater than or equal to u, and false | ||
3667 | * otherwise. | ||
3668 | */ | ||
3669 | static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u) | ||
3670 | { | ||
3671 | int c, old; | ||
3672 | |||
3673 | c = atomic_read(v); | ||
3674 | for (;;) { | ||
3675 | if (unlikely(c - a < u)) | ||
3676 | return false; | ||
3677 | |||
3678 | old = atomic_cmpxchg((v), c, c - a); | ||
3679 | if (likely(old == c)) | ||
3680 | break; | ||
3681 | c = old; | ||
3682 | } | ||
3683 | |||
3684 | return true; | ||
3685 | } | ||
3686 | |||
3687 | static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt) | ||
3688 | { | ||
3689 | bool rc; | ||
3690 | |||
3691 | smp_mb(); | ||
3692 | rc = __atomic_dec_ifmoe(&o->credit, cnt, 0); | ||
3693 | smp_mb(); | ||
3694 | |||
3695 | return rc; | ||
3696 | } | ||
3697 | |||
3698 | static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt) | ||
3699 | { | ||
3700 | bool rc; | ||
3701 | |||
3702 | smp_mb(); | ||
3703 | |||
3704 | /* Don't allow a refill if credit + cnt > pool_sz */ | ||
3705 | rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1); | ||
3706 | |||
3707 | smp_mb(); | ||
3708 | |||
3709 | return rc; | ||
3710 | } | ||
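/* Editor's note - an illustrative sketch, not part of the driver: get() and
 * put() map onto the two atomics above - get(cnt) succeeds only while the
 * remaining credit stays >= 0, and put(cnt) succeeds only while
 * credit + cnt does not exceed pool_sz (the bound is pool_sz + 1 because the
 * check is strictly "less than").
 */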
3711 | |||
3712 | static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o) | ||
3713 | { | ||
3714 | int cur_credit; | ||
3715 | |||
3716 | smp_mb(); | ||
3717 | cur_credit = atomic_read(&o->credit); | ||
3718 | |||
3719 | return cur_credit; | ||
3720 | } | ||
3721 | |||
3722 | static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o, | ||
3723 | int cnt) | ||
3724 | { | ||
3725 | return true; | ||
3726 | } | ||
3727 | |||
3728 | |||
3729 | static bool bnx2x_credit_pool_get_entry( | ||
3730 | struct bnx2x_credit_pool_obj *o, | ||
3731 | int *offset) | ||
3732 | { | ||
3733 | int idx, vec, i; | ||
3734 | |||
3735 | *offset = -1; | ||
3736 | |||
3737 | /* Find "internal cam-offset" then add to base for this object... */ | ||
3738 | for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) { | ||
3739 | |||
3740 | /* Skip the current vector if there are no free entries in it */ | ||
3741 | if (!o->pool_mirror[vec]) | ||
3742 | continue; | ||
3743 | |||
3744 | /* If we've got here we are going to find a free entry */ | ||
3745 | for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0; | ||
3746 | i < BIT_VEC64_ELEM_SZ; idx++, i++) | ||
3747 | |||
3748 | if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { | ||
3749 | /* Got one!! */ | ||
3750 | BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx); | ||
3751 | *offset = o->base_pool_offset + idx; | ||
3752 | return true; | ||
3753 | } | ||
3754 | } | ||
3755 | |||
3756 | return false; | ||
3757 | } | ||
3758 | |||
3759 | static bool bnx2x_credit_pool_put_entry( | ||
3760 | struct bnx2x_credit_pool_obj *o, | ||
3761 | int offset) | ||
3762 | { | ||
3763 | if (offset < o->base_pool_offset) | ||
3764 | return false; | ||
3765 | |||
3766 | offset -= o->base_pool_offset; | ||
3767 | |||
3768 | if (offset >= o->pool_sz) | ||
3769 | return false; | ||
3770 | |||
3771 | /* Return the entry to the pool */ | ||
3772 | BIT_VEC64_SET_BIT(o->pool_mirror, offset); | ||
3773 | |||
3774 | return true; | ||
3775 | } | ||
3776 | |||
3777 | static bool bnx2x_credit_pool_put_entry_always_true( | ||
3778 | struct bnx2x_credit_pool_obj *o, | ||
3779 | int offset) | ||
3780 | { | ||
3781 | return true; | ||
3782 | } | ||
3783 | |||
3784 | static bool bnx2x_credit_pool_get_entry_always_true( | ||
3785 | struct bnx2x_credit_pool_obj *o, | ||
3786 | int *offset) | ||
3787 | { | ||
3788 | *offset = -1; | ||
3789 | return true; | ||
3790 | } | ||
3791 | /** | ||
3792 | * bnx2x_init_credit_pool - initialize credit pool internals. | ||
3793 | * | ||
3794 | * @p: | ||
3795 | * @base: Base entry in the CAM to use. | ||
3796 | * @credit: pool size. | ||
3797 | * | ||
3798 | * If base is negative no CAM entries handling will be performed. | ||
3799 | * If credit is negative pool operations will always succeed (unlimited pool). | ||
3800 | * | ||
3801 | */ | ||
3802 | static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, | ||
3803 | int base, int credit) | ||
3804 | { | ||
3805 | /* Zero the object first */ | ||
3806 | memset(p, 0, sizeof(*p)); | ||
3807 | |||
3808 | /* Set the table to all 1s */ | ||
3809 | memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror)); | ||
3810 | |||
3811 | /* Init a pool as full */ | ||
3812 | atomic_set(&p->credit, credit); | ||
3813 | |||
3814 | /* The total pool size */ | ||
3815 | p->pool_sz = credit; | ||
3816 | |||
3817 | p->base_pool_offset = base; | ||
3818 | |||
3819 | /* Commit the change */ | ||
3820 | smp_mb(); | ||
3821 | |||
3822 | p->check = bnx2x_credit_pool_check; | ||
3823 | |||
3824 | /* if pool credit is negative - disable the checks */ | ||
3825 | if (credit >= 0) { | ||
3826 | p->put = bnx2x_credit_pool_put; | ||
3827 | p->get = bnx2x_credit_pool_get; | ||
3828 | p->put_entry = bnx2x_credit_pool_put_entry; | ||
3829 | p->get_entry = bnx2x_credit_pool_get_entry; | ||
3830 | } else { | ||
3831 | p->put = bnx2x_credit_pool_always_true; | ||
3832 | p->get = bnx2x_credit_pool_always_true; | ||
3833 | p->put_entry = bnx2x_credit_pool_put_entry_always_true; | ||
3834 | p->get_entry = bnx2x_credit_pool_get_entry_always_true; | ||
3835 | } | ||
3836 | |||
3837 | /* If base is negative - disable entries handling */ | ||
3838 | if (base < 0) { | ||
3839 | p->put_entry = bnx2x_credit_pool_put_entry_always_true; | ||
3840 | p->get_entry = bnx2x_credit_pool_get_entry_always_true; | ||
3841 | } | ||
3842 | } | ||
3843 | |||
3844 | void bnx2x_init_mac_credit_pool(struct bnx2x *bp, | ||
3845 | struct bnx2x_credit_pool_obj *p, u8 func_id, | ||
3846 | u8 func_num) | ||
3847 | { | ||
3848 | /* TODO: this will be defined in consts as well... */ | ||
3849 | #define BNX2X_CAM_SIZE_EMUL 5 | ||
3850 | |||
3851 | int cam_sz; | ||
3852 | |||
3853 | if (CHIP_IS_E1(bp)) { | ||
3854 | /* In E1, Multicast is saved in cam... */ | ||
3855 | if (!CHIP_REV_IS_SLOW(bp)) | ||
3856 | cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST; | ||
3857 | else | ||
3858 | cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI; | ||
3859 | |||
3860 | bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); | ||
3861 | |||
3862 | } else if (CHIP_IS_E1H(bp)) { | ||
3863 | /* CAM credit is equally divided among all active functions | ||
3864 | * on the PORT. | ||
3865 | */ | ||
3866 | if ((func_num > 0)) { | ||
3867 | if (!CHIP_REV_IS_SLOW(bp)) | ||
3868 | cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num)); | ||
3869 | else | ||
3870 | cam_sz = BNX2X_CAM_SIZE_EMUL; | ||
3871 | bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); | ||
3872 | } else { | ||
3873 | /* this should never happen! Block MAC operations. */ | ||
3874 | bnx2x_init_credit_pool(p, 0, 0); | ||
3875 | } | ||
3876 | |||
3877 | } else { | ||
3878 | |||
3879 | /* | ||
3880 | * CAM credit is equally divided among all active functions | ||
3881 | * on the PATH. | ||
3882 | */ | ||
3883 | if ((func_num > 0)) { | ||
3884 | if (!CHIP_REV_IS_SLOW(bp)) | ||
3885 | cam_sz = (MAX_MAC_CREDIT_E2 / func_num); | ||
3886 | else | ||
3887 | cam_sz = BNX2X_CAM_SIZE_EMUL; | ||
3888 | |||
3889 | /* | ||
3890 | * No need for CAM entries handling for 57712 and | ||
3891 | * newer. | ||
3892 | */ | ||
3893 | bnx2x_init_credit_pool(p, -1, cam_sz); | ||
3894 | } else { | ||
3895 | /* this should never happen! Block MAC operations. */ | ||
3896 | bnx2x_init_credit_pool(p, 0, 0); | ||
3897 | } | ||
3898 | |||
3899 | } | ||
3900 | } | ||
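/* Editor's note - an illustrative sketch, not part of the driver, with
 * hypothetical numbers: if MAX_MAC_CREDIT_E1H were 256 and func_num = 4,
 * each E1H function would get 256 / (2 * 4) = 32 CAM entries starting at
 * func_id * 32; on E2 the pool is split per path (MAX_MAC_CREDIT_E2 /
 * func_num) and the base is -1 since no CAM offsets are tracked there.
 */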
3901 | |||
3902 | void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, | ||
3903 | struct bnx2x_credit_pool_obj *p, | ||
3904 | u8 func_id, | ||
3905 | u8 func_num) | ||
3906 | { | ||
3907 | if (CHIP_IS_E1x(bp)) { | ||
3908 | /* | ||
3909 | * There is no VLAN credit in HW on 57710 and 57711; only | ||
3910 | * MAC / MAC-VLAN can be set | ||
3911 | */ | ||
3912 | bnx2x_init_credit_pool(p, 0, -1); | ||
3913 | } else { | ||
3914 | /* | ||
3915 | * CAM credit is equally divided among all active functions | ||
3916 | * on the PATH. | ||
3917 | */ | ||
3918 | if (func_num > 0) { | ||
3919 | int credit = MAX_VLAN_CREDIT_E2 / func_num; | ||
3920 | bnx2x_init_credit_pool(p, func_id * credit, credit); | ||
3921 | } else | ||
3922 | /* this should never happen! Block VLAN operations. */ | ||
3923 | bnx2x_init_credit_pool(p, 0, 0); | ||
3924 | } | ||
3925 | } | ||
3926 | |||
3927 | /****************** RSS Configuration ******************/ | ||
3928 | /** | ||
3929 | * bnx2x_debug_print_ind_table - prints the indirection table configuration. | ||
3930 | * | ||
3931 | * @bp: driver handle | ||
3932 | * @p: pointer to rss configuration | ||
3933 | * | ||
3934 | * Prints it when NETIF_MSG_IFUP debug level is configured. | ||
3935 | */ | ||
3936 | static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp, | ||
3937 | struct bnx2x_config_rss_params *p) | ||
3938 | { | ||
3939 | int i; | ||
3940 | |||
3941 | DP(BNX2X_MSG_SP, "Setting indirection table to:\n"); | ||
3942 | DP(BNX2X_MSG_SP, "0x0000: "); | ||
3943 | for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { | ||
3944 | DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]); | ||
3945 | |||
3946 | /* Print 4 bytes in a line */ | ||
3947 | if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && | ||
3948 | (((i + 1) & 0x3) == 0)) { | ||
3949 | DP_CONT(BNX2X_MSG_SP, "\n"); | ||
3950 | DP(BNX2X_MSG_SP, "0x%04x: ", i + 1); | ||
3951 | } | ||
3952 | } | ||
3953 | |||
3954 | DP_CONT(BNX2X_MSG_SP, "\n"); | ||
3955 | } | ||
3956 | |||
3957 | /** | ||
3958 | * bnx2x_setup_rss - configure RSS | ||
3959 | * | ||
3960 | * @bp: device handle | ||
3961 | * @p: rss configuration | ||
3962 | * | ||
3963 | * Sends an UPDATE ramrod for that matter. | ||
3964 | */ | ||
3965 | static int bnx2x_setup_rss(struct bnx2x *bp, | ||
3966 | struct bnx2x_config_rss_params *p) | ||
3967 | { | ||
3968 | struct bnx2x_rss_config_obj *o = p->rss_obj; | ||
3969 | struct bnx2x_raw_obj *r = &o->raw; | ||
3970 | struct eth_rss_update_ramrod_data *data = | ||
3971 | (struct eth_rss_update_ramrod_data *)(r->rdata); | ||
3972 | u8 rss_mode = 0; | ||
3973 | int rc; | ||
3974 | |||
3975 | memset(data, 0, sizeof(*data)); | ||
3976 | |||
3977 | DP(BNX2X_MSG_SP, "Configuring RSS\n"); | ||
3978 | |||
3979 | /* Set an echo field */ | ||
3980 | data->echo = (r->cid & BNX2X_SWCID_MASK) | | ||
3981 | (r->state << BNX2X_SWCID_SHIFT); | ||
3982 | |||
3983 | /* RSS mode */ | ||
3984 | if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags)) | ||
3985 | rss_mode = ETH_RSS_MODE_DISABLED; | ||
3986 | else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags)) | ||
3987 | rss_mode = ETH_RSS_MODE_REGULAR; | ||
3988 | else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags)) | ||
3989 | rss_mode = ETH_RSS_MODE_VLAN_PRI; | ||
3990 | else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags)) | ||
3991 | rss_mode = ETH_RSS_MODE_E1HOV_PRI; | ||
3992 | else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags)) | ||
3993 | rss_mode = ETH_RSS_MODE_IP_DSCP; | ||
3994 | |||
3995 | data->rss_mode = rss_mode; | ||
3996 | |||
3997 | DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode); | ||
3998 | |||
3999 | /* RSS capabilities */ | ||
4000 | if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags)) | ||
4001 | data->capabilities |= | ||
4002 | ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; | ||
4003 | |||
4004 | if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags)) | ||
4005 | data->capabilities |= | ||
4006 | ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; | ||
4007 | |||
4008 | if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) | ||
4009 | data->capabilities |= | ||
4010 | ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; | ||
4011 | |||
4012 | if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags)) | ||
4013 | data->capabilities |= | ||
4014 | ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; | ||
4015 | |||
4016 | /* Hashing mask */ | ||
4017 | data->rss_result_mask = p->rss_result_mask; | ||
4018 | |||
4019 | /* RSS engine ID */ | ||
4020 | data->rss_engine_id = o->engine_id; | ||
4021 | |||
4022 | DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id); | ||
4023 | |||
4024 | /* Indirection table */ | ||
4025 | memcpy(data->indirection_table, p->ind_table, | ||
4026 | T_ETH_INDIRECTION_TABLE_SIZE); | ||
4027 | |||
4028 | /* Remember the last configuration */ | ||
4029 | memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); | ||
4030 | |||
4031 | /* Print the indirection table */ | ||
4032 | if (netif_msg_ifup(bp)) | ||
4033 | bnx2x_debug_print_ind_table(bp, p); | ||
4034 | |||
4035 | /* RSS keys */ | ||
4036 | if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { | ||
4037 | memcpy(&data->rss_key[0], &p->rss_key[0], | ||
4038 | sizeof(data->rss_key)); | ||
4039 | data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; | ||
4040 | } | ||
4041 | |||
4042 | /* Commit writes towards the memory before sending a ramrod */ | ||
4043 | mb(); | ||
4044 | |||
4045 | /* Send a ramrod */ | ||
4046 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid, | ||
4047 | U64_HI(r->rdata_mapping), | ||
4048 | U64_LO(r->rdata_mapping), | ||
4049 | ETH_CONNECTION_TYPE); | ||
4050 | |||
4051 | if (rc < 0) | ||
4052 | return rc; | ||
4053 | |||
4054 | return 1; | ||
4055 | } | ||
4056 | |||
4057 | void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, | ||
4058 | u8 *ind_table) | ||
4059 | { | ||
4060 | memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table)); | ||
4061 | } | ||
4062 | |||
4063 | int bnx2x_config_rss(struct bnx2x *bp, | ||
4064 | struct bnx2x_config_rss_params *p) | ||
4065 | { | ||
4066 | int rc; | ||
4067 | struct bnx2x_rss_config_obj *o = p->rss_obj; | ||
4068 | struct bnx2x_raw_obj *r = &o->raw; | ||
4069 | |||
4070 | /* Do nothing if only driver cleanup was requested */ | ||
4071 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) | ||
4072 | return 0; | ||
4073 | |||
4074 | r->set_pending(r); | ||
4075 | |||
4076 | rc = o->config_rss(bp, p); | ||
4077 | if (rc < 0) { | ||
4078 | r->clear_pending(r); | ||
4079 | return rc; | ||
4080 | } | ||
4081 | |||
4082 | if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) | ||
4083 | rc = r->wait_comp(bp, r); | ||
4084 | |||
4085 | return rc; | ||
4086 | } | ||
4087 | |||
4088 | |||
4089 | void bnx2x_init_rss_config_obj(struct bnx2x *bp, | ||
4090 | struct bnx2x_rss_config_obj *rss_obj, | ||
4091 | u8 cl_id, u32 cid, u8 func_id, u8 engine_id, | ||
4092 | void *rdata, dma_addr_t rdata_mapping, | ||
4093 | int state, unsigned long *pstate, | ||
4094 | bnx2x_obj_type type) | ||
4095 | { | ||
4096 | bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata, | ||
4097 | rdata_mapping, state, pstate, type); | ||
4098 | |||
4099 | rss_obj->engine_id = engine_id; | ||
4100 | rss_obj->config_rss = bnx2x_setup_rss; | ||
4101 | } | ||
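For context, these two entry points are what the rest of the driver is expected to use: bnx2x_init_rss_config_obj() once at setup time, and bnx2x_config_rss() whenever the RSS configuration changes. The following is a minimal illustrative sketch (not part of this patch): the flag and field names come from the code above, but the surrounding storage (bp->rss_conf_obj, the ind_table[] source and the result-mask value) is assumed for the example.

	struct bnx2x_config_rss_params params = {NULL};
	int rc;

	params.rss_obj = &bp->rss_conf_obj;	/* assumed to have been set up
						 * with bnx2x_init_rss_config_obj() */
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	params.rss_result_mask = 0x7f;		/* example mask only */
	memcpy(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
	rc = bnx2x_config_rss(bp, &params);	/* <0 on error, 0 on success */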
4102 | |||
4103 | /********************** Queue state object ***********************************/ | ||
4104 | |||
4105 | /** | ||
4106 | * bnx2x_queue_state_change - perform Queue state change transition | ||
4107 | * | ||
4108 | * @bp: device handle | ||
4109 | * @params: parameters to perform the transition | ||
4110 | * | ||
4111 | * returns 0 in case of successfully completed transition, negative error | ||
4112 | * code in case of failure, positive (EBUSY) value if there is a completion | ||
4113 | * that is still pending (possible only if RAMROD_COMP_WAIT is | ||
4114 | * not set in params->ramrod_flags for asynchronous commands). | ||
4115 | * | ||
4116 | */ | ||
4117 | int bnx2x_queue_state_change(struct bnx2x *bp, | ||
4118 | struct bnx2x_queue_state_params *params) | ||
4119 | { | ||
4120 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4121 | int rc, pending_bit; | ||
4122 | unsigned long *pending = &o->pending; | ||
4123 | |||
4124 | /* Check that the requested transition is legal */ | ||
4125 | if (o->check_transition(bp, o, params)) | ||
4126 | return -EINVAL; | ||
4127 | |||
4128 | /* Set "pending" bit */ | ||
4129 | pending_bit = o->set_pending(o, params); | ||
4130 | |||
4131 | /* Don't send a command if only driver cleanup was requested */ | ||
4132 | if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) | ||
4133 | o->complete_cmd(bp, o, pending_bit); | ||
4134 | else { | ||
4135 | /* Send a ramrod */ | ||
4136 | rc = o->send_cmd(bp, params); | ||
4137 | if (rc) { | ||
4138 | o->next_state = BNX2X_Q_STATE_MAX; | ||
4139 | clear_bit(pending_bit, pending); | ||
4140 | smp_mb__after_clear_bit(); | ||
4141 | return rc; | ||
4142 | } | ||
4143 | |||
4144 | if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { | ||
4145 | rc = o->wait_comp(bp, o, pending_bit); | ||
4146 | if (rc) | ||
4147 | return rc; | ||
4148 | |||
4149 | return 0; | ||
4150 | } | ||
4151 | } | ||
4152 | |||
4153 | return !!test_bit(pending_bit, pending); | ||
4154 | } | ||
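As an illustrative sketch (not part of this patch), a caller would drive a Queue through the INIT and SETUP commands roughly as follows; q_obj is assumed to be a per-queue object initialized with bnx2x_init_queue_obj() (defined later in this file), and the INIT/SETUP sub-parameters are elided except for the activation flag checked by the state machine:

	struct bnx2x_queue_state_params q_params = {NULL};
	int rc;

	q_params.q_obj = &fp->q_obj;		/* assumed per-queue object */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* INIT: no ramrod is sent, the command completes immediately */
	q_params.cmd = BNX2X_Q_CMD_INIT;
	/* q_params.params.init would carry the HC and context info here */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc)
		return rc;

	/* SETUP: posts a CLIENT_SETUP ramrod and waits for its completion */
	q_params.cmd = BNX2X_Q_CMD_SETUP;
	__set_bit(BNX2X_Q_FLG_ACTIVE, &q_params.params.setup.flags);
	rc = bnx2x_queue_state_change(bp, &q_params);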
4155 | |||
4156 | |||
4157 | static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj, | ||
4158 | struct bnx2x_queue_state_params *params) | ||
4159 | { | ||
4160 | enum bnx2x_queue_cmd cmd = params->cmd, bit; | ||
4161 | |||
4162 | /* ACTIVATE and DEACTIVATE commands are implemented on top of | ||
4163 | * UPDATE command. | ||
4164 | */ | ||
4165 | if ((cmd == BNX2X_Q_CMD_ACTIVATE) || | ||
4166 | (cmd == BNX2X_Q_CMD_DEACTIVATE)) | ||
4167 | bit = BNX2X_Q_CMD_UPDATE; | ||
4168 | else | ||
4169 | bit = cmd; | ||
4170 | |||
4171 | set_bit(bit, &obj->pending); | ||
4172 | return bit; | ||
4173 | } | ||
4174 | |||
4175 | static int bnx2x_queue_wait_comp(struct bnx2x *bp, | ||
4176 | struct bnx2x_queue_sp_obj *o, | ||
4177 | enum bnx2x_queue_cmd cmd) | ||
4178 | { | ||
4179 | return bnx2x_state_wait(bp, cmd, &o->pending); | ||
4180 | } | ||
4181 | |||
4182 | /** | ||
4183 | * bnx2x_queue_comp_cmd - complete the state change command. | ||
4184 | * | ||
4185 | * @bp: device handle | ||
4186 | * @o: queue state object | ||
4187 | * @cmd: command that has completed | ||
4188 | * | ||
4189 | * Checks that the arrived completion is expected. | ||
4190 | */ | ||
4191 | static int bnx2x_queue_comp_cmd(struct bnx2x *bp, | ||
4192 | struct bnx2x_queue_sp_obj *o, | ||
4193 | enum bnx2x_queue_cmd cmd) | ||
4194 | { | ||
4195 | unsigned long cur_pending = o->pending; | ||
4196 | |||
4197 | if (!test_and_clear_bit(cmd, &cur_pending)) { | ||
4198 | BNX2X_ERR("Bad MC reply %d for queue %d in state %d " | ||
4199 | "pending 0x%lx, next_state %d\n", cmd, o->cid, | ||
4200 | o->state, cur_pending, o->next_state); | ||
4201 | return -EINVAL; | ||
4202 | } | ||
4203 | |||
4204 | DP(BNX2X_MSG_SP, "Completing command %d for queue %d, " | ||
4205 | "setting state to %d\n", cmd, o->cid, o->next_state); | ||
4206 | |||
4207 | o->state = o->next_state; | ||
4208 | o->next_state = BNX2X_Q_STATE_MAX; | ||
4209 | |||
4210 | /* It's important that o->state and o->next_state are | ||
4211 | * updated before o->pending. | ||
4212 | */ | ||
4213 | wmb(); | ||
4214 | |||
4215 | clear_bit(cmd, &o->pending); | ||
4216 | smp_mb__after_clear_bit(); | ||
4217 | |||
4218 | return 0; | ||
4219 | } | ||
4220 | |||
4221 | static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp, | ||
4222 | struct bnx2x_queue_state_params *cmd_params, | ||
4223 | struct client_init_ramrod_data *data) | ||
4224 | { | ||
4225 | struct bnx2x_queue_setup_params *params = &cmd_params->params.setup; | ||
4226 | |||
4227 | /* Rx data */ | ||
4228 | |||
4229 | /* IPv6 TPA supported for E2 and above only */ | ||
4230 | data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA, ¶ms->flags) * | ||
4231 | CLIENT_INIT_RX_DATA_TPA_EN_IPV6; | ||
4232 | } | ||
4233 | |||
4234 | static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp, | ||
4235 | struct bnx2x_queue_state_params *cmd_params, | ||
4236 | struct client_init_ramrod_data *data) | ||
4237 | { | ||
4238 | struct bnx2x_queue_sp_obj *o = cmd_params->q_obj; | ||
4239 | struct bnx2x_queue_setup_params *params = &cmd_params->params.setup; | ||
4240 | |||
4241 | |||
4242 | /* general */ | ||
4243 | data->general.client_id = o->cl_id; | ||
4244 | |||
4245 | if (test_bit(BNX2X_Q_FLG_STATS, ¶ms->flags)) { | ||
4246 | data->general.statistics_counter_id = | ||
4247 | params->gen_params.stat_id; | ||
4248 | data->general.statistics_en_flg = 1; | ||
4249 | data->general.statistics_zero_flg = | ||
4250 | test_bit(BNX2X_Q_FLG_ZERO_STATS, ¶ms->flags); | ||
4251 | } else | ||
4252 | data->general.statistics_counter_id = | ||
4253 | DISABLE_STATISTIC_COUNTER_ID_VALUE; | ||
4254 | |||
4255 | data->general.is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, ¶ms->flags); | ||
4256 | data->general.activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, | ||
4257 | ¶ms->flags); | ||
4258 | data->general.sp_client_id = params->gen_params.spcl_id; | ||
4259 | data->general.mtu = cpu_to_le16(params->gen_params.mtu); | ||
4260 | data->general.func_id = o->func_id; | ||
4261 | |||
4262 | |||
4263 | data->general.cos = params->txq_params.cos; | ||
4264 | |||
4265 | data->general.traffic_type = | ||
4266 | test_bit(BNX2X_Q_FLG_FCOE, ¶ms->flags) ? | ||
4267 | LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; | ||
4268 | |||
4269 | /* Rx data */ | ||
4270 | data->rx.tpa_en = test_bit(BNX2X_Q_FLG_TPA, ¶ms->flags) * | ||
4271 | CLIENT_INIT_RX_DATA_TPA_EN_IPV4; | ||
4272 | data->rx.vmqueue_mode_en_flg = 0; | ||
4273 | |||
4274 | data->rx.cache_line_alignment_log_size = | ||
4275 | params->rxq_params.cache_line_log; | ||
4276 | data->rx.enable_dynamic_hc = | ||
4277 | test_bit(BNX2X_Q_FLG_DHC, ¶ms->flags); | ||
4278 | data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt; | ||
4279 | data->rx.client_qzone_id = params->rxq_params.cl_qzone_id; | ||
4280 | data->rx.max_agg_size = cpu_to_le16(params->rxq_params.tpa_agg_sz); | ||
4281 | |||
4282 | /* Always start in DROP_ALL mode */ | ||
4283 | data->rx.state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | | ||
4284 | CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); | ||
4285 | |||
4286 | /* We don't set drop flags */ | ||
4287 | data->rx.drop_ip_cs_err_flg = 0; | ||
4288 | data->rx.drop_tcp_cs_err_flg = 0; | ||
4289 | data->rx.drop_ttl0_flg = 0; | ||
4290 | data->rx.drop_udp_cs_err_flg = 0; | ||
4291 | data->rx.inner_vlan_removal_enable_flg = | ||
4292 | test_bit(BNX2X_Q_FLG_VLAN, ¶ms->flags); | ||
4293 | data->rx.outer_vlan_removal_enable_flg = | ||
4294 | test_bit(BNX2X_Q_FLG_OV, ¶ms->flags); | ||
4295 | data->rx.status_block_id = params->rxq_params.fw_sb_id; | ||
4296 | data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index; | ||
4297 | data->rx.max_tpa_queues = params->rxq_params.max_tpa_queues; | ||
4298 | data->rx.max_bytes_on_bd = cpu_to_le16(params->rxq_params.buf_sz); | ||
4299 | data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz); | ||
4300 | data->rx.bd_page_base.lo = | ||
4301 | cpu_to_le32(U64_LO(params->rxq_params.dscr_map)); | ||
4302 | data->rx.bd_page_base.hi = | ||
4303 | cpu_to_le32(U64_HI(params->rxq_params.dscr_map)); | ||
4304 | data->rx.sge_page_base.lo = | ||
4305 | cpu_to_le32(U64_LO(params->rxq_params.sge_map)); | ||
4306 | data->rx.sge_page_base.hi = | ||
4307 | cpu_to_le32(U64_HI(params->rxq_params.sge_map)); | ||
4308 | data->rx.cqe_page_base.lo = | ||
4309 | cpu_to_le32(U64_LO(params->rxq_params.rcq_map)); | ||
4310 | data->rx.cqe_page_base.hi = | ||
4311 | cpu_to_le32(U64_HI(params->rxq_params.rcq_map)); | ||
4312 | data->rx.is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, | ||
4313 | ¶ms->flags); | ||
4314 | |||
4315 | if (test_bit(BNX2X_Q_FLG_MCAST, ¶ms->flags)) { | ||
4316 | data->rx.approx_mcast_engine_id = o->func_id; | ||
4317 | data->rx.is_approx_mcast = 1; | ||
4318 | } | ||
4319 | |||
4320 | data->rx.rss_engine_id = params->rxq_params.rss_engine_id; | ||
4321 | |||
4322 | /* flow control data */ | ||
4323 | data->rx.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo); | ||
4324 | data->rx.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi); | ||
4325 | data->rx.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo); | ||
4326 | data->rx.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi); | ||
4327 | data->rx.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo); | ||
4328 | data->rx.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi); | ||
4329 | data->rx.rx_cos_mask = cpu_to_le16(params->pause.pri_map); | ||
4330 | |||
4331 | /* silent vlan removal */ | ||
4332 | data->rx.silent_vlan_removal_flg = | ||
4333 | test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, ¶ms->flags); | ||
4334 | data->rx.silent_vlan_value = | ||
4335 | cpu_to_le16(params->rxq_params.silent_removal_value); | ||
4336 | data->rx.silent_vlan_mask = | ||
4337 | cpu_to_le16(params->rxq_params.silent_removal_mask); | ||
4338 | |||
4339 | /* Tx data */ | ||
4340 | data->tx.enforce_security_flg = | ||
4341 | test_bit(BNX2X_Q_FLG_TX_SEC, ¶ms->flags); | ||
4342 | data->tx.default_vlan = | ||
4343 | cpu_to_le16(params->txq_params.default_vlan); | ||
4344 | data->tx.default_vlan_flg = | ||
4345 | test_bit(BNX2X_Q_FLG_DEF_VLAN, ¶ms->flags); | ||
4346 | data->tx.tx_switching_flg = | ||
4347 | test_bit(BNX2X_Q_FLG_TX_SWITCH, ¶ms->flags); | ||
4348 | data->tx.anti_spoofing_flg = | ||
4349 | test_bit(BNX2X_Q_FLG_ANTI_SPOOF, ¶ms->flags); | ||
4350 | data->tx.tx_status_block_id = params->txq_params.fw_sb_id; | ||
4351 | data->tx.tx_sb_index_number = params->txq_params.sb_cq_index; | ||
4352 | data->tx.tss_leading_client_id = params->txq_params.tss_leading_cl_id; | ||
4353 | |||
4354 | data->tx.tx_bd_page_base.lo = | ||
4355 | cpu_to_le32(U64_LO(params->txq_params.dscr_map)); | ||
4356 | data->tx.tx_bd_page_base.hi = | ||
4357 | cpu_to_le32(U64_HI(params->txq_params.dscr_map)); | ||
4358 | |||
4359 | /* Don't configure any Tx switching mode during queue SETUP */ | ||
4360 | data->tx.state = 0; | ||
4361 | } | ||
4362 | |||
4363 | |||
4364 | /** | ||
4365 | * bnx2x_q_init - init HW/FW queue | ||
4366 | * | ||
4367 | * @bp: device handle | ||
4368 | * @params: queue state parameters | ||
4369 | * | ||
4370 | * HW/FW initial Queue configuration: | ||
4371 | * - HC: Rx and Tx | ||
4372 | * - CDU context validation | ||
4373 | * | ||
4374 | */ | ||
4375 | static inline int bnx2x_q_init(struct bnx2x *bp, | ||
4376 | struct bnx2x_queue_state_params *params) | ||
4377 | { | ||
4378 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4379 | struct bnx2x_queue_init_params *init = ¶ms->params.init; | ||
4380 | u16 hc_usec; | ||
4381 | |||
4382 | /* Tx HC configuration */ | ||
4383 | if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) && | ||
4384 | test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) { | ||
4385 | hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0; | ||
4386 | |||
4387 | bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id, | ||
4388 | init->tx.sb_cq_index, | ||
4389 | !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags), | ||
4390 | hc_usec); | ||
4391 | } | ||
4392 | |||
4393 | /* Rx HC configuration */ | ||
4394 | if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) && | ||
4395 | test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) { | ||
4396 | hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0; | ||
4397 | |||
4398 | bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id, | ||
4399 | init->rx.sb_cq_index, | ||
4400 | !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags), | ||
4401 | hc_usec); | ||
4402 | } | ||
4403 | |||
4404 | /* Set CDU context validation values */ | ||
4405 | bnx2x_set_ctx_validation(bp, init->cxt, o->cid); | ||
4406 | |||
4407 | /* As no ramrod is sent, complete the command immediately */ | ||
4408 | o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT); | ||
4409 | |||
4410 | mmiowb(); | ||
4411 | smp_mb(); | ||
4412 | |||
4413 | return 0; | ||
4414 | } | ||
4415 | |||
4416 | static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, | ||
4417 | struct bnx2x_queue_state_params *params) | ||
4418 | { | ||
4419 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4420 | struct client_init_ramrod_data *rdata = | ||
4421 | (struct client_init_ramrod_data *)o->rdata; | ||
4422 | dma_addr_t data_mapping = o->rdata_mapping; | ||
4423 | int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; | ||
4424 | |||
4425 | /* Clear the ramrod data */ | ||
4426 | memset(rdata, 0, sizeof(*rdata)); | ||
4427 | |||
4428 | /* Fill the ramrod data */ | ||
4429 | bnx2x_q_fill_setup_data_cmn(bp, params, rdata); | ||
4430 | |||
4431 | mb(); | ||
4432 | |||
4433 | return bnx2x_sp_post(bp, ramrod, o->cid, U64_HI(data_mapping), | ||
4434 | U64_LO(data_mapping), ETH_CONNECTION_TYPE); | ||
4435 | } | ||
4436 | |||
4437 | static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, | ||
4438 | struct bnx2x_queue_state_params *params) | ||
4439 | { | ||
4440 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4441 | struct client_init_ramrod_data *rdata = | ||
4442 | (struct client_init_ramrod_data *)o->rdata; | ||
4443 | dma_addr_t data_mapping = o->rdata_mapping; | ||
4444 | int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; | ||
4445 | |||
4446 | /* Clear the ramrod data */ | ||
4447 | memset(rdata, 0, sizeof(*rdata)); | ||
4448 | |||
4449 | /* Fill the ramrod data */ | ||
4450 | bnx2x_q_fill_setup_data_cmn(bp, params, rdata); | ||
4451 | bnx2x_q_fill_setup_data_e2(bp, params, rdata); | ||
4452 | |||
4453 | mb(); | ||
4454 | |||
4455 | return bnx2x_sp_post(bp, ramrod, o->cid, U64_HI(data_mapping), | ||
4456 | U64_LO(data_mapping), ETH_CONNECTION_TYPE); | ||
4457 | } | ||
4458 | |||
4459 | static void bnx2x_q_fill_update_data(struct bnx2x *bp, | ||
4460 | struct bnx2x_queue_sp_obj *obj, | ||
4461 | struct bnx2x_queue_update_params *params, | ||
4462 | struct client_update_ramrod_data *data) | ||
4463 | { | ||
4464 | /* Client ID of the client to update */ | ||
4465 | data->client_id = obj->cl_id; | ||
4466 | |||
4467 | /* Function ID of the client to update */ | ||
4468 | data->func_id = obj->func_id; | ||
4469 | |||
4470 | /* Default VLAN value */ | ||
4471 | data->default_vlan = cpu_to_le16(params->def_vlan); | ||
4472 | |||
4473 | /* Inner VLAN stripping */ | ||
4474 | data->inner_vlan_removal_enable_flg = | ||
4475 | test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, ¶ms->update_flags); | ||
4476 | data->inner_vlan_removal_change_flg = | ||
4477 | test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, | ||
4478 | ¶ms->update_flags); | ||
4479 | |||
4480 | 	/* Outer VLAN stripping */ | ||
4481 | data->outer_vlan_removal_enable_flg = | ||
4482 | test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags); | ||
4483 | data->outer_vlan_removal_change_flg = | ||
4484 | test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG, | ||
4485 | ¶ms->update_flags); | ||
4486 | |||
4487 | 	/* Drop packets whose source MAC doesn't belong to this | ||
4488 | * Queue. | ||
4489 | */ | ||
4490 | data->anti_spoofing_enable_flg = | ||
4491 | test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, ¶ms->update_flags); | ||
4492 | data->anti_spoofing_change_flg = | ||
4493 | test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, ¶ms->update_flags); | ||
4494 | |||
4495 | /* Activate/Deactivate */ | ||
4496 | data->activate_flg = | ||
4497 | test_bit(BNX2X_Q_UPDATE_ACTIVATE, ¶ms->update_flags); | ||
4498 | data->activate_change_flg = | ||
4499 | test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, ¶ms->update_flags); | ||
4500 | |||
4501 | /* Enable default VLAN */ | ||
4502 | data->default_vlan_enable_flg = | ||
4503 | test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, ¶ms->update_flags); | ||
4504 | data->default_vlan_change_flg = | ||
4505 | test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, | ||
4506 | ¶ms->update_flags); | ||
4507 | |||
4508 | /* silent vlan removal */ | ||
4509 | data->silent_vlan_change_flg = | ||
4510 | test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, | ||
4511 | ¶ms->update_flags); | ||
4512 | data->silent_vlan_removal_flg = | ||
4513 | test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, ¶ms->update_flags); | ||
4514 | data->silent_vlan_value = cpu_to_le16(params->silent_removal_value); | ||
4515 | data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask); | ||
4516 | } | ||
4517 | |||
4518 | static inline int bnx2x_q_send_update(struct bnx2x *bp, | ||
4519 | struct bnx2x_queue_state_params *params) | ||
4520 | { | ||
4521 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4522 | struct client_update_ramrod_data *rdata = | ||
4523 | (struct client_update_ramrod_data *)o->rdata; | ||
4524 | dma_addr_t data_mapping = o->rdata_mapping; | ||
4525 | |||
4526 | /* Clear the ramrod data */ | ||
4527 | memset(rdata, 0, sizeof(*rdata)); | ||
4528 | |||
4529 | /* Fill the ramrod data */ | ||
4530 | bnx2x_q_fill_update_data(bp, o, ¶ms->params.update, rdata); | ||
4531 | |||
4532 | mb(); | ||
4533 | |||
4534 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, o->cid, | ||
4535 | U64_HI(data_mapping), | ||
4536 | U64_LO(data_mapping), ETH_CONNECTION_TYPE); | ||
4537 | } | ||
4538 | |||
4539 | /** | ||
4540 | * bnx2x_q_send_deactivate - send DEACTIVATE command | ||
4541 | * | ||
4542 | * @bp: device handle | ||
4543 | * @params: queue state parameters | ||
4544 | * | ||
4545 | * implemented using the UPDATE command. | ||
4546 | */ | ||
4547 | static inline int bnx2x_q_send_deactivate(struct bnx2x *bp, | ||
4548 | struct bnx2x_queue_state_params *params) | ||
4549 | { | ||
4550 | struct bnx2x_queue_update_params *update = ¶ms->params.update; | ||
4551 | |||
4552 | memset(update, 0, sizeof(*update)); | ||
4553 | |||
4554 | __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); | ||
4555 | |||
4556 | return bnx2x_q_send_update(bp, params); | ||
4557 | } | ||
4558 | |||
4559 | /** | ||
4560 | * bnx2x_q_send_activate - send ACTIVATE command | ||
4561 | * | ||
4562 | * @bp: device handle | ||
4563 | * @params: queue state parameters | ||
4564 | * | ||
4565 | * implemented using the UPDATE command. | ||
4566 | */ | ||
4567 | static inline int bnx2x_q_send_activate(struct bnx2x *bp, | ||
4568 | struct bnx2x_queue_state_params *params) | ||
4569 | { | ||
4570 | struct bnx2x_queue_update_params *update = ¶ms->params.update; | ||
4571 | |||
4572 | memset(update, 0, sizeof(*update)); | ||
4573 | |||
4574 | __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags); | ||
4575 | __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); | ||
4576 | |||
4577 | return bnx2x_q_send_update(bp, params); | ||
4578 | } | ||
4579 | |||
4580 | static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, | ||
4581 | struct bnx2x_queue_state_params *params) | ||
4582 | { | ||
4583 | /* TODO: Not implemented yet. */ | ||
4584 | return -1; | ||
4585 | } | ||
4586 | |||
4587 | static inline int bnx2x_q_send_halt(struct bnx2x *bp, | ||
4588 | struct bnx2x_queue_state_params *params) | ||
4589 | { | ||
4590 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4591 | |||
4592 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, o->cid, 0, o->cl_id, | ||
4593 | ETH_CONNECTION_TYPE); | ||
4594 | } | ||
4595 | |||
4596 | static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp, | ||
4597 | struct bnx2x_queue_state_params *params) | ||
4598 | { | ||
4599 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4600 | |||
4601 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, o->cid, 0, 0, | ||
4602 | NONE_CONNECTION_TYPE); | ||
4603 | } | ||
4604 | |||
4605 | static inline int bnx2x_q_send_terminate(struct bnx2x *bp, | ||
4606 | struct bnx2x_queue_state_params *params) | ||
4607 | { | ||
4608 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4609 | |||
4610 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, o->cid, 0, 0, | ||
4611 | ETH_CONNECTION_TYPE); | ||
4612 | } | ||
4613 | |||
4614 | static inline int bnx2x_q_send_empty(struct bnx2x *bp, | ||
4615 | struct bnx2x_queue_state_params *params) | ||
4616 | { | ||
4617 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4618 | |||
4619 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, o->cid, 0, 0, | ||
4620 | ETH_CONNECTION_TYPE); | ||
4621 | } | ||
4622 | |||
4623 | static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp, | ||
4624 | struct bnx2x_queue_state_params *params) | ||
4625 | { | ||
4626 | switch (params->cmd) { | ||
4627 | case BNX2X_Q_CMD_INIT: | ||
4628 | return bnx2x_q_init(bp, params); | ||
4629 | case BNX2X_Q_CMD_DEACTIVATE: | ||
4630 | return bnx2x_q_send_deactivate(bp, params); | ||
4631 | case BNX2X_Q_CMD_ACTIVATE: | ||
4632 | return bnx2x_q_send_activate(bp, params); | ||
4633 | case BNX2X_Q_CMD_UPDATE: | ||
4634 | return bnx2x_q_send_update(bp, params); | ||
4635 | case BNX2X_Q_CMD_UPDATE_TPA: | ||
4636 | return bnx2x_q_send_update_tpa(bp, params); | ||
4637 | case BNX2X_Q_CMD_HALT: | ||
4638 | return bnx2x_q_send_halt(bp, params); | ||
4639 | case BNX2X_Q_CMD_CFC_DEL: | ||
4640 | return bnx2x_q_send_cfc_del(bp, params); | ||
4641 | case BNX2X_Q_CMD_TERMINATE: | ||
4642 | return bnx2x_q_send_terminate(bp, params); | ||
4643 | case BNX2X_Q_CMD_EMPTY: | ||
4644 | return bnx2x_q_send_empty(bp, params); | ||
4645 | default: | ||
4646 | BNX2X_ERR("Unknown command: %d\n", params->cmd); | ||
4647 | return -EINVAL; | ||
4648 | } | ||
4649 | } | ||
4650 | |||
4651 | static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp, | ||
4652 | struct bnx2x_queue_state_params *params) | ||
4653 | { | ||
4654 | switch (params->cmd) { | ||
4655 | case BNX2X_Q_CMD_SETUP: | ||
4656 | return bnx2x_q_send_setup_e1x(bp, params); | ||
4657 | case BNX2X_Q_CMD_INIT: | ||
4658 | case BNX2X_Q_CMD_DEACTIVATE: | ||
4659 | case BNX2X_Q_CMD_ACTIVATE: | ||
4660 | case BNX2X_Q_CMD_UPDATE: | ||
4661 | case BNX2X_Q_CMD_UPDATE_TPA: | ||
4662 | case BNX2X_Q_CMD_HALT: | ||
4663 | case BNX2X_Q_CMD_CFC_DEL: | ||
4664 | case BNX2X_Q_CMD_TERMINATE: | ||
4665 | case BNX2X_Q_CMD_EMPTY: | ||
4666 | return bnx2x_queue_send_cmd_cmn(bp, params); | ||
4667 | default: | ||
4668 | BNX2X_ERR("Unknown command: %d\n", params->cmd); | ||
4669 | return -EINVAL; | ||
4670 | } | ||
4671 | } | ||
4672 | |||
4673 | static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp, | ||
4674 | struct bnx2x_queue_state_params *params) | ||
4675 | { | ||
4676 | switch (params->cmd) { | ||
4677 | case BNX2X_Q_CMD_SETUP: | ||
4678 | return bnx2x_q_send_setup_e2(bp, params); | ||
4679 | case BNX2X_Q_CMD_INIT: | ||
4680 | case BNX2X_Q_CMD_DEACTIVATE: | ||
4681 | case BNX2X_Q_CMD_ACTIVATE: | ||
4682 | case BNX2X_Q_CMD_UPDATE: | ||
4683 | case BNX2X_Q_CMD_UPDATE_TPA: | ||
4684 | case BNX2X_Q_CMD_HALT: | ||
4685 | case BNX2X_Q_CMD_CFC_DEL: | ||
4686 | case BNX2X_Q_CMD_TERMINATE: | ||
4687 | case BNX2X_Q_CMD_EMPTY: | ||
4688 | return bnx2x_queue_send_cmd_cmn(bp, params); | ||
4689 | default: | ||
4690 | BNX2X_ERR("Unknown command: %d\n", params->cmd); | ||
4691 | return -EINVAL; | ||
4692 | } | ||
4693 | } | ||
4694 | |||
4695 | /** | ||
4696 | * bnx2x_queue_chk_transition - check state machine of a regular Queue | ||
4697 | * | ||
4698 | * @bp: device handle | ||
4699 | * @o: queue state object | ||
4700 | * @params: queue state parameters | ||
4701 | * | ||
4702 | * (for a regular, i.e. non-Forwarding, Queue) | ||
4703 | * It both checks if the requested command is legal in a current | ||
4704 | * state and, if it's legal, sets a `next_state' in the object | ||
4705 | * that will be used in the completion flow to set the `state' | ||
4706 | * of the object. | ||
4707 | * | ||
4708 | * returns 0 if a requested command is a legal transition, | ||
4709 | * -EINVAL otherwise. | ||
4710 | */ | ||
4711 | static int bnx2x_queue_chk_transition(struct bnx2x *bp, | ||
4712 | struct bnx2x_queue_sp_obj *o, | ||
4713 | struct bnx2x_queue_state_params *params) | ||
4714 | { | ||
4715 | enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX; | ||
4716 | enum bnx2x_queue_cmd cmd = params->cmd; | ||
4717 | |||
4718 | switch (state) { | ||
4719 | case BNX2X_Q_STATE_RESET: | ||
4720 | if (cmd == BNX2X_Q_CMD_INIT) | ||
4721 | next_state = BNX2X_Q_STATE_INITIALIZED; | ||
4722 | |||
4723 | break; | ||
4724 | case BNX2X_Q_STATE_INITIALIZED: | ||
4725 | if (cmd == BNX2X_Q_CMD_SETUP) { | ||
4726 | if (test_bit(BNX2X_Q_FLG_ACTIVE, | ||
4727 | ¶ms->params.setup.flags)) | ||
4728 | next_state = BNX2X_Q_STATE_ACTIVE; | ||
4729 | else | ||
4730 | next_state = BNX2X_Q_STATE_INACTIVE; | ||
4731 | } | ||
4732 | |||
4733 | break; | ||
4734 | case BNX2X_Q_STATE_ACTIVE: | ||
4735 | if (cmd == BNX2X_Q_CMD_DEACTIVATE) | ||
4736 | next_state = BNX2X_Q_STATE_INACTIVE; | ||
4737 | |||
4738 | else if ((cmd == BNX2X_Q_CMD_EMPTY) || | ||
4739 | (cmd == BNX2X_Q_CMD_UPDATE_TPA)) | ||
4740 | next_state = BNX2X_Q_STATE_ACTIVE; | ||
4741 | |||
4742 | else if (cmd == BNX2X_Q_CMD_HALT) | ||
4743 | next_state = BNX2X_Q_STATE_STOPPED; | ||
4744 | |||
4745 | else if (cmd == BNX2X_Q_CMD_UPDATE) { | ||
4746 | struct bnx2x_queue_update_params *update_params = | ||
4747 | ¶ms->params.update; | ||
4748 | |||
4749 | /* If "active" state change is requested, update the | ||
4750 | * state accordingly. | ||
4751 | */ | ||
4752 | if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, | ||
4753 | &update_params->update_flags) && | ||
4754 | !test_bit(BNX2X_Q_UPDATE_ACTIVATE, | ||
4755 | &update_params->update_flags)) | ||
4756 | next_state = BNX2X_Q_STATE_INACTIVE; | ||
4757 | else | ||
4758 | next_state = BNX2X_Q_STATE_ACTIVE; | ||
4759 | } | ||
4760 | |||
4761 | break; | ||
4762 | case BNX2X_Q_STATE_INACTIVE: | ||
4763 | if (cmd == BNX2X_Q_CMD_ACTIVATE) | ||
4764 | next_state = BNX2X_Q_STATE_ACTIVE; | ||
4765 | |||
4766 | else if ((cmd == BNX2X_Q_CMD_EMPTY) || | ||
4767 | (cmd == BNX2X_Q_CMD_UPDATE_TPA)) | ||
4768 | next_state = BNX2X_Q_STATE_INACTIVE; | ||
4769 | |||
4770 | else if (cmd == BNX2X_Q_CMD_HALT) | ||
4771 | next_state = BNX2X_Q_STATE_STOPPED; | ||
4772 | |||
4773 | else if (cmd == BNX2X_Q_CMD_UPDATE) { | ||
4774 | struct bnx2x_queue_update_params *update_params = | ||
4775 | ¶ms->params.update; | ||
4776 | |||
4777 | /* If "active" state change is requested, update the | ||
4778 | * state accordingly. | ||
4779 | */ | ||
4780 | if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, | ||
4781 | &update_params->update_flags) && | ||
4782 | test_bit(BNX2X_Q_UPDATE_ACTIVATE, | ||
4783 | &update_params->update_flags)) | ||
4784 | next_state = BNX2X_Q_STATE_ACTIVE; | ||
4785 | else | ||
4786 | next_state = BNX2X_Q_STATE_INACTIVE; | ||
4787 | } | ||
4788 | |||
4789 | break; | ||
4790 | case BNX2X_Q_STATE_STOPPED: | ||
4791 | if (cmd == BNX2X_Q_CMD_TERMINATE) | ||
4792 | next_state = BNX2X_Q_STATE_TERMINATED; | ||
4793 | |||
4794 | break; | ||
4795 | case BNX2X_Q_STATE_TERMINATED: | ||
4796 | if (cmd == BNX2X_Q_CMD_CFC_DEL) | ||
4797 | next_state = BNX2X_Q_STATE_RESET; | ||
4798 | |||
4799 | break; | ||
4800 | default: | ||
4801 | BNX2X_ERR("Illegal state: %d\n", state); | ||
4802 | } | ||
4803 | |||
4804 | /* Transition is assured */ | ||
4805 | if (next_state != BNX2X_Q_STATE_MAX) { | ||
4806 | DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n", | ||
4807 | state, cmd, next_state); | ||
4808 | o->next_state = next_state; | ||
4809 | return 0; | ||
4810 | } | ||
4811 | |||
4812 | DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd); | ||
4813 | |||
4814 | return -EINVAL; | ||
4815 | } | ||
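Taken together, the transitions above encode the expected lifecycle of a regular Queue: RESET -> INITIALIZED (INIT) -> ACTIVE/INACTIVE (SETUP) -> ... -> STOPPED (HALT) -> TERMINATED (TERMINATE) -> RESET (CFC_DEL). As an illustrative sketch (not part of this patch), a teardown path would therefore issue the last three commands back to back on an already prepared q_params:

	static const enum bnx2x_queue_cmd stop_seq[] = {
		BNX2X_Q_CMD_HALT, BNX2X_Q_CMD_TERMINATE, BNX2X_Q_CMD_CFC_DEL
	};
	int i, rc;

	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	for (i = 0; i < ARRAY_SIZE(stop_seq); i++) {
		q_params.cmd = stop_seq[i];
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc)
			return rc;
	}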
4816 | |||
4817 | void bnx2x_init_queue_obj(struct bnx2x *bp, | ||
4818 | struct bnx2x_queue_sp_obj *obj, | ||
4819 | u8 cl_id, u32 cid, u8 func_id, void *rdata, | ||
4820 | dma_addr_t rdata_mapping, unsigned long type) | ||
4821 | { | ||
4822 | memset(obj, 0, sizeof(*obj)); | ||
4823 | |||
4824 | obj->cid = cid; | ||
4825 | obj->cl_id = cl_id; | ||
4826 | obj->func_id = func_id; | ||
4827 | obj->rdata = rdata; | ||
4828 | obj->rdata_mapping = rdata_mapping; | ||
4829 | obj->type = type; | ||
4830 | obj->next_state = BNX2X_Q_STATE_MAX; | ||
4831 | |||
4832 | if (CHIP_IS_E1x(bp)) | ||
4833 | obj->send_cmd = bnx2x_queue_send_cmd_e1x; | ||
4834 | else | ||
4835 | obj->send_cmd = bnx2x_queue_send_cmd_e2; | ||
4836 | |||
4837 | obj->check_transition = bnx2x_queue_chk_transition; | ||
4838 | |||
4839 | obj->complete_cmd = bnx2x_queue_comp_cmd; | ||
4840 | obj->wait_comp = bnx2x_queue_wait_comp; | ||
4841 | obj->set_pending = bnx2x_queue_set_pending; | ||
4842 | } | ||
4843 | |||
4844 | /********************** Function state object *********************************/ | ||
4845 | |||
4846 | static int bnx2x_func_wait_comp(struct bnx2x *bp, | ||
4847 | struct bnx2x_func_sp_obj *o, | ||
4848 | enum bnx2x_func_cmd cmd) | ||
4849 | { | ||
4850 | return bnx2x_state_wait(bp, cmd, &o->pending); | ||
4851 | } | ||
4852 | |||
4853 | /** | ||
4854 | * bnx2x_func_state_change_comp - complete the state machine transition | ||
4855 | * | ||
4856 | * @bp: device handle | ||
4857 | * @o: function state object | ||
4858 | * @cmd: command that has completed | ||
4859 | * | ||
4860 | * Called on state change transition. Completes the state | ||
4861 | * machine transition only - no HW interaction. | ||
4862 | */ | ||
4863 | static inline int bnx2x_func_state_change_comp(struct bnx2x *bp, | ||
4864 | struct bnx2x_func_sp_obj *o, | ||
4865 | enum bnx2x_func_cmd cmd) | ||
4866 | { | ||
4867 | unsigned long cur_pending = o->pending; | ||
4868 | |||
4869 | if (!test_and_clear_bit(cmd, &cur_pending)) { | ||
4870 | BNX2X_ERR("Bad MC reply %d for func %d in state %d " | ||
4871 | "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp), | ||
4872 | o->state, cur_pending, o->next_state); | ||
4873 | return -EINVAL; | ||
4874 | } | ||
4875 | |||
4876 | DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to " | ||
4877 | "%d\n", cmd, BP_FUNC(bp), o->next_state); | ||
4878 | |||
4879 | o->state = o->next_state; | ||
4880 | o->next_state = BNX2X_F_STATE_MAX; | ||
4881 | |||
4882 | /* It's important that o->state and o->next_state are | ||
4883 | * updated before o->pending. | ||
4884 | */ | ||
4885 | wmb(); | ||
4886 | |||
4887 | clear_bit(cmd, &o->pending); | ||
4888 | smp_mb__after_clear_bit(); | ||
4889 | |||
4890 | return 0; | ||
4891 | } | ||
4892 | |||
4893 | /** | ||
4894 | * bnx2x_func_comp_cmd - complete the state change command | ||
4895 | * | ||
4896 | * @bp: device handle | ||
4897 | * @o: function state object | ||
4898 | * @cmd: command that has completed | ||
4899 | * | ||
4900 | * Checks that the arrived completion is expected. | ||
4901 | */ | ||
4902 | static int bnx2x_func_comp_cmd(struct bnx2x *bp, | ||
4903 | struct bnx2x_func_sp_obj *o, | ||
4904 | enum bnx2x_func_cmd cmd) | ||
4905 | { | ||
4906 | /* Complete the state machine part first, check if it's a | ||
4907 | * legal completion. | ||
4908 | */ | ||
4909 | int rc = bnx2x_func_state_change_comp(bp, o, cmd); | ||
4910 | return rc; | ||
4911 | } | ||
4912 | |||
4913 | /** | ||
4914 | * bnx2x_func_chk_transition - check the function state machine transition | ||
4915 | * | ||
4916 | * @bp: device handle | ||
4917 | * @o: function state object | ||
4918 | * @params: function state parameters | ||
4919 | * | ||
4920 | * It both checks if the requested command is legal in a current | ||
4921 | * state and, if it's legal, sets a `next_state' in the object | ||
4922 | * that will be used in the completion flow to set the `state' | ||
4923 | * of the object. | ||
4924 | * | ||
4925 | * returns 0 if a requested command is a legal transition, | ||
4926 | * -EINVAL otherwise. | ||
4927 | */ | ||
4928 | static int bnx2x_func_chk_transition(struct bnx2x *bp, | ||
4929 | struct bnx2x_func_sp_obj *o, | ||
4930 | struct bnx2x_func_state_params *params) | ||
4931 | { | ||
4932 | enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; | ||
4933 | enum bnx2x_func_cmd cmd = params->cmd; | ||
4934 | |||
4935 | switch (state) { | ||
4936 | case BNX2X_F_STATE_RESET: | ||
4937 | if (cmd == BNX2X_F_CMD_HW_INIT) | ||
4938 | next_state = BNX2X_F_STATE_INITIALIZED; | ||
4939 | |||
4940 | break; | ||
4941 | case BNX2X_F_STATE_INITIALIZED: | ||
4942 | if (cmd == BNX2X_F_CMD_START) | ||
4943 | next_state = BNX2X_F_STATE_STARTED; | ||
4944 | |||
4945 | else if (cmd == BNX2X_F_CMD_HW_RESET) | ||
4946 | next_state = BNX2X_F_STATE_RESET; | ||
4947 | |||
4948 | break; | ||
4949 | case BNX2X_F_STATE_STARTED: | ||
4950 | if (cmd == BNX2X_F_CMD_STOP) | ||
4951 | next_state = BNX2X_F_STATE_INITIALIZED; | ||
4952 | |||
4953 | break; | ||
4954 | default: | ||
4955 | BNX2X_ERR("Unknown state: %d\n", state); | ||
4956 | } | ||
4957 | |||
4958 | /* Transition is assured */ | ||
4959 | if (next_state != BNX2X_F_STATE_MAX) { | ||
4960 | DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n", | ||
4961 | state, cmd, next_state); | ||
4962 | o->next_state = next_state; | ||
4963 | return 0; | ||
4964 | } | ||
4965 | |||
4966 | DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n", | ||
4967 | state, cmd); | ||
4968 | |||
4969 | return -EINVAL; | ||
4970 | } | ||
4971 | |||
4972 | /** | ||
4973 | * bnx2x_func_init_func - performs HW init at function stage | ||
4974 | * | ||
4975 | * @bp: device handle | ||
4976 | * @drv: driver specific operations | ||
4977 | * | ||
4978 | * Init HW when the current phase is | ||
4979 | * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only | ||
4980 | * HW blocks. | ||
4981 | */ | ||
4982 | static inline int bnx2x_func_init_func(struct bnx2x *bp, | ||
4983 | const struct bnx2x_func_sp_drv_ops *drv) | ||
4984 | { | ||
4985 | return drv->init_hw_func(bp); | ||
4986 | } | ||
4987 | |||
4988 | /** | ||
4989 | * bnx2x_func_init_port - performs HW init at port stage | ||
4990 | * | ||
4991 | * @bp: device handle | ||
4992 | * @drv: driver specific operations | ||
4993 | * | ||
4994 | * Init HW when the current phase is | ||
4995 | * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and | ||
4996 | * FUNCTION-only HW blocks. | ||
4997 | * | ||
4998 | */ | ||
4999 | static inline int bnx2x_func_init_port(struct bnx2x *bp, | ||
5000 | const struct bnx2x_func_sp_drv_ops *drv) | ||
5001 | { | ||
5002 | int rc = drv->init_hw_port(bp); | ||
5003 | if (rc) | ||
5004 | return rc; | ||
5005 | |||
5006 | return bnx2x_func_init_func(bp, drv); | ||
5007 | } | ||
5008 | |||
5009 | /** | ||
5010 | * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage | ||
5011 | * | ||
5012 | * @bp: device handle | ||
5013 | * @drv: driver specific operations | ||
5014 | * | ||
5015 | * Init HW when the current phase is | ||
5016 | * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP, | ||
5017 | * PORT-only and FUNCTION-only HW blocks. | ||
5018 | */ | ||
5019 | static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp, | ||
5020 | const struct bnx2x_func_sp_drv_ops *drv) | ||
5021 | { | ||
5022 | int rc = drv->init_hw_cmn_chip(bp); | ||
5023 | if (rc) | ||
5024 | return rc; | ||
5025 | |||
5026 | return bnx2x_func_init_port(bp, drv); | ||
5027 | } | ||
5028 | |||
5029 | /** | ||
5030 | * bnx2x_func_init_cmn - performs HW init at common stage | ||
5031 | * | ||
5032 | * @bp: device handle | ||
5033 | * @drv: driver specific operations | ||
5034 | * | ||
5035 | * Init HW when the current phase is | ||
5036 | * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON, | ||
5037 | * PORT-only and FUNCTION-only HW blocks. | ||
5038 | */ | ||
5039 | static inline int bnx2x_func_init_cmn(struct bnx2x *bp, | ||
5040 | const struct bnx2x_func_sp_drv_ops *drv) | ||
5041 | { | ||
5042 | int rc = drv->init_hw_cmn(bp); | ||
5043 | if (rc) | ||
5044 | return rc; | ||
5045 | |||
5046 | return bnx2x_func_init_port(bp, drv); | ||
5047 | } | ||
5048 | |||
5049 | static int bnx2x_func_hw_init(struct bnx2x *bp, | ||
5050 | struct bnx2x_func_state_params *params) | ||
5051 | { | ||
5052 | u32 load_code = params->params.hw_init.load_phase; | ||
5053 | struct bnx2x_func_sp_obj *o = params->f_obj; | ||
5054 | const struct bnx2x_func_sp_drv_ops *drv = o->drv; | ||
5055 | int rc = 0; | ||
5056 | |||
5057 | DP(BNX2X_MSG_SP, "function %d load_code %x\n", | ||
5058 | BP_ABS_FUNC(bp), load_code); | ||
5059 | |||
5060 | /* Prepare buffers for unzipping the FW */ | ||
5061 | rc = drv->gunzip_init(bp); | ||
5062 | if (rc) | ||
5063 | return rc; | ||
5064 | |||
5065 | /* Prepare FW */ | ||
5066 | rc = drv->init_fw(bp); | ||
5067 | if (rc) { | ||
5068 | BNX2X_ERR("Error loading firmware\n"); | ||
5069 | goto fw_init_err; | ||
5070 | } | ||
5071 | |||
5072 | 	/* Handle the beginning of the COMMON_XXX phases separately... */ | ||
5073 | switch (load_code) { | ||
5074 | case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: | ||
5075 | rc = bnx2x_func_init_cmn_chip(bp, drv); | ||
5076 | if (rc) | ||
5077 | goto init_hw_err; | ||
5078 | |||
5079 | break; | ||
5080 | case FW_MSG_CODE_DRV_LOAD_COMMON: | ||
5081 | rc = bnx2x_func_init_cmn(bp, drv); | ||
5082 | if (rc) | ||
5083 | goto init_hw_err; | ||
5084 | |||
5085 | break; | ||
5086 | case FW_MSG_CODE_DRV_LOAD_PORT: | ||
5087 | rc = bnx2x_func_init_port(bp, drv); | ||
5088 | if (rc) | ||
5089 | goto init_hw_err; | ||
5090 | |||
5091 | break; | ||
5092 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: | ||
5093 | rc = bnx2x_func_init_func(bp, drv); | ||
5094 | if (rc) | ||
5095 | goto init_hw_err; | ||
5096 | |||
5097 | break; | ||
5098 | default: | ||
5099 | BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); | ||
5100 | rc = -EINVAL; | ||
5101 | } | ||
5102 | |||
5103 | init_hw_err: | ||
5104 | drv->release_fw(bp); | ||
5105 | |||
5106 | fw_init_err: | ||
5107 | drv->gunzip_end(bp); | ||
5108 | |||
5109 | 	/* In case of success, complete the command immediately: no ramrods | ||
5110 | * have been sent. | ||
5111 | */ | ||
5112 | if (!rc) | ||
5113 | o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT); | ||
5114 | |||
5115 | return rc; | ||
5116 | } | ||
5117 | |||
5118 | /** | ||
5119 | * bnx2x_func_reset_func - reset HW at function stage | ||
5120 | * | ||
5121 | * @bp: device handle | ||
5122 | * @drv: driver specific operations | ||
5123 | * | ||
5124 | * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only | ||
5125 | * FUNCTION-only HW blocks. | ||
5126 | */ | ||
5127 | static inline void bnx2x_func_reset_func(struct bnx2x *bp, | ||
5128 | const struct bnx2x_func_sp_drv_ops *drv) | ||
5129 | { | ||
5130 | drv->reset_hw_func(bp); | ||
5131 | } | ||
5132 | |||
5133 | /** | ||
5134 | * bnx2x_func_reset_port - reset HW at port stage | ||
5135 | * | ||
5136 | * @bp: device handle | ||
5137 | * @drv: driver specific operations | ||
5138 | * | ||
5139 | * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset | ||
5140 | * FUNCTION-only and PORT-only HW blocks. | ||
5141 | * | ||
5142 | * !!!IMPORTANT!!! | ||
5143 | * | ||
5144 | * It's important to call reset_port() before reset_func() as the last thing | ||
5145 | * reset_func() does is pf_disable(), thus disabling PGLUE_B, which | ||
5146 | * makes any further DMAE transactions impossible. | ||
5147 | */ | ||
5148 | static inline void bnx2x_func_reset_port(struct bnx2x *bp, | ||
5149 | const struct bnx2x_func_sp_drv_ops *drv) | ||
5150 | { | ||
5151 | drv->reset_hw_port(bp); | ||
5152 | bnx2x_func_reset_func(bp, drv); | ||
5153 | } | ||
5154 | |||
5155 | /** | ||
5156 | * bnx2x_func_reset_cmn - reset HW at common stage | ||
5157 | * | ||
5158 | * @bp: device handle | ||
5159 | * @drv: driver specific operations | ||
5160 | * | ||
5161 | * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and | ||
5162 | * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON, | ||
5163 | * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks. | ||
5164 | */ | ||
5165 | static inline void bnx2x_func_reset_cmn(struct bnx2x *bp, | ||
5166 | const struct bnx2x_func_sp_drv_ops *drv) | ||
5167 | { | ||
5168 | bnx2x_func_reset_port(bp, drv); | ||
5169 | drv->reset_hw_cmn(bp); | ||
5170 | } | ||
5171 | |||
5172 | |||
5173 | static inline int bnx2x_func_hw_reset(struct bnx2x *bp, | ||
5174 | struct bnx2x_func_state_params *params) | ||
5175 | { | ||
5176 | u32 reset_phase = params->params.hw_reset.reset_phase; | ||
5177 | struct bnx2x_func_sp_obj *o = params->f_obj; | ||
5178 | const struct bnx2x_func_sp_drv_ops *drv = o->drv; | ||
5179 | |||
5180 | DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp), | ||
5181 | reset_phase); | ||
5182 | |||
5183 | switch (reset_phase) { | ||
5184 | case FW_MSG_CODE_DRV_UNLOAD_COMMON: | ||
5185 | bnx2x_func_reset_cmn(bp, drv); | ||
5186 | break; | ||
5187 | case FW_MSG_CODE_DRV_UNLOAD_PORT: | ||
5188 | bnx2x_func_reset_port(bp, drv); | ||
5189 | break; | ||
5190 | case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: | ||
5191 | bnx2x_func_reset_func(bp, drv); | ||
5192 | break; | ||
5193 | default: | ||
5194 | BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n", | ||
5195 | reset_phase); | ||
5196 | break; | ||
5197 | } | ||
5198 | |||
5199 | 	/* Complete the command immediately: no ramrods have been sent. */ | ||
5200 | o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); | ||
5201 | |||
5202 | return 0; | ||
5203 | } | ||
5204 | |||
5205 | static inline int bnx2x_func_send_start(struct bnx2x *bp, | ||
5206 | struct bnx2x_func_state_params *params) | ||
5207 | { | ||
5208 | struct bnx2x_func_sp_obj *o = params->f_obj; | ||
5209 | struct function_start_data *rdata = | ||
5210 | (struct function_start_data *)o->rdata; | ||
5211 | dma_addr_t data_mapping = o->rdata_mapping; | ||
5212 | struct bnx2x_func_start_params *start_params = ¶ms->params.start; | ||
5213 | |||
5214 | memset(rdata, 0, sizeof(*rdata)); | ||
5215 | |||
5216 | /* Fill the ramrod data with provided parameters */ | ||
5217 | rdata->function_mode = cpu_to_le16(start_params->mf_mode); | ||
5218 | rdata->sd_vlan_tag = start_params->sd_vlan_tag; | ||
5219 | rdata->path_id = BP_PATH(bp); | ||
5220 | rdata->network_cos_mode = start_params->network_cos_mode; | ||
5221 | |||
5222 | mb(); | ||
5223 | |||
5224 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, | ||
5225 | U64_HI(data_mapping), | ||
5226 | U64_LO(data_mapping), NONE_CONNECTION_TYPE); | ||
5227 | } | ||
5228 | |||
5229 | static inline int bnx2x_func_send_stop(struct bnx2x *bp, | ||
5230 | struct bnx2x_func_state_params *params) | ||
5231 | { | ||
5232 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, | ||
5233 | NONE_CONNECTION_TYPE); | ||
5234 | } | ||
5235 | |||
5236 | static int bnx2x_func_send_cmd(struct bnx2x *bp, | ||
5237 | struct bnx2x_func_state_params *params) | ||
5238 | { | ||
5239 | switch (params->cmd) { | ||
5240 | case BNX2X_F_CMD_HW_INIT: | ||
5241 | return bnx2x_func_hw_init(bp, params); | ||
5242 | case BNX2X_F_CMD_START: | ||
5243 | return bnx2x_func_send_start(bp, params); | ||
5244 | case BNX2X_F_CMD_STOP: | ||
5245 | return bnx2x_func_send_stop(bp, params); | ||
5246 | case BNX2X_F_CMD_HW_RESET: | ||
5247 | return bnx2x_func_hw_reset(bp, params); | ||
5248 | default: | ||
5249 | BNX2X_ERR("Unknown command: %d\n", params->cmd); | ||
5250 | return -EINVAL; | ||
5251 | } | ||
5252 | } | ||
5253 | |||
5254 | void bnx2x_init_func_obj(struct bnx2x *bp, | ||
5255 | struct bnx2x_func_sp_obj *obj, | ||
5256 | void *rdata, dma_addr_t rdata_mapping, | ||
5257 | struct bnx2x_func_sp_drv_ops *drv_iface) | ||
5258 | { | ||
5259 | memset(obj, 0, sizeof(*obj)); | ||
5260 | |||
5261 | mutex_init(&obj->one_pending_mutex); | ||
5262 | |||
5263 | obj->rdata = rdata; | ||
5264 | obj->rdata_mapping = rdata_mapping; | ||
5265 | |||
5266 | obj->send_cmd = bnx2x_func_send_cmd; | ||
5267 | obj->check_transition = bnx2x_func_chk_transition; | ||
5268 | obj->complete_cmd = bnx2x_func_comp_cmd; | ||
5269 | obj->wait_comp = bnx2x_func_wait_comp; | ||
5270 | |||
5271 | obj->drv = drv_iface; | ||
5272 | } | ||
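As an illustrative sketch (not part of this patch), the drv_iface argument is expected to provide exactly the callbacks used by the HW_INIT and HW_RESET flows above; every right-hand-side helper below is a placeholder for whatever the main driver actually implements, and bnx2x_sp()/bnx2x_sp_mapping() are assumed accessors for the slow-path ramrod buffer:

	static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
		.init_hw_cmn_chip = bnx2x_init_hw_common_chip,	/* placeholder */
		.init_hw_cmn	  = bnx2x_init_hw_common,	/* placeholder */
		.init_hw_port	  = bnx2x_init_hw_port,		/* placeholder */
		.init_hw_func	  = bnx2x_init_hw_func,		/* placeholder */
		.reset_hw_cmn	  = bnx2x_reset_common,		/* placeholder */
		.reset_hw_port	  = bnx2x_reset_port,		/* placeholder */
		.reset_hw_func	  = bnx2x_reset_func,		/* placeholder */
		.gunzip_init	  = bnx2x_gunzip_init,		/* placeholder */
		.gunzip_end	  = bnx2x_gunzip_end,		/* placeholder */
		.init_fw	  = bnx2x_init_firmware,	/* placeholder */
		.release_fw	  = bnx2x_release_firmware,	/* placeholder */
	};

	bnx2x_init_func_obj(bp, &bp->func_obj,
			    bnx2x_sp(bp, func_rdata),		/* assumed rdata buffer */
			    bnx2x_sp_mapping(bp, func_rdata),	/* assumed DMA mapping */
			    &bnx2x_func_sp_drv);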
5273 | |||
5274 | /** | ||
5275 | * bnx2x_func_state_change - perform Function state change transition | ||
5276 | * | ||
5277 | * @bp: device handle | ||
5278 | * @params: parameters to perform the transition | ||
5279 | * | ||
5280 | * returns 0 in case of successfully completed transition, | ||
5281 | * negative error code in case of failure, positive | ||
5282 | * (EBUSY) value if there is a completion to that is | ||
5283 | * (EBUSY) value if there is a completion that is | ||
5284 | * not set in params->ramrod_flags for asynchronous | ||
5285 | * commands). | ||
5286 | */ | ||
5287 | int bnx2x_func_state_change(struct bnx2x *bp, | ||
5288 | struct bnx2x_func_state_params *params) | ||
5289 | { | ||
5290 | struct bnx2x_func_sp_obj *o = params->f_obj; | ||
5291 | int rc; | ||
5292 | enum bnx2x_func_cmd cmd = params->cmd; | ||
5293 | unsigned long *pending = &o->pending; | ||
5294 | |||
5295 | mutex_lock(&o->one_pending_mutex); | ||
5296 | |||
5297 | /* Check that the requested transition is legal */ | ||
5298 | if (o->check_transition(bp, o, params)) { | ||
5299 | mutex_unlock(&o->one_pending_mutex); | ||
5300 | return -EINVAL; | ||
5301 | } | ||
5302 | |||
5303 | /* Set "pending" bit */ | ||
5304 | set_bit(cmd, pending); | ||
5305 | |||
5306 | /* Don't send a command if only driver cleanup was requested */ | ||
5307 | if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { | ||
5308 | bnx2x_func_state_change_comp(bp, o, cmd); | ||
5309 | mutex_unlock(&o->one_pending_mutex); | ||
5310 | } else { | ||
5311 | /* Send a ramrod */ | ||
5312 | rc = o->send_cmd(bp, params); | ||
5313 | |||
5314 | mutex_unlock(&o->one_pending_mutex); | ||
5315 | |||
5316 | if (rc) { | ||
5317 | o->next_state = BNX2X_F_STATE_MAX; | ||
5318 | clear_bit(cmd, pending); | ||
5319 | smp_mb__after_clear_bit(); | ||
5320 | return rc; | ||
5321 | } | ||
5322 | |||
5323 | if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { | ||
5324 | rc = o->wait_comp(bp, o, cmd); | ||
5325 | if (rc) | ||
5326 | return rc; | ||
5327 | |||
5328 | return 0; | ||
5329 | } | ||
5330 | } | ||
5331 | |||
5332 | return !!test_bit(cmd, pending); | ||
5333 | } | ||
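To close the loop, here is an illustrative sketch (not part of this patch) of sending FUNCTION_START once HW_INIT has completed; only the fields consumed by bnx2x_func_send_start() above are shown, and the start values are placeholders:

	struct bnx2x_func_state_params func_params = {NULL};
	int rc;

	func_params.f_obj = &bp->func_obj;	/* assumed function object */
	func_params.cmd = BNX2X_F_CMD_START;
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.params.start.mf_mode = 0;		/* placeholder: SF mode */
	func_params.params.start.sd_vlan_tag = 0;	/* placeholder */
	func_params.params.start.network_cos_mode = 0;	/* placeholder */

	rc = bnx2x_func_state_change(bp, &func_params);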
diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/bnx2x/bnx2x_sp.h new file mode 100644 index 000000000000..86eaa80721ea --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_sp.h | |||
@@ -0,0 +1,1235 @@ | |||
1 | /* bnx2x_sp.h: Broadcom Everest network driver. | ||
2 | * | ||
3 | * Copyright 2011 Broadcom Corporation | ||
4 | * | ||
5 | * Unless you and Broadcom execute a separate written software license | ||
6 | * agreement governing use of this software, this software is licensed to you | ||
7 | * under the terms of the GNU General Public License version 2, available | ||
8 | * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). | ||
9 | * | ||
10 | * Notwithstanding the above, under no circumstances may you combine this | ||
11 | * software in any way with any other Broadcom software provided under a | ||
12 | * license other than the GPL, without Broadcom's express prior written | ||
13 | * consent. | ||
14 | * | ||
15 | * Maintained by: Eilon Greenstein <eilong@broadcom.com> | ||
16 | * Written by: Vladislav Zolotarov | ||
17 | * | ||
18 | */ | ||
19 | #ifndef BNX2X_SP_VERBS | ||
20 | #define BNX2X_SP_VERBS | ||
21 | |||
22 | struct bnx2x; | ||
23 | struct eth_context; | ||
24 | |||
25 | /* Bits representing general command's configuration */ | ||
26 | enum { | ||
27 | RAMROD_TX, | ||
28 | RAMROD_RX, | ||
29 | /* Wait until all pending commands complete */ | ||
30 | RAMROD_COMP_WAIT, | ||
31 | /* Don't send a ramrod, only update a registry */ | ||
32 | RAMROD_DRV_CLR_ONLY, | ||
33 | /* Configure HW according to the current object state */ | ||
34 | RAMROD_RESTORE, | ||
35 | /* Execute the next command now */ | ||
36 | RAMROD_EXEC, | ||
37 | /* | ||
38 | 	 * Don't add a new command and continue execution of postponed | ||
39 | * commands. If not set a new command will be added to the | ||
40 | * pending commands list. | ||
41 | */ | ||
42 | RAMROD_CONT, | ||
43 | }; | ||
44 | |||
45 | typedef enum { | ||
46 | BNX2X_OBJ_TYPE_RX, | ||
47 | BNX2X_OBJ_TYPE_TX, | ||
48 | BNX2X_OBJ_TYPE_RX_TX, | ||
49 | } bnx2x_obj_type; | ||
50 | |||
51 | /* Filtering states */ | ||
52 | enum { | ||
53 | BNX2X_FILTER_MAC_PENDING, | ||
54 | BNX2X_FILTER_VLAN_PENDING, | ||
55 | BNX2X_FILTER_VLAN_MAC_PENDING, | ||
56 | BNX2X_FILTER_RX_MODE_PENDING, | ||
57 | BNX2X_FILTER_RX_MODE_SCHED, | ||
58 | BNX2X_FILTER_ISCSI_ETH_START_SCHED, | ||
59 | BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, | ||
60 | BNX2X_FILTER_FCOE_ETH_START_SCHED, | ||
61 | BNX2X_FILTER_FCOE_ETH_STOP_SCHED, | ||
62 | BNX2X_FILTER_MCAST_PENDING, | ||
63 | BNX2X_FILTER_MCAST_SCHED, | ||
64 | BNX2X_FILTER_RSS_CONF_PENDING, | ||
65 | }; | ||
66 | |||
67 | struct bnx2x_raw_obj { | ||
68 | u8 func_id; | ||
69 | |||
70 | /* Queue params */ | ||
71 | u8 cl_id; | ||
72 | u32 cid; | ||
73 | |||
74 | /* Ramrod data buffer params */ | ||
75 | void *rdata; | ||
76 | dma_addr_t rdata_mapping; | ||
77 | |||
78 | /* Ramrod state params */ | ||
79 | int state; /* "ramrod is pending" state bit */ | ||
80 | unsigned long *pstate; /* pointer to state buffer */ | ||
81 | |||
82 | bnx2x_obj_type obj_type; | ||
83 | |||
84 | int (*wait_comp)(struct bnx2x *bp, | ||
85 | struct bnx2x_raw_obj *o); | ||
86 | |||
87 | bool (*check_pending)(struct bnx2x_raw_obj *o); | ||
88 | void (*clear_pending)(struct bnx2x_raw_obj *o); | ||
89 | void (*set_pending)(struct bnx2x_raw_obj *o); | ||
90 | }; | ||
91 | |||
92 | /************************* VLAN-MAC commands related parameters ***************/ | ||
93 | struct bnx2x_mac_ramrod_data { | ||
94 | u8 mac[ETH_ALEN]; | ||
95 | }; | ||
96 | |||
97 | struct bnx2x_vlan_ramrod_data { | ||
98 | u16 vlan; | ||
99 | }; | ||
100 | |||
101 | struct bnx2x_vlan_mac_ramrod_data { | ||
102 | u8 mac[ETH_ALEN]; | ||
103 | u16 vlan; | ||
104 | }; | ||
105 | |||
106 | union bnx2x_classification_ramrod_data { | ||
107 | struct bnx2x_mac_ramrod_data mac; | ||
108 | struct bnx2x_vlan_ramrod_data vlan; | ||
109 | struct bnx2x_vlan_mac_ramrod_data vlan_mac; | ||
110 | }; | ||
111 | |||
112 | /* VLAN_MAC commands */ | ||
113 | enum bnx2x_vlan_mac_cmd { | ||
114 | BNX2X_VLAN_MAC_ADD, | ||
115 | BNX2X_VLAN_MAC_DEL, | ||
116 | BNX2X_VLAN_MAC_MOVE, | ||
117 | }; | ||
118 | |||
119 | struct bnx2x_vlan_mac_data { | ||
120 | /* Requested command: BNX2X_VLAN_MAC_XX */ | ||
121 | enum bnx2x_vlan_mac_cmd cmd; | ||
122 | /* | ||
123 | 	 * Used to carry the vlan_mac_flags bits related to this data, | ||
124 | 	 * taken from the ramrod parameters. | ||
125 | */ | ||
126 | unsigned long vlan_mac_flags; | ||
127 | |||
128 | /* Needed for MOVE command */ | ||
129 | struct bnx2x_vlan_mac_obj *target_obj; | ||
130 | |||
131 | union bnx2x_classification_ramrod_data u; | ||
132 | }; | ||
133 | |||
134 | /*************************** Exe Queue obj ************************************/ | ||
135 | union bnx2x_exe_queue_cmd_data { | ||
136 | struct bnx2x_vlan_mac_data vlan_mac; | ||
137 | |||
138 | struct { | ||
139 | /* TODO */ | ||
140 | } mcast; | ||
141 | }; | ||
142 | |||
143 | struct bnx2x_exeq_elem { | ||
144 | struct list_head link; | ||
145 | |||
146 | /* Length of this element in the exe_chunk. */ | ||
147 | int cmd_len; | ||
148 | |||
149 | union bnx2x_exe_queue_cmd_data cmd_data; | ||
150 | }; | ||
151 | |||
152 | union bnx2x_qable_obj; | ||
153 | |||
154 | union bnx2x_exeq_comp_elem { | ||
155 | union event_ring_elem *elem; | ||
156 | }; | ||
157 | |||
158 | struct bnx2x_exe_queue_obj; | ||
159 | |||
160 | typedef int (*exe_q_validate)(struct bnx2x *bp, | ||
161 | union bnx2x_qable_obj *o, | ||
162 | struct bnx2x_exeq_elem *elem); | ||
163 | |||
164 | /** | ||
165 | * @return positive if the entry was optimized, 0 if not, negative | ||
166 | * in case of an error. | ||
167 | */ | ||
168 | typedef int (*exe_q_optimize)(struct bnx2x *bp, | ||
169 | union bnx2x_qable_obj *o, | ||
170 | struct bnx2x_exeq_elem *elem); | ||
171 | typedef int (*exe_q_execute)(struct bnx2x *bp, | ||
172 | union bnx2x_qable_obj *o, | ||
173 | struct list_head *exe_chunk, | ||
174 | unsigned long *ramrod_flags); | ||
175 | typedef struct bnx2x_exeq_elem * | ||
176 | (*exe_q_get)(struct bnx2x_exe_queue_obj *o, | ||
177 | struct bnx2x_exeq_elem *elem); | ||
178 | |||
179 | struct bnx2x_exe_queue_obj { | ||
180 | /* | ||
181 | 	 * Commands pending for execution. | ||
182 | */ | ||
183 | struct list_head exe_queue; | ||
184 | |||
185 | /* | ||
186 | 	 * Commands pending for completion. | ||
187 | */ | ||
188 | struct list_head pending_comp; | ||
189 | |||
190 | spinlock_t lock; | ||
191 | |||
192 | /* Maximum length of commands' list for one execution */ | ||
193 | int exe_chunk_len; | ||
194 | |||
195 | union bnx2x_qable_obj *owner; | ||
196 | |||
197 | /****** Virtual functions ******/ | ||
198 | /** | ||
199 | * Called before commands execution for commands that are really | ||
200 | * going to be executed (after 'optimize'). | ||
201 | * | ||
202 | * Must run under exe_queue->lock | ||
203 | */ | ||
204 | exe_q_validate validate; | ||
205 | |||
206 | |||
207 | /** | ||
208 | * This will try to cancel the current pending commands list | ||
209 | * considering the new command. | ||
210 | * | ||
211 | * Must run under exe_queue->lock | ||
212 | */ | ||
213 | exe_q_optimize optimize; | ||
214 | |||
215 | /** | ||
216 | * Run the next commands chunk (owner specific). | ||
217 | */ | ||
218 | exe_q_execute execute; | ||
219 | |||
220 | /** | ||
221 | * Return the exe_queue element containing the specific command | ||
222 | * if any. Otherwise return NULL. | ||
223 | */ | ||
224 | exe_q_get get; | ||
225 | }; | ||
226 | /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ | ||
227 | /* | ||
228 | * Element in the VLAN_MAC registry list having all currently configured | ||
229 | * rules. | ||
230 | */ | ||
231 | struct bnx2x_vlan_mac_registry_elem { | ||
232 | struct list_head link; | ||
233 | |||
234 | /* | ||
235 | * Used to store the cam offset used for the mac/vlan/vlan-mac. | ||
236 | * Relevant for 57710 and 57711 only. VLANs and MACs share the | ||
237 | * same CAM for these chips. | ||
238 | */ | ||
239 | int cam_offset; | ||
240 | |||
241 | /* Needed for DEL and RESTORE flows */ | ||
242 | unsigned long vlan_mac_flags; | ||
243 | |||
244 | union bnx2x_classification_ramrod_data u; | ||
245 | }; | ||
246 | |||
247 | /* Bits representing VLAN_MAC commands specific flags */ | ||
248 | enum { | ||
249 | BNX2X_UC_LIST_MAC, | ||
250 | BNX2X_ETH_MAC, | ||
251 | BNX2X_ISCSI_ETH_MAC, | ||
252 | BNX2X_NETQ_ETH_MAC, | ||
253 | BNX2X_DONT_CONSUME_CAM_CREDIT, | ||
254 | BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, | ||
255 | }; | ||
256 | |||
257 | struct bnx2x_vlan_mac_ramrod_params { | ||
258 | /* Object to run the command from */ | ||
259 | struct bnx2x_vlan_mac_obj *vlan_mac_obj; | ||
260 | |||
261 | /* General command flags: COMP_WAIT, etc. */ | ||
262 | unsigned long ramrod_flags; | ||
263 | |||
264 | /* Command specific configuration request */ | ||
265 | struct bnx2x_vlan_mac_data user_req; | ||
266 | }; | ||
267 | |||
268 | struct bnx2x_vlan_mac_obj { | ||
269 | struct bnx2x_raw_obj raw; | ||
270 | |||
271 | /* Bookkeeping list: will prevent the addition of already existing | ||
272 | * entries. | ||
273 | */ | ||
274 | struct list_head head; | ||
275 | |||
276 | /* TODO: Add its initialization in the init functions */ | ||
277 | struct bnx2x_exe_queue_obj exe_queue; | ||
278 | |||
279 | /* MACs credit pool */ | ||
280 | struct bnx2x_credit_pool_obj *macs_pool; | ||
281 | |||
282 | /* VLANs credit pool */ | ||
283 | struct bnx2x_credit_pool_obj *vlans_pool; | ||
284 | |||
285 | /* RAMROD command to be used */ | ||
286 | int ramrod_cmd; | ||
287 | |||
288 | /** | ||
289 | * Checks if ADD-ramrod with the given params may be performed. | ||
290 | * | ||
291 | * @return zero if the element may be added | ||
292 | */ | ||
293 | |||
294 | int (*check_add)(struct bnx2x_vlan_mac_obj *o, | ||
295 | union bnx2x_classification_ramrod_data *data); | ||
296 | |||
297 | /** | ||
298 | * Checks if DEL-ramrod with the given params may be performed. | ||
299 | * | ||
300 | * @return the matching registry element if it may be deleted, NULL otherwise | ||
301 | */ | ||
302 | struct bnx2x_vlan_mac_registry_elem * | ||
303 | (*check_del)(struct bnx2x_vlan_mac_obj *o, | ||
304 | union bnx2x_classification_ramrod_data *data); | ||
305 | |||
306 | /** | ||
307 | * Checks if a MOVE-ramrod with the given params may be performed. | ||
308 | * | ||
309 | * @return true if the element may be moved | ||
310 | */ | ||
311 | bool (*check_move)(struct bnx2x_vlan_mac_obj *src_o, | ||
312 | struct bnx2x_vlan_mac_obj *dst_o, | ||
313 | union bnx2x_classification_ramrod_data *data); | ||
314 | |||
315 | /** | ||
316 | * Update the relevant credit object(s) (consume/return | ||
317 | * correspondingly). | ||
318 | */ | ||
319 | bool (*get_credit)(struct bnx2x_vlan_mac_obj *o); | ||
320 | bool (*put_credit)(struct bnx2x_vlan_mac_obj *o); | ||
321 | bool (*get_cam_offset)(struct bnx2x_vlan_mac_obj *o, int *offset); | ||
322 | bool (*put_cam_offset)(struct bnx2x_vlan_mac_obj *o, int offset); | ||
323 | |||
324 | /** | ||
325 | * Configures one rule in the ramrod data buffer. | ||
326 | */ | ||
327 | void (*set_one_rule)(struct bnx2x *bp, | ||
328 | struct bnx2x_vlan_mac_obj *o, | ||
329 | struct bnx2x_exeq_elem *elem, int rule_idx, | ||
330 | int cam_offset); | ||
331 | |||
332 | /** | ||
333 | * Delete all configured elements having the given | ||
334 | * vlan_mac_flags specification. Assumes there are no commands | ||
335 | * pending execution. Will schedule all currently | ||
336 | * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags | ||
337 | * specification for deletion and will use the given | ||
338 | * ramrod_flags for the last DEL operation. | ||
339 | * | ||
340 | * @param bp | ||
341 | * @param o | ||
342 | * @param ramrod_flags RAMROD_XX flags | ||
343 | * | ||
344 | * @return 0 if the last operation has completed successfully | ||
345 | * and there are no more elements left, positive value | ||
346 | * if there are pending for completion commands, | ||
347 | * negative value in case of failure. | ||
348 | */ | ||
349 | int (*delete_all)(struct bnx2x *bp, | ||
350 | struct bnx2x_vlan_mac_obj *o, | ||
351 | unsigned long *vlan_mac_flags, | ||
352 | unsigned long *ramrod_flags); | ||
353 | |||
354 | /** | ||
355 | * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously | ||
356 | * configured elements list. | ||
357 | * | ||
358 | * @param bp | ||
359 | * @param p Command parameters (RAMROD_COMP_WAIT bit in | ||
360 | * ramrod_flags is only taken into an account) | ||
361 | * @param ppos a pointer to the cookie that should be given back in the | ||
362 | * next call to make the function handle the next element. If | ||
363 | * *ppos is set to NULL it will restart the iterator. | ||
364 | * If returned *ppos == NULL, the last element has been | ||
365 | * handled (a usage sketch follows this structure). | ||
366 | * | ||
367 | * @return int | ||
368 | */ | ||
369 | int (*restore)(struct bnx2x *bp, | ||
370 | struct bnx2x_vlan_mac_ramrod_params *p, | ||
371 | struct bnx2x_vlan_mac_registry_elem **ppos); | ||
372 | |||
373 | /** | ||
374 | * Should be called on a completion arrival. | ||
375 | * | ||
376 | * @param bp | ||
377 | * @param o | ||
378 | * @param cqe Completion element we are handling | ||
379 | * @param ramrod_flags if RAMROD_CONT is set the next bulk of | ||
380 | * pending commands will be executed. | ||
381 | * RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE | ||
382 | * may also be set if needed. | ||
383 | * | ||
384 | * @return 0 if there are neither pending nor waiting for | ||
385 | * completion commands. Positive value if there are | ||
386 | * pending for execution or for completion commands. | ||
387 | * Negative value in case of an error (including an | ||
388 | * error in the cqe). | ||
389 | */ | ||
390 | int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, | ||
391 | union event_ring_elem *cqe, | ||
392 | unsigned long *ramrod_flags); | ||
393 | |||
394 | /** | ||
395 | * Wait for completion of all commands. Don't schedule new ones, | ||
396 | * just wait. It assumes that the completion code will schedule | ||
397 | * for new commands. | ||
398 | */ | ||
399 | int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o); | ||
400 | }; | ||
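A usage sketch of the restore() iterator documented above. This is a driver-context fragment rather than a standalone program, and it assumes (per the header's general convention, not stated for this callback) that a negative return code means an error.

/*
 * Illustrative fragment only: drives the restore() cookie iterator.
 * Passing *ppos == NULL restarts the walk; the walk is over when the
 * callback hands back *ppos == NULL again.
 */
static int example_restore_all(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_ramrod_params *p)
{
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
	int rc;

	do {
		rc = p->vlan_mac_obj->restore(bp, p, &pos);
		if (rc < 0)
			return rc;	/* assumption: negative means error */
	} while (pos);

	return 0;
}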
401 | |||
402 | /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ | ||
403 | |||
404 | /* RX_MODE ramrod special flags: set in rx_mode_flags field in | ||
405 | * a bnx2x_rx_mode_ramrod_params. | ||
406 | */ | ||
407 | enum { | ||
408 | BNX2X_RX_MODE_FCOE_ETH, | ||
409 | BNX2X_RX_MODE_ISCSI_ETH, | ||
410 | }; | ||
411 | |||
412 | enum { | ||
413 | BNX2X_ACCEPT_UNICAST, | ||
414 | BNX2X_ACCEPT_MULTICAST, | ||
415 | BNX2X_ACCEPT_ALL_UNICAST, | ||
416 | BNX2X_ACCEPT_ALL_MULTICAST, | ||
417 | BNX2X_ACCEPT_BROADCAST, | ||
418 | BNX2X_ACCEPT_UNMATCHED, | ||
419 | BNX2X_ACCEPT_ANY_VLAN | ||
420 | }; | ||
421 | |||
422 | struct bnx2x_rx_mode_ramrod_params { | ||
423 | struct bnx2x_rx_mode_obj *rx_mode_obj; | ||
424 | unsigned long *pstate; | ||
425 | int state; | ||
426 | u8 cl_id; | ||
427 | u32 cid; | ||
428 | u8 func_id; | ||
429 | unsigned long ramrod_flags; | ||
430 | unsigned long rx_mode_flags; | ||
431 | |||
432 | /* | ||
433 | * rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to | ||
434 | * a tstorm_eth_mac_filter_config (e1x). | ||
435 | */ | ||
436 | void *rdata; | ||
437 | dma_addr_t rdata_mapping; | ||
438 | |||
439 | /* Rx mode settings */ | ||
440 | unsigned long rx_accept_flags; | ||
441 | |||
442 | /* internal switching settings */ | ||
443 | unsigned long tx_accept_flags; | ||
444 | }; | ||
445 | |||
446 | struct bnx2x_rx_mode_obj { | ||
447 | int (*config_rx_mode)(struct bnx2x *bp, | ||
448 | struct bnx2x_rx_mode_ramrod_params *p); | ||
449 | |||
450 | int (*wait_comp)(struct bnx2x *bp, | ||
451 | struct bnx2x_rx_mode_ramrod_params *p); | ||
452 | }; | ||
453 | |||
454 | /********************** Set multicast group ***********************************/ | ||
455 | |||
456 | struct bnx2x_mcast_list_elem { | ||
457 | struct list_head link; | ||
458 | u8 *mac; | ||
459 | }; | ||
460 | |||
461 | union bnx2x_mcast_config_data { | ||
462 | u8 *mac; | ||
463 | u8 bin; /* used in a RESTORE flow */ | ||
464 | }; | ||
465 | |||
466 | struct bnx2x_mcast_ramrod_params { | ||
467 | struct bnx2x_mcast_obj *mcast_obj; | ||
468 | |||
469 | /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */ | ||
470 | unsigned long ramrod_flags; | ||
471 | |||
472 | struct list_head mcast_list; /* list of struct bnx2x_mcast_list_elem */ | ||
473 | /** TODO: | ||
474 | * - rename it to macs_num. | ||
475 | * - Add a new command type for handling pending commands | ||
476 | * (remove "zero semantics"). | ||
477 | * | ||
478 | * Length of mcast_list. If zero and ADD_CONT command - post | ||
479 | * pending commands. | ||
480 | */ | ||
481 | int mcast_list_len; | ||
482 | }; | ||
483 | |||
484 | enum { | ||
485 | BNX2X_MCAST_CMD_ADD, | ||
486 | BNX2X_MCAST_CMD_CONT, | ||
487 | BNX2X_MCAST_CMD_DEL, | ||
488 | BNX2X_MCAST_CMD_RESTORE, | ||
489 | }; | ||
490 | |||
491 | struct bnx2x_mcast_obj { | ||
492 | struct bnx2x_raw_obj raw; | ||
493 | |||
494 | union { | ||
495 | struct { | ||
496 | #define BNX2X_MCAST_BINS_NUM 256 | ||
497 | #define BNX2X_MCAST_VEC_SZ (BNX2X_MCAST_BINS_NUM / 64) | ||
498 | u64 vec[BNX2X_MCAST_VEC_SZ]; | ||
499 | |||
500 | /** Number of BINs to clear. Should be updated | ||
501 | * immediately when a command arrives in order to | ||
502 | * properly create DEL commands. | ||
503 | */ | ||
504 | int num_bins_set; | ||
505 | } aprox_match; | ||
506 | |||
507 | struct { | ||
508 | struct list_head macs; | ||
509 | int num_macs_set; | ||
510 | } exact_match; | ||
511 | } registry; | ||
512 | |||
513 | /* Pending commands */ | ||
514 | struct list_head pending_cmds_head; | ||
515 | |||
516 | /* A state that is set in raw.pstate, when there are pending commands */ | ||
517 | int sched_state; | ||
518 | |||
519 | /* Maximal number of mcast MACs configured in one command */ | ||
520 | int max_cmd_len; | ||
521 | |||
522 | /* Total number of currently pending MACs to configure: both | ||
523 | * in the pending commands list and in the current command. | ||
524 | */ | ||
525 | int total_pending_num; | ||
526 | |||
527 | u8 engine_id; | ||
528 | |||
529 | /** | ||
530 | * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above) | ||
531 | */ | ||
532 | int (*config_mcast)(struct bnx2x *bp, | ||
533 | struct bnx2x_mcast_ramrod_params *p, int cmd); | ||
534 | |||
535 | /** | ||
536 | * Fills the ramrod data during the RESTORE flow. | ||
537 | * | ||
538 | * @param bp | ||
539 | * @param o | ||
540 | * @param start_idx Registry index to start from | ||
541 | * @param rdata_idx Index in the ramrod data to start from | ||
542 | * | ||
543 | * @return -1 if we handled the whole registry or the index of the last | ||
544 | * handled registry element. | ||
545 | */ | ||
546 | int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, | ||
547 | int start_bin, int *rdata_idx); | ||
548 | |||
549 | int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, | ||
550 | struct bnx2x_mcast_ramrod_params *p, int cmd); | ||
551 | |||
552 | void (*set_one_rule)(struct bnx2x *bp, | ||
553 | struct bnx2x_mcast_obj *o, int idx, | ||
554 | union bnx2x_mcast_config_data *cfg_data, int cmd); | ||
555 | |||
556 | /** Checks if there are more mcast MACs to be set or a previous | ||
557 | * command is still pending. | ||
558 | */ | ||
559 | bool (*check_pending)(struct bnx2x_mcast_obj *o); | ||
560 | |||
561 | /** | ||
562 | * Set/Clear/Check SCHEDULED state of the object | ||
563 | */ | ||
564 | void (*set_sched)(struct bnx2x_mcast_obj *o); | ||
565 | void (*clear_sched)(struct bnx2x_mcast_obj *o); | ||
566 | bool (*check_sched)(struct bnx2x_mcast_obj *o); | ||
567 | |||
568 | /* Wait until all pending commands complete */ | ||
569 | int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o); | ||
570 | |||
571 | /** | ||
572 | * Handle the internal object counters needed for proper | ||
573 | * commands handling. Checks that the provided parameters are | ||
574 | * feasible. | ||
575 | */ | ||
576 | int (*validate)(struct bnx2x *bp, | ||
577 | struct bnx2x_mcast_ramrod_params *p, int cmd); | ||
578 | |||
579 | /** | ||
580 | * Restore the values of internal counters in case of a failure. | ||
581 | */ | ||
582 | void (*revert)(struct bnx2x *bp, | ||
583 | struct bnx2x_mcast_ramrod_params *p, | ||
584 | int old_num_bins); | ||
585 | |||
586 | int (*get_registry_size)(struct bnx2x_mcast_obj *o); | ||
587 | void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n); | ||
588 | }; | ||
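The aprox_match registry above tracks 256 multicast "bins" as bits packed into four 64-bit words. A minimal standalone sketch of that bit bookkeeping follows; how a MAC address is hashed into a bin index is driver-specific and deliberately left out here.

/*
 * Standalone sketch of the 256-bin approximate-match bitmap.  The bin
 * index is taken as given (in the driver it is derived from a hash of
 * the multicast MAC, which is not shown in this header).
 */
#include <stdint.h>
#include <stdio.h>

#define MCAST_BINS_NUM	256
#define MCAST_VEC_SZ	(MCAST_BINS_NUM / 64)

static uint64_t vec[MCAST_VEC_SZ];

static void bin_set(int bin)   { vec[bin / 64] |=  (1ULL << (bin % 64)); }
static void bin_clear(int bin) { vec[bin / 64] &= ~(1ULL << (bin % 64)); }
static int  bin_test(int bin)  { return !!(vec[bin / 64] & (1ULL << (bin % 64))); }

int main(void)
{
	bin_set(5);
	bin_set(200);
	printf("bin 5: %d, bin 6: %d, bin 200: %d\n",
	       bin_test(5), bin_test(6), bin_test(200));
	bin_clear(200);
	printf("bin 200 after clear: %d\n", bin_test(200));
	return 0;
}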
589 | |||
590 | /*************************** Credit handling **********************************/ | ||
591 | struct bnx2x_credit_pool_obj { | ||
592 | |||
593 | /* Current amount of credit in the pool */ | ||
594 | atomic_t credit; | ||
595 | |||
596 | /* Maximum allowed credit. put() will check against it. */ | ||
597 | int pool_sz; | ||
598 | |||
599 | /* | ||
600 | * Allocate a pool table statically. | ||
601 | * | ||
602 | * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272) | ||
603 | * | ||
604 | * The set bit in the table will mean that the entry is available. | ||
605 | */ | ||
606 | #define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64) | ||
607 | u64 pool_mirror[BNX2X_POOL_VEC_SIZE]; | ||
608 | |||
609 | /* Base pool offset (initialized differently) */ | ||
610 | int base_pool_offset; | ||
611 | |||
612 | /** | ||
613 | * Get the next free pool entry. | ||
614 | * | ||
615 | * @return true if there was a free entry in the pool | ||
616 | */ | ||
617 | bool (*get_entry)(struct bnx2x_credit_pool_obj *o, int *entry); | ||
618 | |||
619 | /** | ||
620 | * Return the entry back to the pool. | ||
621 | * | ||
622 | * @return true if entry is legal and has been successfully | ||
623 | * returned to the pool. | ||
624 | */ | ||
625 | bool (*put_entry)(struct bnx2x_credit_pool_obj *o, int entry); | ||
626 | |||
627 | /** | ||
628 | * Get the requested amount of credit from the pool. | ||
629 | * | ||
630 | * @param cnt Amount of requested credit | ||
631 | * @return true if the operation is successful | ||
632 | */ | ||
633 | bool (*get)(struct bnx2x_credit_pool_obj *o, int cnt); | ||
634 | |||
635 | /** | ||
636 | * Returns the credit to the pool. | ||
637 | * | ||
638 | * @param cnt Amount of credit to return | ||
639 | * @return true if the operation is successful | ||
640 | */ | ||
641 | bool (*put)(struct bnx2x_credit_pool_obj *o, int cnt); | ||
642 | |||
643 | /** | ||
644 | * Reads the current amount of credit. | ||
645 | */ | ||
646 | int (*check)(struct bnx2x_credit_pool_obj *o); | ||
647 | }; | ||
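A simplified, single-threaded model of the credit pool above: a counter tracks how much credit remains while the mirror bitmap records which concrete entries are free (a set bit meaning "available", as the structure's comment states). The names and the linear bit scan are illustrative assumptions, not the driver's implementation.

/* Minimal userspace sketch of a bitmap-backed credit pool. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define POOL_SZ		128
#define POOL_VEC_SZ	(POOL_SZ / 64)

static int credit = POOL_SZ;
static uint64_t pool_mirror[POOL_VEC_SZ] = { ~0ULL, ~0ULL };

static bool pool_get_entry(int *entry)
{
	for (int i = 0; i < POOL_VEC_SZ; i++)
		for (int bit = 0; bit < 64; bit++)
			if (pool_mirror[i] & (1ULL << bit)) {
				pool_mirror[i] &= ~(1ULL << bit);
				*entry = i * 64 + bit;
				credit--;
				return true;
			}
	return false;	/* pool exhausted */
}

static bool pool_put_entry(int entry)
{
	if (entry < 0 || entry >= POOL_SZ)
		return false;
	pool_mirror[entry / 64] |= 1ULL << (entry % 64);
	credit++;
	return true;
}

int main(void)
{
	int e;
	if (pool_get_entry(&e))
		printf("got entry %d, credit left %d\n", e, credit);
	pool_put_entry(e);
	printf("credit after return: %d\n", credit);
	return 0;
}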
648 | |||
649 | /*************************** RSS configuration ********************************/ | ||
650 | enum { | ||
651 | /* RSS_MODE bits are mutually exclusive */ | ||
652 | BNX2X_RSS_MODE_DISABLED, | ||
653 | BNX2X_RSS_MODE_REGULAR, | ||
654 | BNX2X_RSS_MODE_VLAN_PRI, | ||
655 | BNX2X_RSS_MODE_E1HOV_PRI, | ||
656 | BNX2X_RSS_MODE_IP_DSCP, | ||
657 | |||
658 | BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */ | ||
659 | |||
660 | BNX2X_RSS_IPV4, | ||
661 | BNX2X_RSS_IPV4_TCP, | ||
662 | BNX2X_RSS_IPV6, | ||
663 | BNX2X_RSS_IPV6_TCP, | ||
664 | }; | ||
665 | |||
666 | struct bnx2x_config_rss_params { | ||
667 | struct bnx2x_rss_config_obj *rss_obj; | ||
668 | |||
669 | /* may have RAMROD_COMP_WAIT set only */ | ||
670 | unsigned long ramrod_flags; | ||
671 | |||
672 | /* BNX2X_RSS_X bits */ | ||
673 | unsigned long rss_flags; | ||
674 | |||
675 | /* Number of hash bits to take into account */ | ||
676 | u8 rss_result_mask; | ||
677 | |||
678 | /* Indirection table */ | ||
679 | u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; | ||
680 | |||
681 | /* RSS hash values */ | ||
682 | u32 rss_key[10]; | ||
683 | |||
684 | /* valid iff BNX2X_RSS_UPDATE_TOE is set */ | ||
685 | u16 toe_rss_bitmap; | ||
686 | }; | ||
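As a rough illustration of how parameters of this shape are conventionally consumed (an assumption about the hardware, not something this header states): the low rss_result_mask bits of the packet hash pick a slot in ind_table, and that slot names the RX queue the packet is steered to.

/* Standalone sketch of indirection-table lookup with a masked hash. */
#include <stdint.h>
#include <stdio.h>

#define IND_TABLE_SIZE	128	/* stand-in for T_ETH_INDIRECTION_TABLE_SIZE */

static uint8_t ind_table[IND_TABLE_SIZE];

int main(void)
{
	int num_queues = 4;
	uint8_t rss_result_mask = 7;	/* take 7 hash bits -> 128 slots */
	uint32_t hash = 0xdeadbeef;	/* pretend hardware hash result */

	/* spread the slots evenly across the RX queues */
	for (int i = 0; i < IND_TABLE_SIZE; i++)
		ind_table[i] = i % num_queues;

	uint32_t slot = hash & ((1u << rss_result_mask) - 1);
	printf("hash 0x%08x -> slot %u -> queue %u\n",
	       hash, slot, ind_table[slot]);
	return 0;
}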
687 | |||
688 | struct bnx2x_rss_config_obj { | ||
689 | struct bnx2x_raw_obj raw; | ||
690 | |||
691 | /* RSS engine to use */ | ||
692 | u8 engine_id; | ||
693 | |||
694 | /* Last configured indirection table */ | ||
695 | u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; | ||
696 | |||
697 | int (*config_rss)(struct bnx2x *bp, | ||
698 | struct bnx2x_config_rss_params *p); | ||
699 | }; | ||
700 | |||
701 | /*********************** Queue state update ***********************************/ | ||
702 | |||
703 | /* UPDATE command options */ | ||
704 | enum { | ||
705 | BNX2X_Q_UPDATE_IN_VLAN_REM, | ||
706 | BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, | ||
707 | BNX2X_Q_UPDATE_OUT_VLAN_REM, | ||
708 | BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG, | ||
709 | BNX2X_Q_UPDATE_ANTI_SPOOF, | ||
710 | BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, | ||
711 | BNX2X_Q_UPDATE_ACTIVATE, | ||
712 | BNX2X_Q_UPDATE_ACTIVATE_CHNG, | ||
713 | BNX2X_Q_UPDATE_DEF_VLAN_EN, | ||
714 | BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, | ||
715 | BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, | ||
716 | BNX2X_Q_UPDATE_SILENT_VLAN_REM | ||
717 | }; | ||
718 | |||
719 | /* Allowed Queue states */ | ||
720 | enum bnx2x_q_state { | ||
721 | BNX2X_Q_STATE_RESET, | ||
722 | BNX2X_Q_STATE_INITIALIZED, | ||
723 | BNX2X_Q_STATE_ACTIVE, | ||
724 | BNX2X_Q_STATE_INACTIVE, | ||
725 | BNX2X_Q_STATE_STOPPED, | ||
726 | BNX2X_Q_STATE_TERMINATED, | ||
727 | BNX2X_Q_STATE_FLRED, | ||
728 | BNX2X_Q_STATE_MAX, | ||
729 | }; | ||
730 | |||
731 | /* Allowed commands */ | ||
732 | enum bnx2x_queue_cmd { | ||
733 | BNX2X_Q_CMD_INIT, | ||
734 | BNX2X_Q_CMD_SETUP, | ||
735 | BNX2X_Q_CMD_DEACTIVATE, | ||
736 | BNX2X_Q_CMD_ACTIVATE, | ||
737 | BNX2X_Q_CMD_UPDATE, | ||
738 | BNX2X_Q_CMD_UPDATE_TPA, | ||
739 | BNX2X_Q_CMD_HALT, | ||
740 | BNX2X_Q_CMD_CFC_DEL, | ||
741 | BNX2X_Q_CMD_TERMINATE, | ||
742 | BNX2X_Q_CMD_EMPTY, | ||
743 | BNX2X_Q_CMD_MAX, | ||
744 | }; | ||
745 | |||
746 | /* queue SETUP + INIT flags */ | ||
747 | enum { | ||
748 | BNX2X_Q_FLG_TPA, | ||
749 | BNX2X_Q_FLG_STATS, | ||
750 | BNX2X_Q_FLG_ZERO_STATS, | ||
751 | BNX2X_Q_FLG_ACTIVE, | ||
752 | BNX2X_Q_FLG_OV, | ||
753 | BNX2X_Q_FLG_VLAN, | ||
754 | BNX2X_Q_FLG_COS, | ||
755 | BNX2X_Q_FLG_HC, | ||
756 | BNX2X_Q_FLG_HC_EN, | ||
757 | BNX2X_Q_FLG_DHC, | ||
758 | BNX2X_Q_FLG_FCOE, | ||
759 | BNX2X_Q_FLG_LEADING_RSS, | ||
760 | BNX2X_Q_FLG_MCAST, | ||
761 | BNX2X_Q_FLG_DEF_VLAN, | ||
762 | BNX2X_Q_FLG_TX_SWITCH, | ||
763 | BNX2X_Q_FLG_TX_SEC, | ||
764 | BNX2X_Q_FLG_ANTI_SPOOF, | ||
765 | BNX2X_Q_FLG_SILENT_VLAN_REM | ||
766 | }; | ||
767 | |||
768 | /* Queue type options: queue type may be a combination of the below. */ | ||
769 | enum bnx2x_q_type { | ||
770 | /** TODO: Consider moving both these flags into the init() | ||
771 | * ramrod params. | ||
772 | */ | ||
773 | BNX2X_Q_TYPE_HAS_RX, | ||
774 | BNX2X_Q_TYPE_HAS_TX, | ||
775 | }; | ||
776 | |||
777 | struct bnx2x_queue_init_params { | ||
778 | struct { | ||
779 | unsigned long flags; | ||
780 | u16 hc_rate; | ||
781 | u8 fw_sb_id; | ||
782 | u8 sb_cq_index; | ||
783 | } tx; | ||
784 | |||
785 | struct { | ||
786 | unsigned long flags; | ||
787 | u16 hc_rate; | ||
788 | u8 fw_sb_id; | ||
789 | u8 sb_cq_index; | ||
790 | } rx; | ||
791 | |||
792 | /* CID context in the host memory */ | ||
793 | struct eth_context *cxt; | ||
794 | }; | ||
795 | |||
796 | struct bnx2x_queue_update_params { | ||
797 | unsigned long update_flags; /* BNX2X_Q_UPDATE_XX bits */ | ||
798 | u16 def_vlan; | ||
799 | u16 silent_removal_value; | ||
800 | u16 silent_removal_mask; | ||
801 | }; | ||
802 | |||
803 | struct rxq_pause_params { | ||
804 | u16 bd_th_lo; | ||
805 | u16 bd_th_hi; | ||
806 | u16 rcq_th_lo; | ||
807 | u16 rcq_th_hi; | ||
808 | u16 sge_th_lo; /* valid iff BNX2X_Q_FLG_TPA */ | ||
809 | u16 sge_th_hi; /* valid iff BNX2X_Q_FLG_TPA */ | ||
810 | u16 pri_map; | ||
811 | }; | ||
812 | |||
813 | /* general */ | ||
814 | struct bnx2x_general_setup_params { | ||
815 | /* valid iff BNX2X_Q_FLG_STATS */ | ||
816 | u8 stat_id; | ||
817 | |||
818 | u8 spcl_id; | ||
819 | u16 mtu; | ||
820 | }; | ||
821 | |||
822 | struct bnx2x_rxq_setup_params { | ||
823 | /* dma */ | ||
824 | dma_addr_t dscr_map; | ||
825 | dma_addr_t sge_map; | ||
826 | dma_addr_t rcq_map; | ||
827 | dma_addr_t rcq_np_map; | ||
828 | |||
829 | u16 drop_flags; | ||
830 | u16 buf_sz; | ||
831 | u8 fw_sb_id; | ||
832 | u8 cl_qzone_id; | ||
833 | |||
834 | /* valid iff BNX2X_Q_FLG_TPA */ | ||
835 | u16 tpa_agg_sz; | ||
836 | u16 sge_buf_sz; | ||
837 | u8 max_sges_pkt; | ||
838 | u8 max_tpa_queues; | ||
839 | u8 rss_engine_id; | ||
840 | |||
841 | u8 cache_line_log; | ||
842 | |||
843 | u8 sb_cq_index; | ||
844 | |||
845 | /* valid iff BNX2X_Q_FLG_SILENT_VLAN_REM */ | ||
846 | u16 silent_removal_value; | ||
847 | u16 silent_removal_mask; | ||
848 | }; | ||
849 | |||
850 | struct bnx2x_txq_setup_params { | ||
851 | /* dma */ | ||
852 | dma_addr_t dscr_map; | ||
853 | |||
854 | u8 fw_sb_id; | ||
855 | u8 sb_cq_index; | ||
856 | u8 cos; /* valid iff BNX2X_Q_FLG_COS */ | ||
857 | u16 traffic_type; | ||
858 | /* equals the leading RSS client id, used for TX classification */ | ||
859 | u8 tss_leading_cl_id; | ||
860 | |||
861 | /* valid iff BNX2X_Q_FLG_DEF_VLAN */ | ||
862 | u16 default_vlan; | ||
863 | }; | ||
864 | |||
865 | struct bnx2x_queue_setup_params { | ||
866 | struct rxq_pause_params pause; | ||
867 | struct bnx2x_general_setup_params gen_params; | ||
868 | struct bnx2x_rxq_setup_params rxq_params; | ||
869 | struct bnx2x_txq_setup_params txq_params; | ||
870 | unsigned long flags; | ||
871 | }; | ||
872 | |||
873 | |||
874 | struct bnx2x_queue_state_params { | ||
875 | struct bnx2x_queue_sp_obj *q_obj; | ||
876 | |||
877 | /* Current command */ | ||
878 | enum bnx2x_queue_cmd cmd; | ||
879 | |||
880 | /* may have RAMROD_COMP_WAIT set only */ | ||
881 | unsigned long ramrod_flags; | ||
882 | |||
883 | /* Params according to the current command */ | ||
884 | union { | ||
885 | struct bnx2x_queue_update_params update; | ||
886 | struct bnx2x_queue_setup_params setup; | ||
887 | struct bnx2x_queue_init_params init; | ||
888 | } params; | ||
889 | }; | ||
890 | |||
891 | struct bnx2x_queue_sp_obj { | ||
892 | u32 cid; | ||
893 | u8 cl_id; | ||
894 | u8 func_id; | ||
895 | |||
896 | enum bnx2x_q_state state, next_state; | ||
897 | |||
898 | /* bits from enum bnx2x_q_type */ | ||
899 | unsigned long type; | ||
900 | |||
901 | /* BNX2X_Q_CMD_XX bits. This object implements the "one | ||
902 | * pending" paradigm, but for debug and tracing purposes it's | ||
903 | * more convenient to have different bits for different | ||
904 | * commands. | ||
905 | */ | ||
906 | unsigned long pending; | ||
907 | |||
908 | /* Buffer to use as a ramrod data and its mapping */ | ||
909 | void *rdata; | ||
910 | dma_addr_t rdata_mapping; | ||
911 | |||
912 | /** | ||
913 | * Performs one state change according to the given parameters. | ||
914 | * | ||
915 | * @return 0 in case of success and negative value otherwise. | ||
916 | */ | ||
917 | int (*send_cmd)(struct bnx2x *bp, | ||
918 | struct bnx2x_queue_state_params *params); | ||
919 | |||
920 | /** | ||
921 | * Sets the pending bit according to the requested transition. | ||
922 | */ | ||
923 | int (*set_pending)(struct bnx2x_queue_sp_obj *o, | ||
924 | struct bnx2x_queue_state_params *params); | ||
925 | |||
926 | /** | ||
927 | * Checks that the requested state transition is legal. | ||
928 | */ | ||
929 | int (*check_transition)(struct bnx2x *bp, | ||
930 | struct bnx2x_queue_sp_obj *o, | ||
931 | struct bnx2x_queue_state_params *params); | ||
932 | |||
933 | /** | ||
934 | * Completes the pending command. | ||
935 | */ | ||
936 | int (*complete_cmd)(struct bnx2x *bp, | ||
937 | struct bnx2x_queue_sp_obj *o, | ||
938 | enum bnx2x_queue_cmd); | ||
939 | |||
940 | int (*wait_comp)(struct bnx2x *bp, | ||
941 | struct bnx2x_queue_sp_obj *o, | ||
942 | enum bnx2x_queue_cmd cmd); | ||
943 | }; | ||
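A reduced, userspace model of the "one pending" paradigm the comment above describes: a command is accepted only when the current state allows it and nothing else is pending, its per-command bit is set for tracing, and the completion commits next_state. The transition table here is hypothetical and much smaller than the queue object's real one.

/* Minimal sketch of a one-pending state machine for a queue-like object. */
#include <stdio.h>

enum q_state { Q_RESET, Q_INITIALIZED, Q_ACTIVE };
enum q_cmd   { Q_CMD_INIT, Q_CMD_SETUP };

static enum q_state state = Q_RESET, next_state;
static unsigned long pending;	/* one bit per command, for tracing */

static int check_transition(enum q_cmd cmd)
{
	if (state == Q_RESET && cmd == Q_CMD_INIT) {
		next_state = Q_INITIALIZED;
		return 0;
	}
	if (state == Q_INITIALIZED && cmd == Q_CMD_SETUP) {
		next_state = Q_ACTIVE;
		return 0;
	}
	return -1;	/* illegal transition */
}

static int send_cmd(enum q_cmd cmd)
{
	if (pending || check_transition(cmd))
		return -1;
	pending |= 1UL << cmd;	/* "one pending": exactly one bit set */
	return 0;
}

static void complete_cmd(enum q_cmd cmd)
{
	pending &= ~(1UL << cmd);
	state = next_state;
}

int main(void)
{
	printf("INIT accepted: %d\n", send_cmd(Q_CMD_INIT) == 0);
	printf("SETUP while INIT pending rejected: %d\n", send_cmd(Q_CMD_SETUP) != 0);
	complete_cmd(Q_CMD_INIT);
	printf("SETUP after completion accepted: %d\n", send_cmd(Q_CMD_SETUP) == 0);
	return 0;
}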
944 | |||
945 | /********************** Function state update *********************************/ | ||
946 | /* Allowed Function states */ | ||
947 | enum bnx2x_func_state { | ||
948 | BNX2X_F_STATE_RESET, | ||
949 | BNX2X_F_STATE_INITIALIZED, | ||
950 | BNX2X_F_STATE_STARTED, | ||
951 | BNX2X_F_STATE_MAX, | ||
952 | }; | ||
953 | |||
954 | /* Allowed Function commands */ | ||
955 | enum bnx2x_func_cmd { | ||
956 | BNX2X_F_CMD_HW_INIT, | ||
957 | BNX2X_F_CMD_START, | ||
958 | BNX2X_F_CMD_STOP, | ||
959 | BNX2X_F_CMD_HW_RESET, | ||
960 | BNX2X_F_CMD_MAX, | ||
961 | }; | ||
962 | |||
963 | struct bnx2x_func_hw_init_params { | ||
964 | /* A load phase returned by MCP. | ||
965 | * | ||
966 | * May be: | ||
967 | * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP | ||
968 | * FW_MSG_CODE_DRV_LOAD_COMMON | ||
969 | * FW_MSG_CODE_DRV_LOAD_PORT | ||
970 | * FW_MSG_CODE_DRV_LOAD_FUNCTION | ||
971 | */ | ||
972 | u32 load_phase; | ||
973 | }; | ||
974 | |||
975 | struct bnx2x_func_hw_reset_params { | ||
976 | /* A load phase returned by MCP. | ||
977 | * | ||
978 | * May be: | ||
979 | * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP | ||
980 | * FW_MSG_CODE_DRV_LOAD_COMMON | ||
981 | * FW_MSG_CODE_DRV_LOAD_PORT | ||
982 | * FW_MSG_CODE_DRV_LOAD_FUNCTION | ||
983 | */ | ||
984 | u32 reset_phase; | ||
985 | }; | ||
986 | |||
987 | struct bnx2x_func_start_params { | ||
988 | /* Multi Function mode: | ||
989 | * - Single Function | ||
990 | * - Switch Dependent | ||
991 | * - Switch Independent | ||
992 | */ | ||
993 | u16 mf_mode; | ||
994 | |||
995 | /* Switch Dependent mode outer VLAN tag */ | ||
996 | u16 sd_vlan_tag; | ||
997 | |||
998 | /* Function cos mode */ | ||
999 | u8 network_cos_mode; | ||
1000 | }; | ||
1001 | |||
1002 | struct bnx2x_func_state_params { | ||
1003 | struct bnx2x_func_sp_obj *f_obj; | ||
1004 | |||
1005 | /* Current command */ | ||
1006 | enum bnx2x_func_cmd cmd; | ||
1007 | |||
1008 | /* may have RAMROD_COMP_WAIT set only */ | ||
1009 | unsigned long ramrod_flags; | ||
1010 | |||
1011 | /* Params according to the current command */ | ||
1012 | union { | ||
1013 | struct bnx2x_func_hw_init_params hw_init; | ||
1014 | struct bnx2x_func_hw_reset_params hw_reset; | ||
1015 | struct bnx2x_func_start_params start; | ||
1016 | } params; | ||
1017 | }; | ||
1018 | |||
1019 | struct bnx2x_func_sp_drv_ops { | ||
1020 | /* Init tool + runtime initialization: | ||
1021 | * - Common Chip | ||
1022 | * - Common (per Path) | ||
1023 | * - Port | ||
1024 | * - Function phases | ||
1025 | */ | ||
1026 | int (*init_hw_cmn_chip)(struct bnx2x *bp); | ||
1027 | int (*init_hw_cmn)(struct bnx2x *bp); | ||
1028 | int (*init_hw_port)(struct bnx2x *bp); | ||
1029 | int (*init_hw_func)(struct bnx2x *bp); | ||
1030 | |||
1031 | /* Reset Function HW: Common, Port, Function phases. */ | ||
1032 | void (*reset_hw_cmn)(struct bnx2x *bp); | ||
1033 | void (*reset_hw_port)(struct bnx2x *bp); | ||
1034 | void (*reset_hw_func)(struct bnx2x *bp); | ||
1035 | |||
1036 | /* Init/Free GUNZIP resources */ | ||
1037 | int (*gunzip_init)(struct bnx2x *bp); | ||
1038 | void (*gunzip_end)(struct bnx2x *bp); | ||
1039 | |||
1040 | /* Prepare/Release FW resources */ | ||
1041 | int (*init_fw)(struct bnx2x *bp); | ||
1042 | void (*release_fw)(struct bnx2x *bp); | ||
1043 | }; | ||
1044 | |||
1045 | struct bnx2x_func_sp_obj { | ||
1046 | enum bnx2x_func_state state, next_state; | ||
1047 | |||
1048 | /* BNX2X_FUNC_CMD_XX bits. This object implements the "one | ||
1049 | * pending" paradigm, but for debug and tracing purposes it's | ||
1050 | * more convenient to have different bits for different | ||
1051 | * commands. | ||
1052 | */ | ||
1053 | unsigned long pending; | ||
1054 | |||
1055 | /* Buffer to use as a ramrod data and its mapping */ | ||
1056 | void *rdata; | ||
1057 | dma_addr_t rdata_mapping; | ||
1058 | |||
1059 | /* This mutex validates that when the pending flag is taken, the next | ||
1060 | * ramrod to be sent will be the one that set the pending bit. | ||
1061 | */ | ||
1062 | struct mutex one_pending_mutex; | ||
1063 | |||
1064 | /* Driver interface */ | ||
1065 | struct bnx2x_func_sp_drv_ops *drv; | ||
1066 | |||
1067 | /** | ||
1068 | * Performs one state change according to the given parameters. | ||
1069 | * | ||
1070 | * @return 0 in case of success and negative value otherwise. | ||
1071 | */ | ||
1072 | int (*send_cmd)(struct bnx2x *bp, | ||
1073 | struct bnx2x_func_state_params *params); | ||
1074 | |||
1075 | /** | ||
1076 | * Checks that the requested state transition is legal. | ||
1077 | */ | ||
1078 | int (*check_transition)(struct bnx2x *bp, | ||
1079 | struct bnx2x_func_sp_obj *o, | ||
1080 | struct bnx2x_func_state_params *params); | ||
1081 | |||
1082 | /** | ||
1083 | * Completes the pending command. | ||
1084 | */ | ||
1085 | int (*complete_cmd)(struct bnx2x *bp, | ||
1086 | struct bnx2x_func_sp_obj *o, | ||
1087 | enum bnx2x_func_cmd cmd); | ||
1088 | |||
1089 | int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o, | ||
1090 | enum bnx2x_func_cmd cmd); | ||
1091 | }; | ||
1092 | |||
1093 | /********************** Interfaces ********************************************/ | ||
1094 | /* Queueable objects set */ | ||
1095 | union bnx2x_qable_obj { | ||
1096 | struct bnx2x_vlan_mac_obj vlan_mac; | ||
1097 | }; | ||
1098 | /************** Function state update *********/ | ||
1099 | void bnx2x_init_func_obj(struct bnx2x *bp, | ||
1100 | struct bnx2x_func_sp_obj *obj, | ||
1101 | void *rdata, dma_addr_t rdata_mapping, | ||
1102 | struct bnx2x_func_sp_drv_ops *drv_iface); | ||
1103 | |||
1104 | int bnx2x_func_state_change(struct bnx2x *bp, | ||
1105 | struct bnx2x_func_state_params *params); | ||
1106 | |||
1107 | /******************* Queue State **************/ | ||
1108 | void bnx2x_init_queue_obj(struct bnx2x *bp, | ||
1109 | struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 cid, | ||
1110 | u8 func_id, void *rdata, dma_addr_t rdata_mapping, | ||
1111 | unsigned long type); | ||
1112 | |||
1113 | int bnx2x_queue_state_change(struct bnx2x *bp, | ||
1114 | struct bnx2x_queue_state_params *params); | ||
1115 | |||
1116 | /********************* VLAN-MAC ****************/ | ||
1117 | void bnx2x_init_mac_obj(struct bnx2x *bp, | ||
1118 | struct bnx2x_vlan_mac_obj *mac_obj, | ||
1119 | u8 cl_id, u32 cid, u8 func_id, void *rdata, | ||
1120 | dma_addr_t rdata_mapping, int state, | ||
1121 | unsigned long *pstate, bnx2x_obj_type type, | ||
1122 | struct bnx2x_credit_pool_obj *macs_pool); | ||
1123 | |||
1124 | void bnx2x_init_vlan_obj(struct bnx2x *bp, | ||
1125 | struct bnx2x_vlan_mac_obj *vlan_obj, | ||
1126 | u8 cl_id, u32 cid, u8 func_id, void *rdata, | ||
1127 | dma_addr_t rdata_mapping, int state, | ||
1128 | unsigned long *pstate, bnx2x_obj_type type, | ||
1129 | struct bnx2x_credit_pool_obj *vlans_pool); | ||
1130 | |||
1131 | void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, | ||
1132 | struct bnx2x_vlan_mac_obj *vlan_mac_obj, | ||
1133 | u8 cl_id, u32 cid, u8 func_id, void *rdata, | ||
1134 | dma_addr_t rdata_mapping, int state, | ||
1135 | unsigned long *pstate, bnx2x_obj_type type, | ||
1136 | struct bnx2x_credit_pool_obj *macs_pool, | ||
1137 | struct bnx2x_credit_pool_obj *vlans_pool); | ||
1138 | |||
1139 | int bnx2x_config_vlan_mac(struct bnx2x *bp, | ||
1140 | struct bnx2x_vlan_mac_ramrod_params *p); | ||
1141 | |||
1142 | int bnx2x_vlan_mac_move(struct bnx2x *bp, | ||
1143 | struct bnx2x_vlan_mac_ramrod_params *p, | ||
1144 | struct bnx2x_vlan_mac_obj *dest_o); | ||
1145 | |||
1146 | /********************* RX MODE ****************/ | ||
1147 | |||
1148 | void bnx2x_init_rx_mode_obj(struct bnx2x *bp, | ||
1149 | struct bnx2x_rx_mode_obj *o); | ||
1150 | |||
1151 | /** | ||
1152 | * Send an RX_MODE ramrod according to the provided parameters. | ||
1153 | * | ||
1154 | * @param bp | ||
1155 | * @param p Command parameters | ||
1156 | * | ||
1157 | * @return 0 - if the operation was successful and there are no pending completions, | ||
1158 | * positive number - if there are pending completions, | ||
1159 | * negative - if there were errors | ||
1160 | */ | ||
1161 | int bnx2x_config_rx_mode(struct bnx2x *bp, | ||
1162 | struct bnx2x_rx_mode_ramrod_params *p); | ||
1163 | |||
1164 | /****************** MULTICASTS ****************/ | ||
1165 | |||
1166 | void bnx2x_init_mcast_obj(struct bnx2x *bp, | ||
1167 | struct bnx2x_mcast_obj *mcast_obj, | ||
1168 | u8 mcast_cl_id, u32 mcast_cid, u8 func_id, | ||
1169 | u8 engine_id, void *rdata, dma_addr_t rdata_mapping, | ||
1170 | int state, unsigned long *pstate, | ||
1171 | bnx2x_obj_type type); | ||
1172 | |||
1173 | /** | ||
1174 | * Configure multicast MACs list. May configure a new list | ||
1175 | * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up | ||
1176 | * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current | ||
1177 | * configuration, continue to execute the pending commands | ||
1178 | * (BNX2X_MCAST_CMD_CONT). | ||
1179 | * | ||
1180 | * If a previous command is still pending or if the number of MACs to | ||
1181 | * configure is more than the maximum number of MACs in one command, | ||
1182 | * the current command will be enqueued to the tail of the | ||
1183 | * pending commands list. | ||
1184 | * | ||
1185 | * @param bp | ||
1186 | * @param p | ||
1187 | * @param command to execute: BNX2X_MCAST_CMD_X | ||
1188 | * | ||
1189 | * @return 0 if the operation was successful and there are no pending completions, | ||
1190 | * negative if there were errors, positive if there are pending | ||
1191 | * completions. | ||
1192 | */ | ||
1193 | int bnx2x_config_mcast(struct bnx2x *bp, | ||
1194 | struct bnx2x_mcast_ramrod_params *p, int cmd); | ||
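A hedged usage sketch of the interface just declared (driver-context fragment, not a standalone program): build a one-entry mcast_list, request completion waiting via RAMROD_COMP_WAIT (one of the two flags this header calls relevant for this ramrod), and issue BNX2X_MCAST_CMD_ADD. The helper name is hypothetical.

/* Illustrative fragment only: add one multicast MAC through config_mcast(). */
static int example_add_one_mcast(struct bnx2x *bp,
				 struct bnx2x_mcast_obj *mcast_obj,
				 u8 *mac)
{
	struct bnx2x_mcast_ramrod_params rparam = {0};
	struct bnx2x_mcast_list_elem elem;

	elem.mac = mac;

	rparam.mcast_obj = mcast_obj;
	INIT_LIST_HEAD(&rparam.mcast_list);
	list_add_tail(&elem.link, &rparam.mcast_list);
	rparam.mcast_list_len = 1;
	__set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);

	return bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
}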
1195 | |||
1196 | /****************** CREDIT POOL ****************/ | ||
1197 | void bnx2x_init_mac_credit_pool(struct bnx2x *bp, | ||
1198 | struct bnx2x_credit_pool_obj *p, u8 func_id, | ||
1199 | u8 func_num); | ||
1200 | void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, | ||
1201 | struct bnx2x_credit_pool_obj *p, u8 func_id, | ||
1202 | u8 func_num); | ||
1203 | |||
1204 | |||
1205 | /****************** RSS CONFIGURATION ****************/ | ||
1206 | void bnx2x_init_rss_config_obj(struct bnx2x *bp, | ||
1207 | struct bnx2x_rss_config_obj *rss_obj, | ||
1208 | u8 cl_id, u32 cid, u8 func_id, u8 engine_id, | ||
1209 | void *rdata, dma_addr_t rdata_mapping, | ||
1210 | int state, unsigned long *pstate, | ||
1211 | bnx2x_obj_type type); | ||
1212 | |||
1213 | /** | ||
1214 | * Updates RSS configuration according to provided parameters. | ||
1215 | * | ||
1216 | * @param bp | ||
1217 | * @param p | ||
1218 | * | ||
1219 | * @return 0 in case of success | ||
1220 | */ | ||
1221 | int bnx2x_config_rss(struct bnx2x *bp, | ||
1222 | struct bnx2x_config_rss_params *p); | ||
1223 | |||
1224 | /** | ||
1225 | * Return the current ind_table configuration. | ||
1226 | * | ||
1227 | * @param bp | ||
1228 | * @param ind_table buffer to fill with the current indirection | ||
1229 | * table content. Should be at least | ||
1230 | * T_ETH_INDIRECTION_TABLE_SIZE bytes long. | ||
1231 | */ | ||
1232 | void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, | ||
1233 | u8 *ind_table); | ||
1234 | |||
1235 | #endif /* BNX2X_SP_VERBS */ | ||
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index e535bfa08945..54c07f557ad4 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c | |||
@@ -14,120 +14,11 @@ | |||
14 | * Statistics and Link management by Yitchak Gertner | 14 | * Statistics and Link management by Yitchak Gertner |
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | #include "bnx2x_cmn.h" | ||
18 | #include "bnx2x_stats.h" | 17 | #include "bnx2x_stats.h" |
18 | #include "bnx2x_cmn.h" | ||
19 | 19 | ||
20 | /* Statistics */ | ||
21 | 20 | ||
22 | /**************************************************************************** | 21 | /* Statistics */ |
23 | * Macros | ||
24 | ****************************************************************************/ | ||
25 | |||
26 | /* sum[hi:lo] += add[hi:lo] */ | ||
27 | #define ADD_64(s_hi, a_hi, s_lo, a_lo) \ | ||
28 | do { \ | ||
29 | s_lo += a_lo; \ | ||
30 | s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ | ||
31 | } while (0) | ||
32 | |||
33 | /* difference = minuend - subtrahend */ | ||
34 | #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ | ||
35 | do { \ | ||
36 | if (m_lo < s_lo) { \ | ||
37 | /* underflow */ \ | ||
38 | d_hi = m_hi - s_hi; \ | ||
39 | if (d_hi > 0) { \ | ||
40 | /* we can 'loan' 1 */ \ | ||
41 | d_hi--; \ | ||
42 | d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ | ||
43 | } else { \ | ||
44 | /* m_hi <= s_hi */ \ | ||
45 | d_hi = 0; \ | ||
46 | d_lo = 0; \ | ||
47 | } \ | ||
48 | } else { \ | ||
49 | /* m_lo >= s_lo */ \ | ||
50 | if (m_hi < s_hi) { \ | ||
51 | d_hi = 0; \ | ||
52 | d_lo = 0; \ | ||
53 | } else { \ | ||
54 | /* m_hi >= s_hi */ \ | ||
55 | d_hi = m_hi - s_hi; \ | ||
56 | d_lo = m_lo - s_lo; \ | ||
57 | } \ | ||
58 | } \ | ||
59 | } while (0) | ||
60 | |||
61 | #define UPDATE_STAT64(s, t) \ | ||
62 | do { \ | ||
63 | DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \ | ||
64 | diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \ | ||
65 | pstats->mac_stx[0].t##_hi = new->s##_hi; \ | ||
66 | pstats->mac_stx[0].t##_lo = new->s##_lo; \ | ||
67 | ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \ | ||
68 | pstats->mac_stx[1].t##_lo, diff.lo); \ | ||
69 | } while (0) | ||
70 | |||
71 | #define UPDATE_STAT64_NIG(s, t) \ | ||
72 | do { \ | ||
73 | DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \ | ||
74 | diff.lo, new->s##_lo, old->s##_lo); \ | ||
75 | ADD_64(estats->t##_hi, diff.hi, \ | ||
76 | estats->t##_lo, diff.lo); \ | ||
77 | } while (0) | ||
78 | |||
79 | /* sum[hi:lo] += add */ | ||
80 | #define ADD_EXTEND_64(s_hi, s_lo, a) \ | ||
81 | do { \ | ||
82 | s_lo += a; \ | ||
83 | s_hi += (s_lo < a) ? 1 : 0; \ | ||
84 | } while (0) | ||
85 | |||
86 | #define UPDATE_EXTEND_STAT(s) \ | ||
87 | do { \ | ||
88 | ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \ | ||
89 | pstats->mac_stx[1].s##_lo, \ | ||
90 | new->s); \ | ||
91 | } while (0) | ||
92 | |||
93 | #define UPDATE_EXTEND_TSTAT(s, t) \ | ||
94 | do { \ | ||
95 | diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \ | ||
96 | old_tclient->s = tclient->s; \ | ||
97 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
98 | } while (0) | ||
99 | |||
100 | #define UPDATE_EXTEND_USTAT(s, t) \ | ||
101 | do { \ | ||
102 | diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ | ||
103 | old_uclient->s = uclient->s; \ | ||
104 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
105 | } while (0) | ||
106 | |||
107 | #define UPDATE_EXTEND_XSTAT(s, t) \ | ||
108 | do { \ | ||
109 | diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \ | ||
110 | old_xclient->s = xclient->s; \ | ||
111 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
112 | } while (0) | ||
113 | |||
114 | /* minuend -= subtrahend */ | ||
115 | #define SUB_64(m_hi, s_hi, m_lo, s_lo) \ | ||
116 | do { \ | ||
117 | DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \ | ||
118 | } while (0) | ||
119 | |||
120 | /* minuend[hi:lo] -= subtrahend */ | ||
121 | #define SUB_EXTEND_64(m_hi, m_lo, s) \ | ||
122 | do { \ | ||
123 | SUB_64(m_hi, 0, m_lo, s); \ | ||
124 | } while (0) | ||
125 | |||
126 | #define SUB_EXTEND_USTAT(s, t) \ | ||
127 | do { \ | ||
128 | diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ | ||
129 | SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
130 | } while (0) | ||
131 | 22 | ||
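The macros this hunk removes keep 64-bit counters as separate hi/lo 32-bit halves. Below is a small standalone check of the carry/borrow handling; ADD_64 and DIFF_64 are copied verbatim from the block above, while the surrounding test program is illustrative only.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

int main(void)
{
	uint32_t sum_hi = 0, sum_lo = 0xFFFFFFFFu;
	uint32_t d_hi, d_lo;

	ADD_64(sum_hi, 0, sum_lo, 2u);			/* carries into the high word */
	printf("sum  = %08x:%08x\n", sum_hi, sum_lo);	/* 00000001:00000001 */

	DIFF_64(d_hi, sum_hi, 0u, d_lo, sum_lo, 2u);	/* borrows back */
	printf("diff = %08x:%08x\n", d_hi, d_lo);	/* 00000000:ffffffff */
	return 0;
}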
132 | /* | 23 | /* |
133 | * General service functions | 24 | * General service functions |
@@ -149,12 +40,16 @@ static inline long bnx2x_hilo(u32 *hiref) | |||
149 | * Init service functions | 40 | * Init service functions |
150 | */ | 41 | */ |
151 | 42 | ||
152 | 43 | /* Post the next statistics ramrod. Protect it with the spin in | |
44 | * order to ensure the strict order between statistics ramrods | ||
45 | * (each ramrod has a sequence number passed in a | ||
46 | * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be | ||
47 | * sent in order). | ||
48 | */ | ||
153 | static void bnx2x_storm_stats_post(struct bnx2x *bp) | 49 | static void bnx2x_storm_stats_post(struct bnx2x *bp) |
154 | { | 50 | { |
155 | if (!bp->stats_pending) { | 51 | if (!bp->stats_pending) { |
156 | struct common_query_ramrod_data ramrod_data = {0}; | 52 | int rc; |
157 | int i, rc; | ||
158 | 53 | ||
159 | spin_lock_bh(&bp->stats_lock); | 54 | spin_lock_bh(&bp->stats_lock); |
160 | 55 | ||
@@ -163,14 +58,19 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp) | |||
163 | return; | 58 | return; |
164 | } | 59 | } |
165 | 60 | ||
166 | ramrod_data.drv_counter = bp->stats_counter++; | 61 | bp->fw_stats_req->hdr.drv_stats_counter = |
167 | ramrod_data.collect_port = bp->port.pmf ? 1 : 0; | 62 | cpu_to_le16(bp->stats_counter++); |
168 | for_each_eth_queue(bp, i) | ||
169 | ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id); | ||
170 | 63 | ||
64 | DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n", | ||
65 | bp->fw_stats_req->hdr.drv_stats_counter); | ||
66 | |||
67 | |||
68 | |||
69 | /* send FW stats ramrod */ | ||
171 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, | 70 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, |
172 | ((u32 *)&ramrod_data)[1], | 71 | U64_HI(bp->fw_stats_req_mapping), |
173 | ((u32 *)&ramrod_data)[0], 1); | 72 | U64_LO(bp->fw_stats_req_mapping), |
73 | NONE_CONNECTION_TYPE); | ||
174 | if (rc == 0) | 74 | if (rc == 0) |
175 | bp->stats_pending = 1; | 75 | bp->stats_pending = 1; |
176 | 76 | ||
@@ -230,7 +130,7 @@ static int bnx2x_stats_comp(struct bnx2x *bp) | |||
230 | break; | 130 | break; |
231 | } | 131 | } |
232 | cnt--; | 132 | cnt--; |
233 | msleep(1); | 133 | usleep_range(1000, 1000); |
234 | } | 134 | } |
235 | return 1; | 135 | return 1; |
236 | } | 136 | } |
@@ -338,69 +238,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp) | |||
338 | opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, | 238 | opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, |
339 | true, DMAE_COMP_GRC); | 239 | true, DMAE_COMP_GRC); |
340 | 240 | ||
341 | if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { | 241 | /* EMAC is special */ |
342 | 242 | if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { | |
343 | mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : | ||
344 | NIG_REG_INGRESS_BMAC0_MEM); | ||
345 | |||
346 | /* BIGMAC_REGISTER_TX_STAT_GTPKT .. | ||
347 | BIGMAC_REGISTER_TX_STAT_GTBYT */ | ||
348 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
349 | dmae->opcode = opcode; | ||
350 | if (CHIP_IS_E1x(bp)) { | ||
351 | dmae->src_addr_lo = (mac_addr + | ||
352 | BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; | ||
353 | dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - | ||
354 | BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; | ||
355 | } else { | ||
356 | dmae->src_addr_lo = (mac_addr + | ||
357 | BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; | ||
358 | dmae->len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT - | ||
359 | BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; | ||
360 | } | ||
361 | |||
362 | dmae->src_addr_hi = 0; | ||
363 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); | ||
364 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); | ||
365 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
366 | dmae->comp_addr_hi = 0; | ||
367 | dmae->comp_val = 1; | ||
368 | |||
369 | /* BIGMAC_REGISTER_RX_STAT_GR64 .. | ||
370 | BIGMAC_REGISTER_RX_STAT_GRIPJ */ | ||
371 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
372 | dmae->opcode = opcode; | ||
373 | dmae->src_addr_hi = 0; | ||
374 | if (CHIP_IS_E1x(bp)) { | ||
375 | dmae->src_addr_lo = (mac_addr + | ||
376 | BIGMAC_REGISTER_RX_STAT_GR64) >> 2; | ||
377 | dmae->dst_addr_lo = | ||
378 | U64_LO(bnx2x_sp_mapping(bp, mac_stats) + | ||
379 | offsetof(struct bmac1_stats, rx_stat_gr64_lo)); | ||
380 | dmae->dst_addr_hi = | ||
381 | U64_HI(bnx2x_sp_mapping(bp, mac_stats) + | ||
382 | offsetof(struct bmac1_stats, rx_stat_gr64_lo)); | ||
383 | dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - | ||
384 | BIGMAC_REGISTER_RX_STAT_GR64) >> 2; | ||
385 | } else { | ||
386 | dmae->src_addr_lo = | ||
387 | (mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; | ||
388 | dmae->dst_addr_lo = | ||
389 | U64_LO(bnx2x_sp_mapping(bp, mac_stats) + | ||
390 | offsetof(struct bmac2_stats, rx_stat_gr64_lo)); | ||
391 | dmae->dst_addr_hi = | ||
392 | U64_HI(bnx2x_sp_mapping(bp, mac_stats) + | ||
393 | offsetof(struct bmac2_stats, rx_stat_gr64_lo)); | ||
394 | dmae->len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ - | ||
395 | BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; | ||
396 | } | ||
397 | |||
398 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
399 | dmae->comp_addr_hi = 0; | ||
400 | dmae->comp_val = 1; | ||
401 | |||
402 | } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { | ||
403 | |||
404 | mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0); | 243 | mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0); |
405 | 244 | ||
406 | /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ | 245 | /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ |
@@ -445,46 +284,122 @@ static void bnx2x_port_stats_init(struct bnx2x *bp) | |||
445 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | 284 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; |
446 | dmae->comp_addr_hi = 0; | 285 | dmae->comp_addr_hi = 0; |
447 | dmae->comp_val = 1; | 286 | dmae->comp_val = 1; |
287 | } else { | ||
288 | u32 tx_src_addr_lo, rx_src_addr_lo; | ||
289 | u16 rx_len, tx_len; | ||
290 | |||
291 | /* configure the params according to MAC type */ | ||
292 | switch (bp->link_vars.mac_type) { | ||
293 | case MAC_TYPE_BMAC: | ||
294 | mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : | ||
295 | NIG_REG_INGRESS_BMAC0_MEM); | ||
296 | |||
297 | /* BIGMAC_REGISTER_TX_STAT_GTPKT .. | ||
298 | BIGMAC_REGISTER_TX_STAT_GTBYT */ | ||
299 | if (CHIP_IS_E1x(bp)) { | ||
300 | tx_src_addr_lo = (mac_addr + | ||
301 | BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; | ||
302 | tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - | ||
303 | BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; | ||
304 | rx_src_addr_lo = (mac_addr + | ||
305 | BIGMAC_REGISTER_RX_STAT_GR64) >> 2; | ||
306 | rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - | ||
307 | BIGMAC_REGISTER_RX_STAT_GR64) >> 2; | ||
308 | } else { | ||
309 | tx_src_addr_lo = (mac_addr + | ||
310 | BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; | ||
311 | tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT - | ||
312 | BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; | ||
313 | rx_src_addr_lo = (mac_addr + | ||
314 | BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; | ||
315 | rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ - | ||
316 | BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; | ||
317 | } | ||
318 | break; | ||
319 | |||
320 | case MAC_TYPE_UMAC: /* handled by MSTAT */ | ||
321 | case MAC_TYPE_XMAC: /* handled by MSTAT */ | ||
322 | default: | ||
323 | mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0; | ||
324 | tx_src_addr_lo = (mac_addr + | ||
325 | MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2; | ||
326 | rx_src_addr_lo = (mac_addr + | ||
327 | MSTAT_REG_RX_STAT_GR64_LO) >> 2; | ||
328 | tx_len = sizeof(bp->slowpath-> | ||
329 | mac_stats.mstat_stats.stats_tx) >> 2; | ||
330 | rx_len = sizeof(bp->slowpath-> | ||
331 | mac_stats.mstat_stats.stats_rx) >> 2; | ||
332 | break; | ||
333 | } | ||
334 | |||
335 | /* TX stats */ | ||
336 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
337 | dmae->opcode = opcode; | ||
338 | dmae->src_addr_lo = tx_src_addr_lo; | ||
339 | dmae->src_addr_hi = 0; | ||
340 | dmae->len = tx_len; | ||
341 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); | ||
342 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); | ||
343 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
344 | dmae->comp_addr_hi = 0; | ||
345 | dmae->comp_val = 1; | ||
346 | |||
347 | /* RX stats */ | ||
348 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
349 | dmae->opcode = opcode; | ||
350 | dmae->src_addr_hi = 0; | ||
351 | dmae->src_addr_lo = rx_src_addr_lo; | ||
352 | dmae->dst_addr_lo = | ||
353 | U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); | ||
354 | dmae->dst_addr_hi = | ||
355 | U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); | ||
356 | dmae->len = rx_len; | ||
357 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
358 | dmae->comp_addr_hi = 0; | ||
359 | dmae->comp_val = 1; | ||
448 | } | 360 | } |
449 | 361 | ||
450 | /* NIG */ | 362 | /* NIG */ |
363 | if (!CHIP_IS_E3(bp)) { | ||
364 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
365 | dmae->opcode = opcode; | ||
366 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : | ||
367 | NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; | ||
368 | dmae->src_addr_hi = 0; | ||
369 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + | ||
370 | offsetof(struct nig_stats, egress_mac_pkt0_lo)); | ||
371 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + | ||
372 | offsetof(struct nig_stats, egress_mac_pkt0_lo)); | ||
373 | dmae->len = (2*sizeof(u32)) >> 2; | ||
374 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
375 | dmae->comp_addr_hi = 0; | ||
376 | dmae->comp_val = 1; | ||
377 | |||
378 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
379 | dmae->opcode = opcode; | ||
380 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : | ||
381 | NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; | ||
382 | dmae->src_addr_hi = 0; | ||
383 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + | ||
384 | offsetof(struct nig_stats, egress_mac_pkt1_lo)); | ||
385 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + | ||
386 | offsetof(struct nig_stats, egress_mac_pkt1_lo)); | ||
387 | dmae->len = (2*sizeof(u32)) >> 2; | ||
388 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
389 | dmae->comp_addr_hi = 0; | ||
390 | dmae->comp_val = 1; | ||
391 | } | ||
392 | |||
451 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | 393 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); |
452 | dmae->opcode = opcode; | 394 | dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, |
395 | true, DMAE_COMP_PCI); | ||
453 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD : | 396 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD : |
454 | NIG_REG_STAT0_BRB_DISCARD) >> 2; | 397 | NIG_REG_STAT0_BRB_DISCARD) >> 2; |
455 | dmae->src_addr_hi = 0; | 398 | dmae->src_addr_hi = 0; |
456 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); | 399 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); |
457 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); | 400 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); |
458 | dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; | 401 | dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; |
459 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
460 | dmae->comp_addr_hi = 0; | ||
461 | dmae->comp_val = 1; | ||
462 | 402 | ||
463 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
464 | dmae->opcode = opcode; | ||
465 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : | ||
466 | NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; | ||
467 | dmae->src_addr_hi = 0; | ||
468 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + | ||
469 | offsetof(struct nig_stats, egress_mac_pkt0_lo)); | ||
470 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + | ||
471 | offsetof(struct nig_stats, egress_mac_pkt0_lo)); | ||
472 | dmae->len = (2*sizeof(u32)) >> 2; | ||
473 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
474 | dmae->comp_addr_hi = 0; | ||
475 | dmae->comp_val = 1; | ||
476 | |||
477 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
478 | dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, | ||
479 | true, DMAE_COMP_PCI); | ||
480 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : | ||
481 | NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; | ||
482 | dmae->src_addr_hi = 0; | ||
483 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + | ||
484 | offsetof(struct nig_stats, egress_mac_pkt1_lo)); | ||
485 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + | ||
486 | offsetof(struct nig_stats, egress_mac_pkt1_lo)); | ||
487 | dmae->len = (2*sizeof(u32)) >> 2; | ||
488 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | 403 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); |
489 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | 404 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); |
490 | dmae->comp_val = DMAE_COMP_VAL; | 405 | dmae->comp_val = DMAE_COMP_VAL; |
@@ -566,7 +481,8 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) | |||
566 | UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); | 481 | UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); |
567 | UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); | 482 | UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); |
568 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); | 483 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); |
569 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); | 484 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); |
485 | |||
570 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); | 486 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); |
571 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); | 487 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); |
572 | UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); | 488 | UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); |
@@ -580,13 +496,13 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) | |||
580 | tx_stat_etherstatspkts512octetsto1023octets); | 496 | tx_stat_etherstatspkts512octetsto1023octets); |
581 | UPDATE_STAT64(tx_stat_gt1518, | 497 | UPDATE_STAT64(tx_stat_gt1518, |
582 | tx_stat_etherstatspkts1024octetsto1522octets); | 498 | tx_stat_etherstatspkts1024octetsto1522octets); |
583 | UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047); | 499 | UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); |
584 | UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095); | 500 | UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); |
585 | UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216); | 501 | UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); |
586 | UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383); | 502 | UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); |
587 | UPDATE_STAT64(tx_stat_gterr, | 503 | UPDATE_STAT64(tx_stat_gterr, |
588 | tx_stat_dot3statsinternalmactransmiterrors); | 504 | tx_stat_dot3statsinternalmactransmiterrors); |
589 | UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl); | 505 | UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); |
590 | 506 | ||
591 | } else { | 507 | } else { |
592 | struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats); | 508 | struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats); |
@@ -600,7 +516,7 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) | |||
600 | UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); | 516 | UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); |
601 | UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); | 517 | UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); |
602 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); | 518 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); |
603 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); | 519 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); |
604 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); | 520 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); |
605 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); | 521 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); |
606 | UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); | 522 | UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); |
@@ -614,19 +530,96 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) | |||
614 | tx_stat_etherstatspkts512octetsto1023octets); | 530 | tx_stat_etherstatspkts512octetsto1023octets); |
615 | UPDATE_STAT64(tx_stat_gt1518, | 531 | UPDATE_STAT64(tx_stat_gt1518, |
616 | tx_stat_etherstatspkts1024octetsto1522octets); | 532 | tx_stat_etherstatspkts1024octetsto1522octets); |
617 | UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047); | 533 | UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); |
618 | UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095); | 534 | UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); |
619 | UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216); | 535 | UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); |
620 | UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383); | 536 | UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); |
621 | UPDATE_STAT64(tx_stat_gterr, | 537 | UPDATE_STAT64(tx_stat_gterr, |
622 | tx_stat_dot3statsinternalmactransmiterrors); | 538 | tx_stat_dot3statsinternalmactransmiterrors); |
623 | UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl); | 539 | UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); |
624 | } | 540 | } |
625 | 541 | ||
626 | estats->pause_frames_received_hi = | 542 | estats->pause_frames_received_hi = |
627 | pstats->mac_stx[1].rx_stat_bmac_xpf_hi; | 543 | pstats->mac_stx[1].rx_stat_mac_xpf_hi; |
544 | estats->pause_frames_received_lo = | ||
545 | pstats->mac_stx[1].rx_stat_mac_xpf_lo; | ||
546 | |||
547 | estats->pause_frames_sent_hi = | ||
548 | pstats->mac_stx[1].tx_stat_outxoffsent_hi; | ||
549 | estats->pause_frames_sent_lo = | ||
550 | pstats->mac_stx[1].tx_stat_outxoffsent_lo; | ||
551 | } | ||
552 | |||
553 | static void bnx2x_mstat_stats_update(struct bnx2x *bp) | ||
554 | { | ||
555 | struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); | ||
556 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
557 | |||
558 | struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats); | ||
559 | |||
560 | ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets); | ||
561 | ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors); | ||
562 | ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts); | ||
563 | ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong); | ||
564 | ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments); | ||
565 | ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived); | ||
566 | ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered); | ||
567 | ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf); | ||
568 | ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent); | ||
569 | ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone); | ||
570 | |||
571 | |||
572 | ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets); | ||
573 | ADD_STAT64(stats_tx.tx_gt127, | ||
574 | tx_stat_etherstatspkts65octetsto127octets); | ||
575 | ADD_STAT64(stats_tx.tx_gt255, | ||
576 | tx_stat_etherstatspkts128octetsto255octets); | ||
577 | ADD_STAT64(stats_tx.tx_gt511, | ||
578 | tx_stat_etherstatspkts256octetsto511octets); | ||
579 | ADD_STAT64(stats_tx.tx_gt1023, | ||
580 | tx_stat_etherstatspkts512octetsto1023octets); | ||
581 | ADD_STAT64(stats_tx.tx_gt1518, | ||
582 | tx_stat_etherstatspkts1024octetsto1522octets); | ||
583 | ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047); | ||
584 | |||
585 | ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095); | ||
586 | ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216); | ||
587 | ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383); | ||
588 | |||
589 | ADD_STAT64(stats_tx.tx_gterr, | ||
590 | tx_stat_dot3statsinternalmactransmiterrors); | ||
591 | ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl); | ||
592 | |||
593 | ADD_64(estats->etherstatspkts1024octetsto1522octets_hi, | ||
594 | new->stats_tx.tx_gt1518_hi, | ||
595 | estats->etherstatspkts1024octetsto1522octets_lo, | ||
596 | new->stats_tx.tx_gt1518_lo); | ||
597 | |||
598 | ADD_64(estats->etherstatspktsover1522octets_hi, | ||
599 | new->stats_tx.tx_gt2047_hi, | ||
600 | estats->etherstatspktsover1522octets_lo, | ||
601 | new->stats_tx.tx_gt2047_lo); | ||
602 | |||
603 | ADD_64(estats->etherstatspktsover1522octets_hi, | ||
604 | new->stats_tx.tx_gt4095_hi, | ||
605 | estats->etherstatspktsover1522octets_lo, | ||
606 | new->stats_tx.tx_gt4095_lo); | ||
607 | |||
608 | ADD_64(estats->etherstatspktsover1522octets_hi, | ||
609 | new->stats_tx.tx_gt9216_hi, | ||
610 | estats->etherstatspktsover1522octets_lo, | ||
611 | new->stats_tx.tx_gt9216_lo); | ||
612 | |||
613 | |||
614 | ADD_64(estats->etherstatspktsover1522octets_hi, | ||
615 | new->stats_tx.tx_gt16383_hi, | ||
616 | estats->etherstatspktsover1522octets_lo, | ||
617 | new->stats_tx.tx_gt16383_lo); | ||
618 | |||
619 | estats->pause_frames_received_hi = | ||
620 | pstats->mac_stx[1].rx_stat_mac_xpf_hi; | ||
628 | estats->pause_frames_received_lo = | 621 | estats->pause_frames_received_lo = |
629 | pstats->mac_stx[1].rx_stat_bmac_xpf_lo; | 622 | pstats->mac_stx[1].rx_stat_mac_xpf_lo; |
630 | 623 | ||
631 | estats->pause_frames_sent_hi = | 624 | estats->pause_frames_sent_hi = |
632 | pstats->mac_stx[1].tx_stat_outxoffsent_hi; | 625 | pstats->mac_stx[1].tx_stat_outxoffsent_hi; |
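
The UPDATE_STAT64() calls above implement a delta-and-accumulate scheme: mac_stx[0] holds the previous raw MAC reading, DIFF_64 computes the change since that reading, and the change is folded into the running total kept in mac_stx[1]. A stand-alone sketch of the same pattern for a single 32-bit counter (hypothetical names, plain C; the real macros work on hi/lo word pairs):

#include <stdint.h>
#include <stdio.h>

/* last raw reading and the accumulated total for one counter */
struct stat_slot {
	uint32_t last_raw;	/* plays the role of mac_stx[0] */
	uint64_t total;		/* plays the role of mac_stx[1] */
};

/* Fold a new raw hardware reading into the running total.  Unsigned
 * subtraction yields the correct delta here even if the 32-bit hardware
 * counter wrapped once between polls. */
static void update_stat(struct stat_slot *s, uint32_t raw)
{
	uint32_t delta = raw - s->last_raw;

	s->last_raw = raw;
	s->total += delta;
}

int main(void)
{
	struct stat_slot s = { .last_raw = 0xfffffff0u, .total = 0xfffffff0u };

	/* counter wrapped from 0xfffffff0 to 0x10: delta is 0x20 */
	update_stat(&s, 0x10);
	printf("total = 0x%llx\n", (unsigned long long)s.total); /* 0x100000010 */
	return 0;
}
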
@@ -702,15 +695,26 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) | |||
702 | u32 hi; | 695 | u32 hi; |
703 | } diff; | 696 | } diff; |
704 | 697 | ||
705 | if (bp->link_vars.mac_type == MAC_TYPE_BMAC) | 698 | switch (bp->link_vars.mac_type) { |
699 | case MAC_TYPE_BMAC: | ||
706 | bnx2x_bmac_stats_update(bp); | 700 | bnx2x_bmac_stats_update(bp); |
701 | break; | ||
707 | 702 | ||
708 | else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) | 703 | case MAC_TYPE_EMAC: |
709 | bnx2x_emac_stats_update(bp); | 704 | bnx2x_emac_stats_update(bp); |
705 | break; | ||
706 | |||
707 | case MAC_TYPE_UMAC: | ||
708 | case MAC_TYPE_XMAC: | ||
709 | bnx2x_mstat_stats_update(bp); | ||
710 | break; | ||
710 | 711 | ||
711 | else { /* unreached */ | 712 | case MAC_TYPE_NONE: /* unreached */ |
712 | BNX2X_ERR("stats updated by DMAE but no MAC active\n"); | 713 | BNX2X_ERR("stats updated by DMAE but no MAC active\n"); |
713 | return -1; | 714 | return -1; |
715 | |||
716 | default: /* unreached */ | ||
717 | BNX2X_ERR("Unknown MAC type\n"); | ||
714 | } | 718 | } |
715 | 719 | ||
716 | ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, | 720 | ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, |
@@ -718,9 +722,12 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) | |||
718 | ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, | 722 | ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, |
719 | new->brb_truncate - old->brb_truncate); | 723 | new->brb_truncate - old->brb_truncate); |
720 | 724 | ||
721 | UPDATE_STAT64_NIG(egress_mac_pkt0, | 725 | if (!CHIP_IS_E3(bp)) { |
726 | UPDATE_STAT64_NIG(egress_mac_pkt0, | ||
722 | etherstatspkts1024octetsto1522octets); | 727 | etherstatspkts1024octetsto1522octets); |
723 | UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets); | 728 | UPDATE_STAT64_NIG(egress_mac_pkt1, |
729 | etherstatspktsover1522octets); | ||
730 | } | ||
724 | 731 | ||
725 | memcpy(old, new, sizeof(struct nig_stats)); | 732 | memcpy(old, new, sizeof(struct nig_stats)); |
726 | 733 | ||
@@ -746,11 +753,13 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) | |||
746 | 753 | ||
747 | static int bnx2x_storm_stats_update(struct bnx2x *bp) | 754 | static int bnx2x_storm_stats_update(struct bnx2x *bp) |
748 | { | 755 | { |
749 | struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats); | ||
750 | struct tstorm_per_port_stats *tport = | 756 | struct tstorm_per_port_stats *tport = |
751 | &stats->tstorm_common.port_statistics; | 757 | &bp->fw_stats_data->port.tstorm_port_statistics; |
758 | struct tstorm_per_pf_stats *tfunc = | ||
759 | &bp->fw_stats_data->pf.tstorm_pf_statistics; | ||
752 | struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); | 760 | struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); |
753 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | 761 | struct bnx2x_eth_stats *estats = &bp->eth_stats; |
762 | struct stats_counter *counters = &bp->fw_stats_data->storm_counters; | ||
754 | int i; | 763 | int i; |
755 | u16 cur_stats_counter; | 764 | u16 cur_stats_counter; |
756 | 765 | ||
@@ -761,6 +770,35 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
761 | cur_stats_counter = bp->stats_counter - 1; | 770 | cur_stats_counter = bp->stats_counter - 1; |
762 | spin_unlock_bh(&bp->stats_lock); | 771 | spin_unlock_bh(&bp->stats_lock); |
763 | 772 | ||
773 | /* are storm stats valid? */ | ||
774 | if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { | ||
775 | DP(BNX2X_MSG_STATS, "stats not updated by xstorm" | ||
776 | " xstorm counter (0x%x) != stats_counter (0x%x)\n", | ||
777 | le16_to_cpu(counters->xstats_counter), bp->stats_counter); | ||
778 | return -EAGAIN; | ||
779 | } | ||
780 | |||
781 | if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { | ||
782 | DP(BNX2X_MSG_STATS, "stats not updated by ustorm" | ||
783 | " ustorm counter (0x%x) != stats_counter (0x%x)\n", | ||
784 | le16_to_cpu(counters->ustats_counter), bp->stats_counter); | ||
785 | return -EAGAIN; | ||
786 | } | ||
787 | |||
788 | if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { | ||
789 | DP(BNX2X_MSG_STATS, "stats not updated by cstorm" | ||
790 | " cstorm counter (0x%x) != stats_counter (0x%x)\n", | ||
791 | le16_to_cpu(counters->cstats_counter), bp->stats_counter); | ||
792 | return -EAGAIN; | ||
793 | } | ||
794 | |||
795 | if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { | ||
796 | DP(BNX2X_MSG_STATS, "stats not updated by tstorm" | ||
797 | " tstorm counter (0x%x) != stats_counter (0x%x)\n", | ||
798 | le16_to_cpu(counters->tstats_counter), bp->stats_counter); | ||
799 | return -EAGAIN; | ||
800 | } | ||
801 | |||
764 | memcpy(&(fstats->total_bytes_received_hi), | 802 | memcpy(&(fstats->total_bytes_received_hi), |
765 | &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), | 803 | &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), |
766 | sizeof(struct host_func_stats) - 2*sizeof(u32)); | 804 | sizeof(struct host_func_stats) - 2*sizeof(u32)); |
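
The four per-storm checks added above all follow one pattern: the driver tracks its own statistics request counter, the firmware echoes into storm_counters the counter of the request each storm last completed, and any mismatch means the latest statistics ramrod has not finished, so the update is skipped with -EAGAIN and the previous snapshot is kept. A minimal stand-alone sketch of that handshake (plain C, hypothetical names, not code from the patch):

#include <stdint.h>
#include <stdio.h>

#define NUM_STORMS 4		/* xstorm, ustorm, cstorm, tstorm */

/* stands in for struct stats_counter: one completion counter per storm,
 * written back by the firmware into the shared statistics buffer */
struct storm_counters {
	uint16_t completed[NUM_STORMS];
};

/* Return 1 only when every storm has completed the request the driver is
 * waiting for; otherwise the caller keeps the old statistics (the driver
 * above returns -EAGAIN in that case). */
static int storm_stats_ready(const struct storm_counters *fw, uint16_t expected)
{
	for (int i = 0; i < NUM_STORMS; i++)
		if (fw->completed[i] != expected)
			return 0;
	return 1;
}

int main(void)
{
	struct storm_counters fw = { .completed = { 3, 3, 2, 3 } };

	/* the third storm is still on request 2, so this snapshot is stale */
	printf("ready: %d\n", storm_stats_ready(&fw, 3));
	return 0;
}
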
@@ -770,94 +808,84 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
770 | estats->etherstatsoverrsizepkts_lo = 0; | 808 | estats->etherstatsoverrsizepkts_lo = 0; |
771 | estats->no_buff_discard_hi = 0; | 809 | estats->no_buff_discard_hi = 0; |
772 | estats->no_buff_discard_lo = 0; | 810 | estats->no_buff_discard_lo = 0; |
811 | estats->total_tpa_aggregations_hi = 0; | ||
812 | estats->total_tpa_aggregations_lo = 0; | ||
813 | estats->total_tpa_aggregated_frames_hi = 0; | ||
814 | estats->total_tpa_aggregated_frames_lo = 0; | ||
815 | estats->total_tpa_bytes_hi = 0; | ||
816 | estats->total_tpa_bytes_lo = 0; | ||
773 | 817 | ||
774 | for_each_eth_queue(bp, i) { | 818 | for_each_eth_queue(bp, i) { |
775 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 819 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
776 | int cl_id = fp->cl_id; | 820 | struct tstorm_per_queue_stats *tclient = |
777 | struct tstorm_per_client_stats *tclient = | 821 | &bp->fw_stats_data->queue_stats[i]. |
778 | &stats->tstorm_common.client_statistics[cl_id]; | 822 | tstorm_queue_statistics; |
779 | struct tstorm_per_client_stats *old_tclient = &fp->old_tclient; | 823 | struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; |
780 | struct ustorm_per_client_stats *uclient = | 824 | struct ustorm_per_queue_stats *uclient = |
781 | &stats->ustorm_common.client_statistics[cl_id]; | 825 | &bp->fw_stats_data->queue_stats[i]. |
782 | struct ustorm_per_client_stats *old_uclient = &fp->old_uclient; | 826 | ustorm_queue_statistics; |
783 | struct xstorm_per_client_stats *xclient = | 827 | struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; |
784 | &stats->xstorm_common.client_statistics[cl_id]; | 828 | struct xstorm_per_queue_stats *xclient = |
785 | struct xstorm_per_client_stats *old_xclient = &fp->old_xclient; | 829 | &bp->fw_stats_data->queue_stats[i]. |
830 | xstorm_queue_statistics; | ||
831 | struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; | ||
786 | struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; | 832 | struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; |
787 | u32 diff; | 833 | u32 diff; |
788 | 834 | ||
789 | /* are storm stats valid? */ | 835 | DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, " |
790 | if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) { | 836 | "bcast_sent 0x%x mcast_sent 0x%x\n", |
791 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm" | 837 | i, xclient->ucast_pkts_sent, |
792 | " xstorm counter (0x%x) != stats_counter (0x%x)\n", | 838 | xclient->bcast_pkts_sent, xclient->mcast_pkts_sent); |
793 | i, xclient->stats_counter, cur_stats_counter + 1); | 839 | |
794 | return -1; | 840 | DP(BNX2X_MSG_STATS, "---------------\n"); |
795 | } | ||
796 | if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) { | ||
797 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm" | ||
798 | " tstorm counter (0x%x) != stats_counter (0x%x)\n", | ||
799 | i, tclient->stats_counter, cur_stats_counter + 1); | ||
800 | return -2; | ||
801 | } | ||
802 | if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) { | ||
803 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm" | ||
804 | " ustorm counter (0x%x) != stats_counter (0x%x)\n", | ||
805 | i, uclient->stats_counter, cur_stats_counter + 1); | ||
806 | return -4; | ||
807 | } | ||
808 | 841 | ||
842 | qstats->total_broadcast_bytes_received_hi = | ||
843 | le32_to_cpu(tclient->rcv_bcast_bytes.hi); | ||
844 | qstats->total_broadcast_bytes_received_lo = | ||
845 | le32_to_cpu(tclient->rcv_bcast_bytes.lo); | ||
846 | |||
847 | qstats->total_multicast_bytes_received_hi = | ||
848 | le32_to_cpu(tclient->rcv_mcast_bytes.hi); | ||
849 | qstats->total_multicast_bytes_received_lo = | ||
850 | le32_to_cpu(tclient->rcv_mcast_bytes.lo); | ||
851 | |||
852 | qstats->total_unicast_bytes_received_hi = | ||
853 | le32_to_cpu(tclient->rcv_ucast_bytes.hi); | ||
854 | qstats->total_unicast_bytes_received_lo = | ||
855 | le32_to_cpu(tclient->rcv_ucast_bytes.lo); | ||
856 | |||
857 | /* | ||
858 | * sum all unicast/multicast/broadcast bytes | ||
859 | * into total_bytes_received | ||
860 | */ | ||
809 | qstats->total_bytes_received_hi = | 861 | qstats->total_bytes_received_hi = |
810 | le32_to_cpu(tclient->rcv_broadcast_bytes.hi); | 862 | qstats->total_broadcast_bytes_received_hi; |
811 | qstats->total_bytes_received_lo = | 863 | qstats->total_bytes_received_lo = |
812 | le32_to_cpu(tclient->rcv_broadcast_bytes.lo); | 864 | qstats->total_broadcast_bytes_received_lo; |
813 | 865 | ||
814 | ADD_64(qstats->total_bytes_received_hi, | 866 | ADD_64(qstats->total_bytes_received_hi, |
815 | le32_to_cpu(tclient->rcv_multicast_bytes.hi), | 867 | qstats->total_multicast_bytes_received_hi, |
816 | qstats->total_bytes_received_lo, | 868 | qstats->total_bytes_received_lo, |
817 | le32_to_cpu(tclient->rcv_multicast_bytes.lo)); | 869 | qstats->total_multicast_bytes_received_lo); |
818 | 870 | ||
819 | ADD_64(qstats->total_bytes_received_hi, | 871 | ADD_64(qstats->total_bytes_received_hi, |
820 | le32_to_cpu(tclient->rcv_unicast_bytes.hi), | 872 | qstats->total_unicast_bytes_received_hi, |
821 | qstats->total_bytes_received_lo, | ||
822 | le32_to_cpu(tclient->rcv_unicast_bytes.lo)); | ||
823 | |||
824 | SUB_64(qstats->total_bytes_received_hi, | ||
825 | le32_to_cpu(uclient->bcast_no_buff_bytes.hi), | ||
826 | qstats->total_bytes_received_lo, | 873 | qstats->total_bytes_received_lo, |
827 | le32_to_cpu(uclient->bcast_no_buff_bytes.lo)); | 874 | qstats->total_unicast_bytes_received_lo); |
828 | |||
829 | SUB_64(qstats->total_bytes_received_hi, | ||
830 | le32_to_cpu(uclient->mcast_no_buff_bytes.hi), | ||
831 | qstats->total_bytes_received_lo, | ||
832 | le32_to_cpu(uclient->mcast_no_buff_bytes.lo)); | ||
833 | |||
834 | SUB_64(qstats->total_bytes_received_hi, | ||
835 | le32_to_cpu(uclient->ucast_no_buff_bytes.hi), | ||
836 | qstats->total_bytes_received_lo, | ||
837 | le32_to_cpu(uclient->ucast_no_buff_bytes.lo)); | ||
838 | 875 | ||
839 | qstats->valid_bytes_received_hi = | 876 | qstats->valid_bytes_received_hi = |
840 | qstats->total_bytes_received_hi; | 877 | qstats->total_bytes_received_hi; |
841 | qstats->valid_bytes_received_lo = | 878 | qstats->valid_bytes_received_lo = |
842 | qstats->total_bytes_received_lo; | 879 | qstats->total_bytes_received_lo; |
843 | 880 | ||
844 | qstats->error_bytes_received_hi = | ||
845 | le32_to_cpu(tclient->rcv_error_bytes.hi); | ||
846 | qstats->error_bytes_received_lo = | ||
847 | le32_to_cpu(tclient->rcv_error_bytes.lo); | ||
848 | |||
849 | ADD_64(qstats->total_bytes_received_hi, | ||
850 | qstats->error_bytes_received_hi, | ||
851 | qstats->total_bytes_received_lo, | ||
852 | qstats->error_bytes_received_lo); | ||
853 | 881 | ||
854 | UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, | 882 | UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, |
855 | total_unicast_packets_received); | 883 | total_unicast_packets_received); |
856 | UPDATE_EXTEND_TSTAT(rcv_multicast_pkts, | 884 | UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, |
857 | total_multicast_packets_received); | 885 | total_multicast_packets_received); |
858 | UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts, | 886 | UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, |
859 | total_broadcast_packets_received); | 887 | total_broadcast_packets_received); |
860 | UPDATE_EXTEND_TSTAT(packets_too_big_discard, | 888 | UPDATE_EXTEND_TSTAT(pkts_too_big_discard, |
861 | etherstatsoverrsizepkts); | 889 | etherstatsoverrsizepkts); |
862 | UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard); | 890 | UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard); |
863 | 891 | ||
@@ -871,30 +899,78 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
871 | UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard); | 899 | UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard); |
872 | UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); | 900 | UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); |
873 | 901 | ||
902 | qstats->total_broadcast_bytes_transmitted_hi = | ||
903 | le32_to_cpu(xclient->bcast_bytes_sent.hi); | ||
904 | qstats->total_broadcast_bytes_transmitted_lo = | ||
905 | le32_to_cpu(xclient->bcast_bytes_sent.lo); | ||
906 | |||
907 | qstats->total_multicast_bytes_transmitted_hi = | ||
908 | le32_to_cpu(xclient->mcast_bytes_sent.hi); | ||
909 | qstats->total_multicast_bytes_transmitted_lo = | ||
910 | le32_to_cpu(xclient->mcast_bytes_sent.lo); | ||
911 | |||
912 | qstats->total_unicast_bytes_transmitted_hi = | ||
913 | le32_to_cpu(xclient->ucast_bytes_sent.hi); | ||
914 | qstats->total_unicast_bytes_transmitted_lo = | ||
915 | le32_to_cpu(xclient->ucast_bytes_sent.lo); | ||
916 | /* | ||
917 | * sum all unicast/multicast/broadcast bytes | ||
918 | * into total_bytes_transmitted | ||
919 | */ | ||
874 | qstats->total_bytes_transmitted_hi = | 920 | qstats->total_bytes_transmitted_hi = |
875 | le32_to_cpu(xclient->unicast_bytes_sent.hi); | 921 | qstats->total_unicast_bytes_transmitted_hi; |
876 | qstats->total_bytes_transmitted_lo = | 922 | qstats->total_bytes_transmitted_lo = |
877 | le32_to_cpu(xclient->unicast_bytes_sent.lo); | 923 | qstats->total_unicast_bytes_transmitted_lo; |
878 | 924 | ||
879 | ADD_64(qstats->total_bytes_transmitted_hi, | 925 | ADD_64(qstats->total_bytes_transmitted_hi, |
880 | le32_to_cpu(xclient->multicast_bytes_sent.hi), | 926 | qstats->total_broadcast_bytes_transmitted_hi, |
881 | qstats->total_bytes_transmitted_lo, | 927 | qstats->total_bytes_transmitted_lo, |
882 | le32_to_cpu(xclient->multicast_bytes_sent.lo)); | 928 | qstats->total_broadcast_bytes_transmitted_lo); |
883 | 929 | ||
884 | ADD_64(qstats->total_bytes_transmitted_hi, | 930 | ADD_64(qstats->total_bytes_transmitted_hi, |
885 | le32_to_cpu(xclient->broadcast_bytes_sent.hi), | 931 | qstats->total_multicast_bytes_transmitted_hi, |
886 | qstats->total_bytes_transmitted_lo, | 932 | qstats->total_bytes_transmitted_lo, |
887 | le32_to_cpu(xclient->broadcast_bytes_sent.lo)); | 933 | qstats->total_multicast_bytes_transmitted_lo); |
888 | 934 | ||
889 | UPDATE_EXTEND_XSTAT(unicast_pkts_sent, | 935 | UPDATE_EXTEND_XSTAT(ucast_pkts_sent, |
890 | total_unicast_packets_transmitted); | 936 | total_unicast_packets_transmitted); |
891 | UPDATE_EXTEND_XSTAT(multicast_pkts_sent, | 937 | UPDATE_EXTEND_XSTAT(mcast_pkts_sent, |
892 | total_multicast_packets_transmitted); | 938 | total_multicast_packets_transmitted); |
893 | UPDATE_EXTEND_XSTAT(broadcast_pkts_sent, | 939 | UPDATE_EXTEND_XSTAT(bcast_pkts_sent, |
894 | total_broadcast_packets_transmitted); | 940 | total_broadcast_packets_transmitted); |
895 | 941 | ||
896 | old_tclient->checksum_discard = tclient->checksum_discard; | 942 | UPDATE_EXTEND_TSTAT(checksum_discard, |
897 | old_tclient->ttl0_discard = tclient->ttl0_discard; | 943 | total_packets_received_checksum_discarded); |
944 | UPDATE_EXTEND_TSTAT(ttl0_discard, | ||
945 | total_packets_received_ttl0_discarded); | ||
946 | |||
947 | UPDATE_EXTEND_XSTAT(error_drop_pkts, | ||
948 | total_transmitted_dropped_packets_error); | ||
949 | |||
950 | /* TPA aggregations completed */ | ||
951 | UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations); | ||
952 | /* Number of network frames aggregated by TPA */ | ||
953 | UPDATE_EXTEND_USTAT(coalesced_pkts, | ||
954 | total_tpa_aggregated_frames); | ||
955 | /* Total number of bytes in completed TPA aggregations */ | ||
956 | qstats->total_tpa_bytes_lo = | ||
957 | le32_to_cpu(uclient->coalesced_bytes.lo); | ||
958 | qstats->total_tpa_bytes_hi = | ||
959 | le32_to_cpu(uclient->coalesced_bytes.hi); | ||
960 | |||
961 | /* TPA stats per-function */ | ||
962 | ADD_64(estats->total_tpa_aggregations_hi, | ||
963 | qstats->total_tpa_aggregations_hi, | ||
964 | estats->total_tpa_aggregations_lo, | ||
965 | qstats->total_tpa_aggregations_lo); | ||
966 | ADD_64(estats->total_tpa_aggregated_frames_hi, | ||
967 | qstats->total_tpa_aggregated_frames_hi, | ||
968 | estats->total_tpa_aggregated_frames_lo, | ||
969 | qstats->total_tpa_aggregated_frames_lo); | ||
970 | ADD_64(estats->total_tpa_bytes_hi, | ||
971 | qstats->total_tpa_bytes_hi, | ||
972 | estats->total_tpa_bytes_lo, | ||
973 | qstats->total_tpa_bytes_lo); | ||
898 | 974 | ||
899 | ADD_64(fstats->total_bytes_received_hi, | 975 | ADD_64(fstats->total_bytes_received_hi, |
900 | qstats->total_bytes_received_hi, | 976 | qstats->total_bytes_received_hi, |
@@ -933,10 +1009,6 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
933 | fstats->valid_bytes_received_lo, | 1009 | fstats->valid_bytes_received_lo, |
934 | qstats->valid_bytes_received_lo); | 1010 | qstats->valid_bytes_received_lo); |
935 | 1011 | ||
936 | ADD_64(estats->error_bytes_received_hi, | ||
937 | qstats->error_bytes_received_hi, | ||
938 | estats->error_bytes_received_lo, | ||
939 | qstats->error_bytes_received_lo); | ||
940 | ADD_64(estats->etherstatsoverrsizepkts_hi, | 1012 | ADD_64(estats->etherstatsoverrsizepkts_hi, |
941 | qstats->etherstatsoverrsizepkts_hi, | 1013 | qstats->etherstatsoverrsizepkts_hi, |
942 | estats->etherstatsoverrsizepkts_lo, | 1014 | estats->etherstatsoverrsizepkts_lo, |
@@ -950,9 +1022,19 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
950 | fstats->total_bytes_received_lo, | 1022 | fstats->total_bytes_received_lo, |
951 | estats->rx_stat_ifhcinbadoctets_lo); | 1023 | estats->rx_stat_ifhcinbadoctets_lo); |
952 | 1024 | ||
1025 | ADD_64(fstats->total_bytes_received_hi, | ||
1026 | tfunc->rcv_error_bytes.hi, | ||
1027 | fstats->total_bytes_received_lo, | ||
1028 | tfunc->rcv_error_bytes.lo); | ||
1029 | |||
953 | memcpy(estats, &(fstats->total_bytes_received_hi), | 1030 | memcpy(estats, &(fstats->total_bytes_received_hi), |
954 | sizeof(struct host_func_stats) - 2*sizeof(u32)); | 1031 | sizeof(struct host_func_stats) - 2*sizeof(u32)); |
955 | 1032 | ||
1033 | ADD_64(estats->error_bytes_received_hi, | ||
1034 | tfunc->rcv_error_bytes.hi, | ||
1035 | estats->error_bytes_received_lo, | ||
1036 | tfunc->rcv_error_bytes.lo); | ||
1037 | |||
956 | ADD_64(estats->etherstatsoverrsizepkts_hi, | 1038 | ADD_64(estats->etherstatsoverrsizepkts_hi, |
957 | estats->rx_stat_dot3statsframestoolong_hi, | 1039 | estats->rx_stat_dot3statsframestoolong_hi, |
958 | estats->etherstatsoverrsizepkts_lo, | 1040 | estats->etherstatsoverrsizepkts_lo, |
@@ -965,8 +1047,8 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
965 | if (bp->port.pmf) { | 1047 | if (bp->port.pmf) { |
966 | estats->mac_filter_discard = | 1048 | estats->mac_filter_discard = |
967 | le32_to_cpu(tport->mac_filter_discard); | 1049 | le32_to_cpu(tport->mac_filter_discard); |
968 | estats->xxoverflow_discard = | 1050 | estats->mf_tag_discard = |
969 | le32_to_cpu(tport->xxoverflow_discard); | 1051 | le32_to_cpu(tport->mf_tag_discard); |
970 | estats->brb_truncate_discard = | 1052 | estats->brb_truncate_discard = |
971 | le32_to_cpu(tport->brb_truncate_discard); | 1053 | le32_to_cpu(tport->brb_truncate_discard); |
972 | estats->mac_discard = le32_to_cpu(tport->mac_discard); | 1054 | estats->mac_discard = le32_to_cpu(tport->mac_discard); |
@@ -1023,7 +1105,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp) | |||
1023 | nstats->rx_frame_errors = | 1105 | nstats->rx_frame_errors = |
1024 | bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi); | 1106 | bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi); |
1025 | nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi); | 1107 | nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi); |
1026 | nstats->rx_missed_errors = estats->xxoverflow_discard; | 1108 | nstats->rx_missed_errors = 0; |
1027 | 1109 | ||
1028 | nstats->rx_errors = nstats->rx_length_errors + | 1110 | nstats->rx_errors = nstats->rx_length_errors + |
1029 | nstats->rx_over_errors + | 1111 | nstats->rx_over_errors + |
@@ -1065,10 +1147,27 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp) | |||
1065 | } | 1147 | } |
1066 | } | 1148 | } |
1067 | 1149 | ||
1150 | static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp) | ||
1151 | { | ||
1152 | u32 val; | ||
1153 | |||
1154 | if (SHMEM2_HAS(bp, edebug_driver_if[1])) { | ||
1155 | val = SHMEM2_RD(bp, edebug_driver_if[1]); | ||
1156 | |||
1157 | if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) | ||
1158 | return true; | ||
1159 | } | ||
1160 | |||
1161 | return false; | ||
1162 | } | ||
1163 | |||
1068 | static void bnx2x_stats_update(struct bnx2x *bp) | 1164 | static void bnx2x_stats_update(struct bnx2x *bp) |
1069 | { | 1165 | { |
1070 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | 1166 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); |
1071 | 1167 | ||
1168 | if (bnx2x_edebug_stats_stopped(bp)) | ||
1169 | return; | ||
1170 | |||
1072 | if (*stats_comp != DMAE_COMP_VAL) | 1171 | if (*stats_comp != DMAE_COMP_VAL) |
1073 | return; | 1172 | return; |
1074 | 1173 | ||
@@ -1088,8 +1187,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1088 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | 1187 | struct bnx2x_eth_stats *estats = &bp->eth_stats; |
1089 | int i; | 1188 | int i; |
1090 | 1189 | ||
1091 | printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n", | 1190 | netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n", |
1092 | bp->dev->name, | ||
1093 | estats->brb_drop_lo, estats->brb_truncate_lo); | 1191 | estats->brb_drop_lo, estats->brb_truncate_lo); |
1094 | 1192 | ||
1095 | for_each_eth_queue(bp, i) { | 1193 | for_each_eth_queue(bp, i) { |
@@ -1149,6 +1247,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp) | |||
1149 | else | 1247 | else |
1150 | dmae->opcode = bnx2x_dmae_opcode_add_comp( | 1248 | dmae->opcode = bnx2x_dmae_opcode_add_comp( |
1151 | opcode, DMAE_COMP_PCI); | 1249 | opcode, DMAE_COMP_PCI); |
1250 | |||
1152 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); | 1251 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); |
1153 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); | 1252 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); |
1154 | dmae->dst_addr_lo = bp->port.port_stx >> 2; | 1253 | dmae->dst_addr_lo = bp->port.port_stx >> 2; |
@@ -1235,13 +1334,9 @@ static const struct { | |||
1235 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | 1334 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) |
1236 | { | 1335 | { |
1237 | enum bnx2x_stats_state state; | 1336 | enum bnx2x_stats_state state; |
1238 | |||
1239 | if (unlikely(bp->panic)) | 1337 | if (unlikely(bp->panic)) |
1240 | return; | 1338 | return; |
1241 | |||
1242 | bnx2x_stats_stm[bp->stats_state][event].action(bp); | 1339 | bnx2x_stats_stm[bp->stats_state][event].action(bp); |
1243 | |||
1244 | /* Protect a state change flow */ | ||
1245 | spin_lock_bh(&bp->stats_lock); | 1340 | spin_lock_bh(&bp->stats_lock); |
1246 | state = bp->stats_state; | 1341 | state = bp->stats_state; |
1247 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | 1342 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; |
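
bnx2x_stats_handle() above stays table driven: bnx2x_stats_stm is indexed by [current state][event], each cell names an action callback plus the next state, and the transition itself happens under stats_lock. A stand-alone sketch of that pattern with two states and two events (illustrative names only, not the driver's actual table):

#include <stdio.h>

enum state { ST_DISABLED, ST_ENABLED, ST_MAX };
enum event { EV_LINK_UP, EV_UPDATE, EV_MAX };

struct dev;				/* stands in for struct bnx2x */

static void act_nop(struct dev *d)    { (void)d; }
static void act_start(struct dev *d)  { (void)d; printf("start stats DMA\n"); }
static void act_update(struct dev *d) { (void)d; printf("refresh counters\n"); }

/* one cell per (state, event): what to do and where to go next */
static const struct {
	void (*action)(struct dev *d);
	enum state next_state;
} stm[ST_MAX][EV_MAX] = {
	[ST_DISABLED] = {
		[EV_LINK_UP] = { act_start,  ST_ENABLED  },
		[EV_UPDATE]  = { act_nop,    ST_DISABLED },
	},
	[ST_ENABLED] = {
		[EV_LINK_UP] = { act_nop,    ST_ENABLED  },
		[EV_UPDATE]  = { act_update, ST_ENABLED  },
	},
};

static enum state handle(struct dev *d, enum state cur, enum event ev)
{
	stm[cur][ev].action(d);
	return stm[cur][ev].next_state;	/* the driver does this under stats_lock */
}

int main(void)
{
	enum state s = ST_DISABLED;

	s = handle(NULL, s, EV_LINK_UP);	/* starts stats DMA, -> ENABLED */
	s = handle(NULL, s, EV_UPDATE);		/* refreshes counters */
	return s == ST_ENABLED ? 0 : 1;
}
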
@@ -1297,7 +1392,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp) | |||
1297 | func_stx = bp->func_stx; | 1392 | func_stx = bp->func_stx; |
1298 | 1393 | ||
1299 | for (vn = VN_0; vn < vn_max; vn++) { | 1394 | for (vn = VN_0; vn < vn_max; vn++) { |
1300 | int mb_idx = !CHIP_IS_E2(bp) ? 2*vn + BP_PORT(bp) : vn; | 1395 | int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn; |
1301 | 1396 | ||
1302 | bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); | 1397 | bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); |
1303 | bnx2x_func_stats_init(bp); | 1398 | bnx2x_func_stats_init(bp); |
@@ -1339,12 +1434,97 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp) | |||
1339 | bnx2x_stats_comp(bp); | 1434 | bnx2x_stats_comp(bp); |
1340 | } | 1435 | } |
1341 | 1436 | ||
1437 | /** | ||
1438 | * This function prepares the statistics ramrod data so that later we | ||
1439 | * only have to increment the statistics counter and resend the same | ||
1440 | * ramrod each time a new request is needed. | ||
1441 | * | ||
1442 | * @param bp | ||
1443 | */ | ||
1444 | static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) | ||
1445 | { | ||
1446 | int i; | ||
1447 | struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr; | ||
1448 | |||
1449 | dma_addr_t cur_data_offset; | ||
1450 | struct stats_query_entry *cur_query_entry; | ||
1451 | |||
1452 | stats_hdr->cmd_num = bp->fw_stats_num; | ||
1453 | stats_hdr->drv_stats_counter = 0; | ||
1454 | |||
1455 | /* storm_counters struct contains the counters of completed | ||
1456 | * statistics requests per storm which are incremented by FW | ||
1457 | * each time it completes handling a statistics ramrod. We will | ||
1458 | * check these counters in the timer handler and discard a | ||
1459 | * (statistics) ramrod completion. | ||
1460 | */ | ||
1461 | cur_data_offset = bp->fw_stats_data_mapping + | ||
1462 | offsetof(struct bnx2x_fw_stats_data, storm_counters); | ||
1463 | |||
1464 | stats_hdr->stats_counters_addrs.hi = | ||
1465 | cpu_to_le32(U64_HI(cur_data_offset)); | ||
1466 | stats_hdr->stats_counters_addrs.lo = | ||
1467 | cpu_to_le32(U64_LO(cur_data_offset)); | ||
1468 | |||
1469 | /* prepare for the first stats ramrod (will be completed with | ||
1470 | * the counters equal to zero) - init counters to something different. | ||
1471 | */ | ||
1472 | memset(&bp->fw_stats_data->storm_counters, 0xff, | ||
1473 | sizeof(struct stats_counter)); | ||
1474 | |||
1475 | /**** Port FW statistics data ****/ | ||
1476 | cur_data_offset = bp->fw_stats_data_mapping + | ||
1477 | offsetof(struct bnx2x_fw_stats_data, port); | ||
1478 | |||
1479 | cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX]; | ||
1480 | |||
1481 | cur_query_entry->kind = STATS_TYPE_PORT; | ||
1482 | /* For port query index is a DONT CARE */ | ||
1483 | cur_query_entry->index = BP_PORT(bp); | ||
1484 | /* For port query funcID is a DONT CARE */ | ||
1485 | cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); | ||
1486 | cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); | ||
1487 | cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); | ||
1488 | |||
1489 | /**** PF FW statistics data ****/ | ||
1490 | cur_data_offset = bp->fw_stats_data_mapping + | ||
1491 | offsetof(struct bnx2x_fw_stats_data, pf); | ||
1492 | |||
1493 | cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX]; | ||
1494 | |||
1495 | cur_query_entry->kind = STATS_TYPE_PF; | ||
1496 | /* For PF query index is a DONT CARE */ | ||
1497 | cur_query_entry->index = BP_PORT(bp); | ||
1498 | cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); | ||
1499 | cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); | ||
1500 | cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); | ||
1501 | |||
1502 | /**** Clients' queries ****/ | ||
1503 | cur_data_offset = bp->fw_stats_data_mapping + | ||
1504 | offsetof(struct bnx2x_fw_stats_data, queue_stats); | ||
1505 | |||
1506 | for_each_eth_queue(bp, i) { | ||
1507 | cur_query_entry = | ||
1508 | &bp->fw_stats_req-> | ||
1509 | query[BNX2X_FIRST_QUEUE_QUERY_IDX + i]; | ||
1510 | |||
1511 | cur_query_entry->kind = STATS_TYPE_QUEUE; | ||
1512 | cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]); | ||
1513 | cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); | ||
1514 | cur_query_entry->address.hi = | ||
1515 | cpu_to_le32(U64_HI(cur_data_offset)); | ||
1516 | cur_query_entry->address.lo = | ||
1517 | cpu_to_le32(U64_LO(cur_data_offset)); | ||
1518 | |||
1519 | cur_data_offset += sizeof(struct per_queue_stats); | ||
1520 | } | ||
1521 | } | ||
1522 | |||
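
bnx2x_prep_fw_stats_req() above fills the request once: each query entry (port, PF, then one per ETH queue) records the DMA address of its own slice inside the single fw_stats_data buffer, computed as the buffer base plus offsetof() and a running per-queue offset, so later the driver only increments the counter and resends the same ramrod. A stand-alone sketch of that addressing scheme (simplified structures and hypothetical field names, not the firmware layout):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUES 4

enum query_kind { QUERY_PORT, QUERY_PF, QUERY_QUEUE };

/* simplified stand-ins for the firmware statistics layout */
struct per_queue_stats { uint64_t bytes, packets; };

struct fw_stats_data {
	uint32_t storm_counters[4];
	struct { uint64_t rx_bytes; } port;
	struct { uint64_t error_bytes; } pf;
	struct per_queue_stats queue_stats[MAX_QUEUES];
};

/* one query entry: where the firmware should write this entity's stats */
struct query_entry {
	enum query_kind kind;
	uint64_t dma_address;	/* buffer base + offset of the slice */
};

static void prep_queries(uint64_t base, struct query_entry *q, int num_queues)
{
	uint64_t off;

	q[0].kind = QUERY_PORT;
	q[0].dma_address = base + offsetof(struct fw_stats_data, port);
	q[1].kind = QUERY_PF;
	q[1].dma_address = base + offsetof(struct fw_stats_data, pf);

	off = base + offsetof(struct fw_stats_data, queue_stats);
	for (int i = 0; i < num_queues; i++) {
		q[2 + i].kind = QUERY_QUEUE;
		q[2 + i].dma_address = off;
		off += sizeof(struct per_queue_stats);	/* next queue's slice */
	}
}

int main(void)
{
	struct query_entry q[2 + MAX_QUEUES];

	prep_queries(0x1000, q, MAX_QUEUES);
	for (size_t i = 0; i < sizeof(q) / sizeof(q[0]); i++)
		printf("entry %zu -> 0x%llx\n", i,
		       (unsigned long long)q[i].dma_address);
	return 0;
}
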
1342 | void bnx2x_stats_init(struct bnx2x *bp) | 1523 | void bnx2x_stats_init(struct bnx2x *bp) |
1343 | { | 1524 | { |
1344 | int port = BP_PORT(bp); | 1525 | int /*abs*/port = BP_PORT(bp); |
1345 | int mb_idx = BP_FW_MB_IDX(bp); | 1526 | int mb_idx = BP_FW_MB_IDX(bp); |
1346 | int i; | 1527 | int i; |
1347 | struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats); | ||
1348 | 1528 | ||
1349 | bp->stats_pending = 0; | 1529 | bp->stats_pending = 0; |
1350 | bp->executer_idx = 0; | 1530 | bp->executer_idx = 0; |
@@ -1362,45 +1542,35 @@ void bnx2x_stats_init(struct bnx2x *bp) | |||
1362 | DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n", | 1542 | DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n", |
1363 | bp->port.port_stx, bp->func_stx); | 1543 | bp->port.port_stx, bp->func_stx); |
1364 | 1544 | ||
1545 | port = BP_PORT(bp); | ||
1365 | /* port stats */ | 1546 | /* port stats */ |
1366 | memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); | 1547 | memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); |
1367 | bp->port.old_nig_stats.brb_discard = | 1548 | bp->port.old_nig_stats.brb_discard = |
1368 | REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); | 1549 | REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); |
1369 | bp->port.old_nig_stats.brb_truncate = | 1550 | bp->port.old_nig_stats.brb_truncate = |
1370 | REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); | 1551 | REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); |
1371 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, | 1552 | if (!CHIP_IS_E3(bp)) { |
1372 | &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); | 1553 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, |
1373 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, | 1554 | &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); |
1374 | &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); | 1555 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, |
1556 | &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); | ||
1557 | } | ||
1375 | 1558 | ||
1376 | /* function stats */ | 1559 | /* function stats */ |
1377 | for_each_queue(bp, i) { | 1560 | for_each_queue(bp, i) { |
1378 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 1561 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
1379 | 1562 | ||
1380 | memset(&fp->old_tclient, 0, | 1563 | memset(&fp->old_tclient, 0, sizeof(fp->old_tclient)); |
1381 | sizeof(struct tstorm_per_client_stats)); | 1564 | memset(&fp->old_uclient, 0, sizeof(fp->old_uclient)); |
1382 | memset(&fp->old_uclient, 0, | 1565 | memset(&fp->old_xclient, 0, sizeof(fp->old_xclient)); |
1383 | sizeof(struct ustorm_per_client_stats)); | 1566 | memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats)); |
1384 | memset(&fp->old_xclient, 0, | ||
1385 | sizeof(struct xstorm_per_client_stats)); | ||
1386 | memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats)); | ||
1387 | } | 1567 | } |
1388 | 1568 | ||
1389 | /* FW stats are currently collected for ETH clients only */ | 1569 | /* Prepare statistics ramrod data */ |
1390 | for_each_eth_queue(bp, i) { | 1570 | bnx2x_prep_fw_stats_req(bp); |
1391 | /* Set initial stats counter in the stats ramrod data to -1 */ | ||
1392 | int cl_id = bp->fp[i].cl_id; | ||
1393 | |||
1394 | stats->xstorm_common.client_statistics[cl_id]. | ||
1395 | stats_counter = 0xffff; | ||
1396 | stats->ustorm_common.client_statistics[cl_id]. | ||
1397 | stats_counter = 0xffff; | ||
1398 | stats->tstorm_common.client_statistics[cl_id]. | ||
1399 | stats_counter = 0xffff; | ||
1400 | } | ||
1401 | 1571 | ||
1402 | memset(&bp->dev->stats, 0, sizeof(struct net_device_stats)); | 1572 | memset(&bp->dev->stats, 0, sizeof(bp->dev->stats)); |
1403 | memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats)); | 1573 | memset(&bp->eth_stats, 0, sizeof(bp->eth_stats)); |
1404 | 1574 | ||
1405 | bp->stats_state = STATS_STATE_DISABLED; | 1575 | bp->stats_state = STATS_STATE_DISABLED; |
1406 | 1576 | ||
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h index 45d14d8bc1aa..5d8ce2f6afef 100644 --- a/drivers/net/bnx2x/bnx2x_stats.h +++ b/drivers/net/bnx2x/bnx2x_stats.h | |||
@@ -14,48 +14,11 @@ | |||
14 | * Statistics and Link management by Yitchak Gertner | 14 | * Statistics and Link management by Yitchak Gertner |
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | |||
18 | #ifndef BNX2X_STATS_H | 17 | #ifndef BNX2X_STATS_H |
19 | #define BNX2X_STATS_H | 18 | #define BNX2X_STATS_H |
20 | 19 | ||
21 | #include <linux/types.h> | 20 | #include <linux/types.h> |
22 | 21 | ||
23 | struct bnx2x_eth_q_stats { | ||
24 | u32 total_bytes_received_hi; | ||
25 | u32 total_bytes_received_lo; | ||
26 | u32 total_bytes_transmitted_hi; | ||
27 | u32 total_bytes_transmitted_lo; | ||
28 | u32 total_unicast_packets_received_hi; | ||
29 | u32 total_unicast_packets_received_lo; | ||
30 | u32 total_multicast_packets_received_hi; | ||
31 | u32 total_multicast_packets_received_lo; | ||
32 | u32 total_broadcast_packets_received_hi; | ||
33 | u32 total_broadcast_packets_received_lo; | ||
34 | u32 total_unicast_packets_transmitted_hi; | ||
35 | u32 total_unicast_packets_transmitted_lo; | ||
36 | u32 total_multicast_packets_transmitted_hi; | ||
37 | u32 total_multicast_packets_transmitted_lo; | ||
38 | u32 total_broadcast_packets_transmitted_hi; | ||
39 | u32 total_broadcast_packets_transmitted_lo; | ||
40 | u32 valid_bytes_received_hi; | ||
41 | u32 valid_bytes_received_lo; | ||
42 | |||
43 | u32 error_bytes_received_hi; | ||
44 | u32 error_bytes_received_lo; | ||
45 | u32 etherstatsoverrsizepkts_hi; | ||
46 | u32 etherstatsoverrsizepkts_lo; | ||
47 | u32 no_buff_discard_hi; | ||
48 | u32 no_buff_discard_lo; | ||
49 | |||
50 | u32 driver_xoff; | ||
51 | u32 rx_err_discard_pkt; | ||
52 | u32 rx_skb_alloc_failed; | ||
53 | u32 hw_csum_err; | ||
54 | }; | ||
55 | |||
56 | #define Q_STATS_OFFSET32(stat_name) \ | ||
57 | (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) | ||
58 | |||
59 | struct nig_stats { | 22 | struct nig_stats { |
60 | u32 brb_discard; | 23 | u32 brb_discard; |
61 | u32 brb_packet; | 24 | u32 brb_packet; |
@@ -212,7 +175,7 @@ struct bnx2x_eth_stats { | |||
212 | u32 brb_truncate_lo; | 175 | u32 brb_truncate_lo; |
213 | 176 | ||
214 | u32 mac_filter_discard; | 177 | u32 mac_filter_discard; |
215 | u32 xxoverflow_discard; | 178 | u32 mf_tag_discard; |
216 | u32 brb_truncate_discard; | 179 | u32 brb_truncate_discard; |
217 | u32 mac_discard; | 180 | u32 mac_discard; |
218 | 181 | ||
@@ -222,16 +185,197 @@ struct bnx2x_eth_stats { | |||
222 | u32 hw_csum_err; | 185 | u32 hw_csum_err; |
223 | 186 | ||
224 | u32 nig_timer_max; | 187 | u32 nig_timer_max; |
188 | |||
189 | /* TPA */ | ||
190 | u32 total_tpa_aggregations_hi; | ||
191 | u32 total_tpa_aggregations_lo; | ||
192 | u32 total_tpa_aggregated_frames_hi; | ||
193 | u32 total_tpa_aggregated_frames_lo; | ||
194 | u32 total_tpa_bytes_hi; | ||
195 | u32 total_tpa_bytes_lo; | ||
196 | }; | ||
197 | |||
198 | |||
199 | struct bnx2x_eth_q_stats { | ||
200 | u32 total_unicast_bytes_received_hi; | ||
201 | u32 total_unicast_bytes_received_lo; | ||
202 | u32 total_broadcast_bytes_received_hi; | ||
203 | u32 total_broadcast_bytes_received_lo; | ||
204 | u32 total_multicast_bytes_received_hi; | ||
205 | u32 total_multicast_bytes_received_lo; | ||
206 | u32 total_bytes_received_hi; | ||
207 | u32 total_bytes_received_lo; | ||
208 | u32 total_unicast_bytes_transmitted_hi; | ||
209 | u32 total_unicast_bytes_transmitted_lo; | ||
210 | u32 total_broadcast_bytes_transmitted_hi; | ||
211 | u32 total_broadcast_bytes_transmitted_lo; | ||
212 | u32 total_multicast_bytes_transmitted_hi; | ||
213 | u32 total_multicast_bytes_transmitted_lo; | ||
214 | u32 total_bytes_transmitted_hi; | ||
215 | u32 total_bytes_transmitted_lo; | ||
216 | u32 total_unicast_packets_received_hi; | ||
217 | u32 total_unicast_packets_received_lo; | ||
218 | u32 total_multicast_packets_received_hi; | ||
219 | u32 total_multicast_packets_received_lo; | ||
220 | u32 total_broadcast_packets_received_hi; | ||
221 | u32 total_broadcast_packets_received_lo; | ||
222 | u32 total_unicast_packets_transmitted_hi; | ||
223 | u32 total_unicast_packets_transmitted_lo; | ||
224 | u32 total_multicast_packets_transmitted_hi; | ||
225 | u32 total_multicast_packets_transmitted_lo; | ||
226 | u32 total_broadcast_packets_transmitted_hi; | ||
227 | u32 total_broadcast_packets_transmitted_lo; | ||
228 | u32 valid_bytes_received_hi; | ||
229 | u32 valid_bytes_received_lo; | ||
230 | |||
231 | u32 etherstatsoverrsizepkts_hi; | ||
232 | u32 etherstatsoverrsizepkts_lo; | ||
233 | u32 no_buff_discard_hi; | ||
234 | u32 no_buff_discard_lo; | ||
235 | |||
236 | u32 driver_xoff; | ||
237 | u32 rx_err_discard_pkt; | ||
238 | u32 rx_skb_alloc_failed; | ||
239 | u32 hw_csum_err; | ||
240 | |||
241 | u32 total_packets_received_checksum_discarded_hi; | ||
242 | u32 total_packets_received_checksum_discarded_lo; | ||
243 | u32 total_packets_received_ttl0_discarded_hi; | ||
244 | u32 total_packets_received_ttl0_discarded_lo; | ||
245 | u32 total_transmitted_dropped_packets_error_hi; | ||
246 | u32 total_transmitted_dropped_packets_error_lo; | ||
247 | |||
248 | /* TPA */ | ||
249 | u32 total_tpa_aggregations_hi; | ||
250 | u32 total_tpa_aggregations_lo; | ||
251 | u32 total_tpa_aggregated_frames_hi; | ||
252 | u32 total_tpa_aggregated_frames_lo; | ||
253 | u32 total_tpa_bytes_hi; | ||
254 | u32 total_tpa_bytes_lo; | ||
225 | }; | 255 | }; |
226 | 256 | ||
227 | #define STATS_OFFSET32(stat_name) \ | 257 | /**************************************************************************** |
228 | (offsetof(struct bnx2x_eth_stats, stat_name) / 4) | 258 | * Macros |
259 | ****************************************************************************/ | ||
260 | |||
261 | /* sum[hi:lo] += add[hi:lo] */ | ||
262 | #define ADD_64(s_hi, a_hi, s_lo, a_lo) \ | ||
263 | do { \ | ||
264 | s_lo += a_lo; \ | ||
265 | s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ | ||
266 | } while (0) | ||
267 | |||
268 | /* difference = minuend - subtrahend */ | ||
269 | #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ | ||
270 | do { \ | ||
271 | if (m_lo < s_lo) { \ | ||
272 | /* underflow */ \ | ||
273 | d_hi = m_hi - s_hi; \ | ||
274 | if (d_hi > 0) { \ | ||
275 | /* we can 'loan' 1 */ \ | ||
276 | d_hi--; \ | ||
277 | d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ | ||
278 | } else { \ | ||
279 | /* m_hi <= s_hi */ \ | ||
280 | d_hi = 0; \ | ||
281 | d_lo = 0; \ | ||
282 | } \ | ||
283 | } else { \ | ||
284 | /* m_lo >= s_lo */ \ | ||
285 | if (m_hi < s_hi) { \ | ||
286 | d_hi = 0; \ | ||
287 | d_lo = 0; \ | ||
288 | } else { \ | ||
289 | /* m_hi >= s_hi */ \ | ||
290 | d_hi = m_hi - s_hi; \ | ||
291 | d_lo = m_lo - s_lo; \ | ||
292 | } \ | ||
293 | } \ | ||
294 | } while (0) | ||
295 | |||
296 | #define UPDATE_STAT64(s, t) \ | ||
297 | do { \ | ||
298 | DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \ | ||
299 | diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \ | ||
300 | pstats->mac_stx[0].t##_hi = new->s##_hi; \ | ||
301 | pstats->mac_stx[0].t##_lo = new->s##_lo; \ | ||
302 | ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \ | ||
303 | pstats->mac_stx[1].t##_lo, diff.lo); \ | ||
304 | } while (0) | ||
305 | |||
306 | #define UPDATE_STAT64_NIG(s, t) \ | ||
307 | do { \ | ||
308 | DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \ | ||
309 | diff.lo, new->s##_lo, old->s##_lo); \ | ||
310 | ADD_64(estats->t##_hi, diff.hi, \ | ||
311 | estats->t##_lo, diff.lo); \ | ||
312 | } while (0) | ||
313 | |||
314 | /* sum[hi:lo] += add */ | ||
315 | #define ADD_EXTEND_64(s_hi, s_lo, a) \ | ||
316 | do { \ | ||
317 | s_lo += a; \ | ||
318 | s_hi += (s_lo < a) ? 1 : 0; \ | ||
319 | } while (0) | ||
320 | |||
321 | #define ADD_STAT64(diff, t) \ | ||
322 | do { \ | ||
323 | ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \ | ||
324 | pstats->mac_stx[1].t##_lo, new->diff##_lo); \ | ||
325 | } while (0) | ||
326 | |||
327 | #define UPDATE_EXTEND_STAT(s) \ | ||
328 | do { \ | ||
329 | ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \ | ||
330 | pstats->mac_stx[1].s##_lo, \ | ||
331 | new->s); \ | ||
332 | } while (0) | ||
333 | |||
334 | #define UPDATE_EXTEND_TSTAT(s, t) \ | ||
335 | do { \ | ||
336 | diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \ | ||
337 | old_tclient->s = tclient->s; \ | ||
338 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
339 | } while (0) | ||
340 | |||
341 | #define UPDATE_EXTEND_USTAT(s, t) \ | ||
342 | do { \ | ||
343 | diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ | ||
344 | old_uclient->s = uclient->s; \ | ||
345 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
346 | } while (0) | ||
347 | |||
348 | #define UPDATE_EXTEND_XSTAT(s, t) \ | ||
349 | do { \ | ||
350 | diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \ | ||
351 | old_xclient->s = xclient->s; \ | ||
352 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
353 | } while (0) | ||
354 | |||
355 | /* minuend -= subtrahend */ | ||
356 | #define SUB_64(m_hi, s_hi, m_lo, s_lo) \ | ||
357 | do { \ | ||
358 | DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \ | ||
359 | } while (0) | ||
360 | |||
361 | /* minuend[hi:lo] -= subtrahend */ | ||
362 | #define SUB_EXTEND_64(m_hi, m_lo, s) \ | ||
363 | do { \ | ||
364 | SUB_64(m_hi, 0, m_lo, s); \ | ||
365 | } while (0) | ||
366 | |||
367 | #define SUB_EXTEND_USTAT(s, t) \ | ||
368 | do { \ | ||
369 | diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ | ||
370 | SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
371 | } while (0) | ||
372 | |||
229 | 373 | ||
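
The hardware and firmware export many of these counters as two 32-bit halves, so the macros above do the 64-bit arithmetic by hand: ADD_64 propagates the carry out of the low word, DIFF_64 borrows across the halves, and ADD_EXTEND_64 widens a 32-bit delta into a running hi:lo pair. A small stand-alone check of the carry logic (the two macro bodies are copied from above; only the test harness is new):

#include <stdint.h>
#include <stdio.h>
#include <limits.h>

typedef uint32_t u32;

/* same bodies as ADD_64 / ADD_EXTEND_64 above */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

int main(void)
{
	u32 hi = 0, lo = UINT_MAX - 1;

	/* 0x00000000:fffffffe + 3 must carry into the high word */
	ADD_EXTEND_64(hi, lo, 3u);
	printf("hi=%u lo=%u\n", hi, lo);	/* hi=1 lo=1 */

	/* adding a full hi:lo pair carries the same way */
	ADD_64(hi, 0u, lo, UINT_MAX);
	printf("hi=%u lo=%u\n", hi, lo);	/* hi=2 lo=0 */
	return 0;
}
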
230 | /* Forward declaration */ | 374 | /* forward */ |
231 | struct bnx2x; | 375 | struct bnx2x; |
232 | 376 | ||
233 | void bnx2x_stats_init(struct bnx2x *bp); | 377 | void bnx2x_stats_init(struct bnx2x *bp); |
234 | 378 | ||
235 | extern const u32 dmae_reg_go_c[]; | 379 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); |
236 | 380 | ||
237 | #endif /* BNX2X_STATS_H */ | 381 | #endif /* BNX2X_STATS_H */ |