author | Vlad Zolotarov <vladz@broadcom.com> | 2011-06-14 07:33:44 -0400 |
---|---|---|
committer | David S. Miller <davem@conan.davemloft.net> | 2011-06-15 10:56:37 -0400 |
commit | 619c5cb6885b936c44ae1422ef805b69c6291485 (patch) | |
tree | 4604ae08f1eb12c6ad1f65106879c2e73946ae12 /drivers | |
parent | 042181f5aa8833a8918e1a91cfaf292146ffc62c (diff) |
New 7.0 FW: bnx2x, cnic, bnx2i, bnx2fc
New FW/HSI (7.0):
- Added support for the 578xx chips.
- Improved HSI - far less direct driver access to FW internal memory is
needed.
New implementation of the HSI handling layer in bnx2x (bnx2x_sp.c):
- Introduced chip-dependent objects with chip-independent interfaces for
configuring MACs, multicast addresses, Rx mode, the indirection table,
fast-path queues and function initialization/cleanup.
- Object functionality is based on private function pointers, which allows
not only per-chip but also PF/VF differentiation while still preserving
the same interface towards the driver.
- The object interface is not affected by HSI changes that do not require
new parameters, keeping the code outside bnx2x_sp.c invariant with regard
to such HSI changes.
Changes in the cnic, bnx2fc and bnx2i modules due to the new HSI.
Signed-off-by: Vladislav Zolotarov <vladz@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@conan.davemloft.net>
Diffstat (limited to 'drivers')
32 files changed, 20047 insertions, 9481 deletions
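Before the diff itself, here is a hedged sketch of the "chip-dependent objects behind a chip-independent interface" pattern the commit message describes for bnx2x_sp.c. All names below are illustrative, not the driver's actual bnx2x_vlan_mac_obj API; only the function-pointer idea is taken from the commit message.

```c
#include <stdio.h>

/* Illustrative object: one public interface, per-chip behaviour selected
 * through a private function pointer at init time. */
struct mac_obj {
	int (*config_mac)(struct mac_obj *o, const unsigned char *mac, int add);
};

static int e1x_config_mac(struct mac_obj *o, const unsigned char *mac, int add)
{
	(void)o;
	printf("%s %02x:.. via mac_configuration_cmd ramrod (E1x)\n",
	       add ? "add" : "del", mac[0]);
	return 0;
}

static int e2_config_mac(struct mac_obj *o, const unsigned char *mac, int add)
{
	(void)o;
	printf("%s %02x:.. via classification-rules ramrod (E2/578xx)\n",
	       add ? "add" : "del", mac[0]);
	return 0;
}

static void mac_obj_init(struct mac_obj *o, int is_e1x)
{
	o->config_mac = is_e1x ? e1x_config_mac : e2_config_mac;
}

int main(void)
{
	const unsigned char mac[6] = { 0x00, 0x10, 0x18, 0x01, 0x02, 0x03 };
	struct mac_obj obj;

	mac_obj_init(&obj, 0);		/* e.g. a 57800 */
	obj.config_mac(&obj, mac, 1);	/* same call regardless of chip */

	mac_obj_init(&obj, 1);		/* e.g. a 57711 */
	obj.config_mac(&obj, mac, 1);
	return 0;
}
```

The same shape shows up throughout the diff below: fastpath MAC objects, multicast, Rx-mode and RSS objects all hang off struct bnx2x or struct bnx2x_fastpath and hide the E1x/E2/E3 differences behind one interface.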
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 6d4d6d4e53c6..b4b3abe9e7c4 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -47,11 +47,12 @@ | |||
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | #include <linux/mdio.h> | 49 | #include <linux/mdio.h> |
50 | #include <linux/pci.h> | 50 | |
51 | #include "bnx2x_reg.h" | 51 | #include "bnx2x_reg.h" |
52 | #include "bnx2x_fw_defs.h" | 52 | #include "bnx2x_fw_defs.h" |
53 | #include "bnx2x_hsi.h" | 53 | #include "bnx2x_hsi.h" |
54 | #include "bnx2x_link.h" | 54 | #include "bnx2x_link.h" |
55 | #include "bnx2x_sp.h" | ||
55 | #include "bnx2x_dcb.h" | 56 | #include "bnx2x_dcb.h" |
56 | #include "bnx2x_stats.h" | 57 | #include "bnx2x_stats.h" |
57 | 58 | ||
@@ -80,6 +81,12 @@ do { \ | |||
80 | ##__args); \ | 81 | ##__args); \ |
81 | } while (0) | 82 | } while (0) |
82 | 83 | ||
84 | #define DP_CONT(__mask, __fmt, __args...) \ | ||
85 | do { \ | ||
86 | if (bp->msg_enable & (__mask)) \ | ||
87 | pr_cont(__fmt, ##__args); \ | ||
88 | } while (0) | ||
89 | |||
83 | /* errors debug print */ | 90 | /* errors debug print */ |
84 | #define BNX2X_DBG_ERR(__fmt, __args...) \ | 91 | #define BNX2X_DBG_ERR(__fmt, __args...) \ |
85 | do { \ | 92 | do { \ |
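The new DP_CONT() macro above uses the same msg_enable gating as DP(), but continues the current console line (pr_cont()) instead of starting a new prefixed one. A minimal userspace sketch of the pattern, with a stand-in mask instead of the real bp->msg_enable and netif_msg_* flags:

```c
#include <stdio.h>

static unsigned int msg_enable = 0x1;	/* pretend "RX" messages are enabled */
#define MSG_RX 0x1

/* Mirror of the DP/DP_CONT split: DP starts a prefixed line,
 * DP_CONT appends to it when the same mask bit is set. */
#define DP(mask, fmt, ...) \
	do { \
		if (msg_enable & (mask)) \
			printf("bnx2x: " fmt, ##__VA_ARGS__); \
	} while (0)

#define DP_CONT(mask, fmt, ...) \
	do { \
		if (msg_enable & (mask)) \
			printf(fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	DP(MSG_RX, "queue %d:", 3);	 /* starts the line */
	DP_CONT(MSG_RX, " cons=%u", 17); /* continues it */
	DP_CONT(MSG_RX, " prod=%u\n", 42);
	return 0;
}
```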
@@ -111,7 +118,9 @@ do { \ | |||
111 | dev_info(&bp->pdev->dev, __fmt, ##__args); \ | 118 | dev_info(&bp->pdev->dev, __fmt, ##__args); \ |
112 | } while (0) | 119 | } while (0) |
113 | 120 | ||
114 | void bnx2x_panic_dump(struct bnx2x *bp); | 121 | #define BNX2X_MAC_FMT "%pM" |
122 | #define BNX2X_MAC_PRN_LIST(mac) (mac) | ||
123 | |||
115 | 124 | ||
116 | #ifdef BNX2X_STOP_ON_ERROR | 125 | #ifdef BNX2X_STOP_ON_ERROR |
117 | #define bnx2x_panic() do { \ | 126 | #define bnx2x_panic() do { \ |
@@ -233,11 +242,11 @@ void bnx2x_panic_dump(struct bnx2x *bp); | |||
233 | * | 242 | * |
234 | */ | 243 | */ |
235 | /* iSCSI L2 */ | 244 | /* iSCSI L2 */ |
236 | #define BNX2X_ISCSI_ETH_CL_ID 17 | 245 | #define BNX2X_ISCSI_ETH_CL_ID_IDX 1 |
237 | #define BNX2X_ISCSI_ETH_CID 17 | 246 | #define BNX2X_ISCSI_ETH_CID 17 |
238 | 247 | ||
239 | /* FCoE L2 */ | 248 | /* FCoE L2 */ |
240 | #define BNX2X_FCOE_ETH_CL_ID 18 | 249 | #define BNX2X_FCOE_ETH_CL_ID_IDX 2 |
241 | #define BNX2X_FCOE_ETH_CID 18 | 250 | #define BNX2X_FCOE_ETH_CID 18 |
242 | 251 | ||
243 | /** Additional rings budgeting */ | 252 | /** Additional rings budgeting */ |
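The iSCSI/FCoE client IDs stop being hard-coded absolute values (17, 18) and become indices (`*_CL_ID_IDX`), presumably added to a per-function base client ID (a `cnic_base_cl_id` field appears further down in struct bnx2x). A hedged sketch of that base-plus-index scheme; the helper name is illustrative, not the driver's actual API:

```c
#include <stdio.h>

#define BNX2X_ISCSI_ETH_CL_ID_IDX 1
#define BNX2X_FCOE_ETH_CL_ID_IDX  2

/* Illustrative only: resolve a CNIC L2 client ID from the function's
 * base client ID plus the fixed index from the header. */
static unsigned int cnic_eth_cl_id(unsigned int cnic_base_cl_id,
				   unsigned int cl_idx)
{
	return cnic_base_cl_id + cl_idx;
}

int main(void)
{
	unsigned int base = 16;	/* hypothetical per-function base */

	printf("iSCSI cl_id = %u\n",
	       cnic_eth_cl_id(base, BNX2X_ISCSI_ETH_CL_ID_IDX));
	printf("FCoE  cl_id = %u\n",
	       cnic_eth_cl_id(base, BNX2X_FCOE_ETH_CL_ID_IDX));
	return 0;
}
```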
@@ -283,44 +292,73 @@ union db_prod { | |||
283 | 292 | ||
284 | 293 | ||
285 | /* MC hsi */ | 294 | /* MC hsi */ |
286 | #define BCM_PAGE_SHIFT 12 | 295 | #define BCM_PAGE_SHIFT 12 |
287 | #define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT) | 296 | #define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT) |
288 | #define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1)) | 297 | #define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1)) |
289 | #define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK) | 298 | #define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK) |
290 | 299 | ||
291 | #define PAGES_PER_SGE_SHIFT 0 | 300 | #define PAGES_PER_SGE_SHIFT 0 |
292 | #define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) | 301 | #define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) |
293 | #define SGE_PAGE_SIZE PAGE_SIZE | 302 | #define SGE_PAGE_SIZE PAGE_SIZE |
294 | #define SGE_PAGE_SHIFT PAGE_SHIFT | 303 | #define SGE_PAGE_SHIFT PAGE_SHIFT |
295 | #define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) | 304 | #define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) |
296 | 305 | ||
297 | /* SGE ring related macros */ | 306 | /* SGE ring related macros */ |
298 | #define NUM_RX_SGE_PAGES 2 | 307 | #define NUM_RX_SGE_PAGES 2 |
299 | #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) | 308 | #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) |
300 | #define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) | 309 | #define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) |
301 | /* RX_SGE_CNT is promised to be a power of 2 */ | 310 | /* RX_SGE_CNT is promised to be a power of 2 */ |
302 | #define RX_SGE_MASK (RX_SGE_CNT - 1) | 311 | #define RX_SGE_MASK (RX_SGE_CNT - 1) |
303 | #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) | 312 | #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) |
304 | #define MAX_RX_SGE (NUM_RX_SGE - 1) | 313 | #define MAX_RX_SGE (NUM_RX_SGE - 1) |
305 | #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ | 314 | #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ |
306 | (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) | 315 | (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) |
307 | #define RX_SGE(x) ((x) & MAX_RX_SGE) | 316 | #define RX_SGE(x) ((x) & MAX_RX_SGE) |
317 | |||
318 | /* Manipulate a bit vector defined as an array of u64 */ | ||
308 | 319 | ||
309 | /* SGE producer mask related macros */ | ||
310 | /* Number of bits in one sge_mask array element */ | 320 | /* Number of bits in one sge_mask array element */ |
311 | #define RX_SGE_MASK_ELEM_SZ 64 | 321 | #define BIT_VEC64_ELEM_SZ 64 |
312 | #define RX_SGE_MASK_ELEM_SHIFT 6 | 322 | #define BIT_VEC64_ELEM_SHIFT 6 |
313 | #define RX_SGE_MASK_ELEM_MASK ((u64)RX_SGE_MASK_ELEM_SZ - 1) | 323 | #define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1) |
324 | |||
325 | |||
326 | #define __BIT_VEC64_SET_BIT(el, bit) \ | ||
327 | do { \ | ||
328 | el = ((el) | ((u64)0x1 << (bit))); \ | ||
329 | } while (0) | ||
330 | |||
331 | #define __BIT_VEC64_CLEAR_BIT(el, bit) \ | ||
332 | do { \ | ||
333 | el = ((el) & (~((u64)0x1 << (bit)))); \ | ||
334 | } while (0) | ||
335 | |||
336 | |||
337 | #define BIT_VEC64_SET_BIT(vec64, idx) \ | ||
338 | __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ | ||
339 | (idx) & BIT_VEC64_ELEM_MASK) | ||
340 | |||
341 | #define BIT_VEC64_CLEAR_BIT(vec64, idx) \ | ||
342 | __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ | ||
343 | (idx) & BIT_VEC64_ELEM_MASK) | ||
344 | |||
345 | #define BIT_VEC64_TEST_BIT(vec64, idx) \ | ||
346 | (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \ | ||
347 | ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1) | ||
314 | 348 | ||
315 | /* Creates a bitmask of all ones in less significant bits. | 349 | /* Creates a bitmask of all ones in less significant bits. |
316 | idx - index of the most significant bit in the created mask */ | 350 | idx - index of the most significant bit in the created mask */ |
317 | #define RX_SGE_ONES_MASK(idx) \ | 351 | #define BIT_VEC64_ONES_MASK(idx) \ |
318 | (((u64)0x1 << (((idx) & RX_SGE_MASK_ELEM_MASK) + 1)) - 1) | 352 | (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1) |
319 | #define RX_SGE_MASK_ELEM_ONE_MASK ((u64)(~0)) | 353 | #define BIT_VEC64_ELEM_ONE_MASK ((u64)(~0)) |
354 | |||
355 | /*******************************************************/ | ||
356 | |||
357 | |||
320 | 358 | ||
321 | /* Number of u64 elements in SGE mask array */ | 359 | /* Number of u64 elements in SGE mask array */ |
322 | #define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \ | 360 | #define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \ |
323 | RX_SGE_MASK_ELEM_SZ) | 361 | BIT_VEC64_ELEM_SZ) |
324 | #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) | 362 | #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) |
325 | #define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) | 363 | #define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) |
326 | 364 | ||
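The SGE-mask bit helpers are generalized in this hunk into chip-agnostic BIT_VEC64_* macros that work on any array of u64 words: the upper bits of the index select the word, the low 6 bits select the bit within it. A small standalone demo of the same macros (copied from the hunk, with u64 spelled as a stdint type):

```c
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

#define BIT_VEC64_ELEM_SZ	64
#define BIT_VEC64_ELEM_SHIFT	6
#define BIT_VEC64_ELEM_MASK	((u64)BIT_VEC64_ELEM_SZ - 1)

#define __BIT_VEC64_SET_BIT(el, bit) \
	do { \
		el = ((el) | ((u64)0x1 << (bit))); \
	} while (0)

#define __BIT_VEC64_CLEAR_BIT(el, bit) \
	do { \
		el = ((el) & (~((u64)0x1 << (bit)))); \
	} while (0)

#define BIT_VEC64_SET_BIT(vec64, idx) \
	__BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
			    (idx) & BIT_VEC64_ELEM_MASK)

#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
	__BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
			      (idx) & BIT_VEC64_ELEM_MASK)

#define BIT_VEC64_TEST_BIT(vec64, idx) \
	(((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
	  ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)

int main(void)
{
	u64 mask[4] = { 0 };	/* 256-bit vector, e.g. an SGE mask */

	BIT_VEC64_SET_BIT(mask, 70);	/* word 1, bit 6 */
	printf("bit 70: %d (word1=0x%llx)\n",
	       (int)BIT_VEC64_TEST_BIT(mask, 70),
	       (unsigned long long)mask[1]);

	BIT_VEC64_CLEAR_BIT(mask, 70);
	printf("bit 70 after clear: %d\n", (int)BIT_VEC64_TEST_BIT(mask, 70));
	return 0;
}
```

The SGE_MASK_SET_BIT/CLEAR_BIT wrappers removed later in this diff were the fp->sge_mask-specific versions of exactly these operations.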
@@ -331,7 +369,30 @@ union host_hc_status_block { | |||
331 | struct host_hc_status_block_e2 *e2_sb; | 369 | struct host_hc_status_block_e2 *e2_sb; |
332 | }; | 370 | }; |
333 | 371 | ||
372 | struct bnx2x_agg_info { | ||
373 | /* | ||
374 | * First aggregation buffer is an skb, the following - are pages. | ||
375 | * We will preallocate the skbs for each aggregation when | ||
376 | * we open the interface and will replace the BD at the consumer | ||
377 | * with this one when we receive the TPA_START CQE in order to | ||
378 | * keep the Rx BD ring consistent. | ||
379 | */ | ||
380 | struct sw_rx_bd first_buf; | ||
381 | u8 tpa_state; | ||
382 | #define BNX2X_TPA_START 1 | ||
383 | #define BNX2X_TPA_STOP 2 | ||
384 | #define BNX2X_TPA_ERROR 3 | ||
385 | u8 placement_offset; | ||
386 | u16 parsing_flags; | ||
387 | u16 vlan_tag; | ||
388 | u16 len_on_bd; | ||
389 | }; | ||
390 | |||
391 | #define Q_STATS_OFFSET32(stat_name) \ | ||
392 | (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) | ||
393 | |||
334 | struct bnx2x_fastpath { | 394 | struct bnx2x_fastpath { |
395 | struct bnx2x *bp; /* parent */ | ||
335 | 396 | ||
336 | #define BNX2X_NAPI_WEIGHT 128 | 397 | #define BNX2X_NAPI_WEIGHT 128 |
337 | struct napi_struct napi; | 398 | struct napi_struct napi; |
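Q_STATS_OFFSET32(), added just above, converts a field of the per-queue stats structure into a 32-bit-word index (byte offset divided by 4), which is how the statistics code addresses individual counters. A minimal illustration with a made-up stats struct; the real bnx2x_eth_q_stats layout lives in the driver and is not reproduced here:

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for struct bnx2x_eth_q_stats: only the
 * offset-to-dword-index idea matters for this sketch. */
struct fake_eth_q_stats {
	uint32_t rx_bytes_lo;
	uint32_t rx_bytes_hi;
	uint32_t rx_ucast_pkts;
	uint32_t rx_err_discard_pkt;
};

#define Q_STATS_OFFSET32(stat_name) \
	(offsetof(struct fake_eth_q_stats, stat_name) / 4)

int main(void)
{
	/* Each field is one dword, so the indices come out 0, 1, 2, 3. */
	printf("rx_bytes_lo        -> dword %zu\n", Q_STATS_OFFSET32(rx_bytes_lo));
	printf("rx_ucast_pkts      -> dword %zu\n", Q_STATS_OFFSET32(rx_ucast_pkts));
	printf("rx_err_discard_pkt -> dword %zu\n", Q_STATS_OFFSET32(rx_err_discard_pkt));
	return 0;
}
```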
@@ -366,23 +427,13 @@ struct bnx2x_fastpath { | |||
366 | 427 | ||
367 | u64 sge_mask[RX_SGE_MASK_LEN]; | 428 | u64 sge_mask[RX_SGE_MASK_LEN]; |
368 | 429 | ||
369 | int state; | 430 | u32 cid; |
370 | #define BNX2X_FP_STATE_CLOSED 0 | ||
371 | #define BNX2X_FP_STATE_IRQ 0x80000 | ||
372 | #define BNX2X_FP_STATE_OPENING 0x90000 | ||
373 | #define BNX2X_FP_STATE_OPEN 0xa0000 | ||
374 | #define BNX2X_FP_STATE_HALTING 0xb0000 | ||
375 | #define BNX2X_FP_STATE_HALTED 0xc0000 | ||
376 | #define BNX2X_FP_STATE_TERMINATING 0xd0000 | ||
377 | #define BNX2X_FP_STATE_TERMINATED 0xe0000 | ||
378 | 431 | ||
379 | u8 index; /* number in fp array */ | 432 | u8 index; /* number in fp array */ |
380 | u8 cl_id; /* eth client id */ | 433 | u8 cl_id; /* eth client id */ |
381 | u8 cl_qzone_id; | 434 | u8 cl_qzone_id; |
382 | u8 fw_sb_id; /* status block number in FW */ | 435 | u8 fw_sb_id; /* status block number in FW */ |
383 | u8 igu_sb_id; /* status block number in HW */ | 436 | u8 igu_sb_id; /* status block number in HW */ |
384 | u32 cid; | ||
385 | |||
386 | union db_prod tx_db; | 437 | union db_prod tx_db; |
387 | 438 | ||
388 | u16 tx_pkt_prod; | 439 | u16 tx_pkt_prod; |
@@ -401,24 +452,20 @@ struct bnx2x_fastpath { | |||
401 | /* The last maximal completed SGE */ | 452 | /* The last maximal completed SGE */ |
402 | u16 last_max_sge; | 453 | u16 last_max_sge; |
403 | __le16 *rx_cons_sb; | 454 | __le16 *rx_cons_sb; |
404 | |||
405 | unsigned long tx_pkt, | 455 | unsigned long tx_pkt, |
406 | rx_pkt, | 456 | rx_pkt, |
407 | rx_calls; | 457 | rx_calls; |
408 | 458 | ||
409 | /* TPA related */ | 459 | /* TPA related */ |
410 | struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H]; | 460 | struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; |
411 | u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H]; | ||
412 | #define BNX2X_TPA_START 1 | ||
413 | #define BNX2X_TPA_STOP 2 | ||
414 | u8 disable_tpa; | 461 | u8 disable_tpa; |
415 | #ifdef BNX2X_STOP_ON_ERROR | 462 | #ifdef BNX2X_STOP_ON_ERROR |
416 | u64 tpa_queue_used; | 463 | u64 tpa_queue_used; |
417 | #endif | 464 | #endif |
418 | 465 | ||
419 | struct tstorm_per_client_stats old_tclient; | 466 | struct tstorm_per_queue_stats old_tclient; |
420 | struct ustorm_per_client_stats old_uclient; | 467 | struct ustorm_per_queue_stats old_uclient; |
421 | struct xstorm_per_client_stats old_xclient; | 468 | struct xstorm_per_queue_stats old_xclient; |
422 | struct bnx2x_eth_q_stats eth_q_stats; | 469 | struct bnx2x_eth_q_stats eth_q_stats; |
423 | 470 | ||
424 | /* The size is calculated using the following: | 471 | /* The size is calculated using the following: |
@@ -427,7 +474,13 @@ struct bnx2x_fastpath { | |||
427 | 4 (for the digits and to make it DWORD aligned) */ | 474 | 4 (for the digits and to make it DWORD aligned) */ |
428 | #define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) | 475 | #define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) |
429 | char name[FP_NAME_SIZE]; | 476 | char name[FP_NAME_SIZE]; |
430 | struct bnx2x *bp; /* parent */ | 477 | |
478 | /* MACs object */ | ||
479 | struct bnx2x_vlan_mac_obj mac_obj; | ||
480 | |||
481 | /* Queue State object */ | ||
482 | struct bnx2x_queue_sp_obj q_obj; | ||
483 | |||
431 | }; | 484 | }; |
432 | 485 | ||
433 | #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) | 486 | #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) |
@@ -435,11 +488,13 @@ struct bnx2x_fastpath { | |||
435 | /* Use 2500 as a mini-jumbo MTU for FCoE */ | 488 | /* Use 2500 as a mini-jumbo MTU for FCoE */ |
436 | #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 | 489 | #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 |
437 | 490 | ||
438 | #ifdef BCM_CNIC | 491 | /* FCoE L2 `fastpath' entry is right after the eth entries */ |
439 | /* FCoE L2 `fastpath' is right after the eth entries */ | ||
440 | #define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) | 492 | #define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) |
441 | #define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) | 493 | #define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) |
442 | #define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) | 494 | #define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) |
495 | |||
496 | |||
497 | #ifdef BCM_CNIC | ||
443 | #define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) | 498 | #define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) |
444 | #define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) | 499 | #define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) |
445 | #else | 500 | #else |
@@ -449,77 +504,68 @@ struct bnx2x_fastpath { | |||
449 | 504 | ||
450 | 505 | ||
451 | /* MC hsi */ | 506 | /* MC hsi */ |
452 | #define MAX_FETCH_BD 13 /* HW max BDs per packet */ | 507 | #define MAX_FETCH_BD 13 /* HW max BDs per packet */ |
453 | #define RX_COPY_THRESH 92 | 508 | #define RX_COPY_THRESH 92 |
454 | 509 | ||
455 | #define NUM_TX_RINGS 16 | 510 | #define NUM_TX_RINGS 16 |
456 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) | 511 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) |
457 | #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) | 512 | #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) |
458 | #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) | 513 | #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) |
459 | #define MAX_TX_BD (NUM_TX_BD - 1) | 514 | #define MAX_TX_BD (NUM_TX_BD - 1) |
460 | #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) | 515 | #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) |
461 | #define INIT_JUMBO_TX_RING_SIZE MAX_TX_AVAIL | ||
462 | #define INIT_TX_RING_SIZE MAX_TX_AVAIL | ||
463 | #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ | 516 | #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ |
464 | (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) | 517 | (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) |
465 | #define TX_BD(x) ((x) & MAX_TX_BD) | 518 | #define TX_BD(x) ((x) & MAX_TX_BD) |
466 | #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) | 519 | #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) |
467 | 520 | ||
468 | /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ | 521 | /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ |
469 | #define NUM_RX_RINGS 8 | 522 | #define NUM_RX_RINGS 8 |
470 | #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) | 523 | #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) |
471 | #define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) | 524 | #define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) |
472 | #define RX_DESC_MASK (RX_DESC_CNT - 1) | 525 | #define RX_DESC_MASK (RX_DESC_CNT - 1) |
473 | #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) | 526 | #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) |
474 | #define MAX_RX_BD (NUM_RX_BD - 1) | 527 | #define MAX_RX_BD (NUM_RX_BD - 1) |
475 | #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) | 528 | #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) |
476 | #define MIN_RX_SIZE_TPA 72 | 529 | #define MIN_RX_AVAIL 128 |
477 | #define MIN_RX_SIZE_NONTPA 10 | 530 | |
478 | #define INIT_JUMBO_RX_RING_SIZE MAX_RX_AVAIL | 531 | #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ |
479 | #define INIT_RX_RING_SIZE MAX_RX_AVAIL | 532 | ETH_MIN_RX_CQES_WITH_TPA_E1 : \ |
533 | ETH_MIN_RX_CQES_WITH_TPA_E1H_E2) | ||
534 | #define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA | ||
535 | #define MIN_RX_SIZE_TPA (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL)) | ||
536 | #define MIN_RX_SIZE_NONTPA (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\ | ||
537 | MIN_RX_AVAIL)) | ||
538 | |||
480 | #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ | 539 | #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ |
481 | (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) | 540 | (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) |
482 | #define RX_BD(x) ((x) & MAX_RX_BD) | 541 | #define RX_BD(x) ((x) & MAX_RX_BD) |
483 | 542 | ||
484 | /* As long as CQE is 4 times bigger than BD entry we have to allocate | 543 | /* |
485 | 4 times more pages for CQ ring in order to keep it balanced with | 544 | * As long as CQE is X times bigger than BD entry we have to allocate X times |
486 | BD ring */ | 545 | * more pages for CQ ring in order to keep it balanced with BD ring |
487 | #define NUM_RCQ_RINGS (NUM_RX_RINGS * 4) | 546 | */ |
547 | #define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) | ||
548 | #define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) | ||
488 | #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) | 549 | #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) |
489 | #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) | 550 | #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) |
490 | #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) | 551 | #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) |
491 | #define MAX_RCQ_BD (NUM_RCQ_BD - 1) | 552 | #define MAX_RCQ_BD (NUM_RCQ_BD - 1) |
492 | #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) | 553 | #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) |
493 | #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ | 554 | #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ |
494 | (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) | 555 | (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) |
495 | #define RCQ_BD(x) ((x) & MAX_RCQ_BD) | 556 | #define RCQ_BD(x) ((x) & MAX_RCQ_BD) |
496 | 557 | ||
497 | 558 | ||
498 | /* This is needed for determining of last_max */ | 559 | /* This is needed for determining of last_max */ |
499 | #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) | 560 | #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) |
500 | 561 | #define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b)) | |
501 | #define __SGE_MASK_SET_BIT(el, bit) \ | ||
502 | do { \ | ||
503 | el = ((el) | ((u64)0x1 << (bit))); \ | ||
504 | } while (0) | ||
505 | |||
506 | #define __SGE_MASK_CLEAR_BIT(el, bit) \ | ||
507 | do { \ | ||
508 | el = ((el) & (~((u64)0x1 << (bit)))); \ | ||
509 | } while (0) | ||
510 | |||
511 | #define SGE_MASK_SET_BIT(fp, idx) \ | ||
512 | __SGE_MASK_SET_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \ | ||
513 | ((idx) & RX_SGE_MASK_ELEM_MASK)) | ||
514 | 562 | ||
515 | #define SGE_MASK_CLEAR_BIT(fp, idx) \ | ||
516 | __SGE_MASK_CLEAR_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \ | ||
517 | ((idx) & RX_SGE_MASK_ELEM_MASK)) | ||
518 | 563 | ||
564 | #define BNX2X_SWCID_SHIFT 17 | ||
565 | #define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) | ||
519 | 566 | ||
520 | /* used on a CID received from the HW */ | 567 | /* used on a CID received from the HW */ |
521 | #define SW_CID(x) (le32_to_cpu(x) & \ | 568 | #define SW_CID(x) (le32_to_cpu(x) & BNX2X_SWCID_MASK) |
522 | (COMMON_RAMROD_ETH_RX_CQE_CID >> 7)) | ||
523 | #define CQE_CMD(x) (le32_to_cpu(x) >> \ | 569 | #define CQE_CMD(x) (le32_to_cpu(x) >> \ |
524 | COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT) | 570 | COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT) |
525 | 571 | ||
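Two details in the hunk above are worth unpacking. NUM_RCQ_RINGS is now derived from CQE_BD_REL (the ratio of CQE size to RX BD size) instead of a hard-coded 4, and SW_CID() now keeps the low 17 bits via BNX2X_SWCID_MASK rather than an HSI-derived constant. The NEXT_*_IDX() macros encode the "skip the last descriptor(s) of a page" rule for the ring's next-page pointer BD. A standalone demo of the wrap arithmetic, using the TX macro shape from the hunk and an assumed 16-byte TX descriptor on a 4 KiB page:

```c
#include <stdio.h>

#define BCM_PAGE_SIZE	4096
#define TX_DESC_SZ	16	/* assumption: sizeof(union eth_tx_bd_types) */

#define TX_DESC_CNT	(BCM_PAGE_SIZE / TX_DESC_SZ)	/* 256 per page */
#define MAX_TX_DESC_CNT	(TX_DESC_CNT - 1)		/* last slot = next-page BD */

/* Same shape as the driver's NEXT_TX_IDX(): when the producer reaches the
 * last usable descriptor of a page it jumps over the next-page pointer BD. */
#define NEXT_TX_IDX(x)	((((x) & MAX_TX_DESC_CNT) == \
			  (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)

int main(void)
{
	unsigned int idx = MAX_TX_DESC_CNT - 2;	/* two before the page end */
	int i;

	for (i = 0; i < 3; i++) {
		printf("idx %u -> next %u\n", idx, NEXT_TX_IDX(idx));
		idx = NEXT_TX_IDX(idx);
	}

	/* CQE/BD page balancing: the old comment pinned the factor at 4
	 * with 8-byte RX BDs, which would mean a 32-byte CQE. */
	printf("CQE_BD_REL example: 32 / 8 = %d CQ pages per BD page\n", 32 / 8);

	/* SW_CID(): keep only the low BNX2X_SWCID_SHIFT (17) bits. */
	printf("SW_CID(0x%x) = 0x%x\n", 0x123456u, 0x123456u & ((1u << 17) - 1));
	return 0;
}
```

The middle line of output shows index 254 jumping straight to 256: slot 255 is the next-page pointer and never carries a packet BD.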
@@ -529,6 +575,9 @@ struct bnx2x_fastpath { | |||
529 | 575 | ||
530 | #define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ | 576 | #define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ |
531 | #define BNX2X_DB_SHIFT 7 /* 128 bytes*/ | 577 | #define BNX2X_DB_SHIFT 7 /* 128 bytes*/ |
578 | #if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) | ||
579 | #error "Min DB doorbell stride is 8" | ||
580 | #endif | ||
532 | #define DPM_TRIGER_TYPE 0x40 | 581 | #define DPM_TRIGER_TYPE 0x40 |
533 | #define DOORBELL(bp, cid, val) \ | 582 | #define DOORBELL(bp, cid, val) \ |
534 | do { \ | 583 | do { \ |
@@ -557,13 +606,11 @@ struct bnx2x_fastpath { | |||
557 | 606 | ||
558 | 607 | ||
559 | /* stuff added to make the code fit 80Col */ | 608 | /* stuff added to make the code fit 80Col */ |
560 | 609 | #define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) | |
561 | #define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) | 610 | #define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG) |
562 | 611 | #define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG) | |
563 | #define TPA_TYPE_START ETH_FAST_PATH_RX_CQE_START_FLG | 612 | #define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD) |
564 | #define TPA_TYPE_END ETH_FAST_PATH_RX_CQE_END_FLG | 613 | #define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH) |
565 | #define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \ | ||
566 | (TPA_TYPE_START | TPA_TYPE_END)) | ||
567 | 614 | ||
568 | #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | 615 | #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG |
569 | 616 | ||
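With the 7.0 CQE layout the driver no longer infers TPA start/stop from flag bits (the removed TPA_TYPE_START/END); each CQE carries an explicit type that the new CQE_TYPE_START/STOP/SLOW/FAST predicates test. A hedged sketch of the resulting dispatch shape — the enum values here are placeholders for the firmware's RX_ETH_CQE_TYPE_* constants, not their real encodings:

```c
#include <stdio.h>

/* Placeholder values standing in for RX_ETH_CQE_TYPE_ETH_* from the
 * 7.0 HSI headers. */
enum cqe_type {
	CQE_TYPE_ETH_FASTPATH  = 0,	/* regular received packet */
	CQE_TYPE_ETH_START_AGG = 1,	/* TPA aggregation start */
	CQE_TYPE_ETH_STOP_AGG  = 2,	/* TPA aggregation stop */
	CQE_TYPE_ETH_RAMROD    = 3,	/* slow-path completion */
};

static void handle_cqe(enum cqe_type type)
{
	switch (type) {
	case CQE_TYPE_ETH_START_AGG:
		printf("tpa_start: stash the skb, remember placement offset\n");
		break;
	case CQE_TYPE_ETH_STOP_AGG:
		printf("tpa_stop: fill SGE pages, hand the skb to the stack\n");
		break;
	case CQE_TYPE_ETH_RAMROD:
		printf("slow path: complete the pending ramrod\n");
		break;
	case CQE_TYPE_ETH_FASTPATH:
	default:
		printf("fast path: regular rx packet\n");
		break;
	}
}

int main(void)
{
	handle_cqe(CQE_TYPE_ETH_START_AGG);
	handle_cqe(CQE_TYPE_ETH_FASTPATH);
	return 0;
}
```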
@@ -590,12 +637,30 @@ struct bnx2x_fastpath { | |||
590 | #define BNX2X_RX_SUM_FIX(cqe) \ | 637 | #define BNX2X_RX_SUM_FIX(cqe) \ |
591 | BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) | 638 | BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) |
592 | 639 | ||
593 | #define U_SB_ETH_RX_CQ_INDEX 1 | 640 | |
594 | #define U_SB_ETH_RX_BD_INDEX 2 | 641 | #define FP_USB_FUNC_OFF \ |
595 | #define C_SB_ETH_TX_CQ_INDEX 5 | 642 | offsetof(struct cstorm_status_block_u, func) |
643 | #define FP_CSB_FUNC_OFF \ | ||
644 | offsetof(struct cstorm_status_block_c, func) | ||
645 | |||
646 | #define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */ | ||
647 | /* (HC_INDEX_U_TOE_RX_CQ_CONS) */ | ||
648 | #define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */ | ||
649 | /* (HC_INDEX_U_ETH_RX_CQ_CONS) */ | ||
650 | #define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */ | ||
651 | /* (HC_INDEX_U_ETH_RX_BD_CONS) */ | ||
652 | |||
653 | #define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */ | ||
654 | /* (HC_INDEX_C_TOE_TX_CQ_CONS) */ | ||
655 | #define HC_INDEX_ETH_TX_CQ_CONS 5 /* Formerly Cstorm ETH CQ index */ | ||
656 | /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ | ||
657 | |||
658 | #define U_SB_ETH_RX_CQ_INDEX HC_INDEX_ETH_RX_CQ_CONS | ||
659 | #define U_SB_ETH_RX_BD_INDEX HC_INDEX_ETH_RX_BD_CONS | ||
660 | #define C_SB_ETH_TX_CQ_INDEX HC_INDEX_ETH_TX_CQ_CONS | ||
596 | 661 | ||
597 | #define BNX2X_RX_SB_INDEX \ | 662 | #define BNX2X_RX_SB_INDEX \ |
598 | (&fp->sb_index_values[U_SB_ETH_RX_CQ_INDEX]) | 663 | (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) |
599 | 664 | ||
600 | #define BNX2X_TX_SB_INDEX \ | 665 | #define BNX2X_TX_SB_INDEX \ |
601 | (&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX]) | 666 | (&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX]) |
@@ -615,29 +680,53 @@ struct bnx2x_common { | |||
615 | #define CHIP_NUM_57711 0x164f | 680 | #define CHIP_NUM_57711 0x164f |
616 | #define CHIP_NUM_57711E 0x1650 | 681 | #define CHIP_NUM_57711E 0x1650 |
617 | #define CHIP_NUM_57712 0x1662 | 682 | #define CHIP_NUM_57712 0x1662 |
618 | #define CHIP_NUM_57712E 0x1663 | 683 | #define CHIP_NUM_57712_MF 0x1663 |
684 | #define CHIP_NUM_57713 0x1651 | ||
685 | #define CHIP_NUM_57713E 0x1652 | ||
686 | #define CHIP_NUM_57800 0x168a | ||
687 | #define CHIP_NUM_57800_MF 0x16a5 | ||
688 | #define CHIP_NUM_57810 0x168e | ||
689 | #define CHIP_NUM_57810_MF 0x16ae | ||
690 | #define CHIP_NUM_57840 0x168d | ||
691 | #define CHIP_NUM_57840_MF 0x16ab | ||
619 | #define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) | 692 | #define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) |
620 | #define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) | 693 | #define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) |
621 | #define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) | 694 | #define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) |
622 | #define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712) | 695 | #define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712) |
623 | #define CHIP_IS_57712E(bp) (CHIP_NUM(bp) == CHIP_NUM_57712E) | 696 | #define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF) |
697 | #define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800) | ||
698 | #define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) | ||
699 | #define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) | ||
700 | #define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) | ||
701 | #define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840) | ||
702 | #define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF) | ||
624 | #define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ | 703 | #define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ |
625 | CHIP_IS_57711E(bp)) | 704 | CHIP_IS_57711E(bp)) |
626 | #define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ | 705 | #define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ |
627 | CHIP_IS_57712E(bp)) | 706 | CHIP_IS_57712_MF(bp)) |
707 | #define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \ | ||
708 | CHIP_IS_57800_MF(bp) || \ | ||
709 | CHIP_IS_57810(bp) || \ | ||
710 | CHIP_IS_57810_MF(bp) || \ | ||
711 | CHIP_IS_57840(bp) || \ | ||
712 | CHIP_IS_57840_MF(bp)) | ||
628 | #define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) | 713 | #define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) |
629 | #define IS_E1H_OFFSET (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) | 714 | #define USES_WARPCORE(bp) (CHIP_IS_E3(bp)) |
630 | 715 | #define IS_E1H_OFFSET (!CHIP_IS_E1(bp)) | |
631 | #define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000) | 716 | |
632 | #define CHIP_REV_Ax 0x00000000 | 717 | #define CHIP_REV_SHIFT 12 |
718 | #define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT) | ||
719 | #define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK) | ||
720 | #define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT) | ||
721 | #define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT) | ||
633 | /* assume maximum 5 revisions */ | 722 | /* assume maximum 5 revisions */ |
634 | #define CHIP_REV_IS_SLOW(bp) (CHIP_REV(bp) > 0x00005000) | 723 | #define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000) |
635 | /* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */ | 724 | /* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */ |
636 | #define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \ | 725 | #define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \ |
637 | !(CHIP_REV(bp) & 0x00001000)) | 726 | !(CHIP_REV_VAL(bp) & 0x00001000)) |
638 | /* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */ | 727 | /* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */ |
639 | #define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \ | 728 | #define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \ |
640 | (CHIP_REV(bp) & 0x00001000)) | 729 | (CHIP_REV_VAL(bp) & 0x00001000)) |
641 | 730 | ||
642 | #define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \ | 731 | #define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \ |
643 | ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1)) | 732 | ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1)) |
@@ -645,6 +734,16 @@ struct bnx2x_common { | |||
645 | #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) | 734 | #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) |
646 | #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) | 735 | #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) |
647 | #define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) | 736 | #define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) |
737 | #define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\ | ||
738 | (CHIP_REV_SHIFT + 1)) \ | ||
739 | << CHIP_REV_SHIFT) | ||
740 | #define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? \ | ||
741 | CHIP_REV_SIM(bp) :\ | ||
742 | CHIP_REV_VAL(bp)) | ||
743 | #define CHIP_IS_E3B0(bp) (CHIP_IS_E3(bp) && \ | ||
744 | (CHIP_REV(bp) == CHIP_REV_Bx)) | ||
745 | #define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ | ||
746 | (CHIP_REV(bp) == CHIP_REV_Ax)) | ||
648 | 747 | ||
649 | int flash_size; | 748 | int flash_size; |
650 | #define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ | 749 | #define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ |
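The CHIP_REV_SIM() addition maps the "slow" (emulation/FPGA) revision codes back onto the silicon Ax/Bx values, using the encodings listed in the comments above (emulation A=>0xe, B=>0xc, ...; FPGA A=>0xf, B=>0xd, ...). A quick standalone check of that arithmetic:

```c
#include <stdio.h>

#define CHIP_REV_SHIFT	12
#define CHIP_REV_MASK	(0xF << CHIP_REV_SHIFT)
#define CHIP_REV_Ax	(0x0 << CHIP_REV_SHIFT)
#define CHIP_REV_Bx	(0x1 << CHIP_REV_SHIFT)

/* Same formula as CHIP_REV_SIM(), taking the raw rev field directly. */
static int chip_rev_sim(int rev_val)
{
	return ((CHIP_REV_MASK - rev_val) >> (CHIP_REV_SHIFT + 1))
		<< CHIP_REV_SHIFT;
}

int main(void)
{
	/* Emulation: A => 0xe, B => 0xc;  FPGA: A => 0xf, B => 0xd */
	int codes[] = { 0xe, 0xc, 0xf, 0xd };
	const char *names[] = { "emul A", "emul B", "FPGA A", "FPGA B" };
	int i;

	for (i = 0; i < 4; i++) {
		int sim = chip_rev_sim(codes[i] << CHIP_REV_SHIFT);

		printf("%s (0x%x) -> %s\n", names[i], codes[i],
		       sim == CHIP_REV_Ax ? "CHIP_REV_Ax" :
		       sim == CHIP_REV_Bx ? "CHIP_REV_Bx" : "other");
	}
	return 0;
}
```

All four slow codes collapse to Ax or Bx, which is what lets CHIP_IS_E3A0/E3B0 work on emulation and FPGA parts as well as real silicon.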
@@ -666,7 +765,7 @@ struct bnx2x_common { | |||
666 | #define INT_BLOCK_MODE_NORMAL 0 | 765 | #define INT_BLOCK_MODE_NORMAL 0 |
667 | #define INT_BLOCK_MODE_BW_COMP 2 | 766 | #define INT_BLOCK_MODE_BW_COMP 2 |
668 | #define CHIP_INT_MODE_IS_NBC(bp) \ | 767 | #define CHIP_INT_MODE_IS_NBC(bp) \ |
669 | (CHIP_IS_E2(bp) && \ | 768 | (!CHIP_IS_E1x(bp) && \ |
670 | !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP)) | 769 | !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP)) |
671 | #define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp)) | 770 | #define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp)) |
672 | 771 | ||
@@ -712,19 +811,15 @@ struct bnx2x_port { | |||
712 | 811 | ||
713 | /* end of port */ | 812 | /* end of port */ |
714 | 813 | ||
715 | /* e1h Classification CAM line allocations */ | 814 | #define STATS_OFFSET32(stat_name) \ |
716 | enum { | 815 | (offsetof(struct bnx2x_eth_stats, stat_name) / 4) |
717 | CAM_ETH_LINE = 0, | ||
718 | CAM_ISCSI_ETH_LINE, | ||
719 | CAM_FIP_ETH_LINE, | ||
720 | CAM_FIP_MCAST_LINE, | ||
721 | CAM_MAX_PF_LINE = CAM_FIP_MCAST_LINE | ||
722 | }; | ||
723 | /* number of MACs per function in NIG memory - used for SI mode */ | ||
724 | #define NIG_LLH_FUNC_MEM_SIZE 16 | ||
725 | /* number of entries in NIG_REG_LLHX_FUNC_MEM */ | ||
726 | #define NIG_LLH_FUNC_MEM_MAX_OFFSET 8 | ||
727 | 816 | ||
817 | /* slow path */ | ||
818 | |||
819 | /* slow path work-queue */ | ||
820 | extern struct workqueue_struct *bnx2x_wq; | ||
821 | |||
822 | #define BNX2X_MAX_NUM_OF_VFS 64 | ||
728 | #define BNX2X_VF_ID_INVALID 0xFF | 823 | #define BNX2X_VF_ID_INVALID 0xFF |
729 | 824 | ||
730 | /* | 825 | /* |
@@ -749,8 +844,10 @@ enum { | |||
749 | * L2 queue is supported. the cid for the FCoE L2 queue is always X. | 844 | * L2 queue is supported. the cid for the FCoE L2 queue is always X. |
750 | */ | 845 | */ |
751 | 846 | ||
752 | #define FP_SB_MAX_E1x 16 /* fast-path interrupt contexts E1x */ | 847 | /* fast-path interrupt contexts E1x */ |
753 | #define FP_SB_MAX_E2 16 /* fast-path interrupt contexts E2 */ | 848 | #define FP_SB_MAX_E1x 16 |
849 | /* fast-path interrupt contexts E2 */ | ||
850 | #define FP_SB_MAX_E2 HC_SB_MAX_SB_E2 | ||
754 | 851 | ||
755 | /* | 852 | /* |
756 | * cid_cnt paramter below refers to the value returned by | 853 | * cid_cnt paramter below refers to the value returned by |
@@ -761,13 +858,13 @@ enum { | |||
761 | * The number of FP context allocated by the driver == max number of regular | 858 | * The number of FP context allocated by the driver == max number of regular |
762 | * L2 queues + 1 for the FCoE L2 queue | 859 | * L2 queues + 1 for the FCoE L2 queue |
763 | */ | 860 | */ |
764 | #define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE) | 861 | #define L2_FP_COUNT(cid_cnt) ((cid_cnt) - FCOE_CONTEXT_USE) |
765 | 862 | ||
766 | /* | 863 | /* |
767 | * The number of FP-SB allocated by the driver == max number of regular L2 | 864 | * The number of FP-SB allocated by the driver == max number of regular L2 |
768 | * queues + 1 for the CNIC which also consumes an FP-SB | 865 | * queues + 1 for the CNIC which also consumes an FP-SB |
769 | */ | 866 | */ |
770 | #define FP_SB_COUNT(cid_cnt) ((cid_cnt) - FCOE_CONTEXT_USE) | 867 | #define FP_SB_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE) |
771 | #define NUM_IGU_SB_REQUIRED(cid_cnt) \ | 868 | #define NUM_IGU_SB_REQUIRED(cid_cnt) \ |
772 | (FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE) | 869 | (FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE) |
773 | 870 | ||
@@ -788,38 +885,61 @@ union cdu_context { | |||
788 | #define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) | 885 | #define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) |
789 | #endif | 886 | #endif |
790 | 887 | ||
791 | #define QM_ILT_PAGE_SZ_HW 3 | 888 | #define QM_ILT_PAGE_SZ_HW 0 |
792 | #define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 32K */ | 889 | #define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */ |
793 | #define QM_CID_ROUND 1024 | 890 | #define QM_CID_ROUND 1024 |
794 | 891 | ||
795 | #ifdef BCM_CNIC | 892 | #ifdef BCM_CNIC |
796 | /* TM (timers) host DB constants */ | 893 | /* TM (timers) host DB constants */ |
797 | #define TM_ILT_PAGE_SZ_HW 2 | 894 | #define TM_ILT_PAGE_SZ_HW 0 |
798 | #define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 16K */ | 895 | #define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ |
799 | /* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ | 896 | /* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ |
800 | #define TM_CONN_NUM 1024 | 897 | #define TM_CONN_NUM 1024 |
801 | #define TM_ILT_SZ (8 * TM_CONN_NUM) | 898 | #define TM_ILT_SZ (8 * TM_CONN_NUM) |
802 | #define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) | 899 | #define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) |
803 | 900 | ||
804 | /* SRC (Searcher) host DB constants */ | 901 | /* SRC (Searcher) host DB constants */ |
805 | #define SRC_ILT_PAGE_SZ_HW 3 | 902 | #define SRC_ILT_PAGE_SZ_HW 0 |
806 | #define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 32K */ | 903 | #define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */ |
807 | #define SRC_HASH_BITS 10 | 904 | #define SRC_HASH_BITS 10 |
808 | #define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */ | 905 | #define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */ |
809 | #define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM) | 906 | #define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM) |
810 | #define SRC_T2_SZ SRC_ILT_SZ | 907 | #define SRC_T2_SZ SRC_ILT_SZ |
811 | #define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) | 908 | #define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) |
909 | |||
812 | #endif | 910 | #endif |
813 | 911 | ||
814 | #define MAX_DMAE_C 8 | 912 | #define MAX_DMAE_C 8 |
815 | 913 | ||
816 | /* DMA memory not used in fastpath */ | 914 | /* DMA memory not used in fastpath */ |
817 | struct bnx2x_slowpath { | 915 | struct bnx2x_slowpath { |
818 | struct eth_stats_query fw_stats; | 916 | union { |
819 | struct mac_configuration_cmd mac_config; | 917 | struct mac_configuration_cmd e1x; |
820 | struct mac_configuration_cmd mcast_config; | 918 | struct eth_classify_rules_ramrod_data e2; |
821 | struct mac_configuration_cmd uc_mac_config; | 919 | } mac_rdata; |
822 | struct client_init_ramrod_data client_init_data; | 920 | |
921 | |||
922 | union { | ||
923 | struct tstorm_eth_mac_filter_config e1x; | ||
924 | struct eth_filter_rules_ramrod_data e2; | ||
925 | } rx_mode_rdata; | ||
926 | |||
927 | union { | ||
928 | struct mac_configuration_cmd e1; | ||
929 | struct eth_multicast_rules_ramrod_data e2; | ||
930 | } mcast_rdata; | ||
931 | |||
932 | struct eth_rss_update_ramrod_data rss_rdata; | ||
933 | |||
934 | /* Queue State related ramrods are always sent under rtnl_lock */ | ||
935 | union { | ||
936 | struct client_init_ramrod_data init_data; | ||
937 | struct client_update_ramrod_data update_data; | ||
938 | } q_rdata; | ||
939 | |||
940 | union { | ||
941 | struct function_start_data func_start; | ||
942 | } func_rdata; | ||
823 | 943 | ||
824 | /* used by dmae command executer */ | 944 | /* used by dmae command executer */ |
825 | struct dmae_command dmae[MAX_DMAE_C]; | 945 | struct dmae_command dmae[MAX_DMAE_C]; |
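The reworked bnx2x_slowpath keeps one DMA-able scratch area per ramrod family, with a union selecting the E1x or E2+ layout of that ramrod's data, so the configuration objects can fill whichever member matches the chip without the caller caring. A schematic, non-driver sketch of the pattern; the structure contents here are invented, only the union-per-ramrod idea comes from the hunk:

```c
#include <stdio.h>
#include <string.h>

/* Schematic stand-ins for the two HSI layouts of a "set MAC" ramrod; the
 * real types are mac_configuration_cmd (E1x) and
 * eth_classify_rules_ramrod_data (E2+). */
struct e1x_mac_config { unsigned char mac[6]; unsigned short flags; };
struct e2_classify_rules { unsigned int rule_cnt; unsigned char rules[64]; };

struct slowpath_scratch {
	union {
		struct e1x_mac_config e1x;
		struct e2_classify_rules e2;
	} mac_rdata;	/* one buffer, chip-dependent interpretation */
};

static void prepare_mac_ramrod(struct slowpath_scratch *sp, int is_e1x,
			       const unsigned char *mac)
{
	if (is_e1x) {
		memcpy(sp->mac_rdata.e1x.mac, mac, 6);
		sp->mac_rdata.e1x.flags = 0;
		printf("filled E1x mac_configuration_cmd layout\n");
	} else {
		sp->mac_rdata.e2.rule_cnt = 1;
		memcpy(sp->mac_rdata.e2.rules, mac, 6);
		printf("filled E2 classification-rules layout\n");
	}
}

int main(void)
{
	struct slowpath_scratch sp;
	const unsigned char mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };

	prepare_mac_ramrod(&sp, 1, mac);	/* 57710/57711 path */
	prepare_mac_ramrod(&sp, 0, mac);	/* 57712/578xx path */
	return 0;
}
```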
@@ -846,7 +966,7 @@ struct bnx2x_slowpath { | |||
846 | #define MAX_DYNAMIC_ATTN_GRPS 8 | 966 | #define MAX_DYNAMIC_ATTN_GRPS 8 |
847 | 967 | ||
848 | struct attn_route { | 968 | struct attn_route { |
849 | u32 sig[5]; | 969 | u32 sig[5]; |
850 | }; | 970 | }; |
851 | 971 | ||
852 | struct iro { | 972 | struct iro { |
@@ -872,7 +992,7 @@ typedef enum { | |||
872 | BNX2X_RECOVERY_WAIT, | 992 | BNX2X_RECOVERY_WAIT, |
873 | } bnx2x_recovery_state_t; | 993 | } bnx2x_recovery_state_t; |
874 | 994 | ||
875 | /** | 995 | /* |
876 | * Event queue (EQ or event ring) MC hsi | 996 | * Event queue (EQ or event ring) MC hsi |
877 | * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2 | 997 | * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2 |
878 | */ | 998 | */ |
@@ -910,6 +1030,24 @@ enum { | |||
910 | BNX2X_LINK_REPORT_TX_FC_ON, | 1030 | BNX2X_LINK_REPORT_TX_FC_ON, |
911 | }; | 1031 | }; |
912 | 1032 | ||
1033 | enum { | ||
1034 | BNX2X_PORT_QUERY_IDX, | ||
1035 | BNX2X_PF_QUERY_IDX, | ||
1036 | BNX2X_FIRST_QUEUE_QUERY_IDX, | ||
1037 | }; | ||
1038 | |||
1039 | struct bnx2x_fw_stats_req { | ||
1040 | struct stats_query_header hdr; | ||
1041 | struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; | ||
1042 | }; | ||
1043 | |||
1044 | struct bnx2x_fw_stats_data { | ||
1045 | struct stats_counter storm_counters; | ||
1046 | struct per_port_stats port; | ||
1047 | struct per_pf_stats pf; | ||
1048 | struct per_queue_stats queue_stats[1]; | ||
1049 | }; | ||
1050 | |||
913 | struct bnx2x { | 1051 | struct bnx2x { |
914 | /* Fields used in the tx and intr/napi performance paths | 1052 | /* Fields used in the tx and intr/napi performance paths |
915 | * are grouped together in the beginning of the structure | 1053 | * are grouped together in the beginning of the structure |
@@ -919,10 +1057,23 @@ struct bnx2x { | |||
919 | void __iomem *doorbells; | 1057 | void __iomem *doorbells; |
920 | u16 db_size; | 1058 | u16 db_size; |
921 | 1059 | ||
1060 | u8 pf_num; /* absolute PF number */ | ||
1061 | u8 pfid; /* per-path PF number */ | ||
1062 | int base_fw_ndsb; /**/ | ||
1063 | #define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1)) | ||
1064 | #define BP_PORT(bp) (bp->pfid & 1) | ||
1065 | #define BP_FUNC(bp) (bp->pfid) | ||
1066 | #define BP_ABS_FUNC(bp) (bp->pf_num) | ||
1067 | #define BP_E1HVN(bp) (bp->pfid >> 1) | ||
1068 | #define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/ | ||
1069 | #define BP_L_ID(bp) (BP_E1HVN(bp) << 2) | ||
1070 | #define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ | ||
1071 | BP_VN(bp) * (CHIP_IS_E1x(bp) ? 2 : 1)) | ||
1072 | |||
922 | struct net_device *dev; | 1073 | struct net_device *dev; |
923 | struct pci_dev *pdev; | 1074 | struct pci_dev *pdev; |
924 | 1075 | ||
925 | struct iro *iro_arr; | 1076 | const struct iro *iro_arr; |
926 | #define IRO (bp->iro_arr) | 1077 | #define IRO (bp->iro_arr) |
927 | 1078 | ||
928 | bnx2x_recovery_state_t recovery_state; | 1079 | bnx2x_recovery_state_t recovery_state; |
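pf_num/pfid and their BP_*() accessors move to the top of struct bnx2x in this hunk. The relationships are simple bit splits: on non-E1x chips the path is the low bit of the absolute PF number, the port is the low bit of the per-path PF id, and the VN is the remaining bits. A standalone check of those macros, with the chip test reduced to a boolean and hypothetical field values:

```c
#include <stdio.h>

/* Reduced versions of the BP_*() macros: 'is_e1x' stands in for
 * CHIP_IS_E1x(bp), plain ints for the bp->pf_num / bp->pfid fields. */
static int bp_path(int is_e1x, int pf_num) { return is_e1x ? 0 : (pf_num & 1); }
static int bp_port(int pfid)               { return pfid & 1; }
static int bp_vn(int pfid)                 { return pfid >> 1; }

static int bp_fw_mb_idx(int is_e1x, int pfid)
{
	return bp_port(pfid) + bp_vn(pfid) * (is_e1x ? 2 : 1);
}

int main(void)
{
	/* Hypothetical values, just to exercise the bit splits. */
	int pf_num = 5, pfid = 2, is_e1x = 0;

	printf("path=%d port=%d func=%d abs_func=%d vn=%d fw_mb_idx=%d\n",
	       bp_path(is_e1x, pf_num), bp_port(pfid),
	       pfid /* BP_FUNC */, pf_num /* BP_ABS_FUNC */,
	       bp_vn(pfid), bp_fw_mb_idx(is_e1x, pfid));
	return 0;
}
```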
@@ -940,7 +1091,8 @@ struct bnx2x { | |||
940 | /* Max supported alignment is 256 (8 shift) */ | 1091 | /* Max supported alignment is 256 (8 shift) */ |
941 | #define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \ | 1092 | #define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \ |
942 | L1_CACHE_SHIFT : 8) | 1093 | L1_CACHE_SHIFT : 8) |
943 | #define BNX2X_RX_ALIGN (1 << BNX2X_RX_ALIGN_SHIFT) | 1094 | /* FW use 2 Cache lines Alignment for start packet and size */ |
1095 | #define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT) | ||
944 | #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) | 1096 | #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) |
945 | 1097 | ||
946 | struct host_sp_status_block *def_status_blk; | 1098 | struct host_sp_status_block *def_status_blk; |
@@ -970,10 +1122,12 @@ struct bnx2x { | |||
970 | __le16 *eq_cons_sb; | 1122 | __le16 *eq_cons_sb; |
971 | atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */ | 1123 | atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */ |
972 | 1124 | ||
973 | /* Flags for marking that there is a STAT_QUERY or | 1125 | |
974 | SET_MAC ramrod pending */ | 1126 | |
975 | int stats_pending; | 1127 | /* Counter for marking that there is a STAT_QUERY ramrod pending */ |
976 | int set_mac_pending; | 1128 | u16 stats_pending; |
1129 | /* Counter for completed statistics ramrods */ | ||
1130 | u16 stats_comp; | ||
977 | 1131 | ||
978 | /* End of fields used in the performance code paths */ | 1132 | /* End of fields used in the performance code paths */ |
979 | 1133 | ||
@@ -981,47 +1135,27 @@ struct bnx2x { | |||
981 | int msg_enable; | 1135 | int msg_enable; |
982 | 1136 | ||
983 | u32 flags; | 1137 | u32 flags; |
984 | #define PCIX_FLAG 1 | 1138 | #define PCIX_FLAG (1 << 0) |
985 | #define PCI_32BIT_FLAG 2 | 1139 | #define PCI_32BIT_FLAG (1 << 1) |
986 | #define ONE_PORT_FLAG 4 | 1140 | #define ONE_PORT_FLAG (1 << 2) |
987 | #define NO_WOL_FLAG 8 | 1141 | #define NO_WOL_FLAG (1 << 3) |
988 | #define USING_DAC_FLAG 0x10 | 1142 | #define USING_DAC_FLAG (1 << 4) |
989 | #define USING_MSIX_FLAG 0x20 | 1143 | #define USING_MSIX_FLAG (1 << 5) |
990 | #define USING_MSI_FLAG 0x40 | 1144 | #define USING_MSI_FLAG (1 << 6) |
991 | 1145 | #define DISABLE_MSI_FLAG (1 << 7) | |
992 | #define TPA_ENABLE_FLAG 0x80 | 1146 | #define TPA_ENABLE_FLAG (1 << 8) |
993 | #define NO_MCP_FLAG 0x100 | 1147 | #define NO_MCP_FLAG (1 << 9) |
994 | #define DISABLE_MSI_FLAG 0x200 | 1148 | |
995 | #define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) | 1149 | #define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) |
996 | #define MF_FUNC_DIS 0x1000 | 1150 | #define MF_FUNC_DIS (1 << 11) |
997 | #define FCOE_MACS_SET 0x2000 | 1151 | #define OWN_CNIC_IRQ (1 << 12) |
998 | #define NO_FCOE_FLAG 0x4000 | 1152 | #define NO_ISCSI_OOO_FLAG (1 << 13) |
999 | #define NO_ISCSI_OOO_FLAG 0x8000 | 1153 | #define NO_ISCSI_FLAG (1 << 14) |
1000 | #define NO_ISCSI_FLAG 0x10000 | 1154 | #define NO_FCOE_FLAG (1 << 15) |
1001 | 1155 | ||
1002 | #define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) | ||
1003 | #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) | 1156 | #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) |
1004 | #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) | 1157 | #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) |
1005 | 1158 | #define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) | |
1006 | int pf_num; /* absolute PF number */ | ||
1007 | int pfid; /* per-path PF number */ | ||
1008 | int base_fw_ndsb; | ||
1009 | #define BP_PATH(bp) (!CHIP_IS_E2(bp) ? \ | ||
1010 | 0 : (bp->pf_num & 1)) | ||
1011 | #define BP_PORT(bp) (bp->pfid & 1) | ||
1012 | #define BP_FUNC(bp) (bp->pfid) | ||
1013 | #define BP_ABS_FUNC(bp) (bp->pf_num) | ||
1014 | #define BP_E1HVN(bp) (bp->pfid >> 1) | ||
1015 | #define BP_VN(bp) (CHIP_MODE_IS_4_PORT(bp) ? \ | ||
1016 | 0 : BP_E1HVN(bp)) | ||
1017 | #define BP_L_ID(bp) (BP_E1HVN(bp) << 2) | ||
1018 | #define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ | ||
1019 | BP_VN(bp) * (CHIP_IS_E1x(bp) ? 2 : 1)) | ||
1020 | |||
1021 | #ifdef BCM_CNIC | ||
1022 | #define BCM_CNIC_CID_START 16 | ||
1023 | #define BCM_ISCSI_ETH_CL_ID 17 | ||
1024 | #endif | ||
1025 | 1159 | ||
1026 | int pm_cap; | 1160 | int pm_cap; |
1027 | int pcie_cap; | 1161 | int pcie_cap; |
@@ -1048,9 +1182,9 @@ struct bnx2x { | |||
1048 | 1182 | ||
1049 | struct cmng_struct_per_port cmng; | 1183 | struct cmng_struct_per_port cmng; |
1050 | u32 vn_weight_sum; | 1184 | u32 vn_weight_sum; |
1051 | |||
1052 | u32 mf_config[E1HVN_MAX]; | 1185 | u32 mf_config[E1HVN_MAX]; |
1053 | u32 mf2_config[E2_FUNC_MAX]; | 1186 | u32 mf2_config[E2_FUNC_MAX]; |
1187 | u32 path_has_ovlan; /* E3 */ | ||
1054 | u16 mf_ov; | 1188 | u16 mf_ov; |
1055 | u8 mf_mode; | 1189 | u8 mf_mode; |
1056 | #define IS_MF(bp) (bp->mf_mode != 0) | 1190 | #define IS_MF(bp) (bp->mf_mode != 0) |
@@ -1075,32 +1209,20 @@ struct bnx2x { | |||
1075 | 1209 | ||
1076 | u32 lin_cnt; | 1210 | u32 lin_cnt; |
1077 | 1211 | ||
1078 | int state; | 1212 | u16 state; |
1079 | #define BNX2X_STATE_CLOSED 0 | 1213 | #define BNX2X_STATE_CLOSED 0 |
1080 | #define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000 | 1214 | #define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000 |
1081 | #define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000 | 1215 | #define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000 |
1082 | #define BNX2X_STATE_OPEN 0x3000 | 1216 | #define BNX2X_STATE_OPEN 0x3000 |
1083 | #define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 | 1217 | #define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 |
1084 | #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 | 1218 | #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 |
1085 | #define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000 | 1219 | |
1086 | #define BNX2X_STATE_FUNC_STARTED 0x7000 | ||
1087 | #define BNX2X_STATE_DIAG 0xe000 | 1220 | #define BNX2X_STATE_DIAG 0xe000 |
1088 | #define BNX2X_STATE_ERROR 0xf000 | 1221 | #define BNX2X_STATE_ERROR 0xf000 |
1089 | 1222 | ||
1090 | int multi_mode; | 1223 | int multi_mode; |
1091 | int num_queues; | 1224 | int num_queues; |
1092 | int disable_tpa; | 1225 | int disable_tpa; |
1093 | u32 *rx_indir_table; | ||
1094 | |||
1095 | struct tstorm_eth_mac_filter_config mac_filters; | ||
1096 | #define BNX2X_ACCEPT_NONE 0x0000 | ||
1097 | #define BNX2X_ACCEPT_UNICAST 0x0001 | ||
1098 | #define BNX2X_ACCEPT_MULTICAST 0x0002 | ||
1099 | #define BNX2X_ACCEPT_ALL_UNICAST 0x0004 | ||
1100 | #define BNX2X_ACCEPT_ALL_MULTICAST 0x0008 | ||
1101 | #define BNX2X_ACCEPT_BROADCAST 0x0010 | ||
1102 | #define BNX2X_ACCEPT_UNMATCHED_UCAST 0x0020 | ||
1103 | #define BNX2X_PROMISCUOUS_MODE 0x10000 | ||
1104 | 1226 | ||
1105 | u32 rx_mode; | 1227 | u32 rx_mode; |
1106 | #define BNX2X_RX_MODE_NONE 0 | 1228 | #define BNX2X_RX_MODE_NONE 0 |
@@ -1108,7 +1230,6 @@ struct bnx2x { | |||
1108 | #define BNX2X_RX_MODE_ALLMULTI 2 | 1230 | #define BNX2X_RX_MODE_ALLMULTI 2 |
1109 | #define BNX2X_RX_MODE_PROMISC 3 | 1231 | #define BNX2X_RX_MODE_PROMISC 3 |
1110 | #define BNX2X_MAX_MULTICAST 64 | 1232 | #define BNX2X_MAX_MULTICAST 64 |
1111 | #define BNX2X_MAX_EMUL_MULTI 16 | ||
1112 | 1233 | ||
1113 | u8 igu_dsb_id; | 1234 | u8 igu_dsb_id; |
1114 | u8 igu_base_sb; | 1235 | u8 igu_base_sb; |
@@ -1117,11 +1238,38 @@ struct bnx2x { | |||
1117 | 1238 | ||
1118 | struct bnx2x_slowpath *slowpath; | 1239 | struct bnx2x_slowpath *slowpath; |
1119 | dma_addr_t slowpath_mapping; | 1240 | dma_addr_t slowpath_mapping; |
1241 | |||
1242 | /* Total number of FW statistics requests */ | ||
1243 | u8 fw_stats_num; | ||
1244 | |||
1245 | /* | ||
1246 | * This is a memory buffer that will contain both statistics | ||
1247 | * ramrod request and data. | ||
1248 | */ | ||
1249 | void *fw_stats; | ||
1250 | dma_addr_t fw_stats_mapping; | ||
1251 | |||
1252 | /* | ||
1253 | * FW statistics request shortcut (points at the | ||
1254 | * beginning of fw_stats buffer). | ||
1255 | */ | ||
1256 | struct bnx2x_fw_stats_req *fw_stats_req; | ||
1257 | dma_addr_t fw_stats_req_mapping; | ||
1258 | int fw_stats_req_sz; | ||
1259 | |||
1260 | /* | ||
1261 | * FW statistics data shortcut (points at the begining of | ||
1262 | * fw_stats buffer + fw_stats_req_sz). | ||
1263 | */ | ||
1264 | struct bnx2x_fw_stats_data *fw_stats_data; | ||
1265 | dma_addr_t fw_stats_data_mapping; | ||
1266 | int fw_stats_data_sz; | ||
1267 | |||
1120 | struct hw_context context; | 1268 | struct hw_context context; |
1121 | 1269 | ||
1122 | struct bnx2x_ilt *ilt; | 1270 | struct bnx2x_ilt *ilt; |
1123 | #define BP_ILT(bp) ((bp)->ilt) | 1271 | #define BP_ILT(bp) ((bp)->ilt) |
1124 | #define ILT_MAX_LINES 128 | 1272 | #define ILT_MAX_LINES 256 |
1125 | 1273 | ||
1126 | int l2_cid_count; | 1274 | int l2_cid_count; |
1127 | #define L2_ILT_LINES(bp) (DIV_ROUND_UP((bp)->l2_cid_count, \ | 1275 | #define L2_ILT_LINES(bp) (DIV_ROUND_UP((bp)->l2_cid_count, \ |
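The new statistics scheme allocates a single DMA buffer (`fw_stats`) that carries the stats ramrod request followed by the data the storms write back; `fw_stats_req`/`fw_stats_data` and their mappings are, per the comments above, just shortcuts into that buffer offset by `fw_stats_req_sz`. A minimal sketch of carving those shortcuts out of one allocation, with plain malloc standing in for the DMA allocation and made-up sizes:

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Made-up sizes: in the driver these come from the bnx2x_fw_stats_req /
 * bnx2x_fw_stats_data layouts and the number of queried queues. */
#define FW_STATS_REQ_SZ		256
#define FW_STATS_DATA_SZ	1024

struct fw_stats_shortcuts {
	void     *fw_stats;		/* whole buffer */
	uintptr_t fw_stats_mapping;	/* stands in for the DMA address */
	void     *fw_stats_req;		/* == fw_stats */
	uintptr_t fw_stats_req_mapping;
	void     *fw_stats_data;	/* == fw_stats + req_sz */
	uintptr_t fw_stats_data_mapping;
};

static int alloc_fw_stats(struct fw_stats_shortcuts *s)
{
	s->fw_stats = malloc(FW_STATS_REQ_SZ + FW_STATS_DATA_SZ);
	if (!s->fw_stats)
		return -1;

	s->fw_stats_mapping      = (uintptr_t)s->fw_stats;
	s->fw_stats_req          = s->fw_stats;
	s->fw_stats_req_mapping  = s->fw_stats_mapping;
	s->fw_stats_data         = (char *)s->fw_stats + FW_STATS_REQ_SZ;
	s->fw_stats_data_mapping = s->fw_stats_mapping + FW_STATS_REQ_SZ;
	return 0;
}

int main(void)
{
	struct fw_stats_shortcuts s;

	if (alloc_fw_stats(&s))
		return 1;
	printf("req  at +%ld\n", (long)((char *)s.fw_stats_req - (char *)s.fw_stats));
	printf("data at +%ld\n", (long)((char *)s.fw_stats_data - (char *)s.fw_stats));
	free(s.fw_stats);
	return 0;
}
```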
@@ -1143,16 +1291,18 @@ struct bnx2x { | |||
1143 | struct cnic_eth_dev cnic_eth_dev; | 1291 | struct cnic_eth_dev cnic_eth_dev; |
1144 | union host_hc_status_block cnic_sb; | 1292 | union host_hc_status_block cnic_sb; |
1145 | dma_addr_t cnic_sb_mapping; | 1293 | dma_addr_t cnic_sb_mapping; |
1146 | #define CNIC_SB_ID(bp) ((bp)->base_fw_ndsb + BP_L_ID(bp)) | ||
1147 | #define CNIC_IGU_SB_ID(bp) ((bp)->igu_base_sb) | ||
1148 | struct eth_spe *cnic_kwq; | 1294 | struct eth_spe *cnic_kwq; |
1149 | struct eth_spe *cnic_kwq_prod; | 1295 | struct eth_spe *cnic_kwq_prod; |
1150 | struct eth_spe *cnic_kwq_cons; | 1296 | struct eth_spe *cnic_kwq_cons; |
1151 | struct eth_spe *cnic_kwq_last; | 1297 | struct eth_spe *cnic_kwq_last; |
1152 | u16 cnic_kwq_pending; | 1298 | u16 cnic_kwq_pending; |
1153 | u16 cnic_spq_pending; | 1299 | u16 cnic_spq_pending; |
1154 | struct mutex cnic_mutex; | ||
1155 | u8 fip_mac[ETH_ALEN]; | 1300 | u8 fip_mac[ETH_ALEN]; |
1301 | struct mutex cnic_mutex; | ||
1302 | struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; | ||
1303 | |||
1304 | /* Start index of the "special" (CNIC related) L2 cleints */ | ||
1305 | u8 cnic_base_cl_id; | ||
1156 | #endif | 1306 | #endif |
1157 | 1307 | ||
1158 | int dmae_ready; | 1308 | int dmae_ready; |
@@ -1189,6 +1339,8 @@ struct bnx2x { | |||
1189 | u16 *init_ops_offsets; | 1339 | u16 *init_ops_offsets; |
1190 | /* Data blob - has 32 bit granularity */ | 1340 | /* Data blob - has 32 bit granularity */ |
1191 | u32 *init_data; | 1341 | u32 *init_data; |
1342 | u32 init_mode_flags; | ||
1343 | #define INIT_MODE_FLAGS(bp) (bp->init_mode_flags) | ||
1192 | /* Zipped PRAM blobs - raw data */ | 1344 | /* Zipped PRAM blobs - raw data */ |
1193 | const u8 *tsem_int_table_data; | 1345 | const u8 *tsem_int_table_data; |
1194 | const u8 *tsem_pram_data; | 1346 | const u8 *tsem_pram_data; |
@@ -1210,8 +1362,10 @@ struct bnx2x { | |||
1210 | #define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data) | 1362 | #define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data) |
1211 | #define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data) | 1363 | #define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data) |
1212 | 1364 | ||
1365 | #define PHY_FW_VER_LEN 20 | ||
1213 | char fw_ver[32]; | 1366 | char fw_ver[32]; |
1214 | const struct firmware *firmware; | 1367 | const struct firmware *firmware; |
1368 | |||
1215 | /* LLDP params */ | 1369 | /* LLDP params */ |
1216 | struct bnx2x_config_lldp_params lldp_config_params; | 1370 | struct bnx2x_config_lldp_params lldp_config_params; |
1217 | 1371 | ||
@@ -1230,13 +1384,30 @@ struct bnx2x { | |||
1230 | bool dcbx_mode_uset; | 1384 | bool dcbx_mode_uset; |
1231 | 1385 | ||
1232 | struct bnx2x_config_dcbx_params dcbx_config_params; | 1386 | struct bnx2x_config_dcbx_params dcbx_config_params; |
1233 | |||
1234 | struct bnx2x_dcbx_port_params dcbx_port_params; | 1387 | struct bnx2x_dcbx_port_params dcbx_port_params; |
1235 | int dcb_version; | 1388 | int dcb_version; |
1236 | 1389 | ||
1237 | /* DCBX Negotiation results */ | 1390 | /* CAM credit pools */ |
1391 | struct bnx2x_credit_pool_obj macs_pool; | ||
1392 | |||
1393 | /* RX_MODE object */ | ||
1394 | struct bnx2x_rx_mode_obj rx_mode_obj; | ||
1395 | |||
1396 | /* MCAST object */ | ||
1397 | struct bnx2x_mcast_obj mcast_obj; | ||
1398 | |||
1399 | /* RSS configuration object */ | ||
1400 | struct bnx2x_rss_config_obj rss_conf_obj; | ||
1401 | |||
1402 | /* Function State controlling object */ | ||
1403 | struct bnx2x_func_sp_obj func_obj; | ||
1404 | |||
1405 | unsigned long sp_state; | ||
1406 | |||
1407 | /* DCBX Negotation results */ | ||
1238 | struct dcbx_features dcbx_local_feat; | 1408 | struct dcbx_features dcbx_local_feat; |
1239 | u32 dcbx_error; | 1409 | u32 dcbx_error; |
1410 | |||
1240 | #ifdef BCM_DCBNL | 1411 | #ifdef BCM_DCBNL |
1241 | struct dcbx_features dcbx_remote_feat; | 1412 | struct dcbx_features dcbx_remote_feat; |
1242 | u32 dcbx_remote_flags; | 1413 | u32 dcbx_remote_flags; |
@@ -1244,42 +1415,11 @@ struct bnx2x { | |||
1244 | u32 pending_max; | 1415 | u32 pending_max; |
1245 | }; | 1416 | }; |
1246 | 1417 | ||
1247 | /** | 1418 | /* Tx queues may be less or equal to Rx queues */ |
1248 | * Init queue/func interface | 1419 | extern int num_queues; |
1249 | */ | ||
1250 | /* queue init flags */ | ||
1251 | #define QUEUE_FLG_TPA 0x0001 | ||
1252 | #define QUEUE_FLG_CACHE_ALIGN 0x0002 | ||
1253 | #define QUEUE_FLG_STATS 0x0004 | ||
1254 | #define QUEUE_FLG_OV 0x0008 | ||
1255 | #define QUEUE_FLG_VLAN 0x0010 | ||
1256 | #define QUEUE_FLG_COS 0x0020 | ||
1257 | #define QUEUE_FLG_HC 0x0040 | ||
1258 | #define QUEUE_FLG_DHC 0x0080 | ||
1259 | #define QUEUE_FLG_OOO 0x0100 | ||
1260 | |||
1261 | #define QUEUE_DROP_IP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR | ||
1262 | #define QUEUE_DROP_TCP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR | ||
1263 | #define QUEUE_DROP_TTL0 TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 | ||
1264 | #define QUEUE_DROP_UDP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR | ||
1265 | |||
1266 | |||
1267 | |||
1268 | /* rss capabilities */ | ||
1269 | #define RSS_IPV4_CAP 0x0001 | ||
1270 | #define RSS_IPV4_TCP_CAP 0x0002 | ||
1271 | #define RSS_IPV6_CAP 0x0004 | ||
1272 | #define RSS_IPV6_TCP_CAP 0x0008 | ||
1273 | |||
1274 | #define BNX2X_NUM_QUEUES(bp) (bp->num_queues) | 1420 | #define BNX2X_NUM_QUEUES(bp) (bp->num_queues) |
1275 | #define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE) | 1421 | #define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE) |
1276 | 1422 | ||
1277 | /* ethtool statistics are displayed for all regular ethernet queues and the | ||
1278 | * fcoe L2 queue if not disabled | ||
1279 | */ | ||
1280 | #define BNX2X_NUM_STAT_QUEUES(bp) (NO_FCOE(bp) ? BNX2X_NUM_ETH_QUEUES(bp) : \ | ||
1281 | (BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE)) | ||
1282 | |||
1283 | #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) | 1423 | #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) |
1284 | 1424 | ||
1285 | #define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE) | 1425 | #define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE) |
@@ -1297,107 +1437,15 @@ struct bnx2x { | |||
1297 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | 1437 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY |
1298 | 1438 | ||
1299 | /* func init flags */ | 1439 | /* func init flags */ |
1300 | #define FUNC_FLG_STATS 0x0001 | 1440 | #define FUNC_FLG_RSS 0x0001 |
1301 | #define FUNC_FLG_TPA 0x0002 | 1441 | #define FUNC_FLG_STATS 0x0002 |
1302 | #define FUNC_FLG_SPQ 0x0004 | 1442 | /* removed FUNC_FLG_UNMATCHED 0x0004 */ |
1303 | #define FUNC_FLG_LEADING 0x0008 /* PF only */ | 1443 | #define FUNC_FLG_TPA 0x0008 |
1304 | 1444 | #define FUNC_FLG_SPQ 0x0010 | |
1305 | struct rxq_pause_params { | 1445 | #define FUNC_FLG_LEADING 0x0020 /* PF only */ |
1306 | u16 bd_th_lo; | ||
1307 | u16 bd_th_hi; | ||
1308 | u16 rcq_th_lo; | ||
1309 | u16 rcq_th_hi; | ||
1310 | u16 sge_th_lo; /* valid iff QUEUE_FLG_TPA */ | ||
1311 | u16 sge_th_hi; /* valid iff QUEUE_FLG_TPA */ | ||
1312 | u16 pri_map; | ||
1313 | }; | ||
1314 | |||
1315 | struct bnx2x_rxq_init_params { | ||
1316 | /* cxt*/ | ||
1317 | struct eth_context *cxt; | ||
1318 | |||
1319 | /* dma */ | ||
1320 | dma_addr_t dscr_map; | ||
1321 | dma_addr_t sge_map; | ||
1322 | dma_addr_t rcq_map; | ||
1323 | dma_addr_t rcq_np_map; | ||
1324 | |||
1325 | u16 flags; | ||
1326 | u16 drop_flags; | ||
1327 | u16 mtu; | ||
1328 | u16 buf_sz; | ||
1329 | u16 fw_sb_id; | ||
1330 | u16 cl_id; | ||
1331 | u16 spcl_id; | ||
1332 | u16 cl_qzone_id; | ||
1333 | |||
1334 | /* valid iff QUEUE_FLG_STATS */ | ||
1335 | u16 stat_id; | ||
1336 | |||
1337 | /* valid iff QUEUE_FLG_TPA */ | ||
1338 | u16 tpa_agg_sz; | ||
1339 | u16 sge_buf_sz; | ||
1340 | u16 max_sges_pkt; | ||
1341 | |||
1342 | /* valid iff QUEUE_FLG_CACHE_ALIGN */ | ||
1343 | u8 cache_line_log; | ||
1344 | |||
1345 | u8 sb_cq_index; | ||
1346 | u32 cid; | ||
1347 | |||
1348 | /* desired interrupts per sec. valid iff QUEUE_FLG_HC */ | ||
1349 | u32 hc_rate; | ||
1350 | }; | ||
1351 | |||
1352 | struct bnx2x_txq_init_params { | ||
1353 | /* cxt*/ | ||
1354 | struct eth_context *cxt; | ||
1355 | 1446 | ||
1356 | /* dma */ | ||
1357 | dma_addr_t dscr_map; | ||
1358 | |||
1359 | u16 flags; | ||
1360 | u16 fw_sb_id; | ||
1361 | u8 sb_cq_index; | ||
1362 | u8 cos; /* valid iff QUEUE_FLG_COS */ | ||
1363 | u16 stat_id; /* valid iff QUEUE_FLG_STATS */ | ||
1364 | u16 traffic_type; | ||
1365 | u32 cid; | ||
1366 | u16 hc_rate; /* desired interrupts per sec.*/ | ||
1367 | /* valid iff QUEUE_FLG_HC */ | ||
1368 | |||
1369 | }; | ||
1370 | |||
1371 | struct bnx2x_client_ramrod_params { | ||
1372 | int *pstate; | ||
1373 | int state; | ||
1374 | u16 index; | ||
1375 | u16 cl_id; | ||
1376 | u32 cid; | ||
1377 | u8 poll; | ||
1378 | #define CLIENT_IS_FCOE 0x01 | ||
1379 | #define CLIENT_IS_LEADING_RSS 0x02 | ||
1380 | u8 flags; | ||
1381 | }; | ||
1382 | |||
1383 | struct bnx2x_client_init_params { | ||
1384 | struct rxq_pause_params pause; | ||
1385 | struct bnx2x_rxq_init_params rxq_params; | ||
1386 | struct bnx2x_txq_init_params txq_params; | ||
1387 | struct bnx2x_client_ramrod_params ramrod_params; | ||
1388 | }; | ||
1389 | |||
1390 | struct bnx2x_rss_params { | ||
1391 | int mode; | ||
1392 | u16 cap; | ||
1393 | u16 result_mask; | ||
1394 | }; | ||
1395 | 1447 | ||
1396 | struct bnx2x_func_init_params { | 1448 | struct bnx2x_func_init_params { |
1397 | |||
1398 | /* rss */ | ||
1399 | struct bnx2x_rss_params *rss; /* valid iff FUNC_FLG_RSS */ | ||
1400 | |||
1401 | /* dma */ | 1449 | /* dma */ |
1402 | dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ | 1450 | dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ |
1403 | dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */ | 1451 | dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */ |
@@ -1409,17 +1457,10 @@ struct bnx2x_func_init_params { | |||
1409 | }; | 1457 | }; |
1410 | 1458 | ||
1411 | #define for_each_eth_queue(bp, var) \ | 1459 | #define for_each_eth_queue(bp, var) \ |
1412 | for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++) | 1460 | for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++) |
1413 | 1461 | ||
1414 | #define for_each_nondefault_eth_queue(bp, var) \ | 1462 | #define for_each_nondefault_eth_queue(bp, var) \ |
1415 | for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++) | 1463 | for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++) |
1416 | |||
1417 | #define for_each_napi_queue(bp, var) \ | ||
1418 | for (var = 0; \ | ||
1419 | var < BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE; var++) \ | ||
1420 | if (skip_queue(bp, var)) \ | ||
1421 | continue; \ | ||
1422 | else | ||
1423 | 1464 | ||
1424 | #define for_each_queue(bp, var) \ | 1465 | #define for_each_queue(bp, var) \ |
1425 | for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \ | 1466 | for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \ |
@@ -1457,11 +1498,66 @@ struct bnx2x_func_init_params { | |||
1457 | 1498 | ||
1458 | #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) | 1499 | #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) |
1459 | 1500 | ||
1460 | #define WAIT_RAMROD_POLL 0x01 | ||
1461 | #define WAIT_RAMROD_COMMON 0x02 | ||
1462 | 1501 | ||
1502 | |||
1503 | |||
1504 | /** | ||
1505 | * bnx2x_set_mac_one - configure a single MAC address | ||
1506 | * | ||
1507 | * @bp: driver handle | ||
1508 | * @mac: MAC to configure | ||
1509 | * @obj: MAC object handle | ||
1510 | * @set: if 'true' add a new MAC, otherwise - delete | ||
1511 | * @mac_type: the type of the MAC to configure (e.g. ETH, UC list) | ||
1512 | * @ramrod_flags: RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT) | ||
1513 | * | ||
1514 | * Configures one MAC according to provided parameters or continues the | ||
1515 | * execution of previously scheduled commands if RAMROD_CONT is set in | ||
1516 | * ramrod_flags. | ||
1517 | * | ||
1518 | * Returns zero if the operation has completed successfully, a positive value | ||
1519 | * if the operation has been successfully scheduled, and a negative value if | ||
1520 | * the requested operation has failed. | ||
1521 | */ | ||
1522 | int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, | ||
1523 | struct bnx2x_vlan_mac_obj *obj, bool set, | ||
1524 | int mac_type, unsigned long *ramrod_flags); | ||
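For orientation, a caller adding the primary ETH MAC and blocking until the ramrod completes might look roughly like the sketch below. The object and flag names (bp->fp->mac_obj, RAMROD_COMP_WAIT, BNX2X_ETH_MAC) are taken from elsewhere in this patch; the call site itself is illustrative, not part of this hunk.

	unsigned long ramrod_flags = 0;
	int rc;

	/* Block until the configuration ramrod completes. */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);

	/* Add (set == true) the netdev MAC via the per-function MAC object. */
	rc = bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj,
			       true, BNX2X_ETH_MAC, &ramrod_flags);
	if (rc < 0)
		BNX2X_ERR("Failed to configure the ETH MAC: %d\n", rc);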
1533 | |||
1534 | /** | ||
1535 | * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object | ||
1536 | * | ||
1537 | * @bp: driver handle | ||
1538 | * @mac_obj: MAC object handle | ||
1539 | * @mac_type: type of the MACs to clear (BNX2X_XXX_MAC) | ||
1540 | * @wait_for_comp: if 'true' block until completion | ||
1541 | * | ||
1542 | * Deletes all MACs of the specific type (e.g. ETH, UC list). | ||
1543 | * | ||
1544 | * Returns zero if the operation has completed successfully, a positive value | ||
1545 | * if the operation has been successfully scheduled, and a negative value if | ||
1546 | * the requested operation has failed. | ||
1547 | */ | ||
1548 | int bnx2x_del_all_macs(struct bnx2x *bp, | ||
1549 | struct bnx2x_vlan_mac_obj *mac_obj, | ||
1550 | int mac_type, bool wait_for_comp); | ||
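Similarly, flushing the unicast-list MACs before unload could look like the sketch below; again, BNX2X_UC_LIST_MAC and bp->fp->mac_obj are names used elsewhere in this patch, and the call site is illustrative only.

	/* Remove every unicast-list MAC and wait for the ramrods to finish. */
	rc = bnx2x_del_all_macs(bp, &bp->fp->mac_obj, BNX2X_UC_LIST_MAC, true);
	if (rc < 0)
		BNX2X_ERR("Failed to delete UC list MACs: %d\n", rc);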
1551 | |||
1552 | /* Init Function API */ | ||
1553 | void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p); | ||
1554 | int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); | ||
1555 | int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); | ||
1556 | int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode); | ||
1557 | int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); | ||
1463 | void bnx2x_read_mf_cfg(struct bnx2x *bp); | 1558 | void bnx2x_read_mf_cfg(struct bnx2x *bp); |
1464 | 1559 | ||
1560 | |||
1465 | /* dmae */ | 1561 | /* dmae */ |
1466 | void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); | 1562 | void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); |
1467 | void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, | 1563 | void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, |
@@ -1472,21 +1568,10 @@ u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode); | |||
1472 | u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, | 1568 | u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, |
1473 | bool with_comp, u8 comp_type); | 1569 | bool with_comp, u8 comp_type); |
1474 | 1570 | ||
1475 | int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); | ||
1476 | int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); | ||
1477 | int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); | ||
1478 | u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); | ||
1479 | 1571 | ||
1480 | void bnx2x_calc_fc_adv(struct bnx2x *bp); | 1572 | void bnx2x_calc_fc_adv(struct bnx2x *bp); |
1481 | int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | 1573 | int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, |
1482 | u32 data_hi, u32 data_lo, int common); | 1574 | u32 data_hi, u32 data_lo, int cmd_type); |
1483 | |||
1484 | /* Clears multicast and unicast list configuration in the chip. */ | ||
1485 | void bnx2x_invalidate_uc_list(struct bnx2x *bp); | ||
1486 | |||
1487 | int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | ||
1488 | int *state_p, int flags); | ||
1489 | |||
1490 | void bnx2x_update_coalesce(struct bnx2x *bp); | 1575 | void bnx2x_update_coalesce(struct bnx2x *bp); |
1491 | int bnx2x_get_cur_phy_idx(struct bnx2x *bp); | 1576 | int bnx2x_get_cur_phy_idx(struct bnx2x *bp); |
1492 | 1577 | ||
@@ -1644,7 +1729,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1644 | 1729 | ||
1645 | /* must be used on a CID before placing it on a HW ring */ | 1730 | /* must be used on a CID before placing it on a HW ring */ |
1646 | #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ | 1731 | #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ |
1647 | (BP_E1HVN(bp) << 17) | (x)) | 1732 | (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ |
1733 | (x)) | ||
1648 | 1734 | ||
1649 | #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) | 1735 | #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) |
1650 | #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) | 1736 | #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) |
@@ -1771,6 +1857,30 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1771 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) | 1857 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) |
1772 | #define MULTI_MASK 0x7f | 1858 | #define MULTI_MASK 0x7f |
1773 | 1859 | ||
1860 | |||
1861 | #define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func) | ||
1862 | #define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func) | ||
1863 | #define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func) | ||
1864 | #define DEF_TSB_FUNC_OFF offsetof(struct tstorm_def_status_block, func) | ||
1865 | |||
1866 | #define DEF_USB_IGU_INDEX_OFF \ | ||
1867 | offsetof(struct cstorm_def_status_block_u, igu_index) | ||
1868 | #define DEF_CSB_IGU_INDEX_OFF \ | ||
1869 | offsetof(struct cstorm_def_status_block_c, igu_index) | ||
1870 | #define DEF_XSB_IGU_INDEX_OFF \ | ||
1871 | offsetof(struct xstorm_def_status_block, igu_index) | ||
1872 | #define DEF_TSB_IGU_INDEX_OFF \ | ||
1873 | offsetof(struct tstorm_def_status_block, igu_index) | ||
1874 | |||
1875 | #define DEF_USB_SEGMENT_OFF \ | ||
1876 | offsetof(struct cstorm_def_status_block_u, segment) | ||
1877 | #define DEF_CSB_SEGMENT_OFF \ | ||
1878 | offsetof(struct cstorm_def_status_block_c, segment) | ||
1879 | #define DEF_XSB_SEGMENT_OFF \ | ||
1880 | offsetof(struct xstorm_def_status_block, segment) | ||
1881 | #define DEF_TSB_SEGMENT_OFF \ | ||
1882 | offsetof(struct tstorm_def_status_block, segment) | ||
1883 | |||
1774 | #define BNX2X_SP_DSB_INDEX \ | 1884 | #define BNX2X_SP_DSB_INDEX \ |
1775 | (&bp->def_status_blk->sp_sb.\ | 1885 | (&bp->def_status_blk->sp_sb.\ |
1776 | index_values[HC_SP_INDEX_ETH_DEF_CONS]) | 1886 | index_values[HC_SP_INDEX_ETH_DEF_CONS]) |
@@ -1782,7 +1892,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1782 | } while (0) | 1892 | } while (0) |
1783 | 1893 | ||
1784 | #define GET_FLAG(value, mask) \ | 1894 | #define GET_FLAG(value, mask) \ |
1785 | (((value) &= (mask)) >> (mask##_SHIFT)) | 1895 | (((value) & (mask)) >> (mask##_SHIFT)) |
1786 | 1896 | ||
1787 | #define GET_FIELD(value, fname) \ | 1897 | #define GET_FIELD(value, fname) \ |
1788 | (((value) & (fname##_MASK)) >> (fname##_SHIFT)) | 1898 | (((value) & (fname##_MASK)) >> (fname##_SHIFT)) |
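The GET_FLAG change above is a behavioural fix, not a cleanup: the old '&=' form modified its first argument as a side effect, so extracting a second field from the same variable afterwards gave the wrong result. A minimal illustration with hypothetical masks, named per the driver's <name>_MASK / <name>_MASK_SHIFT convention so GET_FLAG's token pasting resolves:

#define FOO_MASK	0x00f0
#define FOO_MASK_SHIFT	4
#define BAR_MASK	0x000f
#define BAR_MASK_SHIFT	0

	u16 flags = 0x00a5;
	u16 foo, bar;

	/* Fixed macro: 'flags' stays 0x00a5 and both reads are correct. */
	foo = GET_FLAG(flags, FOO_MASK);	/* 0xa */
	bar = GET_FLAG(flags, BAR_MASK);	/* 0x5 */

	/* With the old '&=' form the first call rewrote 'flags' to 0x00a0,
	 * so the second call would have returned 0 instead of 0x5. */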
@@ -1817,14 +1927,12 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1817 | #define HC_SEG_ACCESS_ATTN 4 | 1927 | #define HC_SEG_ACCESS_ATTN 4 |
1818 | #define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/ | 1928 | #define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/ |
1819 | 1929 | ||
1820 | #ifdef BNX2X_MAIN | 1930 | static const u32 dmae_reg_go_c[] = { |
1821 | #define BNX2X_EXTERN | 1931 | DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, |
1822 | #else | 1932 | DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, |
1823 | #define BNX2X_EXTERN extern | 1933 | DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, |
1824 | #endif | 1934 | DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 |
1825 | 1935 | }; | |
1826 | BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */ | ||
1827 | |||
1828 | extern void bnx2x_set_ethtool_ops(struct net_device *netdev); | ||
1829 | 1936 | ||
1937 | void bnx2x_set_ethtool_ops(struct net_device *netdev); | ||
1830 | #endif /* bnx2x.h */ | 1938 | #endif /* bnx2x.h */ |
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index c72e1df04728..ebd8b1cdd58c 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include "bnx2x_init.h" | 27 | #include "bnx2x_init.h" |
28 | #include "bnx2x_sp.h" | 28 | #include "bnx2x_sp.h" |
29 | 29 | ||
30 | static int bnx2x_setup_irqs(struct bnx2x *bp); | 30 | |
31 | 31 | ||
32 | /** | 32 | /** |
33 | * bnx2x_bz_fp - zero content of the fastpath structure. | 33 | * bnx2x_bz_fp - zero content of the fastpath structure. |
@@ -72,6 +72,8 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) | |||
72 | to_fp->napi = orig_napi; | 72 | to_fp->napi = orig_napi; |
73 | } | 73 | } |
74 | 74 | ||
75 | int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ | ||
76 | |||
75 | /* free skb in the packet ring at pos idx | 77 | /* free skb in the packet ring at pos idx |
76 | * return idx of last bd freed | 78 | * return idx of last bd freed |
77 | */ | 79 | */ |
@@ -88,8 +90,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
88 | /* prefetch skb end pointer to speedup dev_kfree_skb() */ | 90 | /* prefetch skb end pointer to speedup dev_kfree_skb() */ |
89 | prefetch(&skb->end); | 91 | prefetch(&skb->end); |
90 | 92 | ||
91 | DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", | 93 | DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", |
92 | idx, tx_buf, skb); | 94 | fp->index, idx, tx_buf, skb); |
93 | 95 | ||
94 | /* unmap first bd */ | 96 | /* unmap first bd */ |
95 | DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); | 97 | DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); |
@@ -97,6 +99,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
97 | dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), | 99 | dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), |
98 | BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); | 100 | BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); |
99 | 101 | ||
102 | |||
100 | nbd = le16_to_cpu(tx_start_bd->nbd) - 1; | 103 | nbd = le16_to_cpu(tx_start_bd->nbd) - 1; |
101 | #ifdef BNX2X_STOP_ON_ERROR | 104 | #ifdef BNX2X_STOP_ON_ERROR |
102 | if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { | 105 | if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { |
@@ -175,6 +178,9 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp) | |||
175 | * memory barrier, there is a small possibility that | 178 | * memory barrier, there is a small possibility that |
176 | * start_xmit() will miss it and cause the queue to be stopped | 179 | * start_xmit() will miss it and cause the queue to be stopped |
177 | * forever. | 180 | * forever. |
181 | * On the other hand we need an rmb() here to ensure the proper | ||
182 | * ordering of bit testing in the following | ||
183 | * netif_tx_queue_stopped(txq) call. | ||
178 | */ | 184 | */ |
179 | smp_mb(); | 185 | smp_mb(); |
180 | 186 | ||
@@ -226,7 +232,7 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, | |||
226 | 232 | ||
227 | /* First mark all used pages */ | 233 | /* First mark all used pages */ |
228 | for (i = 0; i < sge_len; i++) | 234 | for (i = 0; i < sge_len; i++) |
229 | SGE_MASK_CLEAR_BIT(fp, | 235 | BIT_VEC64_CLEAR_BIT(fp->sge_mask, |
230 | RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i]))); | 236 | RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i]))); |
231 | 237 | ||
232 | DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", | 238 | DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", |
@@ -238,8 +244,8 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, | |||
238 | le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1])); | 244 | le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1])); |
239 | 245 | ||
240 | last_max = RX_SGE(fp->last_max_sge); | 246 | last_max = RX_SGE(fp->last_max_sge); |
241 | last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT; | 247 | last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; |
242 | first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT; | 248 | first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; |
243 | 249 | ||
244 | /* If ring is not full */ | 250 | /* If ring is not full */ |
245 | if (last_elem + 1 != first_elem) | 251 | if (last_elem + 1 != first_elem) |
@@ -250,8 +256,8 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, | |||
250 | if (likely(fp->sge_mask[i])) | 256 | if (likely(fp->sge_mask[i])) |
251 | break; | 257 | break; |
252 | 258 | ||
253 | fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK; | 259 | fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; |
254 | delta += RX_SGE_MASK_ELEM_SZ; | 260 | delta += BIT_VEC64_ELEM_SZ; |
255 | } | 261 | } |
256 | 262 | ||
257 | if (delta > 0) { | 263 | if (delta > 0) { |
@@ -266,33 +272,56 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, | |||
266 | } | 272 | } |
267 | 273 | ||
268 | static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, | 274 | static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, |
269 | struct sk_buff *skb, u16 cons, u16 prod) | 275 | struct sk_buff *skb, u16 cons, u16 prod, |
276 | struct eth_fast_path_rx_cqe *cqe) | ||
270 | { | 277 | { |
271 | struct bnx2x *bp = fp->bp; | 278 | struct bnx2x *bp = fp->bp; |
272 | struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; | 279 | struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; |
273 | struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; | 280 | struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; |
274 | struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; | 281 | struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; |
275 | dma_addr_t mapping; | 282 | dma_addr_t mapping; |
283 | struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; | ||
284 | struct sw_rx_bd *first_buf = &tpa_info->first_buf; | ||
276 | 285 | ||
277 | /* move empty skb from pool to prod and map it */ | 286 | /* print error if current state != stop */ |
278 | prod_rx_buf->skb = fp->tpa_pool[queue].skb; | 287 | if (tpa_info->tpa_state != BNX2X_TPA_STOP) |
279 | mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data, | ||
280 | fp->rx_buf_size, DMA_FROM_DEVICE); | ||
281 | dma_unmap_addr_set(prod_rx_buf, mapping, mapping); | ||
282 | |||
283 | /* move partial skb from cons to pool (don't unmap yet) */ | ||
284 | fp->tpa_pool[queue] = *cons_rx_buf; | ||
285 | |||
286 | /* mark bin state as start - print error if current state != stop */ | ||
287 | if (fp->tpa_state[queue] != BNX2X_TPA_STOP) | ||
288 | BNX2X_ERR("start of bin not in stop [%d]\n", queue); | 288 | BNX2X_ERR("start of bin not in stop [%d]\n", queue); |
289 | 289 | ||
290 | fp->tpa_state[queue] = BNX2X_TPA_START; | 290 | /* Try to map an empty skb from the aggregation info */ |
291 | mapping = dma_map_single(&bp->pdev->dev, | ||
292 | first_buf->skb->data, | ||
293 | fp->rx_buf_size, DMA_FROM_DEVICE); | ||
294 | /* | ||
295 | * ...if it fails - move the skb from the consumer to the producer | ||
296 | * and set the current aggregation state as ERROR to drop it | ||
297 | * when TPA_STOP arrives. | ||
298 | */ | ||
299 | |||
300 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | ||
301 | /* Move the BD from the consumer to the producer */ | ||
302 | bnx2x_reuse_rx_skb(fp, cons, prod); | ||
303 | tpa_info->tpa_state = BNX2X_TPA_ERROR; | ||
304 | return; | ||
305 | } | ||
291 | 306 | ||
307 | /* move empty skb from pool to prod */ | ||
308 | prod_rx_buf->skb = first_buf->skb; | ||
309 | dma_unmap_addr_set(prod_rx_buf, mapping, mapping); | ||
292 | /* point prod_bd to new skb */ | 310 | /* point prod_bd to new skb */ |
293 | prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | 311 | prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
294 | prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | 312 | prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
295 | 313 | ||
314 | /* move partial skb from cons to pool (don't unmap yet) */ | ||
315 | *first_buf = *cons_rx_buf; | ||
316 | |||
317 | /* mark bin state as START */ | ||
318 | tpa_info->parsing_flags = | ||
319 | le16_to_cpu(cqe->pars_flags.flags); | ||
320 | tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); | ||
321 | tpa_info->tpa_state = BNX2X_TPA_START; | ||
322 | tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); | ||
323 | tpa_info->placement_offset = cqe->placement_offset; | ||
324 | |||
296 | #ifdef BNX2X_STOP_ON_ERROR | 325 | #ifdef BNX2X_STOP_ON_ERROR |
297 | fp->tpa_queue_used |= (1 << queue); | 326 | fp->tpa_queue_used |= (1 << queue); |
298 | #ifdef _ASM_GENERIC_INT_L64_H | 327 | #ifdef _ASM_GENERIC_INT_L64_H |
@@ -323,10 +352,17 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, | |||
323 | static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, | 352 | static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, |
324 | u16 len_on_bd) | 353 | u16 len_on_bd) |
325 | { | 354 | { |
326 | /* TPA arrgregation won't have an IP options and TCP options | 355 | /* |
327 | * other than timestamp. | 356 | * TPA aggregation won't have either IP options or TCP options
357 | * other than timestamp or IPv6 extension headers. | ||
328 | */ | 358 | */ |
329 | u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr); | 359 | u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr); |
360 | |||
361 | if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == | ||
362 | PRS_FLAG_OVERETH_IPV6) | ||
363 | hdrs_len += sizeof(struct ipv6hdr); | ||
364 | else /* IPv4 */ | ||
365 | hdrs_len += sizeof(struct iphdr); | ||
330 | 366 | ||
331 | 367 | ||
332 | /* Check if there was a TCP timestamp, if there is it's will | 368 | /* Check if there was a TCP timestamp, if there is it's will |
@@ -341,30 +377,30 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, | |||
341 | } | 377 | } |
342 | 378 | ||
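As a worked example of the header-length accounting above, and assuming the rest of bnx2x_set_lro_mss (outside this hunk) returns len_on_bd minus hdrs_len with the TCP timestamp option counted as its 12-byte aligned form: an IPv4 aggregation carrying timestamps with len_on_bd = 1514 gives hdrs_len = 14 + 20 + 20 + 12 = 66, so the reported gso_size is 1448, the expected MSS for a 1500-byte MTU with timestamps enabled.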
343 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | 379 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, |
344 | struct sk_buff *skb, | 380 | u16 queue, struct sk_buff *skb, |
345 | struct eth_fast_path_rx_cqe *fp_cqe, | 381 | struct eth_end_agg_rx_cqe *cqe, |
346 | u16 cqe_idx, u16 parsing_flags) | 382 | u16 cqe_idx) |
347 | { | 383 | { |
348 | struct sw_rx_page *rx_pg, old_rx_pg; | 384 | struct sw_rx_page *rx_pg, old_rx_pg; |
349 | u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); | ||
350 | u32 i, frag_len, frag_size, pages; | 385 | u32 i, frag_len, frag_size, pages; |
351 | int err; | 386 | int err; |
352 | int j; | 387 | int j; |
388 | struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; | ||
389 | u16 len_on_bd = tpa_info->len_on_bd; | ||
353 | 390 | ||
354 | frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd; | 391 | frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd; |
355 | pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; | 392 | pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; |
356 | 393 | ||
357 | /* This is needed in order to enable forwarding support */ | 394 | /* This is needed in order to enable forwarding support */ |
358 | if (frag_size) | 395 | if (frag_size) |
359 | skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags, | 396 | skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, |
360 | len_on_bd); | 397 | tpa_info->parsing_flags, len_on_bd); |
361 | 398 | ||
362 | #ifdef BNX2X_STOP_ON_ERROR | 399 | #ifdef BNX2X_STOP_ON_ERROR |
363 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { | 400 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { |
364 | BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n", | 401 | BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n", |
365 | pages, cqe_idx); | 402 | pages, cqe_idx); |
366 | BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n", | 403 | BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len); |
367 | fp_cqe->pkt_len, len_on_bd); | ||
368 | bnx2x_panic(); | 404 | bnx2x_panic(); |
369 | return -EINVAL; | 405 | return -EINVAL; |
370 | } | 406 | } |
@@ -372,8 +408,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
372 | 408 | ||
373 | /* Run through the SGL and compose the fragmented skb */ | 409 | /* Run through the SGL and compose the fragmented skb */ |
374 | for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { | 410 | for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { |
375 | u16 sge_idx = | 411 | u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j])); |
376 | RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j])); | ||
377 | 412 | ||
378 | /* FW gives the indices of the SGE as if the ring is an array | 413 | /* FW gives the indices of the SGE as if the ring is an array |
379 | (meaning that "next" element will consume 2 indices) */ | 414 | (meaning that "next" element will consume 2 indices) */ |
@@ -408,13 +443,28 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
408 | } | 443 | } |
409 | 444 | ||
410 | static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | 445 | static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, |
411 | u16 queue, int pad, int len, union eth_rx_cqe *cqe, | 446 | u16 queue, struct eth_end_agg_rx_cqe *cqe, |
412 | u16 cqe_idx) | 447 | u16 cqe_idx) |
413 | { | 448 | { |
414 | struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue]; | 449 | struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; |
450 | struct sw_rx_bd *rx_buf = &tpa_info->first_buf; | ||
451 | u8 pad = tpa_info->placement_offset; | ||
452 | u16 len = tpa_info->len_on_bd; | ||
415 | struct sk_buff *skb = rx_buf->skb; | 453 | struct sk_buff *skb = rx_buf->skb; |
416 | /* alloc new skb */ | 454 | /* alloc new skb */ |
417 | struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); | 455 | struct sk_buff *new_skb; |
456 | u8 old_tpa_state = tpa_info->tpa_state; | ||
457 | |||
458 | tpa_info->tpa_state = BNX2X_TPA_STOP; | ||
459 | |||
460 | /* If there was an error during the handling of the TPA_START - | ||
461 | * drop this aggregation. | ||
462 | */ | ||
463 | if (old_tpa_state == BNX2X_TPA_ERROR) | ||
464 | goto drop; | ||
465 | |||
466 | /* Try to allocate the new skb */ | ||
467 | new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); | ||
418 | 468 | ||
419 | /* Unmap skb in the pool anyway, as we are going to change | 469 | /* Unmap skb in the pool anyway, as we are going to change |
420 | pool entry status to BNX2X_TPA_STOP even if new skb allocation | 470 | pool entry status to BNX2X_TPA_STOP even if new skb allocation |
@@ -423,11 +473,6 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
423 | fp->rx_buf_size, DMA_FROM_DEVICE); | 473 | fp->rx_buf_size, DMA_FROM_DEVICE); |
424 | 474 | ||
425 | if (likely(new_skb)) { | 475 | if (likely(new_skb)) { |
426 | /* fix ip xsum and give it to the stack */ | ||
427 | /* (no need to map the new skb) */ | ||
428 | u16 parsing_flags = | ||
429 | le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags); | ||
430 | |||
431 | prefetch(skb); | 476 | prefetch(skb); |
432 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); | 477 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); |
433 | 478 | ||
@@ -447,21 +492,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
447 | skb->protocol = eth_type_trans(skb, bp->dev); | 492 | skb->protocol = eth_type_trans(skb, bp->dev); |
448 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 493 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
449 | 494 | ||
450 | { | 495 | if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) { |
451 | struct iphdr *iph; | 496 | if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) |
452 | 497 | __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag); | |
453 | iph = (struct iphdr *)skb->data; | ||
454 | iph->check = 0; | ||
455 | iph->check = ip_fast_csum((u8 *)iph, iph->ihl); | ||
456 | } | ||
457 | |||
458 | if (!bnx2x_fill_frag_skb(bp, fp, skb, | ||
459 | &cqe->fast_path_cqe, cqe_idx, | ||
460 | parsing_flags)) { | ||
461 | if (parsing_flags & PARSING_FLAGS_VLAN) | ||
462 | __vlan_hwaccel_put_tag(skb, | ||
463 | le16_to_cpu(cqe->fast_path_cqe. | ||
464 | vlan_tag)); | ||
465 | napi_gro_receive(&fp->napi, skb); | 498 | napi_gro_receive(&fp->napi, skb); |
466 | } else { | 499 | } else { |
467 | DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" | 500 | DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" |
@@ -471,16 +504,16 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
471 | 504 | ||
472 | 505 | ||
473 | /* put new skb in bin */ | 506 | /* put new skb in bin */ |
474 | fp->tpa_pool[queue].skb = new_skb; | 507 | rx_buf->skb = new_skb; |
475 | 508 | ||
476 | } else { | 509 | return; |
477 | /* else drop the packet and keep the buffer in the bin */ | ||
478 | DP(NETIF_MSG_RX_STATUS, | ||
479 | "Failed to allocate new skb - dropping packet!\n"); | ||
480 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
481 | } | 510 | } |
482 | 511 | ||
483 | fp->tpa_state[queue] = BNX2X_TPA_STOP; | 512 | drop: |
513 | /* drop the packet and keep the buffer in the bin */ | ||
514 | DP(NETIF_MSG_RX_STATUS, | ||
515 | "Failed to allocate or map a new skb - dropping packet!\n"); | ||
516 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
484 | } | 517 | } |
485 | 518 | ||
486 | /* Set Toeplitz hash value in the skb using the value from the | 519 | /* Set Toeplitz hash value in the skb using the value from the |
@@ -534,9 +567,16 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
534 | struct sw_rx_bd *rx_buf = NULL; | 567 | struct sw_rx_bd *rx_buf = NULL; |
535 | struct sk_buff *skb; | 568 | struct sk_buff *skb; |
536 | union eth_rx_cqe *cqe; | 569 | union eth_rx_cqe *cqe; |
570 | struct eth_fast_path_rx_cqe *cqe_fp; | ||
537 | u8 cqe_fp_flags; | 571 | u8 cqe_fp_flags; |
572 | enum eth_rx_cqe_type cqe_fp_type; | ||
538 | u16 len, pad; | 573 | u16 len, pad; |
539 | 574 | ||
575 | #ifdef BNX2X_STOP_ON_ERROR | ||
576 | if (unlikely(bp->panic)) | ||
577 | return 0; | ||
578 | #endif | ||
579 | |||
540 | comp_ring_cons = RCQ_BD(sw_comp_cons); | 580 | comp_ring_cons = RCQ_BD(sw_comp_cons); |
541 | bd_prod = RX_BD(bd_prod); | 581 | bd_prod = RX_BD(bd_prod); |
542 | bd_cons = RX_BD(bd_cons); | 582 | bd_cons = RX_BD(bd_cons); |
@@ -549,17 +589,18 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
549 | PAGE_SIZE + 1)); | 589 | PAGE_SIZE + 1)); |
550 | 590 | ||
551 | cqe = &fp->rx_comp_ring[comp_ring_cons]; | 591 | cqe = &fp->rx_comp_ring[comp_ring_cons]; |
552 | cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; | 592 | cqe_fp = &cqe->fast_path_cqe; |
593 | cqe_fp_flags = cqe_fp->type_error_flags; | ||
594 | cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; | ||
553 | 595 | ||
554 | DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" | 596 | DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" |
555 | " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), | 597 | " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), |
556 | cqe_fp_flags, cqe->fast_path_cqe.status_flags, | 598 | cqe_fp_flags, cqe_fp->status_flags, |
557 | le32_to_cpu(cqe->fast_path_cqe.rss_hash_result), | 599 | le32_to_cpu(cqe_fp->rss_hash_result), |
558 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag), | 600 | le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len)); |
559 | le16_to_cpu(cqe->fast_path_cqe.pkt_len)); | ||
560 | 601 | ||
561 | /* is this a slowpath msg? */ | 602 | /* is this a slowpath msg? */ |
562 | if (unlikely(CQE_TYPE(cqe_fp_flags))) { | 603 | if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) { |
563 | bnx2x_sp_event(fp, cqe); | 604 | bnx2x_sp_event(fp, cqe); |
564 | goto next_cqe; | 605 | goto next_cqe; |
565 | 606 | ||
@@ -568,61 +609,59 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
568 | rx_buf = &fp->rx_buf_ring[bd_cons]; | 609 | rx_buf = &fp->rx_buf_ring[bd_cons]; |
569 | skb = rx_buf->skb; | 610 | skb = rx_buf->skb; |
570 | prefetch(skb); | 611 | prefetch(skb); |
571 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); | ||
572 | pad = cqe->fast_path_cqe.placement_offset; | ||
573 | 612 | ||
574 | /* - If CQE is marked both TPA_START and TPA_END it is | 613 | if (!CQE_TYPE_FAST(cqe_fp_type)) { |
575 | * a non-TPA CQE. | 614 | #ifdef BNX2X_STOP_ON_ERROR |
576 | * - FP CQE will always have either TPA_START or/and | 615 | /* sanity check */ |
577 | * TPA_STOP flags set. | 616 | if (fp->disable_tpa && |
578 | */ | 617 | (CQE_TYPE_START(cqe_fp_type) || |
579 | if ((!fp->disable_tpa) && | 618 | CQE_TYPE_STOP(cqe_fp_type))) |
580 | (TPA_TYPE(cqe_fp_flags) != | 619 | BNX2X_ERR("START/STOP packet while " |
581 | (TPA_TYPE_START | TPA_TYPE_END))) { | 620 | "disable_tpa type %x\n", |
582 | u16 queue = cqe->fast_path_cqe.queue_index; | 621 | CQE_TYPE(cqe_fp_type)); |
622 | #endif | ||
583 | 623 | ||
584 | if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) { | 624 | if (CQE_TYPE_START(cqe_fp_type)) { |
625 | u16 queue = cqe_fp->queue_index; | ||
585 | DP(NETIF_MSG_RX_STATUS, | 626 | DP(NETIF_MSG_RX_STATUS, |
586 | "calling tpa_start on queue %d\n", | 627 | "calling tpa_start on queue %d\n", |
587 | queue); | 628 | queue); |
588 | 629 | ||
589 | bnx2x_tpa_start(fp, queue, skb, | 630 | bnx2x_tpa_start(fp, queue, skb, |
590 | bd_cons, bd_prod); | 631 | bd_cons, bd_prod, |
632 | cqe_fp); | ||
591 | 633 | ||
592 | /* Set Toeplitz hash for an LRO skb */ | 634 | /* Set Toeplitz hash for LRO skb */ |
593 | bnx2x_set_skb_rxhash(bp, cqe, skb); | 635 | bnx2x_set_skb_rxhash(bp, cqe, skb); |
594 | 636 | ||
595 | goto next_rx; | 637 | goto next_rx; |
596 | } else { /* TPA_STOP */ | 638 | |
639 | } else { | ||
640 | u16 queue = | ||
641 | cqe->end_agg_cqe.queue_index; | ||
597 | DP(NETIF_MSG_RX_STATUS, | 642 | DP(NETIF_MSG_RX_STATUS, |
598 | "calling tpa_stop on queue %d\n", | 643 | "calling tpa_stop on queue %d\n", |
599 | queue); | 644 | queue); |
600 | 645 | ||
601 | if (!BNX2X_RX_SUM_FIX(cqe)) | 646 | bnx2x_tpa_stop(bp, fp, queue, |
602 | BNX2X_ERR("STOP on none TCP " | 647 | &cqe->end_agg_cqe, |
603 | "data\n"); | 648 | comp_ring_cons); |
604 | |||
605 | /* This is a size of the linear data | ||
606 | on this skb */ | ||
607 | len = le16_to_cpu(cqe->fast_path_cqe. | ||
608 | len_on_bd); | ||
609 | bnx2x_tpa_stop(bp, fp, queue, pad, | ||
610 | len, cqe, comp_ring_cons); | ||
611 | #ifdef BNX2X_STOP_ON_ERROR | 649 | #ifdef BNX2X_STOP_ON_ERROR |
612 | if (bp->panic) | 650 | if (bp->panic) |
613 | return 0; | 651 | return 0; |
614 | #endif | 652 | #endif |
615 | 653 | ||
616 | bnx2x_update_sge_prod(fp, | 654 | bnx2x_update_sge_prod(fp, cqe_fp); |
617 | &cqe->fast_path_cqe); | ||
618 | goto next_cqe; | 655 | goto next_cqe; |
619 | } | 656 | } |
620 | } | 657 | } |
621 | 658 | /* non TPA */ | |
659 | len = le16_to_cpu(cqe_fp->pkt_len); | ||
660 | pad = cqe_fp->placement_offset; | ||
622 | dma_sync_single_for_device(&bp->pdev->dev, | 661 | dma_sync_single_for_device(&bp->pdev->dev, |
623 | dma_unmap_addr(rx_buf, mapping), | 662 | dma_unmap_addr(rx_buf, mapping), |
624 | pad + RX_COPY_THRESH, | 663 | pad + RX_COPY_THRESH, |
625 | DMA_FROM_DEVICE); | 664 | DMA_FROM_DEVICE); |
626 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); | 665 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); |
627 | 666 | ||
628 | /* is this an error packet? */ | 667 | /* is this an error packet? */ |
@@ -641,8 +680,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
641 | (len <= RX_COPY_THRESH)) { | 680 | (len <= RX_COPY_THRESH)) { |
642 | struct sk_buff *new_skb; | 681 | struct sk_buff *new_skb; |
643 | 682 | ||
644 | new_skb = netdev_alloc_skb(bp->dev, | 683 | new_skb = netdev_alloc_skb(bp->dev, len + pad); |
645 | len + pad); | ||
646 | if (new_skb == NULL) { | 684 | if (new_skb == NULL) { |
647 | DP(NETIF_MSG_RX_ERR, | 685 | DP(NETIF_MSG_RX_ERR, |
648 | "ERROR packet dropped " | 686 | "ERROR packet dropped " |
@@ -688,6 +726,7 @@ reuse_rx: | |||
688 | skb_checksum_none_assert(skb); | 726 | skb_checksum_none_assert(skb); |
689 | 727 | ||
690 | if (bp->dev->features & NETIF_F_RXCSUM) { | 728 | if (bp->dev->features & NETIF_F_RXCSUM) { |
729 | |||
691 | if (likely(BNX2X_RX_CSUM_OK(cqe))) | 730 | if (likely(BNX2X_RX_CSUM_OK(cqe))) |
692 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 731 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
693 | else | 732 | else |
@@ -697,10 +736,10 @@ reuse_rx: | |||
697 | 736 | ||
698 | skb_record_rx_queue(skb, fp->index); | 737 | skb_record_rx_queue(skb, fp->index); |
699 | 738 | ||
700 | if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & | 739 | if (le16_to_cpu(cqe_fp->pars_flags.flags) & |
701 | PARSING_FLAGS_VLAN) | 740 | PARSING_FLAGS_VLAN) |
702 | __vlan_hwaccel_put_tag(skb, | 741 | __vlan_hwaccel_put_tag(skb, |
703 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag)); | 742 | le16_to_cpu(cqe_fp->vlan_tag)); |
704 | napi_gro_receive(&fp->napi, skb); | 743 | napi_gro_receive(&fp->napi, skb); |
705 | 744 | ||
706 | 745 | ||
@@ -926,7 +965,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
926 | { | 965 | { |
927 | int func = BP_FUNC(bp); | 966 | int func = BP_FUNC(bp); |
928 | int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : | 967 | int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : |
929 | ETH_MAX_AGGREGATION_QUEUES_E1H; | 968 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2; |
930 | u16 ring_prod; | 969 | u16 ring_prod; |
931 | int i, j; | 970 | int i, j; |
932 | 971 | ||
@@ -938,11 +977,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
938 | "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); | 977 | "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); |
939 | 978 | ||
940 | if (!fp->disable_tpa) { | 979 | if (!fp->disable_tpa) { |
941 | /* Fill the per-aggregation pool */ | 980 | /* Fill the per-aggregation pool */
942 | for (i = 0; i < max_agg_queues; i++) { | 981 | for (i = 0; i < max_agg_queues; i++) { |
943 | fp->tpa_pool[i].skb = | 982 | struct bnx2x_agg_info *tpa_info = |
944 | netdev_alloc_skb(bp->dev, fp->rx_buf_size); | 983 | &fp->tpa_info[i]; |
945 | if (!fp->tpa_pool[i].skb) { | 984 | struct sw_rx_bd *first_buf = |
985 | &tpa_info->first_buf; | ||
986 | |||
987 | first_buf->skb = netdev_alloc_skb(bp->dev, | ||
988 | fp->rx_buf_size); | ||
989 | if (!first_buf->skb) { | ||
946 | BNX2X_ERR("Failed to allocate TPA " | 990 | BNX2X_ERR("Failed to allocate TPA " |
947 | "skb pool for queue[%d] - " | 991 | "skb pool for queue[%d] - " |
948 | "disabling TPA on this " | 992 | "disabling TPA on this " |
@@ -951,10 +995,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
951 | fp->disable_tpa = 1; | 995 | fp->disable_tpa = 1; |
952 | break; | 996 | break; |
953 | } | 997 | } |
954 | dma_unmap_addr_set((struct sw_rx_bd *) | 998 | dma_unmap_addr_set(first_buf, mapping, 0); |
955 | &bp->fp->tpa_pool[i], | 999 | tpa_info->tpa_state = BNX2X_TPA_STOP; |
956 | mapping, 0); | ||
957 | fp->tpa_state[i] = BNX2X_TPA_STOP; | ||
958 | } | 1000 | } |
959 | 1001 | ||
960 | /* "next page" elements initialization */ | 1002 | /* "next page" elements initialization */ |
@@ -970,13 +1012,13 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
970 | if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { | 1012 | if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { |
971 | BNX2X_ERR("was only able to allocate " | 1013 | BNX2X_ERR("was only able to allocate " |
972 | "%d rx sges\n", i); | 1014 | "%d rx sges\n", i); |
973 | BNX2X_ERR("disabling TPA for" | 1015 | BNX2X_ERR("disabling TPA for " |
974 | " queue[%d]\n", j); | 1016 | "queue[%d]\n", j); |
975 | /* Cleanup already allocated elements */ | 1017 | /* Cleanup already allocated elements */ |
976 | bnx2x_free_rx_sge_range(bp, | 1018 | bnx2x_free_rx_sge_range(bp, fp, |
977 | fp, ring_prod); | 1019 | ring_prod); |
978 | bnx2x_free_tpa_pool(bp, | 1020 | bnx2x_free_tpa_pool(bp, fp, |
979 | fp, max_agg_queues); | 1021 | max_agg_queues); |
980 | fp->disable_tpa = 1; | 1022 | fp->disable_tpa = 1; |
981 | ring_prod = 0; | 1023 | ring_prod = 0; |
982 | break; | 1024 | break; |
@@ -1004,7 +1046,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
1004 | if (j != 0) | 1046 | if (j != 0) |
1005 | continue; | 1047 | continue; |
1006 | 1048 | ||
1007 | if (!CHIP_IS_E2(bp)) { | 1049 | if (CHIP_IS_E1(bp)) { |
1008 | REG_WR(bp, BAR_USTRORM_INTMEM + | 1050 | REG_WR(bp, BAR_USTRORM_INTMEM + |
1009 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), | 1051 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), |
1010 | U64_LO(fp->rx_comp_mapping)); | 1052 | U64_LO(fp->rx_comp_mapping)); |
@@ -1048,7 +1090,6 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) | |||
1048 | 1090 | ||
1049 | if (skb == NULL) | 1091 | if (skb == NULL) |
1050 | continue; | 1092 | continue; |
1051 | |||
1052 | dma_unmap_single(&bp->pdev->dev, | 1093 | dma_unmap_single(&bp->pdev->dev, |
1053 | dma_unmap_addr(rx_buf, mapping), | 1094 | dma_unmap_addr(rx_buf, mapping), |
1054 | fp->rx_buf_size, DMA_FROM_DEVICE); | 1095 | fp->rx_buf_size, DMA_FROM_DEVICE); |
@@ -1070,7 +1111,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) | |||
1070 | if (!fp->disable_tpa) | 1111 | if (!fp->disable_tpa) |
1071 | bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? | 1112 | bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? |
1072 | ETH_MAX_AGGREGATION_QUEUES_E1 : | 1113 | ETH_MAX_AGGREGATION_QUEUES_E1 : |
1073 | ETH_MAX_AGGREGATION_QUEUES_E1H); | 1114 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2); |
1074 | } | 1115 | } |
1075 | } | 1116 | } |
1076 | 1117 | ||
@@ -1234,7 +1275,6 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp) | |||
1234 | } | 1275 | } |
1235 | 1276 | ||
1236 | offset++; | 1277 | offset++; |
1237 | fp->state = BNX2X_FP_STATE_IRQ; | ||
1238 | } | 1278 | } |
1239 | 1279 | ||
1240 | i = BNX2X_NUM_ETH_QUEUES(bp); | 1280 | i = BNX2X_NUM_ETH_QUEUES(bp); |
@@ -1274,25 +1314,46 @@ static int bnx2x_req_irq(struct bnx2x *bp) | |||
1274 | 1314 | ||
1275 | rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, | 1315 | rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, |
1276 | bp->dev->name, bp->dev); | 1316 | bp->dev->name, bp->dev); |
1277 | if (!rc) | ||
1278 | bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ; | ||
1279 | |||
1280 | return rc; | 1317 | return rc; |
1281 | } | 1318 | } |
1282 | 1319 | ||
1283 | static void bnx2x_napi_enable(struct bnx2x *bp) | 1320 | static inline int bnx2x_setup_irqs(struct bnx2x *bp) |
1321 | { | ||
1322 | int rc = 0; | ||
1323 | if (bp->flags & USING_MSIX_FLAG) { | ||
1324 | rc = bnx2x_req_msix_irqs(bp); | ||
1325 | if (rc) | ||
1326 | return rc; | ||
1327 | } else { | ||
1328 | bnx2x_ack_int(bp); | ||
1329 | rc = bnx2x_req_irq(bp); | ||
1330 | if (rc) { | ||
1331 | BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); | ||
1332 | return rc; | ||
1333 | } | ||
1334 | if (bp->flags & USING_MSI_FLAG) { | ||
1335 | bp->dev->irq = bp->pdev->irq; | ||
1336 | netdev_info(bp->dev, "using MSI IRQ %d\n", | ||
1337 | bp->pdev->irq); | ||
1338 | } | ||
1339 | } | ||
1340 | |||
1341 | return 0; | ||
1342 | } | ||
1343 | |||
1344 | static inline void bnx2x_napi_enable(struct bnx2x *bp) | ||
1284 | { | 1345 | { |
1285 | int i; | 1346 | int i; |
1286 | 1347 | ||
1287 | for_each_napi_queue(bp, i) | 1348 | for_each_rx_queue(bp, i) |
1288 | napi_enable(&bnx2x_fp(bp, i, napi)); | 1349 | napi_enable(&bnx2x_fp(bp, i, napi)); |
1289 | } | 1350 | } |
1290 | 1351 | ||
1291 | static void bnx2x_napi_disable(struct bnx2x *bp) | 1352 | static inline void bnx2x_napi_disable(struct bnx2x *bp) |
1292 | { | 1353 | { |
1293 | int i; | 1354 | int i; |
1294 | 1355 | ||
1295 | for_each_napi_queue(bp, i) | 1356 | for_each_rx_queue(bp, i) |
1296 | napi_disable(&bnx2x_fp(bp, i, napi)); | 1357 | napi_disable(&bnx2x_fp(bp, i, napi)); |
1297 | } | 1358 | } |
1298 | 1359 | ||
@@ -1310,7 +1371,6 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) | |||
1310 | { | 1371 | { |
1311 | bnx2x_int_disable_sync(bp, disable_hw); | 1372 | bnx2x_int_disable_sync(bp, disable_hw); |
1312 | bnx2x_napi_disable(bp); | 1373 | bnx2x_napi_disable(bp); |
1313 | netif_tx_disable(bp->dev); | ||
1314 | } | 1374 | } |
1315 | 1375 | ||
1316 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) | 1376 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) |
@@ -1361,26 +1421,6 @@ void bnx2x_set_num_queues(struct bnx2x *bp) | |||
1361 | bp->num_queues += NONE_ETH_CONTEXT_USE; | 1421 | bp->num_queues += NONE_ETH_CONTEXT_USE; |
1362 | } | 1422 | } |
1363 | 1423 | ||
1364 | #ifdef BCM_CNIC | ||
1365 | static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp) | ||
1366 | { | ||
1367 | if (!NO_FCOE(bp)) { | ||
1368 | if (!IS_MF_SD(bp)) | ||
1369 | bnx2x_set_fip_eth_mac_addr(bp, 1); | ||
1370 | bnx2x_set_all_enode_macs(bp, 1); | ||
1371 | bp->flags |= FCOE_MACS_SET; | ||
1372 | } | ||
1373 | } | ||
1374 | #endif | ||
1375 | |||
1376 | static void bnx2x_release_firmware(struct bnx2x *bp) | ||
1377 | { | ||
1378 | kfree(bp->init_ops_offsets); | ||
1379 | kfree(bp->init_ops); | ||
1380 | kfree(bp->init_data); | ||
1381 | release_firmware(bp->firmware); | ||
1382 | } | ||
1383 | |||
1384 | static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) | 1424 | static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) |
1385 | { | 1425 | { |
1386 | int rc, num = bp->num_queues; | 1426 | int rc, num = bp->num_queues; |
@@ -1412,27 +1452,198 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) | |||
1412 | */ | 1452 | */ |
1413 | fp->rx_buf_size = | 1453 | fp->rx_buf_size = |
1414 | BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD + | 1454 | BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD + |
1415 | BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; | 1455 | BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; |
1416 | else | 1456 | else |
1417 | fp->rx_buf_size = | 1457 | fp->rx_buf_size = |
1418 | bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN + | 1458 | bp->dev->mtu + ETH_OVREHEAD + |
1419 | IP_HEADER_ALIGNMENT_PADDING; | 1459 | BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; |
1420 | } | 1460 | } |
1421 | } | 1461 | } |
1422 | 1462 | ||
1463 | static inline int bnx2x_init_rss_pf(struct bnx2x *bp) | ||
1464 | { | ||
1465 | int i; | ||
1466 | u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; | ||
1467 | u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); | ||
1468 | |||
1469 | /* | ||
1470 | * Prepare the initial contents of the indirection table if RSS is | ||
1471 | * enabled | ||
1472 | */ | ||
1473 | if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { | ||
1474 | for (i = 0; i < sizeof(ind_table); i++) | ||
1475 | ind_table[i] = | ||
1476 | bp->fp->cl_id + (i % num_eth_queues); | ||
1477 | } | ||
1478 | |||
1479 | /* | ||
1480 | * For 57710 and 57711 SEARCHER configuration (rss_keys) is | ||
1481 | * per-port, so if explicit configuration is needed , do it only | ||
1482 | * for a PMF. | ||
1483 | * | ||
1484 | * For 57712 and newer on the other hand it's a per-function | ||
1485 | * configuration. | ||
1486 | */ | ||
1487 | return bnx2x_config_rss_pf(bp, ind_table, | ||
1488 | bp->port.pmf || !CHIP_IS_E1x(bp)); | ||
1489 | } | ||
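To make the indirection-table loop above concrete (the numbers are purely illustrative): with four ETH queues and a leading client id of 10, the ind_table entries are filled as 10, 11, 12, 13, 10, 11, ... so RSS hash buckets are spread round-robin over the client ids of the ethernet queues; bnx2x_config_rss_pf() below then pushes that table (and, when config_hash is set, freshly randomized RSS keys) through the new rss_conf_obj.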
1490 | |||
1491 | int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) | ||
1492 | { | ||
1493 | struct bnx2x_config_rss_params params = {0}; | ||
1494 | int i; | ||
1495 | |||
1496 | /* Although RSS is meaningless when there is a single HW queue, we | ||
1497 | * still need it enabled in order to have HW Rx hash generated. | ||
1498 | * | ||
1499 | * if (!is_eth_multi(bp)) | ||
1500 | * bp->multi_mode = ETH_RSS_MODE_DISABLED; | ||
1501 | */ | ||
1502 | |||
1503 | params.rss_obj = &bp->rss_conf_obj; | ||
1504 | |||
1505 | __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); | ||
1506 | |||
1507 | /* RSS mode */ | ||
1508 | switch (bp->multi_mode) { | ||
1509 | case ETH_RSS_MODE_DISABLED: | ||
1510 | __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); | ||
1511 | break; | ||
1512 | case ETH_RSS_MODE_REGULAR: | ||
1513 | __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); | ||
1514 | break; | ||
1515 | case ETH_RSS_MODE_VLAN_PRI: | ||
1516 | __set_bit(BNX2X_RSS_MODE_VLAN_PRI, ¶ms.rss_flags); | ||
1517 | break; | ||
1518 | case ETH_RSS_MODE_E1HOV_PRI: | ||
1519 | __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, ¶ms.rss_flags); | ||
1520 | break; | ||
1521 | case ETH_RSS_MODE_IP_DSCP: | ||
1522 | __set_bit(BNX2X_RSS_MODE_IP_DSCP, ¶ms.rss_flags); | ||
1523 | break; | ||
1524 | default: | ||
1525 | BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode); | ||
1526 | return -EINVAL; | ||
1527 | } | ||
1528 | |||
1529 | /* If RSS is enabled */ | ||
1530 | if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { | ||
1531 | /* RSS configuration */ | ||
1532 | __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); | ||
1533 | __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); | ||
1534 | __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); | ||
1535 | __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); | ||
1536 | |||
1537 | /* Hash bits */ | ||
1538 | params.rss_result_mask = MULTI_MASK; | ||
1539 | |||
1540 | memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); | ||
1541 | |||
1542 | if (config_hash) { | ||
1543 | /* RSS keys */ | ||
1544 | for (i = 0; i < sizeof(params.rss_key) / 4; i++) | ||
1545 | params.rss_key[i] = random32(); | ||
1546 | |||
1547 | __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); | ||
1548 | } | ||
1549 | } | ||
1550 | |||
1551 | return bnx2x_config_rss(bp, ¶ms); | ||
1552 | } | ||
1553 | |||
1554 | static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) | ||
1555 | { | ||
1556 | struct bnx2x_func_state_params func_params = {0}; | ||
1557 | |||
1558 | /* Prepare parameters for function state transitions */ | ||
1559 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); | ||
1560 | |||
1561 | func_params.f_obj = &bp->func_obj; | ||
1562 | func_params.cmd = BNX2X_F_CMD_HW_INIT; | ||
1563 | |||
1564 | func_params.params.hw_init.load_phase = load_code; | ||
1565 | |||
1566 | return bnx2x_func_state_change(bp, &func_params); | ||
1567 | } | ||
1568 | |||
1569 | /* | ||
1570 | * Cleans the objects that have internal lists without sending | ||
1571 | * ramrods. Should be run when interrupts are disabled. | ||
1572 | */ | ||
1573 | static void bnx2x_squeeze_objects(struct bnx2x *bp) | ||
1574 | { | ||
1575 | int rc; | ||
1576 | unsigned long ramrod_flags = 0, vlan_mac_flags = 0; | ||
1577 | struct bnx2x_mcast_ramrod_params rparam = {0}; | ||
1578 | struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; | ||
1579 | |||
1580 | /***************** Cleanup MACs' object first *************************/ | ||
1581 | |||
1582 | /* Wait for completion of requested commands */ | ||
1583 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); | ||
1584 | /* Perform a dry cleanup */ | ||
1585 | __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); | ||
1586 | |||
1587 | /* Clean ETH primary MAC */ | ||
1588 | __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); | ||
1589 | rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags, | ||
1590 | &ramrod_flags); | ||
1591 | if (rc != 0) | ||
1592 | BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc); | ||
1593 | |||
1594 | /* Cleanup UC list */ | ||
1595 | vlan_mac_flags = 0; | ||
1596 | __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags); | ||
1597 | rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, | ||
1598 | &ramrod_flags); | ||
1599 | if (rc != 0) | ||
1600 | BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc); | ||
1601 | |||
1602 | /***************** Now clean mcast object *****************************/ | ||
1603 | rparam.mcast_obj = &bp->mcast_obj; | ||
1604 | __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); | ||
1605 | |||
1606 | /* Add a DEL command... */ | ||
1607 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); | ||
1608 | if (rc < 0) | ||
1609 | BNX2X_ERR("Failed to add a new DEL command to a multi-cast " | ||
1610 | "object: %d\n", rc); | ||
1611 | |||
1612 | /* ...and wait until all pending commands are cleared */ | ||
1613 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); | ||
1614 | while (rc != 0) { | ||
1615 | if (rc < 0) { | ||
1616 | BNX2X_ERR("Failed to clean multi-cast object: %d\n", | ||
1617 | rc); | ||
1618 | return; | ||
1619 | } | ||
1620 | |||
1621 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); | ||
1622 | } | ||
1623 | } | ||
1624 | |||
1625 | #ifndef BNX2X_STOP_ON_ERROR | ||
1626 | #define LOAD_ERROR_EXIT(bp, label) \ | ||
1627 | do { \ | ||
1628 | (bp)->state = BNX2X_STATE_ERROR; \ | ||
1629 | goto label; \ | ||
1630 | } while (0) | ||
1631 | #else | ||
1632 | #define LOAD_ERROR_EXIT(bp, label) \ | ||
1633 | do { \ | ||
1634 | (bp)->state = BNX2X_STATE_ERROR; \ | ||
1635 | (bp)->panic = 1; \ | ||
1636 | return -EBUSY; \ | ||
1637 | } while (0) | ||
1638 | #endif | ||
1639 | |||
1423 | /* must be called with rtnl_lock */ | 1640 | /* must be called with rtnl_lock */ |
1424 | int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | 1641 | int bnx2x_nic_load(struct bnx2x *bp, int load_mode) |
1425 | { | 1642 | { |
1643 | int port = BP_PORT(bp); | ||
1426 | u32 load_code; | 1644 | u32 load_code; |
1427 | int i, rc; | 1645 | int i, rc; |
1428 | 1646 | ||
1429 | /* Set init arrays */ | ||
1430 | rc = bnx2x_init_firmware(bp); | ||
1431 | if (rc) { | ||
1432 | BNX2X_ERR("Error loading firmware\n"); | ||
1433 | return rc; | ||
1434 | } | ||
1435 | |||
1436 | #ifdef BNX2X_STOP_ON_ERROR | 1647 | #ifdef BNX2X_STOP_ON_ERROR |
1437 | if (unlikely(bp->panic)) | 1648 | if (unlikely(bp->panic)) |
1438 | return -EPERM; | 1649 | return -EPERM; |
@@ -1459,6 +1670,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1459 | /* Set the receive queues buffer size */ | 1670 | /* Set the receive queues buffer size */ |
1460 | bnx2x_set_rx_buf_size(bp); | 1671 | bnx2x_set_rx_buf_size(bp); |
1461 | 1672 | ||
1673 | /* | ||
1674 | * Set the TPA flag for each queue. The TPA flag determines the minimal | ||
1675 | * queue size, so it must be set prior to queue memory allocation. | ||
1676 | */ | ||
1462 | for_each_queue(bp, i) | 1677 | for_each_queue(bp, i) |
1463 | bnx2x_fp(bp, i, disable_tpa) = | 1678 | bnx2x_fp(bp, i, disable_tpa) = |
1464 | ((bp->flags & TPA_ENABLE_FLAG) == 0); | 1679 | ((bp->flags & TPA_ENABLE_FLAG) == 0); |
@@ -1478,31 +1693,30 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1478 | rc = bnx2x_set_real_num_queues(bp); | 1693 | rc = bnx2x_set_real_num_queues(bp); |
1479 | if (rc) { | 1694 | if (rc) { |
1480 | BNX2X_ERR("Unable to set real_num_queues\n"); | 1695 | BNX2X_ERR("Unable to set real_num_queues\n"); |
1481 | goto load_error0; | 1696 | LOAD_ERROR_EXIT(bp, load_error0); |
1482 | } | 1697 | } |
1483 | 1698 | ||
1484 | bnx2x_napi_enable(bp); | 1699 | bnx2x_napi_enable(bp); |
1485 | 1700 | ||
1486 | /* Send LOAD_REQUEST command to MCP | 1701 | /* Send LOAD_REQUEST command to MCP |
1487 | Returns the type of LOAD command: | 1702 | * Returns the type of LOAD command: |
1488 | if it is the first port to be initialized | 1703 | * if it is the first port to be initialized |
1489 | common blocks should be initialized, otherwise - not | 1704 | * common blocks should be initialized, otherwise - not |
1490 | */ | 1705 | */ |
1491 | if (!BP_NOMCP(bp)) { | 1706 | if (!BP_NOMCP(bp)) { |
1492 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); | 1707 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); |
1493 | if (!load_code) { | 1708 | if (!load_code) { |
1494 | BNX2X_ERR("MCP response failure, aborting\n"); | 1709 | BNX2X_ERR("MCP response failure, aborting\n"); |
1495 | rc = -EBUSY; | 1710 | rc = -EBUSY; |
1496 | goto load_error1; | 1711 | LOAD_ERROR_EXIT(bp, load_error1); |
1497 | } | 1712 | } |
1498 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { | 1713 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { |
1499 | rc = -EBUSY; /* other port in diagnostic mode */ | 1714 | rc = -EBUSY; /* other port in diagnostic mode */ |
1500 | goto load_error1; | 1715 | LOAD_ERROR_EXIT(bp, load_error1); |
1501 | } | 1716 | } |
1502 | 1717 | ||
1503 | } else { | 1718 | } else { |
1504 | int path = BP_PATH(bp); | 1719 | int path = BP_PATH(bp); |
1505 | int port = BP_PORT(bp); | ||
1506 | 1720 | ||
1507 | DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", | 1721 | DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", |
1508 | path, load_count[path][0], load_count[path][1], | 1722 | path, load_count[path][0], load_count[path][1], |
@@ -1528,30 +1742,45 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1528 | bp->port.pmf = 0; | 1742 | bp->port.pmf = 0; |
1529 | DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); | 1743 | DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); |
1530 | 1744 | ||
1745 | /* Init Function state controlling object */ | ||
1746 | bnx2x__init_func_obj(bp); | ||
1747 | |||
1531 | /* Initialize HW */ | 1748 | /* Initialize HW */ |
1532 | rc = bnx2x_init_hw(bp, load_code); | 1749 | rc = bnx2x_init_hw(bp, load_code); |
1533 | if (rc) { | 1750 | if (rc) { |
1534 | BNX2X_ERR("HW init failed, aborting\n"); | 1751 | BNX2X_ERR("HW init failed, aborting\n"); |
1535 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); | 1752 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); |
1536 | goto load_error2; | 1753 | LOAD_ERROR_EXIT(bp, load_error2); |
1537 | } | 1754 | } |
1538 | 1755 | ||
1539 | /* Connect to IRQs */ | 1756 | /* Connect to IRQs */ |
1540 | rc = bnx2x_setup_irqs(bp); | 1757 | rc = bnx2x_setup_irqs(bp); |
1541 | if (rc) { | 1758 | if (rc) { |
1542 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); | 1759 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); |
1543 | goto load_error2; | 1760 | LOAD_ERROR_EXIT(bp, load_error2); |
1544 | } | 1761 | } |
1545 | 1762 | ||
1546 | /* Setup NIC internals and enable interrupts */ | 1763 | /* Setup NIC internals and enable interrupts */ |
1547 | bnx2x_nic_init(bp, load_code); | 1764 | bnx2x_nic_init(bp, load_code); |
1548 | 1765 | ||
1766 | /* Init per-function objects */ | ||
1767 | bnx2x_init_bp_objs(bp); | ||
1768 | |||
1549 | if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || | 1769 | if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || |
1550 | (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && | 1770 | (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && |
1551 | (bp->common.shmem2_base)) | 1771 | (bp->common.shmem2_base)) { |
1552 | SHMEM2_WR(bp, dcc_support, | 1772 | if (SHMEM2_HAS(bp, dcc_support)) |
1553 | (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | | 1773 | SHMEM2_WR(bp, dcc_support, |
1554 | SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); | 1774 | (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | |
1775 | SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); | ||
1776 | } | ||
1777 | |||
1778 | bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; | ||
1779 | rc = bnx2x_func_start(bp); | ||
1780 | if (rc) { | ||
1781 | BNX2X_ERR("Function start failed!\n"); | ||
1782 | LOAD_ERROR_EXIT(bp, load_error3); | ||
1783 | } | ||
1555 | 1784 | ||
1556 | /* Send LOAD_DONE command to MCP */ | 1785 | /* Send LOAD_DONE command to MCP */ |
1557 | if (!BP_NOMCP(bp)) { | 1786 | if (!BP_NOMCP(bp)) { |
@@ -1559,74 +1788,38 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1559 | if (!load_code) { | 1788 | if (!load_code) { |
1560 | BNX2X_ERR("MCP response failure, aborting\n"); | 1789 | BNX2X_ERR("MCP response failure, aborting\n"); |
1561 | rc = -EBUSY; | 1790 | rc = -EBUSY; |
1562 | goto load_error3; | 1791 | LOAD_ERROR_EXIT(bp, load_error3); |
1563 | } | 1792 | } |
1564 | } | 1793 | } |
1565 | 1794 | ||
1566 | bnx2x_dcbx_init(bp); | 1795 | rc = bnx2x_setup_leading(bp); |
1567 | |||
1568 | bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; | ||
1569 | |||
1570 | rc = bnx2x_func_start(bp); | ||
1571 | if (rc) { | ||
1572 | BNX2X_ERR("Function start failed!\n"); | ||
1573 | #ifndef BNX2X_STOP_ON_ERROR | ||
1574 | goto load_error3; | ||
1575 | #else | ||
1576 | bp->panic = 1; | ||
1577 | return -EBUSY; | ||
1578 | #endif | ||
1579 | } | ||
1580 | |||
1581 | rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */); | ||
1582 | if (rc) { | 1796 | if (rc) { |
1583 | BNX2X_ERR("Setup leading failed!\n"); | 1797 | BNX2X_ERR("Setup leading failed!\n"); |
1584 | #ifndef BNX2X_STOP_ON_ERROR | 1798 | LOAD_ERROR_EXIT(bp, load_error3); |
1585 | goto load_error3; | ||
1586 | #else | ||
1587 | bp->panic = 1; | ||
1588 | return -EBUSY; | ||
1589 | #endif | ||
1590 | } | ||
1591 | |||
1592 | if (!CHIP_IS_E1(bp) && | ||
1593 | (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) { | ||
1594 | DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); | ||
1595 | bp->flags |= MF_FUNC_DIS; | ||
1596 | } | 1799 | } |
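The open-coded error paths removed above are collapsed into a LOAD_ERROR_EXIT() helper. Its likely shape, inferred from the #ifndef BNX2X_STOP_ON_ERROR blocks it replaces (the real definition lives in a header outside this hunk), would be:

    #ifndef BNX2X_STOP_ON_ERROR
    #define LOAD_ERROR_EXIT(bp, label)          \
            do {                                \
                    goto label;                 \
            } while (0)
    #else
    #define LOAD_ERROR_EXIT(bp, label)          \
            do {                                \
                    (bp)->panic = 1;            \
                    return -EBUSY;              \
            } while (0)
    #endif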
1597 | 1800 | ||
1598 | #ifdef BCM_CNIC | 1801 | #ifdef BCM_CNIC |
1599 | /* Enable Timer scan */ | 1802 | /* Enable Timer scan */ |
1600 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1); | 1803 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); |
1601 | #endif | 1804 | #endif |
1602 | 1805 | ||
1603 | for_each_nondefault_queue(bp, i) { | 1806 | for_each_nondefault_queue(bp, i) { |
1604 | rc = bnx2x_setup_client(bp, &bp->fp[i], 0); | 1807 | rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); |
1605 | if (rc) | 1808 | if (rc) |
1606 | #ifdef BCM_CNIC | 1809 | LOAD_ERROR_EXIT(bp, load_error4); |
1607 | goto load_error4; | ||
1608 | #else | ||
1609 | goto load_error3; | ||
1610 | #endif | ||
1611 | } | 1810 | } |
1612 | 1811 | ||
1812 | rc = bnx2x_init_rss_pf(bp); | ||
1813 | if (rc) | ||
1814 | LOAD_ERROR_EXIT(bp, load_error4); | ||
1815 | |||
1613 | /* Now when Clients are configured we are ready to work */ | 1816 | /* Now when Clients are configured we are ready to work */ |
1614 | bp->state = BNX2X_STATE_OPEN; | 1817 | bp->state = BNX2X_STATE_OPEN; |
1615 | 1818 | ||
1616 | #ifdef BCM_CNIC | 1819 | /* Configure a ucast MAC */ |
1617 | bnx2x_set_fcoe_eth_macs(bp); | 1820 | rc = bnx2x_set_eth_mac(bp, true); |
1618 | #endif | 1821 | if (rc) |
1619 | 1822 | LOAD_ERROR_EXIT(bp, load_error4); | |
1620 | bnx2x_set_eth_mac(bp, 1); | ||
1621 | |||
1622 | /* Clear MC configuration */ | ||
1623 | if (CHIP_IS_E1(bp)) | ||
1624 | bnx2x_invalidate_e1_mc_list(bp); | ||
1625 | else | ||
1626 | bnx2x_invalidate_e1h_mc_list(bp); | ||
1627 | |||
1628 | /* Clear UC lists configuration */ | ||
1629 | bnx2x_invalidate_uc_list(bp); | ||
1630 | 1823 | ||
1631 | if (bp->pending_max) { | 1824 | if (bp->pending_max) { |
1632 | bnx2x_update_max_mf_config(bp, bp->pending_max); | 1825 | bnx2x_update_max_mf_config(bp, bp->pending_max); |
@@ -1636,15 +1829,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1636 | if (bp->port.pmf) | 1829 | if (bp->port.pmf) |
1637 | bnx2x_initial_phy_init(bp, load_mode); | 1830 | bnx2x_initial_phy_init(bp, load_mode); |
1638 | 1831 | ||
1639 | /* Initialize Rx filtering */ | 1832 | /* Start fast path */ |
1833 | |||
1834 | /* Initialize Rx filter. */ | ||
1835 | netif_addr_lock_bh(bp->dev); | ||
1640 | bnx2x_set_rx_mode(bp->dev); | 1836 | bnx2x_set_rx_mode(bp->dev); |
1837 | netif_addr_unlock_bh(bp->dev); | ||
1641 | 1838 | ||
1642 | /* Start fast path */ | 1839 | /* Start the Tx */ |
1643 | switch (load_mode) { | 1840 | switch (load_mode) { |
1644 | case LOAD_NORMAL: | 1841 | case LOAD_NORMAL: |
1645 | /* Tx queue should be only reenabled */ | 1842 | /* Tx queue should be only reenabled */ |
1646 | netif_tx_wake_all_queues(bp->dev); | 1843 | netif_tx_wake_all_queues(bp->dev); |
1647 | /* Initialize the receive filter. */ | ||
1648 | break; | 1844 | break; |
1649 | 1845 | ||
1650 | case LOAD_OPEN: | 1846 | case LOAD_OPEN: |
@@ -1673,18 +1869,28 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1673 | #endif | 1869 | #endif |
1674 | bnx2x_inc_load_cnt(bp); | 1870 | bnx2x_inc_load_cnt(bp); |
1675 | 1871 | ||
1676 | bnx2x_release_firmware(bp); | 1872 | /* Wait for all pending SP commands to complete */ |
1873 | if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { | ||
1874 | BNX2X_ERR("Timeout waiting for SP elements to complete\n"); | ||
1875 | bnx2x_nic_unload(bp, UNLOAD_CLOSE); | ||
1876 | return -EBUSY; | ||
1877 | } | ||
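bnx2x_wait_sp_comp() is expected to poll the pending slow-path bits in bp->sp_state until the given mask clears or a timeout expires. A hedged sketch of that idea (not the patch's actual implementation):

    /* sketch: bounded poll of the pending-SP mask, ~5 seconds in 1 ms steps */
    static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
    {
            int tout = 5000;

            while (tout--) {
                    smp_mb();
                    if (!(bp->sp_state & mask))
                            return true;
                    usleep_range(1000, 1000);
            }

            smp_mb();
            return !(bp->sp_state & mask);
    }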
1677 | 1878 | ||
1879 | bnx2x_dcbx_init(bp); | ||
1678 | return 0; | 1880 | return 0; |
1679 | 1881 | ||
1680 | #ifdef BCM_CNIC | 1882 | #ifndef BNX2X_STOP_ON_ERROR |
1681 | load_error4: | 1883 | load_error4: |
1884 | #ifdef BCM_CNIC | ||
1682 | /* Disable Timer scan */ | 1885 | /* Disable Timer scan */ |
1683 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0); | 1886 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); |
1684 | #endif | 1887 | #endif |
1685 | load_error3: | 1888 | load_error3: |
1686 | bnx2x_int_disable_sync(bp, 1); | 1889 | bnx2x_int_disable_sync(bp, 1); |
1687 | 1890 | ||
1891 | /* Clean queueable objects */ | ||
1892 | bnx2x_squeeze_objects(bp); | ||
1893 | |||
1688 | /* Free SKBs, SGEs, TPA pool and driver internals */ | 1894 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
1689 | bnx2x_free_skbs(bp); | 1895 | bnx2x_free_skbs(bp); |
1690 | for_each_rx_queue(bp, i) | 1896 | for_each_rx_queue(bp, i) |
@@ -1704,9 +1910,8 @@ load_error1: | |||
1704 | load_error0: | 1910 | load_error0: |
1705 | bnx2x_free_mem(bp); | 1911 | bnx2x_free_mem(bp); |
1706 | 1912 | ||
1707 | bnx2x_release_firmware(bp); | ||
1708 | |||
1709 | return rc; | 1913 | return rc; |
1914 | #endif /* ! BNX2X_STOP_ON_ERROR */ | ||
1710 | } | 1915 | } |
1711 | 1916 | ||
1712 | /* must be called with rtnl_lock */ | 1917 | /* must be called with rtnl_lock */ |
@@ -1728,18 +1933,19 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
1728 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); | 1933 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); |
1729 | #endif | 1934 | #endif |
1730 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; | 1935 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; |
1936 | smp_mb(); | ||
1731 | 1937 | ||
1732 | /* Set "drop all" */ | ||
1733 | bp->rx_mode = BNX2X_RX_MODE_NONE; | 1938 | bp->rx_mode = BNX2X_RX_MODE_NONE; |
1734 | bnx2x_set_storm_rx_mode(bp); | ||
1735 | 1939 | ||
1736 | /* Stop Tx */ | 1940 | /* Stop Tx */ |
1737 | bnx2x_tx_disable(bp); | 1941 | bnx2x_tx_disable(bp); |
1738 | 1942 | ||
1739 | del_timer_sync(&bp->timer); | 1943 | del_timer_sync(&bp->timer); |
1740 | 1944 | ||
1741 | SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, | 1945 | /* Set ALWAYS_ALIVE bit in shmem */ |
1742 | (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); | 1946 | bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; |
1947 | |||
1948 | bnx2x_drv_pulse(bp); | ||
1743 | 1949 | ||
1744 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | 1950 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); |
1745 | 1951 | ||
@@ -1754,6 +1960,12 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
1754 | bnx2x_free_irq(bp); | 1960 | bnx2x_free_irq(bp); |
1755 | } | 1961 | } |
1756 | 1962 | ||
1963 | /* | ||
1964 | * At this stage no more interrupts will arrive so we may safely clean | ||
1965 | * the queueable objects here in case they failed to get cleaned so far. | ||
1966 | */ | ||
1967 | bnx2x_squeeze_objects(bp); | ||
1968 | |||
1757 | bp->port.pmf = 0; | 1969 | bp->port.pmf = 0; |
1758 | 1970 | ||
1759 | /* Free SKBs, SGEs, TPA pool and driver internals */ | 1971 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
@@ -2154,7 +2366,6 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, | |||
2154 | static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, | 2366 | static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, |
2155 | struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) | 2367 | struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) |
2156 | { | 2368 | { |
2157 | |||
2158 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; | 2369 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; |
2159 | 2370 | ||
2160 | if (xmit_type & XMIT_CSUM_V4) | 2371 | if (xmit_type & XMIT_CSUM_V4) |
@@ -2166,7 +2377,6 @@ static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, | |||
2166 | 2377 | ||
2167 | if (!(xmit_type & XMIT_CSUM_TCP)) | 2378 | if (!(xmit_type & XMIT_CSUM_TCP)) |
2168 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; | 2379 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; |
2169 | |||
2170 | } | 2380 | } |
2171 | 2381 | ||
2172 | /** | 2382 | /** |
@@ -2234,7 +2444,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2234 | struct bnx2x_fastpath *fp; | 2444 | struct bnx2x_fastpath *fp; |
2235 | struct netdev_queue *txq; | 2445 | struct netdev_queue *txq; |
2236 | struct sw_tx_bd *tx_buf; | 2446 | struct sw_tx_bd *tx_buf; |
2237 | struct eth_tx_start_bd *tx_start_bd; | 2447 | struct eth_tx_start_bd *tx_start_bd, *first_bd; |
2238 | struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; | 2448 | struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; |
2239 | struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; | 2449 | struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; |
2240 | struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; | 2450 | struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; |
@@ -2296,7 +2506,15 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2296 | } | 2506 | } |
2297 | } | 2507 | } |
2298 | #endif | 2508 | #endif |
2299 | 2509 | /* Map skb linear data for DMA */ | |
2510 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | ||
2511 | skb_headlen(skb), DMA_TO_DEVICE); | ||
2512 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | ||
2513 | DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - " | ||
2514 | "silently dropping this SKB\n"); | ||
2515 | dev_kfree_skb_any(skb); | ||
2516 | return NETDEV_TX_OK; | ||
2517 | } | ||
2300 | /* | 2518 | /* |
2301 | Please read carefully. First we use one BD which we mark as start, | 2519 | Please read carefully. First we use one BD which we mark as start, |
2302 | then we have a parsing info BD (used for TSO or xsum), | 2520 | then we have a parsing info BD (used for TSO or xsum), |
@@ -2306,12 +2524,19 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2306 | And above all, all pdb sizes are in words - NOT DWORDS! | 2524 | And above all, all pdb sizes are in words - NOT DWORDS! |
2307 | */ | 2525 | */ |
2308 | 2526 | ||
2309 | pkt_prod = fp->tx_pkt_prod++; | 2527 | /* get the current packet producer now - advance it just before sending |
2528 | * the packet, since mapping of pages may fail and cause it to be dropped | ||
2529 | */ | ||
2530 | pkt_prod = fp->tx_pkt_prod; | ||
2310 | bd_prod = TX_BD(fp->tx_bd_prod); | 2531 | bd_prod = TX_BD(fp->tx_bd_prod); |
2311 | 2532 | ||
2312 | /* get a tx_buf and first BD */ | 2533 | /* get a tx_buf and first BD |
2534 | * tx_start_bd may be changed during SPLIT, | ||
2535 | * but first_bd will always stay first | ||
2536 | */ | ||
2313 | tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; | 2537 | tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; |
2314 | tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; | 2538 | tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; |
2539 | first_bd = tx_start_bd; | ||
2315 | 2540 | ||
2316 | tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; | 2541 | tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; |
2317 | SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE, | 2542 | SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE, |
@@ -2343,7 +2568,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2343 | if (xmit_type & XMIT_CSUM) | 2568 | if (xmit_type & XMIT_CSUM) |
2344 | bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); | 2569 | bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); |
2345 | 2570 | ||
2346 | if (CHIP_IS_E2(bp)) { | 2571 | if (!CHIP_IS_E1x(bp)) { |
2347 | pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2; | 2572 | pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2; |
2348 | memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); | 2573 | memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); |
2349 | /* Set PBD in checksum offload case */ | 2574 | /* Set PBD in checksum offload case */ |
@@ -2351,6 +2576,20 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2351 | hlen = bnx2x_set_pbd_csum_e2(bp, skb, | 2576 | hlen = bnx2x_set_pbd_csum_e2(bp, skb, |
2352 | &pbd_e2_parsing_data, | 2577 | &pbd_e2_parsing_data, |
2353 | xmit_type); | 2578 | xmit_type); |
2579 | if (IS_MF_SI(bp)) { | ||
2580 | /* | ||
2581 | * fill in the MAC addresses in the PBD - for local | ||
2582 | * switching | ||
2583 | */ | ||
2584 | bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi, | ||
2585 | &pbd_e2->src_mac_addr_mid, | ||
2586 | &pbd_e2->src_mac_addr_lo, | ||
2587 | eth->h_source); | ||
2588 | bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi, | ||
2589 | &pbd_e2->dst_mac_addr_mid, | ||
2590 | &pbd_e2->dst_mac_addr_lo, | ||
2591 | eth->h_dest); | ||
2592 | } | ||
2354 | } else { | 2593 | } else { |
2355 | pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x; | 2594 | pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x; |
2356 | memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); | 2595 | memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); |
@@ -2360,15 +2599,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2360 | 2599 | ||
2361 | } | 2600 | } |
2362 | 2601 | ||
2363 | /* Map skb linear data for DMA */ | ||
2364 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | ||
2365 | skb_headlen(skb), DMA_TO_DEVICE); | ||
2366 | |||
2367 | /* Setup the data pointer of the first BD of the packet */ | 2602 | /* Setup the data pointer of the first BD of the packet */ |
2368 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | 2603 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
2369 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | 2604 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
2370 | nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */ | 2605 | nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ |
2371 | tx_start_bd->nbd = cpu_to_le16(nbd); | ||
2372 | tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); | 2606 | tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); |
2373 | pkt_size = tx_start_bd->nbytes; | 2607 | pkt_size = tx_start_bd->nbytes; |
2374 | 2608 | ||
@@ -2391,7 +2625,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2391 | if (unlikely(skb_headlen(skb) > hlen)) | 2625 | if (unlikely(skb_headlen(skb) > hlen)) |
2392 | bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, | 2626 | bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, |
2393 | hlen, bd_prod, ++nbd); | 2627 | hlen, bd_prod, ++nbd); |
2394 | if (CHIP_IS_E2(bp)) | 2628 | if (!CHIP_IS_E1x(bp)) |
2395 | bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, | 2629 | bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, |
2396 | xmit_type); | 2630 | xmit_type); |
2397 | else | 2631 | else |
@@ -2410,19 +2644,34 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2410 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 2644 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
2411 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 2645 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2412 | 2646 | ||
2647 | mapping = dma_map_page(&bp->pdev->dev, frag->page, | ||
2648 | frag->page_offset, frag->size, | ||
2649 | DMA_TO_DEVICE); | ||
2650 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | ||
2651 | |||
2652 | DP(NETIF_MSG_TX_QUEUED, "Unable to map page - " | ||
2653 | "dropping packet...\n"); | ||
2654 | |||
2655 | /* we need to unmap all buffers already mapped | ||
2656 | * for this SKB; | ||
2657 | * first_bd->nbd needs to be properly updated | ||
2658 | * before the call to bnx2x_free_tx_pkt | ||
2659 | */ | ||
2660 | first_bd->nbd = cpu_to_le16(nbd); | ||
2661 | bnx2x_free_tx_pkt(bp, fp, TX_BD(fp->tx_pkt_prod)); | ||
2662 | return NETDEV_TX_OK; | ||
2663 | } | ||
2664 | |||
2413 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | 2665 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
2414 | tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd; | 2666 | tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd; |
2415 | if (total_pkt_bd == NULL) | 2667 | if (total_pkt_bd == NULL) |
2416 | total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; | 2668 | total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; |
2417 | 2669 | ||
2418 | mapping = dma_map_page(&bp->pdev->dev, frag->page, | ||
2419 | frag->page_offset, | ||
2420 | frag->size, DMA_TO_DEVICE); | ||
2421 | |||
2422 | tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | 2670 | tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
2423 | tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | 2671 | tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
2424 | tx_data_bd->nbytes = cpu_to_le16(frag->size); | 2672 | tx_data_bd->nbytes = cpu_to_le16(frag->size); |
2425 | le16_add_cpu(&pkt_size, frag->size); | 2673 | le16_add_cpu(&pkt_size, frag->size); |
2674 | nbd++; | ||
2426 | 2675 | ||
2427 | DP(NETIF_MSG_TX_QUEUED, | 2676 | DP(NETIF_MSG_TX_QUEUED, |
2428 | "frag %d bd @%p addr (%x:%x) nbytes %d\n", | 2677 | "frag %d bd @%p addr (%x:%x) nbytes %d\n", |
@@ -2432,6 +2681,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2432 | 2681 | ||
2433 | DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); | 2682 | DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); |
2434 | 2683 | ||
2684 | /* update with actual num BDs */ | ||
2685 | first_bd->nbd = cpu_to_le16(nbd); | ||
2686 | |||
2435 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | 2687 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
2436 | 2688 | ||
2437 | /* now send a tx doorbell, counting the next BD | 2689 | /* now send a tx doorbell, counting the next BD |
@@ -2440,6 +2692,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2440 | if (TX_BD_POFF(bd_prod) < nbd) | 2692 | if (TX_BD_POFF(bd_prod) < nbd) |
2441 | nbd++; | 2693 | nbd++; |
2442 | 2694 | ||
2695 | /* total_pkt_bytes should be set on the first data BD if | ||
2696 | * it's not an LSO packet and there is more than one | ||
2697 | * data BD. In this case pkt_size is limited by an MTU value. | ||
2698 | * However we prefer to set it for an LSO packet (while we don't | ||
2699 | * have to) in order to save some CPU cycles in a non-LSO | ||
2700 | * case, where we care much more about them. | ||
2701 | */ | ||
2443 | if (total_pkt_bd != NULL) | 2702 | if (total_pkt_bd != NULL) |
2444 | total_pkt_bd->total_pkt_bytes = pkt_size; | 2703 | total_pkt_bd->total_pkt_bytes = pkt_size; |
2445 | 2704 | ||
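A quick worked example of the nbd bookkeeping introduced above (illustrative numbers only):

    /* a TSO skb with a header/data split and 3 frags:
     *   nbd = 2    start BD + parsing BD (initial value)
     *       + 1    header/data split BD added by bnx2x_tx_split()
     *       + 3    one BD per mapped frag
     * so first_bd->nbd = 6; one extra BD is counted only for the doorbell
     * when the chain wraps a BD page (TX_BD_POFF(bd_prod) < nbd).
     */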
@@ -2460,6 +2719,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2460 | pbd_e2->parsing_data); | 2719 | pbd_e2->parsing_data); |
2461 | DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); | 2720 | DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); |
2462 | 2721 | ||
2722 | fp->tx_pkt_prod++; | ||
2463 | /* | 2723 | /* |
2464 | * Make sure that the BD data is updated before updating the producer | 2724 | * Make sure that the BD data is updated before updating the producer |
2465 | * since FW might read the BD right after the producer is updated. | 2725 | * since FW might read the BD right after the producer is updated. |
@@ -2500,15 +2760,23 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p) | |||
2500 | { | 2760 | { |
2501 | struct sockaddr *addr = p; | 2761 | struct sockaddr *addr = p; |
2502 | struct bnx2x *bp = netdev_priv(dev); | 2762 | struct bnx2x *bp = netdev_priv(dev); |
2763 | int rc = 0; | ||
2503 | 2764 | ||
2504 | if (!is_valid_ether_addr((u8 *)(addr->sa_data))) | 2765 | if (!is_valid_ether_addr((u8 *)(addr->sa_data))) |
2505 | return -EINVAL; | 2766 | return -EINVAL; |
2506 | 2767 | ||
2768 | if (netif_running(dev)) { | ||
2769 | rc = bnx2x_set_eth_mac(bp, false); | ||
2770 | if (rc) | ||
2771 | return rc; | ||
2772 | } | ||
2773 | |||
2507 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 2774 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
2775 | |||
2508 | if (netif_running(dev)) | 2776 | if (netif_running(dev)) |
2509 | bnx2x_set_eth_mac(bp, 1); | 2777 | rc = bnx2x_set_eth_mac(bp, true); |
2510 | 2778 | ||
2511 | return 0; | 2779 | return rc; |
2512 | } | 2780 | } |
2513 | 2781 | ||
2514 | static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) | 2782 | static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) |
@@ -2525,7 +2793,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) | |||
2525 | } else { | 2793 | } else { |
2526 | #endif | 2794 | #endif |
2527 | /* status blocks */ | 2795 | /* status blocks */ |
2528 | if (CHIP_IS_E2(bp)) | 2796 | if (!CHIP_IS_E1x(bp)) |
2529 | BNX2X_PCI_FREE(sb->e2_sb, | 2797 | BNX2X_PCI_FREE(sb->e2_sb, |
2530 | bnx2x_fp(bp, fp_index, | 2798 | bnx2x_fp(bp, fp_index, |
2531 | status_blk_mapping), | 2799 | status_blk_mapping), |
@@ -2581,7 +2849,7 @@ void bnx2x_free_fp_mem(struct bnx2x *bp) | |||
2581 | static inline void set_sb_shortcuts(struct bnx2x *bp, int index) | 2849 | static inline void set_sb_shortcuts(struct bnx2x *bp, int index) |
2582 | { | 2850 | { |
2583 | union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); | 2851 | union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); |
2584 | if (CHIP_IS_E2(bp)) { | 2852 | if (!CHIP_IS_E1x(bp)) { |
2585 | bnx2x_fp(bp, index, sb_index_values) = | 2853 | bnx2x_fp(bp, index, sb_index_values) = |
2586 | (__le16 *)status_blk.e2_sb->sb.index_values; | 2854 | (__le16 *)status_blk.e2_sb->sb.index_values; |
2587 | bnx2x_fp(bp, index, sb_running_index) = | 2855 | bnx2x_fp(bp, index, sb_running_index) = |
@@ -2618,7 +2886,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) | |||
2618 | if (!IS_FCOE_IDX(index)) { | 2886 | if (!IS_FCOE_IDX(index)) { |
2619 | #endif | 2887 | #endif |
2620 | /* status blocks */ | 2888 | /* status blocks */ |
2621 | if (CHIP_IS_E2(bp)) | 2889 | if (!CHIP_IS_E1x(bp)) |
2622 | BNX2X_PCI_ALLOC(sb->e2_sb, | 2890 | BNX2X_PCI_ALLOC(sb->e2_sb, |
2623 | &bnx2x_fp(bp, index, status_blk_mapping), | 2891 | &bnx2x_fp(bp, index, status_blk_mapping), |
2624 | sizeof(struct host_hc_status_block_e2)); | 2892 | sizeof(struct host_hc_status_block_e2)); |
@@ -2747,30 +3015,6 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp) | |||
2747 | return 0; | 3015 | return 0; |
2748 | } | 3016 | } |
2749 | 3017 | ||
2750 | static int bnx2x_setup_irqs(struct bnx2x *bp) | ||
2751 | { | ||
2752 | int rc = 0; | ||
2753 | if (bp->flags & USING_MSIX_FLAG) { | ||
2754 | rc = bnx2x_req_msix_irqs(bp); | ||
2755 | if (rc) | ||
2756 | return rc; | ||
2757 | } else { | ||
2758 | bnx2x_ack_int(bp); | ||
2759 | rc = bnx2x_req_irq(bp); | ||
2760 | if (rc) { | ||
2761 | BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); | ||
2762 | return rc; | ||
2763 | } | ||
2764 | if (bp->flags & USING_MSI_FLAG) { | ||
2765 | bp->dev->irq = bp->pdev->irq; | ||
2766 | netdev_info(bp->dev, "using MSI IRQ %d\n", | ||
2767 | bp->pdev->irq); | ||
2768 | } | ||
2769 | } | ||
2770 | |||
2771 | return 0; | ||
2772 | } | ||
2773 | |||
2774 | void bnx2x_free_mem_bp(struct bnx2x *bp) | 3018 | void bnx2x_free_mem_bp(struct bnx2x *bp) |
2775 | { | 3019 | { |
2776 | kfree(bp->fp); | 3020 | kfree(bp->fp); |
@@ -3021,3 +3265,57 @@ int bnx2x_resume(struct pci_dev *pdev) | |||
3021 | 3265 | ||
3022 | return rc; | 3266 | return rc; |
3023 | } | 3267 | } |
3268 | |||
3269 | |||
3270 | void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, | ||
3271 | u32 cid) | ||
3272 | { | ||
3273 | /* ustorm cxt validation */ | ||
3274 | cxt->ustorm_ag_context.cdu_usage = | ||
3275 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), | ||
3276 | CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); | ||
3277 | /* xcontext validation */ | ||
3278 | cxt->xstorm_ag_context.cdu_reserved = | ||
3279 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), | ||
3280 | CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); | ||
3281 | } | ||
3282 | |||
3283 | static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, | ||
3284 | u8 fw_sb_id, u8 sb_index, | ||
3285 | u8 ticks) | ||
3286 | { | ||
3287 | |||
3288 | u32 addr = BAR_CSTRORM_INTMEM + | ||
3289 | CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); | ||
3290 | REG_WR8(bp, addr, ticks); | ||
3291 | DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n", | ||
3292 | port, fw_sb_id, sb_index, ticks); | ||
3293 | } | ||
3294 | |||
3295 | static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, | ||
3296 | u16 fw_sb_id, u8 sb_index, | ||
3297 | u8 disable) | ||
3298 | { | ||
3299 | u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); | ||
3300 | u32 addr = BAR_CSTRORM_INTMEM + | ||
3301 | CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); | ||
3302 | u16 flags = REG_RD16(bp, addr); | ||
3303 | /* clear and set */ | ||
3304 | flags &= ~HC_INDEX_DATA_HC_ENABLED; | ||
3305 | flags |= enable_flag; | ||
3306 | REG_WR16(bp, addr, flags); | ||
3307 | DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n", | ||
3308 | port, fw_sb_id, sb_index, disable); | ||
3309 | } | ||
3310 | |||
3311 | void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, | ||
3312 | u8 sb_index, u8 disable, u16 usec) | ||
3313 | { | ||
3314 | int port = BP_PORT(bp); | ||
3315 | u8 ticks = usec / BNX2X_BTR; | ||
3316 | |||
3317 | storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); | ||
3318 | |||
3319 | disable = disable ? 1 : (usec ? 0 : 1); | ||
3320 | storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); | ||
3321 | } | ||
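The combined disable expression in bnx2x_update_coalesce_sb_index() reduces to a small truth table, spelled out here for clarity:

    /* disable = disable ? 1 : (usec ? 0 : 1) behaves as:
     *   caller asked to disable        -> index disabled
     *   enabled but usec == 0          -> index disabled (zero timeout)
     *   enabled and usec != 0          -> index enabled
     */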
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h index 5a97f92b340c..944bcaeeba45 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.h +++ b/drivers/net/bnx2x/bnx2x_cmn.h | |||
@@ -18,11 +18,15 @@ | |||
18 | #define BNX2X_CMN_H | 18 | #define BNX2X_CMN_H |
19 | 19 | ||
20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
21 | #include <linux/pci.h> | ||
21 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
22 | 23 | ||
23 | 24 | ||
24 | #include "bnx2x.h" | 25 | #include "bnx2x.h" |
25 | 26 | ||
27 | /* This is used as a replacement for an MCP if it's not present */ | ||
28 | extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ | ||
29 | |||
26 | extern int num_queues; | 30 | extern int num_queues; |
27 | 31 | ||
28 | /************************ Macros ********************************/ | 32 | /************************ Macros ********************************/ |
@@ -61,6 +65,73 @@ extern int num_queues; | |||
61 | /*********************** Interfaces **************************** | 65 | /*********************** Interfaces **************************** |
62 | * Functions that need to be implemented by each driver version | 66 | * Functions that need to be implemented by each driver version |
63 | */ | 67 | */ |
68 | /* Init */ | ||
69 | |||
70 | /** | ||
71 | * bnx2x_send_unload_req - request unload mode from the MCP. | ||
72 | * | ||
73 | * @bp: driver handle | ||
74 | * @unload_mode: requested function's unload mode | ||
75 | * | ||
76 | * Return the unload mode returned by the MCP: COMMON, PORT or FUNC. | ||
77 | */ | ||
78 | u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode); | ||
79 | |||
80 | /** | ||
81 | * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. | ||
82 | * | ||
83 | * @bp: driver handle | ||
84 | */ | ||
85 | void bnx2x_send_unload_done(struct bnx2x *bp); | ||
86 | |||
87 | /** | ||
88 | * bnx2x_config_rss_pf - configure RSS parameters. | ||
89 | * | ||
90 | * @bp: driver handle | ||
91 | * @ind_table: indirection table to configure | ||
92 | * @config_hash: re-configure RSS hash keys configuration | ||
93 | */ | ||
94 | int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash); | ||
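A hypothetical caller of bnx2x_config_rss_pf() might look as follows; the table-size constant and the queue-count macro are assumptions for illustration and are not taken from this patch. The table maps hash buckets onto the client IDs of the RSS-capable queues, and the final argument asks for the hash keys to be (re)programmed.

    u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];     /* size from the FW HSI (assumed name) */
    int i, rss_queues = BNX2X_NUM_ETH_QUEUES(bp);   /* macro name assumed */
    int rc;

    for (i = 0; i < sizeof(ind_table); i++)
            ind_table[i] = bp->fp->cl_id + (i % rss_queues);

    rc = bnx2x_config_rss_pf(bp, ind_table, true);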
95 | |||
96 | /** | ||
97 | * bnx2x__init_func_obj - init function object | ||
98 | * | ||
99 | * @bp: driver handle | ||
100 | * | ||
101 | * Initializes the Function Object with the appropriate | ||
102 | * parameters which include a function slow path driver | ||
103 | * interface. | ||
104 | */ | ||
105 | void bnx2x__init_func_obj(struct bnx2x *bp); | ||
106 | |||
107 | /** | ||
108 | * bnx2x_setup_queue - setup eth queue. | ||
109 | * | ||
110 | * @bp: driver handle | ||
111 | * @fp: pointer to the fastpath structure | ||
112 | * @leading: true if this is the leading queue | ||
113 | * | ||
114 | */ | ||
115 | int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
116 | bool leading); | ||
117 | |||
118 | /** | ||
119 | * bnx2x_setup_leading - bring up a leading eth queue. | ||
120 | * | ||
121 | * @bp: driver handle | ||
122 | */ | ||
123 | int bnx2x_setup_leading(struct bnx2x *bp); | ||
124 | |||
125 | /** | ||
126 | * bnx2x_fw_command - send the MCP a request | ||
127 | * | ||
128 | * @bp: driver handle | ||
129 | * @command: request | ||
130 | * @param: request's parameter | ||
131 | * | ||
132 | * block until there is a reply | ||
133 | */ | ||
134 | u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); | ||
64 | 135 | ||
65 | /** | 136 | /** |
66 | * bnx2x_initial_phy_init - initialize link parameters structure variables. | 137 | * bnx2x_initial_phy_init - initialize link parameters structure variables. |
@@ -88,6 +159,29 @@ void bnx2x_link_set(struct bnx2x *bp); | |||
88 | u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes); | 159 | u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes); |
89 | 160 | ||
90 | /** | 161 | /** |
162 | * bnx2x_drv_pulse - write driver pulse to shmem | ||
163 | * | ||
164 | * @bp: driver handle | ||
165 | * | ||
166 | * Writes the value in bp->fw_drv_pulse_wr_seq to the drv_pulse mailbox | ||
167 | * in the shmem. | ||
168 | */ | ||
169 | void bnx2x_drv_pulse(struct bnx2x *bp); | ||
170 | |||
171 | /** | ||
172 | * bnx2x_igu_ack_sb - update IGU with current SB value | ||
173 | * | ||
174 | * @bp: driver handle | ||
175 | * @igu_sb_id: SB id | ||
176 | * @segment: SB segment | ||
177 | * @index: SB index | ||
178 | * @op: SB operation | ||
179 | * @update: is HW update required | ||
180 | */ | ||
181 | void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, | ||
182 | u16 index, u8 op, u8 update); | ||
183 | |||
184 | /** | ||
91 | * bnx2x__link_status_update - handles link status change. | 185 | * bnx2x__link_status_update - handles link status change. |
92 | * | 186 | * |
93 | * @bp: driver handle | 187 | * @bp: driver handle |
@@ -165,21 +259,6 @@ void bnx2x_int_enable(struct bnx2x *bp); | |||
165 | void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); | 259 | void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); |
166 | 260 | ||
167 | /** | 261 | /** |
168 | * bnx2x_init_firmware - loads device firmware | ||
169 | * | ||
170 | * @bp: driver handle | ||
171 | */ | ||
172 | int bnx2x_init_firmware(struct bnx2x *bp); | ||
173 | |||
174 | /** | ||
175 | * bnx2x_init_hw - init HW blocks according to current initialization stage. | ||
176 | * | ||
177 | * @bp: driver handle | ||
178 | * @load_code: COMMON, PORT or FUNCTION | ||
179 | */ | ||
180 | int bnx2x_init_hw(struct bnx2x *bp, u32 load_code); | ||
181 | |||
182 | /** | ||
183 | * bnx2x_nic_init - init driver internals. | 262 | * bnx2x_nic_init - init driver internals. |
184 | * | 263 | * |
185 | * @bp: driver handle | 264 | * @bp: driver handle |
@@ -207,16 +286,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp); | |||
207 | void bnx2x_free_mem(struct bnx2x *bp); | 286 | void bnx2x_free_mem(struct bnx2x *bp); |
208 | 287 | ||
209 | /** | 288 | /** |
210 | * bnx2x_setup_client - setup eth client. | ||
211 | * | ||
212 | * @bp: driver handle | ||
213 | * @fp: pointer to fastpath structure | ||
214 | * @is_leading: boolean | ||
215 | */ | ||
216 | int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
217 | int is_leading); | ||
218 | |||
219 | /** | ||
220 | * bnx2x_set_num_queues - set number of queues according to mode. | 289 | * bnx2x_set_num_queues - set number of queues according to mode. |
221 | * | 290 | * |
222 | * @bp: driver handle | 291 | * @bp: driver handle |
@@ -259,38 +328,44 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); | |||
259 | * | 328 | * |
260 | * Configures according to the value in netdev->dev_addr. | 329 | * Configures according to the value in netdev->dev_addr. |
261 | */ | 330 | */ |
262 | void bnx2x_set_eth_mac(struct bnx2x *bp, int set); | 331 | int bnx2x_set_eth_mac(struct bnx2x *bp, bool set); |
263 | 332 | ||
264 | #ifdef BCM_CNIC | ||
265 | /** | 333 | /** |
266 | * bnx2x_set_fip_eth_mac_addr - Set/Clear FIP MAC(s) | 334 | * bnx2x_set_rx_mode - set MAC filtering configurations. |
267 | * | 335 | * |
268 | * @bp: driver handle | 336 | * @dev: netdevice |
269 | * @set: set or clear the CAM entry | ||
270 | * | 337 | * |
271 | * Used next enties in the CAM after the ETH MAC(s). | 338 | * called with netif_tx_lock from dev_mcast.c |
272 | * This function will wait until the ramdord completion returns. | 339 | * If bp->state is OPEN, should be called with |
273 | * Return 0 if cussess, -ENODEV if ramrod doesn't return. | 340 | * netif_addr_lock_bh() |
274 | */ | 341 | */ |
275 | int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set); | 342 | void bnx2x_set_rx_mode(struct net_device *dev); |
276 | 343 | ||
277 | /** | 344 | /** |
278 | * bnx2x_set_all_enode_macs - Set/Clear ALL_ENODE mcast MAC. | 345 | * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. |
279 | * | 346 | * |
280 | * @bp: driver handle | 347 | * @bp: driver handle |
281 | * @set: set or clear | 348 | * |
349 | * If bp->state is OPEN, should be called with | ||
350 | * netif_addr_lock_bh(). | ||
282 | */ | 351 | */ |
283 | int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set); | 352 | void bnx2x_set_storm_rx_mode(struct bnx2x *bp); |
284 | #endif | ||
285 | 353 | ||
286 | /** | 354 | /** |
287 | * bnx2x_set_rx_mode - set MAC filtering configurations. | 355 | * bnx2x_set_q_rx_mode - configures rx_mode for a single queue. |
288 | * | ||
289 | * @dev: netdevice | ||
290 | * | 356 | * |
291 | * called with netif_tx_lock from dev_mcast.c | 357 | * @bp: driver handle |
358 | * @cl_id: client id | ||
359 | * @rx_mode_flags: rx mode configuration | ||
360 | * @rx_accept_flags: rx accept configuration | ||
361 | * @tx_accept_flags: tx accept configuration (tx switch) | ||
362 | * @ramrod_flags: ramrod configuration | ||
292 | */ | 363 | */ |
293 | void bnx2x_set_rx_mode(struct net_device *dev); | 364 | void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, |
365 | unsigned long rx_mode_flags, | ||
366 | unsigned long rx_accept_flags, | ||
367 | unsigned long tx_accept_flags, | ||
368 | unsigned long ramrod_flags); | ||
294 | 369 | ||
295 | /* Parity errors related */ | 370 | /* Parity errors related */ |
296 | void bnx2x_inc_load_cnt(struct bnx2x *bp); | 371 | void bnx2x_inc_load_cnt(struct bnx2x *bp); |
@@ -300,14 +375,6 @@ bool bnx2x_reset_is_done(struct bnx2x *bp); | |||
300 | void bnx2x_disable_close_the_gate(struct bnx2x *bp); | 375 | void bnx2x_disable_close_the_gate(struct bnx2x *bp); |
301 | 376 | ||
302 | /** | 377 | /** |
303 | * bnx2x_stats_handle - perform statistics handling according to event. | ||
304 | * | ||
305 | * @bp: driver handle | ||
306 | * @event: bnx2x_stats_event | ||
307 | */ | ||
308 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); | ||
309 | |||
310 | /** | ||
311 | * bnx2x_sp_event - handle ramrods completion. | 378 | * bnx2x_sp_event - handle ramrods completion. |
312 | * | 379 | * |
313 | * @fp: fastpath handle for the event | 380 | * @fp: fastpath handle for the event |
@@ -316,15 +383,6 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); | |||
316 | void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); | 383 | void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); |
317 | 384 | ||
318 | /** | 385 | /** |
319 | * bnx2x_func_start - init function | ||
320 | * | ||
321 | * @bp: driver handle | ||
322 | * | ||
323 | * Must be called before sending CLIENT_SETUP for the first client. | ||
324 | */ | ||
325 | int bnx2x_func_start(struct bnx2x *bp); | ||
326 | |||
327 | /** | ||
328 | * bnx2x_ilt_set_info - prepare ILT configurations. | 386 | * bnx2x_ilt_set_info - prepare ILT configurations. |
329 | * | 387 | * |
330 | * @bp: driver handle | 388 | * @bp: driver handle |
@@ -355,6 +413,8 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); | |||
355 | * @value: new value | 413 | * @value: new value |
356 | */ | 414 | */ |
357 | void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); | 415 | void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); |
416 | /* Error handling */ | ||
417 | void bnx2x_panic_dump(struct bnx2x *bp); | ||
358 | 418 | ||
359 | void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); | 419 | void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); |
360 | 420 | ||
@@ -378,6 +438,9 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p); | |||
378 | /* NAPI poll Rx part */ | 438 | /* NAPI poll Rx part */ |
379 | int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); | 439 | int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); |
380 | 440 | ||
441 | void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
442 | u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod); | ||
443 | |||
381 | /* NAPI poll Tx part */ | 444 | /* NAPI poll Tx part */ |
382 | int bnx2x_tx_int(struct bnx2x_fastpath *fp); | 445 | int bnx2x_tx_int(struct bnx2x_fastpath *fp); |
383 | 446 | ||
@@ -390,7 +453,6 @@ void bnx2x_free_irq(struct bnx2x *bp); | |||
390 | 453 | ||
391 | void bnx2x_free_fp_mem(struct bnx2x *bp); | 454 | void bnx2x_free_fp_mem(struct bnx2x *bp); |
392 | int bnx2x_alloc_fp_mem(struct bnx2x *bp); | 455 | int bnx2x_alloc_fp_mem(struct bnx2x *bp); |
393 | |||
394 | void bnx2x_init_rx_rings(struct bnx2x *bp); | 456 | void bnx2x_init_rx_rings(struct bnx2x *bp); |
395 | void bnx2x_free_skbs(struct bnx2x *bp); | 457 | void bnx2x_free_skbs(struct bnx2x *bp); |
396 | void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); | 458 | void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); |
@@ -455,19 +517,20 @@ int bnx2x_set_features(struct net_device *dev, u32 features); | |||
455 | */ | 517 | */ |
456 | void bnx2x_tx_timeout(struct net_device *dev); | 518 | void bnx2x_tx_timeout(struct net_device *dev); |
457 | 519 | ||
520 | /*********************** Inlines **********************************/ | ||
521 | /*********************** Fast path ********************************/ | ||
458 | static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) | 522 | static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) |
459 | { | 523 | { |
460 | barrier(); /* status block is written to by the chip */ | 524 | barrier(); /* status block is written to by the chip */ |
461 | fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; | 525 | fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; |
462 | } | 526 | } |
463 | 527 | ||
464 | static inline void bnx2x_update_rx_prod(struct bnx2x *bp, | 528 | static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp, |
465 | struct bnx2x_fastpath *fp, | 529 | struct bnx2x_fastpath *fp, u16 bd_prod, |
466 | u16 bd_prod, u16 rx_comp_prod, | 530 | u16 rx_comp_prod, u16 rx_sge_prod, u32 start) |
467 | u16 rx_sge_prod) | ||
468 | { | 531 | { |
469 | struct ustorm_eth_rx_producers rx_prods = {0}; | 532 | struct ustorm_eth_rx_producers rx_prods = {0}; |
470 | int i; | 533 | u32 i; |
471 | 534 | ||
472 | /* Update producers */ | 535 | /* Update producers */ |
473 | rx_prods.bd_prod = bd_prod; | 536 | rx_prods.bd_prod = bd_prod; |
@@ -484,10 +547,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp, | |||
484 | */ | 547 | */ |
485 | wmb(); | 548 | wmb(); |
486 | 549 | ||
487 | for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++) | 550 | for (i = 0; i < sizeof(rx_prods)/4; i++) |
488 | REG_WR(bp, | 551 | REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]); |
489 | BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4, | ||
490 | ((u32 *)&rx_prods)[i]); | ||
491 | 552 | ||
492 | mmiowb(); /* keep prod updates ordered */ | 553 | mmiowb(); /* keep prod updates ordered */ |
493 | 554 | ||
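With the producer-update helper generalized to take a destination offset, the out-of-line bnx2x_update_rx_prod() declared earlier in this header presumably becomes a thin wrapper. A sketch under that assumption, using the offset the removed inline used to write to:

    void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                              u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
    {
            u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;

            bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod,
                                     rx_sge_prod, start);
    }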
@@ -517,7 +578,7 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, | |||
517 | barrier(); | 578 | barrier(); |
518 | } | 579 | } |
519 | 580 | ||
520 | static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, | 581 | static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, |
521 | u8 idu_sb_id, bool is_Pf) | 582 | u8 idu_sb_id, bool is_Pf) |
522 | { | 583 | { |
523 | u32 data, ctl, cnt = 100; | 584 | u32 data, ctl, cnt = 100; |
@@ -525,7 +586,7 @@ static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, | |||
525 | u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; | 586 | u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; |
526 | u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; | 587 | u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; |
527 | u32 sb_bit = 1 << (idu_sb_id%32); | 588 | u32 sb_bit = 1 << (idu_sb_id%32); |
528 | u32 func_encode = BP_FUNC(bp) | | 589 | u32 func_encode = func | |
529 | ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT); | 590 | ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT); |
530 | u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; | 591 | u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; |
531 | 592 | ||
@@ -588,15 +649,6 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, | |||
588 | barrier(); | 649 | barrier(); |
589 | } | 650 | } |
590 | 651 | ||
591 | static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, | ||
592 | u16 index, u8 op, u8 update) | ||
593 | { | ||
594 | u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; | ||
595 | |||
596 | bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, | ||
597 | igu_addr); | ||
598 | } | ||
599 | |||
600 | static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm, | 652 | static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm, |
601 | u16 index, u8 op, u8 update) | 653 | u16 index, u8 op, u8 update) |
602 | { | 654 | { |
@@ -703,7 +755,7 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) | |||
703 | } | 755 | } |
704 | 756 | ||
705 | /** | 757 | /** |
706 | * disables tx from stack point of view | 758 | * bnx2x_tx_disable - disables tx from stack point of view |
707 | * | 759 | * |
708 | * @bp: driver handle | 760 | * @bp: driver handle |
709 | */ | 761 | */ |
@@ -738,7 +790,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp) | |||
738 | int i; | 790 | int i; |
739 | 791 | ||
740 | /* Add NAPI objects */ | 792 | /* Add NAPI objects */ |
741 | for_each_napi_queue(bp, i) | 793 | for_each_rx_queue(bp, i) |
742 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), | 794 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), |
743 | bnx2x_poll, BNX2X_NAPI_WEIGHT); | 795 | bnx2x_poll, BNX2X_NAPI_WEIGHT); |
744 | } | 796 | } |
@@ -747,7 +799,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp) | |||
747 | { | 799 | { |
748 | int i; | 800 | int i; |
749 | 801 | ||
750 | for_each_napi_queue(bp, i) | 802 | for_each_rx_queue(bp, i) |
751 | netif_napi_del(&bnx2x_fp(bp, i, napi)); | 803 | netif_napi_del(&bnx2x_fp(bp, i, napi)); |
752 | } | 804 | } |
753 | 805 | ||
@@ -777,7 +829,7 @@ static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) | |||
777 | int idx = RX_SGE_CNT * i - 1; | 829 | int idx = RX_SGE_CNT * i - 1; |
778 | 830 | ||
779 | for (j = 0; j < 2; j++) { | 831 | for (j = 0; j < 2; j++) { |
780 | SGE_MASK_CLEAR_BIT(fp, idx); | 832 | BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); |
781 | idx--; | 833 | idx--; |
782 | } | 834 | } |
783 | } | 835 | } |
@@ -787,7 +839,7 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) | |||
787 | { | 839 | { |
788 | /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */ | 840 | /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */ |
789 | memset(fp->sge_mask, 0xff, | 841 | memset(fp->sge_mask, 0xff, |
790 | (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64)); | 842 | (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64)); |
791 | 843 | ||
792 | /* Clear the two last indices in the page to 1: | 844 | /* Clear the two last indices in the page to 1: |
793 | these are the indices that correspond to the "next" element, | 845 | these are the indices that correspond to the "next" element, |
@@ -869,12 +921,61 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, | |||
869 | dma_unmap_addr(cons_rx_buf, mapping), | 921 | dma_unmap_addr(cons_rx_buf, mapping), |
870 | RX_COPY_THRESH, DMA_FROM_DEVICE); | 922 | RX_COPY_THRESH, DMA_FROM_DEVICE); |
871 | 923 | ||
872 | prod_rx_buf->skb = cons_rx_buf->skb; | ||
873 | dma_unmap_addr_set(prod_rx_buf, mapping, | 924 | dma_unmap_addr_set(prod_rx_buf, mapping, |
874 | dma_unmap_addr(cons_rx_buf, mapping)); | 925 | dma_unmap_addr(cons_rx_buf, mapping)); |
926 | prod_rx_buf->skb = cons_rx_buf->skb; | ||
875 | *prod_bd = *cons_bd; | 927 | *prod_bd = *cons_bd; |
876 | } | 928 | } |
877 | 929 | ||
930 | /************************* Init ******************************************/ | ||
931 | |||
932 | /** | ||
933 | * bnx2x_func_start - init function | ||
934 | * | ||
935 | * @bp: driver handle | ||
936 | * | ||
937 | * Must be called before sending CLIENT_SETUP for the first client. | ||
938 | */ | ||
939 | static inline int bnx2x_func_start(struct bnx2x *bp) | ||
940 | { | ||
941 | struct bnx2x_func_state_params func_params = {0}; | ||
942 | struct bnx2x_func_start_params *start_params = | ||
943 | &func_params.params.start; | ||
944 | |||
945 | /* Prepare parameters for function state transitions */ | ||
946 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); | ||
947 | |||
948 | func_params.f_obj = &bp->func_obj; | ||
949 | func_params.cmd = BNX2X_F_CMD_START; | ||
950 | |||
951 | /* Function parameters */ | ||
952 | start_params->mf_mode = bp->mf_mode; | ||
953 | start_params->sd_vlan_tag = bp->mf_ov; | ||
954 | start_params->network_cos_mode = OVERRIDE_COS; | ||
955 | |||
956 | return bnx2x_func_state_change(bp, &func_params); | ||
957 | } | ||
958 | |||
959 | |||
960 | /** | ||
961 | * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format | ||
962 | * | ||
963 | * @fw_hi: pointer to upper part | ||
964 | * @fw_mid: pointer to middle part | ||
965 | * @fw_lo: pointer to lower part | ||
966 | * @mac: pointer to MAC address | ||
967 | */ | ||
968 | static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo, | ||
969 | u8 *mac) | ||
970 | { | ||
971 | ((u8 *)fw_hi)[0] = mac[1]; | ||
972 | ((u8 *)fw_hi)[1] = mac[0]; | ||
973 | ((u8 *)fw_mid)[0] = mac[3]; | ||
974 | ((u8 *)fw_mid)[1] = mac[2]; | ||
975 | ((u8 *)fw_lo)[0] = mac[5]; | ||
976 | ((u8 *)fw_lo)[1] = mac[4]; | ||
977 | } | ||
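To make the firmware byte order concrete, here is what bnx2x_set_fw_mac_addr() stores for a sample address:

    /* for mac = {0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff} the bytes written are:
     *   fw_hi  -> {0xbb, 0xaa}
     *   fw_mid -> {0xdd, 0xcc}
     *   fw_lo  -> {0xff, 0xee}
     * i.e. each 16-bit word holds one byte-swapped pair of the MAC address.
     */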
978 | |||
878 | static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, | 979 | static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, |
879 | struct bnx2x_fastpath *fp, int last) | 980 | struct bnx2x_fastpath *fp, int last) |
880 | { | 981 | { |
@@ -893,21 +994,20 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, | |||
893 | int i; | 994 | int i; |
894 | 995 | ||
895 | for (i = 0; i < last; i++) { | 996 | for (i = 0; i < last; i++) { |
896 | struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]); | 997 | struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; |
897 | struct sk_buff *skb = rx_buf->skb; | 998 | struct sw_rx_bd *first_buf = &tpa_info->first_buf; |
999 | struct sk_buff *skb = first_buf->skb; | ||
898 | 1000 | ||
899 | if (skb == NULL) { | 1001 | if (skb == NULL) { |
900 | DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); | 1002 | DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); |
901 | continue; | 1003 | continue; |
902 | } | 1004 | } |
903 | 1005 | if (tpa_info->tpa_state == BNX2X_TPA_START) | |
904 | if (fp->tpa_state[i] == BNX2X_TPA_START) | ||
905 | dma_unmap_single(&bp->pdev->dev, | 1006 | dma_unmap_single(&bp->pdev->dev, |
906 | dma_unmap_addr(rx_buf, mapping), | 1007 | dma_unmap_addr(first_buf, mapping), |
907 | fp->rx_buf_size, DMA_FROM_DEVICE); | 1008 | fp->rx_buf_size, DMA_FROM_DEVICE); |
908 | |||
909 | dev_kfree_skb(skb); | 1009 | dev_kfree_skb(skb); |
910 | rx_buf->skb = NULL; | 1010 | first_buf->skb = NULL; |
911 | } | 1011 | } |
912 | } | 1012 | } |
913 | 1013 | ||
@@ -1036,31 +1136,199 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, | |||
1036 | return i - fp->eth_q_stats.rx_skb_alloc_failed; | 1136 | return i - fp->eth_q_stats.rx_skb_alloc_failed; |
1037 | } | 1137 | } |
1038 | 1138 | ||
1139 | /* Statistics ID are global per chip/path, while Client IDs for E1x are per | ||
1140 | * port. | ||
1141 | */ | ||
1142 | static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp) | ||
1143 | { | ||
1144 | if (!CHIP_IS_E1x(fp->bp)) | ||
1145 | return fp->cl_id; | ||
1146 | else | ||
1147 | return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x; | ||
1148 | } | ||
1149 | |||
1150 | static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, | ||
1151 | bnx2x_obj_type obj_type) | ||
1152 | { | ||
1153 | struct bnx2x *bp = fp->bp; | ||
1154 | |||
1155 | /* Configure classification DBs */ | ||
1156 | bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid, | ||
1157 | BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), | ||
1158 | bnx2x_sp_mapping(bp, mac_rdata), | ||
1159 | BNX2X_FILTER_MAC_PENDING, | ||
1160 | &bp->sp_state, obj_type, | ||
1161 | &bp->macs_pool); | ||
1162 | } | ||
1163 | |||
1164 | /** | ||
1165 | * bnx2x_get_path_func_num - get number of active functions | ||
1166 | * | ||
1167 | * @bp: driver handle | ||
1168 | * | ||
1169 | * Calculates the number of active (not hidden) functions on the | ||
1170 | * current path. | ||
1171 | */ | ||
1172 | static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) | ||
1173 | { | ||
1174 | u8 func_num = 0, i; | ||
1175 | |||
1176 | /* 57710 has only one function per-port */ | ||
1177 | if (CHIP_IS_E1(bp)) | ||
1178 | return 1; | ||
1179 | |||
1180 | /* Calculate a number of functions enabled on the current | ||
1181 | * PATH/PORT. | ||
1182 | */ | ||
1183 | if (CHIP_REV_IS_SLOW(bp)) { | ||
1184 | if (IS_MF(bp)) | ||
1185 | func_num = 4; | ||
1186 | else | ||
1187 | func_num = 2; | ||
1188 | } else { | ||
1189 | for (i = 0; i < E1H_FUNC_MAX / 2; i++) { | ||
1190 | u32 func_config = | ||
1191 | MF_CFG_RD(bp, | ||
1192 | func_mf_config[BP_PORT(bp) + 2 * i]. | ||
1193 | config); | ||
1194 | func_num += | ||
1195 | ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1); | ||
1196 | } | ||
1197 | } | ||
1198 | |||
1199 | WARN_ON(!func_num); | ||
1200 | |||
1201 | return func_num; | ||
1202 | } | ||
1203 | |||
1204 | static inline void bnx2x_init_bp_objs(struct bnx2x *bp) | ||
1205 | { | ||
1206 | /* RX_MODE controlling object */ | ||
1207 | bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj); | ||
1208 | |||
1209 | /* multicast configuration controlling object */ | ||
1210 | bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid, | ||
1211 | BP_FUNC(bp), BP_FUNC(bp), | ||
1212 | bnx2x_sp(bp, mcast_rdata), | ||
1213 | bnx2x_sp_mapping(bp, mcast_rdata), | ||
1214 | BNX2X_FILTER_MCAST_PENDING, &bp->sp_state, | ||
1215 | BNX2X_OBJ_TYPE_RX); | ||
1216 | |||
1217 | /* Setup CAM credit pools */ | ||
1218 | bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp), | ||
1219 | bnx2x_get_path_func_num(bp)); | ||
1220 | |||
1221 | /* RSS configuration object */ | ||
1222 | bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id, | ||
1223 | bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp), | ||
1224 | bnx2x_sp(bp, rss_rdata), | ||
1225 | bnx2x_sp_mapping(bp, rss_rdata), | ||
1226 | BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state, | ||
1227 | BNX2X_OBJ_TYPE_RX); | ||
1228 | } | ||
1229 | |||
1230 | static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) | ||
1231 | { | ||
1232 | if (CHIP_IS_E1x(fp->bp)) | ||
1233 | return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H; | ||
1234 | else | ||
1235 | return fp->cl_id; | ||
1236 | } | ||
1237 | |||
1238 | static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) | ||
1239 | { | ||
1240 | struct bnx2x *bp = fp->bp; | ||
1241 | |||
1242 | if (!CHIP_IS_E1x(bp)) | ||
1243 | return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); | ||
1244 | else | ||
1245 | return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); | ||
1246 | } | ||
1247 | |||
1248 | |||
1039 | #ifdef BCM_CNIC | 1249 | #ifdef BCM_CNIC |
1250 | static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) | ||
1251 | { | ||
1252 | return bp->cnic_base_cl_id + cl_idx + | ||
1253 | (bp->pf_num >> 1) * NONE_ETH_CONTEXT_USE; | ||
1254 | } | ||
1255 | |||
1256 | static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) | ||
1257 | { | ||
1258 | |||
1259 | /* the 'first' id is allocated for the cnic */ | ||
1260 | return bp->base_fw_ndsb; | ||
1261 | } | ||
1262 | |||
1263 | static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp) | ||
1264 | { | ||
1265 | return bp->igu_base_sb; | ||
1266 | } | ||
1267 | |||
1268 | |||
1040 | static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) | 1269 | static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) |
1041 | { | 1270 | { |
1042 | bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID + | 1271 | struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); |
1043 | BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; | 1272 | unsigned long q_type = 0; |
1273 | |||
1274 | bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, | ||
1275 | BNX2X_FCOE_ETH_CL_ID_IDX); | ||
1276 | /** Current BNX2X_FCOE_ETH_CID definition implies no more than | ||
1277 | * 16 ETH clients per function when CNIC is enabled! | ||
1278 | * | ||
1279 | * Fix it ASAP!!! | ||
1280 | */ | ||
1044 | bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID; | 1281 | bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID; |
1045 | bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; | 1282 | bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; |
1046 | bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; | 1283 | bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; |
1047 | bnx2x_fcoe(bp, bp) = bp; | 1284 | bnx2x_fcoe(bp, bp) = bp; |
1048 | bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED; | ||
1049 | bnx2x_fcoe(bp, index) = FCOE_IDX; | 1285 | bnx2x_fcoe(bp, index) = FCOE_IDX; |
1050 | bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; | 1286 | bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; |
1051 | bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX; | 1287 | bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX; |
1052 | /* qZone id equals to FW (per path) client id */ | 1288 | /* qZone id equals to FW (per path) client id */ |
1053 | bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) + | 1289 | bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); |
1054 | BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 : | ||
1055 | ETH_MAX_RX_CLIENTS_E1H); | ||
1056 | /* init shortcut */ | 1290 | /* init shortcut */ |
1057 | bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ? | 1291 | bnx2x_fcoe(bp, ustorm_rx_prods_offset) = |
1058 | USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) : | 1292 | bnx2x_rx_ustorm_prods_offset(fp); |
1059 | USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), bnx2x_fcoe_fp(bp)->cl_id); | 1293 | |
1060 | 1294 | /* Configure Queue State object */ | |
1295 | __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); | ||
1296 | __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); | ||
1297 | bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, fp->cid, BP_FUNC(bp), | ||
1298 | bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata), | ||
1299 | q_type); | ||
1300 | |||
1301 | DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d " | ||
1302 | "igu_sb %d\n", | ||
1303 | fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, | ||
1304 | fp->igu_sb_id); | ||
1061 | } | 1305 | } |
1062 | #endif | 1306 | #endif |
1063 | 1307 | ||
1308 | static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, | ||
1309 | struct bnx2x_fastpath *fp) | ||
1310 | { | ||
1311 | int cnt = 1000; | ||
1312 | |||
1313 | while (bnx2x_has_tx_work_unload(fp)) { | ||
1314 | if (!cnt) { | ||
1315 | BNX2X_ERR("timeout waiting for queue[%d]: " | ||
1316 | "fp->tx_pkt_prod(%d) != fp->tx_pkt_cons(%d)\n", | ||
1317 | fp->index, fp->tx_pkt_prod, fp->tx_pkt_cons); | ||
1318 | #ifdef BNX2X_STOP_ON_ERROR | ||
1319 | bnx2x_panic(); | ||
1320 | return -EBUSY; | ||
1321 | #else | ||
1322 | break; | ||
1323 | #endif | ||
1324 | } | ||
1325 | cnt--; | ||
1326 | usleep_range(1000, 1000); | ||
1327 | } | ||
1328 | |||
1329 | return 0; | ||
1330 | } | ||
1331 | |||
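As a hedged usage sketch (not part of the patch), a caller draining all ETH Tx queues before unload might loop roughly as below; the for_each_eth_queue iterator and the early-return policy are assumptions.

    int i, rc;

    /* Sketch: each call polls up to ~1s (1000 iterations of ~1ms) for the
     * Tx producer and consumer indices to meet before the queue is torn down.
     */
    for_each_eth_queue(bp, i) {
        rc = bnx2x_clean_tx_queue(bp, &bp->fp[i]);
        if (rc)
            return rc; /* only reachable with BNX2X_STOP_ON_ERROR */
    }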
1064 | int bnx2x_get_link_cfg_idx(struct bnx2x *bp); | 1332 | int bnx2x_get_link_cfg_idx(struct bnx2x *bp); |
1065 | 1333 | ||
1066 | static inline void __storm_memset_struct(struct bnx2x *bp, | 1334 | static inline void __storm_memset_struct(struct bnx2x *bp, |
@@ -1071,48 +1339,81 @@ static inline void __storm_memset_struct(struct bnx2x *bp, | |||
1071 | REG_WR(bp, addr + (i * 4), data[i]); | 1339 | REG_WR(bp, addr + (i * 4), data[i]); |
1072 | } | 1340 | } |
1073 | 1341 | ||
1074 | static inline void storm_memset_mac_filters(struct bnx2x *bp, | 1342 | static inline void storm_memset_func_cfg(struct bnx2x *bp, |
1075 | struct tstorm_eth_mac_filter_config *mac_filters, | 1343 | struct tstorm_eth_function_common_config *tcfg, |
1076 | u16 abs_fid) | 1344 | u16 abs_fid) |
1077 | { | 1345 | { |
1078 | size_t size = sizeof(struct tstorm_eth_mac_filter_config); | 1346 | size_t size = sizeof(struct tstorm_eth_function_common_config); |
1079 | 1347 | ||
1080 | u32 addr = BAR_TSTRORM_INTMEM + | 1348 | u32 addr = BAR_TSTRORM_INTMEM + |
1081 | TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid); | 1349 | TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); |
1082 | 1350 | ||
1083 | __storm_memset_struct(bp, addr, size, (u32 *)mac_filters); | 1351 | __storm_memset_struct(bp, addr, size, (u32 *)tcfg); |
1084 | } | 1352 | } |
1085 | 1353 | ||
1086 | static inline void storm_memset_cmng(struct bnx2x *bp, | 1354 | static inline void storm_memset_cmng(struct bnx2x *bp, |
1087 | struct cmng_struct_per_port *cmng, | 1355 | struct cmng_struct_per_port *cmng, |
1088 | u8 port) | 1356 | u8 port) |
1089 | { | 1357 | { |
1090 | size_t size = | 1358 | size_t size = sizeof(struct cmng_struct_per_port); |
1091 | sizeof(struct rate_shaping_vars_per_port) + | ||
1092 | sizeof(struct fairness_vars_per_port) + | ||
1093 | sizeof(struct safc_struct_per_port) + | ||
1094 | sizeof(struct pfc_struct_per_port); | ||
1095 | 1359 | ||
1096 | u32 addr = BAR_XSTRORM_INTMEM + | 1360 | u32 addr = BAR_XSTRORM_INTMEM + |
1097 | XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); | 1361 | XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); |
1098 | 1362 | ||
1099 | __storm_memset_struct(bp, addr, size, (u32 *)cmng); | 1363 | __storm_memset_struct(bp, addr, size, (u32 *)cmng); |
1364 | } | ||
1365 | |||
1366 | /** | ||
1367 | * bnx2x_wait_sp_comp - wait for the outstanding SP commands. | ||
1368 | * | ||
1369 | * @bp: driver handle | ||
1370 | * @mask: bits that need to be cleared | ||
1371 | */ | ||
1372 | static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask) | ||
1373 | { | ||
1374 | int tout = 5000; /* Wait for 5 secs tops */ | ||
1375 | |||
1376 | while (tout--) { | ||
1377 | smp_mb(); | ||
1378 | netif_addr_lock_bh(bp->dev); | ||
1379 | if (!(bp->sp_state & mask)) { | ||
1380 | netif_addr_unlock_bh(bp->dev); | ||
1381 | return true; | ||
1382 | } | ||
1383 | netif_addr_unlock_bh(bp->dev); | ||
1100 | 1384 | ||
1101 | addr += size + 4 /* SKIP DCB+LLFC */; | 1385 | usleep_range(1000, 1000); |
1102 | size = sizeof(struct cmng_struct_per_port) - | 1386 | } |
1103 | size /* written */ - 4 /*skipped*/; | 1387 | |
1388 | smp_mb(); | ||
1389 | |||
1390 | netif_addr_lock_bh(bp->dev); | ||
1391 | if (bp->sp_state & mask) { | ||
1392 | BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, " | ||
1393 | "mask 0x%lx\n", bp->sp_state, mask); | ||
1394 | netif_addr_unlock_bh(bp->dev); | ||
1395 | return false; | ||
1396 | } | ||
1397 | netif_addr_unlock_bh(bp->dev); | ||
1104 | 1398 | ||
1105 | __storm_memset_struct(bp, addr, size, | 1399 | return true; |
1106 | (u32 *)(cmng->traffic_type_to_priority_cos)); | ||
1107 | } | 1400 | } |
1108 | 1401 | ||
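For illustration only (not part of the patch): a minimal sketch of how a caller might block on a slowpath completion with bnx2x_wait_sp_comp(); the choice of BNX2X_FILTER_MCAST_PENDING and the error path are assumptions.

    unsigned long pending = 0;

    /* Sketch: the ramrod completion handler is expected to clear this bit in
     * bp->sp_state; here we simply block until it does, or ~5s elapse.
     */
    __set_bit(BNX2X_FILTER_MCAST_PENDING, &pending);
    if (!bnx2x_wait_sp_comp(bp, pending))
        return -EBUSY; /* timed out waiting for the SP completion */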
1109 | /* HW Lock for shared dual port PHYs */ | 1402 | /** |
1403 | * bnx2x_set_ctx_validation - set CDU context validation values | ||
1404 | * | ||
1405 | * @bp: driver handle | ||
1406 | * @cxt: context of the connection on the host memory | ||
1407 | * @cid: SW CID of the connection to be configured | ||
1408 | */ | ||
1409 | void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, | ||
1410 | u32 cid); | ||
1411 | |||
1412 | void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, | ||
1413 | u8 sb_index, u8 disable, u16 usec); | ||
1110 | void bnx2x_acquire_phy_lock(struct bnx2x *bp); | 1414 | void bnx2x_acquire_phy_lock(struct bnx2x *bp); |
1111 | void bnx2x_release_phy_lock(struct bnx2x *bp); | 1415 | void bnx2x_release_phy_lock(struct bnx2x *bp); |
1112 | 1416 | ||
1113 | void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id, | ||
1114 | u8 sb_index, u8 disable, u16 usec); | ||
1115 | |||
1116 | /** | 1417 | /** |
1117 | * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration. | 1418 | * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration. |
1118 | * | 1419 | * |
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c index 410a49e571ac..aaed9f09c329 100644 --- a/drivers/net/bnx2x/bnx2x_dcb.c +++ b/drivers/net/bnx2x/bnx2x_dcb.c | |||
@@ -55,15 +55,14 @@ static void bnx2x_pfc_set(struct bnx2x *bp) | |||
55 | struct bnx2x_nig_brb_pfc_port_params pfc_params = {0}; | 55 | struct bnx2x_nig_brb_pfc_port_params pfc_params = {0}; |
56 | u32 pri_bit, val = 0; | 56 | u32 pri_bit, val = 0; |
57 | u8 pri; | 57 | u8 pri; |
58 | int i; | ||
58 | 59 | ||
59 | /* Tx COS configuration */ | 60 | /* Tx COS configuration */ |
60 | if (bp->dcbx_port_params.ets.cos_params[0].pauseable) | 61 | for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++) |
61 | pfc_params.rx_cos0_priority_mask = | 62 | if (bp->dcbx_port_params.ets.cos_params[i].pauseable) |
62 | bp->dcbx_port_params.ets.cos_params[0].pri_bitmask; | 63 | pfc_params.rx_cos_priority_mask[i] = |
63 | if (bp->dcbx_port_params.ets.cos_params[1].pauseable) | 64 | bp->dcbx_port_params.ets. |
64 | pfc_params.rx_cos1_priority_mask = | 65 | cos_params[i].pri_bitmask; |
65 | bp->dcbx_port_params.ets.cos_params[1].pri_bitmask; | ||
66 | |||
67 | 66 | ||
68 | /** | 67 | /** |
69 | * Rx COS configuration | 68 | * Rx COS configuration |
@@ -378,7 +377,7 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp, | |||
378 | 377 | ||
379 | static void bnx2x_pfc_set_pfc(struct bnx2x *bp) | 378 | static void bnx2x_pfc_set_pfc(struct bnx2x *bp) |
380 | { | 379 | { |
381 | if (CHIP_IS_E2(bp)) { | 380 | if (!CHIP_IS_E1x(bp)) { |
382 | if (BP_PORT(bp)) { | 381 | if (BP_PORT(bp)) { |
383 | BNX2X_ERR("4 port mode is not supported"); | 382 | BNX2X_ERR("4 port mode is not supported"); |
384 | return; | 383 | return; |
@@ -406,7 +405,7 @@ static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) | |||
406 | 0 /* connectionless */, | 405 | 0 /* connectionless */, |
407 | 0 /* dataHi is zero */, | 406 | 0 /* dataHi is zero */, |
408 | 0 /* dataLo is zero */, | 407 | 0 /* dataLo is zero */, |
409 | 1 /* common */); | 408 | NONE_CONNECTION_TYPE); |
410 | } | 409 | } |
411 | 410 | ||
412 | static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) | 411 | static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) |
@@ -417,7 +416,7 @@ static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) | |||
417 | 0, /* connectionless */ | 416 | 0, /* connectionless */ |
418 | U64_HI(bnx2x_sp_mapping(bp, pfc_config)), | 417 | U64_HI(bnx2x_sp_mapping(bp, pfc_config)), |
419 | U64_LO(bnx2x_sp_mapping(bp, pfc_config)), | 418 | U64_LO(bnx2x_sp_mapping(bp, pfc_config)), |
420 | 1 /* commmon */); | 419 | NONE_CONNECTION_TYPE); |
421 | } | 420 | } |
422 | 421 | ||
423 | static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp) | 422 | static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp) |
@@ -425,7 +424,7 @@ static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp) | |||
425 | struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); | 424 | struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); |
426 | u8 status = 0; | 425 | u8 status = 0; |
427 | 426 | ||
428 | bnx2x_ets_disabled(&bp->link_params); | 427 | bnx2x_ets_disabled(&bp->link_params/*, &bp->link_vars*/); |
429 | 428 | ||
430 | if (!ets->enabled) | 429 | if (!ets->enabled) |
431 | return; | 430 | return; |
@@ -527,6 +526,7 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp) | |||
527 | BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n"); | 526 | BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n"); |
528 | return -EINVAL; | 527 | return -EINVAL; |
529 | } | 528 | } |
529 | |||
530 | rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset, | 530 | rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset, |
531 | DCBX_READ_LOCAL_MIB); | 531 | DCBX_READ_LOCAL_MIB); |
532 | 532 | ||
@@ -563,15 +563,6 @@ u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent) | |||
563 | DCB_APP_IDTYPE_ETHTYPE; | 563 | DCB_APP_IDTYPE_ETHTYPE; |
564 | } | 564 | } |
565 | 565 | ||
566 | static inline | ||
567 | void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp) | ||
568 | { | ||
569 | int i; | ||
570 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) | ||
571 | bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &= | ||
572 | ~DCBX_APP_ENTRY_VALID; | ||
573 | } | ||
574 | |||
575 | int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall) | 566 | int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall) |
576 | { | 567 | { |
577 | int i, err = 0; | 568 | int i, err = 0; |
@@ -597,32 +588,28 @@ int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall) | |||
597 | } | 588 | } |
598 | #endif | 589 | #endif |
599 | 590 | ||
591 | static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) | ||
592 | { | ||
593 | if (SHMEM2_HAS(bp, drv_flags)) { | ||
594 | u32 drv_flags; | ||
595 | bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS); | ||
596 | drv_flags = SHMEM2_RD(bp, drv_flags); | ||
597 | |||
598 | if (set) | ||
599 | SET_FLAGS(drv_flags, flags); | ||
600 | else | ||
601 | RESET_FLAGS(drv_flags, flags); | ||
602 | |||
603 | SHMEM2_WR(bp, drv_flags, drv_flags); | ||
604 | DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags); | ||
605 | bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS); | ||
606 | } | ||
607 | } | ||
608 | |||
600 | void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | 609 | void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) |
601 | { | 610 | { |
602 | switch (state) { | 611 | switch (state) { |
603 | case BNX2X_DCBX_STATE_NEG_RECEIVED: | 612 | case BNX2X_DCBX_STATE_NEG_RECEIVED: |
604 | #ifdef BCM_CNIC | ||
605 | if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) { | ||
606 | struct cnic_ops *c_ops; | ||
607 | struct cnic_eth_dev *cp = &bp->cnic_eth_dev; | ||
608 | bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; | ||
609 | cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; | ||
610 | cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI; | ||
611 | |||
612 | rcu_read_lock(); | ||
613 | c_ops = rcu_dereference(bp->cnic_ops); | ||
614 | if (c_ops) { | ||
615 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_ISCSI_CMD); | ||
616 | rcu_read_unlock(); | ||
617 | return; | ||
618 | } | ||
619 | rcu_read_unlock(); | ||
620 | } | ||
621 | |||
622 | /* fall through if no CNIC initialized */ | ||
623 | case BNX2X_DCBX_STATE_ISCSI_STOPPED: | ||
624 | #endif | ||
625 | |||
626 | { | 613 | { |
627 | DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); | 614 | DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); |
628 | #ifdef BCM_DCBNL | 615 | #ifdef BCM_DCBNL |
@@ -646,41 +633,28 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | |||
646 | bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, | 633 | bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, |
647 | bp->dcbx_error); | 634 | bp->dcbx_error); |
648 | 635 | ||
649 | if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) { | 636 | /* mark DCBX result for PMF migration */ |
650 | #ifdef BCM_DCBNL | 637 | bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1); |
651 | /** | ||
652 | * Add new app tlvs to dcbnl | ||
653 | */ | ||
654 | bnx2x_dcbnl_update_applist(bp, false); | ||
655 | #endif | ||
656 | bnx2x_dcbx_stop_hw_tx(bp); | ||
657 | return; | ||
658 | } | ||
659 | /* fall through */ | ||
660 | #ifdef BCM_DCBNL | 638 | #ifdef BCM_DCBNL |
661 | /** | 639 | /** |
662 | * Invalidate the local app tlvs if they are not added | 640 | * Add new app tlvs to dcbnl |
663 | * to the dcbnl app list to avoid deleting them from | ||
664 | * the list later on | ||
665 | */ | 641 | */ |
666 | bnx2x_dcbx_invalidate_local_apps(bp); | 642 | bnx2x_dcbnl_update_applist(bp, false); |
667 | #endif | 643 | #endif |
644 | bnx2x_dcbx_stop_hw_tx(bp); | ||
645 | |||
646 | return; | ||
668 | } | 647 | } |
669 | case BNX2X_DCBX_STATE_TX_PAUSED: | 648 | case BNX2X_DCBX_STATE_TX_PAUSED: |
670 | DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n"); | 649 | DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n"); |
671 | bnx2x_pfc_set_pfc(bp); | 650 | bnx2x_pfc_set_pfc(bp); |
672 | 651 | ||
673 | bnx2x_dcbx_update_ets_params(bp); | 652 | bnx2x_dcbx_update_ets_params(bp); |
674 | if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) { | 653 | bnx2x_dcbx_resume_hw_tx(bp); |
675 | bnx2x_dcbx_resume_hw_tx(bp); | 654 | return; |
676 | return; | ||
677 | } | ||
678 | /* fall through */ | ||
679 | case BNX2X_DCBX_STATE_TX_RELEASED: | 655 | case BNX2X_DCBX_STATE_TX_RELEASED: |
680 | DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n"); | 656 | DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n"); |
681 | if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) | 657 | bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0); |
682 | bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0); | ||
683 | |||
684 | return; | 658 | return; |
685 | default: | 659 | default: |
686 | BNX2X_ERR("Unknown DCBX_STATE\n"); | 660 | BNX2X_ERR("Unknown DCBX_STATE\n"); |
@@ -868,7 +842,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, | |||
868 | 842 | ||
869 | void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) | 843 | void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) |
870 | { | 844 | { |
871 | if (CHIP_IS_E2(bp) && !CHIP_MODE_IS_4_PORT(bp)) { | 845 | if (!CHIP_IS_E1x(bp) && !CHIP_MODE_IS_4_PORT(bp)) { |
872 | bp->dcb_state = dcb_on; | 846 | bp->dcb_state = dcb_on; |
873 | bp->dcbx_enabled = dcbx_enabled; | 847 | bp->dcbx_enabled = dcbx_enabled; |
874 | } else { | 848 | } else { |
@@ -966,7 +940,7 @@ void bnx2x_dcbx_init(struct bnx2x *bp) | |||
966 | DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n", | 940 | DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n", |
967 | bp->dcb_state, bp->port.pmf); | 941 | bp->dcb_state, bp->port.pmf); |
968 | 942 | ||
969 | if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf && | 943 | if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf && |
970 | SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { | 944 | SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { |
971 | dcbx_lldp_params_offset = | 945 | dcbx_lldp_params_offset = |
972 | SHMEM2_RD(bp, dcbx_lldp_params_offset); | 946 | SHMEM2_RD(bp, dcbx_lldp_params_offset); |
@@ -974,6 +948,8 @@ void bnx2x_dcbx_init(struct bnx2x *bp) | |||
974 | DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n", | 948 | DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n", |
975 | dcbx_lldp_params_offset); | 949 | dcbx_lldp_params_offset); |
976 | 950 | ||
951 | bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0); | ||
952 | |||
977 | if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) { | 953 | if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) { |
978 | bnx2x_dcbx_lldp_updated_params(bp, | 954 | bnx2x_dcbx_lldp_updated_params(bp, |
979 | dcbx_lldp_params_offset); | 955 | dcbx_lldp_params_offset); |
@@ -981,46 +957,12 @@ void bnx2x_dcbx_init(struct bnx2x *bp) | |||
981 | bnx2x_dcbx_admin_mib_updated_params(bp, | 957 | bnx2x_dcbx_admin_mib_updated_params(bp, |
982 | dcbx_lldp_params_offset); | 958 | dcbx_lldp_params_offset); |
983 | 959 | ||
984 | /* set default configuration BC has */ | 960 | /* Let HW start negotiation */ |
985 | bnx2x_dcbx_set_params(bp, | ||
986 | BNX2X_DCBX_STATE_NEG_RECEIVED); | ||
987 | |||
988 | bnx2x_fw_command(bp, | 961 | bnx2x_fw_command(bp, |
989 | DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0); | 962 | DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0); |
990 | } | 963 | } |
991 | } | 964 | } |
992 | } | 965 | } |
993 | |||
994 | void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp) | ||
995 | { | ||
996 | struct priority_cos pricos[MAX_PFC_TRAFFIC_TYPES]; | ||
997 | u32 i = 0, addr; | ||
998 | memset(pricos, 0, sizeof(pricos)); | ||
999 | /* Default initialization */ | ||
1000 | for (i = 0; i < MAX_PFC_TRAFFIC_TYPES; i++) | ||
1001 | pricos[i].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED; | ||
1002 | |||
1003 | /* Store per port struct to internal memory */ | ||
1004 | addr = BAR_XSTRORM_INTMEM + | ||
1005 | XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) + | ||
1006 | offsetof(struct cmng_struct_per_port, | ||
1007 | traffic_type_to_priority_cos); | ||
1008 | __storm_memset_struct(bp, addr, sizeof(pricos), (u32 *)pricos); | ||
1009 | |||
1010 | |||
1011 | /* LLFC disabled.*/ | ||
1012 | REG_WR8(bp , BAR_XSTRORM_INTMEM + | ||
1013 | XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) + | ||
1014 | offsetof(struct cmng_struct_per_port, llfc_mode), | ||
1015 | LLFC_MODE_NONE); | ||
1016 | |||
1017 | /* DCBX disabled.*/ | ||
1018 | REG_WR8(bp , BAR_XSTRORM_INTMEM + | ||
1019 | XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) + | ||
1020 | offsetof(struct cmng_struct_per_port, dcb_enabled), | ||
1021 | DCB_DISABLED); | ||
1022 | } | ||
1023 | |||
1024 | static void | 966 | static void |
1025 | bnx2x_dcbx_print_cos_params(struct bnx2x *bp, | 967 | bnx2x_dcbx_print_cos_params(struct bnx2x *bp, |
1026 | struct flow_control_configuration *pfc_fw_cfg) | 968 | struct flow_control_configuration *pfc_fw_cfg) |
@@ -1591,13 +1533,7 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp) | |||
1591 | 1533 | ||
1592 | /* Fw version should be incremented each update */ | 1534 | /* Fw version should be incremented each update */ |
1593 | pfc_fw_cfg->dcb_version = ++bp->dcb_version; | 1535 | pfc_fw_cfg->dcb_version = ++bp->dcb_version; |
1594 | pfc_fw_cfg->dcb_enabled = DCB_ENABLED; | 1536 | pfc_fw_cfg->dcb_enabled = 1; |
1595 | |||
1596 | /* Default initialization */ | ||
1597 | for (pri = 0; pri < MAX_PFC_TRAFFIC_TYPES ; pri++) { | ||
1598 | tt2cos[pri].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED; | ||
1599 | tt2cos[pri].cos = 0; | ||
1600 | } | ||
1601 | 1537 | ||
1602 | /* Fill priority parameters */ | 1538 | /* Fill priority parameters */ |
1603 | for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) { | 1539 | for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) { |
@@ -1605,14 +1541,37 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp) | |||
1605 | pri_bit = 1 << tt2cos[pri].priority; | 1541 | pri_bit = 1 << tt2cos[pri].priority; |
1606 | 1542 | ||
1607 | /* Fill COS parameters based on COS calculated to | 1543 | /* Fill COS parameters based on COS calculated to |
1608 | * make it more generally for future use */ | 1544 | * make it more general for future use */ |
1609 | for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) | 1545 | for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) |
1610 | if (bp->dcbx_port_params.ets.cos_params[cos]. | 1546 | if (bp->dcbx_port_params.ets.cos_params[cos]. |
1611 | pri_bitmask & pri_bit) | 1547 | pri_bitmask & pri_bit) |
1612 | tt2cos[pri].cos = cos; | 1548 | tt2cos[pri].cos = cos; |
1613 | } | 1549 | } |
1550 | |||
1551 | /* we never want the FW to add a 0 vlan tag */ | ||
1552 | pfc_fw_cfg->dont_add_pri_0_en = 1; | ||
1553 | |||
1614 | bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg); | 1554 | bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg); |
1615 | } | 1555 | } |
1556 | |||
1557 | void bnx2x_dcbx_pmf_update(struct bnx2x *bp) | ||
1558 | { | ||
1559 | /* if we need to synchronize the DCBX result from the previous PMF | ||
1560 | * read it from shmem and update bp accordingly | ||
1561 | */ | ||
1562 | if (SHMEM2_HAS(bp, drv_flags) && | ||
1563 | GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) { | ||
1564 | /* Read neg results if dcbx is in the FW */ | ||
1565 | if (bnx2x_dcbx_read_shmem_neg_results(bp)) | ||
1566 | return; | ||
1567 | |||
1568 | bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat, | ||
1569 | bp->dcbx_error); | ||
1570 | bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, | ||
1571 | bp->dcbx_error); | ||
1572 | } | ||
1573 | } | ||
1574 | |||
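A hedged sketch of the intended call pattern for bnx2x_dcbx_pmf_update(), as implied by the comment above; the exact trigger (the instance that has just taken over as PMF) is an assumption.

    /* Sketch: when this driver instance becomes the PMF, pull the DCBX
     * negotiation result the previous PMF left in shmem2 instead of
     * re-negotiating from scratch.
     */
    if (bp->port.pmf)
        bnx2x_dcbx_pmf_update(bp);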
1616 | /* DCB netlink */ | 1575 | /* DCB netlink */ |
1617 | #ifdef BCM_DCBNL | 1576 | #ifdef BCM_DCBNL |
1618 | 1577 | ||
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h index 5bedd85e94df..7887834cd65c 100644 --- a/drivers/net/bnx2x/bnx2x_dcb.h +++ b/drivers/net/bnx2x/bnx2x_dcb.h | |||
@@ -179,9 +179,6 @@ void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled); | |||
179 | 179 | ||
180 | enum { | 180 | enum { |
181 | BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1, | 181 | BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1, |
182 | #ifdef BCM_CNIC | ||
183 | BNX2X_DCBX_STATE_ISCSI_STOPPED, | ||
184 | #endif | ||
185 | BNX2X_DCBX_STATE_TX_PAUSED, | 182 | BNX2X_DCBX_STATE_TX_PAUSED, |
186 | BNX2X_DCBX_STATE_TX_RELEASED | 183 | BNX2X_DCBX_STATE_TX_RELEASED |
187 | }; | 184 | }; |
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h index fb3ff7c4d7ca..407531a9ab13 100644 --- a/drivers/net/bnx2x/bnx2x_dump.h +++ b/drivers/net/bnx2x/bnx2x_dump.h | |||
@@ -25,34 +25,55 @@ | |||
25 | 25 | ||
26 | 26 | ||
27 | /*definitions */ | 27 | /*definitions */ |
28 | #define XSTORM_WAITP_ADDR 0x2b8a80 | 28 | #define XSTORM_WAITP_ADDR 0x2b8a80 |
29 | #define TSTORM_WAITP_ADDR 0x1b8a80 | 29 | #define TSTORM_WAITP_ADDR 0x1b8a80 |
30 | #define USTORM_WAITP_ADDR 0x338a80 | 30 | #define USTORM_WAITP_ADDR 0x338a80 |
31 | #define CSTORM_WAITP_ADDR 0x238a80 | 31 | #define CSTORM_WAITP_ADDR 0x238a80 |
32 | #define TSTORM_CAM_MODE 0x1B1440 | 32 | #define TSTORM_CAM_MODE 0x1B1440 |
33 | 33 | ||
34 | #define MAX_TIMER_PENDING 200 | 34 | #define MAX_TIMER_PENDING 200 |
35 | #define TIMER_SCAN_DONT_CARE 0xFF | 35 | #define TIMER_SCAN_DONT_CARE 0xFF |
36 | #define RI_E1 0x1 | 36 | #define RI_E1 0x1 |
37 | #define RI_E1H 0x2 | 37 | #define RI_E1H 0x2 |
38 | #define RI_E2 0x4 | 38 | #define RI_E2 0x4 |
39 | #define RI_ONLINE 0x100 | 39 | #define RI_E3 0x8 |
40 | #define RI_PATH0_DUMP 0x200 | 40 | #define RI_ONLINE 0x100 |
41 | #define RI_PATH1_DUMP 0x400 | 41 | #define RI_PATH0_DUMP 0x200 |
42 | #define RI_E1_OFFLINE (RI_E1) | 42 | #define RI_PATH1_DUMP 0x400 |
43 | #define RI_E1_ONLINE (RI_E1 | RI_ONLINE) | 43 | #define RI_E1_OFFLINE (RI_E1) |
44 | #define RI_E1H_OFFLINE (RI_E1H) | 44 | #define RI_E1_ONLINE (RI_E1 | RI_ONLINE) |
45 | #define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) | 45 | #define RI_E1H_OFFLINE (RI_E1H) |
46 | #define RI_E2_OFFLINE (RI_E2) | 46 | #define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) |
47 | #define RI_E2_ONLINE (RI_E2 | RI_ONLINE) | 47 | #define RI_E2_OFFLINE (RI_E2) |
48 | #define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H) | 48 | #define RI_E2_ONLINE (RI_E2 | RI_ONLINE) |
49 | #define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) | 49 | #define RI_E3_OFFLINE (RI_E3) |
50 | #define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H) | 50 | #define RI_E3_ONLINE (RI_E3 | RI_ONLINE) |
51 | #define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE) | 51 | #define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H) |
52 | #define RI_E1E2_OFFLINE (RI_E2 | RI_E1) | 52 | #define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) |
53 | #define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE) | 53 | #define RI_E1E1HE2_OFFLINE (RI_E1 | RI_E1H | RI_E2) |
54 | #define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2) | 54 | #define RI_E1E1HE2_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) |
55 | #define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) | 55 | #define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H) |
56 | #define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE) | ||
57 | #define RI_E1E2_OFFLINE (RI_E2 | RI_E1) | ||
58 | #define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE) | ||
59 | #define RI_E1E3_OFFLINE (RI_E1 | RI_E3) | ||
60 | #define RI_E1E3_ONLINE (RI_E1 | RI_E3 | RI_ONLINE) | ||
61 | #define RI_E1HE3_OFFLINE (RI_E1H | RI_E3) | ||
62 | #define RI_E1HE3_ONLINE (RI_E1H | RI_E3 | RI_ONLINE) | ||
63 | #define RI_E2E3_OFFLINE (RI_E2 | RI_E3) | ||
64 | #define RI_E2E3_ONLINE (RI_E2 | RI_E3 | RI_ONLINE) | ||
65 | #define RI_E1E1HE3_OFFLINE (RI_E1 | RI_E1H | RI_E3) | ||
66 | #define RI_E1E1HE3_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_ONLINE) | ||
67 | #define RI_E1HE2E3_OFFLINE (RI_E2 | RI_E1H | RI_E3) | ||
68 | #define RI_E1HE2E3_ONLINE (RI_E2 | RI_E1H | RI_E3 | RI_ONLINE) | ||
69 | #define RI_E1E2E3_OFFLINE (RI_E2 | RI_E1 | RI_E3) | ||
70 | #define RI_E1E2E3_ONLINE (RI_E2 | RI_E1 | RI_E3 | RI_ONLINE) | ||
71 | #define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3) | ||
72 | #define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE) | ||
73 | |||
74 | #define DBG_DMP_TRACE_BUFFER_SIZE 0x800 | ||
75 | #define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \ | ||
76 | ((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE) | ||
56 | 77 | ||
57 | struct dump_sign { | 78 | struct dump_sign { |
58 | u32 time_stamp; | 79 | u32 time_stamp; |
@@ -86,185 +107,255 @@ struct wreg_addr { | |||
86 | u16 info; | 107 | u16 info; |
87 | }; | 108 | }; |
88 | 109 | ||
89 | #define REGS_COUNT 834 | 110 | static const struct reg_addr reg_addrs[] = { |
90 | static const struct reg_addr reg_addrs[REGS_COUNT] = { | ||
91 | { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE }, | 111 | { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE }, |
92 | { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE }, | 112 | { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE }, |
93 | { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE }, | 113 | { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2E3_ONLINE }, |
94 | { 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE }, | 114 | { 0x9000, 147, RI_E2E3_ONLINE }, { 0x924c, 1, RI_E2_ONLINE }, |
95 | { 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE }, | 115 | { 0x9250, 16, RI_E2E3_ONLINE }, { 0x9400, 33, RI_E2E3_ONLINE }, |
96 | { 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE }, | 116 | { 0x9484, 5, RI_E3_ONLINE }, { 0xa000, 27, RI_ALL_ONLINE }, |
97 | { 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE }, | 117 | { 0xa06c, 1, RI_E1E1H_ONLINE }, { 0xa070, 71, RI_ALL_ONLINE }, |
98 | { 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE }, | 118 | { 0xa18c, 4, RI_E1E1H_ONLINE }, { 0xa19c, 62, RI_ALL_ONLINE }, |
99 | { 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE }, | 119 | { 0xa294, 2, RI_E1E1H_ONLINE }, { 0xa29c, 2, RI_ALL_ONLINE }, |
100 | { 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE }, | 120 | { 0xa2a4, 2, RI_E1E1HE2_ONLINE }, { 0xa2ac, 52, RI_ALL_ONLINE }, |
101 | { 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE }, | 121 | { 0xa39c, 7, RI_E1HE2E3_ONLINE }, { 0xa3b8, 2, RI_E3_ONLINE }, |
102 | { 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE }, | 122 | { 0xa3c0, 3, RI_E1HE2E3_ONLINE }, { 0xa3d0, 1, RI_E1HE2E3_ONLINE }, |
103 | { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE }, | 123 | { 0xa3d8, 1, RI_E1HE2E3_ONLINE }, { 0xa3e0, 1, RI_E1HE2E3_ONLINE }, |
124 | { 0xa3e8, 1, RI_E1HE2E3_ONLINE }, { 0xa3f0, 1, RI_E1HE2E3_ONLINE }, | ||
125 | { 0xa3f8, 1, RI_E1HE2E3_ONLINE }, { 0xa400, 40, RI_ALL_ONLINE }, | ||
126 | { 0xa4a0, 1, RI_E1E1HE2_ONLINE }, { 0xa4a4, 2, RI_ALL_ONLINE }, | ||
127 | { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_E1E1HE2_ONLINE }, | ||
104 | { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE }, | 128 | { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE }, |
105 | { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE }, | 129 | { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 3, RI_ALL_ONLINE }, |
106 | { 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE }, | 130 | { 0xa4fc, 2, RI_ALL_ONLINE }, { 0xa504, 1, RI_E1E1H_ONLINE }, |
107 | { 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE }, | 131 | { 0xa508, 3, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE }, |
108 | { 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE }, | 132 | { 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE }, |
109 | { 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE }, | 133 | { 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE }, |
110 | { 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE }, | 134 | { 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_E1E1H_ONLINE }, |
111 | { 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE }, | 135 | { 0xa550, 1, RI_E1E1H_ONLINE }, { 0xa558, 1, RI_E1E1H_ONLINE }, |
112 | { 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE }, | 136 | { 0xa560, 1, RI_E1E1H_ONLINE }, { 0xa568, 1, RI_E1E1H_ONLINE }, |
113 | { 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE }, | 137 | { 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE }, |
114 | { 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE }, | 138 | { 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_E1E1HE2_ONLINE }, |
115 | { 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE }, | 139 | { 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1HE2E3_ONLINE }, |
116 | { 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE }, | 140 | { 0xa5e8, 1, RI_E1HE2E3_ONLINE }, { 0xa5f0, 1, RI_E1HE2E3_ONLINE }, |
117 | { 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE }, | 141 | { 0xa5f8, 1, RI_E1HE2_ONLINE }, { 0xa5fc, 9, RI_E1HE2E3_ONLINE }, |
118 | { 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE }, | 142 | { 0xa620, 6, RI_E2E3_ONLINE }, { 0xa638, 20, RI_E2_ONLINE }, |
119 | { 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE }, | 143 | { 0xa688, 42, RI_E2E3_ONLINE }, { 0xa730, 1, RI_E2_ONLINE }, |
120 | { 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE }, | 144 | { 0xa734, 2, RI_E2E3_ONLINE }, { 0xa73c, 4, RI_E2_ONLINE }, |
145 | { 0xa74c, 5, RI_E2E3_ONLINE }, { 0xa760, 5, RI_E2_ONLINE }, | ||
146 | { 0xa774, 7, RI_E2E3_ONLINE }, { 0xa790, 15, RI_E2_ONLINE }, | ||
147 | { 0xa7cc, 4, RI_E2E3_ONLINE }, { 0xa7e0, 6, RI_E3_ONLINE }, | ||
148 | { 0xa800, 18, RI_E2_ONLINE }, { 0xa848, 33, RI_E2E3_ONLINE }, | ||
149 | { 0xa8cc, 2, RI_E3_ONLINE }, { 0xa8d4, 4, RI_E2E3_ONLINE }, | ||
150 | { 0xa8e4, 1, RI_E3_ONLINE }, { 0xa8e8, 1, RI_E2E3_ONLINE }, | ||
151 | { 0xa8f0, 1, RI_E2E3_ONLINE }, { 0xa8f8, 30, RI_E3_ONLINE }, | ||
152 | { 0xa974, 73, RI_E3_ONLINE }, { 0xac30, 1, RI_E3_ONLINE }, | ||
153 | { 0xac40, 1, RI_E3_ONLINE }, { 0xac50, 1, RI_E3_ONLINE }, | ||
154 | { 0x10000, 9, RI_ALL_ONLINE }, { 0x10024, 1, RI_E1E1HE2_ONLINE }, | ||
155 | { 0x10028, 5, RI_ALL_ONLINE }, { 0x1003c, 6, RI_E1E1HE2_ONLINE }, | ||
156 | { 0x10054, 20, RI_ALL_ONLINE }, { 0x100a4, 4, RI_E1E1HE2_ONLINE }, | ||
157 | { 0x100b4, 11, RI_ALL_ONLINE }, { 0x100e0, 4, RI_E1E1HE2_ONLINE }, | ||
158 | { 0x100f0, 8, RI_ALL_ONLINE }, { 0x10110, 6, RI_E1E1HE2_ONLINE }, | ||
159 | { 0x10128, 110, RI_ALL_ONLINE }, { 0x102e0, 4, RI_E1E1HE2_ONLINE }, | ||
160 | { 0x102f0, 18, RI_ALL_ONLINE }, { 0x10338, 20, RI_E1E1HE2_ONLINE }, | ||
161 | { 0x10388, 10, RI_ALL_ONLINE }, { 0x10400, 6, RI_E1E1HE2_ONLINE }, | ||
162 | { 0x10418, 6, RI_ALL_ONLINE }, { 0x10430, 10, RI_E1E1HE2_ONLINE }, | ||
163 | { 0x10458, 22, RI_ALL_ONLINE }, { 0x104b0, 12, RI_E1E1HE2_ONLINE }, | ||
164 | { 0x104e0, 1, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE }, | ||
121 | { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE }, | 165 | { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE }, |
122 | { 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE }, | 166 | { 0x10750, 2, RI_E1E1HE2_ONLINE }, { 0x10760, 2, RI_E1E1HE2_ONLINE }, |
123 | { 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE }, | 167 | { 0x10770, 2, RI_E1E1HE2_ONLINE }, { 0x10780, 2, RI_E1E1HE2_ONLINE }, |
124 | { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE }, | 168 | { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_E1E1HE2_ONLINE }, |
125 | { 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE }, | 169 | { 0x107b0, 2, RI_E1E1HE2_ONLINE }, { 0x107c0, 2, RI_E1E1HE2_ONLINE }, |
126 | { 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE }, | 170 | { 0x107d0, 2, RI_E1E1HE2_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE }, |
127 | { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE }, | 171 | { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE }, |
128 | { 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE }, | 172 | { 0x16000, 1, RI_E1HE2_ONLINE }, { 0x16004, 25, RI_E1HE2E3_ONLINE }, |
129 | { 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE }, | 173 | { 0x16070, 18, RI_E1HE2E3_ONLINE }, { 0x160c0, 7, RI_E1HE2E3_ONLINE }, |
130 | { 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE }, | 174 | { 0x160dc, 2, RI_E1HE2_ONLINE }, { 0x160e4, 10, RI_E1HE2E3_ONLINE }, |
131 | { 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE }, | 175 | { 0x1610c, 2, RI_E1HE2_ONLINE }, { 0x16114, 6, RI_E1HE2E3_ONLINE }, |
132 | { 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE }, | 176 | { 0x16140, 48, RI_E1HE2E3_ONLINE }, { 0x16204, 5, RI_E1HE2E3_ONLINE }, |
133 | { 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE }, | 177 | { 0x18000, 1, RI_E1HE2E3_ONLINE }, { 0x18008, 1, RI_E1HE2E3_ONLINE }, |
134 | { 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE }, | 178 | { 0x18010, 35, RI_E2E3_ONLINE }, { 0x180a4, 2, RI_E2E3_ONLINE }, |
135 | { 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE }, | 179 | { 0x180c0, 109, RI_E2E3_ONLINE }, { 0x18274, 1, RI_E2_ONLINE }, |
136 | { 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE }, | 180 | { 0x18278, 81, RI_E2E3_ONLINE }, { 0x18440, 63, RI_E2E3_ONLINE }, |
137 | { 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE }, | 181 | { 0x18570, 42, RI_E3_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE }, |
138 | { 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE }, | 182 | { 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 94, RI_ALL_ONLINE }, |
139 | { 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE }, | 183 | { 0x201f8, 1, RI_E1E1H_ONLINE }, { 0x201fc, 1, RI_ALL_ONLINE }, |
140 | { 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE }, | 184 | { 0x20200, 1, RI_E1E1H_ONLINE }, { 0x20204, 1, RI_ALL_ONLINE }, |
141 | { 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE }, | 185 | { 0x20208, 1, RI_E1E1H_ONLINE }, { 0x2020c, 39, RI_ALL_ONLINE }, |
142 | { 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE }, | 186 | { 0x202c8, 1, RI_E2E3_ONLINE }, { 0x202d8, 4, RI_E2E3_ONLINE }, |
143 | { 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE }, | 187 | { 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE }, |
144 | { 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE }, | 188 | { 0x2042c, 18, RI_E1HE2E3_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE }, |
145 | { 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE }, | 189 | { 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE }, |
146 | { 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE }, | 190 | { 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE }, |
147 | { 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE }, | 191 | { 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE }, |
148 | { 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE }, | 192 | { 0x40000, 98, RI_ALL_ONLINE }, { 0x401a8, 8, RI_E1HE2E3_ONLINE }, |
149 | { 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE }, | 193 | { 0x401c8, 1, RI_E1H_ONLINE }, { 0x401cc, 2, RI_E1HE2E3_ONLINE }, |
150 | { 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE }, | 194 | { 0x401d4, 2, RI_E2E3_ONLINE }, { 0x40200, 4, RI_ALL_ONLINE }, |
151 | { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE }, | 195 | { 0x40220, 18, RI_E2E3_ONLINE }, { 0x40268, 2, RI_E3_ONLINE }, |
152 | { 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE }, | 196 | { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2E3_ONLINE }, |
197 | { 0x404e0, 1, RI_E2E3_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE }, | ||
153 | { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE }, | 198 | { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE }, |
154 | { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE }, | 199 | { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE }, |
155 | { 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE }, | 200 | { 0x40550, 10, RI_E2E3_ONLINE }, { 0x40610, 2, RI_E2E3_ONLINE }, |
156 | { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE }, | 201 | { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2E3_ONLINE }, |
157 | { 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE }, | 202 | { 0x422d4, 5, RI_E1HE2E3_ONLINE }, { 0x422e8, 1, RI_E2E3_ONLINE }, |
158 | { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE }, | 203 | { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE }, |
159 | { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE }, | 204 | { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2E3_ONLINE }, |
160 | { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE }, | 205 | { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE }, |
161 | { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE }, | 206 | { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE }, |
162 | { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE }, | 207 | { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2E3_ONLINE }, |
163 | { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE }, | 208 | { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE }, |
164 | { 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE }, | 209 | { 0x50228, 6, RI_E1HE2E3_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE }, |
165 | { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE }, | 210 | { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2E3_ONLINE }, |
166 | { 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE }, | 211 | { 0x5030c, 1, RI_E2E3_ONLINE }, { 0x50318, 1, RI_E2E3_ONLINE }, |
167 | { 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE }, | 212 | { 0x5031c, 1, RI_E2E3_ONLINE }, { 0x50320, 2, RI_E2E3_ONLINE }, |
168 | { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE }, | 213 | { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE }, |
169 | { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE }, | 214 | { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE }, |
170 | { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE }, | 215 | { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE }, |
171 | { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE }, | 216 | { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE }, |
172 | { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE }, | 217 | { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE }, |
173 | { 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE }, | 218 | { 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_E1E1HE2_ONLINE }, |
174 | { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE }, | 219 | { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE }, |
175 | { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE }, | 220 | { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2E3_ONLINE }, |
176 | { 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE }, | 221 | { 0x601ac, 18, RI_E2E3_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE }, |
177 | { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE }, | 222 | { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2E3_ONLINE }, |
178 | { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE }, | 223 | { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE }, |
179 | { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE }, | 224 | { 0x61800, 512, RI_E3_OFFLINE }, { 0x70000, 8, RI_ALL_ONLINE }, |
225 | { 0x70020, 8184, RI_ALL_OFFLINE }, { 0x78000, 8192, RI_E3_OFFLINE }, | ||
180 | { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE }, | 226 | { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE }, |
181 | { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE }, | 227 | { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE }, |
182 | { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE }, | 228 | { 0xb0000, 16384, RI_E1H_ONLINE }, |
229 | { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2E3_ONLINE }, | ||
183 | { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE }, | 230 | { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE }, |
184 | { 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE }, | 231 | { 0xc22c0, 5, RI_E2E3_ONLINE }, { 0xc22d8, 4, RI_E2E3_ONLINE }, |
185 | { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE }, | 232 | { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE }, |
186 | { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE }, | 233 | { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE }, |
187 | { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE }, | 234 | { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2E3_ONLINE }, |
188 | { 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE }, | 235 | { 0xc42e0, 7, RI_E1HE2E3_ONLINE }, { 0xc42fc, 1, RI_E2E3_ONLINE }, |
189 | { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE }, | 236 | { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE }, |
190 | { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE }, | 237 | { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2E3_ONLINE }, |
191 | { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE }, | 238 | { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE }, |
192 | { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE }, | 239 | { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE }, |
193 | { 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE }, | 240 | { 0xd01fc, 1, RI_E2E3_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE }, |
194 | { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE }, | 241 | { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2E3_ONLINE }, |
195 | { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE }, | 242 | { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE }, |
196 | { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE }, | 243 | { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE }, |
197 | { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE }, | 244 | { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE }, |
198 | { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE }, | 245 | { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE }, |
199 | { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE }, | 246 | { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE }, |
200 | { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE }, | 247 | { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE }, |
201 | { 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE }, | 248 | { 0xe01f4, 1, RI_E2_ONLINE }, { 0xe01f8, 1, RI_E2E3_ONLINE }, |
202 | { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE }, | 249 | { 0xe0200, 2, RI_ALL_ONLINE }, { 0xe020c, 8, RI_ALL_ONLINE }, |
203 | { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE }, | 250 | { 0xe022c, 18, RI_E1HE2E3_ONLINE }, { 0xe0280, 1, RI_ALL_ONLINE }, |
204 | { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE }, | 251 | { 0xe0300, 1, RI_ALL_ONLINE }, { 0xe1000, 1, RI_ALL_ONLINE }, |
205 | { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE }, | 252 | { 0xe2000, 1, RI_ALL_ONLINE }, { 0xe2004, 2047, RI_ALL_OFFLINE }, |
206 | { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE }, | 253 | { 0xf0000, 1, RI_ALL_ONLINE }, { 0xf0004, 16383, RI_ALL_OFFLINE }, |
207 | { 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE }, | 254 | { 0x101000, 12, RI_ALL_ONLINE }, { 0x101050, 1, RI_E1HE2E3_ONLINE }, |
208 | { 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE }, | 255 | { 0x101054, 3, RI_E2E3_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE }, |
209 | { 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE }, | 256 | { 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE }, |
210 | { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE }, | 257 | { 0x102068, 6, RI_E2E3_ONLINE }, { 0x102080, 17, RI_ALL_ONLINE }, |
211 | { 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE }, | 258 | { 0x1020c8, 8, RI_E1H_ONLINE }, { 0x1020e8, 9, RI_E2E3_ONLINE }, |
212 | { 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE }, | 259 | { 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE }, |
213 | { 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE }, | 260 | { 0x103098, 5, RI_E1HE2E3_ONLINE }, { 0x1030ac, 2, RI_E2E3_ONLINE }, |
214 | { 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE }, | 261 | { 0x1030b4, 1, RI_E2_ONLINE }, { 0x1030b8, 7, RI_E2E3_ONLINE }, |
215 | { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE }, | 262 | { 0x1030d8, 8, RI_E2E3_ONLINE }, { 0x103400, 1, RI_E2E3_ONLINE }, |
216 | { 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE }, | 263 | { 0x103404, 135, RI_E2E3_OFFLINE }, { 0x103800, 8, RI_ALL_ONLINE }, |
217 | { 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE }, | 264 | { 0x104000, 63, RI_ALL_ONLINE }, { 0x10411c, 16, RI_E2E3_ONLINE }, |
218 | { 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE }, | 265 | { 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE }, |
219 | { 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE }, | 266 | { 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE }, |
220 | { 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE }, | 267 | { 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 256, RI_ALL_ONLINE }, |
268 | { 0x105400, 768, RI_ALL_OFFLINE }, { 0x107000, 7, RI_E2E3_ONLINE }, | ||
269 | { 0x10701c, 1, RI_E3_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE }, | ||
221 | { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE }, | 270 | { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE }, |
222 | { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE }, | 271 | { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE }, |
223 | { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE }, | 272 | { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE }, |
224 | { 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE }, | 273 | { 0x110000, 111, RI_E2E3_ONLINE }, { 0x1101dc, 1, RI_E3_ONLINE }, |
225 | { 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE }, | 274 | { 0x110200, 4, RI_E2E3_ONLINE }, { 0x120000, 2, RI_ALL_ONLINE }, |
226 | { 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE }, | 275 | { 0x120008, 4, RI_ALL_ONLINE }, { 0x120018, 3, RI_ALL_ONLINE }, |
227 | { 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE }, | 276 | { 0x120024, 4, RI_ALL_ONLINE }, { 0x120034, 3, RI_ALL_ONLINE }, |
228 | { 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE }, | 277 | { 0x120040, 4, RI_ALL_ONLINE }, { 0x120050, 3, RI_ALL_ONLINE }, |
229 | { 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE }, | 278 | { 0x12005c, 4, RI_ALL_ONLINE }, { 0x12006c, 3, RI_ALL_ONLINE }, |
230 | { 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE }, | 279 | { 0x120078, 4, RI_ALL_ONLINE }, { 0x120088, 3, RI_ALL_ONLINE }, |
231 | { 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE }, | 280 | { 0x120094, 4, RI_ALL_ONLINE }, { 0x1200a4, 3, RI_ALL_ONLINE }, |
232 | { 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE }, | 281 | { 0x1200b0, 4, RI_ALL_ONLINE }, { 0x1200c0, 3, RI_ALL_ONLINE }, |
233 | { 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE }, | 282 | { 0x1200cc, 4, RI_ALL_ONLINE }, { 0x1200dc, 3, RI_ALL_ONLINE }, |
234 | { 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE }, | 283 | { 0x1200e8, 4, RI_ALL_ONLINE }, { 0x1200f8, 3, RI_ALL_ONLINE }, |
235 | { 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE }, | 284 | { 0x120104, 4, RI_ALL_ONLINE }, { 0x120114, 1, RI_ALL_ONLINE }, |
236 | { 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE }, | 285 | { 0x120118, 22, RI_ALL_ONLINE }, { 0x120170, 2, RI_E1E1H_ONLINE }, |
237 | { 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE }, | 286 | { 0x120178, 243, RI_ALL_ONLINE }, { 0x120544, 4, RI_E1E1H_ONLINE }, |
238 | { 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE }, | 287 | { 0x120554, 6, RI_ALL_ONLINE }, { 0x12059c, 6, RI_E1HE2E3_ONLINE }, |
239 | { 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE }, | 288 | { 0x1205b4, 1, RI_E1HE2E3_ONLINE }, { 0x1205b8, 15, RI_E1HE2E3_ONLINE }, |
240 | { 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE }, | 289 | { 0x1205f4, 1, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2E3_ONLINE }, |
241 | { 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE }, | 290 | { 0x120618, 1, RI_E2E3_ONLINE }, { 0x12061c, 20, RI_E1HE2E3_ONLINE }, |
242 | { 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE }, | 291 | { 0x12066c, 11, RI_E1HE2E3_ONLINE }, { 0x120698, 3, RI_E2E3_ONLINE }, |
243 | { 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE }, | 292 | { 0x1206a4, 1, RI_E2_ONLINE }, { 0x1206a8, 1, RI_E2E3_ONLINE }, |
244 | { 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE }, | 293 | { 0x1206b0, 75, RI_E2E3_ONLINE }, { 0x1207dc, 1, RI_E2_ONLINE }, |
245 | { 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE }, | 294 | { 0x1207fc, 1, RI_E2E3_ONLINE }, { 0x12080c, 65, RI_ALL_ONLINE }, |
246 | { 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE }, | 295 | { 0x120910, 7, RI_E2E3_ONLINE }, { 0x120930, 9, RI_E2E3_ONLINE }, |
247 | { 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE }, | 296 | { 0x12095c, 37, RI_E3_ONLINE }, { 0x120a00, 2, RI_E1E1HE2_ONLINE }, |
248 | { 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE }, | 297 | { 0x120b00, 1, RI_E3_ONLINE }, { 0x122000, 2, RI_ALL_ONLINE }, |
249 | { 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE }, | 298 | { 0x122008, 2046, RI_E1_OFFLINE }, { 0x128000, 2, RI_E1HE2E3_ONLINE }, |
250 | { 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE }, | 299 | { 0x128008, 6142, RI_E1HE2E3_OFFLINE }, |
251 | { 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE }, | 300 | { 0x130000, 35, RI_E2E3_ONLINE }, |
252 | { 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE }, | 301 | { 0x130100, 29, RI_E2E3_ONLINE }, { 0x130180, 1, RI_E2E3_ONLINE }, |
253 | { 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE }, | 302 | { 0x130200, 1, RI_E2E3_ONLINE }, { 0x130280, 1, RI_E2E3_ONLINE }, |
254 | { 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE }, | 303 | { 0x130300, 5, RI_E2E3_ONLINE }, { 0x130380, 1, RI_E2E3_ONLINE }, |
255 | { 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE }, | 304 | { 0x130400, 1, RI_E2E3_ONLINE }, { 0x130480, 5, RI_E2E3_ONLINE }, |
256 | { 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE }, | 305 | { 0x130800, 72, RI_E2E3_ONLINE }, { 0x131000, 136, RI_E2E3_ONLINE }, |
257 | { 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE }, | 306 | { 0x132000, 148, RI_E2E3_ONLINE }, { 0x134000, 544, RI_E2E3_ONLINE }, |
258 | { 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE }, | 307 | { 0x140000, 64, RI_ALL_ONLINE }, { 0x140100, 5, RI_E1E1H_ONLINE }, |
259 | { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE }, | 308 | { 0x140114, 45, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE }, |
260 | { 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE }, | 309 | { 0x140220, 4, RI_E2E3_ONLINE }, { 0x140240, 4, RI_E2E3_ONLINE }, |
261 | { 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE }, | 310 | { 0x140260, 4, RI_E2E3_ONLINE }, { 0x140280, 4, RI_E2E3_ONLINE }, |
262 | { 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE }, | 311 | { 0x1402a0, 4, RI_E2E3_ONLINE }, { 0x1402c0, 4, RI_E2E3_ONLINE }, |
263 | { 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE }, | 312 | { 0x1402e0, 13, RI_E2E3_ONLINE }, { 0x144000, 4, RI_E1E1H_ONLINE }, |
264 | { 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE }, | 313 | { 0x148000, 4, RI_E1E1H_ONLINE }, { 0x14c000, 4, RI_E1E1H_ONLINE }, |
265 | { 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE }, | 314 | { 0x150000, 4, RI_E1E1H_ONLINE }, { 0x154000, 4, RI_E1E1H_ONLINE }, |
266 | { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE }, | 315 | { 0x158000, 4, RI_E1E1H_ONLINE }, { 0x15c000, 2, RI_E1HE2E3_ONLINE }, |
267 | { 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE }, | 316 | { 0x15c008, 5, RI_E1H_ONLINE }, { 0x15c020, 27, RI_E2E3_ONLINE }, |
317 | { 0x15c090, 13, RI_E2E3_ONLINE }, { 0x15c0c8, 34, RI_E2E3_ONLINE }, | ||
318 | { 0x15c150, 4, RI_E3_ONLINE }, { 0x160004, 6, RI_E3_ONLINE }, | ||
319 | { 0x160040, 6, RI_E3_ONLINE }, { 0x16005c, 6, RI_E3_ONLINE }, | ||
320 | { 0x160078, 2, RI_E3_ONLINE }, { 0x160300, 8, RI_E3_ONLINE }, | ||
321 | { 0x160330, 6, RI_E3_ONLINE }, { 0x160404, 6, RI_E3_ONLINE }, | ||
322 | { 0x160440, 6, RI_E3_ONLINE }, { 0x16045c, 6, RI_E3_ONLINE }, | ||
323 | { 0x160478, 2, RI_E3_ONLINE }, { 0x160700, 8, RI_E3_ONLINE }, | ||
324 | { 0x160730, 6, RI_E3_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE }, | ||
325 | { 0x16103c, 2, RI_E2E3_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE }, | ||
326 | { 0x162000, 54, RI_E3_ONLINE }, { 0x162200, 60, RI_E3_ONLINE }, | ||
327 | { 0x162400, 54, RI_E3_ONLINE }, { 0x162600, 60, RI_E3_ONLINE }, | ||
328 | { 0x162800, 54, RI_E3_ONLINE }, { 0x162a00, 60, RI_E3_ONLINE }, | ||
329 | { 0x162c00, 54, RI_E3_ONLINE }, { 0x162e00, 60, RI_E3_ONLINE }, | ||
330 | { 0x163000, 1, RI_E3_ONLINE }, { 0x163008, 1, RI_E3_ONLINE }, | ||
331 | { 0x163010, 1, RI_E3_ONLINE }, { 0x163018, 1, RI_E3_ONLINE }, | ||
332 | { 0x163020, 5, RI_E3_ONLINE }, { 0x163038, 3, RI_E3_ONLINE }, | ||
333 | { 0x163048, 3, RI_E3_ONLINE }, { 0x163058, 1, RI_E3_ONLINE }, | ||
334 | { 0x163060, 1, RI_E3_ONLINE }, { 0x163068, 1, RI_E3_ONLINE }, | ||
335 | { 0x163070, 3, RI_E3_ONLINE }, { 0x163080, 1, RI_E3_ONLINE }, | ||
336 | { 0x163088, 3, RI_E3_ONLINE }, { 0x163098, 1, RI_E3_ONLINE }, | ||
337 | { 0x1630a0, 1, RI_E3_ONLINE }, { 0x1630a8, 1, RI_E3_ONLINE }, | ||
338 | { 0x1630c0, 1, RI_E3_ONLINE }, { 0x1630c8, 1, RI_E3_ONLINE }, | ||
339 | { 0x1630d0, 1, RI_E3_ONLINE }, { 0x1630d8, 1, RI_E3_ONLINE }, | ||
340 | { 0x1630e0, 2, RI_E3_ONLINE }, { 0x163110, 1, RI_E3_ONLINE }, | ||
341 | { 0x163120, 2, RI_E3_ONLINE }, { 0x163420, 4, RI_E3_ONLINE }, | ||
342 | { 0x163438, 2, RI_E3_ONLINE }, { 0x163488, 2, RI_E3_ONLINE }, | ||
343 | { 0x163520, 2, RI_E3_ONLINE }, { 0x163800, 1, RI_E3_ONLINE }, | ||
344 | { 0x163808, 1, RI_E3_ONLINE }, { 0x163810, 1, RI_E3_ONLINE }, | ||
345 | { 0x163818, 1, RI_E3_ONLINE }, { 0x163820, 5, RI_E3_ONLINE }, | ||
346 | { 0x163838, 3, RI_E3_ONLINE }, { 0x163848, 3, RI_E3_ONLINE }, | ||
347 | { 0x163858, 1, RI_E3_ONLINE }, { 0x163860, 1, RI_E3_ONLINE }, | ||
348 | { 0x163868, 1, RI_E3_ONLINE }, { 0x163870, 3, RI_E3_ONLINE }, | ||
349 | { 0x163880, 1, RI_E3_ONLINE }, { 0x163888, 3, RI_E3_ONLINE }, | ||
350 | { 0x163898, 1, RI_E3_ONLINE }, { 0x1638a0, 1, RI_E3_ONLINE }, | ||
351 | { 0x1638a8, 1, RI_E3_ONLINE }, { 0x1638c0, 1, RI_E3_ONLINE }, | ||
352 | { 0x1638c8, 1, RI_E3_ONLINE }, { 0x1638d0, 1, RI_E3_ONLINE }, | ||
353 | { 0x1638d8, 1, RI_E3_ONLINE }, { 0x1638e0, 2, RI_E3_ONLINE }, | ||
354 | { 0x163910, 1, RI_E3_ONLINE }, { 0x163920, 2, RI_E3_ONLINE }, | ||
355 | { 0x163c20, 4, RI_E3_ONLINE }, { 0x163c38, 2, RI_E3_ONLINE }, | ||
356 | { 0x163c88, 2, RI_E3_ONLINE }, { 0x163d20, 2, RI_E3_ONLINE }, | ||
357 | { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2E3_ONLINE }, | ||
358 | { 0x164118, 15, RI_E2E3_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE }, | ||
268 | { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE }, | 359 | { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE }, |
269 | { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE }, | 360 | { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE }, |
270 | { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE }, | 361 | { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE }, |
@@ -273,9 +364,9 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = { | |||
273 | { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE }, | 364 | { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE }, |
274 | { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE }, | 365 | { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE }, |
275 | { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE }, | 366 | { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE }, |
276 | { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE }, | 367 | { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2E3_ONLINE }, |
277 | { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE }, | 368 | { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE }, |
278 | { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE }, | 369 | { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2E3_ONLINE }, |
279 | { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE }, | 370 | { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE }, |
280 | { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE }, | 371 | { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE }, |
281 | { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE }, | 372 | { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE }, |
@@ -285,89 +376,94 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = { | |||
285 | { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE }, | 376 | { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE }, |
286 | { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE }, | 377 | { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE }, |
287 | { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE }, | 378 | { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE }, |
288 | { 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE }, | 379 | { 0x16e040, 8, RI_E2E3_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE }, |
289 | { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE }, | 380 | { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE }, |
290 | { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE }, | 381 | { 0x16e684, 2, RI_E1HE2E3_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE }, |
291 | { 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE }, | 382 | { 0x16e6bc, 4, RI_E1HE2E3_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE }, |
292 | { 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE }, | 383 | { 0x16e6e0, 12, RI_E2E3_ONLINE }, { 0x16e768, 17, RI_E2E3_ONLINE }, |
293 | { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE }, | 384 | { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE }, |
294 | { 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE }, | 385 | { 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2E3_ONLINE }, |
295 | { 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE }, | 386 | { 0x1701c4, 1, RI_E2E3_ONLINE }, { 0x1701cc, 7, RI_E2E3_ONLINE }, |
296 | { 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE }, | 387 | { 0x1701e8, 1, RI_E3_ONLINE }, { 0x1701ec, 1, RI_E2E3_ONLINE }, |
297 | { 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE }, | 388 | { 0x1701f4, 1, RI_E2E3_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE }, |
298 | { 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE }, | 389 | { 0x170214, 1, RI_ALL_ONLINE }, { 0x170218, 77, RI_E2E3_ONLINE }, |
299 | { 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE }, | 390 | { 0x170400, 64, RI_E2E3_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE }, |
300 | { 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE }, | 391 | { 0x180000, 61, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1HE2E3_ONLINE }, |
301 | { 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE }, | 392 | { 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE }, |
302 | { 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE }, | 393 | { 0x180380, 1, RI_E2E3_ONLINE }, { 0x180388, 1, RI_E2E3_ONLINE }, |
303 | { 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE }, | 394 | { 0x180390, 1, RI_E2E3_ONLINE }, { 0x180398, 1, RI_E2E3_ONLINE }, |
395 | { 0x1803a0, 5, RI_E2E3_ONLINE }, { 0x1803b4, 2, RI_E3_ONLINE }, | ||
304 | { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE }, | 396 | { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE }, |
305 | { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, | 397 | { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, |
306 | { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE }, | 398 | { 0x182000, 4, RI_E3_ONLINE }, { 0x1a0000, 1, RI_ALL_ONLINE }, |
307 | { 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE }, | 399 | { 0x1a0004, 5631, RI_ALL_OFFLINE }, |
308 | { 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE }, | 400 | { 0x1a5800, 2560, RI_E1HE2E3_OFFLINE }, |
309 | { 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE }, | 401 | { 0x1a8000, 1, RI_ALL_ONLINE }, { 0x1a8004, 8191, RI_E1HE2E3_OFFLINE }, |
310 | { 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE }, | 402 | { 0x1b0000, 1, RI_ALL_ONLINE }, { 0x1b0004, 15, RI_E1H_OFFLINE }, |
311 | { 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE }, | 403 | { 0x1b0040, 1, RI_E1HE2E3_ONLINE }, { 0x1b0044, 239, RI_E1H_OFFLINE }, |
312 | { 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE }, | 404 | { 0x1b0400, 1, RI_ALL_ONLINE }, { 0x1b0404, 255, RI_E1H_OFFLINE }, |
313 | { 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE }, | 405 | { 0x1b0800, 1, RI_ALL_ONLINE }, { 0x1b0840, 1, RI_E1HE2E3_ONLINE }, |
314 | { 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE }, | 406 | { 0x1b0c00, 1, RI_ALL_ONLINE }, { 0x1b1000, 1, RI_ALL_ONLINE }, |
315 | { 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE }, | 407 | { 0x1b1040, 1, RI_E1HE2E3_ONLINE }, { 0x1b1400, 1, RI_ALL_ONLINE }, |
316 | { 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE }, | 408 | { 0x1b1440, 1, RI_E1HE2E3_ONLINE }, { 0x1b1480, 1, RI_E1HE2E3_ONLINE }, |
317 | { 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE }, | 409 | { 0x1b14c0, 1, RI_E1HE2E3_ONLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE }, |
318 | { 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE }, | 410 | { 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_ONLINE }, |
319 | { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE }, | 411 | { 0x1b2400, 1, RI_E1HE2E3_ONLINE }, { 0x1b2404, 5631, RI_E2E3_OFFLINE }, |
320 | { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE }, | 412 | { 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE }, |
321 | { 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE }, | 413 | { 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE }, |
322 | { 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE }, | 414 | { 0x1b8100, 1, RI_ALL_ONLINE }, { 0x1b8140, 1, RI_ALL_ONLINE }, |
323 | { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE }, | 415 | { 0x1b8180, 1, RI_ALL_ONLINE }, { 0x1b81c0, 1, RI_ALL_ONLINE }, |
324 | { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE }, | 416 | { 0x1b8200, 1, RI_ALL_ONLINE }, { 0x1b8240, 1, RI_ALL_ONLINE }, |
325 | { 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE }, | 417 | { 0x1b8280, 1, RI_ALL_ONLINE }, { 0x1b82c0, 1, RI_ALL_ONLINE }, |
326 | { 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE }, | 418 | { 0x1b8300, 1, RI_ALL_ONLINE }, { 0x1b8340, 1, RI_ALL_ONLINE }, |
327 | { 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE }, | 419 | { 0x1b8380, 1, RI_ALL_ONLINE }, { 0x1b83c0, 1, RI_ALL_ONLINE }, |
328 | { 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE }, | 420 | { 0x1b8400, 1, RI_ALL_ONLINE }, { 0x1b8440, 1, RI_ALL_ONLINE }, |
329 | { 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE }, | 421 | { 0x1b8480, 1, RI_ALL_ONLINE }, { 0x1b84c0, 1, RI_ALL_ONLINE }, |
330 | { 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE }, | 422 | { 0x1b8500, 1, RI_ALL_ONLINE }, { 0x1b8540, 1, RI_ALL_ONLINE }, |
331 | { 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE }, | 423 | { 0x1b8580, 1, RI_ALL_ONLINE }, { 0x1b85c0, 19, RI_E2E3_ONLINE }, |
332 | { 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE }, | 424 | { 0x1b8800, 1, RI_ALL_ONLINE }, { 0x1b8840, 1, RI_ALL_ONLINE }, |
333 | { 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE }, | 425 | { 0x1b8880, 1, RI_ALL_ONLINE }, { 0x1b88c0, 1, RI_ALL_ONLINE }, |
334 | { 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE }, | 426 | { 0x1b8900, 1, RI_ALL_ONLINE }, { 0x1b8940, 1, RI_ALL_ONLINE }, |
335 | { 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE }, | 427 | { 0x1b8980, 1, RI_ALL_ONLINE }, { 0x1b89c0, 1, RI_ALL_ONLINE }, |
336 | { 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE }, | 428 | { 0x1b8a00, 1, RI_ALL_ONLINE }, { 0x1b8a40, 1, RI_ALL_ONLINE }, |
337 | { 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE }, | 429 | { 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1b8ac0, 1, RI_ALL_ONLINE }, |
338 | { 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE }, | 430 | { 0x1b8b00, 1, RI_ALL_ONLINE }, { 0x1b8b40, 1, RI_ALL_ONLINE }, |
339 | { 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE }, | 431 | { 0x1b8b80, 1, RI_ALL_ONLINE }, { 0x1b8bc0, 1, RI_ALL_ONLINE }, |
340 | { 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE }, | 432 | { 0x1b8c00, 1, RI_ALL_ONLINE }, { 0x1b8c40, 1, RI_ALL_ONLINE }, |
341 | { 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE }, | 433 | { 0x1b8c80, 1, RI_ALL_ONLINE }, { 0x1b8cc0, 1, RI_ALL_ONLINE }, |
342 | { 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE }, | 434 | { 0x1b8cc4, 1, RI_E2E3_ONLINE }, { 0x1b8d00, 1, RI_ALL_ONLINE }, |
343 | { 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE }, | 435 | { 0x1b8d40, 1, RI_ALL_ONLINE }, { 0x1b8d80, 1, RI_ALL_ONLINE }, |
344 | { 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE }, | 436 | { 0x1b8dc0, 1, RI_ALL_ONLINE }, { 0x1b8e00, 1, RI_ALL_ONLINE }, |
345 | { 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE }, | 437 | { 0x1b8e40, 1, RI_ALL_ONLINE }, { 0x1b8e80, 1, RI_ALL_ONLINE }, |
346 | { 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE }, | 438 | { 0x1b8e84, 1, RI_E2E3_ONLINE }, { 0x1b8ec0, 1, RI_E1HE2E3_ONLINE }, |
347 | { 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE }, | 439 | { 0x1b8f00, 1, RI_E1HE2E3_ONLINE }, { 0x1b8f40, 1, RI_E1HE2E3_ONLINE }, |
348 | { 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE }, | 440 | { 0x1b8f80, 1, RI_E1HE2E3_ONLINE }, { 0x1b8fc0, 1, RI_E1HE2E3_ONLINE }, |
349 | { 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE }, | 441 | { 0x1b8fc4, 2, RI_E2E3_ONLINE }, { 0x1b8fd0, 6, RI_E2E3_ONLINE }, |
350 | { 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE }, | 442 | { 0x1b8fe8, 2, RI_E3_ONLINE }, { 0x1b9000, 1, RI_E2E3_ONLINE }, |
351 | { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE }, | 443 | { 0x1b9040, 3, RI_E2E3_ONLINE }, { 0x1b905c, 1, RI_E3_ONLINE }, |
352 | { 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE }, | 444 | { 0x1b9400, 14, RI_E2E3_ONLINE }, { 0x1b943c, 19, RI_E2E3_ONLINE }, |
353 | { 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE }, | 445 | { 0x1b9490, 10, RI_E2E3_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE }, |
354 | { 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE }, | 446 | { 0x200000, 65, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1HE2E3_ONLINE }, |
355 | { 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE }, | 447 | { 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE }, |
356 | { 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE }, | 448 | { 0x200380, 1, RI_E2E3_ONLINE }, { 0x200388, 1, RI_E2E3_ONLINE }, |
357 | { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE }, | 449 | { 0x200390, 1, RI_E2E3_ONLINE }, { 0x200398, 1, RI_E2E3_ONLINE }, |
358 | { 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE }, | 450 | { 0x2003a0, 1, RI_E2E3_ONLINE }, { 0x2003a8, 2, RI_E2E3_ONLINE }, |
359 | { 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE}, | 451 | { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_E1E1H_OFFLINE }, |
360 | { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE }, | 452 | { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE }, |
453 | { 0x204000, 4, RI_E3_ONLINE }, { 0x220000, 1, RI_ALL_ONLINE }, | ||
454 | { 0x220004, 5631, RI_ALL_OFFLINE }, | ||
455 | { 0x225800, 2560, RI_E1HE2E3_OFFLINE }, | ||
456 | { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2E3_OFFLINE }, | ||
361 | { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE }, | 457 | { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE }, |
362 | { 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE }, | 458 | { 0x230040, 1, RI_E1HE2E3_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE }, |
363 | { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE }, | 459 | { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE }, |
364 | { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE }, | 460 | { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2E3_ONLINE }, |
365 | { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE }, | 461 | { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE }, |
366 | { 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE }, | 462 | { 0x231040, 1, RI_E1HE2E3_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE }, |
367 | { 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE }, | 463 | { 0x231440, 1, RI_E1HE2E3_ONLINE }, { 0x231480, 1, RI_E1HE2E3_ONLINE }, |
368 | { 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE }, | 464 | { 0x2314c0, 1, RI_E1HE2E3_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE }, |
369 | { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE }, | 465 | { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE }, |
370 | { 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE }, | 466 | { 0x232400, 1, RI_E1HE2E3_ONLINE }, { 0x232404, 5631, RI_E2E3_OFFLINE }, |
371 | { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE }, | 467 | { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE }, |
372 | { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE }, | 468 | { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE }, |
373 | { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE }, | 469 | { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE }, |
@@ -379,7 +475,7 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = { | |||
379 | { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE }, | 475 | { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE }, |
380 | { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE }, | 476 | { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE }, |
381 | { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE }, | 477 | { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE }, |
382 | { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE }, | 478 | { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2E3_ONLINE }, |
383 | { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE }, | 479 | { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE }, |
384 | { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE }, | 480 | { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE }, |
385 | { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE }, | 481 | { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE }, |
@@ -390,88 +486,91 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = { | |||
390 | { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE }, | 486 | { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE }, |
391 | { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE }, | 487 | { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE }, |
392 | { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE }, | 488 | { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE }, |
393 | { 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE }, | 489 | { 0x238cc4, 1, RI_E2E3_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE }, |
394 | { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE }, | 490 | { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE }, |
395 | { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE }, | 491 | { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE }, |
396 | { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE }, | 492 | { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE }, |
397 | { 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE }, | 493 | { 0x238e84, 1, RI_E2E3_ONLINE }, { 0x238ec0, 1, RI_E1HE2E3_ONLINE }, |
398 | { 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE }, | 494 | { 0x238f00, 1, RI_E1HE2E3_ONLINE }, { 0x238f40, 1, RI_E1HE2E3_ONLINE }, |
399 | { 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE }, | 495 | { 0x238f80, 1, RI_E1HE2E3_ONLINE }, { 0x238fc0, 1, RI_E1HE2E3_ONLINE }, |
400 | { 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE }, | 496 | { 0x238fc4, 2, RI_E2E3_ONLINE }, { 0x238fd0, 6, RI_E2E3_ONLINE }, |
401 | { 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE }, | 497 | { 0x238fe8, 2, RI_E3_ONLINE }, { 0x239000, 1, RI_E2E3_ONLINE }, |
498 | { 0x239040, 3, RI_E2E3_ONLINE }, { 0x23905c, 1, RI_E3_ONLINE }, | ||
402 | { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE }, | 499 | { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE }, |
403 | { 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE }, | 500 | { 0x28014c, 2, RI_E1HE2E3_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE }, |
404 | { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE }, | 501 | { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2E3_ONLINE }, |
405 | { 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE }, | 502 | { 0x280388, 1, RI_E2E3_ONLINE }, { 0x280390, 1, RI_E2E3_ONLINE }, |
406 | { 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE }, | 503 | { 0x280398, 1, RI_E2E3_ONLINE }, { 0x2803a0, 1, RI_E2E3_ONLINE }, |
407 | { 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE }, | 504 | { 0x2803a8, 2, RI_E2E3_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE }, |
408 | { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE }, | 505 | { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE }, |
409 | { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE }, | 506 | { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x284000, 4, RI_E3_ONLINE }, |
410 | { 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE}, | 507 | { 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 5631, RI_ALL_OFFLINE }, |
411 | { 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE }, | 508 | { 0x2a5800, 2560, RI_E1HE2E3_OFFLINE }, { 0x2a8000, 1, RI_ALL_ONLINE }, |
412 | { 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE }, | 509 | { 0x2a8004, 8191, RI_E1HE2E3_OFFLINE }, { 0x2b0000, 1, RI_ALL_ONLINE }, |
413 | { 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE }, | 510 | { 0x2b0004, 15, RI_E1H_OFFLINE }, { 0x2b0040, 1, RI_E1HE2E3_ONLINE }, |
414 | { 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE }, | 511 | { 0x2b0044, 239, RI_E1H_OFFLINE }, { 0x2b0400, 1, RI_ALL_ONLINE }, |
415 | { 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE }, | 512 | { 0x2b0404, 255, RI_E1H_OFFLINE }, { 0x2b0800, 1, RI_ALL_ONLINE }, |
416 | { 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE }, | 513 | { 0x2b0840, 1, RI_E1HE2E3_ONLINE }, { 0x2b0c00, 1, RI_ALL_ONLINE }, |
417 | { 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE }, | 514 | { 0x2b1000, 1, RI_ALL_ONLINE }, { 0x2b1040, 1, RI_E1HE2E3_ONLINE }, |
418 | { 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE }, | 515 | { 0x2b1400, 1, RI_ALL_ONLINE }, { 0x2b1440, 1, RI_E1HE2E3_ONLINE }, |
419 | { 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE }, | 516 | { 0x2b1480, 1, RI_E1HE2E3_ONLINE }, { 0x2b14c0, 1, RI_E1HE2E3_ONLINE }, |
420 | { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE }, | 517 | { 0x2b1800, 128, RI_ALL_OFFLINE }, { 0x2b1c00, 128, RI_ALL_OFFLINE }, |
421 | { 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE }, | 518 | { 0x2b2000, 1, RI_ALL_ONLINE }, { 0x2b2400, 1, RI_E1HE2E3_ONLINE }, |
422 | { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE }, | 519 | { 0x2b2404, 5631, RI_E2E3_OFFLINE }, { 0x2b8000, 1, RI_ALL_ONLINE }, |
423 | { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE }, | 520 | { 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE }, |
424 | { 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE }, | 521 | { 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x2b8100, 1, RI_ALL_ONLINE }, |
425 | { 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE }, | 522 | { 0x2b8140, 1, RI_ALL_ONLINE }, { 0x2b8180, 1, RI_ALL_ONLINE }, |
426 | { 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE }, | 523 | { 0x2b81c0, 1, RI_ALL_ONLINE }, { 0x2b8200, 1, RI_ALL_ONLINE }, |
427 | { 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE }, | 524 | { 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE }, |
428 | { 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE }, | 525 | { 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8300, 1, RI_ALL_ONLINE }, |
429 | { 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE }, | 526 | { 0x2b8340, 1, RI_ALL_ONLINE }, { 0x2b8380, 1, RI_ALL_ONLINE }, |
430 | { 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE }, | 527 | { 0x2b83c0, 1, RI_ALL_ONLINE }, { 0x2b8400, 1, RI_ALL_ONLINE }, |
431 | { 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE }, | 528 | { 0x2b8440, 1, RI_ALL_ONLINE }, { 0x2b8480, 1, RI_ALL_ONLINE }, |
432 | { 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE }, | 529 | { 0x2b84c0, 1, RI_ALL_ONLINE }, { 0x2b8500, 1, RI_ALL_ONLINE }, |
433 | { 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE }, | 530 | { 0x2b8540, 1, RI_ALL_ONLINE }, { 0x2b8580, 1, RI_ALL_ONLINE }, |
434 | { 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE }, | 531 | { 0x2b85c0, 19, RI_E2E3_ONLINE }, { 0x2b8800, 1, RI_ALL_ONLINE }, |
435 | { 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE }, | 532 | { 0x2b8840, 1, RI_ALL_ONLINE }, { 0x2b8880, 1, RI_ALL_ONLINE }, |
436 | { 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE }, | 533 | { 0x2b88c0, 1, RI_ALL_ONLINE }, { 0x2b8900, 1, RI_ALL_ONLINE }, |
437 | { 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE }, | 534 | { 0x2b8940, 1, RI_ALL_ONLINE }, { 0x2b8980, 1, RI_ALL_ONLINE }, |
438 | { 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE }, | 535 | { 0x2b89c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE }, |
439 | { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE }, | 536 | { 0x2b8a40, 1, RI_ALL_ONLINE }, { 0x2b8a80, 1, RI_ALL_ONLINE }, |
440 | { 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE }, | 537 | { 0x2b8ac0, 1, RI_ALL_ONLINE }, { 0x2b8b00, 1, RI_ALL_ONLINE }, |
441 | { 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE }, | 538 | { 0x2b8b40, 1, RI_ALL_ONLINE }, { 0x2b8b80, 1, RI_ALL_ONLINE }, |
442 | { 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE }, | 539 | { 0x2b8bc0, 1, RI_ALL_ONLINE }, { 0x2b8c00, 1, RI_ALL_ONLINE }, |
443 | { 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE }, | 540 | { 0x2b8c40, 1, RI_ALL_ONLINE }, { 0x2b8c80, 1, RI_ALL_ONLINE }, |
444 | { 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE }, | 541 | { 0x2b8cc0, 1, RI_ALL_ONLINE }, { 0x2b8cc4, 1, RI_E2E3_ONLINE }, |
445 | { 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE }, | 542 | { 0x2b8d00, 1, RI_ALL_ONLINE }, { 0x2b8d40, 1, RI_ALL_ONLINE }, |
446 | { 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE }, | 543 | { 0x2b8d80, 1, RI_ALL_ONLINE }, { 0x2b8dc0, 1, RI_ALL_ONLINE }, |
447 | { 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE }, | 544 | { 0x2b8e00, 1, RI_ALL_ONLINE }, { 0x2b8e40, 1, RI_ALL_ONLINE }, |
448 | { 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE }, | 545 | { 0x2b8e80, 1, RI_ALL_ONLINE }, { 0x2b8e84, 1, RI_E2E3_ONLINE }, |
449 | { 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE }, | 546 | { 0x2b8ec0, 1, RI_E1HE2E3_ONLINE }, { 0x2b8f00, 1, RI_E1HE2E3_ONLINE }, |
450 | { 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE }, | 547 | { 0x2b8f40, 1, RI_E1HE2E3_ONLINE }, { 0x2b8f80, 1, RI_E1HE2E3_ONLINE }, |
451 | { 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE }, | 548 | { 0x2b8fc0, 1, RI_E1HE2E3_ONLINE }, { 0x2b8fc4, 2, RI_E2E3_ONLINE }, |
452 | { 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE }, | 549 | { 0x2b8fd0, 6, RI_E2E3_ONLINE }, { 0x2b8fe8, 2, RI_E3_ONLINE }, |
453 | { 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE }, | 550 | { 0x2b9000, 1, RI_E2E3_ONLINE }, { 0x2b9040, 3, RI_E2E3_ONLINE }, |
454 | { 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE }, | 551 | { 0x2b905c, 1, RI_E3_ONLINE }, { 0x2b9400, 14, RI_E2E3_ONLINE }, |
455 | { 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE }, | 552 | { 0x2b943c, 19, RI_E2E3_ONLINE }, { 0x2b9490, 10, RI_E2E3_ONLINE }, |
456 | { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE }, | 553 | { 0x2c0000, 2, RI_ALL_ONLINE }, { 0x300000, 65, RI_ALL_ONLINE }, |
457 | { 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE }, | 554 | { 0x30014c, 2, RI_E1HE2E3_ONLINE }, { 0x300200, 58, RI_ALL_ONLINE }, |
458 | { 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE }, | 555 | { 0x300340, 4, RI_ALL_ONLINE }, { 0x300380, 1, RI_E2E3_ONLINE }, |
459 | { 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE }, | 556 | { 0x300388, 1, RI_E2E3_ONLINE }, { 0x300390, 1, RI_E2E3_ONLINE }, |
460 | { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE }, | 557 | { 0x300398, 1, RI_E2E3_ONLINE }, { 0x3003a0, 1, RI_E2E3_ONLINE }, |
461 | { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE }, | 558 | { 0x3003a8, 2, RI_E2E3_ONLINE }, { 0x300400, 1, RI_ALL_ONLINE }, |
559 | { 0x300404, 255, RI_E1E1H_OFFLINE }, { 0x302000, 4, RI_ALL_ONLINE }, | ||
560 | { 0x302010, 2044, RI_ALL_OFFLINE }, { 0x304000, 4, RI_E3_ONLINE }, | ||
462 | { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE }, | 561 | { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE }, |
463 | { 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE }, | 562 | { 0x325800, 2560, RI_E1HE2E3_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE }, |
464 | { 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE }, | 563 | { 0x328004, 8191, RI_E1HE2E3_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE }, |
465 | { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE }, | 564 | { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2E3_ONLINE }, |
466 | { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE }, | 565 | { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE }, |
467 | { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE }, | 566 | { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE }, |
468 | { 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE }, | 567 | { 0x330840, 1, RI_E1HE2E3_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE }, |
469 | { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE }, | 568 | { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2E3_ONLINE }, |
470 | { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE }, | 569 | { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2E3_ONLINE }, |
471 | { 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE }, | 570 | { 0x331480, 1, RI_E1HE2E3_ONLINE }, { 0x3314c0, 1, RI_E1HE2E3_ONLINE }, |
472 | { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE }, | 571 | { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE }, |
473 | { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE }, | 572 | { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2E3_ONLINE }, |
474 | { 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE }, | 573 | { 0x332404, 5631, RI_E2E3_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE }, |
475 | { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, | 574 | { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, |
476 | { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE }, | 575 | { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE }, |
477 | { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE }, | 576 | { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE }, |
@@ -483,7 +582,7 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = { | |||
483 | { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE }, | 582 | { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE }, |
484 | { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE }, | 583 | { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE }, |
485 | { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE }, | 584 | { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE }, |
486 | { 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE }, | 585 | { 0x3385c0, 19, RI_E2E3_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE }, |
487 | { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE }, | 586 | { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE }, |
488 | { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE }, | 587 | { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE }, |
489 | { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE }, | 588 | { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE }, |
@@ -493,35 +592,48 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = { | |||
493 | { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE }, | 592 | { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE }, |
494 | { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE }, | 593 | { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE }, |
495 | { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE }, | 594 | { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE }, |
496 | { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE }, | 595 | { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2E3_ONLINE }, |
497 | { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE }, | 596 | { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE }, |
498 | { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE }, | 597 | { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE }, |
499 | { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE }, | 598 | { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE }, |
500 | { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE }, | 599 | { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2E3_ONLINE }, |
501 | { 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE }, | 600 | { 0x338ec0, 1, RI_E1HE2E3_ONLINE }, { 0x338f00, 1, RI_E1HE2E3_ONLINE }, |
502 | { 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE }, | 601 | { 0x338f40, 1, RI_E1HE2E3_ONLINE }, { 0x338f80, 1, RI_E1HE2E3_ONLINE }, |
503 | { 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE }, | 602 | { 0x338fc0, 1, RI_E1HE2E3_ONLINE }, { 0x338fc4, 2, RI_E2E3_ONLINE }, |
504 | { 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE }, | 603 | { 0x338fd0, 6, RI_E2E3_ONLINE }, { 0x338fe8, 2, RI_E3_ONLINE }, |
505 | { 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE }, | 604 | { 0x339000, 1, RI_E2E3_ONLINE }, { 0x339040, 3, RI_E2E3_ONLINE }, |
605 | { 0x33905c, 1, RI_E3_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE }, | ||
506 | }; | 606 | }; |
607 | #define REGS_COUNT ARRAY_SIZE(reg_addrs) | ||
507 | 608 | ||
508 | #define IDLE_REGS_COUNT 237 | 609 | static const struct reg_addr idle_addrs[] = { |
509 | static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { | ||
510 | { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE }, | 610 | { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE }, |
511 | { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE }, | 611 | { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE }, |
512 | { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE }, | 612 | { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE }, |
513 | { 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE }, | 613 | { 0x285c, 1, RI_ALL_ONLINE }, { 0x3040, 1, RI_ALL_ONLINE }, |
514 | { 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE }, | 614 | { 0x9010, 7, RI_E2E3_ONLINE }, { 0x9030, 1, RI_E2E3_ONLINE }, |
515 | { 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE }, | 615 | { 0x9068, 16, RI_E2E3_ONLINE }, { 0x9230, 2, RI_E2E3_ONLINE }, |
516 | { 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE }, | 616 | { 0x9244, 1, RI_E2E3_ONLINE }, { 0x9298, 1, RI_E2E3_ONLINE }, |
517 | { 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE }, | 617 | { 0x92a8, 1, RI_E2E3_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE }, |
518 | { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE }, | 618 | { 0xa3c4, 1, RI_E1HE2E3_ONLINE }, { 0xa404, 3, RI_ALL_ONLINE }, |
519 | { 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE }, | 619 | { 0xa42c, 12, RI_ALL_ONLINE }, { 0xa600, 5, RI_E1HE2E3_ONLINE }, |
520 | { 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE }, | 620 | { 0xa618, 1, RI_E1HE2E3_ONLINE }, { 0xa714, 1, RI_E2E3_ONLINE }, |
521 | { 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE }, | 621 | { 0xa720, 1, RI_E2E3_ONLINE }, { 0xa750, 1, RI_E2E3_ONLINE }, |
522 | { 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE }, | 622 | { 0xc09c, 1, RI_E1E1H_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE }, |
523 | { 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE }, | 623 | { 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE }, |
524 | { 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE }, | 624 | { 0x10418, 1, RI_ALL_ONLINE }, { 0x10420, 1, RI_ALL_ONLINE }, |
625 | { 0x10428, 1, RI_ALL_ONLINE }, { 0x10460, 1, RI_ALL_ONLINE }, | ||
626 | { 0x10474, 1, RI_ALL_ONLINE }, { 0x104e0, 1, RI_ALL_ONLINE }, | ||
627 | { 0x104ec, 1, RI_ALL_ONLINE }, { 0x104f8, 1, RI_ALL_ONLINE }, | ||
628 | { 0x10508, 1, RI_ALL_ONLINE }, { 0x10530, 1, RI_ALL_ONLINE }, | ||
629 | { 0x10538, 1, RI_ALL_ONLINE }, { 0x10548, 1, RI_ALL_ONLINE }, | ||
630 | { 0x10558, 1, RI_ALL_ONLINE }, { 0x182a8, 1, RI_E2E3_ONLINE }, | ||
631 | { 0x182b8, 1, RI_E2E3_ONLINE }, { 0x18308, 1, RI_E2E3_ONLINE }, | ||
632 | { 0x18318, 1, RI_E2E3_ONLINE }, { 0x18338, 1, RI_E2E3_ONLINE }, | ||
633 | { 0x18348, 1, RI_E2E3_ONLINE }, { 0x183bc, 1, RI_E2E3_ONLINE }, | ||
634 | { 0x183cc, 1, RI_E2E3_ONLINE }, { 0x18570, 1, RI_E3_ONLINE }, | ||
635 | { 0x18578, 1, RI_E3_ONLINE }, { 0x1858c, 1, RI_E3_ONLINE }, | ||
636 | { 0x18594, 1, RI_E3_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE }, | ||
525 | { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE }, | 637 | { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE }, |
526 | { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE }, | 638 | { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE }, |
527 | { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE }, | 639 | { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE }, |
@@ -551,8 +663,8 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { | |||
551 | { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE }, | 663 | { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE }, |
552 | { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE }, | 664 | { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE }, |
553 | { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE }, | 665 | { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE }, |
554 | { 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE }, | 666 | { 0x10309c, 2, RI_E1HE2E3_ONLINE }, { 0x1030b8, 2, RI_E2E3_ONLINE }, |
555 | { 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE }, | 667 | { 0x1030cc, 1, RI_E2E3_ONLINE }, { 0x1030e0, 1, RI_E2E3_ONLINE }, |
556 | { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE }, | 668 | { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE }, |
557 | { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE }, | 669 | { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE }, |
558 | { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE }, | 670 | { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE }, |
@@ -563,28 +675,27 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { | |||
563 | { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE }, | 675 | { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE }, |
564 | { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE }, | 676 | { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE }, |
565 | { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE }, | 677 | { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE }, |
566 | { 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE }, | 678 | { 0x120608, 1, RI_E1HE2E3_ONLINE }, { 0x120778, 2, RI_E2E3_ONLINE }, |
567 | { 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE }, | 679 | { 0x120808, 3, RI_ALL_ONLINE }, { 0x120818, 1, RI_ALL_ONLINE }, |
568 | { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE }, | 680 | { 0x120820, 1, RI_ALL_ONLINE }, { 0x120828, 1, RI_ALL_ONLINE }, |
569 | { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE }, | 681 | { 0x120830, 1, RI_ALL_ONLINE }, { 0x120838, 1, RI_ALL_ONLINE }, |
570 | { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE }, | 682 | { 0x120840, 1, RI_ALL_ONLINE }, { 0x120848, 1, RI_ALL_ONLINE }, |
571 | { 0x120848, 1, RI_ALL_ONLINE }, { 0x120850, 1, RI_ALL_ONLINE }, | 683 | { 0x120850, 1, RI_ALL_ONLINE }, { 0x120858, 1, RI_ALL_ONLINE }, |
572 | { 0x120858, 1, RI_ALL_ONLINE }, { 0x120860, 1, RI_ALL_ONLINE }, | 684 | { 0x120860, 1, RI_ALL_ONLINE }, { 0x120868, 1, RI_ALL_ONLINE }, |
573 | { 0x120868, 1, RI_ALL_ONLINE }, { 0x120870, 1, RI_ALL_ONLINE }, | 685 | { 0x120870, 1, RI_ALL_ONLINE }, { 0x120878, 1, RI_ALL_ONLINE }, |
574 | { 0x120878, 1, RI_ALL_ONLINE }, { 0x120880, 1, RI_ALL_ONLINE }, | 686 | { 0x120880, 1, RI_ALL_ONLINE }, { 0x120888, 1, RI_ALL_ONLINE }, |
575 | { 0x120888, 1, RI_ALL_ONLINE }, { 0x120890, 1, RI_ALL_ONLINE }, | 687 | { 0x120890, 1, RI_ALL_ONLINE }, { 0x120898, 1, RI_ALL_ONLINE }, |
576 | { 0x120898, 1, RI_ALL_ONLINE }, { 0x1208a0, 1, RI_ALL_ONLINE }, | 688 | { 0x1208a0, 1, RI_ALL_ONLINE }, { 0x1208a8, 1, RI_ALL_ONLINE }, |
577 | { 0x1208a8, 1, RI_ALL_ONLINE }, { 0x1208b0, 1, RI_ALL_ONLINE }, | 689 | { 0x1208b0, 1, RI_ALL_ONLINE }, { 0x1208b8, 1, RI_ALL_ONLINE }, |
578 | { 0x1208b8, 1, RI_ALL_ONLINE }, { 0x1208c0, 1, RI_ALL_ONLINE }, | 690 | { 0x1208c0, 1, RI_ALL_ONLINE }, { 0x1208c8, 1, RI_ALL_ONLINE }, |
579 | { 0x1208c8, 1, RI_ALL_ONLINE }, { 0x1208d0, 1, RI_ALL_ONLINE }, | 691 | { 0x1208d0, 1, RI_ALL_ONLINE }, { 0x1208d8, 1, RI_ALL_ONLINE }, |
580 | { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE }, | 692 | { 0x1208e0, 1, RI_ALL_ONLINE }, { 0x1208e8, 1, RI_ALL_ONLINE }, |
581 | { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE }, | 693 | { 0x1208f0, 1, RI_ALL_ONLINE }, { 0x1208f8, 1, RI_ALL_ONLINE }, |
582 | { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE }, | 694 | { 0x120900, 1, RI_ALL_ONLINE }, { 0x120908, 1, RI_ALL_ONLINE }, |
583 | { 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE }, | 695 | { 0x130030, 1, RI_E2E3_ONLINE }, { 0x13004c, 3, RI_E2E3_ONLINE }, |
584 | { 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE }, | 696 | { 0x130064, 2, RI_E2E3_ONLINE }, { 0x13009c, 1, RI_E2E3_ONLINE }, |
585 | { 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE }, | 697 | { 0x130130, 1, RI_E2E3_ONLINE }, { 0x13016c, 1, RI_E2E3_ONLINE }, |
586 | { 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE }, | 698 | { 0x130300, 1, RI_E2E3_ONLINE }, { 0x130480, 1, RI_E2E3_ONLINE }, |
587 | { 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE }, | ||
588 | { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE }, | 699 | { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE }, |
589 | { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE }, | 700 | { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE }, |
590 | { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE }, | 701 | { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE }, |
@@ -602,8 +713,8 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { | |||
602 | { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE }, | 713 | { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE }, |
603 | { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE }, | 714 | { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE }, |
604 | { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE }, | 715 | { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE }, |
605 | { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE }, | 716 | { 0x16e684, 2, RI_E1HE2E3_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE }, |
606 | { 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE }, | 717 | { 0x16e6fc, 4, RI_E2E3_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE }, |
607 | { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE }, | 718 | { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE }, |
608 | { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE }, | 719 | { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE }, |
609 | { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE }, | 720 | { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE }, |
@@ -627,51 +738,61 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { | |||
627 | { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, | 738 | { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, |
628 | { 0x3380c0, 1, RI_ALL_ONLINE } | 739 | { 0x3380c0, 1, RI_ALL_ONLINE } |
629 | }; | 740 | }; |
741 | #define IDLE_REGS_COUNT ARRAY_SIZE(idle_addrs) | ||
630 | 742 | ||
631 | #define WREGS_COUNT_E1 1 | ||
632 | static const u32 read_reg_e1_0[] = { 0x1b1000 }; | 743 | static const u32 read_reg_e1_0[] = { 0x1b1000 }; |
744 | #define WREGS_COUNT_E1 ARRAY_SIZE(read_reg_e1_0) | ||
633 | 745 | ||
634 | static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = { | 746 | static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = { |
635 | { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE } | 747 | { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE } |
636 | }; | 748 | }; |
637 | 749 | ||
638 | #define WREGS_COUNT_E1H 1 | ||
639 | static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 }; | 750 | static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 }; |
751 | #define WREGS_COUNT_E1H ARRAY_SIZE(read_reg_e1h_0) | ||
640 | 752 | ||
641 | static const struct wreg_addr wreg_addrs_e1h[WREGS_COUNT_E1H] = { | 753 | static const struct wreg_addr wreg_addrs_e1h[WREGS_COUNT_E1H] = { |
642 | { 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE } | 754 | { 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE } |
643 | }; | 755 | }; |
644 | 756 | ||
645 | #define WREGS_COUNT_E2 1 | ||
646 | static const u32 read_reg_e2_0[] = { 0x1b1040, 0x1b1000 }; | 757 | static const u32 read_reg_e2_0[] = { 0x1b1040, 0x1b1000 }; |
758 | #define WREGS_COUNT_E2 ARRAY_SIZE(read_reg_e2_0) | ||
647 | 759 | ||
648 | static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = { | 760 | static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = { |
649 | { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE } | 761 | { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE } |
650 | }; | 762 | }; |
651 | 763 | ||
652 | static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a }; | 764 | static const u32 read_reg_e3_0[] = { 0x1b1040, 0x1b1000 }; |
765 | #define WREGS_COUNT_E3 ARRAY_SIZE(read_reg_e3_0) | ||
766 | |||
767 | static const struct wreg_addr wreg_addrs_e3[WREGS_COUNT_E3] = { | ||
768 | { 0x1b0c00, 128, 2, read_reg_e3_0, RI_E3_OFFLINE } }; | ||
769 | |||
770 | static const struct dump_sign dump_sign_all = { 0x4dbe9fca, 0x60011, 0x3a }; | ||
653 | 771 | ||
654 | #define TIMER_REGS_COUNT_E1 2 | 772 | static const u32 timer_status_regs_e1[] = { 0x164014, 0x164018 }; |
773 | #define TIMER_REGS_COUNT_E1 ARRAY_SIZE(timer_status_regs_e1) | ||
655 | 774 | ||
656 | static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = { | ||
657 | 0x164014, 0x164018 }; | ||
658 | static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = { | 775 | static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = { |
659 | 0x1640d0, 0x1640d4 }; | 776 | 0x1640d0, 0x1640d4 }; |
660 | 777 | ||
661 | #define TIMER_REGS_COUNT_E1H 2 | 778 | static const u32 timer_status_regs_e1h[] = { 0x164014, 0x164018 }; |
779 | #define TIMER_REGS_COUNT_E1H ARRAY_SIZE(timer_status_regs_e1h) | ||
662 | 780 | ||
663 | static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = { | ||
664 | 0x164014, 0x164018 }; | ||
665 | static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = { | 781 | static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = { |
666 | 0x1640d0, 0x1640d4 }; | 782 | 0x1640d0, 0x1640d4 }; |
667 | 783 | ||
668 | #define TIMER_REGS_COUNT_E2 2 | 784 | static const u32 timer_status_regs_e2[] = { 0x164014, 0x164018 }; |
785 | #define TIMER_REGS_COUNT_E2 ARRAY_SIZE(timer_status_regs_e2) | ||
669 | 786 | ||
670 | static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = { | ||
671 | 0x164014, 0x164018 }; | ||
672 | static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = { | 787 | static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = { |
673 | 0x1640d0, 0x1640d4 }; | 788 | 0x1640d0, 0x1640d4 }; |
674 | 789 | ||
790 | static const u32 timer_status_regs_e3[] = { 0x164014, 0x164018 }; | ||
791 | #define TIMER_REGS_COUNT_E3 ARRAY_SIZE(timer_status_regs_e3) | ||
792 | |||
793 | static const u32 timer_scan_regs_e3[TIMER_REGS_COUNT_E3] = { | ||
794 | 0x1640d0, 0x1640d4 }; | ||
795 | |||
675 | #define PAGE_MODE_VALUES_E1 0 | 796 | #define PAGE_MODE_VALUES_E1 0 |
676 | 797 | ||
677 | #define PAGE_READ_REGS_E1 0 | 798 | #define PAGE_READ_REGS_E1 0 |
@@ -682,7 +803,8 @@ static const u32 page_vals_e1[] = { 0 }; | |||
682 | 803 | ||
683 | static const u32 page_write_regs_e1[] = { 0 }; | 804 | static const u32 page_write_regs_e1[] = { 0 }; |
684 | 805 | ||
685 | static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } }; | 806 | static const struct reg_addr page_read_regs_e1[] = { |
807 | { 0x0, 0, RI_E1_ONLINE } }; | ||
686 | 808 | ||
687 | #define PAGE_MODE_VALUES_E1H 0 | 809 | #define PAGE_MODE_VALUES_E1H 0 |
688 | 810 | ||
@@ -697,17 +819,24 @@ static const u32 page_write_regs_e1h[] = { 0 }; | |||
697 | static const struct reg_addr page_read_regs_e1h[] = { | 819 | static const struct reg_addr page_read_regs_e1h[] = { |
698 | { 0x0, 0, RI_E1H_ONLINE } }; | 820 | { 0x0, 0, RI_E1H_ONLINE } }; |
699 | 821 | ||
700 | #define PAGE_MODE_VALUES_E2 2 | 822 | static const u32 page_vals_e2[] = { 0, 128 }; |
823 | #define PAGE_MODE_VALUES_E2 ARRAY_SIZE(page_vals_e2) | ||
701 | 824 | ||
702 | #define PAGE_READ_REGS_E2 1 | 825 | static const u32 page_write_regs_e2[] = { 328476 }; |
826 | #define PAGE_WRITE_REGS_E2 ARRAY_SIZE(page_write_regs_e2) | ||
703 | 827 | ||
704 | #define PAGE_WRITE_REGS_E2 1 | 828 | static const struct reg_addr page_read_regs_e2[] = { |
829 | { 0x58000, 4608, RI_E2_ONLINE } }; | ||
830 | #define PAGE_READ_REGS_E2 ARRAY_SIZE(page_read_regs_e2) | ||
705 | 831 | ||
706 | static const u32 page_vals_e2[PAGE_MODE_VALUES_E2] = { 0, 128 }; | 832 | static const u32 page_vals_e3[] = { 0, 128 }; |
833 | #define PAGE_MODE_VALUES_E3 ARRAY_SIZE(page_vals_e3) | ||
707 | 834 | ||
708 | static const u32 page_write_regs_e2[PAGE_WRITE_REGS_E2] = { 328476 }; | 835 | static const u32 page_write_regs_e3[] = { 328476 }; |
836 | #define PAGE_WRITE_REGS_E3 ARRAY_SIZE(page_write_regs_e3) | ||
709 | 837 | ||
710 | static const struct reg_addr page_read_regs_e2[PAGE_READ_REGS_E2] = { | 838 | static const struct reg_addr page_read_regs_e3[] = { |
711 | { 0x58000, 4608, RI_E2_ONLINE } }; | 839 | { 0x58000, 4608, RI_E3_ONLINE } }; |
840 | #define PAGE_READ_REGS_E3 ARRAY_SIZE(page_read_regs_e3) | ||
712 | 841 | ||
713 | #endif /* BNX2X_DUMP_H */ | 842 | #endif /* BNX2X_DUMP_H */ |
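A pattern repeated across the bnx2x_dump.h hunks above is that every hand-maintained element count (REGS_COUNT, IDLE_REGS_COUNT, the WREGS_COUNT_*, TIMER_REGS_COUNT_* and PAGE_*_E2/E3 macros) is now derived from the array initializer with ARRAY_SIZE(), so the newly added E3 tables cannot drift out of sync with their counts. A minimal, self-contained sketch of the idiom follows; the struct and identifiers are illustrative stand-ins rather than the driver's own definitions, and the simplified ARRAY_SIZE() here omits the kernel macro's array type check.

#include <stdio.h>

/* Simplified stand-in for the kernel's ARRAY_SIZE(); the real macro also
 * verifies at compile time that its argument is an array, not a pointer. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/* Hypothetical table entry mirroring the { addr, size, info } initializers
 * seen in bnx2x_dump.h; the field names are illustrative. */
struct demo_reg_addr {
	unsigned int addr;
	unsigned int size;
	unsigned int info;
};

static const struct demo_reg_addr demo_addrs[] = {
	{ 0x164014, 1, 0x1 },
	{ 0x164018, 1, 0x1 },
};

/* The count is computed from the initializer, so adding or removing an
 * entry above can never leave a hand-written count macro stale. */
#define DEMO_REGS_COUNT ARRAY_SIZE(demo_addrs)

int main(void)
{
	printf("DEMO_REGS_COUNT = %zu\n", DEMO_REGS_COUNT);
	return 0;
}

Deriving the count from the table also matches how bnx2x_ethtool.c already sizes its string arrays (BNX2X_NUM_Q_STATS and BNX2X_NUM_STATS in the hunks below).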
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c index 7a133052660a..1b4fa1d98005 100644 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/bnx2x/bnx2x_ethtool.c | |||
@@ -38,8 +38,6 @@ static const struct { | |||
38 | char string[ETH_GSTRING_LEN]; | 38 | char string[ETH_GSTRING_LEN]; |
39 | } bnx2x_q_stats_arr[] = { | 39 | } bnx2x_q_stats_arr[] = { |
40 | /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" }, | 40 | /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" }, |
41 | { Q_STATS_OFFSET32(error_bytes_received_hi), | ||
42 | 8, "[%s]: rx_error_bytes" }, | ||
43 | { Q_STATS_OFFSET32(total_unicast_packets_received_hi), | 41 | { Q_STATS_OFFSET32(total_unicast_packets_received_hi), |
44 | 8, "[%s]: rx_ucast_packets" }, | 42 | 8, "[%s]: rx_ucast_packets" }, |
45 | { Q_STATS_OFFSET32(total_multicast_packets_received_hi), | 43 | { Q_STATS_OFFSET32(total_multicast_packets_received_hi), |
@@ -53,13 +51,18 @@ static const struct { | |||
53 | 4, "[%s]: rx_skb_alloc_discard" }, | 51 | 4, "[%s]: rx_skb_alloc_discard" }, |
54 | { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" }, | 52 | { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" }, |
55 | 53 | ||
56 | /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" }, | 54 | { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" }, |
57 | { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), | 55 | /* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), |
58 | 8, "[%s]: tx_ucast_packets" }, | 56 | 8, "[%s]: tx_ucast_packets" }, |
59 | { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), | 57 | { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), |
60 | 8, "[%s]: tx_mcast_packets" }, | 58 | 8, "[%s]: tx_mcast_packets" }, |
61 | { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), | 59 | { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), |
62 | 8, "[%s]: tx_bcast_packets" } | 60 | 8, "[%s]: tx_bcast_packets" }, |
61 | { Q_STATS_OFFSET32(total_tpa_aggregations_hi), | ||
62 | 8, "[%s]: tpa_aggregations" }, | ||
63 | { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), | ||
64 | 8, "[%s]: tpa_aggregated_frames"}, | ||
65 | { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"} | ||
63 | }; | 66 | }; |
64 | 67 | ||
65 | #define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr) | 68 | #define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr) |
@@ -99,8 +102,8 @@ static const struct { | |||
99 | 8, STATS_FLAGS_BOTH, "rx_discards" }, | 102 | 8, STATS_FLAGS_BOTH, "rx_discards" }, |
100 | { STATS_OFFSET32(mac_filter_discard), | 103 | { STATS_OFFSET32(mac_filter_discard), |
101 | 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, | 104 | 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, |
102 | { STATS_OFFSET32(xxoverflow_discard), | 105 | { STATS_OFFSET32(mf_tag_discard), |
103 | 4, STATS_FLAGS_PORT, "rx_fw_discards" }, | 106 | 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, |
104 | { STATS_OFFSET32(brb_drop_hi), | 107 | { STATS_OFFSET32(brb_drop_hi), |
105 | 8, STATS_FLAGS_PORT, "rx_brb_discard" }, | 108 | 8, STATS_FLAGS_PORT, "rx_brb_discard" }, |
106 | { STATS_OFFSET32(brb_truncate_hi), | 109 | { STATS_OFFSET32(brb_truncate_hi), |
@@ -159,7 +162,13 @@ static const struct { | |||
159 | { STATS_OFFSET32(etherstatspktsover1522octets_hi), | 162 | { STATS_OFFSET32(etherstatspktsover1522octets_hi), |
160 | 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, | 163 | 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, |
161 | { STATS_OFFSET32(pause_frames_sent_hi), | 164 | { STATS_OFFSET32(pause_frames_sent_hi), |
162 | 8, STATS_FLAGS_PORT, "tx_pause_frames" } | 165 | 8, STATS_FLAGS_PORT, "tx_pause_frames" }, |
166 | { STATS_OFFSET32(total_tpa_aggregations_hi), | ||
167 | 8, STATS_FLAGS_FUNC, "tpa_aggregations" }, | ||
168 | { STATS_OFFSET32(total_tpa_aggregated_frames_hi), | ||
169 | 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"}, | ||
170 | { STATS_OFFSET32(total_tpa_bytes_hi), | ||
171 | 8, STATS_FLAGS_FUNC, "tpa_bytes"} | ||
163 | }; | 172 | }; |
164 | 173 | ||
165 | #define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) | 174 | #define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) |
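The two hunks above extend the per-queue and global descriptor tables with the new TPA counters; each entry ties a byte offset into the driver's statistics block to a width (8 for the split hi/lo 64-bit firmware counters, 4 for plain 32-bit ones) and the string reported by ethtool -S, with ARRAY_SIZE() keeping the exported count in step automatically. Below is a rough, self-contained sketch of how such a descriptor table can drive the readout; it uses a simplified stats layout rather than the driver's firmware structures, and the names are made up for illustration.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/* Illustrative stats blob; the real driver indexes into its firmware
 * statistics structure via Q_STATS_OFFSET32()/STATS_OFFSET32() and keeps
 * 64-bit counters as hi/lo 32-bit halves (hence the *_hi names above). */
struct demo_stats {
	uint64_t rx_bytes;
	uint64_t tpa_bytes;
	uint32_t rx_csum_errors;
};

struct demo_stat_desc {
	size_t offset;		/* byte offset into struct demo_stats */
	int size;		/* 8 for 64-bit counters, 4 for 32-bit ones */
	const char *name;	/* string exposed through ethtool -S */
};

static const struct demo_stat_desc demo_stats_arr[] = {
	{ offsetof(struct demo_stats, rx_bytes),       8, "rx_bytes" },
	{ offsetof(struct demo_stats, tpa_bytes),      8, "tpa_bytes" },
	{ offsetof(struct demo_stats, rx_csum_errors), 4, "rx_csum_offload_errors" },
};

int main(void)
{
	struct demo_stats s = { 1234, 567, 2 };
	const char *base = (const char *)&s;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(demo_stats_arr); i++) {
		const struct demo_stat_desc *d = &demo_stats_arr[i];
		uint64_t val;

		if (d->size == 8) {
			memcpy(&val, base + d->offset, sizeof(uint64_t));
		} else {
			uint32_t v32;

			memcpy(&v32, base + d->offset, sizeof(uint32_t));
			val = v32;
		}
		printf("%-24s %llu\n", d->name, (unsigned long long)val);
	}
	return 0;
}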
@@ -517,7 +526,7 @@ static int bnx2x_get_regs_len(struct net_device *dev) | |||
517 | if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info)) | 526 | if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info)) |
518 | regdump_len += wreg_addrs_e1h[i].size * | 527 | regdump_len += wreg_addrs_e1h[i].size * |
519 | (1 + wreg_addrs_e1h[i].read_regs_count); | 528 | (1 + wreg_addrs_e1h[i].read_regs_count); |
520 | } else if (CHIP_IS_E2(bp)) { | 529 | } else if (!CHIP_IS_E1x(bp)) { |
521 | for (i = 0; i < REGS_COUNT; i++) | 530 | for (i = 0; i < REGS_COUNT; i++) |
522 | if (IS_E2_ONLINE(reg_addrs[i].info)) | 531 | if (IS_E2_ONLINE(reg_addrs[i].info)) |
523 | regdump_len += reg_addrs[i].size; | 532 | regdump_len += reg_addrs[i].size; |
@@ -589,7 +598,7 @@ static void bnx2x_get_regs(struct net_device *dev, | |||
589 | dump_hdr.info = RI_E1_ONLINE; | 598 | dump_hdr.info = RI_E1_ONLINE; |
590 | else if (CHIP_IS_E1H(bp)) | 599 | else if (CHIP_IS_E1H(bp)) |
591 | dump_hdr.info = RI_E1H_ONLINE; | 600 | dump_hdr.info = RI_E1H_ONLINE; |
592 | else if (CHIP_IS_E2(bp)) | 601 | else if (!CHIP_IS_E1x(bp)) |
593 | dump_hdr.info = RI_E2_ONLINE | | 602 | dump_hdr.info = RI_E2_ONLINE | |
594 | (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP); | 603 | (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP); |
595 | 604 | ||
@@ -610,14 +619,18 @@ static void bnx2x_get_regs(struct net_device *dev, | |||
610 | *p++ = REG_RD(bp, | 619 | *p++ = REG_RD(bp, |
611 | reg_addrs[i].addr + j*4); | 620 | reg_addrs[i].addr + j*4); |
612 | 621 | ||
613 | } else if (CHIP_IS_E2(bp)) { | 622 | } else if (!CHIP_IS_E1x(bp)) { |
614 | for (i = 0; i < REGS_COUNT; i++) | 623 | for (i = 0; i < REGS_COUNT; i++) |
615 | if (IS_E2_ONLINE(reg_addrs[i].info)) | 624 | if (IS_E2_ONLINE(reg_addrs[i].info)) |
616 | for (j = 0; j < reg_addrs[i].size; j++) | 625 | for (j = 0; j < reg_addrs[i].size; j++) |
617 | *p++ = REG_RD(bp, | 626 | *p++ = REG_RD(bp, |
618 | reg_addrs[i].addr + j*4); | 627 | reg_addrs[i].addr + j*4); |
619 | 628 | ||
620 | bnx2x_read_pages_regs_e2(bp, p); | 629 | if (CHIP_IS_E2(bp)) |
630 | bnx2x_read_pages_regs_e2(bp, p); | ||
631 | else | ||
632 | /* E3 paged register read is not implemented yet */ | ||
633 | WARN_ON(1); | ||
621 | } | 634 | } |
622 | /* Re-enable parity attentions */ | 635 | /* Re-enable parity attentions */ |
623 | bnx2x_clear_blocks_parity(bp); | 636 | bnx2x_clear_blocks_parity(bp); |
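The CHIP_IS_E2() checks in the register-dump path are widened to !CHIP_IS_E1x() so that the new E3/578xx parts take the same branch as E2; only the paged-register read stays E2-specific, with a WARN_ON placeholder for E3. A tiny standalone sketch of the family split the predicate expresses, with made-up names:

/* "E1x" means first-generation silicon (E1 or E1H); everything else
 * goes down the E2/E3 path.  Names here are illustrative only. */
#include <stdio.h>
#include <stdbool.h>

enum demo_chip { DEMO_E1, DEMO_E1H, DEMO_E2, DEMO_E3 };

static bool demo_is_e1x(int c)
{
	return c == DEMO_E1 || c == DEMO_E1H;
}

int main(void)
{
	for (int c = DEMO_E1; c <= DEMO_E3; c++)
		printf("chip %d: %s\n", c,
		       demo_is_e1x(c) ? "E1x path" : "E2/E3 path");
	return 0;
}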
@@ -625,8 +638,6 @@ static void bnx2x_get_regs(struct net_device *dev, | |||
625 | bnx2x_enable_blocks_parity(bp); | 638 | bnx2x_enable_blocks_parity(bp); |
626 | } | 639 | } |
627 | 640 | ||
628 | #define PHY_FW_VER_LEN 20 | ||
629 | |||
630 | static void bnx2x_get_drvinfo(struct net_device *dev, | 641 | static void bnx2x_get_drvinfo(struct net_device *dev, |
631 | struct ethtool_drvinfo *info) | 642 | struct ethtool_drvinfo *info) |
632 | { | 643 | { |
@@ -1334,60 +1345,129 @@ static const struct { | |||
1334 | { "idle check (online)" } | 1345 | { "idle check (online)" } |
1335 | }; | 1346 | }; |
1336 | 1347 | ||
1348 | enum { | ||
1349 | BNX2X_CHIP_E1_OFST = 0, | ||
1350 | BNX2X_CHIP_E1H_OFST, | ||
1351 | BNX2X_CHIP_E2_OFST, | ||
1352 | BNX2X_CHIP_E3_OFST, | ||
1353 | BNX2X_CHIP_E3B0_OFST, | ||
1354 | BNX2X_CHIP_MAX_OFST | ||
1355 | }; | ||
1356 | |||
1357 | #define BNX2X_CHIP_MASK_E1 (1 << BNX2X_CHIP_E1_OFST) | ||
1358 | #define BNX2X_CHIP_MASK_E1H (1 << BNX2X_CHIP_E1H_OFST) | ||
1359 | #define BNX2X_CHIP_MASK_E2 (1 << BNX2X_CHIP_E2_OFST) | ||
1360 | #define BNX2X_CHIP_MASK_E3 (1 << BNX2X_CHIP_E3_OFST) | ||
1361 | #define BNX2X_CHIP_MASK_E3B0 (1 << BNX2X_CHIP_E3B0_OFST) | ||
1362 | |||
1363 | #define BNX2X_CHIP_MASK_ALL ((1 << BNX2X_CHIP_MAX_OFST) - 1) | ||
1364 | #define BNX2X_CHIP_MASK_E1X (BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H) | ||
1365 | |||
1337 | static int bnx2x_test_registers(struct bnx2x *bp) | 1366 | static int bnx2x_test_registers(struct bnx2x *bp) |
1338 | { | 1367 | { |
1339 | int idx, i, rc = -ENODEV; | 1368 | int idx, i, rc = -ENODEV; |
1340 | u32 wr_val = 0; | 1369 | u32 wr_val = 0, hw; |
1341 | int port = BP_PORT(bp); | 1370 | int port = BP_PORT(bp); |
1342 | static const struct { | 1371 | static const struct { |
1372 | u32 hw; | ||
1343 | u32 offset0; | 1373 | u32 offset0; |
1344 | u32 offset1; | 1374 | u32 offset1; |
1345 | u32 mask; | 1375 | u32 mask; |
1346 | } reg_tbl[] = { | 1376 | } reg_tbl[] = { |
1347 | /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, | 1377 | /* 0 */ { BNX2X_CHIP_MASK_ALL, |
1348 | { DORQ_REG_DB_ADDR0, 4, 0xffffffff }, | 1378 | BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, |
1349 | { HC_REG_AGG_INT_0, 4, 0x000003ff }, | 1379 | { BNX2X_CHIP_MASK_ALL, |
1350 | { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 }, | 1380 | DORQ_REG_DB_ADDR0, 4, 0xffffffff }, |
1351 | { PBF_REG_P0_INIT_CRD, 4, 0x000007ff }, | 1381 | { BNX2X_CHIP_MASK_E1X, |
1352 | { PRS_REG_CID_PORT_0, 4, 0x00ffffff }, | 1382 | HC_REG_AGG_INT_0, 4, 0x000003ff }, |
1353 | { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff }, | 1383 | { BNX2X_CHIP_MASK_ALL, |
1354 | { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, | 1384 | PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 }, |
1355 | { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff }, | 1385 | { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3, |
1356 | { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, | 1386 | PBF_REG_P0_INIT_CRD, 4, 0x000007ff }, |
1357 | /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff }, | 1387 | { BNX2X_CHIP_MASK_E3B0, |
1358 | { QM_REG_CONNNUM_0, 4, 0x000fffff }, | 1388 | PBF_REG_INIT_CRD_Q0, 4, 0x000007ff }, |
1359 | { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff }, | 1389 | { BNX2X_CHIP_MASK_ALL, |
1360 | { SRC_REG_KEYRSS0_0, 40, 0xffffffff }, | 1390 | PRS_REG_CID_PORT_0, 4, 0x00ffffff }, |
1361 | { SRC_REG_KEYRSS0_7, 40, 0xffffffff }, | 1391 | { BNX2X_CHIP_MASK_ALL, |
1362 | { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 }, | 1392 | PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff }, |
1363 | { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 }, | 1393 | { BNX2X_CHIP_MASK_ALL, |
1364 | { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff }, | 1394 | PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, |
1365 | { NIG_REG_LLH0_T_BIT, 4, 0x00000001 }, | 1395 | { BNX2X_CHIP_MASK_ALL, |
1366 | { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 }, | 1396 | PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff }, |
1367 | /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 }, | 1397 | /* 10 */ { BNX2X_CHIP_MASK_ALL, |
1368 | { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 }, | 1398 | PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, |
1369 | { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 }, | 1399 | { BNX2X_CHIP_MASK_ALL, |
1370 | { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 }, | 1400 | PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff }, |
1371 | { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff }, | 1401 | { BNX2X_CHIP_MASK_ALL, |
1372 | { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff }, | 1402 | QM_REG_CONNNUM_0, 4, 0x000fffff }, |
1373 | { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff }, | 1403 | { BNX2X_CHIP_MASK_ALL, |
1374 | { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff }, | 1404 | TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff }, |
1375 | { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 }, | 1405 | { BNX2X_CHIP_MASK_ALL, |
1376 | { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff }, | 1406 | SRC_REG_KEYRSS0_0, 40, 0xffffffff }, |
1377 | /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff }, | 1407 | { BNX2X_CHIP_MASK_ALL, |
1378 | { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff }, | 1408 | SRC_REG_KEYRSS0_7, 40, 0xffffffff }, |
1379 | { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 }, | 1409 | { BNX2X_CHIP_MASK_ALL, |
1380 | { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 }, | 1410 | XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 }, |
1381 | { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff }, | 1411 | { BNX2X_CHIP_MASK_ALL, |
1382 | { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 }, | 1412 | XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 }, |
1383 | { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f }, | 1413 | { BNX2X_CHIP_MASK_ALL, |
1384 | 1414 | XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff }, | |
1385 | { 0xffffffff, 0, 0x00000000 } | 1415 | { BNX2X_CHIP_MASK_ALL, |
1416 | NIG_REG_LLH0_T_BIT, 4, 0x00000001 }, | ||
1417 | /* 20 */ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, | ||
1418 | NIG_REG_EMAC0_IN_EN, 4, 0x00000001 }, | ||
1419 | { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, | ||
1420 | NIG_REG_BMAC0_IN_EN, 4, 0x00000001 }, | ||
1421 | { BNX2X_CHIP_MASK_ALL, | ||
1422 | NIG_REG_XCM0_OUT_EN, 4, 0x00000001 }, | ||
1423 | { BNX2X_CHIP_MASK_ALL, | ||
1424 | NIG_REG_BRB0_OUT_EN, 4, 0x00000001 }, | ||
1425 | { BNX2X_CHIP_MASK_ALL, | ||
1426 | NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 }, | ||
1427 | { BNX2X_CHIP_MASK_ALL, | ||
1428 | NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff }, | ||
1429 | { BNX2X_CHIP_MASK_ALL, | ||
1430 | NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff }, | ||
1431 | { BNX2X_CHIP_MASK_ALL, | ||
1432 | NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff }, | ||
1433 | { BNX2X_CHIP_MASK_ALL, | ||
1434 | NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff }, | ||
1435 | { BNX2X_CHIP_MASK_ALL, | ||
1436 | NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 }, | ||
1437 | /* 30 */ { BNX2X_CHIP_MASK_ALL, | ||
1438 | NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff }, | ||
1439 | { BNX2X_CHIP_MASK_ALL, | ||
1440 | NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff }, | ||
1441 | { BNX2X_CHIP_MASK_ALL, | ||
1442 | NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff }, | ||
1443 | { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, | ||
1444 | NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 }, | ||
1445 | { BNX2X_CHIP_MASK_ALL, | ||
1446 | NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001}, | ||
1447 | { BNX2X_CHIP_MASK_ALL, | ||
1448 | NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff }, | ||
1449 | { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, | ||
1450 | NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 }, | ||
1451 | { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, | ||
1452 | NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f }, | ||
1453 | |||
1454 | { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 } | ||
1386 | }; | 1455 | }; |
1387 | 1456 | ||
1388 | if (!netif_running(bp->dev)) | 1457 | if (!netif_running(bp->dev)) |
1389 | return rc; | 1458 | return rc; |
1390 | 1459 | ||
1460 | if (CHIP_IS_E1(bp)) | ||
1461 | hw = BNX2X_CHIP_MASK_E1; | ||
1462 | else if (CHIP_IS_E1H(bp)) | ||
1463 | hw = BNX2X_CHIP_MASK_E1H; | ||
1464 | else if (CHIP_IS_E2(bp)) | ||
1465 | hw = BNX2X_CHIP_MASK_E2; | ||
1466 | else if (CHIP_IS_E3B0(bp)) | ||
1467 | hw = BNX2X_CHIP_MASK_E3B0; | ||
1468 | else /* e3 A0 */ | ||
1469 | hw = BNX2X_CHIP_MASK_E3; | ||
1470 | |||
1391 | /* Repeat the test twice: | 1471 | /* Repeat the test twice: |
1392 | First by writing 0x00000000, second by writing 0xffffffff */ | 1472 | First by writing 0x00000000, second by writing 0xffffffff */ |
1393 | for (idx = 0; idx < 2; idx++) { | 1473 | for (idx = 0; idx < 2; idx++) { |
@@ -1403,8 +1483,7 @@ static int bnx2x_test_registers(struct bnx2x *bp) | |||
1403 | 1483 | ||
1404 | for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { | 1484 | for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { |
1405 | u32 offset, mask, save_val, val; | 1485 | u32 offset, mask, save_val, val; |
1406 | if (CHIP_IS_E2(bp) && | 1486 | if (!(hw & reg_tbl[i].hw)) |
1407 | reg_tbl[i].offset0 == HC_REG_AGG_INT_0) | ||
1408 | continue; | 1487 | continue; |
1409 | 1488 | ||
1410 | offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; | 1489 | offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; |
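Rather than special-casing individual offsets (the old HC_REG_AGG_INT_0-on-E2 exception), each reg_tbl row now carries a chip-family bitmask and the loop skips any row whose mask does not include the running chip. A self-contained sketch of that filtering pattern follows; the register offsets and masks are made up for illustration.

/* Mask-driven register test: each entry names the chips it applies to,
 * and one runtime bit selects which rows to exercise. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_MASK_E1	(1u << 0)
#define DEMO_MASK_E1H	(1u << 1)
#define DEMO_MASK_E2	(1u << 2)
#define DEMO_MASK_E3	(1u << 3)
#define DEMO_MASK_E1X	(DEMO_MASK_E1 | DEMO_MASK_E1H)
#define DEMO_MASK_ALL	(DEMO_MASK_E1X | DEMO_MASK_E2 | DEMO_MASK_E3)

static const struct {
	uint32_t hw;		/* chips this register exists on */
	uint32_t offset;	/* illustrative offsets */
	uint32_t mask;		/* writable bits */
} demo_reg_tbl[] = {
	{ DEMO_MASK_ALL, 0x61000, 0x000003ff },
	{ DEMO_MASK_E1X, 0x10800, 0x000003ff },	/* e.g. HC only on E1/E1H */
	{ DEMO_MASK_E3,  0x140c0, 0x000007ff },	/* e.g. an E3-only credit reg */
	{ 0, 0xffffffff, 0 }			/* terminator */
};

int main(void)
{
	uint32_t hw = DEMO_MASK_E2;	/* pretend we run on an E2 */

	for (int i = 0; demo_reg_tbl[i].offset != 0xffffffff; i++) {
		if (!(hw & demo_reg_tbl[i].hw)) {
			printf("skip 0x%x (not on this chip)\n",
			       demo_reg_tbl[i].offset);
			continue;
		}
		printf("test 0x%x with mask 0x%08x\n",
		       demo_reg_tbl[i].offset, demo_reg_tbl[i].mask);
	}
	return 0;
}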
@@ -1421,7 +1500,7 @@ static int bnx2x_test_registers(struct bnx2x *bp) | |||
1421 | 1500 | ||
1422 | /* verify value is as expected */ | 1501 | /* verify value is as expected */ |
1423 | if ((val & mask) != (wr_val & mask)) { | 1502 | if ((val & mask) != (wr_val & mask)) { |
1424 | DP(NETIF_MSG_PROBE, | 1503 | DP(NETIF_MSG_HW, |
1425 | "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n", | 1504 | "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n", |
1426 | offset, val, wr_val, mask); | 1505 | offset, val, wr_val, mask); |
1427 | goto test_reg_exit; | 1506 | goto test_reg_exit; |
@@ -1438,7 +1517,7 @@ test_reg_exit: | |||
1438 | static int bnx2x_test_memory(struct bnx2x *bp) | 1517 | static int bnx2x_test_memory(struct bnx2x *bp) |
1439 | { | 1518 | { |
1440 | int i, j, rc = -ENODEV; | 1519 | int i, j, rc = -ENODEV; |
1441 | u32 val; | 1520 | u32 val, index; |
1442 | static const struct { | 1521 | static const struct { |
1443 | u32 offset; | 1522 | u32 offset; |
1444 | int size; | 1523 | int size; |
@@ -1453,32 +1532,44 @@ static int bnx2x_test_memory(struct bnx2x *bp) | |||
1453 | 1532 | ||
1454 | { 0xffffffff, 0 } | 1533 | { 0xffffffff, 0 } |
1455 | }; | 1534 | }; |
1535 | |||
1456 | static const struct { | 1536 | static const struct { |
1457 | char *name; | 1537 | char *name; |
1458 | u32 offset; | 1538 | u32 offset; |
1459 | u32 e1_mask; | 1539 | u32 hw_mask[BNX2X_CHIP_MAX_OFST]; |
1460 | u32 e1h_mask; | ||
1461 | u32 e2_mask; | ||
1462 | } prty_tbl[] = { | 1540 | } prty_tbl[] = { |
1463 | { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0, 0 }, | 1541 | { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, |
1464 | { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2, 0 }, | 1542 | {0x3ffc0, 0, 0, 0} }, |
1465 | { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0, 0 }, | 1543 | { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, |
1466 | { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0, 0 }, | 1544 | {0x2, 0x2, 0, 0} }, |
1467 | { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0, 0 }, | 1545 | { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, |
1468 | { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0, 0 }, | 1546 | {0, 0, 0, 0} }, |
1469 | 1547 | { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, | |
1470 | { NULL, 0xffffffff, 0, 0, 0 } | 1548 | {0x3ffc0, 0, 0, 0} }, |
1549 | { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, | ||
1550 | {0x3ffc0, 0, 0, 0} }, | ||
1551 | { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, | ||
1552 | {0x3ffc1, 0, 0, 0} }, | ||
1553 | |||
1554 | { NULL, 0xffffffff, {0, 0, 0, 0} } | ||
1471 | }; | 1555 | }; |
1472 | 1556 | ||
1473 | if (!netif_running(bp->dev)) | 1557 | if (!netif_running(bp->dev)) |
1474 | return rc; | 1558 | return rc; |
1475 | 1559 | ||
1560 | if (CHIP_IS_E1(bp)) | ||
1561 | index = BNX2X_CHIP_E1_OFST; | ||
1562 | else if (CHIP_IS_E1H(bp)) | ||
1563 | index = BNX2X_CHIP_E1H_OFST; | ||
1564 | else if (CHIP_IS_E2(bp)) | ||
1565 | index = BNX2X_CHIP_E2_OFST; | ||
1566 | else /* e3 */ | ||
1567 | index = BNX2X_CHIP_E3_OFST; | ||
1568 | |||
1476 | /* pre-Check the parity status */ | 1569 | /* pre-Check the parity status */ |
1477 | for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { | 1570 | for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { |
1478 | val = REG_RD(bp, prty_tbl[i].offset); | 1571 | val = REG_RD(bp, prty_tbl[i].offset); |
1479 | if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) || | 1572 | if (val & ~(prty_tbl[i].hw_mask[index])) { |
1480 | (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) || | ||
1481 | (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) { | ||
1482 | DP(NETIF_MSG_HW, | 1573 | DP(NETIF_MSG_HW, |
1483 | "%s is 0x%x\n", prty_tbl[i].name, val); | 1574 | "%s is 0x%x\n", prty_tbl[i].name, val); |
1484 | goto test_mem_exit; | 1575 | goto test_mem_exit; |
@@ -1493,9 +1584,7 @@ static int bnx2x_test_memory(struct bnx2x *bp) | |||
1493 | /* Check the parity status */ | 1584 | /* Check the parity status */ |
1494 | for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { | 1585 | for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { |
1495 | val = REG_RD(bp, prty_tbl[i].offset); | 1586 | val = REG_RD(bp, prty_tbl[i].offset); |
1496 | if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) || | 1587 | if (val & ~(prty_tbl[i].hw_mask[index])) { |
1497 | (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) || | ||
1498 | (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) { | ||
1499 | DP(NETIF_MSG_HW, | 1588 | DP(NETIF_MSG_HW, |
1500 | "%s is 0x%x\n", prty_tbl[i].name, val); | 1589 | "%s is 0x%x\n", prty_tbl[i].name, val); |
1501 | goto test_mem_exit; | 1590 | goto test_mem_exit; |
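The parity table drops its separate e1/e1h/e2 mask fields in favour of a single hw_mask[] array indexed by a per-chip offset, so supporting E3 (and later chips) only means another array slot per row and one lookup in the check. A standalone sketch of the idea, with illustrative values:

/* Per-chip "ignorable parity bits" kept in one indexed array. */
#include <stdio.h>
#include <stdint.h>

enum { DEMO_E1_OFST, DEMO_E1H_OFST, DEMO_E2_OFST, DEMO_E3_OFST,
       DEMO_MAX_OFST };

static const struct {
	const char *name;
	uint32_t hw_mask[DEMO_MAX_OFST];
} demo_prty_tbl[] = {
	{ "CCM_PRTY_STS", {0x3ffc0, 0, 0, 0} },
	{ "CFC_PRTY_STS", {0x2, 0x2, 0, 0} },
};

int main(void)
{
	unsigned index = DEMO_E2_OFST;	/* pick the running chip once */
	uint32_t val = 0x2;		/* pretend register readout */

	for (unsigned i = 0; i < 2; i++)
		if (val & ~demo_prty_tbl[i].hw_mask[index])
			printf("%s reports unexpected parity 0x%x\n",
			       demo_prty_tbl[i].name, val);
	return 0;
}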
@@ -1512,12 +1601,16 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes) | |||
1512 | { | 1601 | { |
1513 | int cnt = 1400; | 1602 | int cnt = 1400; |
1514 | 1603 | ||
1515 | if (link_up) | 1604 | if (link_up) { |
1516 | while (bnx2x_link_test(bp, is_serdes) && cnt--) | 1605 | while (bnx2x_link_test(bp, is_serdes) && cnt--) |
1517 | msleep(10); | 1606 | msleep(20); |
1607 | |||
1608 | if (cnt <= 0 && bnx2x_link_test(bp, is_serdes)) | ||
1609 | DP(NETIF_MSG_LINK, "Timeout waiting for link up\n"); | ||
1610 | } | ||
1518 | } | 1611 | } |
1519 | 1612 | ||
1520 | static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | 1613 | static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) |
1521 | { | 1614 | { |
1522 | unsigned int pkt_size, num_pkts, i; | 1615 | unsigned int pkt_size, num_pkts, i; |
1523 | struct sk_buff *skb; | 1616 | struct sk_buff *skb; |
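The reworked bnx2x_wait_for_link() above polls in 20 ms steps and, once the retry budget is spent, re-checks the link once and logs a timeout instead of giving up silently. A minimal userspace sketch of that poll-with-timeout shape; the link predicate is a stand-in.

#include <stdio.h>
#include <unistd.h>

static int demo_link_down(void)	/* non-zero while the link is still down */
{
	static int tries;
	return ++tries < 5;
}

static void demo_wait_for_link(void)
{
	int cnt = 1400;

	while (demo_link_down() && cnt--)
		usleep(20 * 1000);	/* ~20 ms, like msleep(20) */

	if (cnt <= 0 && demo_link_down())
		fprintf(stderr, "Timeout waiting for link up\n");
}

int main(void)
{
	demo_wait_for_link();
	return 0;
}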
@@ -1526,14 +1619,14 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
1526 | struct bnx2x_fastpath *fp_tx = &bp->fp[0]; | 1619 | struct bnx2x_fastpath *fp_tx = &bp->fp[0]; |
1527 | u16 tx_start_idx, tx_idx; | 1620 | u16 tx_start_idx, tx_idx; |
1528 | u16 rx_start_idx, rx_idx; | 1621 | u16 rx_start_idx, rx_idx; |
1529 | u16 pkt_prod, bd_prod; | 1622 | u16 pkt_prod, bd_prod, rx_comp_cons; |
1530 | struct sw_tx_bd *tx_buf; | 1623 | struct sw_tx_bd *tx_buf; |
1531 | struct eth_tx_start_bd *tx_start_bd; | 1624 | struct eth_tx_start_bd *tx_start_bd; |
1532 | struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; | 1625 | struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; |
1533 | struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; | 1626 | struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; |
1534 | dma_addr_t mapping; | 1627 | dma_addr_t mapping; |
1535 | union eth_rx_cqe *cqe; | 1628 | union eth_rx_cqe *cqe; |
1536 | u8 cqe_fp_flags; | 1629 | u8 cqe_fp_flags, cqe_fp_type; |
1537 | struct sw_rx_bd *rx_buf; | 1630 | struct sw_rx_bd *rx_buf; |
1538 | u16 len; | 1631 | u16 len; |
1539 | int rc = -ENODEV; | 1632 | int rc = -ENODEV; |
@@ -1545,7 +1638,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
1545 | return -EINVAL; | 1638 | return -EINVAL; |
1546 | break; | 1639 | break; |
1547 | case BNX2X_MAC_LOOPBACK: | 1640 | case BNX2X_MAC_LOOPBACK: |
1548 | bp->link_params.loopback_mode = LOOPBACK_BMAC; | 1641 | bp->link_params.loopback_mode = CHIP_IS_E3(bp) ? |
1642 | LOOPBACK_XMAC : LOOPBACK_BMAC; | ||
1549 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); | 1643 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); |
1550 | break; | 1644 | break; |
1551 | default: | 1645 | default: |
@@ -1566,6 +1660,14 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
1566 | memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); | 1660 | memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); |
1567 | for (i = ETH_HLEN; i < pkt_size; i++) | 1661 | for (i = ETH_HLEN; i < pkt_size; i++) |
1568 | packet[i] = (unsigned char) (i & 0xff); | 1662 | packet[i] = (unsigned char) (i & 0xff); |
1663 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | ||
1664 | skb_headlen(skb), DMA_TO_DEVICE); | ||
1665 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | ||
1666 | rc = -ENOMEM; | ||
1667 | dev_kfree_skb(skb); | ||
1668 | BNX2X_ERR("Unable to map SKB\n"); | ||
1669 | goto test_loopback_exit; | ||
1670 | } | ||
1569 | 1671 | ||
1570 | /* send the loopback packet */ | 1672 | /* send the loopback packet */ |
1571 | num_pkts = 0; | 1673 | num_pkts = 0; |
@@ -1580,8 +1682,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
1580 | 1682 | ||
1581 | bd_prod = TX_BD(fp_tx->tx_bd_prod); | 1683 | bd_prod = TX_BD(fp_tx->tx_bd_prod); |
1582 | tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; | 1684 | tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; |
1583 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | ||
1584 | skb_headlen(skb), DMA_TO_DEVICE); | ||
1585 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | 1685 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
1586 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | 1686 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
1587 | tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ | 1687 | tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ |
@@ -1611,6 +1711,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
1611 | DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw); | 1711 | DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw); |
1612 | 1712 | ||
1613 | mmiowb(); | 1713 | mmiowb(); |
1714 | barrier(); | ||
1614 | 1715 | ||
1615 | num_pkts++; | 1716 | num_pkts++; |
1616 | fp_tx->tx_bd_prod += 2; /* start + pbd */ | 1717 | fp_tx->tx_bd_prod += 2; /* start + pbd */ |
@@ -1639,9 +1740,11 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
1639 | if (rx_idx != rx_start_idx + num_pkts) | 1740 | if (rx_idx != rx_start_idx + num_pkts) |
1640 | goto test_loopback_exit; | 1741 | goto test_loopback_exit; |
1641 | 1742 | ||
1642 | cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)]; | 1743 | rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons); |
1744 | cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)]; | ||
1643 | cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; | 1745 | cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; |
1644 | if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) | 1746 | cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; |
1747 | if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) | ||
1645 | goto test_loopback_rx_exit; | 1748 | goto test_loopback_rx_exit; |
1646 | 1749 | ||
1647 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); | 1750 | len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); |
@@ -1649,6 +1752,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
1649 | goto test_loopback_rx_exit; | 1752 | goto test_loopback_rx_exit; |
1650 | 1753 | ||
1651 | rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)]; | 1754 | rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)]; |
1755 | dma_sync_single_for_device(&bp->pdev->dev, | ||
1756 | dma_unmap_addr(rx_buf, mapping), | ||
1757 | fp_rx->rx_buf_size, DMA_FROM_DEVICE); | ||
1652 | skb = rx_buf->skb; | 1758 | skb = rx_buf->skb; |
1653 | skb_reserve(skb, cqe->fast_path_cqe.placement_offset); | 1759 | skb_reserve(skb, cqe->fast_path_cqe.placement_offset); |
1654 | for (i = ETH_HLEN; i < pkt_size; i++) | 1760 | for (i = ETH_HLEN; i < pkt_size; i++) |
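With the 7.0 HSI the completion type is a multi-bit field, so the loopback check first masks out the type, requires an explicit fast-path completion and then rejects any error flags before trusting pkt_len. A standalone sketch of that gating logic; the bit positions are illustrative, not the real ETH_FAST_PATH_RX_CQE_TYPE layout.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DEMO_CQE_TYPE_MASK	0x3	/* low bits: completion type */
#define DEMO_CQE_TYPE_FAST	0x0
#define DEMO_RX_ERROR_FLAGS	0xf0	/* any error bit set => drop */

static bool demo_cqe_usable(uint8_t flags)
{
	uint8_t type = flags & DEMO_CQE_TYPE_MASK;

	return type == DEMO_CQE_TYPE_FAST && !(flags & DEMO_RX_ERROR_FLAGS);
}

int main(void)
{
	printf("clean fast-path CQE: %d\n", demo_cqe_usable(0x00));
	printf("slow-path CQE:       %d\n", demo_cqe_usable(0x01));
	printf("errored CQE:         %d\n", demo_cqe_usable(0x10));
	return 0;
}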
@@ -1674,7 +1780,7 @@ test_loopback_exit: | |||
1674 | return rc; | 1780 | return rc; |
1675 | } | 1781 | } |
1676 | 1782 | ||
1677 | static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up) | 1783 | static int bnx2x_test_loopback(struct bnx2x *bp) |
1678 | { | 1784 | { |
1679 | int rc = 0, res; | 1785 | int rc = 0, res; |
1680 | 1786 | ||
@@ -1687,13 +1793,13 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up) | |||
1687 | bnx2x_netif_stop(bp, 1); | 1793 | bnx2x_netif_stop(bp, 1); |
1688 | bnx2x_acquire_phy_lock(bp); | 1794 | bnx2x_acquire_phy_lock(bp); |
1689 | 1795 | ||
1690 | res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up); | 1796 | res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK); |
1691 | if (res) { | 1797 | if (res) { |
1692 | DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res); | 1798 | DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res); |
1693 | rc |= BNX2X_PHY_LOOPBACK_FAILED; | 1799 | rc |= BNX2X_PHY_LOOPBACK_FAILED; |
1694 | } | 1800 | } |
1695 | 1801 | ||
1696 | res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up); | 1802 | res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK); |
1697 | if (res) { | 1803 | if (res) { |
1698 | DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res); | 1804 | DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res); |
1699 | rc |= BNX2X_MAC_LOOPBACK_FAILED; | 1805 | rc |= BNX2X_MAC_LOOPBACK_FAILED; |
@@ -1765,39 +1871,20 @@ test_nvram_exit: | |||
1765 | return rc; | 1871 | return rc; |
1766 | } | 1872 | } |
1767 | 1873 | ||
1874 | /* Send an EMPTY ramrod on the first queue */ | ||
1768 | static int bnx2x_test_intr(struct bnx2x *bp) | 1875 | static int bnx2x_test_intr(struct bnx2x *bp) |
1769 | { | 1876 | { |
1770 | struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); | 1877 | struct bnx2x_queue_state_params params = {0}; |
1771 | int i, rc; | ||
1772 | 1878 | ||
1773 | if (!netif_running(bp->dev)) | 1879 | if (!netif_running(bp->dev)) |
1774 | return -ENODEV; | 1880 | return -ENODEV; |
1775 | 1881 | ||
1776 | config->hdr.length = 0; | 1882 | params.q_obj = &bp->fp->q_obj; |
1777 | if (CHIP_IS_E1(bp)) | 1883 | params.cmd = BNX2X_Q_CMD_EMPTY; |
1778 | config->hdr.offset = (BP_PORT(bp) ? 32 : 0); | ||
1779 | else | ||
1780 | config->hdr.offset = BP_FUNC(bp); | ||
1781 | config->hdr.client_id = bp->fp->cl_id; | ||
1782 | config->hdr.reserved1 = 0; | ||
1783 | |||
1784 | bp->set_mac_pending = 1; | ||
1785 | smp_wmb(); | ||
1786 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, | ||
1787 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), | ||
1788 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1); | ||
1789 | if (rc == 0) { | ||
1790 | for (i = 0; i < 10; i++) { | ||
1791 | if (!bp->set_mac_pending) | ||
1792 | break; | ||
1793 | smp_rmb(); | ||
1794 | msleep_interruptible(10); | ||
1795 | } | ||
1796 | if (i == 10) | ||
1797 | rc = -ENODEV; | ||
1798 | } | ||
1799 | 1884 | ||
1800 | return rc; | 1885 | __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); |
1886 | |||
1887 | return bnx2x_queue_state_change(bp, ¶ms); | ||
1801 | } | 1888 | } |
1802 | 1889 | ||
1803 | static void bnx2x_self_test(struct net_device *dev, | 1890 | static void bnx2x_self_test(struct net_device *dev, |
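bnx2x_test_intr() no longer hand-rolls a SET_MAC ramrod plus a polling loop; it sends an EMPTY command through the new queue state machine and lets RAMROD_COMP_WAIT block until the completion arrives, so a zero return already proves the slow-path interrupt path is alive. A hedged restatement of that pattern with annotations; the wrapper name is hypothetical, the types and flags are the ones this patch introduces.

#include "bnx2x.h"	/* driver-internal; brings in the queue object types */

static int demo_check_slowpath_irq(struct bnx2x *bp)
{
	struct bnx2x_queue_state_params params = {0};

	params.q_obj = &bp->fp->q_obj;		/* leading queue's state object */
	params.cmd = BNX2X_Q_CMD_EMPTY;		/* ramrod that does no work */
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	/* 0 only if the EMPTY ramrod completed, i.e. the IRQ path works */
	return bnx2x_queue_state_change(bp, &params);
}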
@@ -1836,7 +1923,7 @@ static void bnx2x_self_test(struct net_device *dev, | |||
1836 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | 1923 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); |
1837 | bnx2x_nic_load(bp, LOAD_DIAG); | 1924 | bnx2x_nic_load(bp, LOAD_DIAG); |
1838 | /* wait until link state is restored */ | 1925 | /* wait until link state is restored */ |
1839 | bnx2x_wait_for_link(bp, link_up, is_serdes); | 1926 | bnx2x_wait_for_link(bp, 1, is_serdes); |
1840 | 1927 | ||
1841 | if (bnx2x_test_registers(bp) != 0) { | 1928 | if (bnx2x_test_registers(bp) != 0) { |
1842 | buf[0] = 1; | 1929 | buf[0] = 1; |
@@ -1847,7 +1934,7 @@ static void bnx2x_self_test(struct net_device *dev, | |||
1847 | etest->flags |= ETH_TEST_FL_FAILED; | 1934 | etest->flags |= ETH_TEST_FL_FAILED; |
1848 | } | 1935 | } |
1849 | 1936 | ||
1850 | buf[2] = bnx2x_test_loopback(bp, link_up); | 1937 | buf[2] = bnx2x_test_loopback(bp); |
1851 | if (buf[2] != 0) | 1938 | if (buf[2] != 0) |
1852 | etest->flags |= ETH_TEST_FL_FAILED; | 1939 | etest->flags |= ETH_TEST_FL_FAILED; |
1853 | 1940 | ||
@@ -1885,6 +1972,14 @@ static void bnx2x_self_test(struct net_device *dev, | |||
1885 | #define IS_MF_MODE_STAT(bp) \ | 1972 | #define IS_MF_MODE_STAT(bp) \ |
1886 | (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) | 1973 | (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) |
1887 | 1974 | ||
1975 | /* ethtool statistics are displayed for all regular ethernet queues and the | ||
1976 | * fcoe L2 queue if not disabled | ||
1977 | */ | ||
1978 | static inline int bnx2x_num_stat_queues(struct bnx2x *bp) | ||
1979 | { | ||
1980 | return BNX2X_NUM_ETH_QUEUES(bp); | ||
1981 | } | ||
1982 | |||
1888 | static int bnx2x_get_sset_count(struct net_device *dev, int stringset) | 1983 | static int bnx2x_get_sset_count(struct net_device *dev, int stringset) |
1889 | { | 1984 | { |
1890 | struct bnx2x *bp = netdev_priv(dev); | 1985 | struct bnx2x *bp = netdev_priv(dev); |
@@ -1893,7 +1988,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset) | |||
1893 | switch (stringset) { | 1988 | switch (stringset) { |
1894 | case ETH_SS_STATS: | 1989 | case ETH_SS_STATS: |
1895 | if (is_multi(bp)) { | 1990 | if (is_multi(bp)) { |
1896 | num_stats = BNX2X_NUM_STAT_QUEUES(bp) * | 1991 | num_stats = bnx2x_num_stat_queues(bp) * |
1897 | BNX2X_NUM_Q_STATS; | 1992 | BNX2X_NUM_Q_STATS; |
1898 | if (!IS_MF_MODE_STAT(bp)) | 1993 | if (!IS_MF_MODE_STAT(bp)) |
1899 | num_stats += BNX2X_NUM_STATS; | 1994 | num_stats += BNX2X_NUM_STATS; |
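The count computed here is simply queues times per-queue counters, plus the global set when function statistics are shown: with, say, 6 ETH queues, 16 entries in bnx2x_q_stats_arr and 46 in bnx2x_stats_arr (hypothetical sizes), ETH_SS_STATS would report 6 * 16 + 46 = 142 strings and values.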
@@ -1926,14 +2021,9 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) | |||
1926 | case ETH_SS_STATS: | 2021 | case ETH_SS_STATS: |
1927 | if (is_multi(bp)) { | 2022 | if (is_multi(bp)) { |
1928 | k = 0; | 2023 | k = 0; |
1929 | for_each_napi_queue(bp, i) { | 2024 | for_each_eth_queue(bp, i) { |
1930 | memset(queue_name, 0, sizeof(queue_name)); | 2025 | memset(queue_name, 0, sizeof(queue_name)); |
1931 | 2026 | sprintf(queue_name, "%d", i); | |
1932 | if (IS_FCOE_IDX(i)) | ||
1933 | sprintf(queue_name, "fcoe"); | ||
1934 | else | ||
1935 | sprintf(queue_name, "%d", i); | ||
1936 | |||
1937 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) | 2027 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) |
1938 | snprintf(buf + (k + j)*ETH_GSTRING_LEN, | 2028 | snprintf(buf + (k + j)*ETH_GSTRING_LEN, |
1939 | ETH_GSTRING_LEN, | 2029 | ETH_GSTRING_LEN, |
@@ -1972,7 +2062,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, | |||
1972 | 2062 | ||
1973 | if (is_multi(bp)) { | 2063 | if (is_multi(bp)) { |
1974 | k = 0; | 2064 | k = 0; |
1975 | for_each_napi_queue(bp, i) { | 2065 | for_each_eth_queue(bp, i) { |
1976 | hw_stats = (u32 *)&bp->fp[i].eth_q_stats; | 2066 | hw_stats = (u32 *)&bp->fp[i].eth_q_stats; |
1977 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { | 2067 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { |
1978 | if (bnx2x_q_stats_arr[j].size == 0) { | 2068 | if (bnx2x_q_stats_arr[j].size == 0) { |
@@ -2090,14 +2180,30 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, | |||
2090 | { | 2180 | { |
2091 | struct bnx2x *bp = netdev_priv(dev); | 2181 | struct bnx2x *bp = netdev_priv(dev); |
2092 | size_t copy_size = | 2182 | size_t copy_size = |
2093 | min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE); | 2183 | min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE); |
2184 | u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; | ||
2185 | size_t i; | ||
2094 | 2186 | ||
2095 | if (bp->multi_mode == ETH_RSS_MODE_DISABLED) | 2187 | if (bp->multi_mode == ETH_RSS_MODE_DISABLED) |
2096 | return -EOPNOTSUPP; | 2188 | return -EOPNOTSUPP; |
2097 | 2189 | ||
2098 | indir->size = TSTORM_INDIRECTION_TABLE_SIZE; | 2190 | /* Get the current configuration of the RSS indirection table */ |
2099 | memcpy(indir->ring_index, bp->rx_indir_table, | 2191 | bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table); |
2100 | copy_size * sizeof(bp->rx_indir_table[0])); | 2192 | |
2193 | /* | ||
2194 | * We can't use a memcpy() as an internal storage of an | ||
2195 | * indirection table is a u8 array while indir->ring_index | ||
2196 | * points to an array of u32. | ||
2197 | * | ||
2198 | * Indirection table contains the FW Client IDs, so we need to | ||
2199 | * align the returned table to the Client ID of the leading RSS | ||
2200 | * queue. | ||
2201 | */ | ||
2202 | for (i = 0; i < copy_size; i++) | ||
2203 | indir->ring_index[i] = ind_table[i] - bp->fp->cl_id; | ||
2204 | |||
2205 | indir->size = T_ETH_INDIRECTION_TABLE_SIZE; | ||
2206 | |||
2101 | return 0; | 2207 | return 0; |
2102 | } | 2208 | } |
2103 | 2209 | ||
@@ -2106,21 +2212,33 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, | |||
2106 | { | 2212 | { |
2107 | struct bnx2x *bp = netdev_priv(dev); | 2213 | struct bnx2x *bp = netdev_priv(dev); |
2108 | size_t i; | 2214 | size_t i; |
2215 | u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; | ||
2216 | u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); | ||
2109 | 2217 | ||
2110 | if (bp->multi_mode == ETH_RSS_MODE_DISABLED) | 2218 | if (bp->multi_mode == ETH_RSS_MODE_DISABLED) |
2111 | return -EOPNOTSUPP; | 2219 | return -EOPNOTSUPP; |
2112 | 2220 | ||
2113 | /* Validate size and indices */ | 2221 | /* validate the size */ |
2114 | if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE) | 2222 | if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE) |
2115 | return -EINVAL; | 2223 | return -EINVAL; |
2116 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) | 2224 | |
2117 | if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp)) | 2225 | for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { |
2226 | /* validate the indices */ | ||
2227 | if (indir->ring_index[i] >= num_eth_queues) | ||
2118 | return -EINVAL; | 2228 | return -EINVAL; |
2229 | /* | ||
2230 | * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy() | ||
2231 | * as an internal storage of an indirection table is a u8 array | ||
2232 | * while indir->ring_index points to an array of u32. | ||
2233 | * | ||
2234 | * Indirection table contains the FW Client IDs, so we need to | ||
2235 | * align the received table to the Client ID of the leading RSS | ||
2236 | * queue | ||
2237 | */ | ||
2238 | ind_table[i] = indir->ring_index[i] + bp->fp->cl_id; | ||
2239 | } | ||
2119 | 2240 | ||
2120 | memcpy(bp->rx_indir_table, indir->ring_index, | 2241 | return bnx2x_config_rss_pf(bp, ind_table, false); |
2121 | indir->size * sizeof(bp->rx_indir_table[0])); | ||
2122 | bnx2x_push_indir_table(bp); | ||
2123 | return 0; | ||
2124 | } | 2242 | } |
2125 | 2243 | ||
2126 | static const struct ethtool_ops bnx2x_ethtool_ops = { | 2244 | static const struct ethtool_ops bnx2x_ethtool_ops = { |
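Both RSS indirection hooks now translate between ethtool's u32 ring indices and the u8 FW client IDs kept by the RSS object: "get" rebases each entry against the leading queue's client ID, and "set" validates the indices against the ETH queue count before adding the client ID back and programming the object. A standalone sketch of that round trip; the table size and client ID value are illustrative.

#include <stdio.h>
#include <stdint.h>

#define DEMO_IND_TABLE_SIZE	8	/* stands in for T_ETH_INDIRECTION_TABLE_SIZE */

static uint8_t fw_table[DEMO_IND_TABLE_SIZE];	/* what the FW object stores */
static const uint8_t leading_cl_id = 17;	/* e.g. bp->fp->cl_id */
static const uint32_t num_eth_queues = 4;

static void demo_get_indir(uint32_t *ring_index)
{
	for (int i = 0; i < DEMO_IND_TABLE_SIZE; i++)
		ring_index[i] = fw_table[i] - leading_cl_id;
}

static int demo_set_indir(const uint32_t *ring_index)
{
	for (int i = 0; i < DEMO_IND_TABLE_SIZE; i++) {
		if (ring_index[i] >= num_eth_queues)
			return -1;	/* -EINVAL in the driver */
		fw_table[i] = ring_index[i] + leading_cl_id;
	}
	return 0;		/* driver would program the RSS object here */
}

int main(void)
{
	uint32_t tbl[DEMO_IND_TABLE_SIZE] = { 0, 1, 2, 3, 0, 1, 2, 3 };

	if (demo_set_indir(tbl) == 0) {
		demo_get_indir(tbl);
		printf("entry 0 maps back to queue %u (client %u)\n",
		       tbl[0], fw_table[0]);
	}
	return 0;
}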
diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h index 9fe367836a57..998652a1b858 100644 --- a/drivers/net/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/bnx2x/bnx2x_fw_defs.h | |||
@@ -10,249 +10,221 @@ | |||
10 | #ifndef BNX2X_FW_DEFS_H | 10 | #ifndef BNX2X_FW_DEFS_H |
11 | #define BNX2X_FW_DEFS_H | 11 | #define BNX2X_FW_DEFS_H |
12 | 12 | ||
13 | #define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[142].base) | 13 | #define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base) |
14 | #define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ | 14 | #define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ |
15 | (IRO[141].base + ((assertListEntry) * IRO[141].m1)) | 15 | (IRO[147].base + ((assertListEntry) * IRO[147].m1)) |
16 | #define CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \ | ||
17 | (IRO[144].base + ((pfId) * IRO[144].m1)) | ||
18 | #define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \ | 16 | #define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \ |
19 | (IRO[149].base + (((pfId)>>1) * IRO[149].m1) + (((pfId)&1) * \ | 17 | (IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \ |
20 | IRO[149].m2)) | 18 | IRO[153].m2)) |
21 | #define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \ | 19 | #define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \ |
22 | (IRO[150].base + (((pfId)>>1) * IRO[150].m1) + (((pfId)&1) * \ | 20 | (IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \ |
23 | IRO[150].m2)) | 21 | IRO[154].m2)) |
24 | #define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \ | 22 | #define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \ |
25 | (IRO[156].base + ((funcId) * IRO[156].m1)) | 23 | (IRO[159].base + ((funcId) * IRO[159].m1)) |
26 | #define CSTORM_FUNC_EN_OFFSET(funcId) \ | 24 | #define CSTORM_FUNC_EN_OFFSET(funcId) \ |
27 | (IRO[146].base + ((funcId) * IRO[146].m1)) | 25 | (IRO[149].base + ((funcId) * IRO[149].m1)) |
28 | #define CSTORM_FUNCTION_MODE_OFFSET (IRO[153].base) | 26 | #define CSTORM_IGU_MODE_OFFSET (IRO[157].base) |
29 | #define CSTORM_IGU_MODE_OFFSET (IRO[154].base) | ||
30 | #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ | 27 | #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ |
31 | (IRO[311].base + ((pfId) * IRO[311].m1)) | 28 | (IRO[315].base + ((pfId) * IRO[315].m1)) |
32 | #define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ | 29 | #define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ |
33 | (IRO[312].base + ((pfId) * IRO[312].m1)) | 30 | (IRO[316].base + ((pfId) * IRO[316].m1)) |
34 | #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ | 31 | #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ |
35 | (IRO[304].base + ((pfId) * IRO[304].m1) + ((iscsiEqId) * \ | 32 | (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2)) |
36 | IRO[304].m2)) | 33 | #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ |
37 | #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ | 34 | (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2)) |
38 | (IRO[306].base + ((pfId) * IRO[306].m1) + ((iscsiEqId) * \ | 35 | #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ |
39 | IRO[306].m2)) | 36 | (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) |
40 | #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ | 37 | #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ |
41 | (IRO[305].base + ((pfId) * IRO[305].m1) + ((iscsiEqId) * \ | 38 | (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2)) |
42 | IRO[305].m2)) | 39 | #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ |
43 | #define \ | 40 | (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2)) |
44 | CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ | 41 | #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ |
45 | (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * \ | 42 | (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) |
46 | IRO[307].m2)) | 43 | #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ |
47 | #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ | 44 | (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) |
48 | (IRO[303].base + ((pfId) * IRO[303].m1) + ((iscsiEqId) * \ | ||
49 | IRO[303].m2)) | ||
50 | #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ | ||
51 | (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * \ | ||
52 | IRO[309].m2)) | ||
53 | #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ | ||
54 | (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * \ | ||
55 | IRO[308].m2)) | ||
56 | #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ | 45 | #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ |
57 | (IRO[310].base + ((pfId) * IRO[310].m1)) | 46 | (IRO[314].base + ((pfId) * IRO[314].m1)) |
58 | #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ | 47 | #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ |
59 | (IRO[302].base + ((pfId) * IRO[302].m1)) | 48 | (IRO[306].base + ((pfId) * IRO[306].m1)) |
60 | #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ | 49 | #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ |
61 | (IRO[301].base + ((pfId) * IRO[301].m1)) | 50 | (IRO[305].base + ((pfId) * IRO[305].m1)) |
62 | #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ | 51 | #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ |
63 | (IRO[300].base + ((pfId) * IRO[300].m1)) | 52 | (IRO[304].base + ((pfId) * IRO[304].m1)) |
64 | #define CSTORM_PATH_ID_OFFSET (IRO[159].base) | 53 | #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ |
54 | (IRO[151].base + ((funcId) * IRO[151].m1)) | ||
65 | #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ | 55 | #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ |
66 | (IRO[137].base + ((pfId) * IRO[137].m1)) | 56 | (IRO[142].base + ((pfId) * IRO[142].m1)) |
57 | #define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \ | ||
58 | (IRO[143].base + ((pfId) * IRO[143].m1)) | ||
67 | #define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \ | 59 | #define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \ |
68 | (IRO[136].base + ((pfId) * IRO[136].m1)) | 60 | (IRO[141].base + ((pfId) * IRO[141].m1)) |
69 | #define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[136].size) | 61 | #define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size) |
70 | #define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \ | 62 | #define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \ |
71 | (IRO[138].base + ((pfId) * IRO[138].m1)) | 63 | (IRO[144].base + ((pfId) * IRO[144].m1)) |
72 | #define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[138].size) | 64 | #define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size) |
73 | #define CSTORM_STATS_FLAGS_OFFSET(pfId) \ | 65 | #define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \ |
74 | (IRO[143].base + ((pfId) * IRO[143].m1)) | 66 | (IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2)) |
75 | #define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \ | 67 | #define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \ |
76 | (IRO[129].base + ((sbId) * IRO[129].m1)) | 68 | (IRO[133].base + ((sbId) * IRO[133].m1)) |
69 | #define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \ | ||
70 | (IRO[134].base + ((sbId) * IRO[134].m1)) | ||
71 | #define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \ | ||
72 | (IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2)) | ||
77 | #define CSTORM_STATUS_BLOCK_OFFSET(sbId) \ | 73 | #define CSTORM_STATUS_BLOCK_OFFSET(sbId) \ |
78 | (IRO[128].base + ((sbId) * IRO[128].m1)) | ||
79 | #define CSTORM_STATUS_BLOCK_SIZE (IRO[128].size) | ||
80 | #define CSTORM_SYNC_BLOCK_OFFSET(sbId) \ | ||
81 | (IRO[132].base + ((sbId) * IRO[132].m1)) | 74 | (IRO[132].base + ((sbId) * IRO[132].m1)) |
82 | #define CSTORM_SYNC_BLOCK_SIZE (IRO[132].size) | 75 | #define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size) |
76 | #define CSTORM_SYNC_BLOCK_OFFSET(sbId) \ | ||
77 | (IRO[137].base + ((sbId) * IRO[137].m1)) | ||
78 | #define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size) | ||
83 | #define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \ | 79 | #define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \ |
84 | (IRO[151].base + ((vfId) * IRO[151].m1)) | 80 | (IRO[155].base + ((vfId) * IRO[155].m1)) |
85 | #define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \ | 81 | #define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \ |
86 | (IRO[152].base + ((vfId) * IRO[152].m1)) | 82 | (IRO[156].base + ((vfId) * IRO[156].m1)) |
87 | #define CSTORM_VF_TO_PF_OFFSET(funcId) \ | 83 | #define CSTORM_VF_TO_PF_OFFSET(funcId) \ |
88 | (IRO[147].base + ((funcId) * IRO[147].m1)) | 84 | (IRO[150].base + ((funcId) * IRO[150].m1)) |
89 | #define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[199].base) | 85 | #define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base) |
90 | #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ | 86 | #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ |
91 | (IRO[198].base + ((pfId) * IRO[198].m1)) | 87 | (IRO[203].base + ((pfId) * IRO[203].m1)) |
92 | #define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[99].base) | 88 | #define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) |
93 | #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ | 89 | #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ |
94 | (IRO[98].base + ((assertListEntry) * IRO[98].m1)) | 90 | (IRO[101].base + ((assertListEntry) * IRO[101].m1)) |
95 | #define TSTORM_CLIENT_CONFIG_OFFSET(portId, clientId) \ | 91 | #define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base) |
96 | (IRO[197].base + ((portId) * IRO[197].m1) + ((clientId) * \ | ||
97 | IRO[197].m2)) | ||
98 | #define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[104].base) | ||
99 | #define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \ | 92 | #define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \ |
100 | (IRO[105].base) | 93 | (IRO[108].base) |
101 | #define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \ | ||
102 | (IRO[96].base + ((pfId) * IRO[96].m1)) | ||
103 | #define TSTORM_FUNC_EN_OFFSET(funcId) \ | ||
104 | (IRO[101].base + ((funcId) * IRO[101].m1)) | ||
105 | #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ | 94 | #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ |
106 | (IRO[195].base + ((pfId) * IRO[195].m1)) | 95 | (IRO[201].base + ((pfId) * IRO[201].m1)) |
107 | #define TSTORM_FUNCTION_MODE_OFFSET (IRO[103].base) | 96 | #define TSTORM_FUNC_EN_OFFSET(funcId) \ |
108 | #define TSTORM_INDIRECTION_TABLE_OFFSET(pfId) \ | 97 | (IRO[103].base + ((funcId) * IRO[103].m1)) |
109 | (IRO[91].base + ((pfId) * IRO[91].m1)) | ||
110 | #define TSTORM_INDIRECTION_TABLE_SIZE (IRO[91].size) | ||
111 | #define \ | ||
112 | TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfId, iscsiConBufPblEntry) \ | ||
113 | (IRO[260].base + ((pfId) * IRO[260].m1) + ((iscsiConBufPblEntry) \ | ||
114 | * IRO[260].m2)) | ||
115 | #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ | 98 | #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ |
116 | (IRO[264].base + ((pfId) * IRO[264].m1)) | 99 | (IRO[271].base + ((pfId) * IRO[271].m1)) |
117 | #define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ | 100 | #define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ |
118 | (IRO[265].base + ((pfId) * IRO[265].m1)) | 101 | (IRO[272].base + ((pfId) * IRO[272].m1)) |
119 | #define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ | 102 | #define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ |
120 | (IRO[266].base + ((pfId) * IRO[266].m1)) | 103 | (IRO[273].base + ((pfId) * IRO[273].m1)) |
121 | #define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ | 104 | #define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ |
122 | (IRO[267].base + ((pfId) * IRO[267].m1)) | 105 | (IRO[274].base + ((pfId) * IRO[274].m1)) |
123 | #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ | 106 | #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ |
124 | (IRO[263].base + ((pfId) * IRO[263].m1)) | 107 | (IRO[270].base + ((pfId) * IRO[270].m1)) |
125 | #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ | 108 | #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ |
126 | (IRO[262].base + ((pfId) * IRO[262].m1)) | 109 | (IRO[269].base + ((pfId) * IRO[269].m1)) |
127 | #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ | 110 | #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ |
128 | (IRO[261].base + ((pfId) * IRO[261].m1)) | 111 | (IRO[268].base + ((pfId) * IRO[268].m1)) |
129 | #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ | 112 | #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ |
130 | (IRO[259].base + ((pfId) * IRO[259].m1)) | 113 | (IRO[267].base + ((pfId) * IRO[267].m1)) |
131 | #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ | 114 | #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ |
132 | (IRO[269].base + ((pfId) * IRO[269].m1)) | 115 | (IRO[276].base + ((pfId) * IRO[276].m1)) |
133 | #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ | 116 | #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ |
134 | (IRO[256].base + ((pfId) * IRO[256].m1)) | 117 | (IRO[263].base + ((pfId) * IRO[263].m1)) |
135 | #define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ | 118 | #define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ |
136 | (IRO[257].base + ((pfId) * IRO[257].m1)) | 119 | (IRO[264].base + ((pfId) * IRO[264].m1)) |
120 | #define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ | ||
121 | (IRO[265].base + ((pfId) * IRO[265].m1)) | ||
137 | #define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ | 122 | #define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ |
138 | (IRO[258].base + ((pfId) * IRO[258].m1)) | 123 | (IRO[266].base + ((pfId) * IRO[266].m1)) |
139 | #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ | 124 | #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ |
140 | (IRO[196].base + ((pfId) * IRO[196].m1)) | 125 | (IRO[202].base + ((pfId) * IRO[202].m1)) |
141 | #define TSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, tStatCntId) \ | 126 | #define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ |
142 | (IRO[100].base + ((portId) * IRO[100].m1) + ((tStatCntId) * \ | 127 | (IRO[105].base + ((funcId) * IRO[105].m1)) |
143 | IRO[100].m2)) | ||
144 | #define TSTORM_STATS_FLAGS_OFFSET(pfId) \ | ||
145 | (IRO[95].base + ((pfId) * IRO[95].m1)) | ||
146 | #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ | 128 | #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ |
147 | (IRO[211].base + ((pfId) * IRO[211].m1)) | 129 | (IRO[216].base + ((pfId) * IRO[216].m1)) |
148 | #define TSTORM_VF_TO_PF_OFFSET(funcId) \ | 130 | #define TSTORM_VF_TO_PF_OFFSET(funcId) \ |
149 | (IRO[102].base + ((funcId) * IRO[102].m1)) | 131 | (IRO[104].base + ((funcId) * IRO[104].m1)) |
150 | #define USTORM_AGG_DATA_OFFSET (IRO[201].base) | 132 | #define USTORM_AGG_DATA_OFFSET (IRO[206].base) |
151 | #define USTORM_AGG_DATA_SIZE (IRO[201].size) | 133 | #define USTORM_AGG_DATA_SIZE (IRO[206].size) |
152 | #define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[170].base) | 134 | #define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base) |
153 | #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ | 135 | #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ |
154 | (IRO[169].base + ((assertListEntry) * IRO[169].m1)) | 136 | (IRO[176].base + ((assertListEntry) * IRO[176].m1)) |
137 | #define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \ | ||
138 | (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \ | ||
139 | IRO[205].m2)) | ||
155 | #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ | 140 | #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ |
156 | (IRO[178].base + ((portId) * IRO[178].m1)) | 141 | (IRO[183].base + ((portId) * IRO[183].m1)) |
157 | #define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \ | ||
158 | (IRO[172].base + ((pfId) * IRO[172].m1)) | ||
159 | #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ | 142 | #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ |
160 | (IRO[313].base + ((pfId) * IRO[313].m1)) | 143 | (IRO[317].base + ((pfId) * IRO[317].m1)) |
161 | #define USTORM_FUNC_EN_OFFSET(funcId) \ | 144 | #define USTORM_FUNC_EN_OFFSET(funcId) \ |
162 | (IRO[174].base + ((funcId) * IRO[174].m1)) | 145 | (IRO[178].base + ((funcId) * IRO[178].m1)) |
163 | #define USTORM_FUNCTION_MODE_OFFSET (IRO[177].base) | ||
164 | #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ | 146 | #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ |
165 | (IRO[277].base + ((pfId) * IRO[277].m1)) | 147 | (IRO[281].base + ((pfId) * IRO[281].m1)) |
166 | #define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ | 148 | #define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ |
167 | (IRO[278].base + ((pfId) * IRO[278].m1)) | ||
168 | #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ | ||
169 | (IRO[282].base + ((pfId) * IRO[282].m1)) | 149 | (IRO[282].base + ((pfId) * IRO[282].m1)) |
150 | #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ | ||
151 | (IRO[286].base + ((pfId) * IRO[286].m1)) | ||
170 | #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ | 152 | #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ |
171 | (IRO[279].base + ((pfId) * IRO[279].m1)) | 153 | (IRO[283].base + ((pfId) * IRO[283].m1)) |
172 | #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ | 154 | #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ |
173 | (IRO[275].base + ((pfId) * IRO[275].m1)) | 155 | (IRO[279].base + ((pfId) * IRO[279].m1)) |
174 | #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ | 156 | #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ |
175 | (IRO[274].base + ((pfId) * IRO[274].m1)) | 157 | (IRO[278].base + ((pfId) * IRO[278].m1)) |
176 | #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ | 158 | #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ |
177 | (IRO[273].base + ((pfId) * IRO[273].m1)) | 159 | (IRO[277].base + ((pfId) * IRO[277].m1)) |
178 | #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ | 160 | #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ |
179 | (IRO[276].base + ((pfId) * IRO[276].m1)) | ||
180 | #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ | ||
181 | (IRO[280].base + ((pfId) * IRO[280].m1)) | 161 | (IRO[280].base + ((pfId) * IRO[280].m1)) |
162 | #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ | ||
163 | (IRO[284].base + ((pfId) * IRO[284].m1)) | ||
182 | #define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ | 164 | #define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ |
183 | (IRO[281].base + ((pfId) * IRO[281].m1)) | 165 | (IRO[285].base + ((pfId) * IRO[285].m1)) |
184 | #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ | 166 | #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ |
185 | (IRO[176].base + ((pfId) * IRO[176].m1)) | 167 | (IRO[182].base + ((pfId) * IRO[182].m1)) |
186 | #define USTORM_PER_COUNTER_ID_STATS_OFFSET(portId, uStatCntId) \ | 168 | #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ |
187 | (IRO[173].base + ((portId) * IRO[173].m1) + ((uStatCntId) * \ | 169 | (IRO[180].base + ((funcId) * IRO[180].m1)) |
188 | IRO[173].m2)) | 170 | #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ |
189 | #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ | 171 | (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \ |
190 | (IRO[204].base + ((portId) * IRO[204].m1) + ((clientId) * \ | 172 | IRO[209].m2)) |
191 | IRO[204].m2)) | ||
192 | #define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \ | 173 | #define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \ |
193 | (IRO[205].base + ((qzoneId) * IRO[205].m1)) | 174 | (IRO[210].base + ((qzoneId) * IRO[210].m1)) |
194 | #define USTORM_STATS_FLAGS_OFFSET(pfId) \ | 175 | #define USTORM_TPA_BTR_OFFSET (IRO[207].base) |
195 | (IRO[171].base + ((pfId) * IRO[171].m1)) | 176 | #define USTORM_TPA_BTR_SIZE (IRO[207].size) |
196 | #define USTORM_TPA_BTR_OFFSET (IRO[202].base) | ||
197 | #define USTORM_TPA_BTR_SIZE (IRO[202].size) | ||
198 | #define USTORM_VF_TO_PF_OFFSET(funcId) \ | 177 | #define USTORM_VF_TO_PF_OFFSET(funcId) \ |
199 | (IRO[175].base + ((funcId) * IRO[175].m1)) | 178 | (IRO[179].base + ((funcId) * IRO[179].m1)) |
200 | #define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[59].base) | 179 | #define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base) |
201 | #define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[58].base) | 180 | #define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base) |
202 | #define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[54].base) | 181 | #define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base) |
203 | #define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ | 182 | #define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ |
204 | (IRO[53].base + ((assertListEntry) * IRO[53].m1)) | 183 | (IRO[50].base + ((assertListEntry) * IRO[50].m1)) |
205 | #define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \ | 184 | #define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \ |
206 | (IRO[47].base + ((portId) * IRO[47].m1)) | 185 | (IRO[43].base + ((portId) * IRO[43].m1)) |
207 | #define XSTORM_E1HOV_OFFSET(pfId) \ | ||
208 | (IRO[55].base + ((pfId) * IRO[55].m1)) | ||
209 | #define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \ | ||
210 | (IRO[45].base + ((pfId) * IRO[45].m1)) | ||
211 | #define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \ | 186 | #define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \ |
212 | (IRO[49].base + ((pfId) * IRO[49].m1)) | 187 | (IRO[45].base + ((pfId) * IRO[45].m1)) |
213 | #define XSTORM_FUNC_EN_OFFSET(funcId) \ | 188 | #define XSTORM_FUNC_EN_OFFSET(funcId) \ |
214 | (IRO[51].base + ((funcId) * IRO[51].m1)) | 189 | (IRO[47].base + ((funcId) * IRO[47].m1)) |
215 | #define XSTORM_FUNCTION_MODE_OFFSET (IRO[56].base) | ||
216 | #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ | 190 | #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ |
217 | (IRO[290].base + ((pfId) * IRO[290].m1)) | 191 | (IRO[294].base + ((pfId) * IRO[294].m1)) |
218 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ | 192 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ |
219 | (IRO[293].base + ((pfId) * IRO[293].m1)) | 193 | (IRO[297].base + ((pfId) * IRO[297].m1)) |
220 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ | 194 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ |
221 | (IRO[294].base + ((pfId) * IRO[294].m1)) | 195 | (IRO[298].base + ((pfId) * IRO[298].m1)) |
222 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ | 196 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ |
223 | (IRO[295].base + ((pfId) * IRO[295].m1)) | 197 | (IRO[299].base + ((pfId) * IRO[299].m1)) |
224 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ | 198 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ |
225 | (IRO[296].base + ((pfId) * IRO[296].m1)) | 199 | (IRO[300].base + ((pfId) * IRO[300].m1)) |
226 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ | 200 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ |
227 | (IRO[297].base + ((pfId) * IRO[297].m1)) | 201 | (IRO[301].base + ((pfId) * IRO[301].m1)) |
228 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ | 202 | #define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ |
229 | (IRO[298].base + ((pfId) * IRO[298].m1)) | 203 | (IRO[302].base + ((pfId) * IRO[302].m1)) |
230 | #define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ | 204 | #define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ |
231 | (IRO[299].base + ((pfId) * IRO[299].m1)) | 205 | (IRO[303].base + ((pfId) * IRO[303].m1)) |
232 | #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ | 206 | #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ |
233 | (IRO[289].base + ((pfId) * IRO[289].m1)) | 207 | (IRO[293].base + ((pfId) * IRO[293].m1)) |
234 | #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ | 208 | #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ |
235 | (IRO[288].base + ((pfId) * IRO[288].m1)) | 209 | (IRO[292].base + ((pfId) * IRO[292].m1)) |
236 | #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ | 210 | #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ |
237 | (IRO[287].base + ((pfId) * IRO[287].m1)) | 211 | (IRO[291].base + ((pfId) * IRO[291].m1)) |
238 | #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ | 212 | #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ |
239 | (IRO[292].base + ((pfId) * IRO[292].m1)) | 213 | (IRO[296].base + ((pfId) * IRO[296].m1)) |
240 | #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ | 214 | #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ |
241 | (IRO[291].base + ((pfId) * IRO[291].m1)) | 215 | (IRO[295].base + ((pfId) * IRO[295].m1)) |
242 | #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ | 216 | #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ |
243 | (IRO[286].base + ((pfId) * IRO[286].m1)) | 217 | (IRO[290].base + ((pfId) * IRO[290].m1)) |
244 | #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ | 218 | #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ |
245 | (IRO[285].base + ((pfId) * IRO[285].m1)) | 219 | (IRO[289].base + ((pfId) * IRO[289].m1)) |
246 | #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ | 220 | #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ |
247 | (IRO[284].base + ((pfId) * IRO[284].m1)) | 221 | (IRO[288].base + ((pfId) * IRO[288].m1)) |
248 | #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ | 222 | #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ |
249 | (IRO[283].base + ((pfId) * IRO[283].m1)) | 223 | (IRO[287].base + ((pfId) * IRO[287].m1)) |
250 | #define XSTORM_PATH_ID_OFFSET (IRO[65].base) | ||
251 | #define XSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, xStatCntId) \ | ||
252 | (IRO[50].base + ((portId) * IRO[50].m1) + ((xStatCntId) * \ | ||
253 | IRO[50].m2)) | ||
254 | #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ | 224 | #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ |
255 | (IRO[48].base + ((pfId) * IRO[48].m1)) | 225 | (IRO[44].base + ((pfId) * IRO[44].m1)) |
226 | #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ | ||
227 | (IRO[49].base + ((funcId) * IRO[49].m1)) | ||
256 | #define XSTORM_SPQ_DATA_OFFSET(funcId) \ | 228 | #define XSTORM_SPQ_DATA_OFFSET(funcId) \ |
257 | (IRO[32].base + ((funcId) * IRO[32].m1)) | 229 | (IRO[32].base + ((funcId) * IRO[32].m1)) |
258 | #define XSTORM_SPQ_DATA_SIZE (IRO[32].size) | 230 | #define XSTORM_SPQ_DATA_SIZE (IRO[32].size) |
@@ -260,42 +232,37 @@ | |||
260 | (IRO[30].base + ((funcId) * IRO[30].m1)) | 232 | (IRO[30].base + ((funcId) * IRO[30].m1)) |
261 | #define XSTORM_SPQ_PROD_OFFSET(funcId) \ | 233 | #define XSTORM_SPQ_PROD_OFFSET(funcId) \ |
262 | (IRO[31].base + ((funcId) * IRO[31].m1)) | 234 | (IRO[31].base + ((funcId) * IRO[31].m1)) |
263 | #define XSTORM_STATS_FLAGS_OFFSET(pfId) \ | ||
264 | (IRO[43].base + ((pfId) * IRO[43].m1)) | ||
265 | #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \ | 235 | #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \ |
266 | (IRO[206].base + ((portId) * IRO[206].m1)) | 236 | (IRO[211].base + ((portId) * IRO[211].m1)) |
267 | #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ | 237 | #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ |
268 | (IRO[207].base + ((portId) * IRO[207].m1)) | 238 | (IRO[212].base + ((portId) * IRO[212].m1)) |
269 | #define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \ | 239 | #define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \ |
270 | (IRO[209].base + (((pfId)>>1) * IRO[209].m1) + (((pfId)&1) * \ | 240 | (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \ |
271 | IRO[209].m2)) | 241 | IRO[214].m2)) |
272 | #define XSTORM_VF_TO_PF_OFFSET(funcId) \ | 242 | #define XSTORM_VF_TO_PF_OFFSET(funcId) \ |
273 | (IRO[52].base + ((funcId) * IRO[52].m1)) | 243 | (IRO[48].base + ((funcId) * IRO[48].m1)) |
274 | #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 | 244 | #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 |
275 | 245 | ||
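The XSTORM_*_OFFSET macros above all share one pattern: instead of hard-coded addresses in the storm internal RAM, each offset is resolved through a firmware-supplied IRO table entry (a base plus one or two per-index multipliers), which is what keeps the driver invariant when the FW memory layout moves between releases. A minimal sketch of that resolution, assuming a table entry shaped the way the macros imply; the field and helper names here are illustrative, not the driver's exact definitions.

#include <linux/types.h>

/* Illustrative IRO table entry; the real table is generated with the FW. */
struct iro_entry {
	u32 base;	/* base offset inside the storm RAM section */
	u16 m1;		/* multiplier for the first index (pfId/funcId/portId) */
	u16 m2;		/* multiplier for a second index, when one exists */
	u16 size;	/* size of the addressed element */
};

/* e.g. XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) above is equivalent to
 * iro_offset2(&IRO[214], pfId >> 1, pfId & 1)
 */
static inline u32 iro_offset2(const struct iro_entry *iro, u32 idx1, u32 idx2)
{
	return iro->base + idx1 * iro->m1 + idx2 * iro->m2;
}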
276 | /* RSS hash types */ | 246 | /** |
277 | #define DEFAULT_HASH_TYPE 0 | 247 | * This file defines HSI constants for the ETH flow |
278 | #define IPV4_HASH_TYPE 1 | 248 | */ |
279 | #define TCP_IPV4_HASH_TYPE 2 | 249 | #ifdef _EVEREST_MICROCODE |
280 | #define IPV6_HASH_TYPE 3 | 250 | #include "Microcode\Generated\DataTypes\eth_rx_bd.h" |
281 | #define TCP_IPV6_HASH_TYPE 4 | 251 | #include "Microcode\Generated\DataTypes\eth_tx_bd.h" |
282 | #define VLAN_PRI_HASH_TYPE 5 | 252 | #include "Microcode\Generated\DataTypes\eth_rx_cqe.h" |
283 | #define E1HOV_PRI_HASH_TYPE 6 | 253 | #include "Microcode\Generated\DataTypes\eth_rx_sge.h" |
284 | #define DSCP_HASH_TYPE 7 | 254 | #include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h" |
255 | #endif | ||
285 | 256 | ||
286 | 257 | ||
287 | /* Ethernet Ring parameters */ | 258 | /* Ethernet Ring parameters */ |
288 | #define X_ETH_LOCAL_RING_SIZE 13 | 259 | #define X_ETH_LOCAL_RING_SIZE 13 |
289 | #define FIRST_BD_IN_PKT 0 | 260 | #define FIRST_BD_IN_PKT 0 |
290 | #define PARSE_BD_INDEX 1 | 261 | #define PARSE_BD_INDEX 1 |
291 | #define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8)) | 262 | #define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8)) |
292 | #define U_ETH_NUM_OF_SGES_TO_FETCH 8 | 263 | #define U_ETH_NUM_OF_SGES_TO_FETCH 8 |
293 | #define U_ETH_MAX_SGES_FOR_PACKET 3 | 264 | #define U_ETH_MAX_SGES_FOR_PACKET 3 |
294 | 265 | ||
295 | /*Tx params*/ | ||
296 | #define X_ETH_NO_VLAN 0 | ||
297 | #define X_ETH_OUTBAND_VLAN 1 | ||
298 | #define X_ETH_INBAND_VLAN 2 | ||
299 | /* Rx ring params */ | 266 | /* Rx ring params */ |
300 | #define U_ETH_LOCAL_BD_RING_SIZE 8 | 267 | #define U_ETH_LOCAL_BD_RING_SIZE 8 |
301 | #define U_ETH_LOCAL_SGE_RING_SIZE 10 | 268 | #define U_ETH_LOCAL_SGE_RING_SIZE 10 |
@@ -311,79 +278,64 @@ | |||
311 | #define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8)) | 278 | #define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8)) |
312 | #define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8)) | 279 | #define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8)) |
313 | 280 | ||
314 | #define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1) | 281 | #define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1) |
315 | #define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1) | 282 | #define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1) |
316 | #define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1) | 283 | #define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1) |
317 | 284 | ||
318 | #define U_ETH_UNDEFINED_Q 0xFF | 285 | #define U_ETH_UNDEFINED_Q 0xFF |
319 | 286 | ||
320 | /* values of command IDs in the ramrod message */ | ||
321 | #define RAMROD_CMD_ID_ETH_UNUSED 0 | ||
322 | #define RAMROD_CMD_ID_ETH_CLIENT_SETUP 1 | ||
323 | #define RAMROD_CMD_ID_ETH_UPDATE 2 | ||
324 | #define RAMROD_CMD_ID_ETH_HALT 3 | ||
325 | #define RAMROD_CMD_ID_ETH_FORWARD_SETUP 4 | ||
326 | #define RAMROD_CMD_ID_ETH_ACTIVATE 5 | ||
327 | #define RAMROD_CMD_ID_ETH_DEACTIVATE 6 | ||
328 | #define RAMROD_CMD_ID_ETH_EMPTY 7 | ||
329 | #define RAMROD_CMD_ID_ETH_TERMINATE 8 | ||
330 | |||
331 | /* command values for set mac command */ | ||
332 | #define T_ETH_MAC_COMMAND_SET 0 | ||
333 | #define T_ETH_MAC_COMMAND_INVALIDATE 1 | ||
334 | |||
335 | #define T_ETH_INDIRECTION_TABLE_SIZE 128 | 287 | #define T_ETH_INDIRECTION_TABLE_SIZE 128 |
288 | #define T_ETH_RSS_KEY 10 | ||
289 | #define ETH_NUM_OF_RSS_ENGINES_E2 72 | ||
290 | |||
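T_ETH_INDIRECTION_TABLE_SIZE and T_ETH_RSS_KEY describe the RSS parameters the driver programs: a 128-entry indirection table and an RSS key of 10 entries (40 bytes if, as usual, each entry is a 32-bit word). A hedged sketch of the standard RSS dispatch, i.e. how a computed packet hash selects a queue through the indirection table; the helper name is made up for illustration.

#include <linux/types.h>

#define T_ETH_INDIRECTION_TABLE_SIZE	128

/* Classic RSS lookup: the low bits of the packet hash index the
 * indirection table, whose entry names the receive queue/client.
 */
static inline u8 rss_queue_for_hash(const u8 *ind_table, u32 rss_hash)
{
	return ind_table[rss_hash & (T_ETH_INDIRECTION_TABLE_SIZE - 1)];
}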
291 | #define FILTER_RULES_COUNT 16 | ||
292 | #define MULTICAST_RULES_COUNT 16 | ||
293 | #define CLASSIFY_RULES_COUNT 16 | ||
336 | 294 | ||
337 | /* The CRC32 seed used for the multicast address hash (reduction) */ | 295 | /* The CRC32 seed used for the multicast address hash (reduction) */ |
338 | #define T_ETH_CRC32_HASH_SEED 0x00000000 | 296 | #define ETH_CRC32_HASH_SEED 0x00000000 |
297 | |||
298 | #define ETH_CRC32_HASH_BIT_SIZE (8) | ||
299 | #define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1) | ||
339 | 300 | ||
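Together with ETH_NUM_OF_MCAST_BINS (256, defined further down), these constants describe the multicast approximation scheme: a multicast MAC is reduced by a CRC32 with the seed above to an ETH_CRC32_HASH_BIT_SIZE-bit bin number, i.e. one of 2^8 = 256 bins. A rough sketch of such a reduction; the exact CRC variant and which bits of the result the FW consumes are not spelled out by these defines, so treat this only as an illustration of the seed-and-mask shape.

#include <linux/crc32.h>
#include <linux/if_ether.h>
#include <linux/types.h>

#define ETH_CRC32_HASH_SEED	0x00000000
#define ETH_CRC32_HASH_BIT_SIZE	8
#define ETH_CRC32_HASH_MASK	((1 << ETH_CRC32_HASH_BIT_SIZE) - 1)

/* Illustrative only: reduce a 6-byte multicast MAC to one of 256 bins. */
static inline u8 mcast_bin_of_mac(const u8 *mac)
{
	return crc32_le(ETH_CRC32_HASH_SEED, mac, ETH_ALEN) &
	       ETH_CRC32_HASH_MASK;
}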
340 | /* Maximal L2 clients supported */ | 301 | /* Maximal L2 clients supported */ |
341 | #define ETH_MAX_RX_CLIENTS_E1 18 | 302 | #define ETH_MAX_RX_CLIENTS_E1 18 |
342 | #define ETH_MAX_RX_CLIENTS_E1H 28 | 303 | #define ETH_MAX_RX_CLIENTS_E1H 28 |
304 | #define ETH_MAX_RX_CLIENTS_E2 152 | ||
305 | |||
306 | /* Maximal statistics client Ids */ | ||
307 | #define MAX_STAT_COUNTER_ID_E1 36 | ||
308 | #define MAX_STAT_COUNTER_ID_E1H 56 | ||
309 | #define MAX_STAT_COUNTER_ID_E2 140 | ||
310 | |||
311 | #define MAX_MAC_CREDIT_E1 192 /* Per Chip */ | ||
312 | #define MAX_MAC_CREDIT_E1H 256 /* Per Chip */ | ||
313 | #define MAX_MAC_CREDIT_E2 272 /* Per Path */ | ||
314 | #define MAX_VLAN_CREDIT_E1 0 /* Per Chip */ | ||
315 | #define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */ | ||
316 | #define MAX_VLAN_CREDIT_E2 272 /* Per Path */ | ||
343 | 317 | ||
344 | #define MAX_STAT_COUNTER_ID ETH_MAX_RX_CLIENTS_E1H | ||
345 | 318 | ||
346 | /* Maximal aggregation queues supported */ | 319 | /* Maximal aggregation queues supported */ |
347 | #define ETH_MAX_AGGREGATION_QUEUES_E1 32 | 320 | #define ETH_MAX_AGGREGATION_QUEUES_E1 32 |
348 | #define ETH_MAX_AGGREGATION_QUEUES_E1H 64 | 321 | #define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64 |
349 | 322 | ||
350 | /* ETH RSS modes */ | ||
351 | #define ETH_RSS_MODE_DISABLED 0 | ||
352 | #define ETH_RSS_MODE_REGULAR 1 | ||
353 | #define ETH_RSS_MODE_VLAN_PRI 2 | ||
354 | #define ETH_RSS_MODE_E1HOV_PRI 3 | ||
355 | #define ETH_RSS_MODE_IP_DSCP 4 | ||
356 | #define ETH_RSS_MODE_E2_INTEG 5 | ||
357 | 323 | ||
324 | #define ETH_NUM_OF_MCAST_BINS 256 | ||
325 | #define ETH_NUM_OF_MCAST_ENGINES_E2 72 | ||
358 | 326 | ||
359 | /* ETH vlan filtering modes */ | 327 | #define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3) |
360 | #define ETH_VLAN_FILTER_ANY_VLAN 0 /* Don't filter by vlan */ | 328 | #define ETH_MIN_RX_CQES_WITH_TPA_E1 \ |
361 | #define ETH_VLAN_FILTER_SPECIFIC_VLAN \ | 329 | (ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA) |
362 | 1 /* Only the vlan_id is allowed */ | 330 | #define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \ |
363 | #define ETH_VLAN_FILTER_CLASSIFY \ | 331 | (ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA) |
364 | 2 /* vlan will be added to CAM for classification */ | ||
365 | 332 | ||
366 | /* Fast path CQE selection */ | 333 | #define DISABLE_STATISTIC_COUNTER_ID_VALUE 0 |
367 | #define ETH_FP_CQE_REGULAR 0 | ||
368 | #define ETH_FP_CQE_SGL 1 | ||
369 | #define ETH_FP_CQE_RAW 2 | ||
370 | 334 | ||
371 | 335 | ||
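The ETH_MIN_RX_CQES_* minimums above can be evaluated directly from constants defined elsewhere in this file: with MAX_RAMRODS_PER_PORT = 8 (defined later), the no-TPA minimum is 8 + 3 = 11 CQEs; reserving the TPA aggregation queues on top of that gives 32 + 11 = 43 for E1 and 64 + 11 = 75 for E1H/E2.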
372 | /** | 336 | /** |
373 | * This file defines HSI constants common to all microcode flows | 337 | * This file defines HSI constants common to all microcode flows |
374 | */ | 338 | */ |
375 | |||
376 | /* Connection types */ | ||
377 | #define ETH_CONNECTION_TYPE 0 | ||
378 | #define TOE_CONNECTION_TYPE 1 | ||
379 | #define RDMA_CONNECTION_TYPE 2 | ||
380 | #define ISCSI_CONNECTION_TYPE 3 | ||
381 | #define FCOE_CONNECTION_TYPE 4 | ||
382 | #define RESERVED_CONNECTION_TYPE_0 5 | ||
383 | #define RESERVED_CONNECTION_TYPE_1 6 | ||
384 | #define RESERVED_CONNECTION_TYPE_2 7 | ||
385 | #define NONE_CONNECTION_TYPE 8 | ||
386 | |||
387 | 339 | ||
388 | #define PROTOCOL_STATE_BIT_OFFSET 6 | 340 | #define PROTOCOL_STATE_BIT_OFFSET 6 |
389 | 341 | ||
@@ -391,25 +343,9 @@ | |||
391 | #define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) | 343 | #define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) |
392 | #define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) | 344 | #define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) |
393 | 345 | ||
394 | /* values of command IDs in the ramrod message */ | ||
395 | #define RAMROD_CMD_ID_COMMON_FUNCTION_START 1 | ||
396 | #define RAMROD_CMD_ID_COMMON_FUNCTION_STOP 2 | ||
397 | #define RAMROD_CMD_ID_COMMON_CFC_DEL 3 | ||
398 | #define RAMROD_CMD_ID_COMMON_CFC_DEL_WB 4 | ||
399 | #define RAMROD_CMD_ID_COMMON_SET_MAC 5 | ||
400 | #define RAMROD_CMD_ID_COMMON_STAT_QUERY 6 | ||
401 | #define RAMROD_CMD_ID_COMMON_STOP_TRAFFIC 7 | ||
402 | #define RAMROD_CMD_ID_COMMON_START_TRAFFIC 8 | ||
403 | |||
404 | /* microcode fixed page page size 4K (chains and ring segments) */ | 346 | /* microcode fixed page page size 4K (chains and ring segments) */ |
405 | #define MC_PAGE_SIZE 4096 | 347 | #define MC_PAGE_SIZE 4096 |
406 | 348 | ||
407 | |||
408 | /* Host coalescing constants */ | ||
409 | #define HC_IGU_BC_MODE 0 | ||
410 | #define HC_IGU_NBC_MODE 1 | ||
411 | /* Host coalescing constants. E1 includes E1H as well */ | ||
412 | |||
413 | /* Number of indices per slow-path SB */ | 349 | /* Number of indices per slow-path SB */ |
414 | #define HC_SP_SB_MAX_INDICES 16 | 350 | #define HC_SP_SB_MAX_INDICES 16 |
415 | 351 | ||
@@ -418,30 +354,17 @@ | |||
418 | #define HC_SB_MAX_INDICES_E2 8 | 354 | #define HC_SB_MAX_INDICES_E2 8 |
419 | 355 | ||
420 | #define HC_SB_MAX_SB_E1X 32 | 356 | #define HC_SB_MAX_SB_E1X 32 |
421 | #define HC_SB_MAX_SB_E2 136 | 357 | #define HC_SB_MAX_SB_E2 136 |
422 | 358 | ||
423 | #define HC_SP_SB_ID 0xde | 359 | #define HC_SP_SB_ID 0xde |
424 | 360 | ||
425 | #define HC_REGULAR_SEGMENT 0 | ||
426 | #define HC_DEFAULT_SEGMENT 1 | ||
427 | #define HC_SB_MAX_SM 2 | 361 | #define HC_SB_MAX_SM 2 |
428 | 362 | ||
429 | #define HC_SB_MAX_DYNAMIC_INDICES 4 | 363 | #define HC_SB_MAX_DYNAMIC_INDICES 4 |
430 | #define HC_FUNCTION_DISABLED 0xff | ||
431 | /* used by the driver to get the SB offset */ | ||
432 | #define USTORM_ID 0 | ||
433 | #define CSTORM_ID 1 | ||
434 | #define XSTORM_ID 2 | ||
435 | #define TSTORM_ID 3 | ||
436 | #define ATTENTION_ID 4 | ||
437 | 364 | ||
438 | /* max number of slow path commands per port */ | 365 | /* max number of slow path commands per port */ |
439 | #define MAX_RAMRODS_PER_PORT 8 | 366 | #define MAX_RAMRODS_PER_PORT 8 |
440 | 367 | ||
441 | /* values for RX ETH CQE type field */ | ||
442 | #define RX_ETH_CQE_TYPE_ETH_FASTPATH 0 | ||
443 | #define RX_ETH_CQE_TYPE_ETH_RAMROD 1 | ||
444 | |||
445 | 368 | ||
446 | /**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ | 369 | /**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ |
447 | 370 | ||
@@ -451,7 +374,7 @@ | |||
451 | 374 | ||
452 | #define XSEMI_CLK1_RESUL_CHIP (1e-3) | 375 | #define XSEMI_CLK1_RESUL_CHIP (1e-3) |
453 | 376 | ||
454 | #define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6)) | 377 | #define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6)) |
455 | 378 | ||
456 | /**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ | 379 | /**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ |
457 | 380 | ||
@@ -460,72 +383,28 @@ | |||
460 | 383 | ||
461 | #define FW_LOG_LIST_SIZE 50 | 384 | #define FW_LOG_LIST_SIZE 50 |
462 | 385 | ||
463 | #define NUM_OF_PROTOCOLS 4 | ||
464 | #define NUM_OF_SAFC_BITS 16 | 386 | #define NUM_OF_SAFC_BITS 16 |
465 | #define MAX_COS_NUMBER 4 | 387 | #define MAX_COS_NUMBER 4 |
466 | 388 | #define MAX_TRAFFIC_TYPES 8 | |
467 | #define FAIRNESS_COS_WRR_MODE 0 | ||
468 | #define FAIRNESS_COS_ETS_MODE 1 | ||
469 | |||
470 | |||
471 | /* Priority Flow Control (PFC) */ | ||
472 | #define MAX_PFC_PRIORITIES 8 | 389 | #define MAX_PFC_PRIORITIES 8 |
473 | #define MAX_PFC_TRAFFIC_TYPES 8 | ||
474 | |||
475 | /* Available Traffic Types for Link Layer Flow Control */ | ||
476 | #define LLFC_TRAFFIC_TYPE_NW 0 | ||
477 | #define LLFC_TRAFFIC_TYPE_FCOE 1 | ||
478 | #define LLFC_TRAFFIC_TYPE_ISCSI 2 | ||
479 | /***************** START OF E2 INTEGRATION \ | ||
480 | CODE***************************************/ | ||
481 | #define LLFC_TRAFFIC_TYPE_NW_COS1_E2INTEG 3 | ||
482 | /***************** END OF E2 INTEGRATION \ | ||
483 | CODE***************************************/ | ||
484 | #define LLFC_TRAFFIC_TYPE_MAX 4 | ||
485 | 390 | ||
486 | /* used by array traffic_type_to_priority[] to mark traffic type \ | 391 | /* used by array traffic_type_to_priority[] to mark traffic type \ |
487 | that is not mapped to priority*/ | 392 | that is not mapped to priority*/ |
488 | #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF | 393 | #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF |
489 | 394 | ||
490 | #define LLFC_MODE_NONE 0 | ||
491 | #define LLFC_MODE_PFC 1 | ||
492 | #define LLFC_MODE_SAFC 2 | ||
493 | |||
494 | #define DCB_DISABLED 0 | ||
495 | #define DCB_ENABLED 1 | ||
496 | 395 | ||
497 | #define UNKNOWN_ADDRESS 0 | 396 | #define C_ERES_PER_PAGE \ |
498 | #define UNICAST_ADDRESS 1 | 397 | (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem))) |
499 | #define MULTICAST_ADDRESS 2 | 398 | #define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1) |
500 | #define BROADCAST_ADDRESS 3 | ||
501 | 399 | ||
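C_ERES_PER_PAGE is PAGE_SIZE divided by the event-ring element size, and C_ERE_PER_PAGE_MASK is that count minus one, which only behaves as a cheap modulo when the per-page count is a power of two (true whenever PAGE_SIZE and the element size both are). A small, hedged illustration of the resulting idiom; the helper is hypothetical, not driver code.

#include <linux/types.h>

/* With a power-of-two per-page element count, "did this index land on a
 * page boundary?" becomes a mask test instead of a division.
 */
static inline bool on_page_boundary(u32 elem_idx, u32 per_page_mask)
{
	return (elem_idx & per_page_mask) == 0;
}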
502 | #define SINGLE_FUNCTION 0 | 400 | #define STATS_QUERY_CMD_COUNT 16 |
503 | #define MULTI_FUNCTION_SD 1 | ||
504 | #define MULTI_FUNCTION_SI 2 | ||
505 | 401 | ||
506 | #define IP_V4 0 | 402 | #define NIV_LIST_TABLE_SIZE 4096 |
507 | #define IP_V6 1 | ||
508 | 403 | ||
404 | #define INVALID_VNIC_ID 0xFF | ||
509 | 405 | ||
510 | #define C_ERES_PER_PAGE \ | ||
511 | (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem))) | ||
512 | #define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1) | ||
513 | 406 | ||
514 | #define EVENT_RING_OPCODE_VF_PF_CHANNEL 0 | 407 | #define UNDEF_IRO 0x80000000 |
515 | #define EVENT_RING_OPCODE_FUNCTION_START 1 | ||
516 | #define EVENT_RING_OPCODE_FUNCTION_STOP 2 | ||
517 | #define EVENT_RING_OPCODE_CFC_DEL 3 | ||
518 | #define EVENT_RING_OPCODE_CFC_DEL_WB 4 | ||
519 | #define EVENT_RING_OPCODE_SET_MAC 5 | ||
520 | #define EVENT_RING_OPCODE_STAT_QUERY 6 | ||
521 | #define EVENT_RING_OPCODE_STOP_TRAFFIC 7 | ||
522 | #define EVENT_RING_OPCODE_START_TRAFFIC 8 | ||
523 | #define EVENT_RING_OPCODE_FORWARD_SETUP 9 | ||
524 | |||
525 | #define VF_PF_CHANNEL_STATE_READY 0 | ||
526 | #define VF_PF_CHANNEL_STATE_WAITING_FOR_ACK 1 | ||
527 | |||
528 | #define VF_PF_CHANNEL_STATE_MAX_NUMBER 2 | ||
529 | 408 | ||
530 | 409 | ||
531 | #endif /* BNX2X_FW_DEFS_H */ | 410 | #endif /* BNX2X_FW_DEFS_H */ |
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h index b8b4b2f0b60a..df5355818c30 100644 --- a/drivers/net/bnx2x/bnx2x_hsi.h +++ b/drivers/net/bnx2x/bnx2x_hsi.h | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | #include "bnx2x_fw_defs.h" | 12 | #include "bnx2x_fw_defs.h" |
13 | 13 | ||
14 | #define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e | 14 | #define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e |
15 | 15 | ||
16 | struct license_key { | 16 | struct license_key { |
17 | u32 reserved[6]; | 17 | u32 reserved[6]; |
@@ -33,201 +33,366 @@ struct license_key { | |||
33 | u32 reserved_b[4]; | 33 | u32 reserved_b[4]; |
34 | }; | 34 | }; |
35 | 35 | ||
36 | #define PORT_0 0 | 36 | |
37 | #define PORT_1 1 | 37 | #define PORT_0 0 |
38 | #define PORT_MAX 2 | 38 | #define PORT_1 1 |
39 | #define PORT_MAX 2 | ||
39 | 40 | ||
40 | /**************************************************************************** | 41 | /**************************************************************************** |
41 | * Shared HW configuration * | 42 | * Shared HW configuration * |
42 | ****************************************************************************/ | 43 | ****************************************************************************/ |
43 | struct shared_hw_cfg { /* NVRAM Offset */ | 44 | #define PIN_CFG_NA 0x00000000 |
45 | #define PIN_CFG_GPIO0_P0 0x00000001 | ||
46 | #define PIN_CFG_GPIO1_P0 0x00000002 | ||
47 | #define PIN_CFG_GPIO2_P0 0x00000003 | ||
48 | #define PIN_CFG_GPIO3_P0 0x00000004 | ||
49 | #define PIN_CFG_GPIO0_P1 0x00000005 | ||
50 | #define PIN_CFG_GPIO1_P1 0x00000006 | ||
51 | #define PIN_CFG_GPIO2_P1 0x00000007 | ||
52 | #define PIN_CFG_GPIO3_P1 0x00000008 | ||
53 | #define PIN_CFG_EPIO0 0x00000009 | ||
54 | #define PIN_CFG_EPIO1 0x0000000a | ||
55 | #define PIN_CFG_EPIO2 0x0000000b | ||
56 | #define PIN_CFG_EPIO3 0x0000000c | ||
57 | #define PIN_CFG_EPIO4 0x0000000d | ||
58 | #define PIN_CFG_EPIO5 0x0000000e | ||
59 | #define PIN_CFG_EPIO6 0x0000000f | ||
60 | #define PIN_CFG_EPIO7 0x00000010 | ||
61 | #define PIN_CFG_EPIO8 0x00000011 | ||
62 | #define PIN_CFG_EPIO9 0x00000012 | ||
63 | #define PIN_CFG_EPIO10 0x00000013 | ||
64 | #define PIN_CFG_EPIO11 0x00000014 | ||
65 | #define PIN_CFG_EPIO12 0x00000015 | ||
66 | #define PIN_CFG_EPIO13 0x00000016 | ||
67 | #define PIN_CFG_EPIO14 0x00000017 | ||
68 | #define PIN_CFG_EPIO15 0x00000018 | ||
69 | #define PIN_CFG_EPIO16 0x00000019 | ||
70 | #define PIN_CFG_EPIO17 0x0000001a | ||
71 | #define PIN_CFG_EPIO18 0x0000001b | ||
72 | #define PIN_CFG_EPIO19 0x0000001c | ||
73 | #define PIN_CFG_EPIO20 0x0000001d | ||
74 | #define PIN_CFG_EPIO21 0x0000001e | ||
75 | #define PIN_CFG_EPIO22 0x0000001f | ||
76 | #define PIN_CFG_EPIO23 0x00000020 | ||
77 | #define PIN_CFG_EPIO24 0x00000021 | ||
78 | #define PIN_CFG_EPIO25 0x00000022 | ||
79 | #define PIN_CFG_EPIO26 0x00000023 | ||
80 | #define PIN_CFG_EPIO27 0x00000024 | ||
81 | #define PIN_CFG_EPIO28 0x00000025 | ||
82 | #define PIN_CFG_EPIO29 0x00000026 | ||
83 | #define PIN_CFG_EPIO30 0x00000027 | ||
84 | #define PIN_CFG_EPIO31 0x00000028 | ||
85 | |||
86 | /* EPIO definition */ | ||
87 | #define EPIO_CFG_NA 0x00000000 | ||
88 | #define EPIO_CFG_EPIO0 0x00000001 | ||
89 | #define EPIO_CFG_EPIO1 0x00000002 | ||
90 | #define EPIO_CFG_EPIO2 0x00000003 | ||
91 | #define EPIO_CFG_EPIO3 0x00000004 | ||
92 | #define EPIO_CFG_EPIO4 0x00000005 | ||
93 | #define EPIO_CFG_EPIO5 0x00000006 | ||
94 | #define EPIO_CFG_EPIO6 0x00000007 | ||
95 | #define EPIO_CFG_EPIO7 0x00000008 | ||
96 | #define EPIO_CFG_EPIO8 0x00000009 | ||
97 | #define EPIO_CFG_EPIO9 0x0000000a | ||
98 | #define EPIO_CFG_EPIO10 0x0000000b | ||
99 | #define EPIO_CFG_EPIO11 0x0000000c | ||
100 | #define EPIO_CFG_EPIO12 0x0000000d | ||
101 | #define EPIO_CFG_EPIO13 0x0000000e | ||
102 | #define EPIO_CFG_EPIO14 0x0000000f | ||
103 | #define EPIO_CFG_EPIO15 0x00000010 | ||
104 | #define EPIO_CFG_EPIO16 0x00000011 | ||
105 | #define EPIO_CFG_EPIO17 0x00000012 | ||
106 | #define EPIO_CFG_EPIO18 0x00000013 | ||
107 | #define EPIO_CFG_EPIO19 0x00000014 | ||
108 | #define EPIO_CFG_EPIO20 0x00000015 | ||
109 | #define EPIO_CFG_EPIO21 0x00000016 | ||
110 | #define EPIO_CFG_EPIO22 0x00000017 | ||
111 | #define EPIO_CFG_EPIO23 0x00000018 | ||
112 | #define EPIO_CFG_EPIO24 0x00000019 | ||
113 | #define EPIO_CFG_EPIO25 0x0000001a | ||
114 | #define EPIO_CFG_EPIO26 0x0000001b | ||
115 | #define EPIO_CFG_EPIO27 0x0000001c | ||
116 | #define EPIO_CFG_EPIO28 0x0000001d | ||
117 | #define EPIO_CFG_EPIO29 0x0000001e | ||
118 | #define EPIO_CFG_EPIO30 0x0000001f | ||
119 | #define EPIO_CFG_EPIO31 0x00000020 | ||
120 | |||
121 | |||
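Under a plain reading of the values above, the PIN_CFG_* constants form one flat selector namespace for both the per-port GPIOs and the extended EPIO pins: 0 means not assigned, 1 through 8 select GPIO0-3 of port 0 and then port 1, and values from PIN_CFG_EPIO0 (0x09) upwards select EPIO0-31. A small decoding sketch under that reading; it is purely illustrative and not part of the driver.

#include <linux/types.h>

#define PIN_CFG_NA	0x00000000
#define PIN_CFG_EPIO0	0x00000009

/* Split a PIN_CFG_* selector into (is_epio, pin, port).
 * Returns false for PIN_CFG_NA; EPIO pins are not port-specific here.
 */
static inline bool pin_cfg_decode(u32 pin_cfg, bool *is_epio, u8 *pin, u8 *port)
{
	if (pin_cfg == PIN_CFG_NA)
		return false;
	if (pin_cfg >= PIN_CFG_EPIO0) {
		*is_epio = true;
		*pin = pin_cfg - PIN_CFG_EPIO0;		/* EPIO0..EPIO31 */
		*port = 0;
	} else {
		*is_epio = false;
		*pin = (pin_cfg - 1) & 3;		/* GPIO0..GPIO3 */
		*port = (pin_cfg - 1) >> 2;		/* port 0 or 1 */
	}
	return true;
}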
122 | struct shared_hw_cfg { /* NVRAM Offset */ | ||
44 | /* Up to 16 bytes of NULL-terminated string */ | 123 | /* Up to 16 bytes of NULL-terminated string */ |
45 | u8 part_num[16]; /* 0x104 */ | 124 | u8 part_num[16]; /* 0x104 */ |
125 | |||
126 | u32 config; /* 0x114 */ | ||
127 | #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001 | ||
128 | #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0 | ||
129 | #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000 | ||
130 | #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001 | ||
131 | #define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002 | ||
46 | 132 | ||
47 | u32 config; /* 0x114 */ | 133 | #define SHARED_HW_CFG_PORT_SWAP 0x00000004 |
48 | #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001 | ||
49 | #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0 | ||
50 | #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000 | ||
51 | #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001 | ||
52 | #define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002 | ||
53 | 134 | ||
54 | #define SHARED_HW_CFG_PORT_SWAP 0x00000004 | 135 | #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008 |
55 | 136 | ||
56 | #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008 | 137 | #define SHARED_HW_CFG_PCIE_GEN3_DISABLED 0x00000000 |
138 | #define SHARED_HW_CFG_PCIE_GEN3_ENABLED 0x00000010 | ||
57 | 139 | ||
58 | #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700 | 140 | #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700 |
59 | #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8 | 141 | #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8 |
60 | /* Whatever MFW found in NVM | 142 | /* Whatever MFW found in NVM |
61 | (if multiple found, priority order is: NC-SI, UMP, IPMI) */ | 143 | (if multiple found, priority order is: NC-SI, UMP, IPMI) */ |
62 | #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000 | 144 | #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000 |
63 | #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100 | 145 | #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100 |
64 | #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200 | 146 | #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200 |
65 | #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300 | 147 | #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300 |
66 | /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI | 148 | /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI |
67 | (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ | 149 | (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ |
68 | #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400 | 150 | #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400 |
69 | /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI | 151 | /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI |
70 | (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ | 152 | (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ |
71 | #define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500 | 153 | #define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500 |
72 | /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP | 154 | /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP |
73 | (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ | 155 | (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ |
74 | #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600 | 156 | #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600 |
75 | 157 | ||
76 | #define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000 | 158 | #define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000 |
77 | #define SHARED_HW_CFG_LED_MODE_SHIFT 16 | 159 | #define SHARED_HW_CFG_LED_MODE_SHIFT 16 |
78 | #define SHARED_HW_CFG_LED_MAC1 0x00000000 | 160 | #define SHARED_HW_CFG_LED_MAC1 0x00000000 |
79 | #define SHARED_HW_CFG_LED_PHY1 0x00010000 | 161 | #define SHARED_HW_CFG_LED_PHY1 0x00010000 |
80 | #define SHARED_HW_CFG_LED_PHY2 0x00020000 | 162 | #define SHARED_HW_CFG_LED_PHY2 0x00020000 |
81 | #define SHARED_HW_CFG_LED_PHY3 0x00030000 | 163 | #define SHARED_HW_CFG_LED_PHY3 0x00030000 |
82 | #define SHARED_HW_CFG_LED_MAC2 0x00040000 | 164 | #define SHARED_HW_CFG_LED_MAC2 0x00040000 |
83 | #define SHARED_HW_CFG_LED_PHY4 0x00050000 | 165 | #define SHARED_HW_CFG_LED_PHY4 0x00050000 |
84 | #define SHARED_HW_CFG_LED_PHY5 0x00060000 | 166 | #define SHARED_HW_CFG_LED_PHY5 0x00060000 |
85 | #define SHARED_HW_CFG_LED_PHY6 0x00070000 | 167 | #define SHARED_HW_CFG_LED_PHY6 0x00070000 |
86 | #define SHARED_HW_CFG_LED_MAC3 0x00080000 | 168 | #define SHARED_HW_CFG_LED_MAC3 0x00080000 |
87 | #define SHARED_HW_CFG_LED_PHY7 0x00090000 | 169 | #define SHARED_HW_CFG_LED_PHY7 0x00090000 |
88 | #define SHARED_HW_CFG_LED_PHY9 0x000a0000 | 170 | #define SHARED_HW_CFG_LED_PHY9 0x000a0000 |
89 | #define SHARED_HW_CFG_LED_PHY11 0x000b0000 | 171 | #define SHARED_HW_CFG_LED_PHY11 0x000b0000 |
90 | #define SHARED_HW_CFG_LED_MAC4 0x000c0000 | 172 | #define SHARED_HW_CFG_LED_MAC4 0x000c0000 |
91 | #define SHARED_HW_CFG_LED_PHY8 0x000d0000 | 173 | #define SHARED_HW_CFG_LED_PHY8 0x000d0000 |
92 | #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000 | 174 | #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000 |
93 | 175 | ||
94 | 176 | ||
95 | #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000 | 177 | #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000 |
96 | #define SHARED_HW_CFG_AN_ENABLE_SHIFT 24 | 178 | #define SHARED_HW_CFG_AN_ENABLE_SHIFT 24 |
97 | #define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000 | 179 | #define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000 |
98 | #define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000 | 180 | #define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000 |
99 | #define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000 | 181 | #define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000 |
100 | #define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000 | 182 | #define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000 |
101 | #define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000 | 183 | #define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000 |
102 | #define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000 | 184 | #define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000 |
103 | 185 | ||
104 | u32 config2; /* 0x118 */ | 186 | #define SHARED_HW_CFG_SRIOV_MASK 0x40000000 |
187 | #define SHARED_HW_CFG_SRIOV_DISABLED 0x00000000 | ||
188 | #define SHARED_HW_CFG_SRIOV_ENABLED 0x40000000 | ||
189 | |||
190 | #define SHARED_HW_CFG_ATC_MASK 0x80000000 | ||
191 | #define SHARED_HW_CFG_ATC_DISABLED 0x00000000 | ||
192 | #define SHARED_HW_CFG_ATC_ENABLED 0x80000000 | ||
193 | |||
194 | u32 config2; /* 0x118 */ | ||
105 | /* one time auto detect grace period (in sec) */ | 195 | /* one time auto detect grace period (in sec) */ |
106 | #define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff | 196 | #define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff |
107 | #define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0 | 197 | #define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0 |
108 | 198 | ||
109 | #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100 | 199 | #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100 |
200 | #define SHARED_HW_CFG_PCIE_GEN2_DISABLED 0x00000000 | ||
110 | 201 | ||
111 | /* The default value for the core clock is 250MHz and it is | 202 | /* The default value for the core clock is 250MHz and it is |
112 | achieved by setting the clock change to 4 */ | 203 | achieved by setting the clock change to 4 */ |
113 | #define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00 | 204 | #define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00 |
114 | #define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9 | 205 | #define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9 |
115 | 206 | ||
116 | #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000 | 207 | #define SHARED_HW_CFG_SMBUS_TIMING_MASK 0x00001000 |
117 | #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000 | 208 | #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000 |
209 | #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000 | ||
118 | 210 | ||
119 | #define SHARED_HW_CFG_HIDE_PORT1 0x00002000 | 211 | #define SHARED_HW_CFG_HIDE_PORT1 0x00002000 |
120 | 212 | ||
121 | /* The fan failure mechanism is usually related to the PHY type | 213 | #define SHARED_HW_CFG_WOL_CAPABLE_MASK 0x00004000 |
122 | since the power consumption of the board is determined by the PHY. | 214 | #define SHARED_HW_CFG_WOL_CAPABLE_DISABLED 0x00000000 |
123 | Currently, fan is required for most designs with SFX7101, BCM8727 | 215 | #define SHARED_HW_CFG_WOL_CAPABLE_ENABLED 0x00004000 |
124 | and BCM8481. If a fan is not required for a board which uses one | ||
125 | of those PHYs, this field should be set to "Disabled". If a fan is | ||
126 | required for a different PHY type, this option should be set to | ||
127 | "Enabled". | ||
128 | The fan failure indication is expected on | ||
129 | SPIO5 */ | ||
130 | #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000 | ||
131 | #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19 | ||
132 | #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000 | ||
133 | #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000 | ||
134 | #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000 | ||
135 | |||
136 | /* Set the MDC/MDIO access for the first external phy */ | ||
137 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000 | ||
138 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26 | ||
139 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000 | ||
140 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000 | ||
141 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000 | ||
142 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000 | ||
143 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000 | ||
144 | |||
145 | /* Set the MDC/MDIO access for the second external phy */ | ||
146 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000 | ||
147 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29 | ||
148 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000 | ||
149 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000 | ||
150 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000 | ||
151 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000 | ||
152 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000 | ||
153 | u32 power_dissipated; /* 0x11c */ | ||
154 | #define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 | ||
155 | #define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24 | ||
156 | |||
157 | #define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000 | ||
158 | #define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16 | ||
159 | #define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000 | ||
160 | #define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000 | ||
161 | #define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000 | ||
162 | #define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000 | ||
163 | |||
164 | u32 ump_nc_si_config; /* 0x120 */ | ||
165 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003 | ||
166 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0 | ||
167 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000 | ||
168 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001 | ||
169 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000 | ||
170 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002 | ||
171 | |||
172 | #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00 | ||
173 | #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8 | ||
174 | |||
175 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000 | ||
176 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16 | ||
177 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000 | ||
178 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000 | ||
179 | |||
180 | u32 board; /* 0x124 */ | ||
181 | #define SHARED_HW_CFG_BOARD_REV_MASK 0x00FF0000 | ||
182 | #define SHARED_HW_CFG_BOARD_REV_SHIFT 16 | ||
183 | |||
184 | #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0F000000 | ||
185 | #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24 | ||
186 | |||
187 | #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xF0000000 | ||
188 | #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28 | ||
189 | |||
190 | u32 reserved; /* 0x128 */ | ||
191 | 216 | ||
217 | /* Output low when PERST is asserted */ | ||
218 | #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK 0x00008000 | ||
219 | #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED 0x00000000 | ||
220 | #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED 0x00008000 | ||
221 | |||
222 | #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK 0x00070000 | ||
223 | #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT 16 | ||
224 | #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW 0x00000000 | ||
225 | #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB 0x00010000 | ||
226 | #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB 0x00020000 | ||
227 | #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB 0x00030000 | ||
228 | |||
229 | /* The fan failure mechanism is usually related to the PHY type | ||
230 | since the power consumption of the board is determined by the PHY. | ||
231 | Currently, fan is required for most designs with SFX7101, BCM8727 | ||
232 | and BCM8481. If a fan is not required for a board which uses one | ||
233 | of those PHYs, this field should be set to "Disabled". If a fan is | ||
234 | required for a different PHY type, this option should be set to | ||
235 | "Enabled". The fan failure indication is expected on SPIO5 */ | ||
236 | #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000 | ||
237 | #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19 | ||
238 | #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000 | ||
239 | #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000 | ||
240 | #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000 | ||
241 | |||
242 | /* ASPM Power Management support */ | ||
243 | #define SHARED_HW_CFG_ASPM_SUPPORT_MASK 0x00600000 | ||
244 | #define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT 21 | ||
245 | #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED 0x00000000 | ||
246 | #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED 0x00200000 | ||
247 | #define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED 0x00400000 | ||
248 | #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED 0x00600000 | ||
249 | |||
250 | /* The value of PM_TL_IGNORE_REQS (bit0) in PCI register | ||
251 | tl_control_0 (register 0x2800) */ | ||
252 | #define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK 0x00800000 | ||
253 | #define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED 0x00000000 | ||
254 | #define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED 0x00800000 | ||
255 | |||
256 | #define SHARED_HW_CFG_PORT_MODE_MASK 0x01000000 | ||
257 | #define SHARED_HW_CFG_PORT_MODE_2 0x00000000 | ||
258 | #define SHARED_HW_CFG_PORT_MODE_4 0x01000000 | ||
259 | |||
260 | #define SHARED_HW_CFG_PATH_SWAP_MASK 0x02000000 | ||
261 | #define SHARED_HW_CFG_PATH_SWAP_DISABLED 0x00000000 | ||
262 | #define SHARED_HW_CFG_PATH_SWAP_ENABLED 0x02000000 | ||
263 | |||
264 | /* Set the MDC/MDIO access for the first external phy */ | ||
265 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000 | ||
266 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26 | ||
267 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000 | ||
268 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000 | ||
269 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000 | ||
270 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000 | ||
271 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000 | ||
272 | |||
273 | /* Set the MDC/MDIO access for the second external phy */ | ||
274 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000 | ||
275 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29 | ||
276 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000 | ||
277 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000 | ||
278 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000 | ||
279 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000 | ||
280 | #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000 | ||
281 | |||
282 | |||
283 | u32 power_dissipated; /* 0x11c */ | ||
284 | #define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000 | ||
285 | #define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16 | ||
286 | #define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000 | ||
287 | #define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000 | ||
288 | #define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000 | ||
289 | #define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000 | ||
290 | |||
291 | #define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 | ||
292 | #define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24 | ||
293 | |||
294 | u32 ump_nc_si_config; /* 0x120 */ | ||
295 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003 | ||
296 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0 | ||
297 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000 | ||
298 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001 | ||
299 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000 | ||
300 | #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002 | ||
301 | |||
302 | #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00 | ||
303 | #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8 | ||
304 | |||
305 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000 | ||
306 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16 | ||
307 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000 | ||
308 | #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000 | ||
309 | |||
310 | u32 board; /* 0x124 */ | ||
311 | #define SHARED_HW_CFG_E3_I2C_MUX0_MASK 0x0000003F | ||
312 | #define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT 0 | ||
313 | #define SHARED_HW_CFG_E3_I2C_MUX1_MASK 0x00000FC0 | ||
314 | #define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT 6 | ||
315 | /* Use the PIN_CFG_XXX defines on top */ | ||
316 | #define SHARED_HW_CFG_BOARD_REV_MASK 0x00ff0000 | ||
317 | #define SHARED_HW_CFG_BOARD_REV_SHIFT 16 | ||
318 | |||
319 | #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0f000000 | ||
320 | #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24 | ||
321 | |||
322 | #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xf0000000 | ||
323 | #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28 | ||
324 | |||
325 | u32 wc_lane_config; /* 0x128 */ | ||
326 | #define SHARED_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF | ||
327 | #define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT 0 | ||
328 | #define SHARED_HW_CFG_LANE_SWAP_CFG_32103210 0x00001b1b | ||
329 | #define SHARED_HW_CFG_LANE_SWAP_CFG_32100123 0x00001be4 | ||
330 | #define SHARED_HW_CFG_LANE_SWAP_CFG_01233210 0x0000e41b | ||
331 | #define SHARED_HW_CFG_LANE_SWAP_CFG_01230123 0x0000e4e4 | ||
332 | #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF | ||
333 | #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 | ||
334 | #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00 | ||
335 | #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 | ||
336 | |||
337 | /* TX lane Polarity swap */ | ||
338 | #define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED 0x00010000 | ||
339 | #define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED 0x00020000 | ||
340 | #define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED 0x00040000 | ||
341 | #define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED 0x00080000 | ||
342 | /* TX lane Polarity swap */ | ||
343 | #define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED 0x00100000 | ||
344 | #define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED 0x00200000 | ||
345 | #define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED 0x00400000 | ||
346 | #define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED 0x00800000 | ||
347 | |||
348 | /* Selects the port layout of the board */ | ||
349 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK 0x0F000000 | ||
350 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT 24 | ||
351 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01 0x00000000 | ||
352 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10 0x01000000 | ||
353 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123 0x02000000 | ||
354 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032 0x03000000 | ||
355 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301 0x04000000 | ||
356 | #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210 0x05000000 | ||
192 | }; | 357 | }; |
193 | 358 | ||
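Every option in struct shared_hw_cfg above follows the same NVRAM convention: a numeric field is read as (config_word & MASK) >> SHIFT, while enumerated settings are compared against the masked word directly. A hedged accessor for one field, the board revision, just to show the shape; the helper name is invented for this example.

#include <linux/types.h>

#define SHARED_HW_CFG_BOARD_REV_MASK	0x00ff0000
#define SHARED_HW_CFG_BOARD_REV_SHIFT	16

/* Illustrative: extract the board revision from the NVRAM 'board' word. */
static inline u8 shared_cfg_board_rev(u32 board_word)
{
	return (board_word & SHARED_HW_CFG_BOARD_REV_MASK) >>
	       SHARED_HW_CFG_BOARD_REV_SHIFT;
}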
194 | 359 | ||
195 | /**************************************************************************** | 360 | /**************************************************************************** |
196 | * Port HW configuration * | 361 | * Port HW configuration * |
197 | ****************************************************************************/ | 362 | ****************************************************************************/ |
198 | struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ | 363 | struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ |
199 | 364 | ||
200 | u32 pci_id; | 365 | u32 pci_id; |
201 | #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000 | 366 | #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000 |
202 | #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff | 367 | #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff |
203 | 368 | ||
204 | u32 pci_sub_id; | 369 | u32 pci_sub_id; |
205 | #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000 | 370 | #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000 |
206 | #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff | 371 | #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff |
207 | 372 | ||
208 | u32 power_dissipated; | 373 | u32 power_dissipated; |
209 | #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000 | 374 | #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff |
210 | #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24 | 375 | #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0 |
211 | #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000 | 376 | #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00 |
212 | #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16 | 377 | #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8 |
213 | #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00 | 378 | #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000 |
214 | #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8 | 379 | #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16 |
215 | #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff | 380 | #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000 |
216 | #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0 | 381 | #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24 |
217 | 382 | ||
218 | u32 power_consumed; | 383 | u32 power_consumed; |
219 | #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000 | 384 | #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff |
220 | #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24 | 385 | #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0 |
221 | #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000 | 386 | #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00 |
222 | #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16 | 387 | #define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8 |
223 | #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00 | 388 | #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000 |
224 | #define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8 | 389 | #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16 |
225 | #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff | 390 | #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000 |
226 | #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0 | 391 | #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24 |
227 | 392 | ||
228 | u32 mac_upper; | 393 | u32 mac_upper; |
229 | #define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff | 394 | #define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff |
230 | #define PORT_HW_CFG_UPPERMAC_SHIFT 0 | 395 | #define PORT_HW_CFG_UPPERMAC_SHIFT 0 |
231 | u32 mac_lower; | 396 | u32 mac_lower; |
232 | 397 | ||
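mac_upper/mac_lower (and the iSCSI, RDMA and FCoE variants that follow) store a 6-byte MAC address as the upper 16 bits plus the lower 32 bits of the address. A sketch of reassembling the address from the two NVRAM words; the most-significant-byte-first order shown here is the conventional one and the helper is illustrative, not the driver's.

#include <linux/types.h>

/* Rebuild a 6-byte MAC from the NVRAM upper/lower words, MSB first. */
static inline void mac_from_upper_lower(u8 *mac, u32 upper, u32 lower)
{
	mac[0] = (upper >> 8) & 0xff;
	mac[1] = upper & 0xff;
	mac[2] = (lower >> 24) & 0xff;
	mac[3] = (lower >> 16) & 0xff;
	mac[4] = (lower >> 8) & 0xff;
	mac[5] = lower & 0xff;
}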
233 | u32 iscsi_mac_upper; /* Upper 16 bits are always zeroes */ | 398 | u32 iscsi_mac_upper; /* Upper 16 bits are always zeroes */ |
@@ -237,658 +402,799 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ | |||
237 | u32 rdma_mac_lower; | 402 | u32 rdma_mac_lower; |
238 | 403 | ||
239 | u32 serdes_config; | 404 | u32 serdes_config; |
240 | #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000FFFF | 405 | #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000ffff |
241 | #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0 | 406 | #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0 |
242 | 407 | ||
243 | #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xFFFF0000 | 408 | #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xffff0000 |
244 | #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 | 409 | #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 |
245 | 410 | ||
246 | 411 | ||
247 | u32 Reserved0[3]; /* 0x158 */ | 412 | /* Default values: 2P-64, 4P-32 */ |
248 | /* Controls the TX laser of the SFP+ module */ | 413 | u32 pf_config; /* 0x158 */ |
249 | u32 sfp_ctrl; /* 0x164 */ | 414 | #define PORT_HW_CFG_PF_NUM_VF_MASK 0x0000007F |
250 | #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF | 415 | #define PORT_HW_CFG_PF_NUM_VF_SHIFT 0 |
251 | #define PORT_HW_CFG_TX_LASER_SHIFT 0 | 416 | |
252 | #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000 | 417 | /* Default values: 17 */ |
253 | #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001 | 418 | #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_MASK 0x00007F00 |
254 | #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002 | 419 | #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_SHIFT 8 |
255 | #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003 | 420 | |
256 | #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004 | 421 | #define PORT_HW_CFG_ENABLE_FLR_MASK 0x00010000 |
257 | 422 | #define PORT_HW_CFG_FLR_ENABLED 0x00010000 | |
258 | /* Controls the fault module LED of the SFP+ */ | 423 | |
259 | #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00 | 424 | u32 vf_config; /* 0x15C */ |
260 | #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8 | 425 | #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_MASK 0x0000007F |
261 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000 | 426 | #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_SHIFT 0 |
262 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100 | 427 | |
263 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200 | 428 | #define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK 0xFFFF0000 |
264 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300 | 429 | #define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT 16 |
265 | #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400 | 430 | |
266 | 431 | u32 mf_pci_id; /* 0x160 */ | |
267 | u32 Reserved01[10]; /* 0x158 */ | 432 | #define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK 0x0000FFFF |
268 | 433 | #define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT 0 | |
269 | u32 aeu_int_mask; /* 0x190 */ | 434 | |
270 | 435 | /* Controls the TX laser of the SFP+ module */ | |
271 | u32 media_type; /* 0x194 */ | 436 | u32 sfp_ctrl; /* 0x164 */ |
272 | #define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK 0x000000FF | 437 | #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF |
273 | #define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT 0 | 438 | #define PORT_HW_CFG_TX_LASER_SHIFT 0 |
274 | 439 | #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000 | |
275 | #define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK 0x0000FF00 | 440 | #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001 |
276 | #define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT 8 | 441 | #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002 |
277 | 442 | #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003 | |
278 | #define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK 0x00FF0000 | 443 | #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004 |
279 | #define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT 16 | 444 | |
280 | /* for external PHY, or forced mode or during AN */ | 445 | /* Controls the fault module LED of the SFP+ */ |
281 | u16 xgxs_config_rx[4]; /* 0x198 */ | 446 | #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00 |
282 | 447 | #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8 | |
283 | u16 xgxs_config_tx[4]; /* 0x1A0 */ | 448 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000 |
284 | 449 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100 | |
285 | u32 Reserved1[56]; /* 0x1A8 */ | 450 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200 |
286 | u32 default_cfg; /* 0x288 */ | 451 | #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300 |
287 | #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003 | 452 | #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400 |
288 | #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0 | 453 | |
289 | #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000 | 454 | /* The output pin TX_DIS that controls the TX laser of the SFP+ |
290 | #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001 | 455 | module. Use the PIN_CFG_XXX defines on top */ |
291 | #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002 | 456 | u32 e3_sfp_ctrl; /* 0x168 */ |
292 | #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003 | 457 | #define PORT_HW_CFG_E3_TX_LASER_MASK 0x000000FF |
293 | 458 | #define PORT_HW_CFG_E3_TX_LASER_SHIFT 0 | |
294 | #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C | 459 | |
295 | #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2 | 460 | /* The output pin for SFPP_TYPE which turns on the Fault module LED */ |
296 | #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000 | 461 | #define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK 0x0000FF00 |
297 | #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004 | 462 | #define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT 8 |
298 | #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008 | 463 | |
299 | #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c | 464 | /* The input pin MOD_ABS that indicates whether SFP+ module is |
300 | 465 | present or not. Use the PIN_CFG_XXX defines on top */ | |
301 | #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030 | 466 | #define PORT_HW_CFG_E3_MOD_ABS_MASK 0x00FF0000 |
302 | #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4 | 467 | #define PORT_HW_CFG_E3_MOD_ABS_SHIFT 16 |
303 | #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000 | 468 | |
304 | #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010 | 469 | /* The output pin PWRDIS_SFP_X which disable the power of the SFP+ |
305 | #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020 | 470 | module. Use the PIN_CFG_XXX defines on top */ |
306 | #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030 | 471 | #define PORT_HW_CFG_E3_PWR_DIS_MASK 0xFF000000 |
307 | 472 | #define PORT_HW_CFG_E3_PWR_DIS_SHIFT 24 | |
308 | #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0 | ||
309 | #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6 | ||
310 | #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000 | ||
311 | #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040 | ||
312 | #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080 | ||
313 | #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0 | ||
314 | 473 | ||
315 | /* | 474 | /* |
316 | * When KR link is required to be set to force which is not | 475 | * The input pin which signals module transmit fault. Use the |
317 | * KR-compliant, this parameter determine what is the trigger for it. | 476 | * PIN_CFG_XXX defines on top |
318 | * When GPIO is selected, low input will force the speed. Currently | ||
319 | * default speed is 1G. In the future, it may be widen to select the | ||
320 | * forced speed in with another parameter. Note when force-1G is | ||
321 | * enabled, it override option 56: Link Speed option. | ||
322 | */ | 477 | */ |
323 | #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00 | 478 | u32 e3_cmn_pin_cfg; /* 0x16C */ |
324 | #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8 | 479 | #define PORT_HW_CFG_E3_TX_FAULT_MASK 0x000000FF |
325 | #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000 | 480 | #define PORT_HW_CFG_E3_TX_FAULT_SHIFT 0 |
326 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100 | 481 | |
327 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200 | 482 | /* The output pin which reset the PHY. Use the PIN_CFG_XXX defines on |
328 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300 | 483 | top */ |
329 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400 | 484 | #define PORT_HW_CFG_E3_PHY_RESET_MASK 0x0000FF00 |
330 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500 | 485 | #define PORT_HW_CFG_E3_PHY_RESET_SHIFT 8 |
331 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600 | 486 | |
332 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700 | 487 | /* |
333 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800 | 488 | * The output pin which powers down the PHY. Use the PIN_CFG_XXX |
334 | #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900 | 489 | * defines on top |
335 | /* Enable to determine with which GPIO to reset the external phy */ | 490 | */ |
336 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000 | 491 | #define PORT_HW_CFG_E3_PWR_DOWN_MASK 0x00FF0000 |
337 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16 | 492 | #define PORT_HW_CFG_E3_PWR_DOWN_SHIFT 16 |
338 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000 | 493 | |
339 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000 | 494 | /* The output pin values BSC_SEL which selects the I2C for this port |
340 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000 | 495 | in the I2C Mux */ |
341 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000 | 496 | #define PORT_HW_CFG_E3_I2C_MUX0_MASK 0x01000000 |
342 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000 | 497 | #define PORT_HW_CFG_E3_I2C_MUX1_MASK 0x02000000 |
343 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000 | 498 | |
344 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000 | 499 | |
345 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000 | 500 | /* |
346 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000 | 501 | * The input pin I_FAULT which indicate over-current has occurred. |
502 | * Use the PIN_CFG_XXX defines on top | ||
503 | */ | ||
504 | u32 e3_cmn_pin_cfg1; /* 0x170 */ | ||
505 | #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF | ||
506 | #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0 | ||
507 | u32 reserved0[7]; /* 0x174 */ | ||
508 | |||
509 | u32 aeu_int_mask; /* 0x190 */ | ||
510 | |||
511 | u32 media_type; /* 0x194 */ | ||
512 | #define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK 0x000000FF | ||
513 | #define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT 0 | ||
514 | |||
515 | #define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK 0x0000FF00 | ||
516 | #define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT 8 | ||
517 | |||
518 | #define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK 0x00FF0000 | ||
519 | #define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT 16 | ||
520 | |||
521 | /* 4 times 16 bits for all 4 lanes. In case external PHY is present | ||
522 | (not direct mode), those values will not take effect on the 4 XGXS | ||
523 | lanes. For some external PHYs (such as 8706 and 8726) the values | ||
524 | will be used to configure the external PHY; in those cases, not | ||
525 | all 4 values are needed. */ | ||
526 | u16 xgxs_config_rx[4]; /* 0x198 */ | ||
527 | u16 xgxs_config_tx[4]; /* 0x1A0 */ | ||
528 | |||
529 | /* For storing FCOE mac on shared memory */ | ||
530 | u32 fcoe_fip_mac_upper; | ||
531 | #define PORT_HW_CFG_FCOE_UPPERMAC_MASK 0x0000ffff | ||
532 | #define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT 0 | ||
533 | u32 fcoe_fip_mac_lower; | ||
534 | |||
535 | u32 fcoe_wwn_port_name_upper; | ||
536 | u32 fcoe_wwn_port_name_lower; | ||
537 | |||
538 | u32 fcoe_wwn_node_name_upper; | ||
539 | u32 fcoe_wwn_node_name_lower; | ||
540 | |||
541 | u32 Reserved1[50]; /* 0x1C0 */ | ||
542 | |||
543 | u32 default_cfg; /* 0x288 */ | ||
544 | #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003 | ||
545 | #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0 | ||
546 | #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000 | ||
547 | #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001 | ||
548 | #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002 | ||
549 | #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003 | ||
550 | |||
551 | #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C | ||
552 | #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2 | ||
553 | #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000 | ||
554 | #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004 | ||
555 | #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008 | ||
556 | #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c | ||
557 | |||
558 | #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030 | ||
559 | #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4 | ||
560 | #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000 | ||
561 | #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010 | ||
562 | #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020 | ||
563 | #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030 | ||
564 | |||
565 | #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0 | ||
566 | #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6 | ||
567 | #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000 | ||
568 | #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040 | ||
569 | #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080 | ||
570 | #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0 | ||
571 | |||
572 | /* When the KR link is required to be forced in a way that is not | ||
573 | KR-compliant, this parameter determines the trigger for it. | ||
574 | When GPIO is selected, a low input will force the speed. Currently | ||
575 | the default speed is 1G. In the future, this may be widened to | ||
576 | select the forced speed with another parameter. Note that when | ||
577 | force-1G is enabled, it overrides option 56 (Link Speed). */ | ||
578 | #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00 | ||
579 | #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8 | ||
580 | #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000 | ||
581 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100 | ||
582 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200 | ||
583 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300 | ||
584 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400 | ||
585 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500 | ||
586 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600 | ||
587 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700 | ||
588 | #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800 | ||
589 | #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900 | ||
590 | /* Enable to determine with which GPIO to reset the external phy */ | ||
591 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000 | ||
592 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16 | ||
593 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000 | ||
594 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000 | ||
595 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000 | ||
596 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000 | ||
597 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000 | ||
598 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000 | ||
599 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000 | ||
600 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000 | ||
601 | #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000 | ||
602 | |||
347 | /* Enable BAM on KR */ | 603 | /* Enable BAM on KR */ |
348 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 | 604 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 |
349 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 | 605 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 |
350 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 | 606 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 |
351 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 | 607 | #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 |
352 | 608 | ||
353 | /* Enable Common Mode Sense */ | 609 | /* Enable Common Mode Sense */ |
354 | #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000 | 610 | #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000 |
355 | #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21 | 611 | #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21 |
356 | #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000 | 612 | #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000 |
357 | #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000 | 613 | #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000 |
614 | |||
615 | /* Enable RJ45 magjack pair swapping on 10GBase-T PHY, 84833 only */ | ||
616 | #define PORT_HW_CFG_RJ45_PR_SWP_MASK 0x00400000 | ||
617 | #define PORT_HW_CFG_RJ45_PR_SWP_SHIFT 22 | ||
618 | #define PORT_HW_CFG_RJ45_PR_SWP_DISABLED 0x00000000 | ||
619 | #define PORT_HW_CFG_RJ45_PR_SWP_ENABLED 0x00400000 | ||
620 | |||
621 | /* Determine the Serdes electrical interface */ | ||
622 | #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000 | ||
623 | #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24 | ||
624 | #define PORT_HW_CFG_NET_SERDES_IF_SGMII 0x00000000 | ||
625 | #define PORT_HW_CFG_NET_SERDES_IF_XFI 0x01000000 | ||
626 | #define PORT_HW_CFG_NET_SERDES_IF_SFI 0x02000000 | ||
627 | #define PORT_HW_CFG_NET_SERDES_IF_KR 0x03000000 | ||
628 | #define PORT_HW_CFG_NET_SERDES_IF_DXGXS 0x04000000 | ||
629 | #define PORT_HW_CFG_NET_SERDES_IF_KR2 0x05000000 | ||
630 | |||
358 | 631 | ||
359 | u32 speed_capability_mask2; /* 0x28C */ | 632 | u32 speed_capability_mask2; /* 0x28C */ |
360 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF | 633 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF |
361 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 | 634 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 |
362 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001 | 635 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001 |
363 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002 | 636 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002 |
364 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004 | 637 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004 |
365 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008 | 638 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008 |
366 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010 | 639 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010 |
367 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020 | 640 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020 |
368 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040 | 641 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040 |
369 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12G 0x00000080 | 642 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G 0x00000080 |
370 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12_DOT_5G 0x00000100 | 643 | |
371 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_13G 0x00000200 | 644 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000 |
372 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_15G 0x00000400 | 645 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16 |
373 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_16G 0x00000800 | 646 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000 |
374 | 647 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000 | |
375 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000 | 648 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000 |
376 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16 | 649 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000 |
377 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000 | 650 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000 |
378 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000 | 651 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000 |
379 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000 | 652 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000 |
380 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000 | 653 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G 0x00800000 |
381 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000 | 654 | |
382 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000 | 655 | |
383 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000 | 656 | /* In the case where two media types (e.g. copper and fiber) are |
384 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12G 0x00800000 | 657 | present and electrically active at the same time, PHY Selection |
385 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12_DOT_5G 0x01000000 | 658 | will determine which of the two PHYs will be designated as the |
386 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_13G 0x02000000 | 659 | Active PHY and used for a connection to the network. */ |
387 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_15G 0x04000000 | 660 | u32 multi_phy_config; /* 0x290 */ |
388 | #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_16G 0x08000000 | 661 | #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007 |
389 | 662 | #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0 | |
390 | /* In the case where two media types (e.g. copper and fiber) are | 663 | #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000 |
391 | present and electrically active at the same time, PHY Selection | 664 | #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001 |
392 | will determine which of the two PHYs will be designated as the | 665 | #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002 |
393 | Active PHY and used for a connection to the network. */ | 666 | #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003 |
394 | u32 multi_phy_config; /* 0x290 */ | 667 | #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004 |
395 | #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007 | 668 | |
396 | #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0 | 669 | /* When enabled, all second phy nvram parameters will be swapped |
397 | #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000 | 670 | with the first phy parameters */ |
398 | #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001 | 671 | #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008 |
399 | #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002 | 672 | #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3 |
400 | #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003 | 673 | #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000 |
401 | #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004 | 674 | #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008 |
402 | 675 | ||
403 | /* When enabled, all second phy nvram parameters will be swapped | 676 | |
404 | with the first phy parameters */ | 677 | /* Address of the second external phy */ |
405 | #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008 | 678 | u32 external_phy_config2; /* 0x294 */ |
406 | #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3 | 679 | #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF |
407 | #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000 | 680 | #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0 |
408 | #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008 | 681 | |
409 | 682 | /* The second XGXS external PHY type */ | |
410 | 683 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00 | |
411 | /* Address of the second external phy */ | 684 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8 |
412 | u32 external_phy_config2; /* 0x294 */ | 685 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000 |
413 | #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF | 686 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100 |
414 | #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0 | 687 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200 |
415 | 688 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300 | |
416 | /* The second XGXS external PHY type */ | 689 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400 |
417 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00 | 690 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500 |
418 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8 | 691 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600 |
419 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000 | 692 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700 |
420 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100 | 693 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800 |
421 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200 | 694 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900 |
422 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300 | 695 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00 |
423 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400 | 696 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00 |
424 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500 | 697 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00 |
425 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600 | 698 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00 |
426 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700 | 699 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616 0x00000e00 |
427 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800 | 700 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00 |
428 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900 | 701 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 |
429 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00 | 702 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 |
430 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00 | 703 | |
431 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00 | 704 | |
432 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00 | 705 | /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as |
433 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00 | 706 | 8706, 8726 and 8727) not all 4 values are needed. */ |
434 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 | 707 | u16 xgxs_config2_rx[4]; /* 0x296 */ |
435 | #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 | 708 | u16 xgxs_config2_tx[4]; /* 0x2A0 */ |
436 | |||
437 | /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as | ||
438 | 8706, 8726 and 8727) not all 4 values are needed. */ | ||
439 | u16 xgxs_config2_rx[4]; /* 0x296 */ | ||
440 | u16 xgxs_config2_tx[4]; /* 0x2A0 */ | ||
441 | 709 | ||
442 | u32 lane_config; | 710 | u32 lane_config; |
443 | #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff | 711 | #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff |
444 | #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0 | 712 | #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0 |
445 | 713 | /* AN and forced */ | |
446 | #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff | 714 | #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b |
447 | #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 | 715 | /* forced only */ |
448 | #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00 | 716 | #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4 |
449 | #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 | 717 | /* forced only */ |
450 | #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000 | 718 | #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 |
451 | #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14 | 719 | /* forced only */ |
452 | /* AN and forced */ | 720 | #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 |
453 | #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b | 721 | #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff |
454 | /* forced only */ | 722 | #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 |
455 | #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4 | 723 | #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00 |
456 | /* forced only */ | 724 | #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 |
457 | #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 | 725 | #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000 |
458 | /* forced only */ | 726 | #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14 |
459 | #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 | 727 | |
460 | /* Indicate whether to swap the external phy polarity */ | 728 | /* Indicate whether to swap the external phy polarity */ |
461 | #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000 | 729 | #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000 |
462 | #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000 | 730 | #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000 |
463 | #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000 | 731 | #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000 |
732 | |||
464 | 733 | ||
465 | u32 external_phy_config; | 734 | u32 external_phy_config; |
466 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000 | 735 | #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff |
467 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24 | 736 | #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0 |
468 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000 | 737 | |
469 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000 | 738 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00 |
470 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD 0x02000000 | 739 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8 |
471 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000 | 740 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000 |
472 | 741 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100 | |
473 | #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000 | 742 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200 |
474 | #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16 | 743 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300 |
475 | 744 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400 | |
476 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00 | 745 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500 |
477 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8 | 746 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600 |
478 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000 | 747 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700 |
479 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100 | 748 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800 |
480 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200 | 749 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 |
481 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300 | 750 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 |
482 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400 | 751 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00 |
483 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500 | 752 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54640 0x00000c00 |
484 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600 | 753 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00 |
485 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700 | 754 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616 0x00000e00 |
486 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800 | 755 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00 |
487 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 | 756 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00 |
488 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 | 757 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 |
489 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00 | 758 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 |
490 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00 | 759 | |
491 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00 | 760 | #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000 |
492 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 | 761 | #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16 |
493 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 | 762 | |
494 | 763 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000 | |
495 | #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff | 764 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24 |
496 | #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0 | 765 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000 |
766 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000 | ||
767 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD 0x02000000 | ||
768 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000 | ||
497 | 769 | ||
498 | u32 speed_capability_mask; | 770 | u32 speed_capability_mask; |
499 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000 | 771 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff |
500 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16 | 772 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0 |
501 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000 | 773 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001 |
502 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000 | 774 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002 |
503 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000 | 775 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004 |
504 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000 | 776 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008 |
505 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000 | 777 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010 |
506 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000 | 778 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020 |
507 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000 | 779 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040 |
508 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_12G 0x00800000 | 780 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G 0x00000080 |
509 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_12_5G 0x01000000 | 781 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000 |
510 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_13G 0x02000000 | 782 | |
511 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_15G 0x04000000 | 783 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000 |
512 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_16G 0x08000000 | 784 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16 |
513 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000 | 785 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000 |
514 | 786 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000 | |
515 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff | 787 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000 |
516 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0 | 788 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000 |
517 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001 | 789 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000 |
518 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002 | 790 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000 |
519 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004 | 791 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000 |
520 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008 | 792 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G 0x00800000 |
521 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010 | 793 | #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000 |
522 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020 | 794 | |
523 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040 | 795 | /* A place to hold the original MAC address as a backup */ |
524 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_12G 0x00000080 | 796 | u32 backup_mac_upper; /* 0x2B4 */ |
525 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_12_5G 0x00000100 | 797 | u32 backup_mac_lower; /* 0x2B8 */ |
526 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_13G 0x00000200 | ||
527 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_15G 0x00000400 | ||
528 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_16G 0x00000800 | ||
529 | #define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000 | ||
530 | |||
531 | u32 reserved[2]; | ||
532 | 798 | ||
533 | }; | 799 | }; |
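Every field in port_hw_cfg above follows the header's MASK/SHIFT convention: the value is read by AND-ing the 32-bit word with the field's _MASK and shifting right by its _SHIFT. A minimal user-space sketch of that idiom, with the mask values copied from the multi_phy_config defines above; the get_cfg_field() helper and the sample NVRAM word are illustrative only, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Mirrored from the multi_phy_config defines in port_hw_cfg above */
#define PORT_HW_CFG_PHY_SELECTION_MASK   0x00000007
#define PORT_HW_CFG_PHY_SELECTION_SHIFT  0
#define PORT_HW_CFG_PHY_SWAPPED_MASK     0x00000008
#define PORT_HW_CFG_PHY_SWAPPED_SHIFT    3

/* Illustrative helper: extract a MASK/SHIFT encoded field from a config word */
static uint32_t get_cfg_field(uint32_t word, uint32_t mask, uint32_t shift)
{
	return (word & mask) >> shift;
}

int main(void)
{
	uint32_t multi_phy_config = 0x00000009;	/* hypothetical NVRAM value */

	printf("PHY selection: %u\n",
	       (unsigned)get_cfg_field(multi_phy_config,
				       PORT_HW_CFG_PHY_SELECTION_MASK,
				       PORT_HW_CFG_PHY_SELECTION_SHIFT));
	printf("PHYs swapped : %u\n",
	       (unsigned)get_cfg_field(multi_phy_config,
				       PORT_HW_CFG_PHY_SWAPPED_MASK,
				       PORT_HW_CFG_PHY_SWAPPED_SHIFT));
	return 0;
}

The same pattern applies to media_type, lane_config and the speed capability masks in this structure.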
534 | 800 | ||
535 | 801 | ||
536 | /**************************************************************************** | 802 | /**************************************************************************** |
537 | * Shared Feature configuration * | 803 | * Shared Feature configuration * |
538 | ****************************************************************************/ | 804 | ****************************************************************************/ |
539 | struct shared_feat_cfg { /* NVRAM Offset */ | 805 | struct shared_feat_cfg { /* NVRAM Offset */ |
806 | |||
807 | u32 config; /* 0x450 */ | ||
808 | #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 | ||
809 | |||
810 | /* Use NVRAM values instead of HW default values */ | ||
811 | #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \ | ||
812 | 0x00000002 | ||
813 | #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \ | ||
814 | 0x00000000 | ||
815 | #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \ | ||
816 | 0x00000002 | ||
540 | 817 | ||
541 | u32 config; /* 0x450 */ | 818 | #define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK 0x00000008 |
542 | #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 | 819 | #define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO 0x00000000 |
820 | #define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM 0x00000008 | ||
543 | 821 | ||
544 | /* Use the values from options 47 and 48 instead of the HW default | 822 | #define SHARED_FEAT_CFG_NCSI_ID_MASK 0x00000030 |
545 | values */ | 823 | #define SHARED_FEAT_CFG_NCSI_ID_SHIFT 4 |
546 | #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000 | ||
547 | #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002 | ||
548 | 824 | ||
549 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700 | 825 | /* Override the OTP back to single function mode. When using GPIO, |
550 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8 | 826 | high means SF only, 0 means the CLP configuration is used */ |
551 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000 | 827 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700 |
552 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 | 828 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8 |
553 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 | 829 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000 |
554 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 | 830 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 |
831 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 | ||
832 | #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 | ||
833 | |||
834 | /* The interval in seconds between sending LLDP packets. Set to zero | ||
835 | to disable the feature */ | ||
836 | #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK 0x00ff0000 | ||
837 | #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT 16 | ||
838 | |||
839 | /* The assigned device type ID for LLDP usage */ | ||
840 | #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK 0xff000000 | ||
841 | #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT 24 | ||
555 | 842 | ||
556 | }; | 843 | }; |
557 | 844 | ||
558 | 845 | ||
559 | /**************************************************************************** | 846 | /**************************************************************************** |
560 | * Port Feature configuration * | 847 | * Port Feature configuration * |
561 | ****************************************************************************/ | 848 | ****************************************************************************/ |
562 | struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ | 849 | struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ |
563 | 850 | ||
564 | u32 config; | 851 | u32 config; |
565 | #define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f | 852 | #define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f |
566 | #define PORT_FEATURE_BAR1_SIZE_SHIFT 0 | 853 | #define PORT_FEATURE_BAR1_SIZE_SHIFT 0 |
567 | #define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000 | 854 | #define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000 |
568 | #define PORT_FEATURE_BAR1_SIZE_64K 0x00000001 | 855 | #define PORT_FEATURE_BAR1_SIZE_64K 0x00000001 |
569 | #define PORT_FEATURE_BAR1_SIZE_128K 0x00000002 | 856 | #define PORT_FEATURE_BAR1_SIZE_128K 0x00000002 |
570 | #define PORT_FEATURE_BAR1_SIZE_256K 0x00000003 | 857 | #define PORT_FEATURE_BAR1_SIZE_256K 0x00000003 |
571 | #define PORT_FEATURE_BAR1_SIZE_512K 0x00000004 | 858 | #define PORT_FEATURE_BAR1_SIZE_512K 0x00000004 |
572 | #define PORT_FEATURE_BAR1_SIZE_1M 0x00000005 | 859 | #define PORT_FEATURE_BAR1_SIZE_1M 0x00000005 |
573 | #define PORT_FEATURE_BAR1_SIZE_2M 0x00000006 | 860 | #define PORT_FEATURE_BAR1_SIZE_2M 0x00000006 |
574 | #define PORT_FEATURE_BAR1_SIZE_4M 0x00000007 | 861 | #define PORT_FEATURE_BAR1_SIZE_4M 0x00000007 |
575 | #define PORT_FEATURE_BAR1_SIZE_8M 0x00000008 | 862 | #define PORT_FEATURE_BAR1_SIZE_8M 0x00000008 |
576 | #define PORT_FEATURE_BAR1_SIZE_16M 0x00000009 | 863 | #define PORT_FEATURE_BAR1_SIZE_16M 0x00000009 |
577 | #define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a | 864 | #define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a |
578 | #define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b | 865 | #define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b |
579 | #define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c | 866 | #define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c |
580 | #define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d | 867 | #define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d |
581 | #define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e | 868 | #define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e |
582 | #define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f | 869 | #define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f |
583 | #define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0 | 870 | #define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0 |
584 | #define PORT_FEATURE_BAR2_SIZE_SHIFT 4 | 871 | #define PORT_FEATURE_BAR2_SIZE_SHIFT 4 |
585 | #define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000 | 872 | #define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000 |
586 | #define PORT_FEATURE_BAR2_SIZE_64K 0x00000010 | 873 | #define PORT_FEATURE_BAR2_SIZE_64K 0x00000010 |
587 | #define PORT_FEATURE_BAR2_SIZE_128K 0x00000020 | 874 | #define PORT_FEATURE_BAR2_SIZE_128K 0x00000020 |
588 | #define PORT_FEATURE_BAR2_SIZE_256K 0x00000030 | 875 | #define PORT_FEATURE_BAR2_SIZE_256K 0x00000030 |
589 | #define PORT_FEATURE_BAR2_SIZE_512K 0x00000040 | 876 | #define PORT_FEATURE_BAR2_SIZE_512K 0x00000040 |
590 | #define PORT_FEATURE_BAR2_SIZE_1M 0x00000050 | 877 | #define PORT_FEATURE_BAR2_SIZE_1M 0x00000050 |
591 | #define PORT_FEATURE_BAR2_SIZE_2M 0x00000060 | 878 | #define PORT_FEATURE_BAR2_SIZE_2M 0x00000060 |
592 | #define PORT_FEATURE_BAR2_SIZE_4M 0x00000070 | 879 | #define PORT_FEATURE_BAR2_SIZE_4M 0x00000070 |
593 | #define PORT_FEATURE_BAR2_SIZE_8M 0x00000080 | 880 | #define PORT_FEATURE_BAR2_SIZE_8M 0x00000080 |
594 | #define PORT_FEATURE_BAR2_SIZE_16M 0x00000090 | 881 | #define PORT_FEATURE_BAR2_SIZE_16M 0x00000090 |
595 | #define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0 | 882 | #define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0 |
596 | #define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0 | 883 | #define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0 |
597 | #define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0 | 884 | #define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0 |
598 | #define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0 | 885 | #define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0 |
599 | #define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0 | 886 | #define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0 |
600 | #define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0 | 887 | #define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0 |
601 | #define PORT_FEATURE_EN_SIZE_MASK 0x07000000 | 888 | |
602 | #define PORT_FEATURE_EN_SIZE_SHIFT 24 | 889 | #define PORT_FEAT_CFG_DCBX_MASK 0x00000100 |
603 | #define PORT_FEATURE_WOL_ENABLED 0x01000000 | 890 | #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000 |
604 | #define PORT_FEATURE_MBA_ENABLED 0x02000000 | 891 | #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100 |
605 | #define PORT_FEATURE_MFW_ENABLED 0x04000000 | 892 | |
606 | 893 | #define PORT_FEAT_CFG_AUTOGREEN_MASK 0x00000200 | |
607 | /* Reserved bits: 28-29 */ | 894 | #define PORT_FEAT_CFG_AUTOGREEN_SHIFT 9 |
608 | /* Check the optic vendor via i2c against a list of approved modules | 895 | #define PORT_FEAT_CFG_AUTOGREEN_DISABLED 0x00000000 |
609 | in a separate nvram image */ | 896 | #define PORT_FEAT_CFG_AUTOGREEN_ENABLED 0x00000200 |
610 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xE0000000 | 897 | |
611 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29 | 898 | #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000 |
612 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT 0x00000000 | 899 | #define PORT_FEATURE_EN_SIZE_SHIFT 24 |
613 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER 0x20000000 | 900 | #define PORT_FEATURE_WOL_ENABLED 0x01000000 |
614 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000 | 901 | #define PORT_FEATURE_MBA_ENABLED 0x02000000 |
615 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000 | 902 | #define PORT_FEATURE_MFW_ENABLED 0x04000000 |
616 | 903 | ||
904 | /* Advertise expansion ROM even if MBA is disabled */ | ||
905 | #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK 0x08000000 | ||
906 | #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED 0x00000000 | ||
907 | #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED 0x08000000 | ||
908 | |||
909 | /* Check the optic vendor via i2c against a list of approved modules | ||
910 | in a separate nvram image */ | ||
911 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xe0000000 | ||
912 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29 | ||
913 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \ | ||
914 | 0x00000000 | ||
915 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \ | ||
916 | 0x20000000 | ||
917 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000 | ||
918 | #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000 | ||
617 | 919 | ||
618 | u32 wol_config; | 920 | u32 wol_config; |
619 | /* Default is used when driver sets to "auto" mode */ | 921 | /* Default is used when driver sets to "auto" mode */ |
620 | #define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003 | 922 | #define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003 |
621 | #define PORT_FEATURE_WOL_DEFAULT_SHIFT 0 | 923 | #define PORT_FEATURE_WOL_DEFAULT_SHIFT 0 |
622 | #define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000 | 924 | #define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000 |
623 | #define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001 | 925 | #define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001 |
624 | #define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002 | 926 | #define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002 |
625 | #define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003 | 927 | #define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003 |
626 | #define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004 | 928 | #define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004 |
627 | #define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 0x00000008 | 929 | #define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 0x00000008 |
628 | #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010 | 930 | #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010 |
629 | 931 | ||
630 | u32 mba_config; | 932 | u32 mba_config; |
631 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000003 | 933 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000007 |
632 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0 | 934 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0 |
633 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000 | 935 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000 |
634 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001 | 936 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001 |
635 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002 | 937 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002 |
636 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003 | 938 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003 |
637 | #define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100 | 939 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT 0x00000004 |
638 | #define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200 | 940 | #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE 0x00000007 |
639 | #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400 | 941 | |
640 | #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000 | 942 | #define PORT_FEATURE_MBA_BOOT_RETRY_MASK 0x00000038 |
641 | #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800 | 943 | #define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT 3 |
642 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000 | 944 | |
643 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12 | 945 | #define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100 |
644 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000 | 946 | #define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200 |
645 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000 | 947 | #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400 |
646 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000 | 948 | #define PORT_FEATURE_MBA_HOTKEY_MASK 0x00000800 |
647 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000 | 949 | #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000 |
648 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000 | 950 | #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800 |
649 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000 | 951 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000 |
650 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000 | 952 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12 |
651 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000 | 953 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000 |
652 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000 | 954 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000 |
653 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000 | 955 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000 |
654 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000 | 956 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000 |
655 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000 | 957 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000 |
656 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000 | 958 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000 |
657 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000 | 959 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000 |
658 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000 | 960 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000 |
659 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000 | 961 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000 |
660 | #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000 | 962 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000 |
661 | #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20 | 963 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000 |
662 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000 | 964 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000 |
663 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24 | 965 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000 |
664 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000 | 966 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000 |
665 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000 | 967 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000 |
666 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000 | 968 | #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000 |
667 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000 | 969 | #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000 |
668 | #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000 | 970 | #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20 |
669 | #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26 | 971 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000 |
670 | #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000 | 972 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24 |
671 | #define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000 | 973 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000 |
672 | #define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000 | 974 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000 |
673 | #define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000 | 975 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000 |
674 | #define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000 | 976 | #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000 |
675 | #define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000 | 977 | #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000 |
676 | #define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000 | 978 | #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26 |
677 | #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000 | 979 | #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000 |
678 | #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_KX4 0x20000000 | 980 | #define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000 |
679 | #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_KR 0x24000000 | 981 | #define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000 |
680 | #define PORT_FEATURE_MBA_LINK_SPEED_12GBPS 0x28000000 | 982 | #define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000 |
681 | #define PORT_FEATURE_MBA_LINK_SPEED_12_5GBPS 0x2c000000 | 983 | #define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000 |
682 | #define PORT_FEATURE_MBA_LINK_SPEED_13GBPS 0x30000000 | 984 | #define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000 |
683 | #define PORT_FEATURE_MBA_LINK_SPEED_15GBPS 0x34000000 | 985 | #define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000 |
684 | #define PORT_FEATURE_MBA_LINK_SPEED_16GBPS 0x38000000 | 986 | #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000 |
685 | 987 | #define PORT_FEATURE_MBA_LINK_SPEED_20GBPS 0x20000000 | |
686 | u32 bmc_config; | 988 | u32 bmc_config; |
687 | #define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000 | 989 | #define PORT_FEATURE_BMC_LINK_OVERRIDE_MASK 0x00000001 |
688 | #define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001 | 990 | #define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000 |
991 | #define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001 | ||
689 | 992 | ||
690 | u32 mba_vlan_cfg; | 993 | u32 mba_vlan_cfg; |
691 | #define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff | 994 | #define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff |
692 | #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0 | 995 | #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0 |
693 | #define PORT_FEATURE_MBA_VLAN_EN 0x00010000 | 996 | #define PORT_FEATURE_MBA_VLAN_EN 0x00010000 |
694 | 997 | ||
695 | u32 resource_cfg; | 998 | u32 resource_cfg; |
696 | #define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001 | 999 | #define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001 |
697 | #define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002 | 1000 | #define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002 |
698 | #define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004 | 1001 | #define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004 |
699 | #define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008 | 1002 | #define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008 |
700 | #define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010 | 1003 | #define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010 |
701 | 1004 | ||
702 | u32 smbus_config; | 1005 | u32 smbus_config; |
703 | /* Obsolete */ | 1006 | #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe |
704 | #define PORT_FEATURE_SMBUS_EN 0x00000001 | 1007 | #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1 |
705 | #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe | 1008 | |
706 | #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1 | 1009 | u32 vf_config; |
707 | 1010 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK 0x0000000f | |
708 | u32 reserved1; | 1011 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT 0 |
1012 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED 0x00000000 | ||
1013 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_4K 0x00000001 | ||
1014 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_8K 0x00000002 | ||
1015 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_16K 0x00000003 | ||
1016 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_32K 0x00000004 | ||
1017 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_64K 0x00000005 | ||
1018 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_128K 0x00000006 | ||
1019 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_256K 0x00000007 | ||
1020 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_512K 0x00000008 | ||
1021 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_1M 0x00000009 | ||
1022 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_2M 0x0000000a | ||
1023 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_4M 0x0000000b | ||
1024 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_8M 0x0000000c | ||
1025 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_16M 0x0000000d | ||
1026 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_32M 0x0000000e | ||
1027 | #define PORT_FEAT_CFG_VF_BAR2_SIZE_64M 0x0000000f | ||
709 | 1028 | ||
710 | u32 link_config; /* Used as HW defaults for the driver */ | 1029 | u32 link_config; /* Used as HW defaults for the driver */ |
711 | #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000 | 1030 | #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000 |
712 | #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24 | 1031 | #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24 |
713 | /* (forced) low speed switch (< 10G) */ | 1032 | /* (forced) low speed switch (< 10G) */ |
714 | #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000 | 1033 | #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000 |
715 | /* (forced) high speed switch (>= 10G) */ | 1034 | /* (forced) high speed switch (>= 10G) */ |
716 | #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000 | 1035 | #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000 |
717 | #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000 | 1036 | #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000 |
718 | #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000 | 1037 | #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000 |
719 | 1038 | ||
720 | #define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000 | 1039 | #define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000 |
721 | #define PORT_FEATURE_LINK_SPEED_SHIFT 16 | 1040 | #define PORT_FEATURE_LINK_SPEED_SHIFT 16 |
722 | #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000 | 1041 | #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000 |
723 | #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000 | 1042 | #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000 |
724 | #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000 | 1043 | #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000 |
725 | #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000 | 1044 | #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000 |
726 | #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000 | 1045 | #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000 |
727 | #define PORT_FEATURE_LINK_SPEED_1G 0x00050000 | 1046 | #define PORT_FEATURE_LINK_SPEED_1G 0x00050000 |
728 | #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000 | 1047 | #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000 |
729 | #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000 | 1048 | #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000 |
730 | #define PORT_FEATURE_LINK_SPEED_10G_KX4 0x00080000 | 1049 | #define PORT_FEATURE_LINK_SPEED_20G 0x00080000 |
731 | #define PORT_FEATURE_LINK_SPEED_10G_KR 0x00090000 | 1050 | |
732 | #define PORT_FEATURE_LINK_SPEED_12G 0x000a0000 | 1051 | #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700 |
733 | #define PORT_FEATURE_LINK_SPEED_12_5G 0x000b0000 | 1052 | #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8 |
734 | #define PORT_FEATURE_LINK_SPEED_13G 0x000c0000 | 1053 | #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000 |
735 | #define PORT_FEATURE_LINK_SPEED_15G 0x000d0000 | 1054 | #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100 |
736 | #define PORT_FEATURE_LINK_SPEED_16G 0x000e0000 | 1055 | #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200 |
737 | 1056 | #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300 | |
738 | #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700 | 1057 | #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400 |
739 | #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8 | ||
740 | #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000 | ||
741 | #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100 | ||
742 | #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200 | ||
743 | #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300 | ||
744 | #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400 | ||
745 | 1058 | ||
746 | /* The default for MCP link configuration, | 1059 | /* The default for MCP link configuration, |
747 | uses the same defines as link_config */ | 1060 | uses the same defines as link_config */ |
748 | u32 mfw_wol_link_cfg; | 1061 | u32 mfw_wol_link_cfg; |
1062 | |||
749 | /* The default for the driver of the second external phy, | 1063 | /* The default for the driver of the second external phy, |
750 | uses the same defines as link_config */ | 1064 | uses the same defines as link_config */ |
751 | u32 link_config2; /* 0x47C */ | 1065 | u32 link_config2; /* 0x47C */ |
752 | 1066 | ||
753 | /* The default for MCP of the second external phy, | 1067 | /* The default for MCP of the second external phy, |
754 | uses the same defines as link_config */ | 1068 | uses the same defines as link_config */ |
755 | u32 mfw_wol_link_cfg2; /* 0x480 */ | 1069 | u32 mfw_wol_link_cfg2; /* 0x480 */ |
756 | 1070 | ||
757 | u32 Reserved2[17]; /* 0x484 */ | 1071 | u32 Reserved2[17]; /* 0x484 */ |
758 | 1072 | ||
759 | }; | 1073 | }; |
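The link_config word above holds the driver's hardware defaults, and the flow-control default decodes as a simple switch over the values listed there. A hedged sketch, with the constants copied from the defines above; flow_control_str() is illustrative and not driver code:

#include <stdint.h>
#include <stdio.h>

/* Mirrored from the link_config flow-control defines above */
#define PORT_FEATURE_FLOW_CONTROL_MASK   0x00000700
#define PORT_FEATURE_FLOW_CONTROL_AUTO   0x00000000
#define PORT_FEATURE_FLOW_CONTROL_TX     0x00000100
#define PORT_FEATURE_FLOW_CONTROL_RX     0x00000200
#define PORT_FEATURE_FLOW_CONTROL_BOTH   0x00000300
#define PORT_FEATURE_FLOW_CONTROL_NONE   0x00000400

/* Illustrative decode of the NVRAM flow-control default */
const char *flow_control_str(uint32_t link_config)
{
	switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) {
	case PORT_FEATURE_FLOW_CONTROL_AUTO: return "autoneg";
	case PORT_FEATURE_FLOW_CONTROL_TX:   return "tx only";
	case PORT_FEATURE_FLOW_CONTROL_RX:   return "rx only";
	case PORT_FEATURE_FLOW_CONTROL_BOTH: return "tx+rx";
	case PORT_FEATURE_FLOW_CONTROL_NONE: return "none";
	default:                             return "unknown";
	}
}

int main(void)
{
	/* hypothetical NVRAM default: flow control = BOTH */
	printf("%s\n", flow_control_str(PORT_FEATURE_FLOW_CONTROL_BOTH));
	return 0;
}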
760 | 1074 | ||
761 | 1075 | ||
762 | /**************************************************************************** | 1076 | /**************************************************************************** |
763 | * Device Information * | 1077 | * Device Information * |
764 | ****************************************************************************/ | 1078 | ****************************************************************************/ |
765 | struct shm_dev_info { /* size */ | 1079 | struct shm_dev_info { /* size */ |
766 | 1080 | ||
767 | u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */ | 1081 | u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */ |
768 | 1082 | ||
769 | struct shared_hw_cfg shared_hw_config; /* 40 */ | 1083 | struct shared_hw_cfg shared_hw_config; /* 40 */ |
770 | 1084 | ||
771 | struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */ | 1085 | struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */ |
772 | 1086 | ||
773 | struct shared_feat_cfg shared_feature_config; /* 4 */ | 1087 | struct shared_feat_cfg shared_feature_config; /* 4 */ |
774 | 1088 | ||
775 | struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */ | 1089 | struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */ |
776 | 1090 | ||
777 | }; | 1091 | }; |
778 | 1092 | ||
779 | 1093 | ||
780 | #define FUNC_0 0 | 1094 | #if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) |
781 | #define FUNC_1 1 | 1095 | #error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition." |
782 | #define FUNC_2 2 | 1096 | #endif |
783 | #define FUNC_3 3 | ||
784 | #define FUNC_4 4 | ||
785 | #define FUNC_5 5 | ||
786 | #define FUNC_6 6 | ||
787 | #define FUNC_7 7 | ||
788 | #define E1_FUNC_MAX 2 | ||
789 | #define E1H_FUNC_MAX 8 | ||
790 | #define E2_FUNC_MAX 4 /* per path */ | ||
791 | |||
792 | #define VN_0 0 | ||
793 | #define VN_1 1 | ||
794 | #define VN_2 2 | ||
795 | #define VN_3 3 | ||
796 | #define E1VN_MAX 1 | ||
797 | #define E1HVN_MAX 4 | ||
798 | 1097 | ||
799 | #define E2_VF_MAX 64 | 1098 | #define FUNC_0 0 |
1099 | #define FUNC_1 1 | ||
1100 | #define FUNC_2 2 | ||
1101 | #define FUNC_3 3 | ||
1102 | #define FUNC_4 4 | ||
1103 | #define FUNC_5 5 | ||
1104 | #define FUNC_6 6 | ||
1105 | #define FUNC_7 7 | ||
1106 | #define E1_FUNC_MAX 2 | ||
1107 | #define E1H_FUNC_MAX 8 | ||
1108 | #define E2_FUNC_MAX 4 /* per path */ | ||
1109 | |||
1110 | #define VN_0 0 | ||
1111 | #define VN_1 1 | ||
1112 | #define VN_2 2 | ||
1113 | #define VN_3 3 | ||
1114 | #define E1VN_MAX 1 | ||
1115 | #define E1HVN_MAX 4 | ||
1116 | |||
1117 | #define E2_VF_MAX 64 /* HC_REG_VF_CONFIGURATION_SIZE */ | ||
800 | /* This value (in milliseconds) determines the frequency of the driver | 1118 | /* This value (in milliseconds) determines the frequency of the driver |
801 | * issuing the PULSE message code. The firmware monitors this periodic | 1119 | * issuing the PULSE message code. The firmware monitors this periodic |
802 | * pulse to determine when to switch to an OS-absent mode. */ | 1120 | * pulse to determine when to switch to an OS-absent mode. */ |
803 | #define DRV_PULSE_PERIOD_MS 250 | 1121 | #define DRV_PULSE_PERIOD_MS 250 |
804 | 1122 | ||
805 | /* This value (in milliseconds) determines how long the driver should | 1123 | /* This value (in milliseconds) determines how long the driver should |
806 | * wait for an acknowledgement from the firmware before timing out. Once | 1124 | * wait for an acknowledgement from the firmware before timing out. Once |
807 | * the firmware has timed out, the driver will assume there is no firmware | 1125 | * the firmware has timed out, the driver will assume there is no firmware |
808 | * running and there won't be any firmware-driver synchronization during a | 1126 | * running and there won't be any firmware-driver synchronization during a |
809 | * driver reset. */ | 1127 | * driver reset. */ |
810 | #define FW_ACK_TIME_OUT_MS 5000 | 1128 | #define FW_ACK_TIME_OUT_MS 5000 |
811 | 1129 | ||
812 | #define FW_ACK_POLL_TIME_MS 1 | 1130 | #define FW_ACK_POLL_TIME_MS 1 |
813 | 1131 | ||
814 | #define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) | 1132 | #define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) |
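The two timeouts above combine into a plain poll loop: check for the firmware acknowledgement every FW_ACK_POLL_TIME_MS and give up after FW_ACK_NUM_OF_POLL attempts, i.e. FW_ACK_TIME_OUT_MS in total. A minimal sketch of that pattern, assuming a stubbed fw_ack_received() predicate and user-space usleep() as a stand-in for the kernel's msleep():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>	/* usleep() stands in for the kernel's msleep() */

#define FW_ACK_TIME_OUT_MS  5000
#define FW_ACK_POLL_TIME_MS 1
#define FW_ACK_NUM_OF_POLL  (FW_ACK_TIME_OUT_MS / FW_ACK_POLL_TIME_MS)

/* Stub standing in for a read of the FW mailbox; succeeds on the 4th poll */
static int polls;
static bool fw_ack_received(void)
{
	return ++polls >= 4;
}

/* Poll every FW_ACK_POLL_TIME_MS until ack or FW_ACK_TIME_OUT_MS elapses */
static bool wait_for_fw_ack(void)
{
	int i;

	for (i = 0; i < FW_ACK_NUM_OF_POLL; i++) {
		if (fw_ack_received())
			return true;
		usleep(FW_ACK_POLL_TIME_MS * 1000);
	}
	return false;	/* assume no FW is running; skip driver/FW sync */
}

int main(void)
{
	printf("fw ack: %s after %d polls\n",
	       wait_for_fw_ack() ? "received" : "timed out", polls);
	return 0;
}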
815 | 1133 | ||
816 | /* LED Blink rate that will achieve ~15.9Hz */ | 1134 | /* LED Blink rate that will achieve ~15.9Hz */ |
817 | #define LED_BLINK_RATE_VAL 480 | 1135 | #define LED_BLINK_RATE_VAL 480 |
818 | 1136 | ||
819 | /**************************************************************************** | 1137 | /**************************************************************************** |
820 | * Driver <-> FW Mailbox * | 1138 | * Driver <-> FW Mailbox * |
821 | ****************************************************************************/ | 1139 | ****************************************************************************/ |
822 | struct drv_port_mb { | 1140 | struct drv_port_mb { |
823 | 1141 | ||
824 | u32 link_status; | 1142 | u32 link_status; |
825 | /* Driver should update this field on any link change event */ | 1143 | /* Driver should update this field on any link change event */ |
826 | 1144 | ||
827 | #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 | 1145 | #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 |
828 | #define LINK_STATUS_LINK_UP 0x00000001 | 1146 | #define LINK_STATUS_LINK_UP 0x00000001 |
829 | #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E | 1147 | #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E |
830 | #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1) | 1148 | #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1) |
831 | #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1) | 1149 | #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1) |
832 | #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1) | 1150 | #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1) |
833 | #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1) | 1151 | #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1) |
834 | #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1) | 1152 | #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1) |
835 | #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1) | 1153 | #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1) |
836 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1) | 1154 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1) |
837 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1) | 1155 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1) |
838 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1) | 1156 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1) |
839 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1) | 1157 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1) |
840 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1) | 1158 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1) |
841 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1) | 1159 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1) |
842 | #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1) | 1160 | #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1) |
843 | #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1) | 1161 | #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1) |
844 | #define LINK_STATUS_SPEED_AND_DUPLEX_12GTFD (11<<1) | 1162 | #define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD (11<<1) |
845 | #define LINK_STATUS_SPEED_AND_DUPLEX_12GXFD (11<<1) | 1163 | #define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD (11<<1) |
846 | #define LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD (12<<1) | 1164 | |
847 | #define LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD (12<<1) | 1165 | #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020 |
848 | #define LINK_STATUS_SPEED_AND_DUPLEX_13GTFD (13<<1) | 1166 | #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 |
849 | #define LINK_STATUS_SPEED_AND_DUPLEX_13GXFD (13<<1) | 1167 | |
850 | #define LINK_STATUS_SPEED_AND_DUPLEX_15GTFD (14<<1) | 1168 | #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 |
851 | #define LINK_STATUS_SPEED_AND_DUPLEX_15GXFD (14<<1) | 1169 | #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080 |
852 | #define LINK_STATUS_SPEED_AND_DUPLEX_16GTFD (15<<1) | 1170 | #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 |
853 | #define LINK_STATUS_SPEED_AND_DUPLEX_16GXFD (15<<1) | 1171 | |
854 | 1172 | #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 | |
855 | #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020 | 1173 | #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 |
856 | #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 | 1174 | #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800 |
857 | 1175 | #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000 | |
858 | #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 | 1176 | #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000 |
859 | #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080 | 1177 | #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000 |
860 | #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 | 1178 | #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000 |
861 | 1179 | ||
862 | #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 | 1180 | #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000 |
863 | #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 | 1181 | #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000 |
864 | #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800 | 1182 | |
865 | #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000 | 1183 | #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000 |
866 | #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000 | 1184 | #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000 |
867 | #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000 | 1185 | |
868 | #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000 | 1186 | #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 |
869 | 1187 | #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18) | |
870 | #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000 | 1188 | #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18) |
871 | #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000 | 1189 | #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18) |
872 | 1190 | #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18) | |
873 | #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000 | 1191 | |
874 | #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000 | 1192 | #define LINK_STATUS_SERDES_LINK 0x00100000 |
875 | 1193 | ||
876 | #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 | 1194 | #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000 |
877 | #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18) | 1195 | #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000 |
878 | #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18) | 1196 | #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000 |
879 | #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18) | 1197 | #define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE 0x10000000 |
880 | #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18) | ||
881 | |||
882 | #define LINK_STATUS_SERDES_LINK 0x00100000 | ||
883 | |||
884 | #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000 | ||
885 | #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000 | ||
886 | #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000 | ||
887 | #define LINK_STATUS_LINK_PARTNER_12GXFD_CAPABLE 0x01000000 | ||
888 | #define LINK_STATUS_LINK_PARTNER_12_5GXFD_CAPABLE 0x02000000 | ||
889 | #define LINK_STATUS_LINK_PARTNER_13GXFD_CAPABLE 0x04000000 | ||
890 | #define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE 0x08000000 | ||
891 | #define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE 0x10000000 | ||
892 | 1198 | ||
893 | u32 port_stx; | 1199 | u32 port_stx; |
894 | 1200 | ||
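In the link_status word above, bit 0 is the link flag and bits 1-4 carry the speed/duplex code (LINK_STATUS_SPEED_AND_DUPLEX_MASK = 0x1E); the new HSI drops the 12G-16G encodings in favour of the 20G values. A hedged sketch of decoding the field, based purely on these defines and not taken from the driver:

	/*
	 * Illustrative decode of drv_port_mb.link_status: bit 0 is the link
	 * flag, bits 1-4 hold the speed/duplex code.
	 */
	static bool example_link_is_up(u32 link_status)
	{
		return (link_status & LINK_STATUS_LINK_FLAG_MASK) ==
		       LINK_STATUS_LINK_UP;
	}

	static bool example_link_is_10g_fd(u32 link_status)
	{
		return (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) ==
		       LINK_STATUS_SPEED_AND_DUPLEX_10GXFD;
	}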
@@ -903,138 +1209,158 @@ struct drv_port_mb { | |||
903 | struct drv_func_mb { | 1209 | struct drv_func_mb { |
904 | 1210 | ||
905 | u32 drv_mb_header; | 1211 | u32 drv_mb_header; |
906 | #define DRV_MSG_CODE_MASK 0xffff0000 | 1212 | #define DRV_MSG_CODE_MASK 0xffff0000 |
907 | #define DRV_MSG_CODE_LOAD_REQ 0x10000000 | 1213 | #define DRV_MSG_CODE_LOAD_REQ 0x10000000 |
908 | #define DRV_MSG_CODE_LOAD_DONE 0x11000000 | 1214 | #define DRV_MSG_CODE_LOAD_DONE 0x11000000 |
909 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000 | 1215 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000 |
910 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000 | 1216 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000 |
911 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000 | 1217 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000 |
912 | #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 | 1218 | #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 |
913 | #define DRV_MSG_CODE_DCC_OK 0x30000000 | 1219 | #define DRV_MSG_CODE_DCC_OK 0x30000000 |
914 | #define DRV_MSG_CODE_DCC_FAILURE 0x31000000 | 1220 | #define DRV_MSG_CODE_DCC_FAILURE 0x31000000 |
915 | #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000 | 1221 | #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000 |
916 | #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000 | 1222 | #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000 |
917 | #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000 | 1223 | #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000 |
918 | #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000 | 1224 | #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000 |
919 | #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000 | 1225 | #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000 |
920 | #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000 | 1226 | #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000 |
921 | #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000 | 1227 | #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000 |
922 | /* | ||
923 | * The optic module verification commands require bootcode | ||
924 | * v5.0.6 or later | ||
925 | */ | ||
926 | #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000 | ||
927 | #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006 | ||
928 | /* | 1228 | /* |
929 | * The specific optic module verification command requires bootcode | 1229 | * The optic module verification command requires bootcode |
930 | * v5.2.12 or later | 1230 | * v5.0.6 or later; the specific optic module verification command |
1231 | * requires bootcode v5.2.12 or later | ||
931 | */ | 1232 | */ |
932 | #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 | 1233 | #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000 |
933 | #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 | 1234 | #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006 |
1235 | #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 | ||
1236 | #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 | ||
934 | 1237 | ||
935 | #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000 | 1238 | #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000 |
936 | #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 | 1239 | #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 |
937 | #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 | ||
938 | #define REQ_BC_VER_4_SET_MF_BW 0x00060202 | ||
939 | #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 | ||
940 | #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 | ||
941 | #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 | ||
942 | #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 | ||
943 | #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 | ||
944 | 1240 | ||
945 | #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff | 1241 | #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 |
1242 | |||
1243 | #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 | ||
1244 | #define REQ_BC_VER_4_SET_MF_BW 0x00060202 | ||
1245 | #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 | ||
1246 | |||
1247 | #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000 | ||
1248 | |||
1249 | #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 | ||
1250 | #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 | ||
1251 | #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 | ||
1252 | #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 | ||
1253 | |||
1254 | #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff | ||
946 | 1255 | ||
947 | u32 drv_mb_param; | 1256 | u32 drv_mb_param; |
1257 | #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000 | ||
1258 | #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000 | ||
948 | 1259 | ||
949 | u32 fw_mb_header; | 1260 | u32 fw_mb_header; |
950 | #define FW_MSG_CODE_MASK 0xffff0000 | 1261 | #define FW_MSG_CODE_MASK 0xffff0000 |
951 | #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 | 1262 | #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 |
952 | #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 | 1263 | #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 |
953 | #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 | 1264 | #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 |
954 | /* Load common chip is supported from bc 6.0.0 */ | 1265 | /* Load common chip is supported from bc 6.0.0 */ |
955 | #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000 | 1266 | #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000 |
956 | #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000 | 1267 | #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000 |
957 | #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000 | 1268 | |
958 | #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 | 1269 | #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000 |
959 | #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000 | 1270 | #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 |
960 | #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000 | 1271 | #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000 |
961 | #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000 | 1272 | #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000 |
962 | #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 | 1273 | #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000 |
963 | #define FW_MSG_CODE_DCC_DONE 0x30100000 | 1274 | #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 |
964 | #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000 | 1275 | #define FW_MSG_CODE_DCC_DONE 0x30100000 |
965 | #define FW_MSG_CODE_DIAG_REFUSE 0x50200000 | 1276 | #define FW_MSG_CODE_LLDP_DONE 0x40100000 |
966 | #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000 | 1277 | #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000 |
967 | #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000 | 1278 | #define FW_MSG_CODE_DIAG_REFUSE 0x50200000 |
968 | #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000 | 1279 | #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000 |
969 | #define FW_MSG_CODE_GET_KEY_DONE 0x80100000 | 1280 | #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000 |
970 | #define FW_MSG_CODE_NO_KEY 0x80f00000 | 1281 | #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000 |
971 | #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000 | 1282 | #define FW_MSG_CODE_GET_KEY_DONE 0x80100000 |
972 | #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000 | 1283 | #define FW_MSG_CODE_NO_KEY 0x80f00000 |
973 | #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000 | 1284 | #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000 |
974 | #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000 | 1285 | #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000 |
975 | #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000 | 1286 | #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000 |
976 | #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000 | 1287 | #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000 |
977 | #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000 | 1288 | #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000 |
978 | #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 | 1289 | #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000 |
979 | #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 | 1290 | #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000 |
980 | 1291 | #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 | |
981 | #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000 | 1292 | #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 |
982 | #define FW_MSG_CODE_LIC_RESPONSE 0xff020000 | 1293 | #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 |
983 | #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000 | 1294 | |
984 | #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 | 1295 | #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 |
985 | 1296 | #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 | |
986 | #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff | 1297 | |
1298 | #define FW_MSG_CODE_LINK_CHANGED_ACK 0x01100000 | ||
1299 | |||
1300 | #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000 | ||
1301 | #define FW_MSG_CODE_LIC_RESPONSE 0xff020000 | ||
1302 | #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000 | ||
1303 | #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 | ||
1304 | |||
1305 | #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff | ||
987 | 1306 | ||
988 | u32 fw_mb_param; | 1307 | u32 fw_mb_param; |
989 | 1308 | ||
990 | u32 drv_pulse_mb; | 1309 | u32 drv_pulse_mb; |
991 | #define DRV_PULSE_SEQ_MASK 0x00007fff | 1310 | #define DRV_PULSE_SEQ_MASK 0x00007fff |
992 | #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 | 1311 | #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 |
993 | /* The system time is in the format of | 1312 | /* |
994 | * (year-2001)*12*32 + month*32 + day. */ | 1313 | * The system time is in the format of |
995 | #define DRV_PULSE_ALWAYS_ALIVE 0x00008000 | 1314 | * (year-2001)*12*32 + month*32 + day. |
996 | /* Indicate to the firmware not to go into the | 1315 | */ |
1316 | #define DRV_PULSE_ALWAYS_ALIVE 0x00008000 | ||
1317 | /* | ||
1318 | * Indicate to the firmware not to go into the | ||
997 | * OS-absent when it is not getting driver pulse. | 1319 | * OS-absent when it is not getting driver pulse. |
998 | * This is used for debugging as well as for PXE(MBA). */ | 1320 | * This is used for debugging as well as for PXE(MBA). |
1321 | */ | ||
999 | 1322 | ||
1000 | u32 mcp_pulse_mb; | 1323 | u32 mcp_pulse_mb; |
1001 | #define MCP_PULSE_SEQ_MASK 0x00007fff | 1324 | #define MCP_PULSE_SEQ_MASK 0x00007fff |
1002 | #define MCP_PULSE_ALWAYS_ALIVE 0x00008000 | 1325 | #define MCP_PULSE_ALWAYS_ALIVE 0x00008000 |
1003 | /* Indicates to the driver not to assert due to lack | 1326 | /* Indicates to the driver not to assert due to lack |
1004 | * of MCP response */ | 1327 | * of MCP response */ |
1005 | #define MCP_EVENT_MASK 0xffff0000 | 1328 | #define MCP_EVENT_MASK 0xffff0000 |
1006 | #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 | 1329 | #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 |
1007 | 1330 | ||
1008 | u32 iscsi_boot_signature; | 1331 | u32 iscsi_boot_signature; |
1009 | u32 iscsi_boot_block_offset; | 1332 | u32 iscsi_boot_block_offset; |
1010 | 1333 | ||
1011 | u32 drv_status; | 1334 | u32 drv_status; |
1012 | #define DRV_STATUS_PMF 0x00000001 | 1335 | #define DRV_STATUS_PMF 0x00000001 |
1013 | #define DRV_STATUS_SET_MF_BW 0x00000004 | 1336 | #define DRV_STATUS_VF_DISABLED 0x00000002 |
1014 | 1337 | #define DRV_STATUS_SET_MF_BW 0x00000004 | |
1015 | #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00 | 1338 | #define DRV_STATUS_LINK_EVENT 0x00000008 |
1016 | #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100 | 1339 | |
1017 | #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200 | 1340 | #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00 |
1018 | #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400 | 1341 | #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100 |
1019 | #define DRV_STATUS_DCC_RESERVED1 0x00000800 | 1342 | #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200 |
1020 | #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000 | 1343 | #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400 |
1021 | #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000 | 1344 | #define DRV_STATUS_DCC_RESERVED1 0x00000800 |
1022 | #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 | 1345 | #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000 |
1023 | #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 | 1346 | #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000 |
1347 | |||
1348 | #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 | ||
1349 | #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 | ||
1024 | 1350 | ||
1025 | u32 virt_mac_upper; | 1351 | u32 virt_mac_upper; |
1026 | #define VIRT_MAC_SIGN_MASK 0xffff0000 | 1352 | #define VIRT_MAC_SIGN_MASK 0xffff0000 |
1027 | #define VIRT_MAC_SIGNATURE 0x564d0000 | 1353 | #define VIRT_MAC_SIGNATURE 0x564d0000 |
1028 | u32 virt_mac_lower; | 1354 | u32 virt_mac_lower; |
1029 | 1355 | ||
1030 | }; | 1356 | }; |
1031 | 1357 | ||
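drv_mb_header packs a 16-bit command (DRV_MSG_CODE_MASK) above a 16-bit sequence number (DRV_MSG_SEQ_NUMBER_MASK), and the firmware echoes the pair back through fw_mb_header under FW_MSG_CODE_MASK / FW_MSG_SEQ_NUMBER_MASK. A hedged sketch of composing and matching such a word, plus a worked instance of the drv_pulse_mb system-time formula quoted above; this is a reading of the masks, not the driver's own mailbox routine.

	/*
	 * Illustrative composition of a driver->MCP mailbox word and matching
	 * of the firmware reply, based on the masks defined above.
	 */
	static u32 example_compose_mb(u32 cmd, u16 seq)
	{
		/* command in the upper 16 bits, sequence number in the lower 16 */
		return (cmd & DRV_MSG_CODE_MASK) | (seq & DRV_MSG_SEQ_NUMBER_MASK);
	}

	static bool example_fw_acked(u32 fw_mb_header, u16 seq)
	{
		/* a reply is accepted only when the sequence numbers match */
		return (fw_mb_header & FW_MSG_SEQ_NUMBER_MASK) == seq;
	}

	/*
	 * drv_pulse_mb system-time example, using the formula quoted above:
	 * 14 June 2011 -> (2011 - 2001) * 12 * 32 + 6 * 32 + 14 = 4046.
	 */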
1032 | 1358 | ||
1033 | /**************************************************************************** | 1359 | /**************************************************************************** |
1034 | * Management firmware state * | 1360 | * Management firmware state * |
1035 | ****************************************************************************/ | 1361 | ****************************************************************************/ |
1036 | /* Allocate 440 bytes for management firmware */ | 1362 | /* Allocate 440 bytes for management firmware */ |
1037 | #define MGMTFW_STATE_WORD_SIZE 110 | 1363 | #define MGMTFW_STATE_WORD_SIZE 110 |
1038 | 1364 | ||
1039 | struct mgmtfw_state { | 1365 | struct mgmtfw_state { |
1040 | u32 opaque[MGMTFW_STATE_WORD_SIZE]; | 1366 | u32 opaque[MGMTFW_STATE_WORD_SIZE]; |
@@ -1042,25 +1368,25 @@ struct mgmtfw_state { | |||
1042 | 1368 | ||
1043 | 1369 | ||
1044 | /**************************************************************************** | 1370 | /**************************************************************************** |
1045 | * Multi-Function configuration * | 1371 | * Multi-Function configuration * |
1046 | ****************************************************************************/ | 1372 | ****************************************************************************/ |
1047 | struct shared_mf_cfg { | 1373 | struct shared_mf_cfg { |
1048 | 1374 | ||
1049 | u32 clp_mb; | 1375 | u32 clp_mb; |
1050 | #define SHARED_MF_CLP_SET_DEFAULT 0x00000000 | 1376 | #define SHARED_MF_CLP_SET_DEFAULT 0x00000000 |
1051 | /* set by CLP */ | 1377 | /* set by CLP */ |
1052 | #define SHARED_MF_CLP_EXIT 0x00000001 | 1378 | #define SHARED_MF_CLP_EXIT 0x00000001 |
1053 | /* set by MCP */ | 1379 | /* set by MCP */ |
1054 | #define SHARED_MF_CLP_EXIT_DONE 0x00010000 | 1380 | #define SHARED_MF_CLP_EXIT_DONE 0x00010000 |
1055 | 1381 | ||
1056 | }; | 1382 | }; |
1057 | 1383 | ||
1058 | struct port_mf_cfg { | 1384 | struct port_mf_cfg { |
1059 | 1385 | ||
1060 | u32 dynamic_cfg; /* device control channel */ | 1386 | u32 dynamic_cfg; /* device control channel */ |
1061 | #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff | 1387 | #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff |
1062 | #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0 | 1388 | #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0 |
1063 | #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK | 1389 | #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK |
1064 | 1390 | ||
1065 | u32 reserved[3]; | 1391 | u32 reserved[3]; |
1066 | 1392 | ||
@@ -1071,57 +1397,58 @@ struct func_mf_cfg { | |||
1071 | u32 config; | 1397 | u32 config; |
1072 | /* E/R/I/D */ | 1398 | /* E/R/I/D */ |
1073 | /* function 0 of each port cannot be hidden */ | 1399 | /* function 0 of each port cannot be hidden */ |
1074 | #define FUNC_MF_CFG_FUNC_HIDE 0x00000001 | 1400 | #define FUNC_MF_CFG_FUNC_HIDE 0x00000001 |
1075 | 1401 | ||
1076 | #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000007 | 1402 | #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000006 |
1077 | #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002 | 1403 | #define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000000 |
1078 | #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004 | 1404 | #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002 |
1079 | #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006 | 1405 | #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004 |
1080 | #define FUNC_MF_CFG_PROTOCOL_DEFAULT\ | 1406 | #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006 |
1081 | FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA | 1407 | #define FUNC_MF_CFG_PROTOCOL_DEFAULT \ |
1408 | FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA | ||
1082 | 1409 | ||
1083 | #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008 | 1410 | #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008 |
1411 | #define FUNC_MF_CFG_FUNC_DELETED 0x00000010 | ||
1084 | 1412 | ||
1085 | /* PRI */ | 1413 | /* PRI */ |
1086 | /* 0 - low priority, 3 - high priority */ | 1414 | /* 0 - low priority, 3 - high priority */ |
1087 | #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300 | 1415 | #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300 |
1088 | #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8 | 1416 | #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8 |
1089 | #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000 | 1417 | #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000 |
1090 | 1418 | ||
1091 | /* MINBW, MAXBW */ | 1419 | /* MINBW, MAXBW */ |
1092 | /* value range - 0..100, increments in 100Mbps */ | 1420 | /* value range - 0..100, increments in 100Mbps */ |
1093 | #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000 | 1421 | #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000 |
1094 | #define FUNC_MF_CFG_MIN_BW_SHIFT 16 | 1422 | #define FUNC_MF_CFG_MIN_BW_SHIFT 16 |
1095 | #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 | 1423 | #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 |
1096 | #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000 | 1424 | #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000 |
1097 | #define FUNC_MF_CFG_MAX_BW_SHIFT 24 | 1425 | #define FUNC_MF_CFG_MAX_BW_SHIFT 24 |
1098 | #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000 | 1426 | #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000 |
1099 | 1427 | ||
1100 | u32 mac_upper; /* MAC */ | 1428 | u32 mac_upper; /* MAC */ |
1101 | #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff | 1429 | #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff |
1102 | #define FUNC_MF_CFG_UPPERMAC_SHIFT 0 | 1430 | #define FUNC_MF_CFG_UPPERMAC_SHIFT 0 |
1103 | #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK | 1431 | #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK |
1104 | u32 mac_lower; | 1432 | u32 mac_lower; |
1105 | #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff | 1433 | #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff |
1106 | 1434 | ||
1107 | u32 e1hov_tag; /* VNI */ | 1435 | u32 e1hov_tag; /* VNI */ |
1108 | #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff | 1436 | #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff |
1109 | #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 | 1437 | #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 |
1110 | #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK | 1438 | #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK |
1111 | 1439 | ||
1112 | u32 reserved[2]; | 1440 | u32 reserved[2]; |
1113 | |||
1114 | }; | 1441 | }; |
1115 | 1442 | ||
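The MINBW/MAXBW fields occupy the top two bytes of func_mf_cfg.config and are expressed in units of 100 Mbps over the range 0..100. A sketch of extracting them with the mask/shift pairs above, as an illustration of the layout only:

	/*
	 * Illustrative extraction of the per-function bandwidth limits from
	 * func_mf_cfg.config, in units of 100 Mbps.
	 */
	static void example_get_mf_bw(u32 config, u16 *min_bw, u16 *max_bw)
	{
		*min_bw = (config & FUNC_MF_CFG_MIN_BW_MASK) >>
			  FUNC_MF_CFG_MIN_BW_SHIFT;
		*max_bw = (config & FUNC_MF_CFG_MAX_BW_MASK) >>
			  FUNC_MF_CFG_MAX_BW_SHIFT;
		/* FUNC_MF_CFG_MAX_BW_DEFAULT (0x64000000) decodes to 100,
		 * i.e. the full line rate. */
	}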
1116 | /* This structure is not applicable and should not be accessed on 57711 */ | 1443 | /* This structure is not applicable and should not be accessed on 57711 */ |
1117 | struct func_ext_cfg { | 1444 | struct func_ext_cfg { |
1118 | u32 func_cfg; | 1445 | u32 func_cfg; |
1119 | #define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF | 1446 | #define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF |
1120 | #define MACP_FUNC_CFG_FLAGS_SHIFT 0 | 1447 | #define MACP_FUNC_CFG_FLAGS_SHIFT 0 |
1121 | #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001 | 1448 | #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001 |
1122 | #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002 | 1449 | #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002 |
1123 | #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004 | 1450 | #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004 |
1124 | #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008 | 1451 | #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008 |
1125 | 1452 | ||
1126 | u32 iscsi_mac_addr_upper; | 1453 | u32 iscsi_mac_addr_upper; |
1127 | u32 iscsi_mac_addr_lower; | 1454 | u32 iscsi_mac_addr_lower; |
@@ -1136,73 +1463,99 @@ struct func_ext_cfg { | |||
1136 | u32 fcoe_wwn_node_name_lower; | 1463 | u32 fcoe_wwn_node_name_lower; |
1137 | 1464 | ||
1138 | u32 preserve_data; | 1465 | u32 preserve_data; |
1139 | #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0) | 1466 | #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0) |
1140 | #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1) | 1467 | #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1) |
1141 | #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2) | 1468 | #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2) |
1142 | #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3) | 1469 | #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3) |
1143 | #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4) | 1470 | #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4) |
1471 | #define MF_FUNC_CFG_PRESERVE_TX_BW (1<<5) | ||
1144 | }; | 1472 | }; |
1145 | 1473 | ||
1146 | struct mf_cfg { | 1474 | struct mf_cfg { |
1147 | 1475 | ||
1148 | struct shared_mf_cfg shared_mf_config; | 1476 | struct shared_mf_cfg shared_mf_config; /* 0x4 */ |
1149 | struct port_mf_cfg port_mf_config[PORT_MAX]; | 1477 | struct port_mf_cfg port_mf_config[PORT_MAX]; /* 0x10 * 2 = 0x20 */ |
1150 | struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; | 1478 | /* for all chips, there are 8 mf functions */ |
1151 | 1479 | struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */ | |
1152 | struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; | 1480 | /* |
1153 | }; | 1481 | * Extended configuration per function - this array does not exist and |
1154 | 1482 | * should not be accessed on 57711 | |
1483 | */ | ||
1484 | struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/ | ||
1485 | }; /* 0x224 */ | ||
1155 | 1486 | ||
1156 | /**************************************************************************** | 1487 | /**************************************************************************** |
1157 | * Shared Memory Region * | 1488 | * Shared Memory Region * |
1158 | ****************************************************************************/ | 1489 | ****************************************************************************/ |
1159 | struct shmem_region { /* SharedMem Offset (size) */ | 1490 | struct shmem_region { /* SharedMem Offset (size) */ |
1160 | 1491 | ||
1161 | u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */ | 1492 | u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */ |
1162 | #define SHR_MEM_FORMAT_REV_ID ('A'<<24) | 1493 | #define SHR_MEM_FORMAT_REV_MASK 0xff000000 |
1163 | #define SHR_MEM_FORMAT_REV_MASK 0xff000000 | 1494 | #define SHR_MEM_FORMAT_REV_ID ('A'<<24) |
1164 | /* validity bits */ | 1495 | /* validity bits */ |
1165 | #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000 | 1496 | #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000 |
1166 | #define SHR_MEM_VALIDITY_MB 0x00200000 | 1497 | #define SHR_MEM_VALIDITY_MB 0x00200000 |
1167 | #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000 | 1498 | #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000 |
1168 | #define SHR_MEM_VALIDITY_RESERVED 0x00000007 | 1499 | #define SHR_MEM_VALIDITY_RESERVED 0x00000007 |
1169 | /* One licensing bit should be set */ | 1500 | /* One licensing bit should be set */ |
1170 | #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 | 1501 | #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 |
1171 | #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008 | 1502 | #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008 |
1172 | #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010 | 1503 | #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010 |
1173 | #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020 | 1504 | #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020 |
1174 | /* Active MFW */ | 1505 | /* Active MFW */ |
1175 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000 | 1506 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000 |
1176 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040 | 1507 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0 |
1177 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080 | 1508 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040 |
1178 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0 | 1509 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080 |
1179 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0 | 1510 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0 |
1180 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0 | 1511 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0 |
1181 | 1512 | ||
1182 | struct shm_dev_info dev_info; /* 0x8 (0x438) */ | 1513 | struct shm_dev_info dev_info; /* 0x8 (0x438) */ |
1183 | 1514 | ||
1184 | struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */ | 1515 | struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */ |
1185 | 1516 | ||
1186 | /* FW information (for internal FW use) */ | 1517 | /* FW information (for internal FW use) */ |
1187 | u32 fw_info_fio_offset; /* 0x4a8 (0x4) */ | 1518 | u32 fw_info_fio_offset; /* 0x4a8 (0x4) */ |
1188 | struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */ | 1519 | struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */ |
1520 | |||
1521 | struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */ | ||
1189 | 1522 | ||
1190 | struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */ | 1523 | #ifdef BMAPI |
1191 | struct drv_func_mb func_mb[]; /* 0x684 | 1524 | /* This is a variable length array */ |
1192 | (44*2/4/8=0x58/0xb0/0x160) */ | 1525 | /* the number of function depends on the chip type */ |
1526 | struct drv_func_mb func_mb[1]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */ | ||
1527 | #else | ||
1528 | /* the number of function depends on the chip type */ | ||
1529 | struct drv_func_mb func_mb[]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */ | ||
1530 | #endif /* BMAPI */ | ||
1193 | 1531 | ||
1194 | }; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */ | 1532 | }; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */ |
1195 | 1533 | ||
1534 | /**************************************************************************** | ||
1535 | * Shared Memory 2 Region * | ||
1536 | ****************************************************************************/ | ||
1537 | /* The fw_flr_ack is actually built in the following way: */ | ||
1538 | /* 8 bit: PF ack */ | ||
1539 | /* 64 bit: VF ack */ | ||
1540 | /* 8 bit: ios_dis_ack */ | ||
1541 | /* In order to maintain endianness in the mailbox HSI, we want to keep */ | ||
1542 | /* using u32. The FW must have the VF right after the PF, since this is */ | ||
1543 | /* how it accesses arrays (it always expects the VF to reside after the */ | ||
1544 | /* PF, which makes the calculation much easier for it). */ | ||
1545 | /* In order to satisfy both constraints and keep the struct small, the */ | ||
1546 | /* code will abuse the structure defined here to achieve the actual */ | ||
1547 | /* partition above. */ | ||
1548 | /****************************************************************************/ | ||
1196 | struct fw_flr_ack { | 1549 | struct fw_flr_ack { |
1197 | u32 pf_ack; | 1550 | u32 pf_ack; |
1198 | u32 vf_ack[1]; | 1551 | u32 vf_ack[1]; |
1199 | u32 iov_dis_ack; | 1552 | u32 iov_dis_ack; |
1200 | }; | 1553 | }; |
1201 | 1554 | ||
1202 | struct fw_flr_mb { | 1555 | struct fw_flr_mb { |
1203 | u32 aggint; | 1556 | u32 aggint; |
1204 | u32 opgen_addr; | 1557 | u32 opgen_addr; |
1205 | struct fw_flr_ack ack; | 1558 | struct fw_flr_ack ack; |
1206 | }; | 1559 | }; |
1207 | 1560 | ||
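The comment above describes an 8-bit PF ack, a 64-bit VF ack and an 8-bit ios_dis_ack packed into consecutive u32 words, with the VF area placed directly after the PF so the firmware can index it as an array. As a hedged illustration of that word/bit addressing (not driver code):

	/*
	 * Illustrative word/bit addressing inside the 64-bit VF ack area
	 * that follows pf_ack, per the layout described above.
	 */
	static void example_vf_flr_ack_pos(unsigned int vf_idx,
					   unsigned int *word, u32 *mask)
	{
		*word = vf_idx / 32;		/* which u32 of the VF area */
		*mask = 1U << (vf_idx % 32);	/* bit within that word */
	}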
1208 | /**** SUPPORT FOR SHMEM ARRAYS *** | 1561 | /**** SUPPORT FOR SHMEM ARRAYS *** |
@@ -1226,36 +1579,36 @@ struct fw_flr_mb { | |||
1226 | * | 1579 | * |
1227 | * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering: | 1580 | * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering: |
1228 | * | 1581 | * |
1229 | * | | | | | 1582 | * | | | | |
1230 | * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | 1583 | * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | |
1231 | * | | | | | 1584 | * | | | | |
1232 | * | 1585 | * |
1233 | * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte: | 1586 | * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte: |
1234 | * | 1587 | * |
1235 | * | | | | | 1588 | * | | | | |
1236 | * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 | | 1589 | * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 | |
1237 | * | | | | | 1590 | * | | | | |
1238 | * | 1591 | * |
1239 | * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word: | 1592 | * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word: |
1240 | * | 1593 | * |
1241 | * | | | | | 1594 | * | | | | |
1242 | * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 | | 1595 | * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 | |
1243 | * | | | | | 1596 | * | | | | |
1244 | */ | 1597 | */ |
1245 | #define SHMEM_ARRAY_BITPOS(i, eb, fb) \ | 1598 | #define SHMEM_ARRAY_BITPOS(i, eb, fb) \ |
1246 | ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \ | 1599 | ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \ |
1247 | (((i)%((fb)/(eb))) * (eb))) | 1600 | (((i)%((fb)/(eb))) * (eb))) |
1248 | 1601 | ||
1249 | #define SHMEM_ARRAY_GET(a, i, eb, fb) \ | 1602 | #define SHMEM_ARRAY_GET(a, i, eb, fb) \ |
1250 | ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \ | 1603 | ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \ |
1251 | SHMEM_ARRAY_MASK(eb)) | 1604 | SHMEM_ARRAY_MASK(eb)) |
1252 | 1605 | ||
1253 | #define SHMEM_ARRAY_SET(a, i, eb, fb, val) \ | 1606 | #define SHMEM_ARRAY_SET(a, i, eb, fb, val) \ |
1254 | do { \ | 1607 | do { \ |
1255 | a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \ | 1608 | a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \ |
1256 | SHMEM_ARRAY_BITPOS(i, eb, fb)); \ | 1609 | SHMEM_ARRAY_BITPOS(i, eb, fb)); \ |
1257 | a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \ | 1610 | a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \ |
1258 | SHMEM_ARRAY_BITPOS(i, eb, fb)); \ | 1611 | SHMEM_ARRAY_BITPOS(i, eb, fb)); \ |
1259 | } while (0) | 1612 | } while (0) |
1260 | 1613 | ||
1261 | 1614 | ||
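SHMEM_ARRAY_GET/SET address eb-bit entries inside an array of u32 words, with fb selecting the flat, byte-flipped or word-flipped ordering illustrated above. A small usage sketch follows; it relies on the companion SHMEM_ARRAY_ENTRY and SHMEM_ARRAY_MASK helpers defined elsewhere in this header.

	/*
	 * Illustrative use of the shmem array accessors: store and read back
	 * a 4-bit entry with byte-flipped ordering (eb = 4, fb = 8).
	 */
	static u32 example_shmem_array(void)
	{
		u32 arr[2] = { 0, 0 };

		SHMEM_ARRAY_SET(arr, 3, 4, 8, 0xA);	/* write entry 3 */
		return SHMEM_ARRAY_GET(arr, 3, 4, 8);	/* reads back 0xA */
	}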
@@ -1279,23 +1632,30 @@ do { \ | |||
1279 | #define ISCSI_APP_IDX 1 | 1632 | #define ISCSI_APP_IDX 1 |
1280 | #define PREDEFINED_APP_IDX_MAX 2 | 1633 | #define PREDEFINED_APP_IDX_MAX 2 |
1281 | 1634 | ||
1635 | |||
1636 | /* Big/Little endian have the same representation. */ | ||
1282 | struct dcbx_ets_feature { | 1637 | struct dcbx_ets_feature { |
1638 | /* | ||
1639 | * For Admin MIB - is this feature supported by the | ||
1640 | * driver | For Local MIB - should this feature be enabled. | ||
1641 | */ | ||
1283 | u32 enabled; | 1642 | u32 enabled; |
1284 | u32 pg_bw_tbl[2]; | 1643 | u32 pg_bw_tbl[2]; |
1285 | u32 pri_pg_tbl[1]; | 1644 | u32 pri_pg_tbl[1]; |
1286 | }; | 1645 | }; |
1287 | 1646 | ||
1647 | /* Driver structure in LE */ | ||
1288 | struct dcbx_pfc_feature { | 1648 | struct dcbx_pfc_feature { |
1289 | #ifdef __BIG_ENDIAN | 1649 | #ifdef __BIG_ENDIAN |
1290 | u8 pri_en_bitmap; | 1650 | u8 pri_en_bitmap; |
1291 | #define DCBX_PFC_PRI_0 0x01 | 1651 | #define DCBX_PFC_PRI_0 0x01 |
1292 | #define DCBX_PFC_PRI_1 0x02 | 1652 | #define DCBX_PFC_PRI_1 0x02 |
1293 | #define DCBX_PFC_PRI_2 0x04 | 1653 | #define DCBX_PFC_PRI_2 0x04 |
1294 | #define DCBX_PFC_PRI_3 0x08 | 1654 | #define DCBX_PFC_PRI_3 0x08 |
1295 | #define DCBX_PFC_PRI_4 0x10 | 1655 | #define DCBX_PFC_PRI_4 0x10 |
1296 | #define DCBX_PFC_PRI_5 0x20 | 1656 | #define DCBX_PFC_PRI_5 0x20 |
1297 | #define DCBX_PFC_PRI_6 0x40 | 1657 | #define DCBX_PFC_PRI_6 0x40 |
1298 | #define DCBX_PFC_PRI_7 0x80 | 1658 | #define DCBX_PFC_PRI_7 0x80 |
1299 | u8 pfc_caps; | 1659 | u8 pfc_caps; |
1300 | u8 reserved; | 1660 | u8 reserved; |
1301 | u8 enabled; | 1661 | u8 enabled; |
@@ -1304,39 +1664,41 @@ struct dcbx_pfc_feature { | |||
1304 | u8 reserved; | 1664 | u8 reserved; |
1305 | u8 pfc_caps; | 1665 | u8 pfc_caps; |
1306 | u8 pri_en_bitmap; | 1666 | u8 pri_en_bitmap; |
1307 | #define DCBX_PFC_PRI_0 0x01 | 1667 | #define DCBX_PFC_PRI_0 0x01 |
1308 | #define DCBX_PFC_PRI_1 0x02 | 1668 | #define DCBX_PFC_PRI_1 0x02 |
1309 | #define DCBX_PFC_PRI_2 0x04 | 1669 | #define DCBX_PFC_PRI_2 0x04 |
1310 | #define DCBX_PFC_PRI_3 0x08 | 1670 | #define DCBX_PFC_PRI_3 0x08 |
1311 | #define DCBX_PFC_PRI_4 0x10 | 1671 | #define DCBX_PFC_PRI_4 0x10 |
1312 | #define DCBX_PFC_PRI_5 0x20 | 1672 | #define DCBX_PFC_PRI_5 0x20 |
1313 | #define DCBX_PFC_PRI_6 0x40 | 1673 | #define DCBX_PFC_PRI_6 0x40 |
1314 | #define DCBX_PFC_PRI_7 0x80 | 1674 | #define DCBX_PFC_PRI_7 0x80 |
1315 | #endif | 1675 | #endif |
1316 | }; | 1676 | }; |
1317 | 1677 | ||
1318 | struct dcbx_app_priority_entry { | 1678 | struct dcbx_app_priority_entry { |
1319 | #ifdef __BIG_ENDIAN | 1679 | #ifdef __BIG_ENDIAN |
1320 | u16 app_id; | 1680 | u16 app_id; |
1321 | u8 pri_bitmap; | 1681 | u8 pri_bitmap; |
1322 | u8 appBitfield; | 1682 | u8 appBitfield; |
1323 | #define DCBX_APP_ENTRY_VALID 0x01 | 1683 | #define DCBX_APP_ENTRY_VALID 0x01 |
1324 | #define DCBX_APP_ENTRY_SF_MASK 0x30 | 1684 | #define DCBX_APP_ENTRY_SF_MASK 0x30 |
1325 | #define DCBX_APP_ENTRY_SF_SHIFT 4 | 1685 | #define DCBX_APP_ENTRY_SF_SHIFT 4 |
1326 | #define DCBX_APP_SF_ETH_TYPE 0x10 | 1686 | #define DCBX_APP_SF_ETH_TYPE 0x10 |
1327 | #define DCBX_APP_SF_PORT 0x20 | 1687 | #define DCBX_APP_SF_PORT 0x20 |
1328 | #elif defined(__LITTLE_ENDIAN) | 1688 | #elif defined(__LITTLE_ENDIAN) |
1329 | u8 appBitfield; | 1689 | u8 appBitfield; |
1330 | #define DCBX_APP_ENTRY_VALID 0x01 | 1690 | #define DCBX_APP_ENTRY_VALID 0x01 |
1331 | #define DCBX_APP_ENTRY_SF_MASK 0x30 | 1691 | #define DCBX_APP_ENTRY_SF_MASK 0x30 |
1332 | #define DCBX_APP_ENTRY_SF_SHIFT 4 | 1692 | #define DCBX_APP_ENTRY_SF_SHIFT 4 |
1333 | #define DCBX_APP_SF_ETH_TYPE 0x10 | 1693 | #define DCBX_APP_SF_ETH_TYPE 0x10 |
1334 | #define DCBX_APP_SF_PORT 0x20 | 1694 | #define DCBX_APP_SF_PORT 0x20 |
1335 | u8 pri_bitmap; | 1695 | u8 pri_bitmap; |
1336 | u16 app_id; | 1696 | u16 app_id; |
1337 | #endif | 1697 | #endif |
1338 | }; | 1698 | }; |
1339 | 1699 | ||
1700 | |||
1701 | /* FW structure in BE */ | ||
1340 | struct dcbx_app_priority_feature { | 1702 | struct dcbx_app_priority_feature { |
1341 | #ifdef __BIG_ENDIAN | 1703 | #ifdef __BIG_ENDIAN |
1342 | u8 reserved; | 1704 | u8 reserved; |
@@ -1352,302 +1714,402 @@ struct dcbx_app_priority_feature { | |||
1352 | struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; | 1714 | struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; |
1353 | }; | 1715 | }; |
1354 | 1716 | ||
1717 | /* FW structure in BE */ | ||
1355 | struct dcbx_features { | 1718 | struct dcbx_features { |
1719 | /* PG feature */ | ||
1356 | struct dcbx_ets_feature ets; | 1720 | struct dcbx_ets_feature ets; |
1721 | /* PFC feature */ | ||
1357 | struct dcbx_pfc_feature pfc; | 1722 | struct dcbx_pfc_feature pfc; |
1723 | /* APP feature */ | ||
1358 | struct dcbx_app_priority_feature app; | 1724 | struct dcbx_app_priority_feature app; |
1359 | }; | 1725 | }; |
1360 | 1726 | ||
1727 | /* LLDP protocol parameters */ | ||
1728 | /* FW structure in BE */ | ||
1361 | struct lldp_params { | 1729 | struct lldp_params { |
1362 | #ifdef __BIG_ENDIAN | 1730 | #ifdef __BIG_ENDIAN |
1363 | u8 msg_fast_tx_interval; | 1731 | u8 msg_fast_tx_interval; |
1364 | u8 msg_tx_hold; | 1732 | u8 msg_tx_hold; |
1365 | u8 msg_tx_interval; | 1733 | u8 msg_tx_interval; |
1366 | u8 admin_status; | 1734 | u8 admin_status; |
1367 | #define LLDP_TX_ONLY 0x01 | 1735 | #define LLDP_TX_ONLY 0x01 |
1368 | #define LLDP_RX_ONLY 0x02 | 1736 | #define LLDP_RX_ONLY 0x02 |
1369 | #define LLDP_TX_RX 0x03 | 1737 | #define LLDP_TX_RX 0x03 |
1370 | #define LLDP_DISABLED 0x04 | 1738 | #define LLDP_DISABLED 0x04 |
1371 | u8 reserved1; | 1739 | u8 reserved1; |
1372 | u8 tx_fast; | 1740 | u8 tx_fast; |
1373 | u8 tx_crd_max; | 1741 | u8 tx_crd_max; |
1374 | u8 tx_crd; | 1742 | u8 tx_crd; |
1375 | #elif defined(__LITTLE_ENDIAN) | 1743 | #elif defined(__LITTLE_ENDIAN) |
1376 | u8 admin_status; | 1744 | u8 admin_status; |
1377 | #define LLDP_TX_ONLY 0x01 | 1745 | #define LLDP_TX_ONLY 0x01 |
1378 | #define LLDP_RX_ONLY 0x02 | 1746 | #define LLDP_RX_ONLY 0x02 |
1379 | #define LLDP_TX_RX 0x03 | 1747 | #define LLDP_TX_RX 0x03 |
1380 | #define LLDP_DISABLED 0x04 | 1748 | #define LLDP_DISABLED 0x04 |
1381 | u8 msg_tx_interval; | 1749 | u8 msg_tx_interval; |
1382 | u8 msg_tx_hold; | 1750 | u8 msg_tx_hold; |
1383 | u8 msg_fast_tx_interval; | 1751 | u8 msg_fast_tx_interval; |
1384 | u8 tx_crd; | 1752 | u8 tx_crd; |
1385 | u8 tx_crd_max; | 1753 | u8 tx_crd_max; |
1386 | u8 tx_fast; | 1754 | u8 tx_fast; |
1387 | u8 reserved1; | 1755 | u8 reserved1; |
1388 | #endif | 1756 | #endif |
1389 | #define REM_CHASSIS_ID_STAT_LEN 4 | 1757 | #define REM_CHASSIS_ID_STAT_LEN 4 |
1390 | #define REM_PORT_ID_STAT_LEN 4 | 1758 | #define REM_PORT_ID_STAT_LEN 4 |
1759 | /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */ | ||
1391 | u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN]; | 1760 | u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN]; |
1761 | /* Holds remote Port ID TLV header, subtype and 9B of payload. */ | ||
1392 | u32 peer_port_id[REM_PORT_ID_STAT_LEN]; | 1762 | u32 peer_port_id[REM_PORT_ID_STAT_LEN]; |
1393 | }; | 1763 | }; |
1394 | 1764 | ||
1395 | struct lldp_dcbx_stat { | 1765 | struct lldp_dcbx_stat { |
1396 | #define LOCAL_CHASSIS_ID_STAT_LEN 2 | 1766 | #define LOCAL_CHASSIS_ID_STAT_LEN 2 |
1397 | #define LOCAL_PORT_ID_STAT_LEN 2 | 1767 | #define LOCAL_PORT_ID_STAT_LEN 2 |
1768 | /* Holds local Chassis ID 8B payload of constant subtype 4. */ | ||
1398 | u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN]; | 1769 | u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN]; |
1770 | /* Holds local Port ID 8B payload of constant subtype 3. */ | ||
1399 | u32 local_port_id[LOCAL_PORT_ID_STAT_LEN]; | 1771 | u32 local_port_id[LOCAL_PORT_ID_STAT_LEN]; |
1772 | /* Number of DCBX frames transmitted. */ | ||
1400 | u32 num_tx_dcbx_pkts; | 1773 | u32 num_tx_dcbx_pkts; |
1774 | /* Number of DCBX frames received. */ | ||
1401 | u32 num_rx_dcbx_pkts; | 1775 | u32 num_rx_dcbx_pkts; |
1402 | }; | 1776 | }; |
1403 | 1777 | ||
1778 | /* ADMIN MIB - DCBX local machine default configuration. */ | ||
1404 | struct lldp_admin_mib { | 1779 | struct lldp_admin_mib { |
1405 | u32 ver_cfg_flags; | 1780 | u32 ver_cfg_flags; |
1406 | #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001 | 1781 | #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001 |
1407 | #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002 | 1782 | #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002 |
1408 | #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004 | 1783 | #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004 |
1409 | #define DCBX_ETS_RECO_TX_ENABLED 0x00000008 | 1784 | #define DCBX_ETS_RECO_TX_ENABLED 0x00000008 |
1410 | #define DCBX_ETS_RECO_VALID 0x00000010 | 1785 | #define DCBX_ETS_RECO_VALID 0x00000010 |
1411 | #define DCBX_ETS_WILLING 0x00000020 | 1786 | #define DCBX_ETS_WILLING 0x00000020 |
1412 | #define DCBX_PFC_WILLING 0x00000040 | 1787 | #define DCBX_PFC_WILLING 0x00000040 |
1413 | #define DCBX_APP_WILLING 0x00000080 | 1788 | #define DCBX_APP_WILLING 0x00000080 |
1414 | #define DCBX_VERSION_CEE 0x00000100 | 1789 | #define DCBX_VERSION_CEE 0x00000100 |
1415 | #define DCBX_VERSION_IEEE 0x00000200 | 1790 | #define DCBX_VERSION_IEEE 0x00000200 |
1416 | #define DCBX_DCBX_ENABLED 0x00000400 | 1791 | #define DCBX_DCBX_ENABLED 0x00000400 |
1417 | #define DCBX_CEE_VERSION_MASK 0x0000f000 | 1792 | #define DCBX_CEE_VERSION_MASK 0x0000f000 |
1418 | #define DCBX_CEE_VERSION_SHIFT 12 | 1793 | #define DCBX_CEE_VERSION_SHIFT 12 |
1419 | #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000 | 1794 | #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000 |
1420 | #define DCBX_CEE_MAX_VERSION_SHIFT 16 | 1795 | #define DCBX_CEE_MAX_VERSION_SHIFT 16 |
1421 | struct dcbx_features features; | 1796 | struct dcbx_features features; |
1422 | }; | 1797 | }; |
1423 | 1798 | ||
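ver_cfg_flags mixes single-bit feature/willing flags with two nibble-wide CEE version fields. A hedged sketch of decoding it with the masks and shifts above:

	/*
	 * Illustrative decode of lldp_admin_mib.ver_cfg_flags: flag bits are
	 * tested directly, the CEE versions use mask + shift.
	 */
	static void example_admin_mib(const struct lldp_admin_mib *mib,
				      bool *dcbx_on, u8 *cee_ver, u8 *cee_max)
	{
		u32 flags = mib->ver_cfg_flags;

		*dcbx_on = !!(flags & DCBX_DCBX_ENABLED);
		*cee_ver = (flags & DCBX_CEE_VERSION_MASK) >>
			   DCBX_CEE_VERSION_SHIFT;
		*cee_max = (flags & DCBX_CEE_MAX_VERSION_MASK) >>
			   DCBX_CEE_MAX_VERSION_SHIFT;
	}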
1799 | /* REMOTE MIB - remote machine DCBX configuration. */ | ||
1424 | struct lldp_remote_mib { | 1800 | struct lldp_remote_mib { |
1425 | u32 prefix_seq_num; | 1801 | u32 prefix_seq_num; |
1426 | u32 flags; | 1802 | u32 flags; |
1427 | #define DCBX_ETS_TLV_RX 0x00000001 | 1803 | #define DCBX_ETS_TLV_RX 0x00000001 |
1428 | #define DCBX_PFC_TLV_RX 0x00000002 | 1804 | #define DCBX_PFC_TLV_RX 0x00000002 |
1429 | #define DCBX_APP_TLV_RX 0x00000004 | 1805 | #define DCBX_APP_TLV_RX 0x00000004 |
1430 | #define DCBX_ETS_RX_ERROR 0x00000010 | 1806 | #define DCBX_ETS_RX_ERROR 0x00000010 |
1431 | #define DCBX_PFC_RX_ERROR 0x00000020 | 1807 | #define DCBX_PFC_RX_ERROR 0x00000020 |
1432 | #define DCBX_APP_RX_ERROR 0x00000040 | 1808 | #define DCBX_APP_RX_ERROR 0x00000040 |
1433 | #define DCBX_ETS_REM_WILLING 0x00000100 | 1809 | #define DCBX_ETS_REM_WILLING 0x00000100 |
1434 | #define DCBX_PFC_REM_WILLING 0x00000200 | 1810 | #define DCBX_PFC_REM_WILLING 0x00000200 |
1435 | #define DCBX_APP_REM_WILLING 0x00000400 | 1811 | #define DCBX_APP_REM_WILLING 0x00000400 |
1436 | #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000 | 1812 | #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000 |
1813 | #define DCBX_REMOTE_MIB_VALID 0x00002000 | ||
1437 | struct dcbx_features features; | 1814 | struct dcbx_features features; |
1438 | u32 suffix_seq_num; | 1815 | u32 suffix_seq_num; |
1439 | }; | 1816 | }; |
1440 | 1817 | ||
1818 | /* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */ | ||
1441 | struct lldp_local_mib { | 1819 | struct lldp_local_mib { |
1442 | u32 prefix_seq_num; | 1820 | u32 prefix_seq_num; |
1821 | /* Indicates if there is mismatch with negotiation results. */ | ||
1443 | u32 error; | 1822 | u32 error; |
1444 | #define DCBX_LOCAL_ETS_ERROR 0x00000001 | 1823 | #define DCBX_LOCAL_ETS_ERROR 0x00000001 |
1445 | #define DCBX_LOCAL_PFC_ERROR 0x00000002 | 1824 | #define DCBX_LOCAL_PFC_ERROR 0x00000002 |
1446 | #define DCBX_LOCAL_APP_ERROR 0x00000004 | 1825 | #define DCBX_LOCAL_APP_ERROR 0x00000004 |
1447 | #define DCBX_LOCAL_PFC_MISMATCH 0x00000010 | 1826 | #define DCBX_LOCAL_PFC_MISMATCH 0x00000010 |
1448 | #define DCBX_LOCAL_APP_MISMATCH 0x00000020 | 1827 | #define DCBX_LOCAL_APP_MISMATCH 0x00000020 |
1449 | struct dcbx_features features; | 1828 | struct dcbx_features features; |
1450 | u32 suffix_seq_num; | 1829 | u32 suffix_seq_num; |
1451 | }; | 1830 | }; |
1452 | /***END OF DCBX STRUCTURES DECLARATIONS***/ | 1831 | /***END OF DCBX STRUCTURES DECLARATIONS***/ |
1453 | 1832 | ||
1833 | struct ncsi_oem_fcoe_features { | ||
1834 | u32 fcoe_features1; | ||
1835 | #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF | ||
1836 | #define FCOE_FEATURES1_IOS_PER_CONNECTION_OFFSET 0 | ||
1837 | |||
1838 | #define FCOE_FEATURES1_LOGINS_PER_PORT_MASK 0xFFFF0000 | ||
1839 | #define FCOE_FEATURES1_LOGINS_PER_PORT_OFFSET 16 | ||
1840 | |||
1841 | u32 fcoe_features2; | ||
1842 | #define FCOE_FEATURES2_EXCHANGES_MASK 0x0000FFFF | ||
1843 | #define FCOE_FEATURES2_EXCHANGES_OFFSET 0 | ||
1844 | |||
1845 | #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_MASK 0xFFFF0000 | ||
1846 | #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_OFFSET 16 | ||
1847 | |||
1848 | u32 fcoe_features3; | ||
1849 | #define FCOE_FEATURES3_TARGETS_SUPPORTED_MASK 0x0000FFFF | ||
1850 | #define FCOE_FEATURES3_TARGETS_SUPPORTED_OFFSET 0 | ||
1851 | |||
1852 | #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_MASK 0xFFFF0000 | ||
1853 | #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_OFFSET 16 | ||
1854 | |||
1855 | u32 fcoe_features4; | ||
1856 | #define FCOE_FEATURES4_FEATURE_SETTINGS_MASK 0x0000000F | ||
1857 | #define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0 | ||
1858 | }; | ||
1859 | |||
1860 | struct ncsi_oem_data { | ||
1861 | u32 driver_version[4]; | ||
1862 | struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features; | ||
1863 | }; | ||
1864 | |||
1454 | struct shmem2_region { | 1865 | struct shmem2_region { |
1455 | 1866 | ||
1456 | u32 size; | 1867 | u32 size; /* 0x0000 */ |
1457 | 1868 | ||
1458 | u32 dcc_support; | 1869 | u32 dcc_support; /* 0x0004 */ |
1459 | #define SHMEM_DCC_SUPPORT_NONE 0x00000000 | 1870 | #define SHMEM_DCC_SUPPORT_NONE 0x00000000 |
1460 | #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001 | 1871 | #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001 |
1461 | #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004 | 1872 | #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004 |
1462 | #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008 | 1873 | #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008 |
1463 | #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040 | 1874 | #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040 |
1464 | #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080 | 1875 | #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080 |
1465 | #define SHMEM_DCC_SUPPORT_DEFAULT SHMEM_DCC_SUPPORT_NONE | 1876 | |
1466 | u32 ext_phy_fw_version2[PORT_MAX]; | 1877 | u32 ext_phy_fw_version2[PORT_MAX]; /* 0x0008 */ |
1467 | /* | 1878 | /* |
1468 | * For backwards compatibility, if the mf_cfg_addr does not exist | 1879 | * For backwards compatibility, if the mf_cfg_addr does not exist |
1469 | * (the size field is smaller than 0xc) the mf_cfg resides at the | 1880 | * (the size field is smaller than 0xc) the mf_cfg resides at the |
1470 | * end of struct shmem_region | 1881 | * end of struct shmem_region |
1471 | */ | 1882 | */ |
1472 | u32 mf_cfg_addr; | 1883 | u32 mf_cfg_addr; /* 0x0010 */ |
1473 | #define SHMEM_MF_CFG_ADDR_NONE 0x00000000 | 1884 | #define SHMEM_MF_CFG_ADDR_NONE 0x00000000 |
1474 | 1885 | ||
1475 | struct fw_flr_mb flr_mb; | 1886 | struct fw_flr_mb flr_mb; /* 0x0014 */ |
1476 | u32 dcbx_lldp_params_offset; | 1887 | u32 dcbx_lldp_params_offset; /* 0x0028 */ |
1477 | #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000 | 1888 | #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000 |
1478 | u32 dcbx_neg_res_offset; | 1889 | u32 dcbx_neg_res_offset; /* 0x002c */ |
1479 | #define SHMEM_DCBX_NEG_RES_NONE 0x00000000 | 1890 | #define SHMEM_DCBX_NEG_RES_NONE 0x00000000 |
1480 | u32 dcbx_remote_mib_offset; | 1891 | u32 dcbx_remote_mib_offset; /* 0x0030 */ |
1481 | #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000 | 1892 | #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000 |
1482 | /* | 1893 | /* |
1483 | * The other shmemX_base_addr holds the other path's shmem address | 1894 | * The other shmemX_base_addr holds the other path's shmem address |
1484 | * required for example in case of common phy init, or for path1 to know | 1895 | * required for example in case of common phy init, or for path1 to know |
1485 | * the address of mcp debug trace which is located in offset from shmem | 1896 | * the address of mcp debug trace which is located in offset from shmem |
1486 | * of path0 | 1897 | * of path0 |
1487 | */ | 1898 | */ |
1488 | u32 other_shmem_base_addr; | 1899 | u32 other_shmem_base_addr; /* 0x0034 */ |
1489 | u32 other_shmem2_base_addr; | 1900 | u32 other_shmem2_base_addr; /* 0x0038 */ |
1490 | u32 reserved1[E2_VF_MAX / 32]; | 1901 | /* |
1491 | u32 reserved2[E2_FUNC_MAX][E2_VF_MAX / 32]; | 1902 | * mcp_vf_disabled is set by the MCP to indicate to the driver which VFs |
1492 | u32 dcbx_lldp_dcbx_stat_offset; | 1903 | * were disabled/FLRed |
1493 | #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000 | 1904 | */ |
1905 | u32 mcp_vf_disabled[E2_VF_MAX / 32]; /* 0x003c */ | ||
1906 | |||
1907 | /* | ||
1908 | * drv_ack_vf_disabled is set by the PF driver to acknowledge that the | ||
1909 | * disabled VFs have been handled | ||
1910 | */ | ||
1911 | u32 drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */ | ||
1912 | |||
1913 | u32 dcbx_lldp_dcbx_stat_offset; /* 0x0064 */ | ||
1914 | #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000 | ||
1915 | |||
1916 | /* | ||
1917 | * The edebug_driver_if field is used to transfer messages from the | ||
1918 | * edebug app to the driver through shmem2. | ||
1919 | * | ||
1920 | * message format: | ||
1921 | * bits 0-2 - function number / instance of driver to perform request | ||
1922 | * bits 3-5 - op code / is_ack? | ||
1923 | * bits 6-63 - data | ||
1924 | */ | ||
1925 | u32 edebug_driver_if[2]; /* 0x0068 */ | ||
1926 | #define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR 1 | ||
1927 | #define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR 2 | ||
1928 | #define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT 3 | ||
1929 | |||
1930 | u32 nvm_retain_bitmap_addr; /* 0x0070 */ | ||
1931 | |||
1932 | u32 reserved1; /* 0x0074 */ | ||
1933 | |||
1934 | u32 reserved2[E2_FUNC_MAX]; | ||
1935 | |||
1936 | u32 reserved3[E2_FUNC_MAX];/* 0x0088 */ | ||
1937 | u32 reserved4[E2_FUNC_MAX];/* 0x0098 */ | ||
1938 | |||
1939 | u32 swim_base_addr; /* 0x0108 */ | ||
1940 | u32 swim_funcs; | ||
1941 | u32 swim_main_cb; | ||
1942 | |||
1943 | u32 reserved5[2]; | ||
1944 | |||
1945 | /* generic flags controlled by the driver */ | ||
1946 | u32 drv_flags; | ||
1947 | #define DRV_FLAGS_DCB_CONFIGURED 0x1 | ||
1948 | |||
1949 | /* pointer to extended dev_info shared data copied from nvm image */ | ||
1950 | u32 extended_dev_info_shared_addr; | ||
1951 | u32 ncsi_oem_data_addr; | ||
1952 | |||
1953 | u32 ocsd_host_addr; | ||
1954 | u32 ocbb_host_addr; | ||
1955 | u32 ocsd_req_update_interval; | ||
1494 | }; | 1956 | }; |
1495 | 1957 | ||
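The edebug_driver_if comment inside shmem2_region above defines a small message format spread over two u32 words: function number in bits 0-2, op code in bits 3-5, and the remaining bits as data. A hedged parse of the first word (the field handling is illustrative only):

	/*
	 * Illustrative parse of edebug_driver_if[0] per the bit layout
	 * documented in shmem2_region.
	 */
	static void example_parse_edebug(const u32 edebug_if[2],
					 u8 *func_num, u8 *op_code)
	{
		*func_num = edebug_if[0] & 0x7;		/* bits 0-2 */
		*op_code = (edebug_if[0] >> 3) & 0x7;	/* bits 3-5, e.g.
							 * EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT */
		/* bits 6-63 (rest of word 0 plus edebug_if[1]) carry the data */
	}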
1496 | 1958 | ||
1497 | struct emac_stats { | 1959 | struct emac_stats { |
1498 | u32 rx_stat_ifhcinoctets; | 1960 | u32 rx_stat_ifhcinoctets; |
1499 | u32 rx_stat_ifhcinbadoctets; | 1961 | u32 rx_stat_ifhcinbadoctets; |
1500 | u32 rx_stat_etherstatsfragments; | 1962 | u32 rx_stat_etherstatsfragments; |
1501 | u32 rx_stat_ifhcinucastpkts; | 1963 | u32 rx_stat_ifhcinucastpkts; |
1502 | u32 rx_stat_ifhcinmulticastpkts; | 1964 | u32 rx_stat_ifhcinmulticastpkts; |
1503 | u32 rx_stat_ifhcinbroadcastpkts; | 1965 | u32 rx_stat_ifhcinbroadcastpkts; |
1504 | u32 rx_stat_dot3statsfcserrors; | 1966 | u32 rx_stat_dot3statsfcserrors; |
1505 | u32 rx_stat_dot3statsalignmenterrors; | 1967 | u32 rx_stat_dot3statsalignmenterrors; |
1506 | u32 rx_stat_dot3statscarriersenseerrors; | 1968 | u32 rx_stat_dot3statscarriersenseerrors; |
1507 | u32 rx_stat_xonpauseframesreceived; | 1969 | u32 rx_stat_xonpauseframesreceived; |
1508 | u32 rx_stat_xoffpauseframesreceived; | 1970 | u32 rx_stat_xoffpauseframesreceived; |
1509 | u32 rx_stat_maccontrolframesreceived; | 1971 | u32 rx_stat_maccontrolframesreceived; |
1510 | u32 rx_stat_xoffstateentered; | 1972 | u32 rx_stat_xoffstateentered; |
1511 | u32 rx_stat_dot3statsframestoolong; | 1973 | u32 rx_stat_dot3statsframestoolong; |
1512 | u32 rx_stat_etherstatsjabbers; | 1974 | u32 rx_stat_etherstatsjabbers; |
1513 | u32 rx_stat_etherstatsundersizepkts; | 1975 | u32 rx_stat_etherstatsundersizepkts; |
1514 | u32 rx_stat_etherstatspkts64octets; | 1976 | u32 rx_stat_etherstatspkts64octets; |
1515 | u32 rx_stat_etherstatspkts65octetsto127octets; | 1977 | u32 rx_stat_etherstatspkts65octetsto127octets; |
1516 | u32 rx_stat_etherstatspkts128octetsto255octets; | 1978 | u32 rx_stat_etherstatspkts128octetsto255octets; |
1517 | u32 rx_stat_etherstatspkts256octetsto511octets; | 1979 | u32 rx_stat_etherstatspkts256octetsto511octets; |
1518 | u32 rx_stat_etherstatspkts512octetsto1023octets; | 1980 | u32 rx_stat_etherstatspkts512octetsto1023octets; |
1519 | u32 rx_stat_etherstatspkts1024octetsto1522octets; | 1981 | u32 rx_stat_etherstatspkts1024octetsto1522octets; |
1520 | u32 rx_stat_etherstatspktsover1522octets; | 1982 | u32 rx_stat_etherstatspktsover1522octets; |
1521 | 1983 | ||
1522 | u32 rx_stat_falsecarriererrors; | 1984 | u32 rx_stat_falsecarriererrors; |
1523 | 1985 | ||
1524 | u32 tx_stat_ifhcoutoctets; | 1986 | u32 tx_stat_ifhcoutoctets; |
1525 | u32 tx_stat_ifhcoutbadoctets; | 1987 | u32 tx_stat_ifhcoutbadoctets; |
1526 | u32 tx_stat_etherstatscollisions; | 1988 | u32 tx_stat_etherstatscollisions; |
1527 | u32 tx_stat_outxonsent; | 1989 | u32 tx_stat_outxonsent; |
1528 | u32 tx_stat_outxoffsent; | 1990 | u32 tx_stat_outxoffsent; |
1529 | u32 tx_stat_flowcontroldone; | 1991 | u32 tx_stat_flowcontroldone; |
1530 | u32 tx_stat_dot3statssinglecollisionframes; | 1992 | u32 tx_stat_dot3statssinglecollisionframes; |
1531 | u32 tx_stat_dot3statsmultiplecollisionframes; | 1993 | u32 tx_stat_dot3statsmultiplecollisionframes; |
1532 | u32 tx_stat_dot3statsdeferredtransmissions; | 1994 | u32 tx_stat_dot3statsdeferredtransmissions; |
1533 | u32 tx_stat_dot3statsexcessivecollisions; | 1995 | u32 tx_stat_dot3statsexcessivecollisions; |
1534 | u32 tx_stat_dot3statslatecollisions; | 1996 | u32 tx_stat_dot3statslatecollisions; |
1535 | u32 tx_stat_ifhcoutucastpkts; | 1997 | u32 tx_stat_ifhcoutucastpkts; |
1536 | u32 tx_stat_ifhcoutmulticastpkts; | 1998 | u32 tx_stat_ifhcoutmulticastpkts; |
1537 | u32 tx_stat_ifhcoutbroadcastpkts; | 1999 | u32 tx_stat_ifhcoutbroadcastpkts; |
1538 | u32 tx_stat_etherstatspkts64octets; | 2000 | u32 tx_stat_etherstatspkts64octets; |
1539 | u32 tx_stat_etherstatspkts65octetsto127octets; | 2001 | u32 tx_stat_etherstatspkts65octetsto127octets; |
1540 | u32 tx_stat_etherstatspkts128octetsto255octets; | 2002 | u32 tx_stat_etherstatspkts128octetsto255octets; |
1541 | u32 tx_stat_etherstatspkts256octetsto511octets; | 2003 | u32 tx_stat_etherstatspkts256octetsto511octets; |
1542 | u32 tx_stat_etherstatspkts512octetsto1023octets; | 2004 | u32 tx_stat_etherstatspkts512octetsto1023octets; |
1543 | u32 tx_stat_etherstatspkts1024octetsto1522octets; | 2005 | u32 tx_stat_etherstatspkts1024octetsto1522octets; |
1544 | u32 tx_stat_etherstatspktsover1522octets; | 2006 | u32 tx_stat_etherstatspktsover1522octets; |
1545 | u32 tx_stat_dot3statsinternalmactransmiterrors; | 2007 | u32 tx_stat_dot3statsinternalmactransmiterrors; |
1546 | }; | 2008 | }; |
1547 | 2009 | ||
1548 | 2010 | ||
1549 | struct bmac1_stats { | 2011 | struct bmac1_stats { |
1550 | u32 tx_stat_gtpkt_lo; | 2012 | u32 tx_stat_gtpkt_lo; |
1551 | u32 tx_stat_gtpkt_hi; | 2013 | u32 tx_stat_gtpkt_hi; |
1552 | u32 tx_stat_gtxpf_lo; | 2014 | u32 tx_stat_gtxpf_lo; |
1553 | u32 tx_stat_gtxpf_hi; | 2015 | u32 tx_stat_gtxpf_hi; |
1554 | u32 tx_stat_gtfcs_lo; | 2016 | u32 tx_stat_gtfcs_lo; |
1555 | u32 tx_stat_gtfcs_hi; | 2017 | u32 tx_stat_gtfcs_hi; |
1556 | u32 tx_stat_gtmca_lo; | 2018 | u32 tx_stat_gtmca_lo; |
1557 | u32 tx_stat_gtmca_hi; | 2019 | u32 tx_stat_gtmca_hi; |
1558 | u32 tx_stat_gtbca_lo; | 2020 | u32 tx_stat_gtbca_lo; |
1559 | u32 tx_stat_gtbca_hi; | 2021 | u32 tx_stat_gtbca_hi; |
1560 | u32 tx_stat_gtfrg_lo; | 2022 | u32 tx_stat_gtfrg_lo; |
1561 | u32 tx_stat_gtfrg_hi; | 2023 | u32 tx_stat_gtfrg_hi; |
1562 | u32 tx_stat_gtovr_lo; | 2024 | u32 tx_stat_gtovr_lo; |
1563 | u32 tx_stat_gtovr_hi; | 2025 | u32 tx_stat_gtovr_hi; |
1564 | u32 tx_stat_gt64_lo; | 2026 | u32 tx_stat_gt64_lo; |
1565 | u32 tx_stat_gt64_hi; | 2027 | u32 tx_stat_gt64_hi; |
1566 | u32 tx_stat_gt127_lo; | 2028 | u32 tx_stat_gt127_lo; |
1567 | u32 tx_stat_gt127_hi; | 2029 | u32 tx_stat_gt127_hi; |
1568 | u32 tx_stat_gt255_lo; | 2030 | u32 tx_stat_gt255_lo; |
1569 | u32 tx_stat_gt255_hi; | 2031 | u32 tx_stat_gt255_hi; |
1570 | u32 tx_stat_gt511_lo; | 2032 | u32 tx_stat_gt511_lo; |
1571 | u32 tx_stat_gt511_hi; | 2033 | u32 tx_stat_gt511_hi; |
1572 | u32 tx_stat_gt1023_lo; | 2034 | u32 tx_stat_gt1023_lo; |
1573 | u32 tx_stat_gt1023_hi; | 2035 | u32 tx_stat_gt1023_hi; |
1574 | u32 tx_stat_gt1518_lo; | 2036 | u32 tx_stat_gt1518_lo; |
1575 | u32 tx_stat_gt1518_hi; | 2037 | u32 tx_stat_gt1518_hi; |
1576 | u32 tx_stat_gt2047_lo; | 2038 | u32 tx_stat_gt2047_lo; |
1577 | u32 tx_stat_gt2047_hi; | 2039 | u32 tx_stat_gt2047_hi; |
1578 | u32 tx_stat_gt4095_lo; | 2040 | u32 tx_stat_gt4095_lo; |
1579 | u32 tx_stat_gt4095_hi; | 2041 | u32 tx_stat_gt4095_hi; |
1580 | u32 tx_stat_gt9216_lo; | 2042 | u32 tx_stat_gt9216_lo; |
1581 | u32 tx_stat_gt9216_hi; | 2043 | u32 tx_stat_gt9216_hi; |
1582 | u32 tx_stat_gt16383_lo; | 2044 | u32 tx_stat_gt16383_lo; |
1583 | u32 tx_stat_gt16383_hi; | 2045 | u32 tx_stat_gt16383_hi; |
1584 | u32 tx_stat_gtmax_lo; | 2046 | u32 tx_stat_gtmax_lo; |
1585 | u32 tx_stat_gtmax_hi; | 2047 | u32 tx_stat_gtmax_hi; |
1586 | u32 tx_stat_gtufl_lo; | 2048 | u32 tx_stat_gtufl_lo; |
1587 | u32 tx_stat_gtufl_hi; | 2049 | u32 tx_stat_gtufl_hi; |
1588 | u32 tx_stat_gterr_lo; | 2050 | u32 tx_stat_gterr_lo; |
1589 | u32 tx_stat_gterr_hi; | 2051 | u32 tx_stat_gterr_hi; |
1590 | u32 tx_stat_gtbyt_lo; | 2052 | u32 tx_stat_gtbyt_lo; |
1591 | u32 tx_stat_gtbyt_hi; | 2053 | u32 tx_stat_gtbyt_hi; |
1592 | 2054 | ||
1593 | u32 rx_stat_gr64_lo; | 2055 | u32 rx_stat_gr64_lo; |
1594 | u32 rx_stat_gr64_hi; | 2056 | u32 rx_stat_gr64_hi; |
1595 | u32 rx_stat_gr127_lo; | 2057 | u32 rx_stat_gr127_lo; |
1596 | u32 rx_stat_gr127_hi; | 2058 | u32 rx_stat_gr127_hi; |
1597 | u32 rx_stat_gr255_lo; | 2059 | u32 rx_stat_gr255_lo; |
1598 | u32 rx_stat_gr255_hi; | 2060 | u32 rx_stat_gr255_hi; |
1599 | u32 rx_stat_gr511_lo; | 2061 | u32 rx_stat_gr511_lo; |
1600 | u32 rx_stat_gr511_hi; | 2062 | u32 rx_stat_gr511_hi; |
1601 | u32 rx_stat_gr1023_lo; | 2063 | u32 rx_stat_gr1023_lo; |
1602 | u32 rx_stat_gr1023_hi; | 2064 | u32 rx_stat_gr1023_hi; |
1603 | u32 rx_stat_gr1518_lo; | 2065 | u32 rx_stat_gr1518_lo; |
1604 | u32 rx_stat_gr1518_hi; | 2066 | u32 rx_stat_gr1518_hi; |
1605 | u32 rx_stat_gr2047_lo; | 2067 | u32 rx_stat_gr2047_lo; |
1606 | u32 rx_stat_gr2047_hi; | 2068 | u32 rx_stat_gr2047_hi; |
1607 | u32 rx_stat_gr4095_lo; | 2069 | u32 rx_stat_gr4095_lo; |
1608 | u32 rx_stat_gr4095_hi; | 2070 | u32 rx_stat_gr4095_hi; |
1609 | u32 rx_stat_gr9216_lo; | 2071 | u32 rx_stat_gr9216_lo; |
1610 | u32 rx_stat_gr9216_hi; | 2072 | u32 rx_stat_gr9216_hi; |
1611 | u32 rx_stat_gr16383_lo; | 2073 | u32 rx_stat_gr16383_lo; |
1612 | u32 rx_stat_gr16383_hi; | 2074 | u32 rx_stat_gr16383_hi; |
1613 | u32 rx_stat_grmax_lo; | 2075 | u32 rx_stat_grmax_lo; |
1614 | u32 rx_stat_grmax_hi; | 2076 | u32 rx_stat_grmax_hi; |
1615 | u32 rx_stat_grpkt_lo; | 2077 | u32 rx_stat_grpkt_lo; |
1616 | u32 rx_stat_grpkt_hi; | 2078 | u32 rx_stat_grpkt_hi; |
1617 | u32 rx_stat_grfcs_lo; | 2079 | u32 rx_stat_grfcs_lo; |
1618 | u32 rx_stat_grfcs_hi; | 2080 | u32 rx_stat_grfcs_hi; |
1619 | u32 rx_stat_grmca_lo; | 2081 | u32 rx_stat_grmca_lo; |
1620 | u32 rx_stat_grmca_hi; | 2082 | u32 rx_stat_grmca_hi; |
1621 | u32 rx_stat_grbca_lo; | 2083 | u32 rx_stat_grbca_lo; |
1622 | u32 rx_stat_grbca_hi; | 2084 | u32 rx_stat_grbca_hi; |
1623 | u32 rx_stat_grxcf_lo; | 2085 | u32 rx_stat_grxcf_lo; |
1624 | u32 rx_stat_grxcf_hi; | 2086 | u32 rx_stat_grxcf_hi; |
1625 | u32 rx_stat_grxpf_lo; | 2087 | u32 rx_stat_grxpf_lo; |
1626 | u32 rx_stat_grxpf_hi; | 2088 | u32 rx_stat_grxpf_hi; |
1627 | u32 rx_stat_grxuo_lo; | 2089 | u32 rx_stat_grxuo_lo; |
1628 | u32 rx_stat_grxuo_hi; | 2090 | u32 rx_stat_grxuo_hi; |
1629 | u32 rx_stat_grjbr_lo; | 2091 | u32 rx_stat_grjbr_lo; |
1630 | u32 rx_stat_grjbr_hi; | 2092 | u32 rx_stat_grjbr_hi; |
1631 | u32 rx_stat_grovr_lo; | 2093 | u32 rx_stat_grovr_lo; |
1632 | u32 rx_stat_grovr_hi; | 2094 | u32 rx_stat_grovr_hi; |
1633 | u32 rx_stat_grflr_lo; | 2095 | u32 rx_stat_grflr_lo; |
1634 | u32 rx_stat_grflr_hi; | 2096 | u32 rx_stat_grflr_hi; |
1635 | u32 rx_stat_grmeg_lo; | 2097 | u32 rx_stat_grmeg_lo; |
1636 | u32 rx_stat_grmeg_hi; | 2098 | u32 rx_stat_grmeg_hi; |
1637 | u32 rx_stat_grmeb_lo; | 2099 | u32 rx_stat_grmeb_lo; |
1638 | u32 rx_stat_grmeb_hi; | 2100 | u32 rx_stat_grmeb_hi; |
1639 | u32 rx_stat_grbyt_lo; | 2101 | u32 rx_stat_grbyt_lo; |
1640 | u32 rx_stat_grbyt_hi; | 2102 | u32 rx_stat_grbyt_hi; |
1641 | u32 rx_stat_grund_lo; | 2103 | u32 rx_stat_grund_lo; |
1642 | u32 rx_stat_grund_hi; | 2104 | u32 rx_stat_grund_hi; |
1643 | u32 rx_stat_grfrg_lo; | 2105 | u32 rx_stat_grfrg_lo; |
1644 | u32 rx_stat_grfrg_hi; | 2106 | u32 rx_stat_grfrg_hi; |
1645 | u32 rx_stat_grerb_lo; | 2107 | u32 rx_stat_grerb_lo; |
1646 | u32 rx_stat_grerb_hi; | 2108 | u32 rx_stat_grerb_hi; |
1647 | u32 rx_stat_grfre_lo; | 2109 | u32 rx_stat_grfre_lo; |
1648 | u32 rx_stat_grfre_hi; | 2110 | u32 rx_stat_grfre_hi; |
1649 | u32 rx_stat_gripj_lo; | 2111 | u32 rx_stat_gripj_lo; |
1650 | u32 rx_stat_gripj_hi; | 2112 | u32 rx_stat_gripj_hi; |
1651 | }; | 2113 | }; |
1652 | 2114 | ||
1653 | struct bmac2_stats { | 2115 | struct bmac2_stats { |
@@ -1766,187 +2228,316 @@ struct bmac2_stats { | |||
1766 | u32 rx_stat_gripj_hi; | 2228 | u32 rx_stat_gripj_hi; |
1767 | }; | 2229 | }; |
1768 | 2230 | ||
2231 | struct mstat_stats { | ||
2232 | struct { | ||
2233 | /* NOTE: MSTAT on E3 has a bug where this register's contents are | ||
2234 | * actually tx_gtxpok + tx_gtxpf + (possibly)tx_gtxpp | ||
2235 | */ | ||
2236 | u32 tx_gtxpok_lo; | ||
2237 | u32 tx_gtxpok_hi; | ||
2238 | u32 tx_gtxpf_lo; | ||
2239 | u32 tx_gtxpf_hi; | ||
2240 | u32 tx_gtxpp_lo; | ||
2241 | u32 tx_gtxpp_hi; | ||
2242 | u32 tx_gtfcs_lo; | ||
2243 | u32 tx_gtfcs_hi; | ||
2244 | u32 tx_gtuca_lo; | ||
2245 | u32 tx_gtuca_hi; | ||
2246 | u32 tx_gtmca_lo; | ||
2247 | u32 tx_gtmca_hi; | ||
2248 | u32 tx_gtgca_lo; | ||
2249 | u32 tx_gtgca_hi; | ||
2250 | u32 tx_gtpkt_lo; | ||
2251 | u32 tx_gtpkt_hi; | ||
2252 | u32 tx_gt64_lo; | ||
2253 | u32 tx_gt64_hi; | ||
2254 | u32 tx_gt127_lo; | ||
2255 | u32 tx_gt127_hi; | ||
2256 | u32 tx_gt255_lo; | ||
2257 | u32 tx_gt255_hi; | ||
2258 | u32 tx_gt511_lo; | ||
2259 | u32 tx_gt511_hi; | ||
2260 | u32 tx_gt1023_lo; | ||
2261 | u32 tx_gt1023_hi; | ||
2262 | u32 tx_gt1518_lo; | ||
2263 | u32 tx_gt1518_hi; | ||
2264 | u32 tx_gt2047_lo; | ||
2265 | u32 tx_gt2047_hi; | ||
2266 | u32 tx_gt4095_lo; | ||
2267 | u32 tx_gt4095_hi; | ||
2268 | u32 tx_gt9216_lo; | ||
2269 | u32 tx_gt9216_hi; | ||
2270 | u32 tx_gt16383_lo; | ||
2271 | u32 tx_gt16383_hi; | ||
2272 | u32 tx_gtufl_lo; | ||
2273 | u32 tx_gtufl_hi; | ||
2274 | u32 tx_gterr_lo; | ||
2275 | u32 tx_gterr_hi; | ||
2276 | u32 tx_gtbyt_lo; | ||
2277 | u32 tx_gtbyt_hi; | ||
2278 | u32 tx_collisions_lo; | ||
2279 | u32 tx_collisions_hi; | ||
2280 | u32 tx_singlecollision_lo; | ||
2281 | u32 tx_singlecollision_hi; | ||
2282 | u32 tx_multiplecollisions_lo; | ||
2283 | u32 tx_multiplecollisions_hi; | ||
2284 | u32 tx_deferred_lo; | ||
2285 | u32 tx_deferred_hi; | ||
2286 | u32 tx_excessivecollisions_lo; | ||
2287 | u32 tx_excessivecollisions_hi; | ||
2288 | u32 tx_latecollisions_lo; | ||
2289 | u32 tx_latecollisions_hi; | ||
2290 | } stats_tx; | ||
2291 | |||
2292 | struct { | ||
2293 | u32 rx_gr64_lo; | ||
2294 | u32 rx_gr64_hi; | ||
2295 | u32 rx_gr127_lo; | ||
2296 | u32 rx_gr127_hi; | ||
2297 | u32 rx_gr255_lo; | ||
2298 | u32 rx_gr255_hi; | ||
2299 | u32 rx_gr511_lo; | ||
2300 | u32 rx_gr511_hi; | ||
2301 | u32 rx_gr1023_lo; | ||
2302 | u32 rx_gr1023_hi; | ||
2303 | u32 rx_gr1518_lo; | ||
2304 | u32 rx_gr1518_hi; | ||
2305 | u32 rx_gr2047_lo; | ||
2306 | u32 rx_gr2047_hi; | ||
2307 | u32 rx_gr4095_lo; | ||
2308 | u32 rx_gr4095_hi; | ||
2309 | u32 rx_gr9216_lo; | ||
2310 | u32 rx_gr9216_hi; | ||
2311 | u32 rx_gr16383_lo; | ||
2312 | u32 rx_gr16383_hi; | ||
2313 | u32 rx_grpkt_lo; | ||
2314 | u32 rx_grpkt_hi; | ||
2315 | u32 rx_grfcs_lo; | ||
2316 | u32 rx_grfcs_hi; | ||
2317 | u32 rx_gruca_lo; | ||
2318 | u32 rx_gruca_hi; | ||
2319 | u32 rx_grmca_lo; | ||
2320 | u32 rx_grmca_hi; | ||
2321 | u32 rx_grbca_lo; | ||
2322 | u32 rx_grbca_hi; | ||
2323 | u32 rx_grxpf_lo; | ||
2324 | u32 rx_grxpf_hi; | ||
2325 | u32 rx_grxpp_lo; | ||
2326 | u32 rx_grxpp_hi; | ||
2327 | u32 rx_grxuo_lo; | ||
2328 | u32 rx_grxuo_hi; | ||
2329 | u32 rx_grovr_lo; | ||
2330 | u32 rx_grovr_hi; | ||
2331 | u32 rx_grxcf_lo; | ||
2332 | u32 rx_grxcf_hi; | ||
2333 | u32 rx_grflr_lo; | ||
2334 | u32 rx_grflr_hi; | ||
2335 | u32 rx_grpok_lo; | ||
2336 | u32 rx_grpok_hi; | ||
2337 | u32 rx_grbyt_lo; | ||
2338 | u32 rx_grbyt_hi; | ||
2339 | u32 rx_grund_lo; | ||
2340 | u32 rx_grund_hi; | ||
2341 | u32 rx_grfrg_lo; | ||
2342 | u32 rx_grfrg_hi; | ||
2343 | u32 rx_grerb_lo; | ||
2344 | u32 rx_grerb_hi; | ||
2345 | u32 rx_grfre_lo; | ||
2346 | u32 rx_grfre_hi; | ||
2347 | |||
2348 | u32 rx_alignmenterrors_lo; | ||
2349 | u32 rx_alignmenterrors_hi; | ||
2350 | u32 rx_falsecarrier_lo; | ||
2351 | u32 rx_falsecarrier_hi; | ||
2352 | u32 rx_llfcmsgcnt_lo; | ||
2353 | u32 rx_llfcmsgcnt_hi; | ||
2354 | } stats_rx; | ||
2355 | }; | ||
2356 | |||
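The comment at the top of stats_tx flags an E3 MSTAT quirk: the tx_gtxpok register also accumulates the pause/PFC counters. A minimal sketch of how statistics code might compensate, assuming the mstat_stats layout above and a locally defined HILO_U64() helper (names here are illustrative, not the driver's own):

#include <linux/types.h>

/* Sketch only: recover a "good packets" count from an E3 MSTAT sample by
 * undoing the quirk noted above (tx_gtxpok also accumulates tx_gtxpf and,
 * possibly, tx_gtxpp). HILO_U64() is defined locally for this example.
 */
#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))

static u64 mstat_tx_good_pkts(const struct mstat_stats *m)
{
	u64 pok = HILO_U64(m->stats_tx.tx_gtxpok_hi, m->stats_tx.tx_gtxpok_lo);
	u64 pf  = HILO_U64(m->stats_tx.tx_gtxpf_hi,  m->stats_tx.tx_gtxpf_lo);
	u64 pp  = HILO_U64(m->stats_tx.tx_gtxpp_hi,  m->stats_tx.tx_gtxpp_lo);

	return pok - pf - pp;	/* compensate for the E3 accumulation bug */
}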
1769 | union mac_stats { | 2357 | union mac_stats { |
1770 | struct emac_stats emac_stats; | 2358 | struct emac_stats emac_stats; |
1771 | struct bmac1_stats bmac1_stats; | 2359 | struct bmac1_stats bmac1_stats; |
1772 | struct bmac2_stats bmac2_stats; | 2360 | struct bmac2_stats bmac2_stats; |
2361 | struct mstat_stats mstat_stats; | ||
1773 | }; | 2362 | }; |
1774 | 2363 | ||
1775 | 2364 | ||
1776 | struct mac_stx { | 2365 | struct mac_stx { |
1777 | /* in_bad_octets */ | 2366 | /* in_bad_octets */ |
1778 | u32 rx_stat_ifhcinbadoctets_hi; | 2367 | u32 rx_stat_ifhcinbadoctets_hi; |
1779 | u32 rx_stat_ifhcinbadoctets_lo; | 2368 | u32 rx_stat_ifhcinbadoctets_lo; |
1780 | 2369 | ||
1781 | /* out_bad_octets */ | 2370 | /* out_bad_octets */ |
1782 | u32 tx_stat_ifhcoutbadoctets_hi; | 2371 | u32 tx_stat_ifhcoutbadoctets_hi; |
1783 | u32 tx_stat_ifhcoutbadoctets_lo; | 2372 | u32 tx_stat_ifhcoutbadoctets_lo; |
1784 | 2373 | ||
1785 | /* crc_receive_errors */ | 2374 | /* crc_receive_errors */ |
1786 | u32 rx_stat_dot3statsfcserrors_hi; | 2375 | u32 rx_stat_dot3statsfcserrors_hi; |
1787 | u32 rx_stat_dot3statsfcserrors_lo; | 2376 | u32 rx_stat_dot3statsfcserrors_lo; |
1788 | /* alignment_errors */ | 2377 | /* alignment_errors */ |
1789 | u32 rx_stat_dot3statsalignmenterrors_hi; | 2378 | u32 rx_stat_dot3statsalignmenterrors_hi; |
1790 | u32 rx_stat_dot3statsalignmenterrors_lo; | 2379 | u32 rx_stat_dot3statsalignmenterrors_lo; |
1791 | /* carrier_sense_errors */ | 2380 | /* carrier_sense_errors */ |
1792 | u32 rx_stat_dot3statscarriersenseerrors_hi; | 2381 | u32 rx_stat_dot3statscarriersenseerrors_hi; |
1793 | u32 rx_stat_dot3statscarriersenseerrors_lo; | 2382 | u32 rx_stat_dot3statscarriersenseerrors_lo; |
1794 | /* false_carrier_detections */ | 2383 | /* false_carrier_detections */ |
1795 | u32 rx_stat_falsecarriererrors_hi; | 2384 | u32 rx_stat_falsecarriererrors_hi; |
1796 | u32 rx_stat_falsecarriererrors_lo; | 2385 | u32 rx_stat_falsecarriererrors_lo; |
1797 | 2386 | ||
1798 | /* runt_packets_received */ | 2387 | /* runt_packets_received */ |
1799 | u32 rx_stat_etherstatsundersizepkts_hi; | 2388 | u32 rx_stat_etherstatsundersizepkts_hi; |
1800 | u32 rx_stat_etherstatsundersizepkts_lo; | 2389 | u32 rx_stat_etherstatsundersizepkts_lo; |
1801 | /* jabber_packets_received */ | 2390 | /* jabber_packets_received */ |
1802 | u32 rx_stat_dot3statsframestoolong_hi; | 2391 | u32 rx_stat_dot3statsframestoolong_hi; |
1803 | u32 rx_stat_dot3statsframestoolong_lo; | 2392 | u32 rx_stat_dot3statsframestoolong_lo; |
1804 | 2393 | ||
1805 | /* error_runt_packets_received */ | 2394 | /* error_runt_packets_received */ |
1806 | u32 rx_stat_etherstatsfragments_hi; | 2395 | u32 rx_stat_etherstatsfragments_hi; |
1807 | u32 rx_stat_etherstatsfragments_lo; | 2396 | u32 rx_stat_etherstatsfragments_lo; |
1808 | /* error_jabber_packets_received */ | 2397 | /* error_jabber_packets_received */ |
1809 | u32 rx_stat_etherstatsjabbers_hi; | 2398 | u32 rx_stat_etherstatsjabbers_hi; |
1810 | u32 rx_stat_etherstatsjabbers_lo; | 2399 | u32 rx_stat_etherstatsjabbers_lo; |
1811 | 2400 | ||
1812 | /* control_frames_received */ | 2401 | /* control_frames_received */ |
1813 | u32 rx_stat_maccontrolframesreceived_hi; | 2402 | u32 rx_stat_maccontrolframesreceived_hi; |
1814 | u32 rx_stat_maccontrolframesreceived_lo; | 2403 | u32 rx_stat_maccontrolframesreceived_lo; |
1815 | u32 rx_stat_bmac_xpf_hi; | 2404 | u32 rx_stat_mac_xpf_hi; |
1816 | u32 rx_stat_bmac_xpf_lo; | 2405 | u32 rx_stat_mac_xpf_lo; |
1817 | u32 rx_stat_bmac_xcf_hi; | 2406 | u32 rx_stat_mac_xcf_hi; |
1818 | u32 rx_stat_bmac_xcf_lo; | 2407 | u32 rx_stat_mac_xcf_lo; |
1819 | 2408 | ||
1820 | /* xoff_state_entered */ | 2409 | /* xoff_state_entered */ |
1821 | u32 rx_stat_xoffstateentered_hi; | 2410 | u32 rx_stat_xoffstateentered_hi; |
1822 | u32 rx_stat_xoffstateentered_lo; | 2411 | u32 rx_stat_xoffstateentered_lo; |
1823 | /* pause_xon_frames_received */ | 2412 | /* pause_xon_frames_received */ |
1824 | u32 rx_stat_xonpauseframesreceived_hi; | 2413 | u32 rx_stat_xonpauseframesreceived_hi; |
1825 | u32 rx_stat_xonpauseframesreceived_lo; | 2414 | u32 rx_stat_xonpauseframesreceived_lo; |
1826 | /* pause_xoff_frames_received */ | 2415 | /* pause_xoff_frames_received */ |
1827 | u32 rx_stat_xoffpauseframesreceived_hi; | 2416 | u32 rx_stat_xoffpauseframesreceived_hi; |
1828 | u32 rx_stat_xoffpauseframesreceived_lo; | 2417 | u32 rx_stat_xoffpauseframesreceived_lo; |
1829 | /* pause_xon_frames_transmitted */ | 2418 | /* pause_xon_frames_transmitted */ |
1830 | u32 tx_stat_outxonsent_hi; | 2419 | u32 tx_stat_outxonsent_hi; |
1831 | u32 tx_stat_outxonsent_lo; | 2420 | u32 tx_stat_outxonsent_lo; |
1832 | /* pause_xoff_frames_transmitted */ | 2421 | /* pause_xoff_frames_transmitted */ |
1833 | u32 tx_stat_outxoffsent_hi; | 2422 | u32 tx_stat_outxoffsent_hi; |
1834 | u32 tx_stat_outxoffsent_lo; | 2423 | u32 tx_stat_outxoffsent_lo; |
1835 | /* flow_control_done */ | 2424 | /* flow_control_done */ |
1836 | u32 tx_stat_flowcontroldone_hi; | 2425 | u32 tx_stat_flowcontroldone_hi; |
1837 | u32 tx_stat_flowcontroldone_lo; | 2426 | u32 tx_stat_flowcontroldone_lo; |
1838 | 2427 | ||
1839 | /* ether_stats_collisions */ | 2428 | /* ether_stats_collisions */ |
1840 | u32 tx_stat_etherstatscollisions_hi; | 2429 | u32 tx_stat_etherstatscollisions_hi; |
1841 | u32 tx_stat_etherstatscollisions_lo; | 2430 | u32 tx_stat_etherstatscollisions_lo; |
1842 | /* single_collision_transmit_frames */ | 2431 | /* single_collision_transmit_frames */ |
1843 | u32 tx_stat_dot3statssinglecollisionframes_hi; | 2432 | u32 tx_stat_dot3statssinglecollisionframes_hi; |
1844 | u32 tx_stat_dot3statssinglecollisionframes_lo; | 2433 | u32 tx_stat_dot3statssinglecollisionframes_lo; |
1845 | /* multiple_collision_transmit_frames */ | 2434 | /* multiple_collision_transmit_frames */ |
1846 | u32 tx_stat_dot3statsmultiplecollisionframes_hi; | 2435 | u32 tx_stat_dot3statsmultiplecollisionframes_hi; |
1847 | u32 tx_stat_dot3statsmultiplecollisionframes_lo; | 2436 | u32 tx_stat_dot3statsmultiplecollisionframes_lo; |
1848 | /* deferred_transmissions */ | 2437 | /* deferred_transmissions */ |
1849 | u32 tx_stat_dot3statsdeferredtransmissions_hi; | 2438 | u32 tx_stat_dot3statsdeferredtransmissions_hi; |
1850 | u32 tx_stat_dot3statsdeferredtransmissions_lo; | 2439 | u32 tx_stat_dot3statsdeferredtransmissions_lo; |
1851 | /* excessive_collision_frames */ | 2440 | /* excessive_collision_frames */ |
1852 | u32 tx_stat_dot3statsexcessivecollisions_hi; | 2441 | u32 tx_stat_dot3statsexcessivecollisions_hi; |
1853 | u32 tx_stat_dot3statsexcessivecollisions_lo; | 2442 | u32 tx_stat_dot3statsexcessivecollisions_lo; |
1854 | /* late_collision_frames */ | 2443 | /* late_collision_frames */ |
1855 | u32 tx_stat_dot3statslatecollisions_hi; | 2444 | u32 tx_stat_dot3statslatecollisions_hi; |
1856 | u32 tx_stat_dot3statslatecollisions_lo; | 2445 | u32 tx_stat_dot3statslatecollisions_lo; |
1857 | 2446 | ||
1858 | /* frames_transmitted_64_bytes */ | 2447 | /* frames_transmitted_64_bytes */ |
1859 | u32 tx_stat_etherstatspkts64octets_hi; | 2448 | u32 tx_stat_etherstatspkts64octets_hi; |
1860 | u32 tx_stat_etherstatspkts64octets_lo; | 2449 | u32 tx_stat_etherstatspkts64octets_lo; |
1861 | /* frames_transmitted_65_127_bytes */ | 2450 | /* frames_transmitted_65_127_bytes */ |
1862 | u32 tx_stat_etherstatspkts65octetsto127octets_hi; | 2451 | u32 tx_stat_etherstatspkts65octetsto127octets_hi; |
1863 | u32 tx_stat_etherstatspkts65octetsto127octets_lo; | 2452 | u32 tx_stat_etherstatspkts65octetsto127octets_lo; |
1864 | /* frames_transmitted_128_255_bytes */ | 2453 | /* frames_transmitted_128_255_bytes */ |
1865 | u32 tx_stat_etherstatspkts128octetsto255octets_hi; | 2454 | u32 tx_stat_etherstatspkts128octetsto255octets_hi; |
1866 | u32 tx_stat_etherstatspkts128octetsto255octets_lo; | 2455 | u32 tx_stat_etherstatspkts128octetsto255octets_lo; |
1867 | /* frames_transmitted_256_511_bytes */ | 2456 | /* frames_transmitted_256_511_bytes */ |
1868 | u32 tx_stat_etherstatspkts256octetsto511octets_hi; | 2457 | u32 tx_stat_etherstatspkts256octetsto511octets_hi; |
1869 | u32 tx_stat_etherstatspkts256octetsto511octets_lo; | 2458 | u32 tx_stat_etherstatspkts256octetsto511octets_lo; |
1870 | /* frames_transmitted_512_1023_bytes */ | 2459 | /* frames_transmitted_512_1023_bytes */ |
1871 | u32 tx_stat_etherstatspkts512octetsto1023octets_hi; | 2460 | u32 tx_stat_etherstatspkts512octetsto1023octets_hi; |
1872 | u32 tx_stat_etherstatspkts512octetsto1023octets_lo; | 2461 | u32 tx_stat_etherstatspkts512octetsto1023octets_lo; |
1873 | /* frames_transmitted_1024_1522_bytes */ | 2462 | /* frames_transmitted_1024_1522_bytes */ |
1874 | u32 tx_stat_etherstatspkts1024octetsto1522octets_hi; | 2463 | u32 tx_stat_etherstatspkts1024octetsto1522octets_hi; |
1875 | u32 tx_stat_etherstatspkts1024octetsto1522octets_lo; | 2464 | u32 tx_stat_etherstatspkts1024octetsto1522octets_lo; |
1876 | /* frames_transmitted_1523_9022_bytes */ | 2465 | /* frames_transmitted_1523_9022_bytes */ |
1877 | u32 tx_stat_etherstatspktsover1522octets_hi; | 2466 | u32 tx_stat_etherstatspktsover1522octets_hi; |
1878 | u32 tx_stat_etherstatspktsover1522octets_lo; | 2467 | u32 tx_stat_etherstatspktsover1522octets_lo; |
1879 | u32 tx_stat_bmac_2047_hi; | 2468 | u32 tx_stat_mac_2047_hi; |
1880 | u32 tx_stat_bmac_2047_lo; | 2469 | u32 tx_stat_mac_2047_lo; |
1881 | u32 tx_stat_bmac_4095_hi; | 2470 | u32 tx_stat_mac_4095_hi; |
1882 | u32 tx_stat_bmac_4095_lo; | 2471 | u32 tx_stat_mac_4095_lo; |
1883 | u32 tx_stat_bmac_9216_hi; | 2472 | u32 tx_stat_mac_9216_hi; |
1884 | u32 tx_stat_bmac_9216_lo; | 2473 | u32 tx_stat_mac_9216_lo; |
1885 | u32 tx_stat_bmac_16383_hi; | 2474 | u32 tx_stat_mac_16383_hi; |
1886 | u32 tx_stat_bmac_16383_lo; | 2475 | u32 tx_stat_mac_16383_lo; |
1887 | 2476 | ||
1888 | /* internal_mac_transmit_errors */ | 2477 | /* internal_mac_transmit_errors */ |
1889 | u32 tx_stat_dot3statsinternalmactransmiterrors_hi; | 2478 | u32 tx_stat_dot3statsinternalmactransmiterrors_hi; |
1890 | u32 tx_stat_dot3statsinternalmactransmiterrors_lo; | 2479 | u32 tx_stat_dot3statsinternalmactransmiterrors_lo; |
1891 | 2480 | ||
1892 | /* if_out_discards */ | 2481 | /* if_out_discards */ |
1893 | u32 tx_stat_bmac_ufl_hi; | 2482 | u32 tx_stat_mac_ufl_hi; |
1894 | u32 tx_stat_bmac_ufl_lo; | 2483 | u32 tx_stat_mac_ufl_lo; |
1895 | }; | 2484 | }; |
1896 | 2485 | ||
1897 | 2486 | ||
1898 | #define MAC_STX_IDX_MAX 2 | 2487 | #define MAC_STX_IDX_MAX 2 |
1899 | 2488 | ||
1900 | struct host_port_stats { | 2489 | struct host_port_stats { |
1901 | u32 host_port_stats_start; | 2490 | u32 host_port_stats_start; |
1902 | 2491 | ||
1903 | struct mac_stx mac_stx[MAC_STX_IDX_MAX]; | 2492 | struct mac_stx mac_stx[MAC_STX_IDX_MAX]; |
1904 | 2493 | ||
1905 | u32 brb_drop_hi; | 2494 | u32 brb_drop_hi; |
1906 | u32 brb_drop_lo; | 2495 | u32 brb_drop_lo; |
1907 | 2496 | ||
1908 | u32 host_port_stats_end; | 2497 | u32 host_port_stats_end; |
1909 | }; | 2498 | }; |
1910 | 2499 | ||
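Every counter in mac_stx, host_port_stats and host_func_stats is stored as a 32-bit _hi/_lo pair. A hedged sketch of folding such a pair into one 64-bit value, using brb_drop from host_port_stats as the example (the helper name is illustrative, not a driver API):

#include <linux/types.h>

/* Fold a split _hi/_lo statistics pair into a single 64-bit counter. */
static inline u64 stx_to_u64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;
}

/* Example: total BRB drops reported in struct host_port_stats. */
static u64 port_brb_drops(const struct host_port_stats *ps)
{
	return stx_to_u64(ps->brb_drop_hi, ps->brb_drop_lo);
}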
1911 | 2500 | ||
1912 | struct host_func_stats { | 2501 | struct host_func_stats { |
1913 | u32 host_func_stats_start; | 2502 | u32 host_func_stats_start; |
1914 | 2503 | ||
1915 | u32 total_bytes_received_hi; | 2504 | u32 total_bytes_received_hi; |
1916 | u32 total_bytes_received_lo; | 2505 | u32 total_bytes_received_lo; |
1917 | 2506 | ||
1918 | u32 total_bytes_transmitted_hi; | 2507 | u32 total_bytes_transmitted_hi; |
1919 | u32 total_bytes_transmitted_lo; | 2508 | u32 total_bytes_transmitted_lo; |
1920 | 2509 | ||
1921 | u32 total_unicast_packets_received_hi; | 2510 | u32 total_unicast_packets_received_hi; |
1922 | u32 total_unicast_packets_received_lo; | 2511 | u32 total_unicast_packets_received_lo; |
1923 | 2512 | ||
1924 | u32 total_multicast_packets_received_hi; | 2513 | u32 total_multicast_packets_received_hi; |
1925 | u32 total_multicast_packets_received_lo; | 2514 | u32 total_multicast_packets_received_lo; |
1926 | 2515 | ||
1927 | u32 total_broadcast_packets_received_hi; | 2516 | u32 total_broadcast_packets_received_hi; |
1928 | u32 total_broadcast_packets_received_lo; | 2517 | u32 total_broadcast_packets_received_lo; |
1929 | 2518 | ||
1930 | u32 total_unicast_packets_transmitted_hi; | 2519 | u32 total_unicast_packets_transmitted_hi; |
1931 | u32 total_unicast_packets_transmitted_lo; | 2520 | u32 total_unicast_packets_transmitted_lo; |
1932 | 2521 | ||
1933 | u32 total_multicast_packets_transmitted_hi; | 2522 | u32 total_multicast_packets_transmitted_hi; |
1934 | u32 total_multicast_packets_transmitted_lo; | 2523 | u32 total_multicast_packets_transmitted_lo; |
1935 | 2524 | ||
1936 | u32 total_broadcast_packets_transmitted_hi; | 2525 | u32 total_broadcast_packets_transmitted_hi; |
1937 | u32 total_broadcast_packets_transmitted_lo; | 2526 | u32 total_broadcast_packets_transmitted_lo; |
1938 | 2527 | ||
1939 | u32 valid_bytes_received_hi; | 2528 | u32 valid_bytes_received_hi; |
1940 | u32 valid_bytes_received_lo; | 2529 | u32 valid_bytes_received_lo; |
1941 | 2530 | ||
1942 | u32 host_func_stats_end; | 2531 | u32 host_func_stats_end; |
1943 | }; | 2532 | }; |
1944 | 2533 | ||
2534 | /* VIC definitions */ | ||
2535 | #define VICSTATST_UIF_INDEX 2 | ||
1945 | 2536 | ||
1946 | #define BCM_5710_FW_MAJOR_VERSION 6 | 2537 | #define BCM_5710_FW_MAJOR_VERSION 7 |
1947 | #define BCM_5710_FW_MINOR_VERSION 2 | 2538 | #define BCM_5710_FW_MINOR_VERSION 0 |
1948 | #define BCM_5710_FW_REVISION_VERSION 9 | 2539 | #define BCM_5710_FW_REVISION_VERSION 20 |
1949 | #define BCM_5710_FW_ENGINEERING_VERSION 0 | 2540 | #define BCM_5710_FW_ENGINEERING_VERSION 0 |
1950 | #define BCM_5710_FW_COMPILE_FLAGS 1 | 2541 | #define BCM_5710_FW_COMPILE_FLAGS 1 |
1951 | 2542 | ||
1952 | 2543 | ||
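The version macros move the required firmware from 6.2.9 to 7.0.20. As a sketch only, a driver could render them into a version string roughly like this; the exact format used for firmware file names is an assumption here:

#include <linux/kernel.h>

/* Sketch: compose "7.0.20.0" from the version macros above. */
static void fw_version_string(char *buf, size_t len)
{
	snprintf(buf, len, "%d.%d.%d.%d",
		 BCM_5710_FW_MAJOR_VERSION,
		 BCM_5710_FW_MINOR_VERSION,
		 BCM_5710_FW_REVISION_VERSION,
		 BCM_5710_FW_ENGINEERING_VERSION);
}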
@@ -1964,6 +2555,115 @@ struct atten_sp_status_block { | |||
1964 | 2555 | ||
1965 | 2556 | ||
1966 | /* | 2557 | /* |
2558 | * The eth aggregative context of Cstorm | ||
2559 | */ | ||
2560 | struct cstorm_eth_ag_context { | ||
2561 | u32 __reserved0[10]; | ||
2562 | }; | ||
2563 | |||
2564 | |||
2565 | /* | ||
2566 | * dmae command structure | ||
2567 | */ | ||
2568 | struct dmae_command { | ||
2569 | u32 opcode; | ||
2570 | #define DMAE_COMMAND_SRC (0x1<<0) | ||
2571 | #define DMAE_COMMAND_SRC_SHIFT 0 | ||
2572 | #define DMAE_COMMAND_DST (0x3<<1) | ||
2573 | #define DMAE_COMMAND_DST_SHIFT 1 | ||
2574 | #define DMAE_COMMAND_C_DST (0x1<<3) | ||
2575 | #define DMAE_COMMAND_C_DST_SHIFT 3 | ||
2576 | #define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4) | ||
2577 | #define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4 | ||
2578 | #define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5) | ||
2579 | #define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5 | ||
2580 | #define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6) | ||
2581 | #define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6 | ||
2582 | #define DMAE_COMMAND_ENDIANITY (0x3<<9) | ||
2583 | #define DMAE_COMMAND_ENDIANITY_SHIFT 9 | ||
2584 | #define DMAE_COMMAND_PORT (0x1<<11) | ||
2585 | #define DMAE_COMMAND_PORT_SHIFT 11 | ||
2586 | #define DMAE_COMMAND_CRC_RESET (0x1<<12) | ||
2587 | #define DMAE_COMMAND_CRC_RESET_SHIFT 12 | ||
2588 | #define DMAE_COMMAND_SRC_RESET (0x1<<13) | ||
2589 | #define DMAE_COMMAND_SRC_RESET_SHIFT 13 | ||
2590 | #define DMAE_COMMAND_DST_RESET (0x1<<14) | ||
2591 | #define DMAE_COMMAND_DST_RESET_SHIFT 14 | ||
2592 | #define DMAE_COMMAND_E1HVN (0x3<<15) | ||
2593 | #define DMAE_COMMAND_E1HVN_SHIFT 15 | ||
2594 | #define DMAE_COMMAND_DST_VN (0x3<<17) | ||
2595 | #define DMAE_COMMAND_DST_VN_SHIFT 17 | ||
2596 | #define DMAE_COMMAND_C_FUNC (0x1<<19) | ||
2597 | #define DMAE_COMMAND_C_FUNC_SHIFT 19 | ||
2598 | #define DMAE_COMMAND_ERR_POLICY (0x3<<20) | ||
2599 | #define DMAE_COMMAND_ERR_POLICY_SHIFT 20 | ||
2600 | #define DMAE_COMMAND_RESERVED0 (0x3FF<<22) | ||
2601 | #define DMAE_COMMAND_RESERVED0_SHIFT 22 | ||
2602 | u32 src_addr_lo; | ||
2603 | u32 src_addr_hi; | ||
2604 | u32 dst_addr_lo; | ||
2605 | u32 dst_addr_hi; | ||
2606 | #if defined(__BIG_ENDIAN) | ||
2607 | u16 opcode_iov; | ||
2608 | #define DMAE_COMMAND_SRC_VFID (0x3F<<0) | ||
2609 | #define DMAE_COMMAND_SRC_VFID_SHIFT 0 | ||
2610 | #define DMAE_COMMAND_SRC_VFPF (0x1<<6) | ||
2611 | #define DMAE_COMMAND_SRC_VFPF_SHIFT 6 | ||
2612 | #define DMAE_COMMAND_RESERVED1 (0x1<<7) | ||
2613 | #define DMAE_COMMAND_RESERVED1_SHIFT 7 | ||
2614 | #define DMAE_COMMAND_DST_VFID (0x3F<<8) | ||
2615 | #define DMAE_COMMAND_DST_VFID_SHIFT 8 | ||
2616 | #define DMAE_COMMAND_DST_VFPF (0x1<<14) | ||
2617 | #define DMAE_COMMAND_DST_VFPF_SHIFT 14 | ||
2618 | #define DMAE_COMMAND_RESERVED2 (0x1<<15) | ||
2619 | #define DMAE_COMMAND_RESERVED2_SHIFT 15 | ||
2620 | u16 len; | ||
2621 | #elif defined(__LITTLE_ENDIAN) | ||
2622 | u16 len; | ||
2623 | u16 opcode_iov; | ||
2624 | #define DMAE_COMMAND_SRC_VFID (0x3F<<0) | ||
2625 | #define DMAE_COMMAND_SRC_VFID_SHIFT 0 | ||
2626 | #define DMAE_COMMAND_SRC_VFPF (0x1<<6) | ||
2627 | #define DMAE_COMMAND_SRC_VFPF_SHIFT 6 | ||
2628 | #define DMAE_COMMAND_RESERVED1 (0x1<<7) | ||
2629 | #define DMAE_COMMAND_RESERVED1_SHIFT 7 | ||
2630 | #define DMAE_COMMAND_DST_VFID (0x3F<<8) | ||
2631 | #define DMAE_COMMAND_DST_VFID_SHIFT 8 | ||
2632 | #define DMAE_COMMAND_DST_VFPF (0x1<<14) | ||
2633 | #define DMAE_COMMAND_DST_VFPF_SHIFT 14 | ||
2634 | #define DMAE_COMMAND_RESERVED2 (0x1<<15) | ||
2635 | #define DMAE_COMMAND_RESERVED2_SHIFT 15 | ||
2636 | #endif | ||
2637 | u32 comp_addr_lo; | ||
2638 | u32 comp_addr_hi; | ||
2639 | u32 comp_val; | ||
2640 | u32 crc32; | ||
2641 | u32 crc32_c; | ||
2642 | #if defined(__BIG_ENDIAN) | ||
2643 | u16 crc16_c; | ||
2644 | u16 crc16; | ||
2645 | #elif defined(__LITTLE_ENDIAN) | ||
2646 | u16 crc16; | ||
2647 | u16 crc16_c; | ||
2648 | #endif | ||
2649 | #if defined(__BIG_ENDIAN) | ||
2650 | u16 reserved3; | ||
2651 | u16 crc_t10; | ||
2652 | #elif defined(__LITTLE_ENDIAN) | ||
2653 | u16 crc_t10; | ||
2654 | u16 reserved3; | ||
2655 | #endif | ||
2656 | #if defined(__BIG_ENDIAN) | ||
2657 | u16 xsum8; | ||
2658 | u16 xsum16; | ||
2659 | #elif defined(__LITTLE_ENDIAN) | ||
2660 | u16 xsum16; | ||
2661 | u16 xsum8; | ||
2662 | #endif | ||
2663 | }; | ||
2664 | |||
2665 | |||
2666 | /* | ||
1967 | * common data for all protocols | 2667 | * common data for all protocols |
1968 | */ | 2668 | */ |
1969 | struct doorbell_hdr { | 2669 | struct doorbell_hdr { |
@@ -1979,33 +2679,29 @@ struct doorbell_hdr { | |||
1979 | }; | 2679 | }; |
1980 | 2680 | ||
1981 | /* | 2681 | /* |
1982 | * doorbell message sent to the chip | 2682 | * Ethernet doorbell |
1983 | */ | ||
1984 | struct doorbell { | ||
1985 | #if defined(__BIG_ENDIAN) | ||
1986 | u16 zero_fill2; | ||
1987 | u8 zero_fill1; | ||
1988 | struct doorbell_hdr header; | ||
1989 | #elif defined(__LITTLE_ENDIAN) | ||
1990 | struct doorbell_hdr header; | ||
1991 | u8 zero_fill1; | ||
1992 | u16 zero_fill2; | ||
1993 | #endif | ||
1994 | }; | ||
1995 | |||
1996 | |||
1997 | /* | ||
1998 | * doorbell message sent to the chip | ||
1999 | */ | 2683 | */ |
2000 | struct doorbell_set_prod { | 2684 | struct eth_tx_doorbell { |
2001 | #if defined(__BIG_ENDIAN) | 2685 | #if defined(__BIG_ENDIAN) |
2002 | u16 prod; | 2686 | u16 npackets; |
2003 | u8 zero_fill1; | 2687 | u8 params; |
2004 | struct doorbell_hdr header; | 2688 | #define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) |
2689 | #define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 | ||
2690 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) | ||
2691 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 | ||
2692 | #define ETH_TX_DOORBELL_SPARE (0x1<<7) | ||
2693 | #define ETH_TX_DOORBELL_SPARE_SHIFT 7 | ||
2694 | struct doorbell_hdr hdr; | ||
2005 | #elif defined(__LITTLE_ENDIAN) | 2695 | #elif defined(__LITTLE_ENDIAN) |
2006 | struct doorbell_hdr header; | 2696 | struct doorbell_hdr hdr; |
2007 | u8 zero_fill1; | 2697 | u8 params; |
2008 | u16 prod; | 2698 | #define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) |
2699 | #define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 | ||
2700 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) | ||
2701 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 | ||
2702 | #define ETH_TX_DOORBELL_SPARE (0x1<<7) | ||
2703 | #define ETH_TX_DOORBELL_SPARE_SHIFT 7 | ||
2704 | u16 npackets; | ||
2009 | #endif | 2705 | #endif |
2010 | }; | 2706 | }; |
2011 | 2707 | ||
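The generic doorbell at this offset gives way to an Ethernet-specific Tx doorbell carrying a packet count plus a packed params byte. A hedged sketch of filling one for a packet that spans nbds buffer descriptors; the helper is hypothetical and the doorbell_hdr bits are left unset:

#include <linux/string.h>
#include <linux/types.h>

/* Sketch: prepare an eth_tx_doorbell before ringing the Tx doorbell. */
static void fill_tx_doorbell(struct eth_tx_doorbell *db, u16 packets, u8 nbds)
{
	memset(db, 0, sizeof(*db));
	db->npackets = packets;
	db->params |= (nbds << ETH_TX_DOORBELL_NUM_BDS_SHIFT) &
		      ETH_TX_DOORBELL_NUM_BDS;
	/* db->hdr would additionally carry the connection type / DB flags */
}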
@@ -2016,7 +2712,7 @@ struct doorbell_set_prod { | |||
2016 | struct hc_status_block_e1x { | 2712 | struct hc_status_block_e1x { |
2017 | __le16 index_values[HC_SB_MAX_INDICES_E1X]; | 2713 | __le16 index_values[HC_SB_MAX_INDICES_E1X]; |
2018 | __le16 running_index[HC_SB_MAX_SM]; | 2714 | __le16 running_index[HC_SB_MAX_SM]; |
2019 | u32 rsrv; | 2715 | __le32 rsrv[11]; |
2020 | }; | 2716 | }; |
2021 | 2717 | ||
2022 | /* | 2718 | /* |
@@ -2033,7 +2729,7 @@ struct host_hc_status_block_e1x { | |||
2033 | struct hc_status_block_e2 { | 2729 | struct hc_status_block_e2 { |
2034 | __le16 index_values[HC_SB_MAX_INDICES_E2]; | 2730 | __le16 index_values[HC_SB_MAX_INDICES_E2]; |
2035 | __le16 running_index[HC_SB_MAX_SM]; | 2731 | __le16 running_index[HC_SB_MAX_SM]; |
2036 | u32 reserved; | 2732 | __le32 reserved[11]; |
2037 | }; | 2733 | }; |
2038 | 2734 | ||
2039 | /* | 2735 | /* |
@@ -2154,6 +2850,16 @@ union igu_consprod_reg { | |||
2154 | 2850 | ||
2155 | 2851 | ||
2156 | /* | 2852 | /* |
2853 | * Igu control commands | ||
2854 | */ | ||
2855 | enum igu_ctrl_cmd { | ||
2856 | IGU_CTRL_CMD_TYPE_RD, | ||
2857 | IGU_CTRL_CMD_TYPE_WR, | ||
2858 | MAX_IGU_CTRL_CMD | ||
2859 | }; | ||
2860 | |||
2861 | |||
2862 | /* | ||
2157 | * Control register for the IGU command register | 2863 | * Control register for the IGU command register |
2158 | */ | 2864 | */ |
2159 | struct igu_ctrl_reg { | 2865 | struct igu_ctrl_reg { |
@@ -2172,6 +2878,29 @@ struct igu_ctrl_reg { | |||
2172 | 2878 | ||
2173 | 2879 | ||
2174 | /* | 2880 | /* |
2881 | * Igu interrupt command | ||
2882 | */ | ||
2883 | enum igu_int_cmd { | ||
2884 | IGU_INT_ENABLE, | ||
2885 | IGU_INT_DISABLE, | ||
2886 | IGU_INT_NOP, | ||
2887 | IGU_INT_NOP2, | ||
2888 | MAX_IGU_INT_CMD | ||
2889 | }; | ||
2890 | |||
2891 | |||
2892 | /* | ||
2893 | * Igu segments | ||
2894 | */ | ||
2895 | enum igu_seg_access { | ||
2896 | IGU_SEG_ACCESS_NORM, | ||
2897 | IGU_SEG_ACCESS_DEF, | ||
2898 | IGU_SEG_ACCESS_ATTN, | ||
2899 | MAX_IGU_SEG_ACCESS | ||
2900 | }; | ||
2901 | |||
2902 | |||
2903 | /* | ||
2175 | * Parser parsing flags field | 2904 | * Parser parsing flags field |
2176 | */ | 2905 | */ |
2177 | struct parsing_flags { | 2906 | struct parsing_flags { |
@@ -2205,94 +2934,46 @@ struct parsing_flags { | |||
2205 | }; | 2934 | }; |
2206 | 2935 | ||
2207 | 2936 | ||
2208 | struct regpair { | 2937 | /* |
2209 | __le32 lo; | 2938 | * Parsing flags for TCP ACK type |
2210 | __le32 hi; | 2939 | */ |
2940 | enum prs_flags_ack_type { | ||
2941 | PRS_FLAG_PUREACK_PIGGY, | ||
2942 | PRS_FLAG_PUREACK_PURE, | ||
2943 | MAX_PRS_FLAGS_ACK_TYPE | ||
2211 | }; | 2944 | }; |
2212 | 2945 | ||
2213 | 2946 | ||
2214 | /* | 2947 | /* |
2215 | * dmae command structure | 2948 | * Parsing flags for Ethernet address type |
2216 | */ | 2949 | */ |
2217 | struct dmae_command { | 2950 | enum prs_flags_eth_addr_type { |
2218 | u32 opcode; | 2951 | PRS_FLAG_ETHTYPE_NON_UNICAST, |
2219 | #define DMAE_COMMAND_SRC (0x1<<0) | 2952 | PRS_FLAG_ETHTYPE_UNICAST, |
2220 | #define DMAE_COMMAND_SRC_SHIFT 0 | 2953 | MAX_PRS_FLAGS_ETH_ADDR_TYPE |
2221 | #define DMAE_COMMAND_DST (0x3<<1) | ||
2222 | #define DMAE_COMMAND_DST_SHIFT 1 | ||
2223 | #define DMAE_COMMAND_C_DST (0x1<<3) | ||
2224 | #define DMAE_COMMAND_C_DST_SHIFT 3 | ||
2225 | #define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4) | ||
2226 | #define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4 | ||
2227 | #define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5) | ||
2228 | #define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5 | ||
2229 | #define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6) | ||
2230 | #define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6 | ||
2231 | #define DMAE_COMMAND_ENDIANITY (0x3<<9) | ||
2232 | #define DMAE_COMMAND_ENDIANITY_SHIFT 9 | ||
2233 | #define DMAE_COMMAND_PORT (0x1<<11) | ||
2234 | #define DMAE_COMMAND_PORT_SHIFT 11 | ||
2235 | #define DMAE_COMMAND_CRC_RESET (0x1<<12) | ||
2236 | #define DMAE_COMMAND_CRC_RESET_SHIFT 12 | ||
2237 | #define DMAE_COMMAND_SRC_RESET (0x1<<13) | ||
2238 | #define DMAE_COMMAND_SRC_RESET_SHIFT 13 | ||
2239 | #define DMAE_COMMAND_DST_RESET (0x1<<14) | ||
2240 | #define DMAE_COMMAND_DST_RESET_SHIFT 14 | ||
2241 | #define DMAE_COMMAND_E1HVN (0x3<<15) | ||
2242 | #define DMAE_COMMAND_E1HVN_SHIFT 15 | ||
2243 | #define DMAE_COMMAND_DST_VN (0x3<<17) | ||
2244 | #define DMAE_COMMAND_DST_VN_SHIFT 17 | ||
2245 | #define DMAE_COMMAND_C_FUNC (0x1<<19) | ||
2246 | #define DMAE_COMMAND_C_FUNC_SHIFT 19 | ||
2247 | #define DMAE_COMMAND_ERR_POLICY (0x3<<20) | ||
2248 | #define DMAE_COMMAND_ERR_POLICY_SHIFT 20 | ||
2249 | #define DMAE_COMMAND_RESERVED0 (0x3FF<<22) | ||
2250 | #define DMAE_COMMAND_RESERVED0_SHIFT 22 | ||
2251 | u32 src_addr_lo; | ||
2252 | u32 src_addr_hi; | ||
2253 | u32 dst_addr_lo; | ||
2254 | u32 dst_addr_hi; | ||
2255 | #if defined(__BIG_ENDIAN) | ||
2256 | u16 reserved1; | ||
2257 | u16 len; | ||
2258 | #elif defined(__LITTLE_ENDIAN) | ||
2259 | u16 len; | ||
2260 | u16 reserved1; | ||
2261 | #endif | ||
2262 | u32 comp_addr_lo; | ||
2263 | u32 comp_addr_hi; | ||
2264 | u32 comp_val; | ||
2265 | u32 crc32; | ||
2266 | u32 crc32_c; | ||
2267 | #if defined(__BIG_ENDIAN) | ||
2268 | u16 crc16_c; | ||
2269 | u16 crc16; | ||
2270 | #elif defined(__LITTLE_ENDIAN) | ||
2271 | u16 crc16; | ||
2272 | u16 crc16_c; | ||
2273 | #endif | ||
2274 | #if defined(__BIG_ENDIAN) | ||
2275 | u16 reserved3; | ||
2276 | u16 crc_t10; | ||
2277 | #elif defined(__LITTLE_ENDIAN) | ||
2278 | u16 crc_t10; | ||
2279 | u16 reserved3; | ||
2280 | #endif | ||
2281 | #if defined(__BIG_ENDIAN) | ||
2282 | u16 xsum8; | ||
2283 | u16 xsum16; | ||
2284 | #elif defined(__LITTLE_ENDIAN) | ||
2285 | u16 xsum16; | ||
2286 | u16 xsum8; | ||
2287 | #endif | ||
2288 | }; | 2954 | }; |
2289 | 2955 | ||
2290 | 2956 | ||
2291 | struct double_regpair { | 2957 | /* |
2292 | u32 regpair0_lo; | 2958 | * Parsing flags for over-ethernet protocol |
2293 | u32 regpair0_hi; | 2959 | */ |
2294 | u32 regpair1_lo; | 2960 | enum prs_flags_over_eth { |
2295 | u32 regpair1_hi; | 2961 | PRS_FLAG_OVERETH_UNKNOWN, |
2962 | PRS_FLAG_OVERETH_IPV4, | ||
2963 | PRS_FLAG_OVERETH_IPV6, | ||
2964 | PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN, | ||
2965 | MAX_PRS_FLAGS_OVER_ETH | ||
2966 | }; | ||
2967 | |||
2968 | |||
2969 | /* | ||
2970 | * Parsing flags for over-IP protocol | ||
2971 | */ | ||
2972 | enum prs_flags_over_ip { | ||
2973 | PRS_FLAG_OVERIP_UNKNOWN, | ||
2974 | PRS_FLAG_OVERIP_TCP, | ||
2975 | PRS_FLAG_OVERIP_UDP, | ||
2976 | MAX_PRS_FLAGS_OVER_IP | ||
2296 | }; | 2977 | }; |
2297 | 2978 | ||
2298 | 2979 | ||
@@ -2313,54 +2994,23 @@ struct sdm_op_gen { | |||
2313 | #define SDM_OP_GEN_RESERVED_SHIFT 17 | 2994 | #define SDM_OP_GEN_RESERVED_SHIFT 17 |
2314 | }; | 2995 | }; |
2315 | 2996 | ||
2316 | /* | ||
2317 | * The eth Rx Buffer Descriptor | ||
2318 | */ | ||
2319 | struct eth_rx_bd { | ||
2320 | __le32 addr_lo; | ||
2321 | __le32 addr_hi; | ||
2322 | }; | ||
2323 | 2997 | ||
2324 | /* | 2998 | /* |
2325 | * The eth Rx SGE Descriptor | 2999 | * Timers connection context |
2326 | */ | ||
2327 | struct eth_rx_sge { | ||
2328 | __le32 addr_lo; | ||
2329 | __le32 addr_hi; | ||
2330 | }; | ||
2331 | |||
2332 | |||
2333 | |||
2334 | /* | ||
2335 | * The eth storm context of Ustorm | ||
2336 | */ | ||
2337 | struct ustorm_eth_st_context { | ||
2338 | u32 reserved0[48]; | ||
2339 | }; | ||
2340 | |||
2341 | /* | ||
2342 | * The eth storm context of Tstorm | ||
2343 | */ | 3000 | */ |
2344 | struct tstorm_eth_st_context { | 3001 | struct timers_block_context { |
2345 | u32 __reserved0[28]; | 3002 | u32 __reserved_0; |
3003 | u32 __reserved_1; | ||
3004 | u32 __reserved_2; | ||
3005 | u32 flags; | ||
3006 | #define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0) | ||
3007 | #define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0 | ||
3008 | #define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2) | ||
3009 | #define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2 | ||
3010 | #define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3) | ||
3011 | #define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3 | ||
2346 | }; | 3012 | }; |
2347 | 3013 | ||
2348 | /* | ||
2349 | * The eth aggregative context of Xstorm | ||
2350 | */ | ||
2351 | struct xstorm_eth_ag_context { | ||
2352 | u32 reserved0; | ||
2353 | #if defined(__BIG_ENDIAN) | ||
2354 | u8 cdu_reserved; | ||
2355 | u8 reserved2; | ||
2356 | u16 reserved1; | ||
2357 | #elif defined(__LITTLE_ENDIAN) | ||
2358 | u16 reserved1; | ||
2359 | u8 reserved2; | ||
2360 | u8 cdu_reserved; | ||
2361 | #endif | ||
2362 | u32 reserved3[30]; | ||
2363 | }; | ||
2364 | 3014 | ||
2365 | /* | 3015 | /* |
2366 | * The eth aggregative context of Tstorm | 3016 | * The eth aggregative context of Tstorm |
@@ -2371,14 +3021,6 @@ struct tstorm_eth_ag_context { | |||
2371 | 3021 | ||
2372 | 3022 | ||
2373 | /* | 3023 | /* |
2374 | * The eth aggregative context of Cstorm | ||
2375 | */ | ||
2376 | struct cstorm_eth_ag_context { | ||
2377 | u32 __reserved0[10]; | ||
2378 | }; | ||
2379 | |||
2380 | |||
2381 | /* | ||
2382 | * The eth aggregative context of Ustorm | 3024 | * The eth aggregative context of Ustorm |
2383 | */ | 3025 | */ |
2384 | struct ustorm_eth_ag_context { | 3026 | struct ustorm_eth_ag_context { |
@@ -2395,229 +3037,81 @@ struct ustorm_eth_ag_context { | |||
2395 | u32 __reserved3[6]; | 3037 | u32 __reserved3[6]; |
2396 | }; | 3038 | }; |
2397 | 3039 | ||
2398 | /* | ||
2399 | * Timers connection context | ||
2400 | */ | ||
2401 | struct timers_block_context { | ||
2402 | u32 __reserved_0; | ||
2403 | u32 __reserved_1; | ||
2404 | u32 __reserved_2; | ||
2405 | u32 flags; | ||
2406 | #define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0) | ||
2407 | #define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0 | ||
2408 | #define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2) | ||
2409 | #define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2 | ||
2410 | #define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3) | ||
2411 | #define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3 | ||
2412 | }; | ||
2413 | 3040 | ||
2414 | /* | 3041 | /* |
2415 | * structure for easy accessibility to assembler | 3042 | * The eth aggregative context of Xstorm |
2416 | */ | ||
2417 | struct eth_tx_bd_flags { | ||
2418 | u8 as_bitfield; | ||
2419 | #define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0) | ||
2420 | #define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0 | ||
2421 | #define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1) | ||
2422 | #define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1 | ||
2423 | #define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2) | ||
2424 | #define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2 | ||
2425 | #define ETH_TX_BD_FLAGS_START_BD (0x1<<4) | ||
2426 | #define ETH_TX_BD_FLAGS_START_BD_SHIFT 4 | ||
2427 | #define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5) | ||
2428 | #define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5 | ||
2429 | #define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6) | ||
2430 | #define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6 | ||
2431 | #define ETH_TX_BD_FLAGS_IPV6 (0x1<<7) | ||
2432 | #define ETH_TX_BD_FLAGS_IPV6_SHIFT 7 | ||
2433 | }; | ||
2434 | |||
2435 | /* | ||
2436 | * The eth Tx Buffer Descriptor | ||
2437 | */ | ||
2438 | struct eth_tx_start_bd { | ||
2439 | __le32 addr_lo; | ||
2440 | __le32 addr_hi; | ||
2441 | __le16 nbd; | ||
2442 | __le16 nbytes; | ||
2443 | __le16 vlan_or_ethertype; | ||
2444 | struct eth_tx_bd_flags bd_flags; | ||
2445 | u8 general_data; | ||
2446 | #define ETH_TX_START_BD_HDR_NBDS (0x3F<<0) | ||
2447 | #define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 | ||
2448 | #define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6) | ||
2449 | #define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6 | ||
2450 | }; | ||
2451 | |||
2452 | /* | ||
2453 | * Tx regular BD structure | ||
2454 | */ | ||
2455 | struct eth_tx_bd { | ||
2456 | __le32 addr_lo; | ||
2457 | __le32 addr_hi; | ||
2458 | __le16 total_pkt_bytes; | ||
2459 | __le16 nbytes; | ||
2460 | u8 reserved[4]; | ||
2461 | }; | ||
2462 | |||
2463 | /* | ||
2464 | * Tx parsing BD structure for ETH E1/E1h | ||
2465 | */ | ||
2466 | struct eth_tx_parse_bd_e1x { | ||
2467 | u8 global_data; | ||
2468 | #define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0) | ||
2469 | #define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0 | ||
2470 | #define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4) | ||
2471 | #define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4 | ||
2472 | #define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5) | ||
2473 | #define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5 | ||
2474 | #define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6) | ||
2475 | #define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6 | ||
2476 | #define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7) | ||
2477 | #define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7 | ||
2478 | u8 tcp_flags; | ||
2479 | #define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0) | ||
2480 | #define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0 | ||
2481 | #define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1) | ||
2482 | #define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1 | ||
2483 | #define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2) | ||
2484 | #define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2 | ||
2485 | #define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3) | ||
2486 | #define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3 | ||
2487 | #define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4) | ||
2488 | #define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4 | ||
2489 | #define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5) | ||
2490 | #define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5 | ||
2491 | #define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6) | ||
2492 | #define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6 | ||
2493 | #define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7) | ||
2494 | #define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7 | ||
2495 | u8 ip_hlen_w; | ||
2496 | s8 reserved; | ||
2497 | __le16 total_hlen_w; | ||
2498 | __le16 tcp_pseudo_csum; | ||
2499 | __le16 lso_mss; | ||
2500 | __le16 ip_id; | ||
2501 | __le32 tcp_send_seq; | ||
2502 | }; | ||
2503 | |||
2504 | /* | ||
2505 | * Tx parsing BD structure for ETH E2 | ||
2506 | */ | 3043 | */ |
2507 | struct eth_tx_parse_bd_e2 { | 3044 | struct xstorm_eth_ag_context { |
2508 | __le16 dst_mac_addr_lo; | 3045 | u32 reserved0; |
2509 | __le16 dst_mac_addr_mid; | 3046 | #if defined(__BIG_ENDIAN) |
2510 | __le16 dst_mac_addr_hi; | 3047 | u8 cdu_reserved; |
2511 | __le16 src_mac_addr_lo; | 3048 | u8 reserved2; |
2512 | __le16 src_mac_addr_mid; | 3049 | u16 reserved1; |
2513 | __le16 src_mac_addr_hi; | 3050 | #elif defined(__LITTLE_ENDIAN) |
2514 | __le32 parsing_data; | 3051 | u16 reserved1; |
2515 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0) | 3052 | u8 reserved2; |
2516 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0 | 3053 | u8 cdu_reserved; |
2517 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13) | 3054 | #endif |
2518 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13 | 3055 | u32 reserved3[30]; |
2519 | #define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17) | ||
2520 | #define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17 | ||
2521 | #define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31) | ||
2522 | #define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31 | ||
2523 | }; | 3056 | }; |
2524 | 3057 | ||
2525 | /* | ||
2526 | * The last BD in the BD memory will hold a pointer to the next BD memory | ||
2527 | */ | ||
2528 | struct eth_tx_next_bd { | ||
2529 | __le32 addr_lo; | ||
2530 | __le32 addr_hi; | ||
2531 | u8 reserved[8]; | ||
2532 | }; | ||
2533 | 3058 | ||
2534 | /* | 3059 | /* |
2535 | * union for 4 Bd types | 3060 | * doorbell message sent to the chip |
2536 | */ | 3061 | */ |
2537 | union eth_tx_bd_types { | 3062 | struct doorbell { |
2538 | struct eth_tx_start_bd start_bd; | 3063 | #if defined(__BIG_ENDIAN) |
2539 | struct eth_tx_bd reg_bd; | 3064 | u16 zero_fill2; |
2540 | struct eth_tx_parse_bd_e1x parse_bd_e1x; | 3065 | u8 zero_fill1; |
2541 | struct eth_tx_parse_bd_e2 parse_bd_e2; | 3066 | struct doorbell_hdr header; |
2542 | struct eth_tx_next_bd next_bd; | 3067 | #elif defined(__LITTLE_ENDIAN) |
3068 | struct doorbell_hdr header; | ||
3069 | u8 zero_fill1; | ||
3070 | u16 zero_fill2; | ||
3071 | #endif | ||
2543 | }; | 3072 | }; |
2544 | 3073 | ||
2545 | 3074 | ||
2546 | /* | 3075 | /* |
2547 | * The eth storm context of Xstorm | 3076 | * doorbell message sent to the chip |
2548 | */ | 3077 | */ |
2549 | struct xstorm_eth_st_context { | 3078 | struct doorbell_set_prod { |
2550 | u32 reserved0[60]; | 3079 | #if defined(__BIG_ENDIAN) |
3080 | u16 prod; | ||
3081 | u8 zero_fill1; | ||
3082 | struct doorbell_hdr header; | ||
3083 | #elif defined(__LITTLE_ENDIAN) | ||
3084 | struct doorbell_hdr header; | ||
3085 | u8 zero_fill1; | ||
3086 | u16 prod; | ||
3087 | #endif | ||
2551 | }; | 3088 | }; |
2552 | 3089 | ||
2553 | /* | ||
2554 | * The eth storm context of Cstorm | ||
2555 | */ | ||
2556 | struct cstorm_eth_st_context { | ||
2557 | u32 __reserved0[4]; | ||
2558 | }; | ||
2559 | 3090 | ||
2560 | /* | 3091 | struct regpair { |
2561 | * Ethernet connection context | 3092 | __le32 lo; |
2562 | */ | 3093 | __le32 hi; |
2563 | struct eth_context { | ||
2564 | struct ustorm_eth_st_context ustorm_st_context; | ||
2565 | struct tstorm_eth_st_context tstorm_st_context; | ||
2566 | struct xstorm_eth_ag_context xstorm_ag_context; | ||
2567 | struct tstorm_eth_ag_context tstorm_ag_context; | ||
2568 | struct cstorm_eth_ag_context cstorm_ag_context; | ||
2569 | struct ustorm_eth_ag_context ustorm_ag_context; | ||
2570 | struct timers_block_context timers_context; | ||
2571 | struct xstorm_eth_st_context xstorm_st_context; | ||
2572 | struct cstorm_eth_st_context cstorm_st_context; | ||
2573 | }; | 3094 | }; |
2574 | 3095 | ||
2575 | 3096 | ||
2576 | /* | 3097 | /* |
2577 | * Ethernet doorbell | 3098 | * Classify rule opcodes in E2/E3 |
2578 | */ | 3099 | */ |
2579 | struct eth_tx_doorbell { | 3100 | enum classify_rule { |
2580 | #if defined(__BIG_ENDIAN) | 3101 | CLASSIFY_RULE_OPCODE_MAC, |
2581 | u16 npackets; | 3102 | CLASSIFY_RULE_OPCODE_VLAN, |
2582 | u8 params; | 3103 | CLASSIFY_RULE_OPCODE_PAIR, |
2583 | #define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) | 3104 | MAX_CLASSIFY_RULE |
2584 | #define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 | ||
2585 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) | ||
2586 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 | ||
2587 | #define ETH_TX_DOORBELL_SPARE (0x1<<7) | ||
2588 | #define ETH_TX_DOORBELL_SPARE_SHIFT 7 | ||
2589 | struct doorbell_hdr hdr; | ||
2590 | #elif defined(__LITTLE_ENDIAN) | ||
2591 | struct doorbell_hdr hdr; | ||
2592 | u8 params; | ||
2593 | #define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) | ||
2594 | #define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 | ||
2595 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) | ||
2596 | #define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 | ||
2597 | #define ETH_TX_DOORBELL_SPARE (0x1<<7) | ||
2598 | #define ETH_TX_DOORBELL_SPARE_SHIFT 7 | ||
2599 | u16 npackets; | ||
2600 | #endif | ||
2601 | }; | 3105 | }; |
2602 | 3106 | ||
2603 | 3107 | ||
2604 | /* | 3108 | /* |
2605 | * client init fc data | 3109 | * Classify rule types in E2/E3 |
2606 | */ | 3110 | */ |
2607 | struct client_init_fc_data { | 3111 | enum classify_rule_action_type { |
2608 | __le16 cqe_pause_thr_low; | 3112 | CLASSIFY_RULE_REMOVE, |
2609 | __le16 cqe_pause_thr_high; | 3113 | CLASSIFY_RULE_ADD, |
2610 | __le16 bd_pause_thr_low; | 3114 | MAX_CLASSIFY_RULE_ACTION_TYPE |
2611 | __le16 bd_pause_thr_high; | ||
2612 | __le16 sge_pause_thr_low; | ||
2613 | __le16 sge_pause_thr_high; | ||
2614 | __le16 rx_cos_mask; | ||
2615 | u8 safc_group_num; | ||
2616 | u8 safc_group_en_flg; | ||
2617 | u8 traffic_type; | ||
2618 | u8 reserved0; | ||
2619 | __le16 reserved1; | ||
2620 | __le32 reserved2; | ||
2621 | }; | 3115 | }; |
2622 | 3116 | ||
2623 | 3117 | ||
@@ -2631,8 +3125,12 @@ struct client_init_general_data { | |||
2631 | u8 is_fcoe_flg; | 3125 | u8 is_fcoe_flg; |
2632 | u8 activate_flg; | 3126 | u8 activate_flg; |
2633 | u8 sp_client_id; | 3127 | u8 sp_client_id; |
2634 | __le16 reserved0; | 3128 | __le16 mtu; |
2635 | __le32 reserved1[2]; | 3129 | u8 statistics_zero_flg; |
3130 | u8 func_id; | ||
3131 | u8 cos; | ||
3132 | u8 traffic_type; | ||
3133 | u32 reserved0; | ||
2636 | }; | 3134 | }; |
2637 | 3135 | ||
2638 | 3136 | ||
@@ -2640,7 +3138,13 @@ struct client_init_general_data { | |||
2640 | * client init rx data | 3138 | * client init rx data |
2641 | */ | 3139 | */ |
2642 | struct client_init_rx_data { | 3140 | struct client_init_rx_data { |
2643 | u8 tpa_en_flg; | 3141 | u8 tpa_en; |
3142 | #define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0) | ||
3143 | #define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0 | ||
3144 | #define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1) | ||
3145 | #define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1 | ||
3146 | #define CLIENT_INIT_RX_DATA_RESERVED5 (0x3F<<2) | ||
3147 | #define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 2 | ||
2644 | u8 vmqueue_mode_en_flg; | 3148 | u8 vmqueue_mode_en_flg; |
2645 | u8 extra_data_over_sgl_en_flg; | 3149 | u8 extra_data_over_sgl_en_flg; |
2646 | u8 cache_line_alignment_log_size; | 3150 | u8 cache_line_alignment_log_size; |
@@ -2655,17 +3159,46 @@ struct client_init_rx_data { | |||
2655 | u8 outer_vlan_removal_enable_flg; | 3159 | u8 outer_vlan_removal_enable_flg; |
2656 | u8 status_block_id; | 3160 | u8 status_block_id; |
2657 | u8 rx_sb_index_number; | 3161 | u8 rx_sb_index_number; |
2658 | u8 reserved0[3]; | 3162 | u8 reserved0; |
2659 | __le16 bd_buff_size; | 3163 | u8 max_tpa_queues; |
3164 | u8 silent_vlan_removal_flg; | ||
3165 | __le16 max_bytes_on_bd; | ||
2660 | __le16 sge_buff_size; | 3166 | __le16 sge_buff_size; |
2661 | __le16 mtu; | 3167 | u8 approx_mcast_engine_id; |
3168 | u8 rss_engine_id; | ||
2662 | struct regpair bd_page_base; | 3169 | struct regpair bd_page_base; |
2663 | struct regpair sge_page_base; | 3170 | struct regpair sge_page_base; |
2664 | struct regpair cqe_page_base; | 3171 | struct regpair cqe_page_base; |
2665 | u8 is_leading_rss; | 3172 | u8 is_leading_rss; |
2666 | u8 is_approx_mcast; | 3173 | u8 is_approx_mcast; |
2667 | __le16 max_agg_size; | 3174 | __le16 max_agg_size; |
2668 | __le32 reserved2[3]; | 3175 | __le16 state; |
3176 | #define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0) | ||
3177 | #define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0 | ||
3178 | #define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1) | ||
3179 | #define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1 | ||
3180 | #define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2) | ||
3181 | #define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2 | ||
3182 | #define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3) | ||
3183 | #define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3 | ||
3184 | #define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4) | ||
3185 | #define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4 | ||
3186 | #define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5) | ||
3187 | #define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5 | ||
3188 | #define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6) | ||
3189 | #define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6 | ||
3190 | #define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7) | ||
3191 | #define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7 | ||
3192 | __le16 cqe_pause_thr_low; | ||
3193 | __le16 cqe_pause_thr_high; | ||
3194 | __le16 bd_pause_thr_low; | ||
3195 | __le16 bd_pause_thr_high; | ||
3196 | __le16 sge_pause_thr_low; | ||
3197 | __le16 sge_pause_thr_high; | ||
3198 | __le16 rx_cos_mask; | ||
3199 | __le16 silent_vlan_value; | ||
3200 | __le16 silent_vlan_mask; | ||
3201 | __le32 reserved6[2]; | ||
2669 | }; | 3202 | }; |
2670 | 3203 | ||
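The Rx client's accept/drop policy now travels in the packed state word of the ramrod data instead of being written to FW internal memory by the driver. A sketch of building one possible policy, accept all unicast and broadcast but drop multicast, chosen purely for illustration:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch: program an example Rx filtering policy into client_init_rx_data. */
static void set_rx_filter_state(struct client_init_rx_data *rx)
{
	u16 state = 0;

	state |= CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL;
	state |= CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL;
	state |= CLIENT_INIT_RX_DATA_MCAST_DROP_ALL;

	rx->state = cpu_to_le16(state);	/* field is __le16 */
}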
2671 | /* | 3204 | /* |
@@ -2675,11 +3208,25 @@ struct client_init_tx_data { | |||
2675 | u8 enforce_security_flg; | 3208 | u8 enforce_security_flg; |
2676 | u8 tx_status_block_id; | 3209 | u8 tx_status_block_id; |
2677 | u8 tx_sb_index_number; | 3210 | u8 tx_sb_index_number; |
2678 | u8 reserved0; | 3211 | u8 tss_leading_client_id; |
2679 | __le16 mtu; | 3212 | u8 tx_switching_flg; |
2680 | __le16 reserved1; | 3213 | u8 anti_spoofing_flg; |
3214 | __le16 default_vlan; | ||
2681 | struct regpair tx_bd_page_base; | 3215 | struct regpair tx_bd_page_base; |
2682 | __le32 reserved2[2]; | 3216 | __le16 state; |
3217 | #define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0) | ||
3218 | #define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0 | ||
3219 | #define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1) | ||
3220 | #define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1 | ||
3221 | #define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2) | ||
3222 | #define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2 | ||
3223 | #define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3) | ||
3224 | #define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3 | ||
3225 | #define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4) | ||
3226 | #define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4 | ||
3227 | u8 default_vlan_flg; | ||
3228 | u8 reserved2; | ||
3229 | __le32 reserved3; | ||
2683 | }; | 3230 | }; |
2684 | 3231 | ||
2685 | /* | 3232 | /* |
@@ -2689,7 +3236,146 @@ struct client_init_ramrod_data { | |||
2689 | struct client_init_general_data general; | 3236 | struct client_init_general_data general; |
2690 | struct client_init_rx_data rx; | 3237 | struct client_init_rx_data rx; |
2691 | struct client_init_tx_data tx; | 3238 | struct client_init_tx_data tx; |
2692 | struct client_init_fc_data fc; | 3239 | }; |
3240 | |||
3241 | |||
3242 | /* | ||
3243 | * client update ramrod data | ||
3244 | */ | ||
3245 | struct client_update_ramrod_data { | ||
3246 | u8 client_id; | ||
3247 | u8 func_id; | ||
3248 | u8 inner_vlan_removal_enable_flg; | ||
3249 | u8 inner_vlan_removal_change_flg; | ||
3250 | u8 outer_vlan_removal_enable_flg; | ||
3251 | u8 outer_vlan_removal_change_flg; | ||
3252 | u8 anti_spoofing_enable_flg; | ||
3253 | u8 anti_spoofing_change_flg; | ||
3254 | u8 activate_flg; | ||
3255 | u8 activate_change_flg; | ||
3256 | __le16 default_vlan; | ||
3257 | u8 default_vlan_enable_flg; | ||
3258 | u8 default_vlan_change_flg; | ||
3259 | __le16 silent_vlan_value; | ||
3260 | __le16 silent_vlan_mask; | ||
3261 | u8 silent_vlan_removal_flg; | ||
3262 | u8 silent_vlan_change_flg; | ||
3263 | __le32 echo; | ||
3264 | }; | ||
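Editor's note: the client update ramrod pairs each <feature>_enable_flg with a <feature>_change_flg, and the flag naming suggests a field only takes effect when its change flag is set; "echo" comes back in the completion. A hedged sketch of filling the structure (illustrative values; "cookie" is a hypothetical caller-chosen tag, not a field or helper from the source):

/*
 * Sketch only: judging by the flag pairing, a field appears to be
 * applied only when the matching *_change_flg is set.
 */
static void sketch_client_update(struct client_update_ramrod_data *d,
				 u8 cl_id, u8 fid, u32 cookie)
{
	memset(d, 0, sizeof(*d));	/* leave all other features untouched */

	d->client_id = cl_id;
	d->func_id = fid;

	/* example: turn on inner-VLAN stripping for this client */
	d->inner_vlan_removal_change_flg = 1;
	d->inner_vlan_removal_enable_flg = 1;

	d->echo = cpu_to_le32(cookie);	/* returned in the completion */
}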
3265 | |||
3266 | |||
3267 | /* | ||
3268 | * The eth storm context of Cstorm | ||
3269 | */ | ||
3270 | struct cstorm_eth_st_context { | ||
3271 | u32 __reserved0[4]; | ||
3272 | }; | ||
3273 | |||
3274 | |||
3275 | struct double_regpair { | ||
3276 | u32 regpair0_lo; | ||
3277 | u32 regpair0_hi; | ||
3278 | u32 regpair1_lo; | ||
3279 | u32 regpair1_hi; | ||
3280 | }; | ||
3281 | |||
3282 | |||
3283 | /* | ||
3284 | * Ethernet address types used in ethernet tx BDs | ||
3284 | * Ethernet address types used in ethernet tx BDs | ||
3285 | */ | ||
3286 | enum eth_addr_type { | ||
3287 | UNKNOWN_ADDRESS, | ||
3288 | UNICAST_ADDRESS, | ||
3289 | MULTICAST_ADDRESS, | ||
3290 | BROADCAST_ADDRESS, | ||
3291 | MAX_ETH_ADDR_TYPE | ||
3292 | }; | ||
3293 | |||
3294 | |||
3295 | /* | ||
3296 | * | ||
3297 | */ | ||
3298 | struct eth_classify_cmd_header { | ||
3299 | u8 cmd_general_data; | ||
3300 | #define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0) | ||
3301 | #define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0 | ||
3302 | #define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1) | ||
3303 | #define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1 | ||
3304 | #define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2) | ||
3305 | #define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2 | ||
3306 | #define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4) | ||
3307 | #define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4 | ||
3308 | #define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5) | ||
3309 | #define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5 | ||
3310 | u8 func_id; | ||
3311 | u8 client_id; | ||
3312 | u8 reserved1; | ||
3313 | }; | ||
3314 | |||
3315 | |||
3316 | /* | ||
3317 | * header for eth classification config ramrod | ||
3318 | */ | ||
3319 | struct eth_classify_header { | ||
3320 | u8 rule_cnt; | ||
3321 | u8 reserved0; | ||
3322 | __le16 reserved1; | ||
3323 | __le32 echo; | ||
3324 | }; | ||
3325 | |||
3326 | |||
3327 | /* | ||
3328 | * Command for adding/removing a MAC classification rule | ||
3329 | */ | ||
3330 | struct eth_classify_mac_cmd { | ||
3331 | struct eth_classify_cmd_header header; | ||
3332 | __le32 reserved0; | ||
3333 | __le16 mac_lsb; | ||
3334 | __le16 mac_mid; | ||
3335 | __le16 mac_msb; | ||
3336 | __le16 reserved1; | ||
3337 | }; | ||
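Editor's note: the classification command carries the MAC address as three 16-bit little-endian words. The sketch below only illustrates packing six octets into mac_msb/mac_mid/mac_lsb; the exact octet-to-word mapping is firmware-defined, so treat this ordering as an assumption:

/*
 * Illustration only: pack a 6-byte MAC into the three 16-bit words of an
 * eth_classify_mac_cmd.  The msb/mid/lsb octet ordering below is an
 * assumption for the sketch, not a statement of the FW convention.
 */
static void sketch_mac_to_words(struct eth_classify_mac_cmd *cmd,
				const u8 *mac)
{
	cmd->mac_msb = cpu_to_le16((mac[0] << 8) | mac[1]);
	cmd->mac_mid = cpu_to_le16((mac[2] << 8) | mac[3]);
	cmd->mac_lsb = cpu_to_le16((mac[4] << 8) | mac[5]);
}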
3338 | |||
3339 | |||
3340 | /* | ||
3341 | * Command for adding/removing a MAC-VLAN pair classification rule | ||
3342 | */ | ||
3343 | struct eth_classify_pair_cmd { | ||
3344 | struct eth_classify_cmd_header header; | ||
3345 | __le32 reserved0; | ||
3346 | __le16 mac_lsb; | ||
3347 | __le16 mac_mid; | ||
3348 | __le16 mac_msb; | ||
3349 | __le16 vlan; | ||
3350 | }; | ||
3351 | |||
3352 | |||
3353 | /* | ||
3354 | * Command for adding/removing a VLAN classification rule | ||
3355 | */ | ||
3356 | struct eth_classify_vlan_cmd { | ||
3357 | struct eth_classify_cmd_header header; | ||
3358 | __le32 reserved0; | ||
3359 | __le32 reserved1; | ||
3360 | __le16 reserved2; | ||
3361 | __le16 vlan; | ||
3362 | }; | ||
3363 | |||
3364 | /* | ||
3365 | * union for eth classification rule | ||
3366 | */ | ||
3367 | union eth_classify_rule_cmd { | ||
3368 | struct eth_classify_mac_cmd mac; | ||
3369 | struct eth_classify_vlan_cmd vlan; | ||
3370 | struct eth_classify_pair_cmd pair; | ||
3371 | }; | ||
3372 | |||
3373 | /* | ||
3374 | * parameters for eth classification configuration ramrod | ||
3375 | */ | ||
3376 | struct eth_classify_rules_ramrod_data { | ||
3377 | struct eth_classify_header header; | ||
3378 | union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT]; | ||
2693 | }; | 3379 | }; |
2694 | 3380 | ||
2695 | 3381 | ||
@@ -2697,8 +3383,45 @@ struct client_init_ramrod_data { | |||
2697 | * The data contains the client ID needed for the ramrod | 3383 | * The data contains the client ID needed for the ramrod |
2698 | */ | 3384 | */ |
2699 | struct eth_common_ramrod_data { | 3385 | struct eth_common_ramrod_data { |
2700 | u32 client_id; | 3386 | __le32 client_id; |
2701 | u32 reserved1; | 3387 | __le32 reserved1; |
3388 | }; | ||
3389 | |||
3390 | |||
3391 | /* | ||
3392 | * The eth storm context of Ustorm | ||
3393 | */ | ||
3394 | struct ustorm_eth_st_context { | ||
3395 | u32 reserved0[52]; | ||
3396 | }; | ||
3397 | |||
3398 | /* | ||
3399 | * The eth storm context of Tstorm | ||
3400 | */ | ||
3401 | struct tstorm_eth_st_context { | ||
3402 | u32 __reserved0[28]; | ||
3403 | }; | ||
3404 | |||
3405 | /* | ||
3406 | * The eth storm context of Xstorm | ||
3407 | */ | ||
3408 | struct xstorm_eth_st_context { | ||
3409 | u32 reserved0[60]; | ||
3410 | }; | ||
3411 | |||
3412 | /* | ||
3413 | * Ethernet connection context | ||
3414 | */ | ||
3415 | struct eth_context { | ||
3416 | struct ustorm_eth_st_context ustorm_st_context; | ||
3417 | struct tstorm_eth_st_context tstorm_st_context; | ||
3418 | struct xstorm_eth_ag_context xstorm_ag_context; | ||
3419 | struct tstorm_eth_ag_context tstorm_ag_context; | ||
3420 | struct cstorm_eth_ag_context cstorm_ag_context; | ||
3421 | struct ustorm_eth_ag_context ustorm_ag_context; | ||
3422 | struct timers_block_context timers_context; | ||
3423 | struct xstorm_eth_st_context xstorm_st_context; | ||
3424 | struct cstorm_eth_st_context cstorm_st_context; | ||
2702 | }; | 3425 | }; |
2703 | 3426 | ||
2704 | 3427 | ||
@@ -2711,24 +3434,47 @@ union eth_sgl_or_raw_data { | |||
2711 | }; | 3434 | }; |
2712 | 3435 | ||
2713 | /* | 3436 | /* |
3437 | * eth FP end aggregation CQE parameters struct | ||
3438 | */ | ||
3439 | struct eth_end_agg_rx_cqe { | ||
3440 | u8 type_error_flags; | ||
3441 | #define ETH_END_AGG_RX_CQE_TYPE (0x3<<0) | ||
3442 | #define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0 | ||
3443 | #define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2) | ||
3444 | #define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2 | ||
3445 | #define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3) | ||
3446 | #define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3 | ||
3447 | u8 reserved1; | ||
3448 | u8 queue_index; | ||
3449 | u8 reserved2; | ||
3450 | __le32 timestamp_delta; | ||
3451 | __le16 num_of_coalesced_segs; | ||
3452 | __le16 pkt_len; | ||
3453 | u8 pure_ack_count; | ||
3454 | u8 reserved3; | ||
3455 | __le16 reserved4; | ||
3456 | union eth_sgl_or_raw_data sgl_or_raw_data; | ||
3457 | __le32 reserved5[8]; | ||
3458 | }; | ||
3459 | |||
3460 | |||
3461 | /* | ||
2714 | * regular eth FP CQE parameters struct | 3462 | * regular eth FP CQE parameters struct |
2715 | */ | 3463 | */ |
2716 | struct eth_fast_path_rx_cqe { | 3464 | struct eth_fast_path_rx_cqe { |
2717 | u8 type_error_flags; | 3465 | u8 type_error_flags; |
2718 | #define ETH_FAST_PATH_RX_CQE_TYPE (0x1<<0) | 3466 | #define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0) |
2719 | #define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0 | 3467 | #define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0 |
2720 | #define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<1) | 3468 | #define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2) |
2721 | #define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 1 | 3469 | #define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2 |
2722 | #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<2) | 3470 | #define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3) |
2723 | #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 2 | 3471 | #define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3 |
2724 | #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<3) | 3472 | #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4) |
2725 | #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 3 | 3473 | #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4 |
2726 | #define ETH_FAST_PATH_RX_CQE_START_FLG (0x1<<4) | 3474 | #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5) |
2727 | #define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4 | 3475 | #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5 |
2728 | #define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5) | 3476 | #define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) |
2729 | #define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5 | 3477 | #define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6 |
2730 | #define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x3<<6) | ||
2731 | #define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 6 | ||
2732 | u8 status_flags; | 3478 | u8 status_flags; |
2733 | #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) | 3479 | #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) |
2734 | #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 | 3480 | #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 |
@@ -2742,39 +3488,108 @@ struct eth_fast_path_rx_cqe { | |||
2742 | #define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6 | 3488 | #define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6 |
2743 | #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7) | 3489 | #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7) |
2744 | #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7 | 3490 | #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7 |
2745 | u8 placement_offset; | ||
2746 | u8 queue_index; | 3491 | u8 queue_index; |
3492 | u8 placement_offset; | ||
2747 | __le32 rss_hash_result; | 3493 | __le32 rss_hash_result; |
2748 | __le16 vlan_tag; | 3494 | __le16 vlan_tag; |
2749 | __le16 pkt_len; | 3495 | __le16 pkt_len; |
2750 | __le16 len_on_bd; | 3496 | __le16 len_on_bd; |
2751 | struct parsing_flags pars_flags; | 3497 | struct parsing_flags pars_flags; |
2752 | union eth_sgl_or_raw_data sgl_or_raw_data; | 3498 | union eth_sgl_or_raw_data sgl_or_raw_data; |
3499 | __le32 reserved1[8]; | ||
2753 | }; | 3500 | }; |
2754 | 3501 | ||
2755 | 3502 | ||
2756 | /* | 3503 | /* |
2757 | * The data for RSS setup ramrod | 3504 | * Command for setting classification flags for a client |
3505 | */ | ||
3506 | struct eth_filter_rules_cmd { | ||
3507 | u8 cmd_general_data; | ||
3508 | #define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0) | ||
3509 | #define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0 | ||
3510 | #define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1) | ||
3511 | #define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1 | ||
3512 | #define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2) | ||
3513 | #define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2 | ||
3514 | u8 func_id; | ||
3515 | u8 client_id; | ||
3516 | u8 reserved1; | ||
3517 | __le16 state; | ||
3518 | #define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0) | ||
3519 | #define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0 | ||
3520 | #define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1) | ||
3521 | #define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1 | ||
3522 | #define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2) | ||
3523 | #define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2 | ||
3524 | #define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3) | ||
3525 | #define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3 | ||
3526 | #define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4) | ||
3527 | #define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4 | ||
3528 | #define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5) | ||
3529 | #define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5 | ||
3530 | #define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6) | ||
3531 | #define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6 | ||
3532 | #define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7) | ||
3533 | #define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7 | ||
3534 | __le16 reserved3; | ||
3535 | struct regpair reserved4; | ||
3536 | }; | ||
3537 | |||
3538 | |||
3539 | /* | ||
3540 | * parameters for eth classification filters ramrod | ||
3541 | */ | ||
3542 | struct eth_filter_rules_ramrod_data { | ||
3543 | struct eth_classify_header header; | ||
3544 | struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT]; | ||
3545 | }; | ||
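Editor's note: a filter-rules ramrod carries up to FILTER_RULES_COUNT commands, with header.rule_cnt saying how many entries of rules[] are valid. A minimal sketch that queues one drop-all rule for both the Rx and Tx paths of a client (assumes FILTER_RULES_COUNT >= 1; not an actual driver helper):

/*
 * Sketch only: queue a single drop-all rule for both Rx and Tx of one
 * client, using the masks shown above.
 */
static void sketch_drop_all_rule(struct eth_filter_rules_ramrod_data *data,
				 u8 fid, u8 cl_id)
{
	struct eth_filter_rules_cmd *rule = &data->rules[0];

	data->header.rule_cnt = 1;	/* number of valid entries in rules[] */

	rule->cmd_general_data = ETH_FILTER_RULES_CMD_RX_CMD |
				 ETH_FILTER_RULES_CMD_TX_CMD;
	rule->func_id = fid;
	rule->client_id = cl_id;
	rule->state = cpu_to_le16(ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
				  ETH_FILTER_RULES_CMD_MCAST_DROP_ALL);
}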
3546 | |||
3547 | |||
3548 | /* | ||
3549 | * parameters for eth classification configuration ramrod | ||
3550 | */ | ||
3551 | struct eth_general_rules_ramrod_data { | ||
3552 | struct eth_classify_header header; | ||
3553 | union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT]; | ||
3554 | }; | ||
3555 | |||
3556 | |||
3557 | /* | ||
3558 | * The data for Halt ramrod | ||
2758 | */ | 3559 | */ |
2759 | struct eth_halt_ramrod_data { | 3560 | struct eth_halt_ramrod_data { |
2760 | u32 client_id; | 3561 | __le32 client_id; |
2761 | u32 reserved0; | 3562 | __le32 reserved0; |
2762 | }; | 3563 | }; |
2763 | 3564 | ||
3565 | |||
2764 | /* | 3566 | /* |
2765 | * The data for statistics query ramrod | 3567 | * Command for setting multicast classification for a client |
2766 | */ | 3568 | */ |
2767 | struct common_query_ramrod_data { | 3569 | struct eth_multicast_rules_cmd { |
2768 | #if defined(__BIG_ENDIAN) | 3570 | u8 cmd_general_data; |
2769 | u8 reserved0; | 3571 | #define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0) |
2770 | u8 collect_port; | 3572 | #define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0 |
2771 | u16 drv_counter; | 3573 | #define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1) |
2772 | #elif defined(__LITTLE_ENDIAN) | 3574 | #define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1 |
2773 | u16 drv_counter; | 3575 | #define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2) |
2774 | u8 collect_port; | 3576 | #define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2 |
2775 | u8 reserved0; | 3577 | #define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3) |
2776 | #endif | 3578 | #define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3 |
2777 | u32 ctr_id_vector; | 3579 | u8 func_id; |
3580 | u8 bin_id; | ||
3581 | u8 engine_id; | ||
3582 | __le32 reserved2; | ||
3583 | struct regpair reserved3; | ||
3584 | }; | ||
3585 | |||
3586 | |||
3587 | /* | ||
3588 | * parameters for multicast classification ramrod | ||
3589 | */ | ||
3590 | struct eth_multicast_rules_ramrod_data { | ||
3591 | struct eth_classify_header header; | ||
3592 | struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT]; | ||
2778 | }; | 3593 | }; |
2779 | 3594 | ||
2780 | 3595 | ||
@@ -2795,16 +3610,86 @@ union eth_ramrod_data { | |||
2795 | 3610 | ||
2796 | 3611 | ||
2797 | /* | 3612 | /* |
3613 | * RSS toeplitz hash type, as reported in CQE | ||
3614 | */ | ||
3615 | enum eth_rss_hash_type { | ||
3616 | DEFAULT_HASH_TYPE, | ||
3617 | IPV4_HASH_TYPE, | ||
3618 | TCP_IPV4_HASH_TYPE, | ||
3619 | IPV6_HASH_TYPE, | ||
3620 | TCP_IPV6_HASH_TYPE, | ||
3621 | VLAN_PRI_HASH_TYPE, | ||
3622 | E1HOV_PRI_HASH_TYPE, | ||
3623 | DSCP_HASH_TYPE, | ||
3624 | MAX_ETH_RSS_HASH_TYPE | ||
3625 | }; | ||
3626 | |||
3627 | |||
3628 | /* | ||
3629 | * Ethernet RSS mode | ||
3630 | */ | ||
3631 | enum eth_rss_mode { | ||
3632 | ETH_RSS_MODE_DISABLED, | ||
3633 | ETH_RSS_MODE_REGULAR, | ||
3634 | ETH_RSS_MODE_VLAN_PRI, | ||
3635 | ETH_RSS_MODE_E1HOV_PRI, | ||
3636 | ETH_RSS_MODE_IP_DSCP, | ||
3637 | MAX_ETH_RSS_MODE | ||
3638 | }; | ||
3639 | |||
3640 | |||
3641 | /* | ||
3642 | * parameters for RSS update ramrod (E2) | ||
3643 | */ | ||
3644 | struct eth_rss_update_ramrod_data { | ||
3645 | u8 rss_engine_id; | ||
3646 | u8 capabilities; | ||
3647 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0) | ||
3648 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0 | ||
3649 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1) | ||
3650 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1 | ||
3651 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2) | ||
3652 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2 | ||
3653 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3) | ||
3654 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3 | ||
3655 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4) | ||
3656 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4 | ||
3657 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) | ||
3658 | #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5 | ||
3659 | #define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6) | ||
3660 | #define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6 | ||
3661 | #define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7) | ||
3662 | #define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7 | ||
3663 | u8 rss_result_mask; | ||
3664 | u8 rss_mode; | ||
3665 | __le32 __reserved2; | ||
3666 | u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE]; | ||
3667 | __le32 rss_key[T_ETH_RSS_KEY]; | ||
3668 | __le32 echo; | ||
3669 | __le32 reserved3; | ||
3670 | }; | ||
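Editor's note: with 7.0, RSS configuration travels in its own E2 ramrod structure: capabilities, hash mode, indirection table and key are all in eth_rss_update_ramrod_data. A hedged sketch of filling it (illustrative values; the array sizes T_ETH_INDIRECTION_TABLE_SIZE and T_ETH_RSS_KEY come from the FW definitions, and the usual kernel string/byteorder helpers are assumed):

/*
 * Sketch only, illustrative values: enable IPv4 and IPv4/TCP hashing in
 * regular RSS mode and program the indirection table and key.
 */
static void sketch_rss_update(struct eth_rss_update_ramrod_data *d,
			      u8 engine_id, const u8 *ind_table,
			      const u32 *key)
{
	int i;

	memset(d, 0, sizeof(*d));
	d->rss_engine_id = engine_id;
	d->rss_mode = ETH_RSS_MODE_REGULAR;
	d->rss_result_mask = 0x7;	/* example: 3 hash bits select the queue */
	d->capabilities = ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY |
			  ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY |
			  ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;

	memcpy(d->indirection_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
	for (i = 0; i < T_ETH_RSS_KEY; i++)
		d->rss_key[i] = cpu_to_le32(key[i]);
}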
3671 | |||
3672 | |||
3673 | /* | ||
3674 | * The eth Rx Buffer Descriptor | ||
3675 | */ | ||
3676 | struct eth_rx_bd { | ||
3677 | __le32 addr_lo; | ||
3678 | __le32 addr_hi; | ||
3679 | }; | ||
3680 | |||
3681 | |||
3682 | /* | ||
2798 | * Eth Rx Cqe structure- general structure for ramrods | 3683 | * Eth Rx Cqe structure- general structure for ramrods |
2799 | */ | 3684 | */ |
2800 | struct common_ramrod_eth_rx_cqe { | 3685 | struct common_ramrod_eth_rx_cqe { |
2801 | u8 ramrod_type; | 3686 | u8 ramrod_type; |
2802 | #define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x1<<0) | 3687 | #define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0) |
2803 | #define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0 | 3688 | #define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0 |
2804 | #define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<1) | 3689 | #define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2) |
2805 | #define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 1 | 3690 | #define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2 |
2806 | #define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x3F<<2) | 3691 | #define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3) |
2807 | #define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 2 | 3692 | #define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3 |
2808 | u8 conn_type; | 3693 | u8 conn_type; |
2809 | __le16 reserved1; | 3694 | __le16 reserved1; |
2810 | __le32 conn_and_cmd_data; | 3695 | __le32 conn_and_cmd_data; |
@@ -2813,7 +3698,8 @@ struct common_ramrod_eth_rx_cqe { | |||
2813 | #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24) | 3698 | #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24) |
2814 | #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24 | 3699 | #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24 |
2815 | struct ramrod_data protocol_data; | 3700 | struct ramrod_data protocol_data; |
2816 | __le32 reserved2[4]; | 3701 | __le32 echo; |
3702 | __le32 reserved2[11]; | ||
2817 | }; | 3703 | }; |
2818 | 3704 | ||
2819 | /* | 3705 | /* |
@@ -2822,7 +3708,7 @@ struct common_ramrod_eth_rx_cqe { | |||
2822 | struct eth_rx_cqe_next_page { | 3708 | struct eth_rx_cqe_next_page { |
2823 | __le32 addr_lo; | 3709 | __le32 addr_lo; |
2824 | __le32 addr_hi; | 3710 | __le32 addr_hi; |
2825 | __le32 reserved[6]; | 3711 | __le32 reserved[14]; |
2826 | }; | 3712 | }; |
2827 | 3713 | ||
2828 | /* | 3714 | /* |
@@ -2832,6 +3718,38 @@ union eth_rx_cqe { | |||
2832 | struct eth_fast_path_rx_cqe fast_path_cqe; | 3718 | struct eth_fast_path_rx_cqe fast_path_cqe; |
2833 | struct common_ramrod_eth_rx_cqe ramrod_cqe; | 3719 | struct common_ramrod_eth_rx_cqe ramrod_cqe; |
2834 | struct eth_rx_cqe_next_page next_page_cqe; | 3720 | struct eth_rx_cqe_next_page next_page_cqe; |
3721 | struct eth_end_agg_rx_cqe end_agg_cqe; | ||
3722 | }; | ||
3723 | |||
3724 | |||
3725 | /* | ||
3726 | * Values for RX ETH CQE type field | ||
3727 | */ | ||
3728 | enum eth_rx_cqe_type { | ||
3729 | RX_ETH_CQE_TYPE_ETH_FASTPATH, | ||
3730 | RX_ETH_CQE_TYPE_ETH_RAMROD, | ||
3731 | RX_ETH_CQE_TYPE_ETH_START_AGG, | ||
3732 | RX_ETH_CQE_TYPE_ETH_STOP_AGG, | ||
3733 | MAX_ETH_RX_CQE_TYPE | ||
3734 | }; | ||
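Editor's note: the CQE "type" field grew from one bit to two (0x3<<0), so an Rx path has to mask both bits and compare against enum eth_rx_cqe_type instead of testing a single flag. A minimal sketch of the extraction, using only the mask and enum defined in this file:

/*
 * Sketch only: extract the two-bit CQE type from a fast-path Rx CQE.
 */
static u8 sketch_cqe_type(const union eth_rx_cqe *cqe)
{
	u8 flags = cqe->fast_path_cqe.type_error_flags;

	/* one of RX_ETH_CQE_TYPE_ETH_FASTPATH/_RAMROD/_START_AGG/_STOP_AGG */
	return (flags & ETH_FAST_PATH_RX_CQE_TYPE) >>
	       ETH_FAST_PATH_RX_CQE_TYPE_SHIFT;
}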
3735 | |||
3736 | |||
3737 | /* | ||
3738 | * Type of SGL/Raw field in ETH RX fast path CQE | ||
3739 | */ | ||
3740 | enum eth_rx_fp_sel { | ||
3741 | ETH_FP_CQE_REGULAR, | ||
3742 | ETH_FP_CQE_RAW, | ||
3743 | MAX_ETH_RX_FP_SEL | ||
3744 | }; | ||
3745 | |||
3746 | |||
3747 | /* | ||
3748 | * The eth Rx SGE Descriptor | ||
3749 | */ | ||
3750 | struct eth_rx_sge { | ||
3751 | __le32 addr_lo; | ||
3752 | __le32 addr_hi; | ||
2835 | }; | 3753 | }; |
2836 | 3754 | ||
2837 | 3755 | ||
@@ -2853,14 +3771,18 @@ struct spe_hdr { | |||
2853 | }; | 3771 | }; |
2854 | 3772 | ||
2855 | /* | 3773 | /* |
2856 | * Ethernet slow path element | 3774 | * specific data for ethernet slow path element |
2857 | */ | 3775 | */ |
2858 | union eth_specific_data { | 3776 | union eth_specific_data { |
2859 | u8 protocol_data[8]; | 3777 | u8 protocol_data[8]; |
3778 | struct regpair client_update_ramrod_data; | ||
2860 | struct regpair client_init_ramrod_init_data; | 3779 | struct regpair client_init_ramrod_init_data; |
2861 | struct eth_halt_ramrod_data halt_ramrod_data; | 3780 | struct eth_halt_ramrod_data halt_ramrod_data; |
2862 | struct regpair update_data_addr; | 3781 | struct regpair update_data_addr; |
2863 | struct eth_common_ramrod_data common_ramrod_data; | 3782 | struct eth_common_ramrod_data common_ramrod_data; |
3783 | struct regpair classify_cfg_addr; | ||
3784 | struct regpair filter_cfg_addr; | ||
3785 | struct regpair mcast_cfg_addr; | ||
2864 | }; | 3786 | }; |
2865 | 3787 | ||
2866 | /* | 3788 | /* |
@@ -2873,94 +3795,202 @@ struct eth_spe { | |||
2873 | 3795 | ||
2874 | 3796 | ||
2875 | /* | 3797 | /* |
2876 | * array of 13 bds as appears in the eth xstorm context | 3798 | * Ethernet command ID for slow path elements |
2877 | */ | 3799 | */ |
2878 | struct eth_tx_bds_array { | 3800 | enum eth_spqe_cmd_id { |
2879 | union eth_tx_bd_types bds[13]; | 3801 | RAMROD_CMD_ID_ETH_UNUSED, |
3802 | RAMROD_CMD_ID_ETH_CLIENT_SETUP, | ||
3803 | RAMROD_CMD_ID_ETH_HALT, | ||
3804 | RAMROD_CMD_ID_ETH_FORWARD_SETUP, | ||
3805 | RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP, | ||
3806 | RAMROD_CMD_ID_ETH_CLIENT_UPDATE, | ||
3807 | RAMROD_CMD_ID_ETH_EMPTY, | ||
3808 | RAMROD_CMD_ID_ETH_TERMINATE, | ||
3809 | RAMROD_CMD_ID_ETH_TPA_UPDATE, | ||
3810 | RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES, | ||
3811 | RAMROD_CMD_ID_ETH_FILTER_RULES, | ||
3812 | RAMROD_CMD_ID_ETH_MULTICAST_RULES, | ||
3813 | RAMROD_CMD_ID_ETH_RSS_UPDATE, | ||
3814 | RAMROD_CMD_ID_ETH_SET_MAC, | ||
3815 | MAX_ETH_SPQE_CMD_ID | ||
2880 | }; | 3816 | }; |
2881 | 3817 | ||
2882 | 3818 | ||
2883 | /* | 3819 | /* |
2884 | * Common configuration parameters per function in Tstorm | 3820 | * eth tpa update command |
2885 | */ | 3821 | */ |
2886 | struct tstorm_eth_function_common_config { | 3822 | enum eth_tpa_update_command { |
2887 | #if defined(__BIG_ENDIAN) | 3823 | TPA_UPDATE_NONE_COMMAND, |
2888 | u8 reserved1; | 3824 | TPA_UPDATE_ENABLE_COMMAND, |
2889 | u8 rss_result_mask; | 3825 | TPA_UPDATE_DISABLE_COMMAND, |
2890 | u16 config_flags; | 3826 | MAX_ETH_TPA_UPDATE_COMMAND |
2891 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) | ||
2892 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 | ||
2893 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) | ||
2894 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1 | ||
2895 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) | ||
2896 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2 | ||
2897 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) | ||
2898 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 | ||
2899 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) | ||
2900 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 | ||
2901 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7) | ||
2902 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7 | ||
2903 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8) | ||
2904 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8 | ||
2905 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9) | ||
2906 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9 | ||
2907 | #elif defined(__LITTLE_ENDIAN) | ||
2908 | u16 config_flags; | ||
2909 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) | ||
2910 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 | ||
2911 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) | ||
2912 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1 | ||
2913 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) | ||
2914 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2 | ||
2915 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) | ||
2916 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 | ||
2917 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) | ||
2918 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 | ||
2919 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7) | ||
2920 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7 | ||
2921 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8) | ||
2922 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8 | ||
2923 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9) | ||
2924 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9 | ||
2925 | u8 rss_result_mask; | ||
2926 | u8 reserved1; | ||
2927 | #endif | ||
2928 | u16 vlan_id[2]; | ||
2929 | }; | 3827 | }; |
2930 | 3828 | ||
3829 | |||
2931 | /* | 3830 | /* |
2932 | * RSS indirection table update configuration | 3831 | * Tx regular BD structure |
2933 | */ | 3832 | */ |
2934 | struct rss_update_config { | 3833 | struct eth_tx_bd { |
2935 | #if defined(__BIG_ENDIAN) | 3834 | __le32 addr_lo; |
2936 | u16 toe_rss_bitmap; | 3835 | __le32 addr_hi; |
2937 | u16 flags; | 3836 | __le16 total_pkt_bytes; |
2938 | #define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0) | 3837 | __le16 nbytes; |
2939 | #define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0 | 3838 | u8 reserved[4]; |
2940 | #define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1) | 3839 | }; |
2941 | #define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1 | 3840 | |
2942 | #define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2) | 3841 | |
2943 | #define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2 | 3842 | /* |
2944 | #elif defined(__LITTLE_ENDIAN) | 3843 | * structure for easy accessibility to assembler |
2945 | u16 flags; | 3844 | */ |
2946 | #define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0) | 3845 | struct eth_tx_bd_flags { |
2947 | #define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0 | 3846 | u8 as_bitfield; |
2948 | #define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1) | 3847 | #define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0) |
2949 | #define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1 | 3848 | #define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0 |
2950 | #define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2) | 3849 | #define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1) |
2951 | #define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2 | 3850 | #define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1 |
2952 | u16 toe_rss_bitmap; | 3851 | #define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2) |
2953 | #endif | 3852 | #define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2 |
2954 | u32 reserved1; | 3853 | #define ETH_TX_BD_FLAGS_START_BD (0x1<<4) |
3854 | #define ETH_TX_BD_FLAGS_START_BD_SHIFT 4 | ||
3855 | #define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5) | ||
3856 | #define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5 | ||
3857 | #define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6) | ||
3858 | #define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6 | ||
3859 | #define ETH_TX_BD_FLAGS_IPV6 (0x1<<7) | ||
3860 | #define ETH_TX_BD_FLAGS_IPV6_SHIFT 7 | ||
3861 | }; | ||
3862 | |||
3863 | /* | ||
3864 | * The eth Tx Buffer Descriptor | ||
3865 | */ | ||
3866 | struct eth_tx_start_bd { | ||
3867 | __le32 addr_lo; | ||
3868 | __le32 addr_hi; | ||
3869 | __le16 nbd; | ||
3870 | __le16 nbytes; | ||
3871 | __le16 vlan_or_ethertype; | ||
3872 | struct eth_tx_bd_flags bd_flags; | ||
3873 | u8 general_data; | ||
3874 | #define ETH_TX_START_BD_HDR_NBDS (0xF<<0) | ||
3875 | #define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 | ||
3876 | #define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) | ||
3877 | #define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 | ||
3878 | #define ETH_TX_START_BD_RESREVED (0x1<<5) | ||
3879 | #define ETH_TX_START_BD_RESREVED_SHIFT 5 | ||
3880 | #define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6) | ||
3881 | #define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6 | ||
3882 | }; | ||
3883 | |||
3884 | /* | ||
3885 | * Tx parsing BD structure for ETH E1/E1h | ||
3886 | */ | ||
3887 | struct eth_tx_parse_bd_e1x { | ||
3888 | u8 global_data; | ||
3889 | #define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0) | ||
3890 | #define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0 | ||
3891 | #define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4) | ||
3892 | #define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4 | ||
3893 | #define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5) | ||
3894 | #define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5 | ||
3895 | #define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6) | ||
3896 | #define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6 | ||
3897 | #define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7) | ||
3898 | #define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7 | ||
3899 | u8 tcp_flags; | ||
3900 | #define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0) | ||
3901 | #define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0 | ||
3902 | #define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1) | ||
3903 | #define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1 | ||
3904 | #define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2) | ||
3905 | #define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2 | ||
3906 | #define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3) | ||
3907 | #define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3 | ||
3908 | #define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4) | ||
3909 | #define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4 | ||
3910 | #define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5) | ||
3911 | #define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5 | ||
3912 | #define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6) | ||
3913 | #define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6 | ||
3914 | #define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7) | ||
3915 | #define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7 | ||
3916 | u8 ip_hlen_w; | ||
3917 | s8 reserved; | ||
3918 | __le16 total_hlen_w; | ||
3919 | __le16 tcp_pseudo_csum; | ||
3920 | __le16 lso_mss; | ||
3921 | __le16 ip_id; | ||
3922 | __le32 tcp_send_seq; | ||
3923 | }; | ||
3924 | |||
3925 | /* | ||
3926 | * Tx parsing BD structure for ETH E2 | ||
3927 | */ | ||
3928 | struct eth_tx_parse_bd_e2 { | ||
3929 | __le16 dst_mac_addr_lo; | ||
3930 | __le16 dst_mac_addr_mid; | ||
3931 | __le16 dst_mac_addr_hi; | ||
3932 | __le16 src_mac_addr_lo; | ||
3933 | __le16 src_mac_addr_mid; | ||
3934 | __le16 src_mac_addr_hi; | ||
3935 | __le32 parsing_data; | ||
3936 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0) | ||
3937 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0 | ||
3938 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13) | ||
3939 | #define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13 | ||
3940 | #define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17) | ||
3941 | #define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17 | ||
3942 | #define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31) | ||
3943 | #define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31 | ||
3944 | }; | ||
3945 | |||
3946 | /* | ||
3947 | * The last BD in the BD memory will hold a pointer to the next BD memory | ||
3948 | */ | ||
3949 | struct eth_tx_next_bd { | ||
3950 | __le32 addr_lo; | ||
3951 | __le32 addr_hi; | ||
3952 | u8 reserved[8]; | ||
3953 | }; | ||
3954 | |||
3955 | /* | ||
3956 | * union for 4 Bd types | ||
3957 | */ | ||
3958 | union eth_tx_bd_types { | ||
3959 | struct eth_tx_start_bd start_bd; | ||
3960 | struct eth_tx_bd reg_bd; | ||
3961 | struct eth_tx_parse_bd_e1x parse_bd_e1x; | ||
3962 | struct eth_tx_parse_bd_e2 parse_bd_e2; | ||
3963 | struct eth_tx_next_bd next_bd; | ||
3964 | }; | ||
3965 | |||
3966 | /* | ||
3967 | * array of 13 bds as appears in the eth xstorm context | ||
3968 | */ | ||
3969 | struct eth_tx_bds_array { | ||
3970 | union eth_tx_bd_types bds[13]; | ||
2955 | }; | 3971 | }; |
2956 | 3972 | ||
3973 | |||
2957 | /* | 3974 | /* |
2958 | * parameters for eth update ramrod | 3975 | * VLAN mode on TX BDs |
2959 | */ | 3976 | */ |
2960 | struct eth_update_ramrod_data { | 3977 | enum eth_tx_vlan_type { |
2961 | struct tstorm_eth_function_common_config func_config; | 3978 | X_ETH_NO_VLAN, |
2962 | u8 indirectionTable[128]; | 3979 | X_ETH_OUTBAND_VLAN, |
2963 | struct rss_update_config rss_config; | 3980 | X_ETH_INBAND_VLAN, |
3981 | X_ETH_FW_ADDED_VLAN, | ||
3982 | MAX_ETH_TX_VLAN_TYPE | ||
3983 | }; | ||
3984 | |||
3985 | |||
3986 | /* | ||
3987 | * Ethernet VLAN filtering mode in E1x | ||
3988 | */ | ||
3989 | enum eth_vlan_filter_mode { | ||
3990 | ETH_VLAN_FILTER_ANY_VLAN, | ||
3991 | ETH_VLAN_FILTER_SPECIFIC_VLAN, | ||
3992 | ETH_VLAN_FILTER_CLASSIFY, | ||
3993 | MAX_ETH_VLAN_FILTER_MODE | ||
2964 | }; | 3994 | }; |
2965 | 3995 | ||
2966 | 3996 | ||
@@ -2970,9 +4000,8 @@ struct eth_update_ramrod_data { | |||
2970 | struct mac_configuration_hdr { | 4000 | struct mac_configuration_hdr { |
2971 | u8 length; | 4001 | u8 length; |
2972 | u8 offset; | 4002 | u8 offset; |
2973 | u16 client_id; | 4003 | __le16 client_id; |
2974 | u16 echo; | 4004 | __le32 echo; |
2975 | u16 reserved1; | ||
2976 | }; | 4005 | }; |
2977 | 4006 | ||
2978 | /* | 4007 | /* |
@@ -2997,8 +4026,8 @@ struct mac_configuration_entry { | |||
2997 | #define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5 | 4026 | #define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5 |
2998 | #define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6) | 4027 | #define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6) |
2999 | #define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6 | 4028 | #define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6 |
3000 | u16 reserved0; | 4029 | __le16 reserved0; |
3001 | u32 clients_bit_vector; | 4030 | __le32 clients_bit_vector; |
3002 | }; | 4031 | }; |
3003 | 4032 | ||
3004 | /* | 4033 | /* |
@@ -3011,6 +4040,36 @@ struct mac_configuration_cmd { | |||
3011 | 4040 | ||
3012 | 4041 | ||
3013 | /* | 4042 | /* |
4043 | * Set-MAC command type (in E1x) | ||
4044 | */ | ||
4045 | enum set_mac_action_type { | ||
4046 | T_ETH_MAC_COMMAND_INVALIDATE, | ||
4047 | T_ETH_MAC_COMMAND_SET, | ||
4048 | MAX_SET_MAC_ACTION_TYPE | ||
4049 | }; | ||
4050 | |||
4051 | |||
4052 | /* | ||
4053 | * tpa update ramrod data | ||
4054 | */ | ||
4055 | struct tpa_update_ramrod_data { | ||
4056 | u8 update_ipv4; | ||
4057 | u8 update_ipv6; | ||
4058 | u8 client_id; | ||
4059 | u8 max_tpa_queues; | ||
4060 | u8 max_sges_for_packet; | ||
4061 | u8 complete_on_both_clients; | ||
4062 | __le16 reserved1; | ||
4063 | __le16 sge_buff_size; | ||
4064 | __le16 max_agg_size; | ||
4065 | __le32 sge_page_base_lo; | ||
4066 | __le32 sge_page_base_hi; | ||
4067 | __le16 sge_pause_thr_low; | ||
4068 | __le16 sge_pause_thr_high; | ||
4069 | }; | ||
4070 | |||
4071 | |||
4072 | /* | ||
3014 | * approximate-match multicast filtering for E1H per function in Tstorm | 4073 | * approximate-match multicast filtering for E1H per function in Tstorm |
3015 | */ | 4074 | */ |
3016 | struct tstorm_eth_approximate_match_multicast_filtering { | 4075 | struct tstorm_eth_approximate_match_multicast_filtering { |
@@ -3019,35 +4078,50 @@ struct tstorm_eth_approximate_match_multicast_filtering { | |||
3019 | 4078 | ||
3020 | 4079 | ||
3021 | /* | 4080 | /* |
4081 | * Common configuration parameters per function in Tstorm | ||
4082 | */ | ||
4083 | struct tstorm_eth_function_common_config { | ||
4084 | __le16 config_flags; | ||
4085 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) | ||
4086 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 | ||
4087 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) | ||
4088 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1 | ||
4089 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) | ||
4090 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2 | ||
4091 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) | ||
4092 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 | ||
4093 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) | ||
4094 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 | ||
4095 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7) | ||
4096 | #define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7 | ||
4097 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8) | ||
4098 | #define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8 | ||
4099 | u8 rss_result_mask; | ||
4100 | u8 reserved1; | ||
4101 | __le16 vlan_id[2]; | ||
4102 | }; | ||
4103 | |||
4104 | |||
4105 | /* | ||
3022 | * MAC filtering configuration parameters per port in Tstorm | 4106 | * MAC filtering configuration parameters per port in Tstorm |
3023 | */ | 4107 | */ |
3024 | struct tstorm_eth_mac_filter_config { | 4108 | struct tstorm_eth_mac_filter_config { |
3025 | u32 ucast_drop_all; | 4109 | __le32 ucast_drop_all; |
3026 | u32 ucast_accept_all; | 4110 | __le32 ucast_accept_all; |
3027 | u32 mcast_drop_all; | 4111 | __le32 mcast_drop_all; |
3028 | u32 mcast_accept_all; | 4112 | __le32 mcast_accept_all; |
3029 | u32 bcast_drop_all; | 4113 | __le32 bcast_accept_all; |
3030 | u32 bcast_accept_all; | 4114 | __le32 vlan_filter[2]; |
3031 | u32 vlan_filter[2]; | 4115 | __le32 unmatched_unicast; |
3032 | u32 unmatched_unicast; | ||
3033 | u32 reserved; | ||
3034 | }; | 4116 | }; |
3035 | 4117 | ||
3036 | 4118 | ||
3037 | /* | 4119 | /* |
3038 | * common flag to indicate existence of TPA. | 4120 | * tx only queue init ramrod data |
3039 | */ | 4121 | */ |
3040 | struct tstorm_eth_tpa_exist { | 4122 | struct tx_queue_init_ramrod_data { |
3041 | #if defined(__BIG_ENDIAN) | 4123 | struct client_init_general_data general; |
3042 | u16 reserved1; | 4124 | struct client_init_tx_data tx; |
3043 | u8 reserved0; | ||
3044 | u8 tpa_exist; | ||
3045 | #elif defined(__LITTLE_ENDIAN) | ||
3046 | u8 tpa_exist; | ||
3047 | u8 reserved0; | ||
3048 | u16 reserved1; | ||
3049 | #endif | ||
3050 | u32 reserved2; | ||
3051 | }; | 4125 | }; |
3052 | 4126 | ||
3053 | 4127 | ||
@@ -3077,10 +4151,8 @@ struct ustorm_eth_rx_producers { | |||
3077 | */ | 4151 | */ |
3078 | struct cfc_del_event_data { | 4152 | struct cfc_del_event_data { |
3079 | u32 cid; | 4153 | u32 cid; |
3080 | u8 error; | 4154 | u32 reserved0; |
3081 | u8 reserved0; | 4155 | u32 reserved1; |
3082 | u16 reserved1; | ||
3083 | u32 reserved2; | ||
3084 | }; | 4156 | }; |
3085 | 4157 | ||
3086 | 4158 | ||
@@ -3088,22 +4160,18 @@ struct cfc_del_event_data { | |||
3088 | * per-port SAFC demo variables | 4160 | * per-port SAFC demo variables |
3089 | */ | 4161 | */ |
3090 | struct cmng_flags_per_port { | 4162 | struct cmng_flags_per_port { |
3091 | u8 con_number[NUM_OF_PROTOCOLS]; | ||
3092 | u32 cmng_enables; | 4163 | u32 cmng_enables; |
3093 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0) | 4164 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0) |
3094 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0 | 4165 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0 |
3095 | #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1) | 4166 | #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1) |
3096 | #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1 | 4167 | #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1 |
3097 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_PROTOCOL (0x1<<2) | 4168 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2) |
3098 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_PROTOCOL_SHIFT 2 | 4169 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2 |
3099 | #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL (0x1<<3) | 4170 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3) |
3100 | #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL_SHIFT 3 | 4171 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3 |
3101 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<4) | 4172 | #define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4) |
3102 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 4 | 4173 | #define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4 |
3103 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<5) | 4174 | u32 __reserved1; |
3104 | #define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 5 | ||
3105 | #define __CMNG_FLAGS_PER_PORT_RESERVED0 (0x3FFFFFF<<6) | ||
3106 | #define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 6 | ||
3107 | }; | 4175 | }; |
3108 | 4176 | ||
3109 | 4177 | ||
@@ -3122,6 +4190,7 @@ struct fairness_vars_per_port { | |||
3122 | u32 upper_bound; | 4190 | u32 upper_bound; |
3123 | u32 fair_threshold; | 4191 | u32 fair_threshold; |
3124 | u32 fairness_timeout; | 4192 | u32 fairness_timeout; |
4193 | u32 reserved0; | ||
3125 | }; | 4194 | }; |
3126 | 4195 | ||
3127 | /* | 4196 | /* |
@@ -3138,65 +4207,65 @@ struct safc_struct_per_port { | |||
3138 | u16 __reserved1; | 4207 | u16 __reserved1; |
3139 | #endif | 4208 | #endif |
3140 | u8 cos_to_traffic_types[MAX_COS_NUMBER]; | 4209 | u8 cos_to_traffic_types[MAX_COS_NUMBER]; |
3141 | u32 __reserved2; | ||
3142 | u16 cos_to_pause_mask[NUM_OF_SAFC_BITS]; | 4210 | u16 cos_to_pause_mask[NUM_OF_SAFC_BITS]; |
3143 | }; | 4211 | }; |
3144 | 4212 | ||
3145 | /* | 4213 | /* |
3146 | * per-port PFC variables | 4214 | * Per-port congestion management variables |
3147 | */ | 4215 | */ |
3148 | struct pfc_struct_per_port { | 4216 | struct cmng_struct_per_port { |
3149 | u8 priority_to_traffic_types[MAX_PFC_PRIORITIES]; | 4217 | struct rate_shaping_vars_per_port rs_vars; |
3150 | #if defined(__BIG_ENDIAN) | 4218 | struct fairness_vars_per_port fair_vars; |
3151 | u16 pfc_pause_quanta_in_nanosec; | 4219 | struct safc_struct_per_port safc_vars; |
3152 | u8 __reserved0; | 4220 | struct cmng_flags_per_port flags; |
3153 | u8 priority_non_pausable_mask; | ||
3154 | #elif defined(__LITTLE_ENDIAN) | ||
3155 | u8 priority_non_pausable_mask; | ||
3156 | u8 __reserved0; | ||
3157 | u16 pfc_pause_quanta_in_nanosec; | ||
3158 | #endif | ||
3159 | }; | 4221 | }; |
3160 | 4222 | ||
4223 | |||
3161 | /* | 4224 | /* |
3162 | * Priority and cos | 4225 | * Protocol-common command ID for slow path elements |
3163 | */ | 4226 | */ |
3164 | struct priority_cos { | 4227 | enum common_spqe_cmd_id { |
3165 | #if defined(__BIG_ENDIAN) | 4228 | RAMROD_CMD_ID_COMMON_UNUSED, |
3166 | u16 reserved1; | 4229 | RAMROD_CMD_ID_COMMON_FUNCTION_START, |
3167 | u8 cos; | 4230 | RAMROD_CMD_ID_COMMON_FUNCTION_STOP, |
3168 | u8 priority; | 4231 | RAMROD_CMD_ID_COMMON_CFC_DEL, |
3169 | #elif defined(__LITTLE_ENDIAN) | 4232 | RAMROD_CMD_ID_COMMON_CFC_DEL_WB, |
3170 | u8 priority; | 4233 | RAMROD_CMD_ID_COMMON_STAT_QUERY, |
3171 | u8 cos; | 4234 | RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, |
3172 | u16 reserved1; | 4235 | RAMROD_CMD_ID_COMMON_START_TRAFFIC, |
3173 | #endif | 4236 | RAMROD_CMD_ID_COMMON_RESERVED1, |
3174 | u32 reserved2; | 4237 | RAMROD_CMD_ID_COMMON_RESERVED2, |
4238 | MAX_COMMON_SPQE_CMD_ID | ||
3175 | }; | 4239 | }; |
3176 | 4240 | ||
4241 | |||
3177 | /* | 4242 | /* |
3178 | * Per-port congestion management variables | 4243 | * Per-protocol connection types |
3179 | */ | 4244 | */ |
3180 | struct cmng_struct_per_port { | 4245 | enum connection_type { |
3181 | struct rate_shaping_vars_per_port rs_vars; | 4246 | ETH_CONNECTION_TYPE, |
3182 | struct fairness_vars_per_port fair_vars; | 4247 | TOE_CONNECTION_TYPE, |
3183 | struct safc_struct_per_port safc_vars; | 4248 | RDMA_CONNECTION_TYPE, |
3184 | struct pfc_struct_per_port pfc_vars; | 4249 | ISCSI_CONNECTION_TYPE, |
3185 | #if defined(__BIG_ENDIAN) | 4250 | FCOE_CONNECTION_TYPE, |
3186 | u16 __reserved1; | 4251 | RESERVED_CONNECTION_TYPE_0, |
3187 | u8 dcb_enabled; | 4252 | RESERVED_CONNECTION_TYPE_1, |
3188 | u8 llfc_mode; | 4253 | RESERVED_CONNECTION_TYPE_2, |
3189 | #elif defined(__LITTLE_ENDIAN) | 4254 | NONE_CONNECTION_TYPE, |
3190 | u8 llfc_mode; | 4255 | MAX_CONNECTION_TYPE |
3191 | u8 dcb_enabled; | ||
3192 | u16 __reserved1; | ||
3193 | #endif | ||
3194 | struct priority_cos | ||
3195 | traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES]; | ||
3196 | struct cmng_flags_per_port flags; | ||
3197 | }; | 4256 | }; |
3198 | 4257 | ||
3199 | 4258 | ||
4259 | /* | ||
4260 | * Cos modes | ||
4261 | */ | ||
4262 | enum cos_mode { | ||
4263 | OVERRIDE_COS, | ||
4264 | STATIC_COS, | ||
4265 | FW_WRR, | ||
4266 | MAX_COS_MODE | ||
4267 | }; | ||
4268 | |||
3200 | 4269 | ||
3201 | /* | 4270 | /* |
3202 | * Dynamic HC counters set by the driver | 4271 | * Dynamic HC counters set by the driver |
@@ -3213,126 +4282,174 @@ struct cstorm_queue_zone_data { | |||
3213 | struct regpair reserved[2]; | 4282 | struct regpair reserved[2]; |
3214 | }; | 4283 | }; |
3215 | 4284 | ||
4285 | |||
3216 | /* | 4286 | /* |
3217 | * Dynamic host coalescing init parameters | 4287 | * Vf-PF channel data in cstorm ram (non-triggered zone) |
3218 | */ | 4288 | */ |
3219 | struct dynamic_hc_config { | 4289 | struct vf_pf_channel_zone_data { |
3220 | u32 threshold[3]; | 4290 | u32 msg_addr_lo; |
3221 | u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES]; | 4291 | u32 msg_addr_hi; |
3222 | u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES]; | ||
3223 | u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES]; | ||
3224 | u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES]; | ||
3225 | u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES]; | ||
3226 | }; | 4292 | }; |
3227 | 4293 | ||
3228 | |||
3229 | /* | 4294 | /* |
3230 | * Protocol-common statistics collected by the Xstorm (per client) | 4295 | * zone for VF non-triggered data |
3231 | */ | 4296 | */ |
3232 | struct xstorm_per_client_stats { | 4297 | struct non_trigger_vf_zone { |
3233 | __le32 reserved0; | 4298 | struct vf_pf_channel_zone_data vf_pf_channel; |
3234 | __le32 unicast_pkts_sent; | ||
3235 | struct regpair unicast_bytes_sent; | ||
3236 | struct regpair multicast_bytes_sent; | ||
3237 | __le32 multicast_pkts_sent; | ||
3238 | __le32 broadcast_pkts_sent; | ||
3239 | struct regpair broadcast_bytes_sent; | ||
3240 | __le16 stats_counter; | ||
3241 | __le16 reserved1; | ||
3242 | __le32 reserved2; | ||
3243 | }; | 4299 | }; |
3244 | 4300 | ||
3245 | /* | 4301 | /* |
3246 | * Common statistics collected by the Xstorm (per port) | 4302 | * Vf-PF channel trigger zone in cstorm ram |
3247 | */ | 4303 | */ |
3248 | struct xstorm_common_stats { | 4304 | struct vf_pf_channel_zone_trigger { |
3249 | struct xstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID]; | 4305 | u8 addr_valid; |
3250 | }; | 4306 | }; |
3251 | 4307 | ||
3252 | /* | 4308 | /* |
3253 | * Protocol-common statistics collected by the Tstorm (per port) | 4309 | * zone that triggers the in-bound interrupt |
3254 | */ | 4310 | */ |
3255 | struct tstorm_per_port_stats { | 4311 | struct trigger_vf_zone { |
3256 | __le32 mac_filter_discard; | 4312 | #if defined(__BIG_ENDIAN) |
3257 | __le32 xxoverflow_discard; | 4313 | u16 reserved1; |
3258 | __le32 brb_truncate_discard; | 4314 | u8 reserved0; |
3259 | __le32 mac_discard; | 4315 | struct vf_pf_channel_zone_trigger vf_pf_channel; |
4316 | #elif defined(__LITTLE_ENDIAN) | ||
4317 | struct vf_pf_channel_zone_trigger vf_pf_channel; | ||
4318 | u8 reserved0; | ||
4319 | u16 reserved1; | ||
4320 | #endif | ||
4321 | u32 reserved2; | ||
3260 | }; | 4322 | }; |
3261 | 4323 | ||
3262 | /* | 4324 | /* |
3263 | * Protocol-common statistics collected by the Tstorm (per client) | 4325 | * zone B per-VF data |
3264 | */ | 4326 | */ |
3265 | struct tstorm_per_client_stats { | 4327 | struct cstorm_vf_zone_data { |
3266 | struct regpair rcv_unicast_bytes; | 4328 | struct non_trigger_vf_zone non_trigger; |
3267 | struct regpair rcv_broadcast_bytes; | 4329 | struct trigger_vf_zone trigger; |
3268 | struct regpair rcv_multicast_bytes; | ||
3269 | struct regpair rcv_error_bytes; | ||
3270 | __le32 checksum_discard; | ||
3271 | __le32 packets_too_big_discard; | ||
3272 | __le32 rcv_unicast_pkts; | ||
3273 | __le32 rcv_broadcast_pkts; | ||
3274 | __le32 rcv_multicast_pkts; | ||
3275 | __le32 no_buff_discard; | ||
3276 | __le32 ttl0_discard; | ||
3277 | __le16 stats_counter; | ||
3278 | __le16 reserved0; | ||
3279 | }; | 4330 | }; |
3280 | 4331 | ||
4332 | |||
3281 | /* | 4333 | /* |
3282 | * Protocol-common statistics collected by the Tstorm | 4334 | * Dynamic host coalescing init parameters, per state machine |
3283 | */ | 4335 | */ |
3284 | struct tstorm_common_stats { | 4336 | struct dynamic_hc_sm_config { |
3285 | struct tstorm_per_port_stats port_statistics; | 4337 | u32 threshold[3]; |
3286 | struct tstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID]; | 4338 | u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES]; |
4339 | u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES]; | ||
4340 | u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES]; | ||
4341 | u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES]; | ||
4342 | u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES]; | ||
3287 | }; | 4343 | }; |
3288 | 4344 | ||
3289 | /* | 4345 | /* |
3290 | * Protocol-common statistics collected by the Ustorm (per client) | 4346 | * Dynamic host coalescing init parameters |
3291 | */ | 4347 | */ |
3292 | struct ustorm_per_client_stats { | 4348 | struct dynamic_hc_config { |
3293 | struct regpair ucast_no_buff_bytes; | 4349 | struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM]; |
3294 | struct regpair mcast_no_buff_bytes; | 4350 | }; |
3295 | struct regpair bcast_no_buff_bytes; | 4351 | |
3296 | __le32 ucast_no_buff_pkts; | 4352 | |
3297 | __le32 mcast_no_buff_pkts; | 4353 | struct e2_integ_data { |
3298 | __le32 bcast_no_buff_pkts; | 4354 | #if defined(__BIG_ENDIAN) |
3299 | __le16 stats_counter; | 4355 | u8 flags; |
3300 | __le16 reserved0; | 4356 | #define E2_INTEG_DATA_TESTING_EN (0x1<<0) |
4357 | #define E2_INTEG_DATA_TESTING_EN_SHIFT 0 | ||
4358 | #define E2_INTEG_DATA_LB_TX (0x1<<1) | ||
4359 | #define E2_INTEG_DATA_LB_TX_SHIFT 1 | ||
4360 | #define E2_INTEG_DATA_COS_TX (0x1<<2) | ||
4361 | #define E2_INTEG_DATA_COS_TX_SHIFT 2 | ||
4362 | #define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) | ||
4363 | #define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 | ||
4364 | #define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) | ||
4365 | #define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 | ||
4366 | #define E2_INTEG_DATA_RESERVED (0x7<<5) | ||
4367 | #define E2_INTEG_DATA_RESERVED_SHIFT 5 | ||
4368 | u8 cos; | ||
4369 | u8 voq; | ||
4370 | u8 pbf_queue; | ||
4371 | #elif defined(__LITTLE_ENDIAN) | ||
4372 | u8 pbf_queue; | ||
4373 | u8 voq; | ||
4374 | u8 cos; | ||
4375 | u8 flags; | ||
4376 | #define E2_INTEG_DATA_TESTING_EN (0x1<<0) | ||
4377 | #define E2_INTEG_DATA_TESTING_EN_SHIFT 0 | ||
4378 | #define E2_INTEG_DATA_LB_TX (0x1<<1) | ||
4379 | #define E2_INTEG_DATA_LB_TX_SHIFT 1 | ||
4380 | #define E2_INTEG_DATA_COS_TX (0x1<<2) | ||
4381 | #define E2_INTEG_DATA_COS_TX_SHIFT 2 | ||
4382 | #define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) | ||
4383 | #define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 | ||
4384 | #define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) | ||
4385 | #define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 | ||
4386 | #define E2_INTEG_DATA_RESERVED (0x7<<5) | ||
4387 | #define E2_INTEG_DATA_RESERVED_SHIFT 5 | ||
4388 | #endif | ||
4389 | #if defined(__BIG_ENDIAN) | ||
4390 | u16 reserved3; | ||
4391 | u8 reserved2; | ||
4392 | u8 ramEn; | ||
4393 | #elif defined(__LITTLE_ENDIAN) | ||
4394 | u8 ramEn; | ||
4395 | u8 reserved2; | ||
4396 | u16 reserved3; | ||
4397 | #endif | ||
3301 | }; | 4398 | }; |
3302 | 4399 | ||
4400 | |||
3303 | /* | 4401 | /* |
3304 | * Protocol-common statistics collected by the Ustorm | 4402 | * set mac event data |
3305 | */ | 4403 | */ |
3306 | struct ustorm_common_stats { | 4404 | struct eth_event_data { |
3307 | struct ustorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID]; | 4405 | u32 echo; |
4406 | u32 reserved0; | ||
4407 | u32 reserved1; | ||
3308 | }; | 4408 | }; |
3309 | 4409 | ||
4410 | |||
3310 | /* | 4411 | /* |
3311 | * Eth statistics query structure for the eth_stats_query ramrod | 4412 | * pf-vf event data |
3312 | */ | 4413 | */ |
3313 | struct eth_stats_query { | 4414 | struct vf_pf_event_data { |
3314 | struct xstorm_common_stats xstorm_common; | 4415 | u8 vf_id; |
3315 | struct tstorm_common_stats tstorm_common; | 4416 | u8 reserved0; |
3316 | struct ustorm_common_stats ustorm_common; | 4417 | u16 reserved1; |
4418 | u32 msg_addr_lo; | ||
4419 | u32 msg_addr_hi; | ||
3317 | }; | 4420 | }; |
3318 | 4421 | ||
4422 | /* | ||
4423 | * VF FLR event data | ||
4424 | */ | ||
4425 | struct vf_flr_event_data { | ||
4426 | u8 vf_id; | ||
4427 | u8 reserved0; | ||
4428 | u16 reserved1; | ||
4429 | u32 reserved2; | ||
4430 | u32 reserved3; | ||
4431 | }; | ||
3319 | 4432 | ||
3320 | /* | 4433 | /* |
3321 | * set mac event data | 4434 | * malicious VF event data |
3322 | */ | 4435 | */ |
3323 | struct set_mac_event_data { | 4436 | struct malicious_vf_event_data { |
3324 | u16 echo; | 4437 | u8 vf_id; |
3325 | u16 reserved0; | 4438 | u8 reserved0; |
3326 | u32 reserved1; | 4439 | u16 reserved1; |
3327 | u32 reserved2; | 4440 | u32 reserved2; |
4441 | u32 reserved3; | ||
3328 | }; | 4442 | }; |
3329 | 4443 | ||
3330 | /* | 4444 | /* |
3331 | * union for all event ring message types | 4445 | * union for all event ring message types |
3332 | */ | 4446 | */ |
3333 | union event_data { | 4447 | union event_data { |
3334 | struct set_mac_event_data set_mac_event; | 4448 | struct vf_pf_event_data vf_pf_event; |
4449 | struct eth_event_data eth_event; | ||
3335 | struct cfc_del_event_data cfc_del_event; | 4450 | struct cfc_del_event_data cfc_del_event; |
4451 | struct vf_flr_event_data vf_flr_event; | ||
4452 | struct malicious_vf_event_data malicious_vf_event; | ||
3336 | }; | 4453 | }; |
3337 | 4454 | ||
3338 | 4455 | ||
@@ -3359,7 +4476,7 @@ struct event_ring_data { | |||
3359 | */ | 4476 | */ |
3360 | struct event_ring_msg { | 4477 | struct event_ring_msg { |
3361 | u8 opcode; | 4478 | u8 opcode; |
3362 | u8 reserved0; | 4479 | u8 error; |
3363 | u16 reserved1; | 4480 | u16 reserved1; |
3364 | union event_data data; | 4481 | union event_data data; |
3365 | }; | 4482 | }; |
@@ -3382,32 +4499,82 @@ union event_ring_elem { | |||
3382 | 4499 | ||
3383 | 4500 | ||
3384 | /* | 4501 | /* |
4502 | * Common event ring opcodes | ||
4503 | */ | ||
4504 | enum event_ring_opcode { | ||
4505 | EVENT_RING_OPCODE_VF_PF_CHANNEL, | ||
4506 | EVENT_RING_OPCODE_FUNCTION_START, | ||
4507 | EVENT_RING_OPCODE_FUNCTION_STOP, | ||
4508 | EVENT_RING_OPCODE_CFC_DEL, | ||
4509 | EVENT_RING_OPCODE_CFC_DEL_WB, | ||
4510 | EVENT_RING_OPCODE_STAT_QUERY, | ||
4511 | EVENT_RING_OPCODE_STOP_TRAFFIC, | ||
4512 | EVENT_RING_OPCODE_START_TRAFFIC, | ||
4513 | EVENT_RING_OPCODE_VF_FLR, | ||
4514 | EVENT_RING_OPCODE_MALICIOUS_VF, | ||
4515 | EVENT_RING_OPCODE_FORWARD_SETUP, | ||
4516 | EVENT_RING_OPCODE_RSS_UPDATE_RULES, | ||
4517 | EVENT_RING_OPCODE_RESERVED1, | ||
4518 | EVENT_RING_OPCODE_RESERVED2, | ||
4519 | EVENT_RING_OPCODE_SET_MAC, | ||
4520 | EVENT_RING_OPCODE_CLASSIFICATION_RULES, | ||
4521 | EVENT_RING_OPCODE_FILTERS_RULES, | ||
4522 | EVENT_RING_OPCODE_MULTICAST_RULES, | ||
4523 | MAX_EVENT_RING_OPCODE | ||
4524 | }; | ||
4525 | |||
4526 | |||
4527 | /* | ||
4528 | * Modes for fairness algorithm | ||
4529 | */ | ||
4530 | enum fairness_mode { | ||
4531 | FAIRNESS_COS_WRR_MODE, | ||
4532 | FAIRNESS_COS_ETS_MODE, | ||
4533 | MAX_FAIRNESS_MODE | ||
4534 | }; | ||
4535 | |||
4536 | |||
4537 | /* | ||
3385 | * per-vnic fairness variables | 4538 | * per-vnic fairness variables |
3386 | */ | 4539 | */ |
3387 | struct fairness_vars_per_vn { | 4540 | struct fairness_vars_per_vn { |
3388 | u32 cos_credit_delta[MAX_COS_NUMBER]; | 4541 | u32 cos_credit_delta[MAX_COS_NUMBER]; |
3389 | u32 protocol_credit_delta[NUM_OF_PROTOCOLS]; | ||
3390 | u32 vn_credit_delta; | 4542 | u32 vn_credit_delta; |
3391 | u32 __reserved0; | 4543 | u32 __reserved0; |
3392 | }; | 4544 | }; |
3393 | 4545 | ||
3394 | 4546 | ||
3395 | /* | 4547 | /* |
4548 | * Priority and cos | ||
4549 | */ | ||
4550 | struct priority_cos { | ||
4551 | u8 priority; | ||
4552 | u8 cos; | ||
4553 | __le16 reserved1; | ||
4554 | }; | ||
4555 | |||
4556 | /* | ||
3396 | * The data for flow control configuration | 4557 | * The data for flow control configuration |
3397 | */ | 4558 | */ |
3398 | struct flow_control_configuration { | 4559 | struct flow_control_configuration { |
3399 | struct priority_cos | 4560 | struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; |
3400 | traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES]; | ||
3401 | #if defined(__BIG_ENDIAN) | ||
3402 | u16 reserved1; | ||
3403 | u8 dcb_version; | ||
3404 | u8 dcb_enabled; | ||
3405 | #elif defined(__LITTLE_ENDIAN) | ||
3406 | u8 dcb_enabled; | 4561 | u8 dcb_enabled; |
3407 | u8 dcb_version; | 4562 | u8 dcb_version; |
3408 | u16 reserved1; | 4563 | u8 dont_add_pri_0_en; |
3409 | #endif | 4564 | u8 reserved1; |
3410 | u32 reserved2; | 4565 | __le32 reserved2; |
4566 | }; | ||
4567 | |||
4568 | |||
4569 | /* | ||
4570 | * | ||
4571 | */ | ||
4572 | struct function_start_data { | ||
4573 | __le16 function_mode; | ||
4574 | __le16 sd_vlan_tag; | ||
4575 | u16 reserved; | ||
4576 | u8 path_id; | ||
4577 | u8 network_cos_mode; | ||
3411 | }; | 4578 | }; |
3412 | 4579 | ||
3413 | 4580 | ||
@@ -3520,13 +4687,13 @@ struct hc_sb_data { | |||
3520 | struct pci_entity p_func; | 4687 | struct pci_entity p_func; |
3521 | #if defined(__BIG_ENDIAN) | 4688 | #if defined(__BIG_ENDIAN) |
3522 | u8 rsrv0; | 4689 | u8 rsrv0; |
4690 | u8 state; | ||
3523 | u8 dhc_qzone_id; | 4691 | u8 dhc_qzone_id; |
3524 | u8 __dynamic_hc_level; | ||
3525 | u8 same_igu_sb_1b; | 4692 | u8 same_igu_sb_1b; |
3526 | #elif defined(__LITTLE_ENDIAN) | 4693 | #elif defined(__LITTLE_ENDIAN) |
3527 | u8 same_igu_sb_1b; | 4694 | u8 same_igu_sb_1b; |
3528 | u8 __dynamic_hc_level; | ||
3529 | u8 dhc_qzone_id; | 4695 | u8 dhc_qzone_id; |
4696 | u8 state; | ||
3530 | u8 rsrv0; | 4697 | u8 rsrv0; |
3531 | #endif | 4698 | #endif |
3532 | struct regpair rsrv1[2]; | 4699 | struct regpair rsrv1[2]; |
@@ -3534,18 +4701,30 @@ struct hc_sb_data { | |||
3534 | 4701 | ||
3535 | 4702 | ||
3536 | /* | 4703 | /* |
4704 | * Segment types for host coalescing | ||
4705 | */ | ||
4706 | enum hc_segment { | ||
4707 | HC_REGULAR_SEGMENT, | ||
4708 | HC_DEFAULT_SEGMENT, | ||
4709 | MAX_HC_SEGMENT | ||
4710 | }; | ||
4711 | |||
4712 | |||
4713 | /* | ||
3537 | * The fast-path status block meta-data | 4714 | * The fast-path status block meta-data |
3538 | */ | 4715 | */ |
3539 | struct hc_sp_status_block_data { | 4716 | struct hc_sp_status_block_data { |
3540 | struct regpair host_sb_addr; | 4717 | struct regpair host_sb_addr; |
3541 | #if defined(__BIG_ENDIAN) | 4718 | #if defined(__BIG_ENDIAN) |
3542 | u16 rsrv; | 4719 | u8 rsrv1; |
4720 | u8 state; | ||
3543 | u8 igu_seg_id; | 4721 | u8 igu_seg_id; |
3544 | u8 igu_sb_id; | 4722 | u8 igu_sb_id; |
3545 | #elif defined(__LITTLE_ENDIAN) | 4723 | #elif defined(__LITTLE_ENDIAN) |
3546 | u8 igu_sb_id; | 4724 | u8 igu_sb_id; |
3547 | u8 igu_seg_id; | 4725 | u8 igu_seg_id; |
3548 | u16 rsrv; | 4726 | u8 state; |
4727 | u8 rsrv1; | ||
3549 | #endif | 4728 | #endif |
3550 | struct pci_entity p_func; | 4729 | struct pci_entity p_func; |
3551 | }; | 4730 | }; |
@@ -3570,6 +4749,129 @@ struct hc_status_block_data_e2 { | |||
3570 | 4749 | ||
3571 | 4750 | ||
3572 | /* | 4751 | /* |
4752 | * IGU block operation modes (in Everest2) | ||
4753 | */ | ||
4754 | enum igu_mode { | ||
4755 | HC_IGU_BC_MODE, | ||
4756 | HC_IGU_NBC_MODE, | ||
4757 | MAX_IGU_MODE | ||
4758 | }; | ||
4759 | |||
4760 | |||
4761 | /* | ||
4762 | * IP versions | ||
4763 | */ | ||
4764 | enum ip_ver { | ||
4765 | IP_V4, | ||
4766 | IP_V6, | ||
4767 | MAX_IP_VER | ||
4768 | }; | ||
4769 | |||
4770 | |||
4771 | /* | ||
4772 | * Multi-function modes | ||
4773 | */ | ||
4774 | enum mf_mode { | ||
4775 | SINGLE_FUNCTION, | ||
4776 | MULTI_FUNCTION_SD, | ||
4777 | MULTI_FUNCTION_SI, | ||
4778 | MULTI_FUNCTION_RESERVED, | ||
4779 | MAX_MF_MODE | ||
4780 | }; | ||
4781 | |||
4782 | /* | ||
4783 | * Protocol-common statistics collected by the Tstorm (per pf) | ||
4784 | */ | ||
4785 | struct tstorm_per_pf_stats { | ||
4786 | struct regpair rcv_error_bytes; | ||
4787 | }; | ||
4788 | |||
4789 | /* | ||
4790 | * | ||
4791 | */ | ||
4792 | struct per_pf_stats { | ||
4793 | struct tstorm_per_pf_stats tstorm_pf_statistics; | ||
4794 | }; | ||
4795 | |||
4796 | |||
4797 | /* | ||
4798 | * Protocol-common statistics collected by the Tstorm (per port) | ||
4799 | */ | ||
4800 | struct tstorm_per_port_stats { | ||
4801 | __le32 mac_discard; | ||
4802 | __le32 mac_filter_discard; | ||
4803 | __le32 brb_truncate_discard; | ||
4804 | __le32 mf_tag_discard; | ||
4805 | __le32 packet_drop; | ||
4806 | __le32 reserved; | ||
4807 | }; | ||
4808 | |||
4809 | /* | ||
4810 | * | ||
4811 | */ | ||
4812 | struct per_port_stats { | ||
4813 | struct tstorm_per_port_stats tstorm_port_statistics; | ||
4814 | }; | ||
4815 | |||
4816 | |||
4817 | /* | ||
4818 | * Protocol-common statistics collected by the Tstorm (per client) | ||
4819 | */ | ||
4820 | struct tstorm_per_queue_stats { | ||
4821 | struct regpair rcv_ucast_bytes; | ||
4822 | __le32 rcv_ucast_pkts; | ||
4823 | __le32 checksum_discard; | ||
4824 | struct regpair rcv_bcast_bytes; | ||
4825 | __le32 rcv_bcast_pkts; | ||
4826 | __le32 pkts_too_big_discard; | ||
4827 | struct regpair rcv_mcast_bytes; | ||
4828 | __le32 rcv_mcast_pkts; | ||
4829 | __le32 ttl0_discard; | ||
4830 | __le16 no_buff_discard; | ||
4831 | __le16 reserved0; | ||
4832 | __le32 reserved1; | ||
4833 | }; | ||
4834 | |||
4835 | /* | ||
4836 | * Protocol-common statistics collected by the Ustorm (per client) | ||
4837 | */ | ||
4838 | struct ustorm_per_queue_stats { | ||
4839 | struct regpair ucast_no_buff_bytes; | ||
4840 | struct regpair mcast_no_buff_bytes; | ||
4841 | struct regpair bcast_no_buff_bytes; | ||
4842 | __le32 ucast_no_buff_pkts; | ||
4843 | __le32 mcast_no_buff_pkts; | ||
4844 | __le32 bcast_no_buff_pkts; | ||
4845 | __le32 coalesced_pkts; | ||
4846 | struct regpair coalesced_bytes; | ||
4847 | __le32 coalesced_events; | ||
4848 | __le32 coalesced_aborts; | ||
4849 | }; | ||
4850 | |||
4851 | /* | ||
4852 | * Protocol-common statistics collected by the Xstorm (per client) | ||
4853 | */ | ||
4854 | struct xstorm_per_queue_stats { | ||
4855 | struct regpair ucast_bytes_sent; | ||
4856 | struct regpair mcast_bytes_sent; | ||
4857 | struct regpair bcast_bytes_sent; | ||
4858 | __le32 ucast_pkts_sent; | ||
4859 | __le32 mcast_pkts_sent; | ||
4860 | __le32 bcast_pkts_sent; | ||
4861 | __le32 error_drop_pkts; | ||
4862 | }; | ||
4863 | |||
4864 | /* | ||
4865 | * | ||
4866 | */ | ||
4867 | struct per_queue_stats { | ||
4868 | struct tstorm_per_queue_stats tstorm_queue_statistics; | ||
4869 | struct ustorm_per_queue_stats ustorm_queue_statistics; | ||
4870 | struct xstorm_per_queue_stats xstorm_queue_statistics; | ||
4871 | }; | ||
4872 | |||
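Each queue's statistics are now split across three storm views, Tstorm for Rx, Ustorm for no-buffer and coalescing events, and Xstorm for Tx, and grouped under per_queue_stats. A minimal host-endian sketch of folding those views into per-queue totals follows; regpair and __le32 are replaced by plain integers and the demo structs are illustrative, not the driver's bnx2x_stats code.

#include <stdint.h>
#include <stdio.h>

/* Simplified host-endian stand-ins for the per-queue storm stats above. */
struct demo_tstorm_q_stats {
	uint64_t rcv_ucast_bytes; uint32_t rcv_ucast_pkts;
	uint64_t rcv_bcast_bytes; uint32_t rcv_bcast_pkts;
	uint64_t rcv_mcast_bytes; uint32_t rcv_mcast_pkts;
};

struct demo_xstorm_q_stats {
	uint64_t ucast_bytes_sent; uint32_t ucast_pkts_sent;
	uint64_t mcast_bytes_sent; uint32_t mcast_pkts_sent;
	uint64_t bcast_bytes_sent; uint32_t bcast_pkts_sent;
};

struct demo_per_queue_stats {
	struct demo_tstorm_q_stats tstorm;   /* Rx side */
	struct demo_xstorm_q_stats xstorm;   /* Tx side */
};

int main(void)
{
	struct demo_per_queue_stats q = {
		.tstorm = { .rcv_ucast_pkts = 100, .rcv_bcast_pkts = 2,
			    .rcv_mcast_pkts = 8 },
		.xstorm = { .ucast_pkts_sent = 90, .mcast_pkts_sent = 1,
			    .bcast_pkts_sent = 3 },
	};

	/* Combine the storm views into per-queue Rx/Tx packet totals. */
	uint64_t rx_pkts = q.tstorm.rcv_ucast_pkts + q.tstorm.rcv_bcast_pkts +
			   q.tstorm.rcv_mcast_pkts;
	uint64_t tx_pkts = q.xstorm.ucast_pkts_sent + q.xstorm.mcast_pkts_sent +
			   q.xstorm.bcast_pkts_sent;

	printf("queue totals: rx %llu pkts, tx %llu pkts\n",
	       (unsigned long long)rx_pkts, (unsigned long long)tx_pkts);
	return 0;
}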
4873 | |||
4874 | /* | ||
3573 | * FW version stored in first line of pram | 4875 | * FW version stored in first line of pram |
3574 | */ | 4876 | */ |
3575 | struct pram_fw_version { | 4877 | struct pram_fw_version { |
@@ -3598,7 +4900,6 @@ union protocol_common_specific_data { | |||
3598 | u8 protocol_data[8]; | 4900 | u8 protocol_data[8]; |
3599 | struct regpair phy_address; | 4901 | struct regpair phy_address; |
3600 | struct regpair mac_config_addr; | 4902 | struct regpair mac_config_addr; |
3601 | struct common_query_ramrod_data query_ramrod_data; | ||
3602 | }; | 4903 | }; |
3603 | 4904 | ||
3604 | /* | 4905 | /* |
@@ -3629,7 +4930,6 @@ struct rate_shaping_counter { | |||
3629 | * per-vnic rate shaping variables | 4930 | * per-vnic rate shaping variables |
3630 | */ | 4931 | */ |
3631 | struct rate_shaping_vars_per_vn { | 4932 | struct rate_shaping_vars_per_vn { |
3632 | struct rate_shaping_counter protocol_counters[NUM_OF_PROTOCOLS]; | ||
3633 | struct rate_shaping_counter vn_counter; | 4933 | struct rate_shaping_counter vn_counter; |
3634 | }; | 4934 | }; |
3635 | 4935 | ||
@@ -3644,39 +4944,100 @@ struct slow_path_element { | |||
3644 | 4944 | ||
3645 | 4945 | ||
3646 | /* | 4946 | /* |
3647 | * eth/toe flags that indicate if to query | 4947 | * Protocol-common statistics counter |
3648 | */ | 4948 | */ |
3649 | struct stats_indication_flags { | 4949 | struct stats_counter { |
3650 | u32 collect_eth; | 4950 | __le16 xstats_counter; |
3651 | u32 collect_toe; | 4951 | __le16 reserved0; |
4952 | __le32 reserved1; | ||
4953 | __le16 tstats_counter; | ||
4954 | __le16 reserved2; | ||
4955 | __le32 reserved3; | ||
4956 | __le16 ustats_counter; | ||
4957 | __le16 reserved4; | ||
4958 | __le32 reserved5; | ||
4959 | __le16 cstats_counter; | ||
4960 | __le16 reserved6; | ||
4961 | __le32 reserved7; | ||
3652 | }; | 4962 | }; |
3653 | 4963 | ||
3654 | 4964 | ||
3655 | /* | 4965 | /* |
3656 | * per-port PFC variables | 4966 | * |
3657 | */ | 4967 | */ |
3658 | struct storm_pfc_struct_per_port { | 4968 | struct stats_query_entry { |
3659 | #if defined(__BIG_ENDIAN) | 4969 | u8 kind; |
3660 | u16 mid_mac_addr; | 4970 | u8 index; |
3661 | u16 msb_mac_addr; | 4971 | __le16 funcID; |
3662 | #elif defined(__LITTLE_ENDIAN) | 4972 | __le32 reserved; |
3663 | u16 msb_mac_addr; | 4973 | struct regpair address; |
3664 | u16 mid_mac_addr; | ||
3665 | #endif | ||
3666 | #if defined(__BIG_ENDIAN) | ||
3667 | u16 pfc_pause_quanta_in_nanosec; | ||
3668 | u16 lsb_mac_addr; | ||
3669 | #elif defined(__LITTLE_ENDIAN) | ||
3670 | u16 lsb_mac_addr; | ||
3671 | u16 pfc_pause_quanta_in_nanosec; | ||
3672 | #endif | ||
3673 | }; | 4974 | }; |
3674 | 4975 | ||
3675 | /* | 4976 | /* |
3676 | * Per-port congestion management variables | 4977 | * statistic command |
3677 | */ | 4978 | */ |
3678 | struct storm_cmng_struct_per_port { | 4979 | struct stats_query_cmd_group { |
3679 | struct storm_pfc_struct_per_port pfc_vars; | 4980 | struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; |
4981 | }; | ||
4982 | |||
4983 | |||
4984 | /* | ||
4985 | * statistic command header | ||
4986 | */ | ||
4987 | struct stats_query_header { | ||
4988 | u8 cmd_num; | ||
4989 | u8 reserved0; | ||
4990 | __le16 drv_stats_counter; | ||
4991 | __le32 reserved1; | ||
4992 | struct regpair stats_counters_addrs; | ||
4993 | }; | ||
4994 | |||
4995 | |||
4996 | /* | ||
4997 | * Types of statistics query entry | ||
4998 | */ | ||
4999 | enum stats_query_type { | ||
5000 | STATS_TYPE_QUEUE, | ||
5001 | STATS_TYPE_PORT, | ||
5002 | STATS_TYPE_PF, | ||
5003 | STATS_TYPE_TOE, | ||
5004 | STATS_TYPE_FCOE, | ||
5005 | MAX_STATS_QUERY_TYPE | ||
5006 | }; | ||
5007 | |||
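The stats ramrod now takes a command group: a header with cmd_num valid entries and a drv_stats_counter that the FW echoes back through stats_counter, plus up to STATS_QUERY_CMD_COUNT stats_query_entry records, each naming a kind (stats_query_type), an index, a function ID and a DMA address for the results. The sketch below folds the header and the command group into one demo struct, assumes a STATS_QUERY_CMD_COUNT of 16 and uses made-up addresses; it only illustrates the shape of the request, not how bnx2x actually posts it.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed demo value; the real STATS_QUERY_CMD_COUNT comes from the FW HSI. */
#define DEMO_STATS_QUERY_CMD_COUNT 16

enum { DEMO_STATS_TYPE_QUEUE, DEMO_STATS_TYPE_PORT, DEMO_STATS_TYPE_PF };

struct demo_stats_query_entry {
	uint8_t  kind;      /* stats_query_type */
	uint8_t  index;     /* e.g. queue index for STATS_TYPE_QUEUE */
	uint16_t funcID;
	uint64_t address;   /* stand-in for struct regpair: result DMA address */
};

struct demo_stats_query {
	uint8_t  cmd_num;            /* number of valid entries below */
	uint16_t drv_stats_counter;  /* echoed back by the FW in stats_counter */
	struct demo_stats_query_entry query[DEMO_STATS_QUERY_CMD_COUNT];
};

int main(void)
{
	struct demo_stats_query sq;

	memset(&sq, 0, sizeof(sq));

	/* One PF-wide entry plus one per-queue entry, as an illustration. */
	sq.query[0] = (struct demo_stats_query_entry){
		.kind = DEMO_STATS_TYPE_PF, .index = 0,
		.funcID = 1, .address = 0x1000 };
	sq.query[1] = (struct demo_stats_query_entry){
		.kind = DEMO_STATS_TYPE_QUEUE, .index = 3,
		.funcID = 1, .address = 0x2000 };
	sq.cmd_num = 2;
	sq.drv_stats_counter = 7;

	printf("stats query: %u commands, driver counter %u\n",
	       (unsigned)sq.cmd_num, (unsigned)sq.drv_stats_counter);
	return 0;
}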
5008 | |||
5009 | /* | ||
5010 | * Indicates the function status block state | ||
5011 | */ | ||
5012 | enum status_block_state { | ||
5013 | SB_DISABLED, | ||
5014 | SB_ENABLED, | ||
5015 | SB_CLEANED, | ||
5016 | MAX_STATUS_BLOCK_STATE | ||
5017 | }; | ||
5018 | |||
5019 | |||
5020 | /* | ||
5021 | * Storm IDs (including attentions for IGU related enums) | ||
5022 | */ | ||
5023 | enum storm_id { | ||
5024 | USTORM_ID, | ||
5025 | CSTORM_ID, | ||
5026 | XSTORM_ID, | ||
5027 | TSTORM_ID, | ||
5028 | ATTENTION_ID, | ||
5029 | MAX_STORM_ID | ||
5030 | }; | ||
5031 | |||
5032 | |||
5033 | /* | ||
5034 | * Traffic types used in ETS and flow control algorithms | ||
5035 | */ | ||
5036 | enum traffic_type { | ||
5037 | LLFC_TRAFFIC_TYPE_NW, | ||
5038 | LLFC_TRAFFIC_TYPE_FCOE, | ||
5039 | LLFC_TRAFFIC_TYPE_ISCSI, | ||
5040 | MAX_TRAFFIC_TYPE | ||
3680 | }; | 5041 | }; |
3681 | 5042 | ||
3682 | 5043 | ||
@@ -3731,6 +5092,16 @@ struct vf_pf_channel_data { | |||
3731 | 5092 | ||
3732 | 5093 | ||
3733 | /* | 5094 | /* |
5095 | * State of VF-PF channel | ||
5096 | */ | ||
5097 | enum vf_pf_channel_state { | ||
5098 | VF_PF_CHANNEL_STATE_READY, | ||
5099 | VF_PF_CHANNEL_STATE_WAITING_FOR_ACK, | ||
5100 | MAX_VF_PF_CHANNEL_STATE | ||
5101 | }; | ||
5102 | |||
5103 | |||
5104 | /* | ||
3734 | * zone A per-queue data | 5105 | * zone A per-queue data |
3735 | */ | 5106 | */ |
3736 | struct xstorm_queue_zone_data { | 5107 | struct xstorm_queue_zone_data { |
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h index d5399206f66e..8b1d62584436 100644 --- a/drivers/net/bnx2x/bnx2x_init.h +++ b/drivers/net/bnx2x/bnx2x_init.h | |||
@@ -15,98 +15,34 @@ | |||
15 | #ifndef BNX2X_INIT_H | 15 | #ifndef BNX2X_INIT_H |
16 | #define BNX2X_INIT_H | 16 | #define BNX2X_INIT_H |
17 | 17 | ||
18 | /* RAM0 size in bytes */ | ||
19 | #define STORM_INTMEM_SIZE_E1 0x5800 | ||
20 | #define STORM_INTMEM_SIZE_E1H 0x10000 | ||
21 | #define STORM_INTMEM_SIZE(bp) ((CHIP_IS_E1(bp) ? STORM_INTMEM_SIZE_E1 : \ | ||
22 | STORM_INTMEM_SIZE_E1H) / 4) | ||
23 | |||
24 | |||
25 | /* Init operation types and structures */ | 18 | /* Init operation types and structures */ |
26 | /* Common for both E1 and E1H */ | 19 | enum { |
27 | #define OP_RD 0x1 /* read single register */ | 20 | OP_RD = 0x1, /* read a single register */ |
28 | #define OP_WR 0x2 /* write single register */ | 21 | OP_WR, /* write a single register */ |
29 | #define OP_IW 0x3 /* write single register using mailbox */ | 22 | OP_SW, /* copy a string to the device */ |
30 | #define OP_SW 0x4 /* copy a string to the device */ | 23 | OP_ZR, /* clear memory */ |
31 | #define OP_SI 0x5 /* copy a string using mailbox */ | 24 | OP_ZP, /* unzip then copy with DMAE */ |
32 | #define OP_ZR 0x6 /* clear memory */ | 25 | OP_WR_64, /* write 64 bit pattern */ |
33 | #define OP_ZP 0x7 /* unzip then copy with DMAE */ | 26 | OP_WB, /* copy a string using DMAE */ |
34 | #define OP_WR_64 0x8 /* write 64 bit pattern */ | 27 | OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */ |
35 | #define OP_WB 0x9 /* copy a string using DMAE */ | 28 | /* Skip the following ops if all of the init modes don't match */ |
36 | 29 | OP_IF_MODE_OR, | |
37 | /* FPGA and EMUL specific operations */ | 30 | /* Skip the following ops if any of the init modes don't match */ |
38 | #define OP_WR_EMUL 0xa /* write single register on Emulation */ | 31 | OP_IF_MODE_AND, |
39 | #define OP_WR_FPGA 0xb /* write single register on FPGA */ | 32 | OP_MAX |
40 | #define OP_WR_ASIC 0xc /* write single register on ASIC */ | 33 | }; |
41 | |||
42 | /* Init stages */ | ||
43 | /* Never reorder stages !!! */ | ||
44 | #define COMMON_STAGE 0 | ||
45 | #define PORT0_STAGE 1 | ||
46 | #define PORT1_STAGE 2 | ||
47 | #define FUNC0_STAGE 3 | ||
48 | #define FUNC1_STAGE 4 | ||
49 | #define FUNC2_STAGE 5 | ||
50 | #define FUNC3_STAGE 6 | ||
51 | #define FUNC4_STAGE 7 | ||
52 | #define FUNC5_STAGE 8 | ||
53 | #define FUNC6_STAGE 9 | ||
54 | #define FUNC7_STAGE 10 | ||
55 | #define STAGE_IDX_MAX 11 | ||
56 | |||
57 | #define STAGE_START 0 | ||
58 | #define STAGE_END 1 | ||
59 | |||
60 | |||
61 | /* Indices of blocks */ | ||
62 | #define PRS_BLOCK 0 | ||
63 | #define SRCH_BLOCK 1 | ||
64 | #define TSDM_BLOCK 2 | ||
65 | #define TCM_BLOCK 3 | ||
66 | #define BRB1_BLOCK 4 | ||
67 | #define TSEM_BLOCK 5 | ||
68 | #define PXPCS_BLOCK 6 | ||
69 | #define EMAC0_BLOCK 7 | ||
70 | #define EMAC1_BLOCK 8 | ||
71 | #define DBU_BLOCK 9 | ||
72 | #define MISC_BLOCK 10 | ||
73 | #define DBG_BLOCK 11 | ||
74 | #define NIG_BLOCK 12 | ||
75 | #define MCP_BLOCK 13 | ||
76 | #define UPB_BLOCK 14 | ||
77 | #define CSDM_BLOCK 15 | ||
78 | #define USDM_BLOCK 16 | ||
79 | #define CCM_BLOCK 17 | ||
80 | #define UCM_BLOCK 18 | ||
81 | #define USEM_BLOCK 19 | ||
82 | #define CSEM_BLOCK 20 | ||
83 | #define XPB_BLOCK 21 | ||
84 | #define DQ_BLOCK 22 | ||
85 | #define TIMERS_BLOCK 23 | ||
86 | #define XSDM_BLOCK 24 | ||
87 | #define QM_BLOCK 25 | ||
88 | #define PBF_BLOCK 26 | ||
89 | #define XCM_BLOCK 27 | ||
90 | #define XSEM_BLOCK 28 | ||
91 | #define CDU_BLOCK 29 | ||
92 | #define DMAE_BLOCK 30 | ||
93 | #define PXP_BLOCK 31 | ||
94 | #define CFC_BLOCK 32 | ||
95 | #define HC_BLOCK 33 | ||
96 | #define PXP2_BLOCK 34 | ||
97 | #define MISC_AEU_BLOCK 35 | ||
98 | #define PGLUE_B_BLOCK 36 | ||
99 | #define IGU_BLOCK 37 | ||
100 | #define ATC_BLOCK 38 | ||
101 | #define QM_4PORT_BLOCK 39 | ||
102 | #define XSEM_4PORT_BLOCK 40 | ||
103 | 34 | ||
35 | enum { | ||
36 | STAGE_START, | ||
37 | STAGE_END, | ||
38 | }; | ||
104 | 39 | ||
105 | /* Returns the index of start or end of a specific block stage in ops array*/ | 40 | /* Returns the index of start or end of a specific block stage in ops array*/ |
106 | #define BLOCK_OPS_IDX(block, stage, end) \ | 41 | #define BLOCK_OPS_IDX(block, stage, end) \ |
107 | (2*(((block)*STAGE_IDX_MAX) + (stage)) + (end)) | 42 | (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) |
108 | 43 | ||
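BLOCK_OPS_IDX now indexes the init-offsets array by (block, phase) pair, two u16 entries per pair (start and end), using the NUM_OF_INIT_PHASES count from the phase enum added further down in this header. A small standalone sketch of the arithmetic, with DEMO_* stand-ins for the real constants:

#include <stdio.h>

/* Demo constants mirroring the header: 11 init phases (COMMON, PORT0/1,
 * PF0..PF7) and STAGE_START/STAGE_END selecting the first or last op. */
#define DEMO_NUM_OF_INIT_PHASES 11
enum { DEMO_STAGE_START, DEMO_STAGE_END };

/* Same arithmetic as BLOCK_OPS_IDX: two u16 offsets (start, end) per
 * (block, phase) pair in the init-offsets array. */
#define DEMO_BLOCK_OPS_IDX(block, stage, end) \
	(2 * (((block) * DEMO_NUM_OF_INIT_PHASES) + (stage)) + (end))

int main(void)
{
	int block = 8;                 /* e.g. BLOCK_DMAE in the new block enum */
	int phase = 3;                 /* e.g. PHASE_PF0 */

	printf("start offset index: %d\n",
	       DEMO_BLOCK_OPS_IDX(block, phase, DEMO_STAGE_START));
	printf("end offset index:   %d\n",
	       DEMO_BLOCK_OPS_IDX(block, phase, DEMO_STAGE_END));
	return 0;
}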
109 | 44 | ||
45 | /* structs for the various opcodes */ | ||
110 | struct raw_op { | 46 | struct raw_op { |
111 | u32 op:8; | 47 | u32 op:8; |
112 | u32 offset:24; | 48 | u32 offset:24; |
@@ -116,7 +52,7 @@ struct raw_op { | |||
116 | struct op_read { | 52 | struct op_read { |
117 | u32 op:8; | 53 | u32 op:8; |
118 | u32 offset:24; | 54 | u32 offset:24; |
119 | u32 pad; | 55 | u32 val; |
120 | }; | 56 | }; |
121 | 57 | ||
122 | struct op_write { | 58 | struct op_write { |
@@ -125,15 +61,15 @@ struct op_write { | |||
125 | u32 val; | 61 | u32 val; |
126 | }; | 62 | }; |
127 | 63 | ||
128 | struct op_string_write { | 64 | struct op_arr_write { |
129 | u32 op:8; | 65 | u32 op:8; |
130 | u32 offset:24; | 66 | u32 offset:24; |
131 | #ifdef __LITTLE_ENDIAN | 67 | #ifdef __BIG_ENDIAN |
132 | u16 data_off; | ||
133 | u16 data_len; | ||
134 | #else /* __BIG_ENDIAN */ | ||
135 | u16 data_len; | 68 | u16 data_len; |
136 | u16 data_off; | 69 | u16 data_off; |
70 | #else /* __LITTLE_ENDIAN */ | ||
71 | u16 data_off; | ||
72 | u16 data_len; | ||
137 | #endif | 73 | #endif |
138 | }; | 74 | }; |
139 | 75 | ||
@@ -143,14 +79,210 @@ struct op_zero { | |||
143 | u32 len; | 79 | u32 len; |
144 | }; | 80 | }; |
145 | 81 | ||
82 | struct op_if_mode { | ||
83 | u32 op:8; | ||
84 | u32 cmd_offset:24; | ||
85 | u32 mode_bit_map; | ||
86 | }; | ||
87 | |||
88 | |||
146 | union init_op { | 89 | union init_op { |
147 | struct op_read read; | 90 | struct op_read read; |
148 | struct op_write write; | 91 | struct op_write write; |
149 | struct op_string_write str_wr; | 92 | struct op_arr_write arr_wr; |
150 | struct op_zero zero; | 93 | struct op_zero zero; |
151 | struct raw_op raw; | 94 | struct raw_op raw; |
95 | struct op_if_mode if_mode; | ||
96 | }; | ||
97 | |||
98 | |||
99 | /* Init Phases */ | ||
100 | enum { | ||
101 | PHASE_COMMON, | ||
102 | PHASE_PORT0, | ||
103 | PHASE_PORT1, | ||
104 | PHASE_PF0, | ||
105 | PHASE_PF1, | ||
106 | PHASE_PF2, | ||
107 | PHASE_PF3, | ||
108 | PHASE_PF4, | ||
109 | PHASE_PF5, | ||
110 | PHASE_PF6, | ||
111 | PHASE_PF7, | ||
112 | NUM_OF_INIT_PHASES | ||
152 | }; | 113 | }; |
153 | 114 | ||
115 | /* Init Modes */ | ||
116 | enum { | ||
117 | MODE_ASIC = 0x00000001, | ||
118 | MODE_FPGA = 0x00000002, | ||
119 | MODE_EMUL = 0x00000004, | ||
120 | MODE_E2 = 0x00000008, | ||
121 | MODE_E3 = 0x00000010, | ||
122 | MODE_PORT2 = 0x00000020, | ||
123 | MODE_PORT4 = 0x00000040, | ||
124 | MODE_SF = 0x00000080, | ||
125 | MODE_MF = 0x00000100, | ||
126 | MODE_MF_SD = 0x00000200, | ||
127 | MODE_MF_SI = 0x00000400, | ||
128 | MODE_MF_NIV = 0x00000800, | ||
129 | MODE_E3_A0 = 0x00001000, | ||
130 | MODE_E3_B0 = 0x00002000, | ||
131 | MODE_COS_BC = 0x00004000, | ||
132 | MODE_COS3 = 0x00008000, | ||
133 | MODE_COS6 = 0x00010000, | ||
134 | MODE_LITTLE_ENDIAN = 0x00020000, | ||
135 | MODE_BIG_ENDIAN = 0x00040000, | ||
136 | }; | ||
137 | |||
138 | /* Init Blocks */ | ||
139 | enum { | ||
140 | BLOCK_ATC, | ||
141 | BLOCK_BRB1, | ||
142 | BLOCK_CCM, | ||
143 | BLOCK_CDU, | ||
144 | BLOCK_CFC, | ||
145 | BLOCK_CSDM, | ||
146 | BLOCK_CSEM, | ||
147 | BLOCK_DBG, | ||
148 | BLOCK_DMAE, | ||
149 | BLOCK_DORQ, | ||
150 | BLOCK_HC, | ||
151 | BLOCK_IGU, | ||
152 | BLOCK_MISC, | ||
153 | BLOCK_NIG, | ||
154 | BLOCK_PBF, | ||
155 | BLOCK_PGLUE_B, | ||
156 | BLOCK_PRS, | ||
157 | BLOCK_PXP2, | ||
158 | BLOCK_PXP, | ||
159 | BLOCK_QM, | ||
160 | BLOCK_SRC, | ||
161 | BLOCK_TCM, | ||
162 | BLOCK_TM, | ||
163 | BLOCK_TSDM, | ||
164 | BLOCK_TSEM, | ||
165 | BLOCK_UCM, | ||
166 | BLOCK_UPB, | ||
167 | BLOCK_USDM, | ||
168 | BLOCK_USEM, | ||
169 | BLOCK_XCM, | ||
170 | BLOCK_XPB, | ||
171 | BLOCK_XSDM, | ||
172 | BLOCK_XSEM, | ||
173 | BLOCK_MISC_AEU, | ||
174 | NUM_OF_INIT_BLOCKS | ||
175 | }; | ||
176 | |||
177 | /* QM queue numbers */ | ||
178 | #define BNX2X_ETH_Q 0 | ||
179 | #define BNX2X_TOE_Q 3 | ||
180 | #define BNX2X_TOE_ACK_Q 6 | ||
181 | #define BNX2X_ISCSI_Q 9 | ||
182 | #define BNX2X_ISCSI_ACK_Q 8 | ||
183 | #define BNX2X_FCOE_Q 10 | ||
184 | |||
185 | /* Vnics per mode */ | ||
186 | #define BNX2X_PORT2_MODE_NUM_VNICS 4 | ||
187 | #define BNX2X_PORT4_MODE_NUM_VNICS 2 | ||
188 | |||
189 | /* COS offset for port1 in E3 B0 4port mode */ | ||
190 | #define BNX2X_E3B0_PORT1_COS_OFFSET 3 | ||
191 | |||
192 | /* QM Register addresses */ | ||
193 | #define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\ | ||
194 | (QM_REG_QVOQIDX_0 + 4 * (pf_q_num)) | ||
195 | #define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\ | ||
196 | (QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5))) | ||
197 | #define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\ | ||
198 | (QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4)) | ||
199 | |||
200 | /* extracts the QM queue number for the specified port and vnic */ | ||
201 | #define BNX2X_PF_Q_NUM(q_num, port, vnic)\ | ||
202 | ((((port) << 1) | (vnic)) * 16 + (q_num)) | ||
203 | |||
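BNX2X_PF_Q_NUM gives each (port, vnic) pair a block of 16 QM queues, and the VOQ mask macros then turn the resulting pf_q_num into a dword offset from QM_REG_VOQQMASK_0_LSB (cos * 2 + pf_q_num / 32) and a bit position (pf_q_num % 32), which is exactly the math bnx2x_map_q_cos() below relies on. A standalone sketch of that arithmetic, with DEMO_* stand-ins for the real macros:

#include <stdio.h>

/* Same arithmetic as BNX2X_PF_Q_NUM: 16 QM queues per (port, vnic) pair. */
#define DEMO_PF_Q_NUM(q_num, port, vnic) \
	((((port) << 1) | (vnic)) * 16 + (q_num))

int main(void)
{
	int port = 0, vnic = 1;
	int q_num = 10;                /* BNX2X_FCOE_Q */
	int cos = 2;

	int pf_q_num = DEMO_PF_Q_NUM(q_num, port, vnic);

	/* Each VOQ mask dword covers 32 queues, so the queue selects the
	 * dword at index (cos * 2 + pf_q_num / 32) and the bit
	 * (pf_q_num % 32), mirroring BNX2X_VOQ_Q_REG_ADDR() and the bit
	 * manipulation in bnx2x_map_q_cos() below. */
	int mask_reg_index = cos * 2 + (pf_q_num >> 5);
	unsigned int q_bit = 1u << (pf_q_num & 0x1f);

	printf("pf_q_num = %d\n", pf_q_num);
	printf("VOQ mask dword index = %d, queue bit = 0x%08x\n",
	       mask_reg_index, q_bit);
	return 0;
}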
204 | |||
205 | /* Maps the specified queue to the specified COS */ | ||
206 | static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos) | ||
207 | { | ||
208 | /* find current COS mapping */ | ||
209 | u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4); | ||
210 | |||
211 | /* check if queue->COS mapping has changed */ | ||
212 | if (curr_cos != new_cos) { | ||
213 | u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS; | ||
214 | u32 reg_addr, reg_bit_map, vnic; | ||
215 | |||
216 | /* update parameters for 4port mode */ | ||
217 | if (INIT_MODE_FLAGS(bp) & MODE_PORT4) { | ||
218 | num_vnics = BNX2X_PORT4_MODE_NUM_VNICS; | ||
219 | if (BP_PORT(bp)) { | ||
220 | curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET; | ||
221 | new_cos += BNX2X_E3B0_PORT1_COS_OFFSET; | ||
222 | } | ||
223 | } | ||
224 | |||
225 | /* change queue mapping for each VNIC */ | ||
226 | for (vnic = 0; vnic < num_vnics; vnic++) { | ||
227 | u32 pf_q_num = | ||
228 | BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic); | ||
229 | u32 q_bit_map = 1 << (pf_q_num & 0x1f); | ||
230 | |||
231 | /* overwrite queue->VOQ mapping */ | ||
232 | REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos); | ||
233 | |||
234 | /* clear queue bit from current COS bit map */ | ||
235 | reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num); | ||
236 | reg_bit_map = REG_RD(bp, reg_addr); | ||
237 | REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map)); | ||
238 | |||
239 | /* set queue bit in new COS bit map */ | ||
240 | reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num); | ||
241 | reg_bit_map = REG_RD(bp, reg_addr); | ||
242 | REG_WR(bp, reg_addr, reg_bit_map | q_bit_map); | ||
243 | |||
244 | /* set/clear queue bit in command-queue bit map | ||
245 | (E2/E3A0 only, valid COS values are 0/1) */ | ||
246 | if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) { | ||
247 | reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num); | ||
248 | reg_bit_map = REG_RD(bp, reg_addr); | ||
249 | q_bit_map = 1 << (2 * (pf_q_num & 0xf)); | ||
250 | reg_bit_map = new_cos ? | ||
251 | (reg_bit_map | q_bit_map) : | ||
252 | (reg_bit_map & (~q_bit_map)); | ||
253 | REG_WR(bp, reg_addr, reg_bit_map); | ||
254 | } | ||
255 | } | ||
256 | } | ||
257 | } | ||
258 | |||
259 | /* Configures the QM according to the specified per-traffic-type COSes */ | ||
260 | static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, | ||
261 | struct priority_cos *traffic_cos) | ||
262 | { | ||
263 | bnx2x_map_q_cos(bp, BNX2X_FCOE_Q, | ||
264 | traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos); | ||
265 | bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q, | ||
266 | traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); | ||
267 | if (INIT_MODE_FLAGS(bp) & MODE_COS_BC) { | ||
268 | /* required only in backward compatible COS mode */ | ||
269 | bnx2x_map_q_cos(bp, BNX2X_ETH_Q, | ||
270 | traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); | ||
271 | bnx2x_map_q_cos(bp, BNX2X_TOE_Q, | ||
272 | traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); | ||
273 | bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q, | ||
274 | traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); | ||
275 | bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q, | ||
276 | traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); | ||
277 | } | ||
278 | } | ||
279 | |||
280 | |||
281 | /* Returns the index of start or end of a specific block stage in ops array*/ | ||
282 | #define BLOCK_OPS_IDX(block, stage, end) \ | ||
283 | (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) | ||
284 | |||
285 | |||
154 | #define INITOP_SET 0 /* set the HW directly */ | 286 | #define INITOP_SET 0 /* set the HW directly */ |
155 | #define INITOP_CLEAR 1 /* clear the HW directly */ | 287 | #define INITOP_CLEAR 1 /* clear the HW directly */ |
156 | #define INITOP_INIT 2 /* set the init-value array */ | 288 | #define INITOP_INIT 2 /* set the init-value array */ |
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h index aafd0232393f..7ec1724753ad 100644 --- a/drivers/net/bnx2x/bnx2x_init_ops.h +++ b/drivers/net/bnx2x/bnx2x_init_ops.h | |||
@@ -15,13 +15,39 @@ | |||
15 | #ifndef BNX2X_INIT_OPS_H | 15 | #ifndef BNX2X_INIT_OPS_H |
16 | #define BNX2X_INIT_OPS_H | 16 | #define BNX2X_INIT_OPS_H |
17 | 17 | ||
18 | |||
19 | #ifndef BP_ILT | ||
20 | #define BP_ILT(bp) NULL | ||
21 | #endif | ||
22 | |||
23 | #ifndef BP_FUNC | ||
24 | #define BP_FUNC(bp) 0 | ||
25 | #endif | ||
26 | |||
27 | #ifndef BP_PORT | ||
28 | #define BP_PORT(bp) 0 | ||
29 | #endif | ||
30 | |||
31 | #ifndef BNX2X_ILT_FREE | ||
32 | #define BNX2X_ILT_FREE(x, y, sz) | ||
33 | #endif | ||
34 | |||
35 | #ifndef BNX2X_ILT_ZALLOC | ||
36 | #define BNX2X_ILT_ZALLOC(x, y, sz) | ||
37 | #endif | ||
38 | |||
39 | #ifndef ILOG2 | ||
40 | #define ILOG2(x) x | ||
41 | #endif | ||
42 | |||
18 | static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len); | 43 | static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len); |
19 | static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val); | 44 | static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val); |
20 | static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, | 45 | static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, |
21 | u32 addr, u32 len); | 46 | dma_addr_t phys_addr, u32 addr, |
47 | u32 len); | ||
22 | 48 | ||
23 | static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data, | 49 | static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, |
24 | u32 len) | 50 | const u32 *data, u32 len) |
25 | { | 51 | { |
26 | u32 i; | 52 | u32 i; |
27 | 53 | ||
@@ -29,24 +55,32 @@ static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data, | |||
29 | REG_WR(bp, addr + i*4, data[i]); | 55 | REG_WR(bp, addr + i*4, data[i]); |
30 | } | 56 | } |
31 | 57 | ||
32 | static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data, | 58 | static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, |
33 | u32 len) | 59 | const u32 *data, u32 len) |
34 | { | 60 | { |
35 | u32 i; | 61 | u32 i; |
36 | 62 | ||
37 | for (i = 0; i < len; i++) | 63 | for (i = 0; i < len; i++) |
38 | REG_WR_IND(bp, addr + i*4, data[i]); | 64 | bnx2x_reg_wr_ind(bp, addr + i*4, data[i]); |
39 | } | 65 | } |
40 | 66 | ||
41 | static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len) | 67 | static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len, |
68 | u8 wb) | ||
42 | { | 69 | { |
43 | if (bp->dmae_ready) | 70 | if (bp->dmae_ready) |
44 | bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); | 71 | bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); |
72 | else if (wb) | ||
73 | /* | ||
74 | * Wide bus registers with no dmae need to be written | ||
75 | * using indirect write. | ||
76 | */ | ||
77 | bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); | ||
45 | else | 78 | else |
46 | bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); | 79 | bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); |
47 | } | 80 | } |
48 | 81 | ||
49 | static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) | 82 | static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, |
83 | u32 len, u8 wb) | ||
50 | { | 84 | { |
51 | u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4)); | 85 | u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4)); |
52 | u32 buf_len32 = buf_len/4; | 86 | u32 buf_len32 = buf_len/4; |
@@ -57,12 +91,20 @@ static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) | |||
57 | for (i = 0; i < len; i += buf_len32) { | 91 | for (i = 0; i < len; i += buf_len32) { |
58 | u32 cur_len = min(buf_len32, len - i); | 92 | u32 cur_len = min(buf_len32, len - i); |
59 | 93 | ||
60 | bnx2x_write_big_buf(bp, addr + i*4, cur_len); | 94 | bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb); |
61 | } | 95 | } |
62 | } | 96 | } |
63 | 97 | ||
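bnx2x_init_fill() above pushes an arbitrarily long region through the bounded GUNZIP staging buffer, FW_BUF_SIZE bytes at a time, and the new wb flag only changes how each chunk is flushed (DMAE, indirect write, or plain string write). The chunking itself is the mechanism of interest; below is a standalone sketch with a stubbed-out chunk writer and a demo buffer size, not the driver's code.

#include <stdint.h>
#include <stdio.h>

#define DEMO_FW_BUF_SIZE 0x40   /* demo value only: staging buffer size in bytes */

static void demo_write_chunk(uint32_t addr, uint32_t len_dw)
{
	/* Stand-in for bnx2x_write_big_buf(): would flush len_dw dwords of
	 * the staging buffer to the device starting at addr. */
	printf("write %u dwords at 0x%x\n", (unsigned)len_dw, (unsigned)addr);
}

/* Mirrors the shape of bnx2x_init_fill(): a region of 'len' dwords is
 * written through a staging buffer of at most DEMO_FW_BUF_SIZE bytes. */
static void demo_fill(uint32_t addr, uint32_t len)
{
	uint32_t buf_len32 = DEMO_FW_BUF_SIZE / 4;
	uint32_t i;

	for (i = 0; i < len; i += buf_len32) {
		uint32_t cur_len = (buf_len32 < len - i) ? buf_len32 : len - i;

		demo_write_chunk(addr + i * 4, cur_len);
	}
}

int main(void)
{
	demo_fill(0x10000, 40);   /* 40 dwords -> chunks of 16, 16, 8 */
	return 0;
}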
64 | static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data, | 98 | static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len) |
65 | u32 len64) | 99 | { |
100 | if (bp->dmae_ready) | ||
101 | bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); | ||
102 | else | ||
103 | bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); | ||
104 | } | ||
105 | |||
106 | static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, | ||
107 | const u32 *data, u32 len64) | ||
66 | { | 108 | { |
67 | u32 buf_len32 = FW_BUF_SIZE/4; | 109 | u32 buf_len32 = FW_BUF_SIZE/4; |
68 | u32 len = len64*2; | 110 | u32 len = len64*2; |
@@ -82,7 +124,7 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data, | |||
82 | for (i = 0; i < len; i += buf_len32) { | 124 | for (i = 0; i < len; i += buf_len32) { |
83 | u32 cur_len = min(buf_len32, len - i); | 125 | u32 cur_len = min(buf_len32, len - i); |
84 | 126 | ||
85 | bnx2x_write_big_buf(bp, addr + i*4, cur_len); | 127 | bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len); |
86 | } | 128 | } |
87 | } | 129 | } |
88 | 130 | ||
@@ -100,7 +142,8 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data, | |||
100 | #define IF_IS_PRAM_ADDR(base, addr) \ | 142 | #define IF_IS_PRAM_ADDR(base, addr) \ |
101 | if (((base) <= (addr)) && ((base) + 0x40000 >= (addr))) | 143 | if (((base) <= (addr)) && ((base) + 0x40000 >= (addr))) |
102 | 144 | ||
103 | static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data) | 145 | static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, |
146 | const u8 *data) | ||
104 | { | 147 | { |
105 | IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr) | 148 | IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr) |
106 | data = INIT_TSEM_INT_TABLE_DATA(bp); | 149 | data = INIT_TSEM_INT_TABLE_DATA(bp); |
@@ -129,31 +172,17 @@ static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data) | |||
129 | return data; | 172 | return data; |
130 | } | 173 | } |
131 | 174 | ||
132 | static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len) | 175 | static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, |
176 | const u32 *data, u32 len) | ||
133 | { | 177 | { |
134 | if (bp->dmae_ready) | 178 | if (bp->dmae_ready) |
135 | bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); | 179 | VIRT_WR_DMAE_LEN(bp, data, addr, len, 0); |
136 | else | 180 | else |
137 | bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); | ||
138 | } | ||
139 | |||
140 | static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data, | ||
141 | u32 len) | ||
142 | { | ||
143 | const u32 *old_data = data; | ||
144 | |||
145 | data = (const u32 *)bnx2x_sel_blob(bp, addr, (const u8 *)data); | ||
146 | |||
147 | if (bp->dmae_ready) { | ||
148 | if (old_data != data) | ||
149 | VIRT_WR_DMAE_LEN(bp, data, addr, len, 1); | ||
150 | else | ||
151 | VIRT_WR_DMAE_LEN(bp, data, addr, len, 0); | ||
152 | } else | ||
153 | bnx2x_init_ind_wr(bp, addr, data, len); | 181 | bnx2x_init_ind_wr(bp, addr, data, len); |
154 | } | 182 | } |
155 | 183 | ||
156 | static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi) | 184 | static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, |
185 | u32 val_hi) | ||
157 | { | 186 | { |
158 | u32 wb_write[2]; | 187 | u32 wb_write[2]; |
159 | 188 | ||
@@ -161,8 +190,8 @@ static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi) | |||
161 | wb_write[1] = val_hi; | 190 | wb_write[1] = val_hi; |
162 | REG_WR_DMAE_LEN(bp, reg, wb_write, 2); | 191 | REG_WR_DMAE_LEN(bp, reg, wb_write, 2); |
163 | } | 192 | } |
164 | 193 | static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, | |
165 | static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off) | 194 | u32 blob_off) |
166 | { | 195 | { |
167 | const u8 *data = NULL; | 196 | const u8 *data = NULL; |
168 | int rc; | 197 | int rc; |
@@ -186,39 +215,33 @@ static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off) | |||
186 | static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage) | 215 | static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage) |
187 | { | 216 | { |
188 | u16 op_start = | 217 | u16 op_start = |
189 | INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_START)]; | 218 | INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, |
219 | STAGE_START)]; | ||
190 | u16 op_end = | 220 | u16 op_end = |
191 | INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_END)]; | 221 | INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, |
222 | STAGE_END)]; | ||
192 | union init_op *op; | 223 | union init_op *op; |
193 | int hw_wr; | 224 | u32 op_idx, op_type, addr, len; |
194 | u32 i, op_type, addr, len; | ||
195 | const u32 *data, *data_base; | 225 | const u32 *data, *data_base; |
196 | 226 | ||
197 | /* If empty block */ | 227 | /* If empty block */ |
198 | if (op_start == op_end) | 228 | if (op_start == op_end) |
199 | return; | 229 | return; |
200 | 230 | ||
201 | if (CHIP_REV_IS_FPGA(bp)) | ||
202 | hw_wr = OP_WR_FPGA; | ||
203 | else if (CHIP_REV_IS_EMUL(bp)) | ||
204 | hw_wr = OP_WR_EMUL; | ||
205 | else | ||
206 | hw_wr = OP_WR_ASIC; | ||
207 | |||
208 | data_base = INIT_DATA(bp); | 231 | data_base = INIT_DATA(bp); |
209 | 232 | ||
210 | for (i = op_start; i < op_end; i++) { | 233 | for (op_idx = op_start; op_idx < op_end; op_idx++) { |
211 | |||
212 | op = (union init_op *)&(INIT_OPS(bp)[i]); | ||
213 | 234 | ||
214 | op_type = op->str_wr.op; | 235 | op = (union init_op *)&(INIT_OPS(bp)[op_idx]); |
215 | addr = op->str_wr.offset; | 236 | /* Get generic data */ |
216 | len = op->str_wr.data_len; | 237 | op_type = op->raw.op; |
217 | data = data_base + op->str_wr.data_off; | 238 | addr = op->raw.offset; |
218 | 239 | /* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and | |
219 | /* HW/EMUL specific */ | 240 | * OP_WR64 (we assume that op_arr_write and op_write have the |
220 | if ((op_type > OP_WB) && (op_type == hw_wr)) | 241 | * same structure). |
221 | op_type = OP_WR; | 242 | */ |
243 | len = op->arr_wr.data_len; | ||
244 | data = data_base + op->arr_wr.data_off; | ||
222 | 245 | ||
223 | switch (op_type) { | 246 | switch (op_type) { |
224 | case OP_RD: | 247 | case OP_RD: |
@@ -233,21 +256,39 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage) | |||
233 | case OP_WB: | 256 | case OP_WB: |
234 | bnx2x_init_wr_wb(bp, addr, data, len); | 257 | bnx2x_init_wr_wb(bp, addr, data, len); |
235 | break; | 258 | break; |
236 | case OP_SI: | ||
237 | bnx2x_init_ind_wr(bp, addr, data, len); | ||
238 | break; | ||
239 | case OP_ZR: | 259 | case OP_ZR: |
240 | bnx2x_init_fill(bp, addr, 0, op->zero.len); | 260 | bnx2x_init_fill(bp, addr, 0, op->zero.len, 0); |
261 | break; | ||
262 | case OP_WB_ZR: | ||
263 | bnx2x_init_fill(bp, addr, 0, op->zero.len, 1); | ||
241 | break; | 264 | break; |
242 | case OP_ZP: | 265 | case OP_ZP: |
243 | bnx2x_init_wr_zp(bp, addr, len, | 266 | bnx2x_init_wr_zp(bp, addr, len, |
244 | op->str_wr.data_off); | 267 | op->arr_wr.data_off); |
245 | break; | 268 | break; |
246 | case OP_WR_64: | 269 | case OP_WR_64: |
247 | bnx2x_init_wr_64(bp, addr, data, len); | 270 | bnx2x_init_wr_64(bp, addr, data, len); |
248 | break; | 271 | break; |
272 | case OP_IF_MODE_AND: | ||
273 | /* if any of the flags doesn't match, skip the | ||
274 | * conditional block. | ||
275 | */ | ||
276 | if ((INIT_MODE_FLAGS(bp) & | ||
277 | op->if_mode.mode_bit_map) != | ||
278 | op->if_mode.mode_bit_map) | ||
279 | op_idx += op->if_mode.cmd_offset; | ||
280 | break; | ||
281 | case OP_IF_MODE_OR: | ||
282 | /* if all the flags don't match, skip the conditional | ||
283 | * block. | ||
284 | */ | ||
285 | if ((INIT_MODE_FLAGS(bp) & | ||
286 | op->if_mode.mode_bit_map) == 0) | ||
287 | op_idx += op->if_mode.cmd_offset; | ||
288 | break; | ||
249 | default: | 289 | default: |
250 | /* happens whenever an op is of a diff HW */ | 290 | /* Should never get here! */ |
291 | |||
251 | break; | 292 | break; |
252 | } | 293 | } |
253 | } | 294 | } |
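The two OP_IF_MODE_* cases above implement conditional init: an IF_MODE op carries a mode bitmap and a cmd_offset, and when the current INIT_MODE_FLAGS do not satisfy the bitmap (all bits for _AND, at least one bit for _OR) the loop index jumps over that many following ops. A standalone sketch of the same skip logic; the demo_op struct and DEMO_MODE_* values are illustrative stand-ins for the real init_op union and mode flags from bnx2x_init.h.

#include <stdint.h>
#include <stdio.h>

/* Demo mode bits mirroring the new init-mode flags. */
#define DEMO_MODE_E3     0x00000010
#define DEMO_MODE_PORT4  0x00000040

enum { DEMO_OP_WR, DEMO_OP_IF_MODE_OR, DEMO_OP_IF_MODE_AND };

struct demo_op {
	uint8_t  op;
	uint32_t arg;      /* value for OP_WR, cmd_offset for the IF_MODE ops */
	uint32_t bitmap;   /* mode bitmap for the IF_MODE ops */
};

/* Same skip logic as the OP_IF_MODE_* cases in bnx2x_init_block(): an
 * IF_MODE op tells the loop how many of the following ops to jump over
 * when the current mode flags do not satisfy its bitmap. */
static void demo_run(const struct demo_op *ops, int count, uint32_t mode_flags)
{
	for (int i = 0; i < count; i++) {
		switch (ops[i].op) {
		case DEMO_OP_IF_MODE_AND:
			/* skip if any required flag is missing */
			if ((mode_flags & ops[i].bitmap) != ops[i].bitmap)
				i += (int)ops[i].arg;
			break;
		case DEMO_OP_IF_MODE_OR:
			/* skip if none of the flags match */
			if ((mode_flags & ops[i].bitmap) == 0)
				i += (int)ops[i].arg;
			break;
		case DEMO_OP_WR:
			printf("write 0x%x\n", (unsigned)ops[i].arg);
			break;
		}
	}
}

int main(void)
{
	const struct demo_op ops[] = {
		{ DEMO_OP_WR, 0x11, 0 },
		/* the next op runs only on E3 4-port configurations */
		{ DEMO_OP_IF_MODE_AND, 1, DEMO_MODE_E3 | DEMO_MODE_PORT4 },
		{ DEMO_OP_WR, 0x22, 0 },
		{ DEMO_OP_WR, 0x33, 0 },
	};

	demo_run(ops, 4, DEMO_MODE_E3);                   /* prints 0x11, 0x33 */
	demo_run(ops, 4, DEMO_MODE_E3 | DEMO_MODE_PORT4); /* prints 0x11, 0x22, 0x33 */
	return 0;
}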
@@ -417,7 +458,8 @@ static const struct arb_line write_arb_addr[NUM_WR_Q-1] = { | |||
417 | PXP2_REG_RQ_BW_WR_UBOUND30} | 458 | PXP2_REG_RQ_BW_WR_UBOUND30} |
418 | }; | 459 | }; |
419 | 460 | ||
420 | static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order) | 461 | static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, |
462 | int w_order) | ||
421 | { | 463 | { |
422 | u32 val, i; | 464 | u32 val, i; |
423 | 465 | ||
@@ -491,19 +533,21 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order) | |||
491 | if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD)) | 533 | if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD)) |
492 | REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00); | 534 | REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00); |
493 | 535 | ||
494 | if (CHIP_IS_E2(bp)) | 536 | if (CHIP_IS_E3(bp)) |
537 | REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order)); | ||
538 | else if (CHIP_IS_E2(bp)) | ||
495 | REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order)); | 539 | REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order)); |
496 | else | 540 | else |
497 | REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); | 541 | REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); |
498 | 542 | ||
499 | if (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) { | 543 | if (!CHIP_IS_E1(bp)) { |
500 | /* MPS w_order optimal TH presently TH | 544 | /* MPS w_order optimal TH presently TH |
501 | * 128 0 0 2 | 545 | * 128 0 0 2 |
502 | * 256 1 1 3 | 546 | * 256 1 1 3 |
503 | * >=512 2 2 3 | 547 | * >=512 2 2 3 |
504 | */ | 548 | */ |
505 | /* DMAE is special */ | 549 | /* DMAE is special */ |
506 | if (CHIP_IS_E2(bp)) { | 550 | if (!CHIP_IS_E1H(bp)) { |
507 | /* E2 can use optimal TH */ | 551 | /* E2 can use optimal TH */ |
508 | val = w_order; | 552 | val = w_order; |
509 | REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val); | 553 | REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val); |
@@ -557,8 +601,8 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order) | |||
557 | #define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) | 601 | #define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) |
558 | #define ILT_RANGE(f, l) (((l) << 10) | f) | 602 | #define ILT_RANGE(f, l) (((l) << 10) | f) |
559 | 603 | ||
560 | static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line, | 604 | static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, |
561 | u32 size, u8 memop) | 605 | struct ilt_line *line, u32 size, u8 memop) |
562 | { | 606 | { |
563 | if (memop == ILT_MEMOP_FREE) { | 607 | if (memop == ILT_MEMOP_FREE) { |
564 | BNX2X_ILT_FREE(line->page, line->page_mapping, line->size); | 608 | BNX2X_ILT_FREE(line->page, line->page_mapping, line->size); |
@@ -572,7 +616,8 @@ static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line, | |||
572 | } | 616 | } |
573 | 617 | ||
574 | 618 | ||
575 | static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop) | 619 | static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, |
620 | u8 memop) | ||
576 | { | 621 | { |
577 | int i, rc; | 622 | int i, rc; |
578 | struct bnx2x_ilt *ilt = BP_ILT(bp); | 623 | struct bnx2x_ilt *ilt = BP_ILT(bp); |
@@ -617,8 +662,8 @@ static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx, | |||
617 | bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping)); | 662 | bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping)); |
618 | } | 663 | } |
619 | 664 | ||
620 | static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt, | 665 | static void bnx2x_ilt_line_init_op(struct bnx2x *bp, |
621 | int idx, u8 initop) | 666 | struct bnx2x_ilt *ilt, int idx, u8 initop) |
622 | { | 667 | { |
623 | dma_addr_t null_mapping; | 668 | dma_addr_t null_mapping; |
624 | int abs_idx = ilt->start_line + idx; | 669 | int abs_idx = ilt->start_line + idx; |
@@ -733,7 +778,7 @@ static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop) | |||
733 | } | 778 | } |
734 | 779 | ||
735 | static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num, | 780 | static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num, |
736 | u32 psz_reg, u8 initop) | 781 | u32 psz_reg, u8 initop) |
737 | { | 782 | { |
738 | struct bnx2x_ilt *ilt = BP_ILT(bp); | 783 | struct bnx2x_ilt *ilt = BP_ILT(bp); |
739 | struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; | 784 | struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; |
@@ -848,7 +893,8 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, | |||
848 | 893 | ||
849 | /* Initialize T2 */ | 894 | /* Initialize T2 */ |
850 | for (i = 0; i < src_cid_count-1; i++) | 895 | for (i = 0; i < src_cid_count-1; i++) |
851 | t2[i].next = (u64)(t2_mapping + (i+1)*sizeof(struct src_ent)); | 896 | t2[i].next = (u64)(t2_mapping + |
897 | (i+1)*sizeof(struct src_ent)); | ||
852 | 898 | ||
853 | /* tell the searcher where the T2 table is */ | 899 | /* tell the searcher where the T2 table is */ |
854 | REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count); | 900 | REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count); |
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c index 15b5bc174331..347f3239ad1f 100644 --- a/drivers/net/bnx2x/bnx2x_link.c +++ b/drivers/net/bnx2x/bnx2x_link.c | |||
@@ -25,6 +25,8 @@ | |||
25 | #include <linux/mutex.h> | 25 | #include <linux/mutex.h> |
26 | 26 | ||
27 | #include "bnx2x.h" | 27 | #include "bnx2x.h" |
28 | #include "bnx2x_cmn.h" | ||
29 | |||
28 | 30 | ||
29 | /********************************************************/ | 31 | /********************************************************/ |
30 | #define ETH_HLEN 14 | 32 | #define ETH_HLEN 14 |
@@ -874,6 +876,54 @@ static void bnx2x_update_pfc_brb(struct link_params *params, | |||
874 | } | 876 | } |
875 | } | 877 | } |
876 | 878 | ||
879 | /****************************************************************************** | ||
880 | * Description: | ||
881 | * This function is needed because NIG ARB_CREDIT_WEIGHT_X are | ||
882 | * not contiguous, so ARB_CREDIT_WEIGHT_0 + offset cannot be used. | ||
883 | ******************************************************************************/ | ||
884 | int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, | ||
885 | u8 cos_entry, | ||
886 | u32 priority_mask, u8 port) | ||
887 | { | ||
888 | u32 nig_reg_rx_priority_mask_add = 0; | ||
889 | |||
890 | switch (cos_entry) { | ||
891 | case 0: | ||
892 | nig_reg_rx_priority_mask_add = (port) ? | ||
893 | NIG_REG_P1_RX_COS0_PRIORITY_MASK : | ||
894 | NIG_REG_P0_RX_COS0_PRIORITY_MASK; | ||
895 | break; | ||
896 | case 1: | ||
897 | nig_reg_rx_priority_mask_add = (port) ? | ||
898 | NIG_REG_P1_RX_COS1_PRIORITY_MASK : | ||
899 | NIG_REG_P0_RX_COS1_PRIORITY_MASK; | ||
900 | break; | ||
901 | case 2: | ||
902 | nig_reg_rx_priority_mask_add = (port) ? | ||
903 | NIG_REG_P1_RX_COS2_PRIORITY_MASK : | ||
904 | NIG_REG_P0_RX_COS2_PRIORITY_MASK; | ||
905 | break; | ||
906 | case 3: | ||
907 | if (port) | ||
908 | return -EINVAL; | ||
909 | nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK; | ||
910 | break; | ||
911 | case 4: | ||
912 | if (port) | ||
913 | return -EINVAL; | ||
914 | nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK; | ||
915 | break; | ||
916 | case 5: | ||
917 | if (port) | ||
918 | return -EINVAL; | ||
919 | nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK; | ||
920 | break; | ||
921 | } | ||
922 | |||
923 | REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask); | ||
924 | |||
925 | return 0; | ||
926 | } | ||
877 | static void bnx2x_update_pfc_nig(struct link_params *params, | 927 | static void bnx2x_update_pfc_nig(struct link_params *params, |
878 | struct link_vars *vars, | 928 | struct link_vars *vars, |
879 | struct bnx2x_nig_brb_pfc_port_params *nig_params) | 929 | struct bnx2x_nig_brb_pfc_port_params *nig_params) |
@@ -958,15 +1008,12 @@ static void bnx2x_update_pfc_nig(struct link_params *params, | |||
958 | REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT, val); | 1008 | REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT, val); |
959 | 1009 | ||
960 | if (nig_params) { | 1010 | if (nig_params) { |
1011 | u8 i = 0; | ||
961 | pkt_priority_to_cos = nig_params->pkt_priority_to_cos; | 1012 | pkt_priority_to_cos = nig_params->pkt_priority_to_cos; |
962 | 1013 | ||
963 | REG_WR(bp, port ? NIG_REG_P1_RX_COS0_PRIORITY_MASK : | 1014 | for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++) |
964 | NIG_REG_P0_RX_COS0_PRIORITY_MASK, | 1015 | bnx2x_pfc_nig_rx_priority_mask(bp, i, |
965 | nig_params->rx_cos0_priority_mask); | 1016 | nig_params->rx_cos_priority_mask[i], port); |
966 | |||
967 | REG_WR(bp, port ? NIG_REG_P1_RX_COS1_PRIORITY_MASK : | ||
968 | NIG_REG_P0_RX_COS1_PRIORITY_MASK, | ||
969 | nig_params->rx_cos1_priority_mask); | ||
970 | 1017 | ||
971 | REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 : | 1018 | REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 : |
972 | NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0, | 1019 | NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0, |
@@ -1824,26 +1871,6 @@ void bnx2x_link_status_update(struct link_params *params, | |||
1824 | vars->line_speed = SPEED_10000; | 1871 | vars->line_speed = SPEED_10000; |
1825 | break; | 1872 | break; |
1826 | 1873 | ||
1827 | case LINK_12GTFD: | ||
1828 | vars->line_speed = SPEED_12000; | ||
1829 | break; | ||
1830 | |||
1831 | case LINK_12_5GTFD: | ||
1832 | vars->line_speed = SPEED_12500; | ||
1833 | break; | ||
1834 | |||
1835 | case LINK_13GTFD: | ||
1836 | vars->line_speed = SPEED_13000; | ||
1837 | break; | ||
1838 | |||
1839 | case LINK_15GTFD: | ||
1840 | vars->line_speed = SPEED_15000; | ||
1841 | break; | ||
1842 | |||
1843 | case LINK_16GTFD: | ||
1844 | vars->line_speed = SPEED_16000; | ||
1845 | break; | ||
1846 | |||
1847 | default: | 1874 | default: |
1848 | break; | 1875 | break; |
1849 | } | 1876 | } |
@@ -2667,31 +2694,6 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy, | |||
2667 | vars->link_status |= LINK_10GTFD; | 2694 | vars->link_status |= LINK_10GTFD; |
2668 | break; | 2695 | break; |
2669 | 2696 | ||
2670 | case GP_STATUS_12G_HIG: | ||
2671 | new_line_speed = SPEED_12000; | ||
2672 | vars->link_status |= LINK_12GTFD; | ||
2673 | break; | ||
2674 | |||
2675 | case GP_STATUS_12_5G: | ||
2676 | new_line_speed = SPEED_12500; | ||
2677 | vars->link_status |= LINK_12_5GTFD; | ||
2678 | break; | ||
2679 | |||
2680 | case GP_STATUS_13G: | ||
2681 | new_line_speed = SPEED_13000; | ||
2682 | vars->link_status |= LINK_13GTFD; | ||
2683 | break; | ||
2684 | |||
2685 | case GP_STATUS_15G: | ||
2686 | new_line_speed = SPEED_15000; | ||
2687 | vars->link_status |= LINK_15GTFD; | ||
2688 | break; | ||
2689 | |||
2690 | case GP_STATUS_16G: | ||
2691 | new_line_speed = SPEED_16000; | ||
2692 | vars->link_status |= LINK_16GTFD; | ||
2693 | break; | ||
2694 | |||
2695 | default: | 2697 | default: |
2696 | DP(NETIF_MSG_LINK, | 2698 | DP(NETIF_MSG_LINK, |
2697 | "link speed unsupported gp_status 0x%x\n", | 2699 | "link speed unsupported gp_status 0x%x\n", |
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h index a106d8cbd69f..3fef7782490a 100644 --- a/drivers/net/bnx2x/bnx2x_link.h +++ b/drivers/net/bnx2x/bnx2x_link.h | |||
@@ -81,6 +81,7 @@ | |||
81 | #define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170 | 81 | #define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170 |
82 | #define PFC_BRB_FULL_LB_XON_THRESHOLD 250 | 82 | #define PFC_BRB_FULL_LB_XON_THRESHOLD 250 |
83 | 83 | ||
84 | #define MAXVAL(a, b) (((a) > (b)) ? (a) : (b)) | ||
84 | /***********************************************************/ | 85 | /***********************************************************/ |
85 | /* Structs */ | 86 | /* Structs */ |
86 | /***********************************************************/ | 87 | /***********************************************************/ |
@@ -262,6 +263,8 @@ struct link_vars { | |||
262 | #define MAC_TYPE_NONE 0 | 263 | #define MAC_TYPE_NONE 0 |
263 | #define MAC_TYPE_EMAC 1 | 264 | #define MAC_TYPE_EMAC 1 |
264 | #define MAC_TYPE_BMAC 2 | 265 | #define MAC_TYPE_BMAC 2 |
266 | #define MAC_TYPE_UMAC 3 | ||
267 | #define MAC_TYPE_XMAC 4 | ||
265 | 268 | ||
266 | u8 phy_link_up; /* internal phy link indication */ | 269 | u8 phy_link_up; /* internal phy link indication */ |
267 | u8 link_up; | 270 | u8 link_up; |
@@ -363,6 +366,20 @@ int bnx2x_phy_probe(struct link_params *params); | |||
363 | u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base, | 366 | u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base, |
364 | u32 shmem2_base, u8 port); | 367 | u32 shmem2_base, u8 port); |
365 | 368 | ||
369 | /* DCBX structs */ | ||
370 | |||
371 | /* Number of maximum COS per chip */ | ||
372 | #define DCBX_E2E3_MAX_NUM_COS (2) | ||
373 | #define DCBX_E3B0_MAX_NUM_COS_PORT0 (6) | ||
374 | #define DCBX_E3B0_MAX_NUM_COS_PORT1 (3) | ||
375 | #define DCBX_E3B0_MAX_NUM_COS ( \ | ||
376 | MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \ | ||
377 | DCBX_E3B0_MAX_NUM_COS_PORT1)) | ||
378 | |||
379 | #define DCBX_MAX_NUM_COS ( \ | ||
380 | MAXVAL(DCBX_E3B0_MAX_NUM_COS, \ | ||
381 | DCBX_E2E3_MAX_NUM_COS)) | ||
382 | |||
366 | /* PFC port configuration params */ | 383 | /* PFC port configuration params */ |
367 | struct bnx2x_nig_brb_pfc_port_params { | 384 | struct bnx2x_nig_brb_pfc_port_params { |
368 | /* NIG */ | 385 | /* NIG */ |
@@ -370,8 +387,8 @@ struct bnx2x_nig_brb_pfc_port_params { | |||
370 | u32 llfc_out_en; | 387 | u32 llfc_out_en; |
371 | u32 llfc_enable; | 388 | u32 llfc_enable; |
372 | u32 pkt_priority_to_cos; | 389 | u32 pkt_priority_to_cos; |
373 | u32 rx_cos0_priority_mask; | 390 | u8 num_of_rx_cos_priority_mask; |
374 | u32 rx_cos1_priority_mask; | 391 | u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS]; |
375 | u32 llfc_high_priority_classes; | 392 | u32 llfc_high_priority_classes; |
376 | u32 llfc_low_priority_classes; | 393 | u32 llfc_low_priority_classes; |
377 | /* BRB */ | 394 | /* BRB */ |
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index 84f419fcde26..63c92091586f 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/mii.h> | 39 | #include <linux/mii.h> |
40 | #include <linux/if_vlan.h> | 40 | #include <linux/if_vlan.h> |
41 | #include <net/ip.h> | 41 | #include <net/ip.h> |
42 | #include <net/ipv6.h> | ||
42 | #include <net/tcp.h> | 43 | #include <net/tcp.h> |
43 | #include <net/checksum.h> | 44 | #include <net/checksum.h> |
44 | #include <net/ip6_checksum.h> | 45 | #include <net/ip6_checksum.h> |
@@ -50,7 +51,6 @@ | |||
50 | #include <linux/io.h> | 51 | #include <linux/io.h> |
51 | #include <linux/stringify.h> | 52 | #include <linux/stringify.h> |
52 | 53 | ||
53 | #define BNX2X_MAIN | ||
54 | #include "bnx2x.h" | 54 | #include "bnx2x.h" |
55 | #include "bnx2x_init.h" | 55 | #include "bnx2x_init.h" |
56 | #include "bnx2x_init_ops.h" | 56 | #include "bnx2x_init_ops.h" |
@@ -74,12 +74,14 @@ | |||
74 | #define TX_TIMEOUT (5*HZ) | 74 | #define TX_TIMEOUT (5*HZ) |
75 | 75 | ||
76 | static char version[] __devinitdata = | 76 | static char version[] __devinitdata = |
77 | "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver " | 77 | "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver " |
78 | DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | 78 | DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; |
79 | 79 | ||
80 | MODULE_AUTHOR("Eliezer Tamir"); | 80 | MODULE_AUTHOR("Eliezer Tamir"); |
81 | MODULE_DESCRIPTION("Broadcom NetXtreme II " | 81 | MODULE_DESCRIPTION("Broadcom NetXtreme II " |
82 | "BCM57710/57711/57711E/57712/57712E Driver"); | 82 | "BCM57710/57711/57711E/" |
83 | "57712/57712_MF/57800/57800_MF/57810/57810_MF/" | ||
84 | "57840/57840_MF Driver"); | ||
83 | MODULE_LICENSE("GPL"); | 85 | MODULE_LICENSE("GPL"); |
84 | MODULE_VERSION(DRV_MODULE_VERSION); | 86 | MODULE_VERSION(DRV_MODULE_VERSION); |
85 | MODULE_FIRMWARE(FW_FILE_NAME_E1); | 87 | MODULE_FIRMWARE(FW_FILE_NAME_E1); |
@@ -104,7 +106,7 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); | |||
104 | #define INT_MODE_MSI 2 | 106 | #define INT_MODE_MSI 2 |
105 | static int int_mode; | 107 | static int int_mode; |
106 | module_param(int_mode, int, 0); | 108 | module_param(int_mode, int, 0); |
107 | MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X " | 109 | MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " |
108 | "(1 INT#x; 2 MSI)"); | 110 | "(1 INT#x; 2 MSI)"); |
109 | 111 | ||
110 | static int dropless_fc; | 112 | static int dropless_fc; |
@@ -123,37 +125,87 @@ static int debug; | |||
123 | module_param(debug, int, 0); | 125 | module_param(debug, int, 0); |
124 | MODULE_PARM_DESC(debug, " Default debug msglevel"); | 126 | MODULE_PARM_DESC(debug, " Default debug msglevel"); |
125 | 127 | ||
126 | static struct workqueue_struct *bnx2x_wq; | ||
127 | 128 | ||
128 | #ifdef BCM_CNIC | 129 | |
129 | static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01}; | 130 | struct workqueue_struct *bnx2x_wq; |
130 | #endif | ||
131 | 131 | ||
132 | enum bnx2x_board_type { | 132 | enum bnx2x_board_type { |
133 | BCM57710 = 0, | 133 | BCM57710 = 0, |
134 | BCM57711 = 1, | 134 | BCM57711, |
135 | BCM57711E = 2, | 135 | BCM57711E, |
136 | BCM57712 = 3, | 136 | BCM57712, |
137 | BCM57712E = 4 | 137 | BCM57712_MF, |
138 | BCM57800, | ||
139 | BCM57800_MF, | ||
140 | BCM57810, | ||
141 | BCM57810_MF, | ||
142 | BCM57840, | ||
143 | BCM57840_MF | ||
138 | }; | 144 | }; |
139 | 145 | ||
140 | /* indexed by board_type, above */ | 146 | /* indexed by board_type, above */ |
141 | static struct { | 147 | static struct { |
142 | char *name; | 148 | char *name; |
143 | } board_info[] __devinitdata = { | 149 | } board_info[] __devinitdata = { |
144 | { "Broadcom NetXtreme II BCM57710 XGb" }, | 150 | { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, |
145 | { "Broadcom NetXtreme II BCM57711 XGb" }, | 151 | { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, |
146 | { "Broadcom NetXtreme II BCM57711E XGb" }, | 152 | { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, |
147 | { "Broadcom NetXtreme II BCM57712 XGb" }, | 153 | { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" }, |
148 | { "Broadcom NetXtreme II BCM57712E XGb" } | 154 | { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, |
155 | { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, | ||
156 | { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, | ||
157 | { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, | ||
158 | { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, | ||
159 | { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, | ||
160 | { "Broadcom NetXtreme II BCM57840 10/20 Gigabit " | ||
161 | "Ethernet Multi Function"} | ||
149 | }; | 162 | }; |
150 | 163 | ||
164 | #ifndef PCI_DEVICE_ID_NX2_57710 | ||
165 | #define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710 | ||
166 | #endif | ||
167 | #ifndef PCI_DEVICE_ID_NX2_57711 | ||
168 | #define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711 | ||
169 | #endif | ||
170 | #ifndef PCI_DEVICE_ID_NX2_57711E | ||
171 | #define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E | ||
172 | #endif | ||
173 | #ifndef PCI_DEVICE_ID_NX2_57712 | ||
174 | #define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712 | ||
175 | #endif | ||
176 | #ifndef PCI_DEVICE_ID_NX2_57712_MF | ||
177 | #define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF | ||
178 | #endif | ||
179 | #ifndef PCI_DEVICE_ID_NX2_57800 | ||
180 | #define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800 | ||
181 | #endif | ||
182 | #ifndef PCI_DEVICE_ID_NX2_57800_MF | ||
183 | #define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF | ||
184 | #endif | ||
185 | #ifndef PCI_DEVICE_ID_NX2_57810 | ||
186 | #define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810 | ||
187 | #endif | ||
188 | #ifndef PCI_DEVICE_ID_NX2_57810_MF | ||
189 | #define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF | ||
190 | #endif | ||
191 | #ifndef PCI_DEVICE_ID_NX2_57840 | ||
192 | #define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840 | ||
193 | #endif | ||
194 | #ifndef PCI_DEVICE_ID_NX2_57840_MF | ||
195 | #define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF | ||
196 | #endif | ||
151 | static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { | 197 | static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { |
152 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, | 198 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, |
153 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, | 199 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, |
154 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, | 200 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, |
155 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 }, | 201 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 }, |
156 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E }, | 202 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF }, |
203 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 }, | ||
204 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF }, | ||
205 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 }, | ||
206 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, | ||
207 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 }, | ||
208 | { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, | ||
157 | { 0 } | 209 | { 0 } |
158 | }; | 210 | }; |
159 | 211 | ||
@@ -163,11 +215,47 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl); | |||
163 | * General service functions | 215 | * General service functions |
164 | ****************************************************************************/ | 216 | ****************************************************************************/ |
165 | 217 | ||
166 | static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid) | 218 | static inline void __storm_memset_dma_mapping(struct bnx2x *bp, |
219 | u32 addr, dma_addr_t mapping) | ||
220 | { | ||
221 | REG_WR(bp, addr, U64_LO(mapping)); | ||
222 | REG_WR(bp, addr + 4, U64_HI(mapping)); | ||
223 | } | ||
224 | |||
225 | static inline void storm_memset_spq_addr(struct bnx2x *bp, | ||
226 | dma_addr_t mapping, u16 abs_fid) | ||
227 | { | ||
228 | u32 addr = XSEM_REG_FAST_MEMORY + | ||
229 | XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid); | ||
230 | |||
231 | __storm_memset_dma_mapping(bp, addr, mapping); | ||
232 | } | ||
233 | |||
234 | static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, | ||
235 | u16 pf_id) | ||
167 | { | 236 | { |
168 | REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov); | 237 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), |
238 | pf_id); | ||
239 | REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), | ||
240 | pf_id); | ||
241 | REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), | ||
242 | pf_id); | ||
243 | REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), | ||
244 | pf_id); | ||
169 | } | 245 | } |
170 | 246 | ||
247 | static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, | ||
248 | u8 enable) | ||
249 | { | ||
250 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), | ||
251 | enable); | ||
252 | REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), | ||
253 | enable); | ||
254 | REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), | ||
255 | enable); | ||
256 | REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), | ||
257 | enable); | ||
258 | } | ||
171 | 259 | ||
172 | static inline void storm_memset_eq_data(struct bnx2x *bp, | 260 | static inline void storm_memset_eq_data(struct bnx2x *bp, |
173 | struct event_ring_data *eq_data, | 261 | struct event_ring_data *eq_data, |
@@ -187,45 +275,6 @@ static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, | |||
187 | REG_WR16(bp, addr, eq_prod); | 275 | REG_WR16(bp, addr, eq_prod); |
188 | } | 276 | } |
189 | 277 | ||
190 | static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, | ||
191 | u16 fw_sb_id, u8 sb_index, | ||
192 | u8 ticks) | ||
193 | { | ||
194 | |||
195 | int index_offset = CHIP_IS_E2(bp) ? | ||
196 | offsetof(struct hc_status_block_data_e2, index_data) : | ||
197 | offsetof(struct hc_status_block_data_e1x, index_data); | ||
198 | u32 addr = BAR_CSTRORM_INTMEM + | ||
199 | CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + | ||
200 | index_offset + | ||
201 | sizeof(struct hc_index_data)*sb_index + | ||
202 | offsetof(struct hc_index_data, timeout); | ||
203 | REG_WR8(bp, addr, ticks); | ||
204 | DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n", | ||
205 | port, fw_sb_id, sb_index, ticks); | ||
206 | } | ||
207 | static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, | ||
208 | u16 fw_sb_id, u8 sb_index, | ||
209 | u8 disable) | ||
210 | { | ||
211 | u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); | ||
212 | int index_offset = CHIP_IS_E2(bp) ? | ||
213 | offsetof(struct hc_status_block_data_e2, index_data) : | ||
214 | offsetof(struct hc_status_block_data_e1x, index_data); | ||
215 | u32 addr = BAR_CSTRORM_INTMEM + | ||
216 | CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + | ||
217 | index_offset + | ||
218 | sizeof(struct hc_index_data)*sb_index + | ||
219 | offsetof(struct hc_index_data, flags); | ||
220 | u16 flags = REG_RD16(bp, addr); | ||
221 | /* clear and set */ | ||
222 | flags &= ~HC_INDEX_DATA_HC_ENABLED; | ||
223 | flags |= enable_flag; | ||
224 | REG_WR16(bp, addr, flags); | ||
225 | DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n", | ||
226 | port, fw_sb_id, sb_index, disable); | ||
227 | } | ||
228 | |||
229 | /* used only at init | 278 | /* used only at init |
230 | * locking is done by mcp | 279 | * locking is done by mcp |
231 | */ | 280 | */ |
@@ -319,13 +368,6 @@ static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, | |||
319 | 368 | ||
320 | } | 369 | } |
321 | 370 | ||
322 | const u32 dmae_reg_go_c[] = { | ||
323 | DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, | ||
324 | DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, | ||
325 | DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, | ||
326 | DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 | ||
327 | }; | ||
328 | |||
329 | /* copy command into DMAE command memory and set DMAE command go */ | 371 | /* copy command into DMAE command memory and set DMAE command go */ |
330 | void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) | 372 | void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) |
331 | { | 373 | { |
@@ -406,7 +448,11 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, | |||
406 | bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], | 448 | bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], |
407 | bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); | 449 | bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); |
408 | 450 | ||
409 | /* lock the dmae channel */ | 451 | /* |
452 | * Lock the dmae channel. Disable BHs to prevent a deadlock, | ||
453 | * since this code is called both from syscall context and | ||
454 | * from ndo_set_rx_mode() flow that may be called from BH. | ||
455 | */ | ||
410 | spin_lock_bh(&bp->dmae_lock); | 456 | spin_lock_bh(&bp->dmae_lock); |
411 | 457 | ||
412 | /* reset completion */ | 458 | /* reset completion */ |
@@ -730,9 +776,9 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
730 | /* Indices */ | 776 | /* Indices */ |
731 | /* Common */ | 777 | /* Common */ |
732 | BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)" | 778 | BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)" |
733 | " spq_prod_idx(0x%x)\n", | 779 | " spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", |
734 | bp->def_idx, bp->def_att_idx, | 780 | bp->def_idx, bp->def_att_idx, bp->attn_state, |
735 | bp->attn_state, bp->spq_prod_idx); | 781 | bp->spq_prod_idx, bp->stats_counter); |
736 | BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", | 782 | BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", |
737 | bp->def_status_blk->atten_status_block.attn_bits, | 783 | bp->def_status_blk->atten_status_block.attn_bits, |
738 | bp->def_status_blk->atten_status_block.attn_bits_ack, | 784 | bp->def_status_blk->atten_status_block.attn_bits_ack, |
@@ -749,15 +795,17 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
749 | CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + | 795 | CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + |
750 | i*sizeof(u32)); | 796 | i*sizeof(u32)); |
751 | 797 | ||
752 | pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) " | 798 | pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) " |
753 | "pf_id(0x%x) vnic_id(0x%x) " | 799 | "pf_id(0x%x) vnic_id(0x%x) " |
754 | "vf_id(0x%x) vf_valid (0x%x)\n", | 800 | "vf_id(0x%x) vf_valid (0x%x) " |
801 | "state(0x%x)\n", | ||
755 | sp_sb_data.igu_sb_id, | 802 | sp_sb_data.igu_sb_id, |
756 | sp_sb_data.igu_seg_id, | 803 | sp_sb_data.igu_seg_id, |
757 | sp_sb_data.p_func.pf_id, | 804 | sp_sb_data.p_func.pf_id, |
758 | sp_sb_data.p_func.vnic_id, | 805 | sp_sb_data.p_func.vnic_id, |
759 | sp_sb_data.p_func.vf_id, | 806 | sp_sb_data.p_func.vf_id, |
760 | sp_sb_data.p_func.vf_valid); | 807 | sp_sb_data.p_func.vf_valid, |
808 | sp_sb_data.state); | ||
761 | 809 | ||
762 | 810 | ||
763 | for_each_eth_queue(bp, i) { | 811 | for_each_eth_queue(bp, i) { |
@@ -766,13 +814,13 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
766 | struct hc_status_block_data_e2 sb_data_e2; | 814 | struct hc_status_block_data_e2 sb_data_e2; |
767 | struct hc_status_block_data_e1x sb_data_e1x; | 815 | struct hc_status_block_data_e1x sb_data_e1x; |
768 | struct hc_status_block_sm *hc_sm_p = | 816 | struct hc_status_block_sm *hc_sm_p = |
769 | CHIP_IS_E2(bp) ? | 817 | CHIP_IS_E1x(bp) ? |
770 | sb_data_e2.common.state_machine : | 818 | sb_data_e1x.common.state_machine : |
771 | sb_data_e1x.common.state_machine; | 819 | sb_data_e2.common.state_machine; |
772 | struct hc_index_data *hc_index_p = | 820 | struct hc_index_data *hc_index_p = |
773 | CHIP_IS_E2(bp) ? | 821 | CHIP_IS_E1x(bp) ? |
774 | sb_data_e2.index_data : | 822 | sb_data_e1x.index_data : |
775 | sb_data_e1x.index_data; | 823 | sb_data_e2.index_data; |
776 | int data_size; | 824 | int data_size; |
777 | u32 *sb_data_p; | 825 | u32 *sb_data_p; |
778 | 826 | ||
@@ -795,8 +843,8 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
795 | i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, | 843 | i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, |
796 | fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); | 844 | fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); |
797 | 845 | ||
798 | loop = CHIP_IS_E2(bp) ? | 846 | loop = CHIP_IS_E1x(bp) ? |
799 | HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X; | 847 | HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2; |
800 | 848 | ||
801 | /* host sb data */ | 849 | /* host sb data */ |
802 | 850 | ||
@@ -816,35 +864,39 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
816 | fp->sb_index_values[j], | 864 | fp->sb_index_values[j], |
817 | (j == loop - 1) ? ")" : " "); | 865 | (j == loop - 1) ? ")" : " "); |
818 | /* fw sb data */ | 866 | /* fw sb data */ |
819 | data_size = CHIP_IS_E2(bp) ? | 867 | data_size = CHIP_IS_E1x(bp) ? |
820 | sizeof(struct hc_status_block_data_e2) : | 868 | sizeof(struct hc_status_block_data_e1x) : |
821 | sizeof(struct hc_status_block_data_e1x); | 869 | sizeof(struct hc_status_block_data_e2); |
822 | data_size /= sizeof(u32); | 870 | data_size /= sizeof(u32); |
823 | sb_data_p = CHIP_IS_E2(bp) ? | 871 | sb_data_p = CHIP_IS_E1x(bp) ? |
824 | (u32 *)&sb_data_e2 : | 872 | (u32 *)&sb_data_e1x : |
825 | (u32 *)&sb_data_e1x; | 873 | (u32 *)&sb_data_e2; |
826 | /* copy sb data in here */ | 874 | /* copy sb data in here */ |
827 | for (j = 0; j < data_size; j++) | 875 | for (j = 0; j < data_size; j++) |
828 | *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM + | 876 | *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM + |
829 | CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + | 877 | CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + |
830 | j * sizeof(u32)); | 878 | j * sizeof(u32)); |
831 | 879 | ||
832 | if (CHIP_IS_E2(bp)) { | 880 | if (!CHIP_IS_E1x(bp)) { |
833 | pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) " | 881 | pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " |
834 | "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n", | 882 | "vnic_id(0x%x) same_igu_sb_1b(0x%x) " |
883 | "state(0x%x)\n", | ||
835 | sb_data_e2.common.p_func.pf_id, | 884 | sb_data_e2.common.p_func.pf_id, |
836 | sb_data_e2.common.p_func.vf_id, | 885 | sb_data_e2.common.p_func.vf_id, |
837 | sb_data_e2.common.p_func.vf_valid, | 886 | sb_data_e2.common.p_func.vf_valid, |
838 | sb_data_e2.common.p_func.vnic_id, | 887 | sb_data_e2.common.p_func.vnic_id, |
839 | sb_data_e2.common.same_igu_sb_1b); | 888 | sb_data_e2.common.same_igu_sb_1b, |
889 | sb_data_e2.common.state); | ||
840 | } else { | 890 | } else { |
841 | pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) " | 891 | pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " |
842 | "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n", | 892 | "vnic_id(0x%x) same_igu_sb_1b(0x%x) " |
893 | "state(0x%x)\n", | ||
843 | sb_data_e1x.common.p_func.pf_id, | 894 | sb_data_e1x.common.p_func.pf_id, |
844 | sb_data_e1x.common.p_func.vf_id, | 895 | sb_data_e1x.common.p_func.vf_id, |
845 | sb_data_e1x.common.p_func.vf_valid, | 896 | sb_data_e1x.common.p_func.vf_valid, |
846 | sb_data_e1x.common.p_func.vnic_id, | 897 | sb_data_e1x.common.p_func.vnic_id, |
847 | sb_data_e1x.common.same_igu_sb_1b); | 898 | sb_data_e1x.common.same_igu_sb_1b, |
899 | sb_data_e1x.common.state); | ||
848 | } | 900 | } |
849 | 901 | ||
850 | /* SB_SMs data */ | 902 | /* SB_SMs data */ |
@@ -933,6 +985,373 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
933 | BNX2X_ERR("end crash dump -----------------\n"); | 985 | BNX2X_ERR("end crash dump -----------------\n"); |
934 | } | 986 | } |
935 | 987 | ||
988 | /* | ||
989 | * FLR Support for E2 | ||
990 | * | ||
991 | * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW | ||
992 | * initialization. | ||
993 | */ | ||
994 | #define FLR_WAIT_USEC 10000 /* 10 milliseconds */ | ||
995 | #define FLR_WAIT_INTERAVAL 50 /* usec */ | ||
996 | #define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */ | ||
997 | |||
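Editorial aside: the three constants above encode a simple budget: a total wait of FLR_WAIT_USEC (10000 usec) taken in FLR_WAIT_INTERAVAL (50 usec) steps yields FLR_POLL_CNT = 200 read attempts. A minimal standalone sketch of that wait pattern, assuming a hypothetical read_reg() in place of REG_RD() (stubbed so it compiles on its own):

        #include <stdint.h>
        #include <unistd.h>

        /* Stub standing in for REG_RD(); always reports the counter as clear. */
        static uint32_t read_reg(uint32_t addr)
        {
                (void)addr;
                return 0;
        }

        /* Poll a register until it reads 'expected' or the budget runs out,
         * mirroring the FLR_WAIT_USEC / FLR_WAIT_INTERAVAL split above. */
        static uint32_t poll_reg_until(uint32_t addr, uint32_t expected)
        {
                uint32_t budget = 10000 / 50;   /* == FLR_POLL_CNT (200) */
                uint32_t val;

                while ((val = read_reg(addr)) != expected && budget--)
                        usleep(50);             /* one FLR_WAIT_INTERAVAL */

                return val;                     /* caller compares to 'expected' */
        }

        int main(void)
        {
                return poll_reg_until(0x1234, 0) == 0 ? 0 : 1;
        }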
998 | struct pbf_pN_buf_regs { | ||
999 | int pN; | ||
1000 | u32 init_crd; | ||
1001 | u32 crd; | ||
1002 | u32 crd_freed; | ||
1003 | }; | ||
1004 | |||
1005 | struct pbf_pN_cmd_regs { | ||
1006 | int pN; | ||
1007 | u32 lines_occup; | ||
1008 | u32 lines_freed; | ||
1009 | }; | ||
1010 | |||
1011 | static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp, | ||
1012 | struct pbf_pN_buf_regs *regs, | ||
1013 | u32 poll_count) | ||
1014 | { | ||
1015 | u32 init_crd, crd, crd_start, crd_freed, crd_freed_start; | ||
1016 | u32 cur_cnt = poll_count; | ||
1017 | |||
1018 | crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed); | ||
1019 | crd = crd_start = REG_RD(bp, regs->crd); | ||
1020 | init_crd = REG_RD(bp, regs->init_crd); | ||
1021 | |||
1022 | DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); | ||
1023 | DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd); | ||
1024 | DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); | ||
1025 | |||
1026 | while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) < | ||
1027 | (init_crd - crd_start))) { | ||
1028 | if (cur_cnt--) { | ||
1029 | udelay(FLR_WAIT_INTERAVAL); | ||
1030 | crd = REG_RD(bp, regs->crd); | ||
1031 | crd_freed = REG_RD(bp, regs->crd_freed); | ||
1032 | } else { | ||
1033 | DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n", | ||
1034 | regs->pN); | ||
1035 | DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n", | ||
1036 | regs->pN, crd); | ||
1037 | DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n", | ||
1038 | regs->pN, crd_freed); | ||
1039 | break; | ||
1040 | } | ||
1041 | } | ||
1042 | DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n", | ||
1043 | poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); | ||
1044 | } | ||
1045 | |||
1046 | static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, | ||
1047 | struct pbf_pN_cmd_regs *regs, | ||
1048 | u32 poll_count) | ||
1049 | { | ||
1050 | u32 occup, to_free, freed, freed_start; | ||
1051 | u32 cur_cnt = poll_count; | ||
1052 | |||
1053 | occup = to_free = REG_RD(bp, regs->lines_occup); | ||
1054 | freed = freed_start = REG_RD(bp, regs->lines_freed); | ||
1055 | |||
1056 | DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); | ||
1057 | DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); | ||
1058 | |||
1059 | while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) { | ||
1060 | if (cur_cnt--) { | ||
1061 | udelay(FLR_WAIT_INTERAVAL); | ||
1062 | occup = REG_RD(bp, regs->lines_occup); | ||
1063 | freed = REG_RD(bp, regs->lines_freed); | ||
1064 | } else { | ||
1065 | DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n", | ||
1066 | regs->pN); | ||
1067 | DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", | ||
1068 | regs->pN, occup); | ||
1069 | DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", | ||
1070 | regs->pN, freed); | ||
1071 | break; | ||
1072 | } | ||
1073 | } | ||
1074 | DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n", | ||
1075 | poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); | ||
1076 | } | ||
1077 | |||
1078 | static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, | ||
1079 | u32 expected, u32 poll_count) | ||
1080 | { | ||
1081 | u32 cur_cnt = poll_count; | ||
1082 | u32 val; | ||
1083 | |||
1084 | while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) | ||
1085 | udelay(FLR_WAIT_INTERAVAL); | ||
1086 | |||
1087 | return val; | ||
1088 | } | ||
1089 | |||
1090 | static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, | ||
1091 | char *msg, u32 poll_cnt) | ||
1092 | { | ||
1093 | u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); | ||
1094 | if (val != 0) { | ||
1095 | BNX2X_ERR("%s usage count=%d\n", msg, val); | ||
1096 | return 1; | ||
1097 | } | ||
1098 | return 0; | ||
1099 | } | ||
1100 | |||
1101 | static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) | ||
1102 | { | ||
1103 | /* adjust polling timeout */ | ||
1104 | if (CHIP_REV_IS_EMUL(bp)) | ||
1105 | return FLR_POLL_CNT * 2000; | ||
1106 | |||
1107 | if (CHIP_REV_IS_FPGA(bp)) | ||
1108 | return FLR_POLL_CNT * 120; | ||
1109 | |||
1110 | return FLR_POLL_CNT; | ||
1111 | } | ||
1112 | |||
1113 | static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) | ||
1114 | { | ||
1115 | struct pbf_pN_cmd_regs cmd_regs[] = { | ||
1116 | {0, (CHIP_IS_E3B0(bp)) ? | ||
1117 | PBF_REG_TQ_OCCUPANCY_Q0 : | ||
1118 | PBF_REG_P0_TQ_OCCUPANCY, | ||
1119 | (CHIP_IS_E3B0(bp)) ? | ||
1120 | PBF_REG_TQ_LINES_FREED_CNT_Q0 : | ||
1121 | PBF_REG_P0_TQ_LINES_FREED_CNT}, | ||
1122 | {1, (CHIP_IS_E3B0(bp)) ? | ||
1123 | PBF_REG_TQ_OCCUPANCY_Q1 : | ||
1124 | PBF_REG_P1_TQ_OCCUPANCY, | ||
1125 | (CHIP_IS_E3B0(bp)) ? | ||
1126 | PBF_REG_TQ_LINES_FREED_CNT_Q1 : | ||
1127 | PBF_REG_P1_TQ_LINES_FREED_CNT}, | ||
1128 | {4, (CHIP_IS_E3B0(bp)) ? | ||
1129 | PBF_REG_TQ_OCCUPANCY_LB_Q : | ||
1130 | PBF_REG_P4_TQ_OCCUPANCY, | ||
1131 | (CHIP_IS_E3B0(bp)) ? | ||
1132 | PBF_REG_TQ_LINES_FREED_CNT_LB_Q : | ||
1133 | PBF_REG_P4_TQ_LINES_FREED_CNT} | ||
1134 | }; | ||
1135 | |||
1136 | struct pbf_pN_buf_regs buf_regs[] = { | ||
1137 | {0, (CHIP_IS_E3B0(bp)) ? | ||
1138 | PBF_REG_INIT_CRD_Q0 : | ||
1139 | PBF_REG_P0_INIT_CRD , | ||
1140 | (CHIP_IS_E3B0(bp)) ? | ||
1141 | PBF_REG_CREDIT_Q0 : | ||
1142 | PBF_REG_P0_CREDIT, | ||
1143 | (CHIP_IS_E3B0(bp)) ? | ||
1144 | PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : | ||
1145 | PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, | ||
1146 | {1, (CHIP_IS_E3B0(bp)) ? | ||
1147 | PBF_REG_INIT_CRD_Q1 : | ||
1148 | PBF_REG_P1_INIT_CRD, | ||
1149 | (CHIP_IS_E3B0(bp)) ? | ||
1150 | PBF_REG_CREDIT_Q1 : | ||
1151 | PBF_REG_P1_CREDIT, | ||
1152 | (CHIP_IS_E3B0(bp)) ? | ||
1153 | PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : | ||
1154 | PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, | ||
1155 | {4, (CHIP_IS_E3B0(bp)) ? | ||
1156 | PBF_REG_INIT_CRD_LB_Q : | ||
1157 | PBF_REG_P4_INIT_CRD, | ||
1158 | (CHIP_IS_E3B0(bp)) ? | ||
1159 | PBF_REG_CREDIT_LB_Q : | ||
1160 | PBF_REG_P4_CREDIT, | ||
1161 | (CHIP_IS_E3B0(bp)) ? | ||
1162 | PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : | ||
1163 | PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, | ||
1164 | }; | ||
1165 | |||
1166 | int i; | ||
1167 | |||
1168 | /* Verify the command queues are flushed P0, P1, P4 */ | ||
1169 | for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) | ||
1170 | bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); | ||
1171 | |||
1172 | |||
1173 | /* Verify the transmission buffers are flushed P0, P1, P4 */ | ||
1174 | for (i = 0; i < ARRAY_SIZE(buf_regs); i++) | ||
1175 | bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); | ||
1176 | } | ||
1177 | |||
1178 | #define OP_GEN_PARAM(param) \ | ||
1179 | (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) | ||
1180 | |||
1181 | #define OP_GEN_TYPE(type) \ | ||
1182 | (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) | ||
1183 | |||
1184 | #define OP_GEN_AGG_VECT(index) \ | ||
1185 | (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) | ||
1186 | |||
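The OP_GEN_* helpers above are plain shift-and-mask packing of fields into the sdm_op_gen command word. A self-contained illustration of the same idiom, using made-up DEMO_* shifts and masks rather than the real SDM_OP_GEN_* layout:

        #include <stdint.h>
        #include <stdio.h>

        /* Made-up field layout, for illustration only. */
        #define DEMO_COMP_PARAM_SHIFT   0
        #define DEMO_COMP_PARAM         0x0000ffff
        #define DEMO_COMP_TYPE_SHIFT    16
        #define DEMO_COMP_TYPE          0x000f0000

        #define DEMO_PARAM(p)   (((p) << DEMO_COMP_PARAM_SHIFT) & DEMO_COMP_PARAM)
        #define DEMO_TYPE(t)    (((t) << DEMO_COMP_TYPE_SHIFT) & DEMO_COMP_TYPE)

        int main(void)
        {
                uint32_t cmd = 0;

                cmd |= DEMO_PARAM(0x123);       /* completion parameter field */
                cmd |= DEMO_TYPE(0x2);          /* completion type field */
                printf("command word: 0x%08x\n", (unsigned)cmd); /* 0x00020123 */
                return 0;
        }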
1187 | |||
1188 | static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, | ||
1189 | u32 poll_cnt) | ||
1190 | { | ||
1191 | struct sdm_op_gen op_gen = {0}; | ||
1192 | |||
1193 | u32 comp_addr = BAR_CSTRORM_INTMEM + | ||
1194 | CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); | ||
1195 | int ret = 0; | ||
1196 | |||
1197 | if (REG_RD(bp, comp_addr)) { | ||
1198 | BNX2X_ERR("Cleanup complete is not 0\n"); | ||
1199 | return 1; | ||
1200 | } | ||
1201 | |||
1202 | op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); | ||
1203 | op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); | ||
1204 | op_gen.command |= OP_GEN_AGG_VECT(clnup_func); | ||
1205 | op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; | ||
1206 | |||
1207 | DP(BNX2X_MSG_SP, "FW Final cleanup\n"); | ||
1208 | REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command); | ||
1209 | |||
1210 | if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) { | ||
1211 | BNX2X_ERR("FW final cleanup did not succeed\n"); | ||
1212 | ret = 1; | ||
1213 | } | ||
1214 | /* Zero completion for next FLR */ | ||
1215 | REG_WR(bp, comp_addr, 0); | ||
1216 | |||
1217 | return ret; | ||
1218 | } | ||
1219 | |||
1220 | static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev) | ||
1221 | { | ||
1222 | int pos; | ||
1223 | u16 status; | ||
1224 | |||
1225 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
1226 | if (!pos) | ||
1227 | return false; | ||
1228 | |||
1229 | pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); | ||
1230 | return status & PCI_EXP_DEVSTA_TRPND; | ||
1231 | } | ||
1232 | |||
1233 | /* PF FLR specific routines | ||
1234 | */ | ||
1235 | static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) | ||
1236 | { | ||
1237 | |||
1238 | /* wait for CFC PF usage-counter to zero (includes all the VFs) */ | ||
1239 | if (bnx2x_flr_clnup_poll_hw_counter(bp, | ||
1240 | CFC_REG_NUM_LCIDS_INSIDE_PF, | ||
1241 | "CFC PF usage counter timed out", | ||
1242 | poll_cnt)) | ||
1243 | return 1; | ||
1244 | |||
1245 | |||
1246 | /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ | ||
1247 | if (bnx2x_flr_clnup_poll_hw_counter(bp, | ||
1248 | DORQ_REG_PF_USAGE_CNT, | ||
1249 | "DQ PF usage counter timed out", | ||
1250 | poll_cnt)) | ||
1251 | return 1; | ||
1252 | |||
1253 | /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ | ||
1254 | if (bnx2x_flr_clnup_poll_hw_counter(bp, | ||
1255 | QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp), | ||
1256 | "QM PF usage counter timed out", | ||
1257 | poll_cnt)) | ||
1258 | return 1; | ||
1259 | |||
1260 | /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ | ||
1261 | if (bnx2x_flr_clnup_poll_hw_counter(bp, | ||
1262 | TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp), | ||
1263 | "Timers VNIC usage counter timed out", | ||
1264 | poll_cnt)) | ||
1265 | return 1; | ||
1266 | if (bnx2x_flr_clnup_poll_hw_counter(bp, | ||
1267 | TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp), | ||
1268 | "Timers NUM_SCANS usage counter timed out", | ||
1269 | poll_cnt)) | ||
1270 | return 1; | ||
1271 | |||
1272 | /* Wait for DMAE PF usage counter to zero */ | ||
1273 | if (bnx2x_flr_clnup_poll_hw_counter(bp, | ||
1274 | dmae_reg_go_c[INIT_DMAE_C(bp)], | ||
1275 | "DMAE dommand register timed out", | ||
1276 | poll_cnt)) | ||
1277 | return 1; | ||
1278 | |||
1279 | return 0; | ||
1280 | } | ||
1281 | |||
1282 | static void bnx2x_hw_enable_status(struct bnx2x *bp) | ||
1283 | { | ||
1284 | u32 val; | ||
1285 | |||
1286 | val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF); | ||
1287 | DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); | ||
1288 | |||
1289 | val = REG_RD(bp, PBF_REG_DISABLE_PF); | ||
1290 | DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val); | ||
1291 | |||
1292 | val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN); | ||
1293 | DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); | ||
1294 | |||
1295 | val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN); | ||
1296 | DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); | ||
1297 | |||
1298 | val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK); | ||
1299 | DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); | ||
1300 | |||
1301 | val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); | ||
1302 | DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); | ||
1303 | |||
1304 | val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); | ||
1305 | DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); | ||
1306 | |||
1307 | val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); | ||
1308 | DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", | ||
1309 | val); | ||
1310 | } | ||
1311 | |||
1312 | static int bnx2x_pf_flr_clnup(struct bnx2x *bp) | ||
1313 | { | ||
1314 | u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); | ||
1315 | |||
1316 | DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp)); | ||
1317 | |||
1318 | /* Re-enable PF target read access */ | ||
1319 | REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); | ||
1320 | |||
1321 | /* Poll HW usage counters */ | ||
1322 | if (bnx2x_poll_hw_usage_counters(bp, poll_cnt)) | ||
1323 | return -EBUSY; | ||
1324 | |||
1325 | /* Zero the igu 'trailing edge' and 'leading edge' */ | ||
1326 | |||
1327 | /* Send the FW cleanup command */ | ||
1328 | if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt)) | ||
1329 | return -EBUSY; | ||
1330 | |||
1331 | /* ATC cleanup */ | ||
1332 | |||
1333 | /* Verify TX hw is flushed */ | ||
1334 | bnx2x_tx_hw_flushed(bp, poll_cnt); | ||
1335 | |||
1336 | /* Wait 100ms (not adjusted according to platform) */ | ||
1337 | msleep(100); | ||
1338 | |||
1339 | /* Verify no pending pci transactions */ | ||
1340 | if (bnx2x_is_pcie_pending(bp->pdev)) | ||
1341 | BNX2X_ERR("PCIE Transactions still pending\n"); | ||
1342 | |||
1343 | /* Debug */ | ||
1344 | bnx2x_hw_enable_status(bp); | ||
1345 | |||
1346 | /* | ||
1347 | * Master enable - Due to WB DMAE writes performed before this | ||
1348 | * register is re-initialized as part of the regular function init | ||
1349 | */ | ||
1350 | REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); | ||
1351 | |||
1352 | return 0; | ||
1353 | } | ||
1354 | |||
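For context, a rough sketch of how a per-function init path might invoke the cleanup above and abort the load on failure. This is a hypothetical caller, not the actual bnx2x_nic_load() hook, and the chip gate is an assumption based on the "FLR Support for E2" note earlier:

        /* Hypothetical caller sketch only; the real hook point and gating in
         * bnx2x_nic_load() may differ. */
        static int demo_pf_hw_init(struct bnx2x *bp)
        {
                if (!CHIP_IS_E1x(bp)) {
                        int rc = bnx2x_pf_flr_clnup(bp);

                        if (rc) {
                                BNX2X_ERR("FLR cleanup failed: %d\n", rc);
                                return rc;      /* typically -EBUSY */
                        }
                }

                /* ...continue with the regular per-function HW init... */
                return 0;
        }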
936 | static void bnx2x_hc_int_enable(struct bnx2x *bp) | 1355 | static void bnx2x_hc_int_enable(struct bnx2x *bp) |
937 | { | 1356 | { |
938 | int port = BP_PORT(bp); | 1357 | int port = BP_PORT(bp); |
@@ -1188,52 +1607,85 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) | |||
1188 | } | 1607 | } |
1189 | 1608 | ||
1190 | #ifdef BCM_CNIC | 1609 | #ifdef BCM_CNIC |
1191 | static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid); | 1610 | static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err); |
1192 | #endif | 1611 | #endif |
1193 | 1612 | ||
1194 | void bnx2x_sp_event(struct bnx2x_fastpath *fp, | 1613 | void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) |
1195 | union eth_rx_cqe *rr_cqe) | ||
1196 | { | 1614 | { |
1197 | struct bnx2x *bp = fp->bp; | 1615 | struct bnx2x *bp = fp->bp; |
1198 | int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); | 1616 | int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); |
1199 | int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); | 1617 | int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); |
1618 | enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX; | ||
1619 | struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj; | ||
1200 | 1620 | ||
1201 | DP(BNX2X_MSG_SP, | 1621 | DP(BNX2X_MSG_SP, |
1202 | "fp %d cid %d got ramrod #%d state is %x type is %d\n", | 1622 | "fp %d cid %d got ramrod #%d state is %x type is %d\n", |
1203 | fp->index, cid, command, bp->state, | 1623 | fp->index, cid, command, bp->state, |
1204 | rr_cqe->ramrod_cqe.ramrod_type); | 1624 | rr_cqe->ramrod_cqe.ramrod_type); |
1205 | 1625 | ||
1206 | switch (command | fp->state) { | 1626 | switch (command) { |
1207 | case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING): | 1627 | case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): |
1628 | DP(NETIF_MSG_IFUP, "got UPDATE ramrod. CID %d\n", cid); | ||
1629 | drv_cmd = BNX2X_Q_CMD_UPDATE; | ||
1630 | break; | ||
1631 | case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): | ||
1208 | DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid); | 1632 | DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid); |
1209 | fp->state = BNX2X_FP_STATE_OPEN; | 1633 | drv_cmd = BNX2X_Q_CMD_SETUP; |
1210 | break; | 1634 | break; |
1211 | 1635 | ||
1212 | case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING): | 1636 | case (RAMROD_CMD_ID_ETH_HALT): |
1213 | DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid); | 1637 | DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid); |
1214 | fp->state = BNX2X_FP_STATE_HALTED; | 1638 | drv_cmd = BNX2X_Q_CMD_HALT; |
1215 | break; | 1639 | break; |
1216 | 1640 | ||
1217 | case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING): | 1641 | case (RAMROD_CMD_ID_ETH_TERMINATE): |
1218 | DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid); | 1642 | DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid); |
1219 | fp->state = BNX2X_FP_STATE_TERMINATED; | 1643 | drv_cmd = BNX2X_Q_CMD_TERMINATE; |
1220 | break; | 1644 | break; |
1221 | 1645 | ||
1222 | default: | 1646 | case (RAMROD_CMD_ID_ETH_EMPTY): |
1223 | BNX2X_ERR("unexpected MC reply (%d) " | 1647 | DP(NETIF_MSG_IFDOWN, "got MULTI[%d] empty ramrod\n", cid); |
1224 | "fp[%d] state is %x\n", | 1648 | drv_cmd = BNX2X_Q_CMD_EMPTY; |
1225 | command, fp->index, fp->state); | ||
1226 | break; | 1649 | break; |
1650 | |||
1651 | default: | ||
1652 | BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n", | ||
1653 | command, fp->index); | ||
1654 | return; | ||
1227 | } | 1655 | } |
1228 | 1656 | ||
1657 | if ((drv_cmd != BNX2X_Q_CMD_MAX) && | ||
1658 | q_obj->complete_cmd(bp, q_obj, drv_cmd)) | ||
1659 | /* q_obj->complete_cmd() failure means that this was | ||
1660 | * an unexpected completion. | ||
1661 | * | ||
1662 | * In this case we don't want to increase the bp->spq_left | ||
1663 | * because apparently we haven't sent this command the first | ||
1664 | * place. | ||
1665 | */ | ||
1666 | #ifdef BNX2X_STOP_ON_ERROR | ||
1667 | bnx2x_panic(); | ||
1668 | #else | ||
1669 | return; | ||
1670 | #endif | ||
1671 | |||
1229 | smp_mb__before_atomic_inc(); | 1672 | smp_mb__before_atomic_inc(); |
1230 | atomic_inc(&bp->cq_spq_left); | 1673 | atomic_inc(&bp->cq_spq_left); |
1231 | /* push the change in fp->state towards the memory */ | 1674 | /* push the change in bp->spq_left towards the memory */ |
1232 | smp_wmb(); | 1675 | smp_mb__after_atomic_inc(); |
1233 | 1676 | ||
1234 | return; | 1677 | return; |
1235 | } | 1678 | } |
1236 | 1679 | ||
1680 | void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
1681 | u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod) | ||
1682 | { | ||
1683 | u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset; | ||
1684 | |||
1685 | bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod, | ||
1686 | start); | ||
1687 | } | ||
1688 | |||
1237 | irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | 1689 | irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) |
1238 | { | 1690 | { |
1239 | struct bnx2x *bp = netdev_priv(dev_instance); | 1691 | struct bnx2x *bp = netdev_priv(dev_instance); |
@@ -1258,7 +1710,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1258 | 1710 | ||
1259 | mask = 0x2 << (fp->index + CNIC_CONTEXT_USE); | 1711 | mask = 0x2 << (fp->index + CNIC_CONTEXT_USE); |
1260 | if (status & mask) { | 1712 | if (status & mask) { |
1261 | /* Handle Rx and Tx according to SB id */ | 1713 | /* Handle Rx or Tx according to SB id */ |
1262 | prefetch(fp->rx_cons_sb); | 1714 | prefetch(fp->rx_cons_sb); |
1263 | prefetch(fp->tx_cons_sb); | 1715 | prefetch(fp->tx_cons_sb); |
1264 | prefetch(&fp->sb_running_index[SM_RX_ID]); | 1716 | prefetch(&fp->sb_running_index[SM_RX_ID]); |
@@ -1272,11 +1724,13 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1272 | if (status & (mask | 0x1)) { | 1724 | if (status & (mask | 0x1)) { |
1273 | struct cnic_ops *c_ops = NULL; | 1725 | struct cnic_ops *c_ops = NULL; |
1274 | 1726 | ||
1275 | rcu_read_lock(); | 1727 | if (likely(bp->state == BNX2X_STATE_OPEN)) { |
1276 | c_ops = rcu_dereference(bp->cnic_ops); | 1728 | rcu_read_lock(); |
1277 | if (c_ops) | 1729 | c_ops = rcu_dereference(bp->cnic_ops); |
1278 | c_ops->cnic_handler(bp->cnic_data, NULL); | 1730 | if (c_ops) |
1279 | rcu_read_unlock(); | 1731 | c_ops->cnic_handler(bp->cnic_data, NULL); |
1732 | rcu_read_unlock(); | ||
1733 | } | ||
1280 | 1734 | ||
1281 | status &= ~mask; | 1735 | status &= ~mask; |
1282 | } | 1736 | } |
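The hunk above is the RCU read side for bp->cnic_ops (rcu_read_lock() / rcu_dereference()). For orientation only, a hypothetical writer side would publish and retire that pointer roughly as below; the real registration path lives in the CNIC glue code, not in this patch:

        /* Hypothetical writer-side sketch pairing with the rcu_dereference()
         * read above; names and placement are illustrative only. */
        static void demo_register_cnic_ops(struct bnx2x *bp, struct cnic_ops *ops)
        {
                rcu_assign_pointer(bp->cnic_ops, ops);  /* publish to readers */
        }

        static void demo_unregister_cnic_ops(struct bnx2x *bp)
        {
                rcu_assign_pointer(bp->cnic_ops, NULL); /* stop new readers */
                synchronize_rcu();                      /* wait out old readers */
        }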
@@ -1297,9 +1751,6 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1297 | return IRQ_HANDLED; | 1751 | return IRQ_HANDLED; |
1298 | } | 1752 | } |
1299 | 1753 | ||
1300 | /* end of fast path */ | ||
1301 | |||
1302 | |||
1303 | /* Link */ | 1754 | /* Link */ |
1304 | 1755 | ||
1305 | /* | 1756 | /* |
@@ -1939,11 +2390,11 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
1939 | pause_enabled); | 2390 | pause_enabled); |
1940 | } | 2391 | } |
1941 | 2392 | ||
1942 | if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { | 2393 | if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { |
1943 | struct host_port_stats *pstats; | 2394 | struct host_port_stats *pstats; |
1944 | 2395 | ||
1945 | pstats = bnx2x_sp(bp, port_stats); | 2396 | pstats = bnx2x_sp(bp, port_stats); |
1946 | /* reset old bmac stats */ | 2397 | /* reset old mac stats */ |
1947 | memset(&(pstats->mac_stx[0]), 0, | 2398 | memset(&(pstats->mac_stx[0]), 0, |
1948 | sizeof(struct mac_stx)); | 2399 | sizeof(struct mac_stx)); |
1949 | } | 2400 | } |
@@ -1998,7 +2449,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp) | |||
1998 | if (bp->common.int_block == INT_BLOCK_HC) { | 2449 | if (bp->common.int_block == INT_BLOCK_HC) { |
1999 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); | 2450 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); |
2000 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); | 2451 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); |
2001 | } else if (CHIP_IS_E2(bp)) { | 2452 | } else if (!CHIP_IS_E1x(bp)) { |
2002 | REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); | 2453 | REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); |
2003 | REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); | 2454 | REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); |
2004 | } | 2455 | } |
@@ -2059,45 +2510,89 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) | |||
2059 | static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp) | 2510 | static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp) |
2060 | { | 2511 | { |
2061 | #ifdef BCM_CNIC | 2512 | #ifdef BCM_CNIC |
2062 | if (IS_FCOE_FP(fp) && IS_MF(bp)) | 2513 | /* Statistics are not supported for CNIC Clients at the moment */ |
2514 | if (IS_FCOE_FP(fp)) | ||
2063 | return false; | 2515 | return false; |
2064 | #endif | 2516 | #endif |
2065 | return true; | 2517 | return true; |
2066 | } | 2518 | } |
2067 | 2519 | ||
2068 | static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp, | 2520 | void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) |
2069 | struct bnx2x_fastpath *fp) | 2521 | { |
2522 | if (CHIP_IS_E1x(bp)) { | ||
2523 | struct tstorm_eth_function_common_config tcfg = {0}; | ||
2524 | |||
2525 | storm_memset_func_cfg(bp, &tcfg, p->func_id); | ||
2526 | } | ||
2527 | |||
2528 | /* Enable the function in the FW */ | ||
2529 | storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); | ||
2530 | storm_memset_func_en(bp, p->func_id, 1); | ||
2531 | |||
2532 | /* spq */ | ||
2533 | if (p->func_flgs & FUNC_FLG_SPQ) { | ||
2534 | storm_memset_spq_addr(bp, p->spq_map, p->func_id); | ||
2535 | REG_WR(bp, XSEM_REG_FAST_MEMORY + | ||
2536 | XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); | ||
2537 | } | ||
2538 | } | ||
2539 | |||
2540 | static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp, | ||
2541 | struct bnx2x_fastpath *fp, | ||
2542 | bool leading) | ||
2070 | { | 2543 | { |
2071 | u16 flags = 0; | 2544 | unsigned long flags = 0; |
2072 | 2545 | ||
2073 | /* calculate queue flags */ | 2546 | /* PF driver will always initialize the Queue to an ACTIVE state */ |
2074 | flags |= QUEUE_FLG_CACHE_ALIGN; | 2547 | __set_bit(BNX2X_Q_FLG_ACTIVE, &flags); |
2075 | flags |= QUEUE_FLG_HC; | ||
2076 | flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0; | ||
2077 | 2548 | ||
2078 | flags |= QUEUE_FLG_VLAN; | 2549 | /* calculate other queue flags */ |
2079 | DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); | 2550 | if (IS_MF_SD(bp)) |
2551 | __set_bit(BNX2X_Q_FLG_OV, &flags); | ||
2552 | |||
2553 | if (IS_FCOE_FP(fp)) | ||
2554 | __set_bit(BNX2X_Q_FLG_FCOE, &flags); | ||
2080 | 2555 | ||
2081 | if (!fp->disable_tpa) | 2556 | if (!fp->disable_tpa) |
2082 | flags |= QUEUE_FLG_TPA; | 2557 | __set_bit(BNX2X_Q_FLG_TPA, &flags); |
2558 | |||
2559 | if (stat_counter_valid(bp, fp)) { | ||
2560 | __set_bit(BNX2X_Q_FLG_STATS, &flags); | ||
2561 | __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); | ||
2562 | } | ||
2563 | |||
2564 | if (leading) { | ||
2565 | __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags); | ||
2566 | __set_bit(BNX2X_Q_FLG_MCAST, &flags); | ||
2567 | } | ||
2083 | 2568 | ||
2084 | flags = stat_counter_valid(bp, fp) ? | 2569 | /* Always set HW VLAN stripping */ |
2085 | (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS); | 2570 | __set_bit(BNX2X_Q_FLG_VLAN, &flags); |
2086 | 2571 | ||
2087 | return flags; | 2572 | return flags; |
2088 | } | 2573 | } |
2089 | 2574 | ||
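Since the queue flags are now an unsigned long bitmask built with __set_bit(), consumers are expected to query individual bits with test_bit(). A hypothetical consumer sketch (the real checks presumably live in the new bnx2x_sp.c layer, which is outside this hunk):

        /* Hypothetical consumer of the flags returned by bnx2x_get_q_flags(). */
        static void demo_report_q_flags(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                                        unsigned long flags)
        {
                if (test_bit(BNX2X_Q_FLG_TPA, &flags))
                        DP(NETIF_MSG_IFUP, "queue[%d]: TPA enabled\n", fp->index);

                if (test_bit(BNX2X_Q_FLG_OV, &flags))
                        DP(NETIF_MSG_IFUP, "queue[%d]: outer VLAN (MF SD) set\n",
                           fp->index);
        }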
2090 | static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp, | 2575 | static void bnx2x_pf_q_prep_general(struct bnx2x *bp, |
2576 | struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init) | ||
2577 | { | ||
2578 | gen_init->stat_id = bnx2x_stats_id(fp); | ||
2579 | gen_init->spcl_id = fp->cl_id; | ||
2580 | |||
2581 | /* Always use mini-jumbo MTU for FCoE L2 ring */ | ||
2582 | if (IS_FCOE_FP(fp)) | ||
2583 | gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; | ||
2584 | else | ||
2585 | gen_init->mtu = bp->dev->mtu; | ||
2586 | } | ||
2587 | |||
2588 | static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, | ||
2091 | struct bnx2x_fastpath *fp, struct rxq_pause_params *pause, | 2589 | struct bnx2x_fastpath *fp, struct rxq_pause_params *pause, |
2092 | struct bnx2x_rxq_init_params *rxq_init) | 2590 | struct bnx2x_rxq_setup_params *rxq_init) |
2093 | { | 2591 | { |
2094 | u16 max_sge = 0; | 2592 | u8 max_sge = 0; |
2095 | u16 sge_sz = 0; | 2593 | u16 sge_sz = 0; |
2096 | u16 tpa_agg_size = 0; | 2594 | u16 tpa_agg_size = 0; |
2097 | 2595 | ||
2098 | /* calculate queue flags */ | ||
2099 | u16 flags = bnx2x_get_cl_flags(bp, fp); | ||
2100 | |||
2101 | if (!fp->disable_tpa) { | 2596 | if (!fp->disable_tpa) { |
2102 | pause->sge_th_hi = 250; | 2597 | pause->sge_th_hi = 250; |
2103 | pause->sge_th_lo = 150; | 2598 | pause->sge_th_lo = 150; |
@@ -2118,33 +2613,37 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp, | |||
2118 | pause->bd_th_lo = 250; | 2613 | pause->bd_th_lo = 250; |
2119 | pause->rcq_th_hi = 350; | 2614 | pause->rcq_th_hi = 350; |
2120 | pause->rcq_th_lo = 250; | 2615 | pause->rcq_th_lo = 250; |
2121 | pause->sge_th_hi = 0; | 2616 | |
2122 | pause->sge_th_lo = 0; | ||
2123 | pause->pri_map = 1; | 2617 | pause->pri_map = 1; |
2124 | } | 2618 | } |
2125 | 2619 | ||
2126 | /* rxq setup */ | 2620 | /* rxq setup */ |
2127 | rxq_init->flags = flags; | ||
2128 | rxq_init->cxt = &bp->context.vcxt[fp->cid].eth; | ||
2129 | rxq_init->dscr_map = fp->rx_desc_mapping; | 2621 | rxq_init->dscr_map = fp->rx_desc_mapping; |
2130 | rxq_init->sge_map = fp->rx_sge_mapping; | 2622 | rxq_init->sge_map = fp->rx_sge_mapping; |
2131 | rxq_init->rcq_map = fp->rx_comp_mapping; | 2623 | rxq_init->rcq_map = fp->rx_comp_mapping; |
2132 | rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; | 2624 | rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; |
2133 | 2625 | ||
2134 | /* Always use mini-jumbo MTU for FCoE L2 ring */ | 2626 | /* This should be a maximum number of data bytes that may be |
2135 | if (IS_FCOE_FP(fp)) | 2627 | * placed on the BD (not including paddings). |
2136 | rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; | 2628 | */ |
2137 | else | 2629 | rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN - |
2138 | rxq_init->mtu = bp->dev->mtu; | 2630 | IP_HEADER_ALIGNMENT_PADDING; |
2139 | 2631 | ||
2140 | rxq_init->buf_sz = fp->rx_buf_size; | ||
2141 | rxq_init->cl_qzone_id = fp->cl_qzone_id; | 2632 | rxq_init->cl_qzone_id = fp->cl_qzone_id; |
2142 | rxq_init->cl_id = fp->cl_id; | ||
2143 | rxq_init->spcl_id = fp->cl_id; | ||
2144 | rxq_init->stat_id = fp->cl_id; | ||
2145 | rxq_init->tpa_agg_sz = tpa_agg_size; | 2633 | rxq_init->tpa_agg_sz = tpa_agg_size; |
2146 | rxq_init->sge_buf_sz = sge_sz; | 2634 | rxq_init->sge_buf_sz = sge_sz; |
2147 | rxq_init->max_sges_pkt = max_sge; | 2635 | rxq_init->max_sges_pkt = max_sge; |
2636 | rxq_init->rss_engine_id = BP_FUNC(bp); | ||
2637 | |||
2638 | /* Maximum number of simultaneous TPA aggregations for this Queue. | ||
2639 | * | ||
2640 | * For PF Clients it should be the maximum available number. | ||
2641 | * VF driver(s) may want to define it to a smaller value. | ||
2642 | */ | ||
2643 | rxq_init->max_tpa_queues = | ||
2644 | (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
2645 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2); | ||
2646 | |||
2148 | rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; | 2647 | rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; |
2149 | rxq_init->fw_sb_id = fp->fw_sb_id; | 2648 | rxq_init->fw_sb_id = fp->fw_sb_id; |
2150 | 2649 | ||
@@ -2152,46 +2651,35 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp, | |||
2152 | rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; | 2651 | rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; |
2153 | else | 2652 | else |
2154 | rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX; | 2653 | rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX; |
2155 | |||
2156 | rxq_init->cid = HW_CID(bp, fp->cid); | ||
2157 | |||
2158 | rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0; | ||
2159 | } | 2654 | } |
2160 | 2655 | ||
2161 | static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp, | 2656 | static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, |
2162 | struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init) | 2657 | struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init) |
2163 | { | 2658 | { |
2164 | u16 flags = bnx2x_get_cl_flags(bp, fp); | ||
2165 | |||
2166 | txq_init->flags = flags; | ||
2167 | txq_init->cxt = &bp->context.vcxt[fp->cid].eth; | ||
2168 | txq_init->dscr_map = fp->tx_desc_mapping; | 2659 | txq_init->dscr_map = fp->tx_desc_mapping; |
2169 | txq_init->stat_id = fp->cl_id; | ||
2170 | txq_init->cid = HW_CID(bp, fp->cid); | ||
2171 | txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX; | 2660 | txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX; |
2172 | txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; | 2661 | txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; |
2173 | txq_init->fw_sb_id = fp->fw_sb_id; | 2662 | txq_init->fw_sb_id = fp->fw_sb_id; |
2174 | 2663 | ||
2664 | /* | ||
2665 | * set the tss leading client id for TX classfication == | ||
2666 | * leading RSS client id | ||
2667 | */ | ||
2668 | txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); | ||
2669 | |||
2175 | if (IS_FCOE_FP(fp)) { | 2670 | if (IS_FCOE_FP(fp)) { |
2176 | txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; | 2671 | txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; |
2177 | txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; | 2672 | txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; |
2178 | } | 2673 | } |
2179 | |||
2180 | txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0; | ||
2181 | } | 2674 | } |
2182 | 2675 | ||
2183 | static void bnx2x_pf_init(struct bnx2x *bp) | 2676 | static void bnx2x_pf_init(struct bnx2x *bp) |
2184 | { | 2677 | { |
2185 | struct bnx2x_func_init_params func_init = {0}; | 2678 | struct bnx2x_func_init_params func_init = {0}; |
2186 | struct bnx2x_rss_params rss = {0}; | ||
2187 | struct event_ring_data eq_data = { {0} }; | 2679 | struct event_ring_data eq_data = { {0} }; |
2188 | u16 flags; | 2680 | u16 flags; |
2189 | 2681 | ||
2190 | /* pf specific setups */ | 2682 | if (!CHIP_IS_E1x(bp)) { |
2191 | if (!CHIP_IS_E1(bp)) | ||
2192 | storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp)); | ||
2193 | |||
2194 | if (CHIP_IS_E2(bp)) { | ||
2195 | /* reset IGU PF statistics: MSIX + ATTN */ | 2683 | /* reset IGU PF statistics: MSIX + ATTN */ |
2196 | /* PF */ | 2684 | /* PF */ |
2197 | REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + | 2685 | REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + |
@@ -2209,27 +2697,14 @@ static void bnx2x_pf_init(struct bnx2x *bp) | |||
2209 | /* function setup flags */ | 2697 | /* function setup flags */ |
2210 | flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); | 2698 | flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); |
2211 | 2699 | ||
2212 | if (CHIP_IS_E1x(bp)) | 2700 | /* This flag is relevant for E1x only. |
2213 | flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0; | 2701 | * E2 doesn't have a TPA configuration at the function level. |
2214 | else | ||
2215 | flags |= FUNC_FLG_TPA; | ||
2216 | |||
2217 | /* function setup */ | ||
2218 | |||
2219 | /** | ||
2220 | * Although RSS is meaningless when there is a single HW queue we | ||
2221 | * still need it enabled in order to have HW Rx hash generated. | ||
2222 | */ | 2702 | */ |
2223 | rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP | | 2703 | flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0; |
2224 | RSS_IPV6_CAP | RSS_IPV6_TCP_CAP); | ||
2225 | rss.mode = bp->multi_mode; | ||
2226 | rss.result_mask = MULTI_MASK; | ||
2227 | func_init.rss = &rss; | ||
2228 | 2704 | ||
2229 | func_init.func_flgs = flags; | 2705 | func_init.func_flgs = flags; |
2230 | func_init.pf_id = BP_FUNC(bp); | 2706 | func_init.pf_id = BP_FUNC(bp); |
2231 | func_init.func_id = BP_FUNC(bp); | 2707 | func_init.func_id = BP_FUNC(bp); |
2232 | func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats); | ||
2233 | func_init.spq_map = bp->spq_mapping; | 2708 | func_init.spq_map = bp->spq_mapping; |
2234 | func_init.spq_prod = bp->spq_prod_idx; | 2709 | func_init.spq_prod = bp->spq_prod_idx; |
2235 | 2710 | ||
@@ -2238,11 +2713,11 @@ static void bnx2x_pf_init(struct bnx2x *bp) | |||
2238 | memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); | 2713 | memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); |
2239 | 2714 | ||
2240 | /* | 2715 | /* |
2241 | Congestion management values depend on the link rate. | 2716 | * Congestion management values depend on the link rate. |
2242 | There is no active link so initial link rate is set to 10 Gbps. | 2717 | * There is no active link so initial link rate is set to 10 Gbps. |
2243 | When the link comes up, the congestion management values are | 2718 | * When the link comes up, the congestion management values are |
2244 | re-calculated according to the actual link rate. | 2719 | * re-calculated according to the actual link rate. |
2245 | */ | 2720 | */ |
2246 | bp->link_vars.line_speed = SPEED_10000; | 2721 | bp->link_vars.line_speed = SPEED_10000; |
2247 | bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); | 2722 | bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); |
2248 | 2723 | ||
@@ -2250,10 +2725,6 @@ static void bnx2x_pf_init(struct bnx2x *bp) | |||
2250 | if (bp->port.pmf) | 2725 | if (bp->port.pmf) |
2251 | storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); | 2726 | storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); |
2252 | 2727 | ||
2253 | /* no rx until link is up */ | ||
2254 | bp->rx_mode = BNX2X_RX_MODE_NONE; | ||
2255 | bnx2x_set_storm_rx_mode(bp); | ||
2256 | |||
2257 | /* init Event Queue */ | 2728 | /* init Event Queue */ |
2258 | eq_data.base_addr.hi = U64_HI(bp->eq_mapping); | 2729 | eq_data.base_addr.hi = U64_HI(bp->eq_mapping); |
2259 | eq_data.base_addr.lo = U64_LO(bp->eq_mapping); | 2730 | eq_data.base_addr.lo = U64_LO(bp->eq_mapping); |
@@ -2268,11 +2739,9 @@ static void bnx2x_e1h_disable(struct bnx2x *bp) | |||
2268 | { | 2739 | { |
2269 | int port = BP_PORT(bp); | 2740 | int port = BP_PORT(bp); |
2270 | 2741 | ||
2271 | netif_tx_disable(bp->dev); | 2742 | bnx2x_tx_disable(bp); |
2272 | 2743 | ||
2273 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); | 2744 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); |
2274 | |||
2275 | netif_carrier_off(bp->dev); | ||
2276 | } | 2745 | } |
2277 | 2746 | ||
2278 | static void bnx2x_e1h_enable(struct bnx2x *bp) | 2747 | static void bnx2x_e1h_enable(struct bnx2x *bp) |
@@ -2375,12 +2844,47 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp) | |||
2375 | mmiowb(); | 2844 | mmiowb(); |
2376 | } | 2845 | } |
2377 | 2846 | ||
2378 | /* the slow path queue is odd since completions arrive on the fastpath ring */ | 2847 | /** |
2848 | * bnx2x_is_contextless_ramrod - check if the current command ends on EQ | ||
2849 | * | ||
2850 | * @cmd: command to check | ||
2851 | * @cmd_type: command type | ||
2852 | */ | ||
2853 | static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) | ||
2854 | { | ||
2855 | if ((cmd_type == NONE_CONNECTION_TYPE) || | ||
2856 | (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || | ||
2857 | (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || | ||
2858 | (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || | ||
2859 | (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || | ||
2860 | (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) | ||
2861 | return true; | ||
2862 | else | ||
2863 | return false; | ||
2864 | |||
2865 | } | ||
2866 | |||
2867 | |||
2868 | /** | ||
2869 | * bnx2x_sp_post - place a single command on an SP ring | ||
2870 | * | ||
2871 | * @bp: driver handle | ||
2872 | * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) | ||
2873 | * @cid: SW CID the command is related to | ||
2874 | * @data_hi: command private data address (high 32 bits) | ||
2875 | * @data_lo: command private data address (low 32 bits) | ||
2876 | * @cmd_type: command type (e.g. NONE, ETH) | ||
2877 | * | ||
2878 | * SP data is handled as if it's always an address pair, thus data fields are | ||
2879 | * not swapped to little endian in upper functions. Instead this function swaps | ||
2880 | * data as if it's two u32 fields. | ||
2881 | */ | ||
2379 | int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | 2882 | int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, |
2380 | u32 data_hi, u32 data_lo, int common) | 2883 | u32 data_hi, u32 data_lo, int cmd_type) |
2381 | { | 2884 | { |
2382 | struct eth_spe *spe; | 2885 | struct eth_spe *spe; |
2383 | u16 type; | 2886 | u16 type; |
2887 | bool common = bnx2x_is_contextless_ramrod(command, cmd_type); | ||
2384 | 2888 | ||
2385 | #ifdef BNX2X_STOP_ON_ERROR | 2889 | #ifdef BNX2X_STOP_ON_ERROR |
2386 | if (unlikely(bp->panic)) | 2890 | if (unlikely(bp->panic)) |
@@ -2410,17 +2914,7 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | |||
2410 | cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | | 2914 | cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | |
2411 | HW_CID(bp, cid)); | 2915 | HW_CID(bp, cid)); |
2412 | 2916 | ||
2413 | if (common) | 2917 | type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; |
2414 | /* Common ramrods: | ||
2415 | * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC | ||
2416 | * TRAFFIC_STOP, TRAFFIC_START | ||
2417 | */ | ||
2418 | type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT) | ||
2419 | & SPE_HDR_CONN_TYPE; | ||
2420 | else | ||
2421 | /* ETH ramrods: SETUP, HALT */ | ||
2422 | type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT) | ||
2423 | & SPE_HDR_CONN_TYPE; | ||
2424 | 2918 | ||
2425 | type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & | 2919 | type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & |
2426 | SPE_HDR_FUNCTION_ID); | 2920 | SPE_HDR_FUNCTION_ID); |
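A hypothetical caller sketch for the updated bnx2x_sp_post() signature, splitting a DMA-mapped ramrod data address into the high/low halves exactly as the comment above describes (the command and connection type shown are placeholders):

        /* Hypothetical caller; illustrates the data_hi/data_lo convention only. */
        static int demo_post_ramrod(struct bnx2x *bp, int cid,
                                    dma_addr_t data_mapping)
        {
                return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, cid,
                                     U64_HI(data_mapping), U64_LO(data_mapping),
                                     ETH_CONNECTION_TYPE);
        }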
@@ -2432,7 +2926,8 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | |||
2432 | 2926 | ||
2433 | /* stats ramrod has its own slot on the spq */ | 2927 | /* stats ramrod has its own slot on the spq */ |
2434 | if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) { | 2928 | if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) { |
2435 | /* It's ok if the actual decrement is issued towards the memory | 2929 | /* |
2930 | * It's ok if the actual decrement is issued towards the memory | ||
2436 | * somewhere between the spin_lock and spin_unlock. Thus no | 2931 | * somewhere between the spin_lock and spin_unlock. Thus no |
2437 | * more explicit memory barrier is needed. | 2932 | * more explicit memory barrier is needed. |
2438 | */ | 2933 | */ |
@@ -2728,13 +3223,13 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) | |||
2728 | } | 3223 | } |
2729 | 3224 | ||
2730 | if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { | 3225 | if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { |
2731 | |||
2732 | val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); | 3226 | val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); |
2733 | BNX2X_ERR("PXP hw attention 0x%x\n", val); | 3227 | BNX2X_ERR("PXP hw attention-0 0x%x\n", val); |
2734 | /* RQ_USDMDP_FIFO_OVERFLOW */ | 3228 | /* RQ_USDMDP_FIFO_OVERFLOW */ |
2735 | if (val & 0x18000) | 3229 | if (val & 0x18000) |
2736 | BNX2X_ERR("FATAL error from PXP\n"); | 3230 | BNX2X_ERR("FATAL error from PXP\n"); |
2737 | if (CHIP_IS_E2(bp)) { | 3231 | |
3232 | if (!CHIP_IS_E1x(bp)) { | ||
2738 | val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); | 3233 | val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); |
2739 | BNX2X_ERR("PXP hw attention-1 0x%x\n", val); | 3234 | BNX2X_ERR("PXP hw attention-1 0x%x\n", val); |
2740 | } | 3235 | } |
@@ -3224,7 +3719,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
3224 | attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); | 3719 | attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); |
3225 | attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); | 3720 | attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); |
3226 | attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); | 3721 | attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); |
3227 | if (CHIP_IS_E2(bp)) | 3722 | if (!CHIP_IS_E1x(bp)) |
3228 | attn.sig[4] = | 3723 | attn.sig[4] = |
3229 | REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); | 3724 | REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); |
3230 | else | 3725 | else |
@@ -3320,6 +3815,15 @@ static void bnx2x_attn_int(struct bnx2x *bp) | |||
3320 | bnx2x_attn_int_deasserted(bp, deasserted); | 3815 | bnx2x_attn_int_deasserted(bp, deasserted); |
3321 | } | 3816 | } |
3322 | 3817 | ||
3818 | void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, | ||
3819 | u16 index, u8 op, u8 update) | ||
3820 | { | ||
3821 | u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; | ||
3822 | |||
3823 | bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, | ||
3824 | igu_addr); | ||
3825 | } | ||
3826 | |||
3323 | static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) | 3827 | static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) |
3324 | { | 3828 | { |
3325 | /* No memory barriers */ | 3829 | /* No memory barriers */ |
@@ -3331,6 +3835,8 @@ static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) | |||
3331 | static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, | 3835 | static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, |
3332 | union event_ring_elem *elem) | 3836 | union event_ring_elem *elem) |
3333 | { | 3837 | { |
3838 | u8 err = elem->message.error; | ||
3839 | |||
3334 | if (!bp->cnic_eth_dev.starting_cid || | 3840 | if (!bp->cnic_eth_dev.starting_cid || |
3335 | (cid < bp->cnic_eth_dev.starting_cid && | 3841 | (cid < bp->cnic_eth_dev.starting_cid && |
3336 | cid != bp->cnic_eth_dev.iscsi_l2_cid)) | 3842 | cid != bp->cnic_eth_dev.iscsi_l2_cid)) |
@@ -3338,16 +3844,122 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, | |||
3338 | 3844 | ||
3339 | DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); | 3845 | DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); |
3340 | 3846 | ||
3341 | if (unlikely(elem->message.data.cfc_del_event.error)) { | 3847 | if (unlikely(err)) { |
3848 | |||
3342 | BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", | 3849 | BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", |
3343 | cid); | 3850 | cid); |
3344 | bnx2x_panic_dump(bp); | 3851 | bnx2x_panic_dump(bp); |
3345 | } | 3852 | } |
3346 | bnx2x_cnic_cfc_comp(bp, cid); | 3853 | bnx2x_cnic_cfc_comp(bp, cid, err); |
3347 | return 0; | 3854 | return 0; |
3348 | } | 3855 | } |
3349 | #endif | 3856 | #endif |
3350 | 3857 | ||
3858 | static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp) | ||
3859 | { | ||
3860 | struct bnx2x_mcast_ramrod_params rparam; | ||
3861 | int rc; | ||
3862 | |||
3863 | memset(&rparam, 0, sizeof(rparam)); | ||
3864 | |||
3865 | rparam.mcast_obj = &bp->mcast_obj; | ||
3866 | |||
3867 | netif_addr_lock_bh(bp->dev); | ||
3868 | |||
3869 | /* Clear pending state for the last command */ | ||
3870 | bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); | ||
3871 | |||
3872 | /* If there are pending mcast commands - send them */ | ||
3873 | if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { | ||
3874 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); | ||
3875 | if (rc < 0) | ||
3876 | BNX2X_ERR("Failed to send pending mcast commands: %d\n", | ||
3877 | rc); | ||
3878 | } | ||
3879 | |||
3880 | netif_addr_unlock_bh(bp->dev); | ||
3881 | } | ||
3882 | |||
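bnx2x_handle_mcast_eqe() above completes a multicast ramrod in two steps under the netdev address lock: clear the object's pending flag, then, if more multicast commands were queued while the previous one was in flight, immediately issue a BNX2X_MCAST_CMD_CONT so the backlog keeps draining. A stripped-down, lock-free model of that clear-then-continue pattern (plain fields instead of the driver's raw/mcast objects, names invented for the example):

    #include <stdbool.h>
    #include <stdio.h>

    struct mcast_obj {
        bool pending;   /* a command is in flight */
        int  queued;    /* commands waiting to be sent */
    };

    /* Called from the completion path: ack the finished command and,
     * if anything piled up meanwhile, kick the next one right away. */
    static void handle_mcast_completion(struct mcast_obj *o)
    {
        o->pending = false;             /* clear_pending() */

        if (o->queued > 0) {            /* check_pending() */
            o->queued--;
            o->pending = true;          /* "send" the continuation */
            printf("sent continuation, %d still queued\n", o->queued);
        }
    }

    int main(void)
    {
        struct mcast_obj o = { .pending = true, .queued = 2 };

        while (o.pending)
            handle_mcast_completion(&o);
        return 0;
    }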
3883 | static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp, | ||
3884 | union event_ring_elem *elem) | ||
3885 | { | ||
3886 | unsigned long ramrod_flags = 0; | ||
3887 | int rc = 0; | ||
3888 | u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; | ||
3889 | struct bnx2x_vlan_mac_obj *vlan_mac_obj; | ||
3890 | |||
3891 | /* Always push next commands out, don't wait here */ | ||
3892 | __set_bit(RAMROD_CONT, &ramrod_flags); | ||
3893 | |||
3894 | switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { | ||
3895 | case BNX2X_FILTER_MAC_PENDING: | ||
3896 | #ifdef BCM_CNIC | ||
3897 | if (cid == BNX2X_ISCSI_ETH_CID) | ||
3898 | vlan_mac_obj = &bp->iscsi_l2_mac_obj; | ||
3899 | else | ||
3900 | #endif | ||
3901 | vlan_mac_obj = &bp->fp[cid].mac_obj; | ||
3902 | |||
3903 | break; | ||
3905 | |||
3906 | case BNX2X_FILTER_MCAST_PENDING: | ||
3907 | /* This is only relevant for 57710 where multicast MACs are | ||
3908 | * configured as unicast MACs using the same ramrod. | ||
3909 | */ | ||
3910 | bnx2x_handle_mcast_eqe(bp); | ||
3911 | return; | ||
3912 | default: | ||
3913 | BNX2X_ERR("Unsupported classification command: %d\n", | ||
3914 | elem->message.data.eth_event.echo); | ||
3915 | return; | ||
3916 | } | ||
3917 | |||
3918 | rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); | ||
3919 | |||
3920 | if (rc < 0) | ||
3921 | BNX2X_ERR("Failed to schedule new commands: %d\n", rc); | ||
3922 | else if (rc > 0) | ||
3923 | DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); | ||
3924 | |||
3925 | } | ||
3926 | |||
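The classification handler above demultiplexes a single completion by the ramrod's echo field: the low bits (BNX2X_SWCID_MASK) carry the software CID that selects the per-queue MAC object (or the iSCSI L2 object), while the bits above BNX2X_SWCID_SHIFT say which filter type was pending. A minimal sketch of packing and unpacking such an echo word; the mask, shift and command values below are invented for the example, not the driver's definitions:

    #include <stdio.h>

    /* Invented layout: low 17 bits = software CID, upper bits = command type */
    #define SWCID_MASK  0x1ffffu
    #define SWCID_SHIFT 17

    enum filter_cmd { FILTER_MAC_PENDING = 1, FILTER_MCAST_PENDING = 2 };

    static unsigned int pack_echo(unsigned int cid, enum filter_cmd cmd)
    {
        return (cid & SWCID_MASK) | ((unsigned int)cmd << SWCID_SHIFT);
    }

    int main(void)
    {
        unsigned int echo = pack_echo(5, FILTER_MAC_PENDING);
        unsigned int cid = echo & SWCID_MASK;      /* which connection */
        unsigned int cmd = echo >> SWCID_SHIFT;    /* which pending filter */

        printf("echo 0x%x -> cid %u, cmd %u\n", echo, cid, cmd);
        return 0;
    }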
3927 | #ifdef BCM_CNIC | ||
3928 | static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); | ||
3929 | #endif | ||
3930 | |||
3931 | static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) | ||
3932 | { | ||
3933 | netif_addr_lock_bh(bp->dev); | ||
3934 | |||
3935 | clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); | ||
3936 | |||
3937 | /* Send rx_mode command again if was requested */ | ||
3938 | if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) | ||
3939 | bnx2x_set_storm_rx_mode(bp); | ||
3940 | #ifdef BCM_CNIC | ||
3941 | else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, | ||
3942 | &bp->sp_state)) | ||
3943 | bnx2x_set_iscsi_eth_rx_mode(bp, true); | ||
3944 | else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, | ||
3945 | &bp->sp_state)) | ||
3946 | bnx2x_set_iscsi_eth_rx_mode(bp, false); | ||
3947 | #endif | ||
3948 | |||
3949 | netif_addr_unlock_bh(bp->dev); | ||
3950 | } | ||
3951 | |||
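bnx2x_handle_rx_mode_eqe() above re-arms deferred rx-mode work with bits in bp->sp_state: a PENDING bit marks the ramrod currently in flight, and the *_SCHED bits remember configuration requests that arrived in the meantime so they can be replayed once the completion clears PENDING. A minimal single-threaded model of that handshake (plain bit flags with invented names; the real driver uses atomic bitops under the netdev address lock):

    #include <stdio.h>

    #define F_PENDING (1u << 0)   /* rx_mode ramrod outstanding */
    #define F_SCHED   (1u << 1)   /* another rx_mode change requested meanwhile */

    static unsigned int sp_state;

    static void request_rx_mode(void)
    {
        if (sp_state & F_PENDING) {
            sp_state |= F_SCHED;          /* defer: one is already in flight */
            return;
        }
        sp_state |= F_PENDING;
        printf("rx_mode ramrod sent\n");
    }

    static void rx_mode_completion(void)
    {
        sp_state &= ~F_PENDING;
        if (sp_state & F_SCHED) {         /* replay the deferred request */
            sp_state &= ~F_SCHED;
            request_rx_mode();
        }
    }

    int main(void)
    {
        request_rx_mode();        /* sent immediately */
        request_rx_mode();        /* deferred behind the pending one */
        rx_mode_completion();     /* completion replays the deferred request */
        rx_mode_completion();
        return 0;
    }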
3952 | static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( | ||
3953 | struct bnx2x *bp, u32 cid) | ||
3954 | { | ||
3955 | #ifdef BCM_CNIC | ||
3956 | if (cid == BNX2X_FCOE_ETH_CID) | ||
3957 | return &bnx2x_fcoe(bp, q_obj); | ||
3958 | else | ||
3959 | #endif | ||
3960 | return &bnx2x_fp(bp, cid, q_obj); | ||
3961 | } | ||
3962 | |||
3351 | static void bnx2x_eq_int(struct bnx2x *bp) | 3963 | static void bnx2x_eq_int(struct bnx2x *bp) |
3352 | { | 3964 | { |
3353 | u16 hw_cons, sw_cons, sw_prod; | 3965 | u16 hw_cons, sw_cons, sw_prod; |
@@ -3355,6 +3967,9 @@ static void bnx2x_eq_int(struct bnx2x *bp) | |||
3355 | u32 cid; | 3967 | u32 cid; |
3356 | u8 opcode; | 3968 | u8 opcode; |
3357 | int spqe_cnt = 0; | 3969 | int spqe_cnt = 0; |
3970 | struct bnx2x_queue_sp_obj *q_obj; | ||
3971 | struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; | ||
3972 | struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; | ||
3358 | 3973 | ||
3359 | hw_cons = le16_to_cpu(*bp->eq_cons_sb); | 3974 | hw_cons = le16_to_cpu(*bp->eq_cons_sb); |
3360 | 3975 | ||
@@ -3389,7 +4004,8 @@ static void bnx2x_eq_int(struct bnx2x *bp) | |||
3389 | /* handle eq element */ | 4004 | /* handle eq element */ |
3390 | switch (opcode) { | 4005 | switch (opcode) { |
3391 | case EVENT_RING_OPCODE_STAT_QUERY: | 4006 | case EVENT_RING_OPCODE_STAT_QUERY: |
3392 | DP(NETIF_MSG_TIMER, "got statistics comp event\n"); | 4007 | DP(NETIF_MSG_TIMER, "got statistics comp event %d\n", |
4008 | bp->stats_comp++); | ||
3393 | /* nothing to do with stats comp */ | 4009 | /* nothing to do with stats comp */ |
3394 | continue; | 4010 | continue; |
3395 | 4011 | ||
@@ -3404,12 +4020,13 @@ static void bnx2x_eq_int(struct bnx2x *bp) | |||
3404 | #ifdef BCM_CNIC | 4020 | #ifdef BCM_CNIC |
3405 | if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) | 4021 | if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) |
3406 | goto next_spqe; | 4022 | goto next_spqe; |
3407 | if (cid == BNX2X_FCOE_ETH_CID) | ||
3408 | bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED; | ||
3409 | else | ||
3410 | #endif | 4023 | #endif |
3411 | bnx2x_fp(bp, cid, state) = | 4024 | q_obj = bnx2x_cid_to_q_obj(bp, cid); |
3412 | BNX2X_FP_STATE_CLOSED; | 4025 | |
4026 | if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) | ||
4027 | break; | ||
4028 | |||
4029 | |||
3413 | 4030 | ||
3414 | goto next_spqe; | 4031 | goto next_spqe; |
3415 | 4032 | ||
@@ -3417,42 +4034,75 @@ static void bnx2x_eq_int(struct bnx2x *bp) | |||
3417 | DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n"); | 4034 | DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n"); |
3418 | bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); | 4035 | bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); |
3419 | goto next_spqe; | 4036 | goto next_spqe; |
4037 | |||
3420 | case EVENT_RING_OPCODE_START_TRAFFIC: | 4038 | case EVENT_RING_OPCODE_START_TRAFFIC: |
3421 | DP(NETIF_MSG_IFUP, "got START TRAFFIC\n"); | 4039 | DP(NETIF_MSG_IFUP, "got START TRAFFIC\n"); |
3422 | bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); | 4040 | bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); |
3423 | goto next_spqe; | 4041 | goto next_spqe; |
4042 | case EVENT_RING_OPCODE_FUNCTION_START: | ||
4043 | DP(NETIF_MSG_IFUP, "got FUNC_START ramrod\n"); | ||
4044 | if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) | ||
4045 | break; | ||
4046 | |||
4047 | goto next_spqe; | ||
4048 | |||
4049 | case EVENT_RING_OPCODE_FUNCTION_STOP: | ||
4050 | DP(NETIF_MSG_IFDOWN, "got FUNC_STOP ramrod\n"); | ||
4051 | if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) | ||
4052 | break; | ||
4053 | |||
4054 | goto next_spqe; | ||
3424 | } | 4055 | } |
3425 | 4056 | ||
3426 | switch (opcode | bp->state) { | 4057 | switch (opcode | bp->state) { |
3427 | case (EVENT_RING_OPCODE_FUNCTION_START | | 4058 | case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | |
4059 | BNX2X_STATE_OPEN): | ||
4060 | case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | | ||
3428 | BNX2X_STATE_OPENING_WAIT4_PORT): | 4061 | BNX2X_STATE_OPENING_WAIT4_PORT): |
3429 | DP(NETIF_MSG_IFUP, "got setup ramrod\n"); | 4062 | cid = elem->message.data.eth_event.echo & |
3430 | bp->state = BNX2X_STATE_FUNC_STARTED; | 4063 | BNX2X_SWCID_MASK; |
4064 | DP(NETIF_MSG_IFUP, "got RSS_UPDATE ramrod. CID %d\n", | ||
4065 | cid); | ||
4066 | rss_raw->clear_pending(rss_raw); | ||
3431 | break; | 4067 | break; |
3432 | 4068 | ||
3433 | case (EVENT_RING_OPCODE_FUNCTION_STOP | | 4069 | case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): |
4070 | case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): | ||
4071 | case (EVENT_RING_OPCODE_SET_MAC | | ||
4072 | BNX2X_STATE_CLOSING_WAIT4_HALT): | ||
4073 | case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | | ||
4074 | BNX2X_STATE_OPEN): | ||
4075 | case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | | ||
4076 | BNX2X_STATE_DIAG): | ||
4077 | case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | | ||
3434 | BNX2X_STATE_CLOSING_WAIT4_HALT): | 4078 | BNX2X_STATE_CLOSING_WAIT4_HALT): |
3435 | DP(NETIF_MSG_IFDOWN, "got halt ramrod\n"); | 4079 | DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n"); |
3436 | bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; | 4080 | bnx2x_handle_classification_eqe(bp, elem); |
3437 | break; | 4081 | break; |
3438 | 4082 | ||
3439 | case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): | 4083 | case (EVENT_RING_OPCODE_MULTICAST_RULES | |
3440 | case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): | 4084 | BNX2X_STATE_OPEN): |
3441 | DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); | 4085 | case (EVENT_RING_OPCODE_MULTICAST_RULES | |
3442 | if (elem->message.data.set_mac_event.echo) | 4086 | BNX2X_STATE_DIAG): |
3443 | bp->set_mac_pending = 0; | 4087 | case (EVENT_RING_OPCODE_MULTICAST_RULES | |
4088 | BNX2X_STATE_CLOSING_WAIT4_HALT): | ||
4089 | DP(NETIF_MSG_IFUP, "got mcast ramrod\n"); | ||
4090 | bnx2x_handle_mcast_eqe(bp); | ||
3444 | break; | 4091 | break; |
3445 | 4092 | ||
3446 | case (EVENT_RING_OPCODE_SET_MAC | | 4093 | case (EVENT_RING_OPCODE_FILTERS_RULES | |
4094 | BNX2X_STATE_OPEN): | ||
4095 | case (EVENT_RING_OPCODE_FILTERS_RULES | | ||
4096 | BNX2X_STATE_DIAG): | ||
4097 | case (EVENT_RING_OPCODE_FILTERS_RULES | | ||
3447 | BNX2X_STATE_CLOSING_WAIT4_HALT): | 4098 | BNX2X_STATE_CLOSING_WAIT4_HALT): |
3448 | DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); | 4099 | DP(NETIF_MSG_IFUP, "got rx_mode ramrod\n"); |
3449 | if (elem->message.data.set_mac_event.echo) | 4100 | bnx2x_handle_rx_mode_eqe(bp); |
3450 | bp->set_mac_pending = 0; | ||
3451 | break; | 4101 | break; |
3452 | default: | 4102 | default: |
3453 | /* unknown event log error and continue */ | 4103 | /* unknown event log error and continue */ |
3454 | BNX2X_ERR("Unknown EQ event %d\n", | 4104 | BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", |
3455 | elem->message.opcode); | 4105 | elem->message.opcode, bp->state); |
3456 | } | 4106 | } |
3457 | next_spqe: | 4107 | next_spqe: |
3458 | spqe_cnt++; | 4108 | spqe_cnt++; |
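The reworked bnx2x_eq_int() above is a standard ring consumer: the firmware publishes a hardware consumer index in the status block, the driver walks its software consumer up to it, dispatches every element first by bare opcode and then by opcode | bp->state, and finally (just past this hunk) writes the updated producer back to the chip. A compact standalone model of that walk, with an invented ring layout and opcode set:

    #include <stdio.h>

    #define NUM_EQ_DESC 8                 /* illustrative ring size */

    struct eq_elem { int opcode; };

    static struct eq_elem ring[NUM_EQ_DESC];

    static void eq_int(unsigned int *sw_cons, unsigned int hw_cons)
    {
        int spqe_cnt = 0;

        while (*sw_cons != hw_cons) {
            struct eq_elem *elem = &ring[*sw_cons % NUM_EQ_DESC];

            switch (elem->opcode) {       /* dispatch by opcode */
            case 1:
                printf("stat query\n");
                break;
            case 2:
                printf("cfc delete\n");
                break;
            default:                      /* unknown event: log and go on */
                printf("opcode %d\n", elem->opcode);
                break;
            }
            (*sw_cons)++;                 /* next_spqe */
            spqe_cnt++;
        }
        printf("handled %d elements, cons now %u\n", spqe_cnt, *sw_cons);
        /* the real driver now publishes the new producer/consumer to the chip */
    }

    int main(void)
    {
        unsigned int sw_cons = 0;

        for (int i = 0; i < 5; i++)
            ring[i].opcode = i;
        eq_int(&sw_cons, 5);              /* firmware produced five elements */
        return 0;
    }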
@@ -3544,6 +4194,14 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) | |||
3544 | 4194 | ||
3545 | /* end of slow path */ | 4195 | /* end of slow path */ |
3546 | 4196 | ||
4197 | |||
4198 | void bnx2x_drv_pulse(struct bnx2x *bp) | ||
4199 | { | ||
4200 | SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, | ||
4201 | bp->fw_drv_pulse_wr_seq); | ||
4202 | } | ||
4203 | |||
4204 | |||
3547 | static void bnx2x_timer(unsigned long data) | 4205 | static void bnx2x_timer(unsigned long data) |
3548 | { | 4206 | { |
3549 | struct bnx2x *bp = (struct bnx2x *) data; | 4207 | struct bnx2x *bp = (struct bnx2x *) data; |
@@ -3567,7 +4225,7 @@ static void bnx2x_timer(unsigned long data) | |||
3567 | bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; | 4225 | bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; |
3568 | /* TBD - add SYSTEM_TIME */ | 4226 | /* TBD - add SYSTEM_TIME */ |
3569 | drv_pulse = bp->fw_drv_pulse_wr_seq; | 4227 | drv_pulse = bp->fw_drv_pulse_wr_seq; |
3570 | SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse); | 4228 | bnx2x_drv_pulse(bp); |
3571 | 4229 | ||
3572 | mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & | 4230 | mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & |
3573 | MCP_PULSE_SEQ_MASK); | 4231 | MCP_PULSE_SEQ_MASK); |
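The timer hunk above now routes the driver heartbeat through the new bnx2x_drv_pulse() helper: the driver bumps its own sequence number (masked to DRV_PULSE_SEQ_MASK) into shared memory and reads the management firmware's counter back; the surrounding code, not shown in this hunk, treats a drift of more than one step between the two as a lost heartbeat. A minimal model of that comparison, with an assumed 16-bit sequence mask standing in for the real one:

    #include <stdio.h>
    #include <stdbool.h>

    #define PULSE_SEQ_MASK 0xffffu        /* assumed sequence-field width */

    /* Heartbeat is healthy when the driver's counter equals the firmware's,
     * or is exactly one step ahead (pulse written, response not yet seen). */
    static bool pulse_ok(unsigned int drv_pulse, unsigned int mcp_pulse)
    {
        return drv_pulse == mcp_pulse ||
               drv_pulse == ((mcp_pulse + 1) & PULSE_SEQ_MASK);
    }

    int main(void)
    {
        printf("%d\n", pulse_ok(10, 10));            /* 1: in sync */
        printf("%d\n", pulse_ok(11, 10));            /* 1: driver one ahead */
        printf("%d\n", pulse_ok(14, 10));            /* 0: firmware stalled */
        printf("%d\n", pulse_ok(0, PULSE_SEQ_MASK)); /* 1: wrap-around */
        return 0;
    }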
@@ -3630,18 +4288,16 @@ static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) | |||
3630 | struct hc_status_block_data_e1x sb_data_e1x; | 4288 | struct hc_status_block_data_e1x sb_data_e1x; |
3631 | 4289 | ||
3632 | /* disable the function first */ | 4290 | /* disable the function first */ |
3633 | if (CHIP_IS_E2(bp)) { | 4291 | if (!CHIP_IS_E1x(bp)) { |
3634 | memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); | 4292 | memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); |
3635 | sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED; | 4293 | sb_data_e2.common.state = SB_DISABLED; |
3636 | sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED; | ||
3637 | sb_data_e2.common.p_func.vf_valid = false; | 4294 | sb_data_e2.common.p_func.vf_valid = false; |
3638 | sb_data_p = (u32 *)&sb_data_e2; | 4295 | sb_data_p = (u32 *)&sb_data_e2; |
3639 | data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); | 4296 | data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); |
3640 | } else { | 4297 | } else { |
3641 | memset(&sb_data_e1x, 0, | 4298 | memset(&sb_data_e1x, 0, |
3642 | sizeof(struct hc_status_block_data_e1x)); | 4299 | sizeof(struct hc_status_block_data_e1x)); |
3643 | sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED; | 4300 | sb_data_e1x.common.state = SB_DISABLED; |
3644 | sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED; | ||
3645 | sb_data_e1x.common.p_func.vf_valid = false; | 4301 | sb_data_e1x.common.p_func.vf_valid = false; |
3646 | sb_data_p = (u32 *)&sb_data_e1x; | 4302 | sb_data_p = (u32 *)&sb_data_e1x; |
3647 | data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); | 4303 | data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); |
@@ -3675,8 +4331,7 @@ static inline void bnx2x_zero_sp_sb(struct bnx2x *bp) | |||
3675 | struct hc_sp_status_block_data sp_sb_data; | 4331 | struct hc_sp_status_block_data sp_sb_data; |
3676 | memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); | 4332 | memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); |
3677 | 4333 | ||
3678 | sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED; | 4334 | sp_sb_data.state = SB_DISABLED; |
3679 | sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED; | ||
3680 | sp_sb_data.p_func.vf_valid = false; | 4335 | sp_sb_data.p_func.vf_valid = false; |
3681 | 4336 | ||
3682 | bnx2x_wr_sp_sb_data(bp, &sp_sb_data); | 4337 | bnx2x_wr_sp_sb_data(bp, &sp_sb_data); |
@@ -3719,8 +4374,9 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | |||
3719 | 4374 | ||
3720 | bnx2x_zero_fp_sb(bp, fw_sb_id); | 4375 | bnx2x_zero_fp_sb(bp, fw_sb_id); |
3721 | 4376 | ||
3722 | if (CHIP_IS_E2(bp)) { | 4377 | if (!CHIP_IS_E1x(bp)) { |
3723 | memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); | 4378 | memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); |
4379 | sb_data_e2.common.state = SB_ENABLED; | ||
3724 | sb_data_e2.common.p_func.pf_id = BP_FUNC(bp); | 4380 | sb_data_e2.common.p_func.pf_id = BP_FUNC(bp); |
3725 | sb_data_e2.common.p_func.vf_id = vfid; | 4381 | sb_data_e2.common.p_func.vf_id = vfid; |
3726 | sb_data_e2.common.p_func.vf_valid = vf_valid; | 4382 | sb_data_e2.common.p_func.vf_valid = vf_valid; |
@@ -3734,6 +4390,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | |||
3734 | } else { | 4390 | } else { |
3735 | memset(&sb_data_e1x, 0, | 4391 | memset(&sb_data_e1x, 0, |
3736 | sizeof(struct hc_status_block_data_e1x)); | 4392 | sizeof(struct hc_status_block_data_e1x)); |
4393 | sb_data_e1x.common.state = SB_ENABLED; | ||
3737 | sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp); | 4394 | sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp); |
3738 | sb_data_e1x.common.p_func.vf_id = 0xff; | 4395 | sb_data_e1x.common.p_func.vf_id = 0xff; |
3739 | sb_data_e1x.common.p_func.vf_valid = false; | 4396 | sb_data_e1x.common.p_func.vf_valid = false; |
@@ -3757,19 +4414,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | |||
3757 | bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); | 4414 | bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); |
3758 | } | 4415 | } |
3759 | 4416 | ||
3760 | void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id, | 4417 | static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id, |
3761 | u8 sb_index, u8 disable, u16 usec) | ||
3762 | { | ||
3763 | int port = BP_PORT(bp); | ||
3764 | u8 ticks = usec / BNX2X_BTR; | ||
3765 | |||
3766 | storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); | ||
3767 | |||
3768 | disable = disable ? 1 : (usec ? 0 : 1); | ||
3769 | storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); | ||
3770 | } | ||
3771 | |||
3772 | static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id, | ||
3773 | u16 tx_usec, u16 rx_usec) | 4418 | u16 tx_usec, u16 rx_usec) |
3774 | { | 4419 | { |
3775 | bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX, | 4420 | bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX, |
@@ -3816,7 +4461,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
3816 | bp->attn_group[index].sig[sindex] = | 4461 | bp->attn_group[index].sig[sindex] = |
3817 | REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); | 4462 | REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); |
3818 | 4463 | ||
3819 | if (CHIP_IS_E2(bp)) | 4464 | if (!CHIP_IS_E1x(bp)) |
3820 | /* | 4465 | /* |
3821 | * enable5 is separate from the rest of the registers, | 4466 | * enable5 is separate from the rest of the registers, |
3822 | * and therefore the address skip is 4 | 4467 | * and therefore the address skip is 4 |
@@ -3834,7 +4479,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
3834 | 4479 | ||
3835 | REG_WR(bp, reg_offset, U64_LO(section)); | 4480 | REG_WR(bp, reg_offset, U64_LO(section)); |
3836 | REG_WR(bp, reg_offset + 4, U64_HI(section)); | 4481 | REG_WR(bp, reg_offset + 4, U64_HI(section)); |
3837 | } else if (CHIP_IS_E2(bp)) { | 4482 | } else if (!CHIP_IS_E1x(bp)) { |
3838 | REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); | 4483 | REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); |
3839 | REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); | 4484 | REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); |
3840 | } | 4485 | } |
@@ -3844,6 +4489,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
3844 | 4489 | ||
3845 | bnx2x_zero_sp_sb(bp); | 4490 | bnx2x_zero_sp_sb(bp); |
3846 | 4491 | ||
4492 | sp_sb_data.state = SB_ENABLED; | ||
3847 | sp_sb_data.host_sb_addr.lo = U64_LO(section); | 4493 | sp_sb_data.host_sb_addr.lo = U64_LO(section); |
3848 | sp_sb_data.host_sb_addr.hi = U64_HI(section); | 4494 | sp_sb_data.host_sb_addr.hi = U64_HI(section); |
3849 | sp_sb_data.igu_sb_id = igu_sp_sb_index; | 4495 | sp_sb_data.igu_sb_id = igu_sp_sb_index; |
@@ -3854,9 +4500,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
3854 | 4500 | ||
3855 | bnx2x_wr_sp_sb_data(bp, &sp_sb_data); | 4501 | bnx2x_wr_sp_sb_data(bp, &sp_sb_data); |
3856 | 4502 | ||
3857 | bp->stats_pending = 0; | ||
3858 | bp->set_mac_pending = 0; | ||
3859 | |||
3860 | bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); | 4503 | bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); |
3861 | } | 4504 | } |
3862 | 4505 | ||
@@ -3902,34 +4545,129 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp) | |||
3902 | min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); | 4545 | min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); |
3903 | } | 4546 | } |
3904 | 4547 | ||
3905 | static void bnx2x_init_ind_table(struct bnx2x *bp) | 4548 | |
4549 | /* called with netif_addr_lock_bh() */ | ||
4550 | void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, | ||
4551 | unsigned long rx_mode_flags, | ||
4552 | unsigned long rx_accept_flags, | ||
4553 | unsigned long tx_accept_flags, | ||
4554 | unsigned long ramrod_flags) | ||
3906 | { | 4555 | { |
3907 | int i; | 4556 | struct bnx2x_rx_mode_ramrod_params ramrod_param; |
4557 | int rc; | ||
4558 | |||
4559 | memset(&ramrod_param, 0, sizeof(ramrod_param)); | ||
3908 | 4560 | ||
3909 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) | 4561 | /* Prepare ramrod parameters */ |
3910 | bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp); | 4562 | ramrod_param.cid = 0; |
4563 | ramrod_param.cl_id = cl_id; | ||
4564 | ramrod_param.rx_mode_obj = &bp->rx_mode_obj; | ||
4565 | ramrod_param.func_id = BP_FUNC(bp); | ||
3911 | 4566 | ||
3912 | bnx2x_push_indir_table(bp); | 4567 | ramrod_param.pstate = &bp->sp_state; |
4568 | ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING; | ||
4569 | |||
4570 | ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata); | ||
4571 | ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata); | ||
4572 | |||
4573 | set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); | ||
4574 | |||
4575 | ramrod_param.ramrod_flags = ramrod_flags; | ||
4576 | ramrod_param.rx_mode_flags = rx_mode_flags; | ||
4577 | |||
4578 | ramrod_param.rx_accept_flags = rx_accept_flags; | ||
4579 | ramrod_param.tx_accept_flags = tx_accept_flags; | ||
4580 | |||
4581 | rc = bnx2x_config_rx_mode(bp, &ramrod_param); | ||
4582 | if (rc < 0) { | ||
4583 | BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); | ||
4584 | return; | ||
4585 | } | ||
3913 | } | 4586 | } |
3914 | 4587 | ||
3915 | static void bnx2x_init_internal_common(struct bnx2x *bp) | 4588 | /* called with netif_addr_lock_bh() */ |
4589 | void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | ||
3916 | { | 4590 | { |
3917 | int i; | 4591 | unsigned long rx_mode_flags = 0, ramrod_flags = 0; |
4592 | unsigned long rx_accept_flags = 0, tx_accept_flags = 0; | ||
3918 | 4593 | ||
3919 | if (!CHIP_IS_E1(bp)) { | 4594 | #ifdef BCM_CNIC |
4595 | if (!NO_FCOE(bp)) | ||
4596 | |||
4597 | /* Configure rx_mode of FCoE Queue */ | ||
4598 | __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); | ||
4599 | #endif | ||
4600 | |||
4601 | switch (bp->rx_mode) { | ||
4602 | case BNX2X_RX_MODE_NONE: | ||
4603 | /* | ||
4604 | * 'drop all' supersedes any accept flags that may have been | ||
4605 | * passed to the function. | ||
4606 | */ | ||
4607 | break; | ||
4608 | case BNX2X_RX_MODE_NORMAL: | ||
4609 | __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); | ||
4610 | __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags); | ||
4611 | __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); | ||
4612 | |||
4613 | /* internal switching mode */ | ||
4614 | __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); | ||
4615 | __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags); | ||
4616 | __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); | ||
4617 | |||
4618 | break; | ||
4619 | case BNX2X_RX_MODE_ALLMULTI: | ||
4620 | __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); | ||
4621 | __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); | ||
4622 | __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); | ||
4623 | |||
4624 | /* internal switching mode */ | ||
4625 | __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); | ||
4626 | __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); | ||
4627 | __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); | ||
4628 | |||
4629 | break; | ||
4630 | case BNX2X_RX_MODE_PROMISC: | ||
4631 | /* According to the definition of SI mode, an iface in promisc mode | ||
4632 | * should receive matched and unmatched (in resolution of port) | ||
4633 | * unicast packets. | ||
4634 | */ | ||
4635 | __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags); | ||
4636 | __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); | ||
4637 | __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); | ||
4638 | __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); | ||
4639 | |||
4640 | /* internal switching mode */ | ||
4641 | __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); | ||
4642 | __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); | ||
3920 | 4643 | ||
3921 | /* xstorm needs to know whether to add ovlan to packets or not, | 4644 | if (IS_MF_SI(bp)) |
3922 | * in switch-independent we'll write 0 to here... */ | 4645 | __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags); |
3923 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, | 4646 | else |
3924 | bp->mf_mode); | 4647 | __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); |
3925 | REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET, | 4648 | |
3926 | bp->mf_mode); | 4649 | break; |
3927 | REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET, | 4650 | default: |
3928 | bp->mf_mode); | 4651 | BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode); |
3929 | REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET, | 4652 | return; |
3930 | bp->mf_mode); | 4653 | } |
4654 | |||
4655 | if (bp->rx_mode != BNX2X_RX_MODE_NONE) { | ||
4656 | __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags); | ||
4657 | __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags); | ||
3931 | } | 4658 | } |
3932 | 4659 | ||
4660 | __set_bit(RAMROD_RX, &ramrod_flags); | ||
4661 | __set_bit(RAMROD_TX, &ramrod_flags); | ||
4662 | |||
4663 | bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags, | ||
4664 | tx_accept_flags, ramrod_flags); | ||
4665 | } | ||
4666 | |||
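bnx2x_set_storm_rx_mode() above turns the single bp->rx_mode value into two independent accept-flag bitmaps, one for the Rx path and one for internal Tx switching, and only then hands them to the generic rx-mode object; every mode that accepts traffic also gets BNX2X_ACCEPT_ANY_VLAN. The mapping can be modeled compactly; the enum and bit names below mirror the shape of the code but are invented for the example, and the MF-SI special case for promiscuous Tx is omitted:

    #include <stdio.h>

    enum rx_mode { RX_MODE_NONE, RX_MODE_NORMAL, RX_MODE_ALLMULTI, RX_MODE_PROMISC };

    #define ACC_UNICAST        (1u << 0)
    #define ACC_MULTICAST      (1u << 1)
    #define ACC_ALL_MULTICAST  (1u << 2)
    #define ACC_BROADCAST      (1u << 3)
    #define ACC_UNMATCHED      (1u << 4)
    #define ACC_ANY_VLAN       (1u << 5)

    static void build_accept_flags(enum rx_mode mode, unsigned long *rx,
                                   unsigned long *tx)
    {
        *rx = *tx = 0;

        switch (mode) {
        case RX_MODE_NONE:
            return;                       /* drop everything */
        case RX_MODE_NORMAL:
            *rx = ACC_UNICAST | ACC_MULTICAST | ACC_BROADCAST;
            *tx = ACC_UNICAST | ACC_MULTICAST | ACC_BROADCAST;
            break;
        case RX_MODE_ALLMULTI:
            *rx = ACC_UNICAST | ACC_ALL_MULTICAST | ACC_BROADCAST;
            *tx = ACC_UNICAST | ACC_ALL_MULTICAST | ACC_BROADCAST;
            break;
        case RX_MODE_PROMISC:
            *rx = ACC_UNMATCHED | ACC_UNICAST | ACC_ALL_MULTICAST |
                  ACC_BROADCAST;
            *tx = ACC_UNICAST | ACC_ALL_MULTICAST | ACC_BROADCAST;
            break;
        }
        /* any mode that accepts traffic also accepts all VLAN tags */
        *rx |= ACC_ANY_VLAN;
        *tx |= ACC_ANY_VLAN;
    }

    int main(void)
    {
        unsigned long rx, tx;

        build_accept_flags(RX_MODE_PROMISC, &rx, &tx);
        printf("promisc: rx 0x%lx, tx 0x%lx\n", rx, tx);
        return 0;
    }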
4667 | static void bnx2x_init_internal_common(struct bnx2x *bp) | ||
4668 | { | ||
4669 | int i; | ||
4670 | |||
3933 | if (IS_MF_SI(bp)) | 4671 | if (IS_MF_SI(bp)) |
3934 | /* | 4672 | /* |
3935 | * In switch independent mode, the TSTORM needs to accept | 4673 | * In switch independent mode, the TSTORM needs to accept |
@@ -3938,25 +4676,22 @@ static void bnx2x_init_internal_common(struct bnx2x *bp) | |||
3938 | */ | 4676 | */ |
3939 | REG_WR8(bp, BAR_TSTRORM_INTMEM + | 4677 | REG_WR8(bp, BAR_TSTRORM_INTMEM + |
3940 | TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2); | 4678 | TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2); |
4679 | else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */ | ||
4680 | REG_WR8(bp, BAR_TSTRORM_INTMEM + | ||
4681 | TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0); | ||
3941 | 4682 | ||
3942 | /* Zero this manually as its initialization is | 4683 | /* Zero this manually as its initialization is |
3943 | currently missing in the initTool */ | 4684 | currently missing in the initTool */ |
3944 | for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) | 4685 | for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) |
3945 | REG_WR(bp, BAR_USTRORM_INTMEM + | 4686 | REG_WR(bp, BAR_USTRORM_INTMEM + |
3946 | USTORM_AGG_DATA_OFFSET + i * 4, 0); | 4687 | USTORM_AGG_DATA_OFFSET + i * 4, 0); |
3947 | if (CHIP_IS_E2(bp)) { | 4688 | if (!CHIP_IS_E1x(bp)) { |
3948 | REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET, | 4689 | REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET, |
3949 | CHIP_INT_MODE_IS_BC(bp) ? | 4690 | CHIP_INT_MODE_IS_BC(bp) ? |
3950 | HC_IGU_BC_MODE : HC_IGU_NBC_MODE); | 4691 | HC_IGU_BC_MODE : HC_IGU_NBC_MODE); |
3951 | } | 4692 | } |
3952 | } | 4693 | } |
3953 | 4694 | ||
3954 | static void bnx2x_init_internal_port(struct bnx2x *bp) | ||
3955 | { | ||
3956 | /* port */ | ||
3957 | bnx2x_dcb_init_intmem_pfc(bp); | ||
3958 | } | ||
3959 | |||
3960 | static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) | 4695 | static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) |
3961 | { | 4696 | { |
3962 | switch (load_code) { | 4697 | switch (load_code) { |
@@ -3966,7 +4701,7 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) | |||
3966 | /* no break */ | 4701 | /* no break */ |
3967 | 4702 | ||
3968 | case FW_MSG_CODE_DRV_LOAD_PORT: | 4703 | case FW_MSG_CODE_DRV_LOAD_PORT: |
3969 | bnx2x_init_internal_port(bp); | 4704 | /* nothing to do */ |
3970 | /* no break */ | 4705 | /* no break */ |
3971 | 4706 | ||
3972 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: | 4707 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: |
@@ -3980,31 +4715,57 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) | |||
3980 | } | 4715 | } |
3981 | } | 4716 | } |
3982 | 4717 | ||
3983 | static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx) | 4718 | static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp) |
3984 | { | 4719 | { |
3985 | struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; | 4720 | return fp->bp->igu_base_sb + fp->index + CNIC_CONTEXT_USE; |
4721 | } | ||
4722 | |||
4723 | static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) | ||
4724 | { | ||
4725 | return fp->bp->base_fw_ndsb + fp->index + CNIC_CONTEXT_USE; | ||
4726 | } | ||
4727 | |||
4728 | static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) | ||
4729 | { | ||
4730 | if (CHIP_IS_E1x(fp->bp)) | ||
4731 | return BP_L_ID(fp->bp) + fp->index; | ||
4732 | else /* We want Client ID to be the same as IGU SB ID for 57712 */ | ||
4733 | return bnx2x_fp_igu_sb_id(fp); | ||
4734 | } | ||
3986 | 4735 | ||
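The three inline helpers above centralize how a fastpath queue derives its identifiers: the IGU and FW status-block ids are a per-PF base plus the queue index, offset by CNIC_CONTEXT_USE (the slot set aside for CNIC), while the client id is either a per-port base plus index on E1x or simply made equal to the IGU SB id on the newer chips. A small model of that derivation for a made-up adapter layout (the bases and the reserved-slot count are illustrative):

    #include <stdio.h>
    #include <stdbool.h>

    #define CNIC_CONTEXT_USE 1          /* slots reserved for CNIC (assumed value) */

    struct adapter {
        unsigned int igu_base_sb;       /* first IGU status block owned by this PF */
        unsigned int base_fw_ndsb;      /* first FW non-default status block */
        unsigned int l_id;              /* per-port base client id (E1x scheme) */
        bool is_e1x;
    };

    static unsigned int fp_igu_sb_id(const struct adapter *a, unsigned int idx)
    {
        return a->igu_base_sb + idx + CNIC_CONTEXT_USE;
    }

    static unsigned int fp_fw_sb_id(const struct adapter *a, unsigned int idx)
    {
        return a->base_fw_ndsb + idx + CNIC_CONTEXT_USE;
    }

    static unsigned int fp_cl_id(const struct adapter *a, unsigned int idx)
    {
        /* newer chips keep client id == IGU SB id; E1x uses a per-port base */
        return a->is_e1x ? a->l_id + idx : fp_igu_sb_id(a, idx);
    }

    int main(void)
    {
        struct adapter a = { .igu_base_sb = 16, .base_fw_ndsb = 8,
                             .l_id = 0, .is_e1x = false };

        for (unsigned int q = 0; q < 3; q++)
            printf("queue %u: igu_sb %u fw_sb %u cl_id %u\n", q,
                   fp_igu_sb_id(&a, q), fp_fw_sb_id(&a, q), fp_cl_id(&a, q));
        return 0;
    }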
3987 | fp->state = BNX2X_FP_STATE_CLOSED; | 4736 | static void bnx2x_init_fp(struct bnx2x *bp, int fp_idx) |
4737 | { | ||
4738 | struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; | ||
4739 | unsigned long q_type = 0; | ||
3988 | 4740 | ||
3989 | fp->cid = fp_idx; | 4741 | fp->cid = fp_idx; |
3990 | fp->cl_id = BP_L_ID(bp) + fp_idx; | 4742 | fp->cl_id = bnx2x_fp_cl_id(fp); |
3991 | fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE; | 4743 | fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp); |
3992 | fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE; | 4744 | fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp); |
3993 | /* qZone id equals the FW (per path) client id */ | 4745 | /* qZone id equals the FW (per path) client id */ |
3994 | fp->cl_qzone_id = fp->cl_id + | 4746 | fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); |
3995 | BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 : | 4747 | |
3996 | ETH_MAX_RX_CLIENTS_E1H); | ||
3997 | /* init shortcut */ | 4748 | /* init shortcut */ |
3998 | fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ? | 4749 | fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); |
3999 | USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) : | ||
4000 | USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); | ||
4001 | /* Set up SB indices */ | 4751 | /* Set up SB indices */ |
4002 | fp->rx_cons_sb = BNX2X_RX_SB_INDEX; | 4751 | fp->rx_cons_sb = BNX2X_RX_SB_INDEX; |
4003 | fp->tx_cons_sb = BNX2X_TX_SB_INDEX; | 4752 | fp->tx_cons_sb = BNX2X_TX_SB_INDEX; |
4004 | 4753 | ||
4754 | /* Configure Queue State object */ | ||
4755 | __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); | ||
4756 | __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); | ||
4757 | bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, fp->cid, BP_FUNC(bp), | ||
4758 | bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata), | ||
4759 | q_type); | ||
4760 | |||
4761 | /** | ||
4762 | * Configure classification DBs: Always enable Tx switching | ||
4763 | */ | ||
4764 | bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX); | ||
4765 | |||
4005 | DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) " | 4766 | DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) " |
4006 | "cl_id %d fw_sb %d igu_sb %d\n", | 4767 | "cl_id %d fw_sb %d igu_sb %d\n", |
4007 | fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id, | 4768 | fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, |
4008 | fp->igu_sb_id); | 4769 | fp->igu_sb_id); |
4009 | bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, | 4770 | bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, |
4010 | fp->fw_sb_id, fp->igu_sb_id); | 4771 | fp->fw_sb_id, fp->igu_sb_id); |
@@ -4017,14 +4778,14 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) | |||
4017 | int i; | 4778 | int i; |
4018 | 4779 | ||
4019 | for_each_eth_queue(bp, i) | 4780 | for_each_eth_queue(bp, i) |
4020 | bnx2x_init_fp_sb(bp, i); | 4781 | bnx2x_init_fp(bp, i); |
4021 | #ifdef BCM_CNIC | 4782 | #ifdef BCM_CNIC |
4022 | if (!NO_FCOE(bp)) | 4783 | if (!NO_FCOE(bp)) |
4023 | bnx2x_init_fcoe_fp(bp); | 4784 | bnx2x_init_fcoe_fp(bp); |
4024 | 4785 | ||
4025 | bnx2x_init_sb(bp, bp->cnic_sb_mapping, | 4786 | bnx2x_init_sb(bp, bp->cnic_sb_mapping, |
4026 | BNX2X_VF_ID_INVALID, false, | 4787 | BNX2X_VF_ID_INVALID, false, |
4027 | CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp)); | 4788 | bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); |
4028 | 4789 | ||
4029 | #endif | 4790 | #endif |
4030 | 4791 | ||
@@ -4043,7 +4804,6 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) | |||
4043 | bnx2x_init_eq_ring(bp); | 4804 | bnx2x_init_eq_ring(bp); |
4044 | bnx2x_init_internal(bp, load_code); | 4805 | bnx2x_init_internal(bp, load_code); |
4045 | bnx2x_pf_init(bp); | 4806 | bnx2x_pf_init(bp); |
4046 | bnx2x_init_ind_table(bp); | ||
4047 | bnx2x_stats_init(bp); | 4807 | bnx2x_stats_init(bp); |
4048 | 4808 | ||
4049 | /* flush all before enabling interrupts */ | 4809 | /* flush all before enabling interrupts */ |
@@ -4249,8 +5009,8 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
4249 | msleep(50); | 5009 | msleep(50); |
4250 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); | 5010 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); |
4251 | msleep(50); | 5011 | msleep(50); |
4252 | bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); | 5012 | bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); |
4253 | bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); | 5013 | bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); |
4254 | 5014 | ||
4255 | DP(NETIF_MSG_HW, "part2\n"); | 5015 | DP(NETIF_MSG_HW, "part2\n"); |
4256 | 5016 | ||
@@ -4314,8 +5074,8 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
4314 | msleep(50); | 5074 | msleep(50); |
4315 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); | 5075 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); |
4316 | msleep(50); | 5076 | msleep(50); |
4317 | bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); | 5077 | bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); |
4318 | bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); | 5078 | bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); |
4319 | #ifndef BCM_CNIC | 5079 | #ifndef BCM_CNIC |
4320 | /* set NIC mode */ | 5080 | /* set NIC mode */ |
4321 | REG_WR(bp, PRS_REG_NIC_MODE, 1); | 5081 | REG_WR(bp, PRS_REG_NIC_MODE, 1); |
@@ -4335,7 +5095,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
4335 | static void bnx2x_enable_blocks_attention(struct bnx2x *bp) | 5095 | static void bnx2x_enable_blocks_attention(struct bnx2x *bp) |
4336 | { | 5096 | { |
4337 | REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); | 5097 | REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); |
4338 | if (CHIP_IS_E2(bp)) | 5098 | if (!CHIP_IS_E1x(bp)) |
4339 | REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); | 5099 | REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); |
4340 | else | 5100 | else |
4341 | REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); | 5101 | REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); |
@@ -4369,7 +5129,7 @@ static void bnx2x_enable_blocks_attention(struct bnx2x *bp) | |||
4369 | 5129 | ||
4370 | if (CHIP_REV_IS_FPGA(bp)) | 5130 | if (CHIP_REV_IS_FPGA(bp)) |
4371 | REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); | 5131 | REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); |
4372 | else if (CHIP_IS_E2(bp)) | 5132 | else if (!CHIP_IS_E1x(bp)) |
4373 | REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, | 5133 | REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, |
4374 | (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 5134 | (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
4375 | | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 5135 | | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
@@ -4382,7 +5142,11 @@ static void bnx2x_enable_blocks_attention(struct bnx2x *bp) | |||
4382 | REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); | 5142 | REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); |
4383 | REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); | 5143 | REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); |
4384 | /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ | 5144 | /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ |
4385 | /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */ | 5145 | |
5146 | if (!CHIP_IS_E1x(bp)) | ||
5147 | /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ | ||
5148 | REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); | ||
5149 | |||
4386 | REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); | 5150 | REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); |
4387 | REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); | 5151 | REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); |
4388 | /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ | 5152 | /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ |
@@ -4391,10 +5155,24 @@ static void bnx2x_enable_blocks_attention(struct bnx2x *bp) | |||
4391 | 5155 | ||
4392 | static void bnx2x_reset_common(struct bnx2x *bp) | 5156 | static void bnx2x_reset_common(struct bnx2x *bp) |
4393 | { | 5157 | { |
5158 | u32 val = 0x1400; | ||
5159 | |||
4394 | /* reset_common */ | 5160 | /* reset_common */ |
4395 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | 5161 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, |
4396 | 0xd3ffff7f); | 5162 | 0xd3ffff7f); |
4397 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403); | 5163 | |
5164 | if (CHIP_IS_E3(bp)) { | ||
5165 | val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; | ||
5166 | val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; | ||
5167 | } | ||
5168 | |||
5169 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val); | ||
5170 | } | ||
5171 | |||
5172 | static void bnx2x_setup_dmae(struct bnx2x *bp) | ||
5173 | { | ||
5174 | bp->dmae_ready = 0; | ||
5175 | spin_lock_init(&bp->dmae_lock); | ||
4398 | } | 5176 | } |
4399 | 5177 | ||
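bnx2x_reset_common() above now builds the second reset-register mask at run time: a 0x1400 base, extended with the MSTAT0/MSTAT1 bits only on E3 chips, where those statistics blocks exist and must be taken through reset together with the rest. A minimal sketch of composing such a chip-dependent mask (the bit positions are illustrative, not the real MISC register layout):

    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative bit assignments for the example only */
    #define RESET2_BASE    0x1400u
    #define RESET2_MSTAT0  (1u << 24)
    #define RESET2_MSTAT1  (1u << 25)

    static unsigned int reset_reg2_mask(bool chip_is_e3)
    {
        unsigned int val = RESET2_BASE;

        if (chip_is_e3)                  /* E3 adds the MSTAT blocks */
            val |= RESET2_MSTAT0 | RESET2_MSTAT1;
        return val;
    }

    int main(void)
    {
        printf("E2 mask: 0x%x\n", reset_reg2_mask(false));
        printf("E3 mask: 0x%x\n", reset_reg2_mask(true));
        return 0;
    }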
4400 | static void bnx2x_init_pxp(struct bnx2x *bp) | 5178 | static void bnx2x_init_pxp(struct bnx2x *bp) |
@@ -4521,22 +5299,48 @@ static void bnx2x_pf_disable(struct bnx2x *bp) | |||
4521 | REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); | 5299 | REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); |
4522 | } | 5300 | } |
4523 | 5301 | ||
4524 | static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | 5302 | static inline void bnx2x__common_init_phy(struct bnx2x *bp) |
5303 | { | ||
5304 | u32 shmem_base[2], shmem2_base[2]; | ||
5305 | shmem_base[0] = bp->common.shmem_base; | ||
5306 | shmem2_base[0] = bp->common.shmem2_base; | ||
5307 | if (!CHIP_IS_E1x(bp)) { | ||
5308 | shmem_base[1] = | ||
5309 | SHMEM2_RD(bp, other_shmem_base_addr); | ||
5310 | shmem2_base[1] = | ||
5311 | SHMEM2_RD(bp, other_shmem2_base_addr); | ||
5312 | } | ||
5313 | bnx2x_acquire_phy_lock(bp); | ||
5314 | bnx2x_common_init_phy(bp, shmem_base, shmem2_base, | ||
5315 | bp->common.chip_id); | ||
5316 | bnx2x_release_phy_lock(bp); | ||
5317 | } | ||
5318 | |||
5319 | /** | ||
5320 | * bnx2x_init_hw_common - initialize the HW at the COMMON phase. | ||
5321 | * | ||
5322 | * @bp: driver handle | ||
5323 | */ | ||
5324 | static int bnx2x_init_hw_common(struct bnx2x *bp) | ||
4525 | { | 5325 | { |
4526 | u32 val, i; | 5326 | u32 val; |
4527 | 5327 | ||
4528 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); | 5328 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); |
4529 | 5329 | ||
4530 | bnx2x_reset_common(bp); | 5330 | bnx2x_reset_common(bp); |
4531 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); | 5331 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); |
4532 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); | ||
4533 | 5332 | ||
4534 | bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE); | 5333 | val = 0xfffc; |
4535 | if (!CHIP_IS_E1(bp)) | 5334 | if (CHIP_IS_E3(bp)) { |
4536 | REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp)); | 5335 | val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; |
5336 | val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; | ||
5337 | } | ||
5338 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); | ||
4537 | 5339 | ||
4538 | if (CHIP_IS_E2(bp)) { | 5340 | bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); |
4539 | u8 fid; | 5341 | |
5342 | if (!CHIP_IS_E1x(bp)) { | ||
5343 | u8 abs_func_id; | ||
4540 | 5344 | ||
4541 | /** | 5345 | /** |
4542 | * 4-port mode or 2-port mode we need to turn off master-enable | 5346 | * 4-port mode or 2-port mode we need to turn off master-enable |
@@ -4545,29 +5349,30 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
4545 | * for all functions on the given path, this means 0,2,4,6 for | 5349 | * for all functions on the given path, this means 0,2,4,6 for |
4546 | * path 0 and 1,3,5,7 for path 1 | 5350 | * path 0 and 1,3,5,7 for path 1 |
4547 | */ | 5351 | */ |
4548 | for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) { | 5352 | for (abs_func_id = BP_PATH(bp); |
4549 | if (fid == BP_ABS_FUNC(bp)) { | 5353 | abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) { |
5354 | if (abs_func_id == BP_ABS_FUNC(bp)) { | ||
4550 | REG_WR(bp, | 5355 | REG_WR(bp, |
4551 | PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, | 5356 | PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, |
4552 | 1); | 5357 | 1); |
4553 | continue; | 5358 | continue; |
4554 | } | 5359 | } |
4555 | 5360 | ||
4556 | bnx2x_pretend_func(bp, fid); | 5361 | bnx2x_pretend_func(bp, abs_func_id); |
4557 | /* clear pf enable */ | 5362 | /* clear pf enable */ |
4558 | bnx2x_pf_disable(bp); | 5363 | bnx2x_pf_disable(bp); |
4559 | bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); | 5364 | bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); |
4560 | } | 5365 | } |
4561 | } | 5366 | } |
4562 | 5367 | ||
4563 | bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE); | 5368 | bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON); |
4564 | if (CHIP_IS_E1(bp)) { | 5369 | if (CHIP_IS_E1(bp)) { |
4565 | /* enable HW interrupt from PXP on USDM overflow | 5370 | /* enable HW interrupt from PXP on USDM overflow |
4566 | bit 16 on INT_MASK_0 */ | 5371 | bit 16 on INT_MASK_0 */ |
4567 | REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); | 5372 | REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); |
4568 | } | 5373 | } |
4569 | 5374 | ||
4570 | bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE); | 5375 | bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); |
4571 | bnx2x_init_pxp(bp); | 5376 | bnx2x_init_pxp(bp); |
4572 | 5377 | ||
4573 | #ifdef __BIG_ENDIAN | 5378 | #ifdef __BIG_ENDIAN |
@@ -4610,7 +5415,69 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
4610 | * This needs to be done by the first PF that is loaded in a path | 5415 | * This needs to be done by the first PF that is loaded in a path |
4611 | * (i.e. common phase) | 5416 | * (i.e. common phase) |
4612 | */ | 5417 | */ |
4613 | if (CHIP_IS_E2(bp)) { | 5418 | if (!CHIP_IS_E1x(bp)) { |
5419 | /* In E2 there is a bug in the timers block that can cause function 6 / 7 | ||
5420 | * (i.e. vnic3) to start even if it is marked as "scan-off". | ||
5421 | * This occurs when a different function (func2,3) is being marked | ||
5422 | * as "scan-off". Real-life scenario for example: if a driver is being | ||
5423 | * load-unloaded while func6,7 are down. This will cause the timer to access | ||
5424 | * the ilt, translate to a logical address and send a request to read/write. | ||
5425 | * Since the ilt for the function that is down is not valid, this will cause | ||
5426 | * a translation error which is unrecoverable. | ||
5427 | * The Workaround is intended to make sure that when this happens nothing fatal | ||
5428 | * will occur. The workaround: | ||
5429 | * 1. First PF driver which loads on a path will: | ||
5430 | * a. After taking the chip out of reset, by using pretend, | ||
5431 | * it will write "0" to the following registers of | ||
5432 | * the other vnics. | ||
5433 | * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); | ||
5434 | * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); | ||
5435 | * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); | ||
5436 | * And for itself it will write '1' to | ||
5437 | * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable | ||
5438 | * dmae-operations (writing to pram for example.) | ||
5439 | * note: can be done for only function 6,7 but cleaner this | ||
5440 | * way. | ||
5441 | * b. Write zero+valid to the entire ILT. | ||
5442 | * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of | ||
5443 | * VNIC3 (of that port). The range allocated will be the | ||
5444 | * entire ILT. This is needed to prevent ILT range error. | ||
5445 | * 2. Any PF driver load flow: | ||
5446 | * a. ILT update with the physical addresses of the allocated | ||
5447 | * logical pages. | ||
5448 | * b. Wait 20msec. - note that this timeout is needed to make | ||
5449 | * sure there are no requests in one of the PXP internal | ||
5450 | * queues with "old" ILT addresses. | ||
5451 | * c. PF enable in the PGLC. | ||
5452 | * d. Clear the was_error of the PF in the PGLC. (could have | ||
5453 | * occurred while the driver was down) | ||
5454 | * e. PF enable in the CFC (WEAK + STRONG) | ||
5455 | * f. Timers scan enable | ||
5456 | * 3. PF driver unload flow: | ||
5457 | * a. Clear the Timers scan_en. | ||
5458 | * b. Polling for scan_on=0 for that PF. | ||
5459 | * c. Clear the PF enable bit in the PXP. | ||
5460 | * d. Clear the PF enable in the CFC (WEAK + STRONG) | ||
5461 | * e. Write zero+valid to all ILT entries (The valid bit must | ||
5462 | * stay set) | ||
5463 | * f. If this is VNIC 3 of a port then also init | ||
5464 | * first_timers_ilt_entry to zero and last_timers_ilt_entry | ||
5465 | * to the last entry in the ILT. | ||
5466 | * | ||
5467 | * Notes: | ||
5468 | * Currently the PF error in the PGLC is non recoverable. | ||
5469 | * In the future there will be a recovery routine for this error. | ||
5470 | * Currently attention is masked. | ||
5471 | * Having an MCP lock on the load/unload process does not guarantee that | ||
5472 | * there is no Timer disable during Func6/7 enable. This is because the | ||
5473 | * Timers scan is currently being cleared by the MCP on FLR. | ||
5474 | * Step 2.d can be done only for PF6/7 and the driver can also check if | ||
5475 | * there is error before clearing it. But the flow above is simpler and | ||
5476 | * more general. | ||
5477 | * All ILT entries are written by zero+valid and not just PF6/7 | ||
5478 | * ILT entries since in the future the ILT entries allocation for | ||
5479 | * PF-s might be dynamic. | ||
5480 | */ | ||
4614 | struct ilt_client_info ilt_cli; | 5481 | struct ilt_client_info ilt_cli; |
4615 | struct bnx2x_ilt ilt; | 5482 | struct bnx2x_ilt ilt; |
4616 | memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); | 5483 | memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); |
@@ -4624,7 +5491,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
4624 | /* Step 1: set zeroes to all ilt page entries with valid bit on | 5491 | /* Step 1: set zeroes to all ilt page entries with valid bit on |
4625 | * Step 2: set the timers first/last ilt entry to point | 5492 | * Step 2: set the timers first/last ilt entry to point |
4626 | * to the entire range to prevent ILT range error for 3rd/4th | 5493 | * to the entire range to prevent ILT range error for 3rd/4th |
4627 | * vnic (this code assumes existence of the vnic) | 5494 | * vnic (this code assumes existence of the vnic) |
4628 | * | 5495 | * |
4629 | * both steps performed by call to bnx2x_ilt_client_init_op() | 5496 | * both steps performed by call to bnx2x_ilt_client_init_op() |
4630 | * with dummy TM client | 5497 | * with dummy TM client |
@@ -4645,12 +5512,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
4645 | REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); | 5512 | REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); |
4646 | REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); | 5513 | REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); |
4647 | 5514 | ||
4648 | if (CHIP_IS_E2(bp)) { | 5515 | if (!CHIP_IS_E1x(bp)) { |
4649 | int factor = CHIP_REV_IS_EMUL(bp) ? 1000 : | 5516 | int factor = CHIP_REV_IS_EMUL(bp) ? 1000 : |
4650 | (CHIP_REV_IS_FPGA(bp) ? 400 : 0); | 5517 | (CHIP_REV_IS_FPGA(bp) ? 400 : 0); |
4651 | bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE); | 5518 | bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON); |
4652 | 5519 | ||
4653 | bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE); | 5520 | bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON); |
4654 | 5521 | ||
4655 | /* let the HW do its magic ... */ | 5522 | /* let the HW do its magic ... */ |
4656 | do { | 5523 | do { |
@@ -4664,26 +5531,27 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
4664 | } | 5531 | } |
4665 | } | 5532 | } |
4666 | 5533 | ||
4667 | bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE); | 5534 | bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); |
4668 | 5535 | ||
4669 | /* clean the DMAE memory */ | 5536 | /* clean the DMAE memory */ |
4670 | bp->dmae_ready = 1; | 5537 | bp->dmae_ready = 1; |
4671 | bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8); | 5538 | bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); |
5539 | |||
5540 | bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON); | ||
4672 | 5541 | ||
4673 | bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE); | 5542 | bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON); |
4674 | bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE); | 5543 | |
4675 | bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE); | 5544 | bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON); |
4676 | bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE); | 5545 | |
5546 | bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON); | ||
4677 | 5547 | ||
4678 | bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); | 5548 | bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); |
4679 | bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); | 5549 | bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); |
4680 | bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); | 5550 | bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); |
4681 | bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); | 5551 | bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); |
4682 | 5552 | ||
4683 | bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); | 5553 | bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); |
4684 | 5554 | ||
4685 | if (CHIP_MODE_IS_4_PORT(bp)) | ||
4686 | bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE); | ||
4687 | 5555 | ||
4688 | /* QM queues pointers table */ | 5556 | /* QM queues pointers table */ |
4689 | bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); | 5557 | bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); |
@@ -4693,57 +5561,51 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
4693 | REG_WR(bp, QM_REG_SOFT_RESET, 0); | 5561 | REG_WR(bp, QM_REG_SOFT_RESET, 0); |
4694 | 5562 | ||
4695 | #ifdef BCM_CNIC | 5563 | #ifdef BCM_CNIC |
4696 | bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE); | 5564 | bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); |
4697 | #endif | 5565 | #endif |
4698 | 5566 | ||
4699 | bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE); | 5567 | bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); |
4700 | REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); | 5568 | REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); |
4701 | 5569 | if (!CHIP_REV_IS_SLOW(bp)) | |
4702 | if (!CHIP_REV_IS_SLOW(bp)) { | ||
4703 | /* enable hw interrupt from doorbell Q */ | 5570 | /* enable hw interrupt from doorbell Q */ |
4704 | REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); | 5571 | REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); |
4705 | } | ||
4706 | 5572 | ||
4707 | bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); | 5573 | bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); |
4708 | if (CHIP_MODE_IS_4_PORT(bp)) { | ||
4709 | REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248); | ||
4710 | REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328); | ||
4711 | } | ||
4712 | 5574 | ||
4713 | bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); | 5575 | bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); |
4714 | REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); | 5576 | REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); |
4715 | #ifndef BCM_CNIC | 5577 | |
4716 | /* set NIC mode */ | ||
4717 | REG_WR(bp, PRS_REG_NIC_MODE, 1); | ||
4718 | #endif | ||
4719 | if (!CHIP_IS_E1(bp)) | 5578 | if (!CHIP_IS_E1(bp)) |
4720 | REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp)); | 5579 | REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); |
4721 | 5580 | ||
4722 | if (CHIP_IS_E2(bp)) { | 5581 | if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) |
4723 | /* Bit-map indicating which L2 hdrs may appear after the | 5582 | /* Bit-map indicating which L2 hdrs may appear |
4724 | basic Ethernet header */ | 5583 | * after the basic Ethernet header |
4725 | int has_ovlan = IS_MF_SD(bp); | 5584 | */ |
4726 | REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6)); | 5585 | REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, |
4727 | REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0)); | 5586 | bp->path_has_ovlan ? 7 : 6); |
4728 | } | ||
4729 | 5587 | ||
4730 | bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE); | 5588 | bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); |
4731 | bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE); | 5589 | bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); |
4732 | bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE); | 5590 | bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON); |
4733 | bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE); | 5591 | bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON); |
4734 | 5592 | ||
4735 | bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); | 5593 | if (!CHIP_IS_E1x(bp)) { |
4736 | bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); | 5594 | /* reset VFC memories */ |
4737 | bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); | 5595 | REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, |
4738 | bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); | 5596 | VFC_MEMORIES_RST_REG_CAM_RST | |
5597 | VFC_MEMORIES_RST_REG_RAM_RST); | ||
5598 | REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, | ||
5599 | VFC_MEMORIES_RST_REG_CAM_RST | | ||
5600 | VFC_MEMORIES_RST_REG_RAM_RST); | ||
4739 | 5601 | ||
4740 | bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE); | 5602 | msleep(20); |
4741 | bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE); | 5603 | } |
4742 | bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE); | ||
4743 | bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE); | ||
4744 | 5604 | ||
4745 | if (CHIP_MODE_IS_4_PORT(bp)) | 5605 | bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON); |
4746 | bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE); | 5606 | bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON); |
5607 | bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON); | ||
5608 | bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON); | ||
4747 | 5609 | ||
4748 | /* sync semi rtc */ | 5610 | /* sync semi rtc */ |
4749 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | 5611 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, |
@@ -4751,21 +5613,18 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
4751 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, | 5613 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, |
4752 | 0x80000000); | 5614 | 0x80000000); |
4753 | 5615 | ||
4754 | bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE); | 5616 | bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON); |
4755 | bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE); | 5617 | bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); |
4756 | bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE); | 5618 | bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); |
4757 | 5619 | ||
4758 | if (CHIP_IS_E2(bp)) { | 5620 | if (!CHIP_IS_E1x(bp)) |
4759 | int has_ovlan = IS_MF_SD(bp); | 5621 | REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, |
4760 | REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6)); | 5622 | bp->path_has_ovlan ? 7 : 6); |
4761 | REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0)); | ||
4762 | } | ||
4763 | 5623 | ||
4764 | REG_WR(bp, SRC_REG_SOFT_RST, 1); | 5624 | REG_WR(bp, SRC_REG_SOFT_RST, 1); |
4765 | for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) | ||
4766 | REG_WR(bp, i, random32()); | ||
4767 | 5625 | ||
4768 | bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE); | 5626 | bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); |
5627 | |||
4769 | #ifdef BCM_CNIC | 5628 | #ifdef BCM_CNIC |
4770 | REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); | 5629 | REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); |
4771 | REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); | 5630 | REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); |
@@ -4786,11 +5645,11 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
4786 | "of cdu_context(%ld)\n", | 5645 | "of cdu_context(%ld)\n", |
4787 | (long)sizeof(union cdu_context)); | 5646 | (long)sizeof(union cdu_context)); |
4788 | 5647 | ||
4789 | bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE); | 5648 | bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); |
4790 | val = (4 << 24) + (0 << 12) + 1024; | 5649 | val = (4 << 24) + (0 << 12) + 1024; |
4791 | REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); | 5650 | REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); |
4792 | 5651 | ||
4793 | bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE); | 5652 | bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); |
4794 | REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); | 5653 | REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); |
4795 | /* enable context validation interrupt from CFC */ | 5654 | /* enable context validation interrupt from CFC */ |
4796 | REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); | 5655 | REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); |
@@ -4798,20 +5657,19 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
4798 | /* set the thresholds to prevent CFC/CDU race */ | 5657 | /* set the thresholds to prevent CFC/CDU race */ |
4799 | REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); | 5658 | REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); |
4800 | 5659 | ||
4801 | bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE); | 5660 | bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); |
4802 | 5661 | ||
4803 | if (CHIP_IS_E2(bp) && BP_NOMCP(bp)) | 5662 | if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) |
4804 | REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); | 5663 | REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); |
4805 | 5664 | ||
4806 | bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE); | 5665 | bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); |
4807 | bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE); | 5666 | bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); |
4808 | 5667 | ||
4809 | bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE); | ||
4810 | /* Reset PCIE errors for debug */ | 5668 | /* Reset PCIE errors for debug */ |
4811 | REG_WR(bp, 0x2814, 0xffffffff); | 5669 | REG_WR(bp, 0x2814, 0xffffffff); |
4812 | REG_WR(bp, 0x3820, 0xffffffff); | 5670 | REG_WR(bp, 0x3820, 0xffffffff); |
4813 | 5671 | ||
4814 | if (CHIP_IS_E2(bp)) { | 5672 | if (!CHIP_IS_E1x(bp)) { |
4815 | REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, | 5673 | REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, |
4816 | (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | | 5674 | (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | |
4817 | PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); | 5675 | PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); |
@@ -4825,21 +5683,15 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
4825 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); | 5683 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); |
4826 | } | 5684 | } |
4827 | 5685 | ||
4828 | bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE); | 5686 | bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON); |
4829 | bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE); | ||
4830 | bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE); | ||
4831 | bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE); | ||
4832 | |||
4833 | bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE); | ||
4834 | if (!CHIP_IS_E1(bp)) { | 5687 | if (!CHIP_IS_E1(bp)) { |
4835 | REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); | 5688 | /* in E3 this is done in the per-port section */ |
4836 | REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); | 5689 | if (!CHIP_IS_E3(bp)) |
4837 | } | 5690 | REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); |
4838 | if (CHIP_IS_E2(bp)) { | ||
4839 | /* Bit-map indicating which L2 hdrs may appear after the | ||
4840 | basic Ethernet header */ | ||
4841 | REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6)); | ||
4842 | } | 5691 | } |
5692 | if (CHIP_IS_E1H(bp)) | ||
5693 | /* not applicable for E2 (and above ...) */ | ||
5694 | REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); | ||
4843 | 5695 | ||
4844 | if (CHIP_REV_IS_SLOW(bp)) | 5696 | if (CHIP_REV_IS_SLOW(bp)) |
4845 | msleep(200); | 5697 | msleep(200); |
@@ -4885,123 +5737,133 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
4885 | bnx2x_enable_blocks_parity(bp); | 5737 | bnx2x_enable_blocks_parity(bp); |
4886 | 5738 | ||
4887 | if (!BP_NOMCP(bp)) { | 5739 | if (!BP_NOMCP(bp)) { |
4888 | /* In E2 2-PORT mode, same ext phy is used for the two paths */ | 5740 | if (CHIP_IS_E1x(bp)) |
4889 | if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || | 5741 | bnx2x__common_init_phy(bp); |
4890 | CHIP_IS_E1x(bp)) { | ||
4891 | u32 shmem_base[2], shmem2_base[2]; | ||
4892 | shmem_base[0] = bp->common.shmem_base; | ||
4893 | shmem2_base[0] = bp->common.shmem2_base; | ||
4894 | if (CHIP_IS_E2(bp)) { | ||
4895 | shmem_base[1] = | ||
4896 | SHMEM2_RD(bp, other_shmem_base_addr); | ||
4897 | shmem2_base[1] = | ||
4898 | SHMEM2_RD(bp, other_shmem2_base_addr); | ||
4899 | } | ||
4900 | bnx2x_acquire_phy_lock(bp); | ||
4901 | bnx2x_common_init_phy(bp, shmem_base, shmem2_base, | ||
4902 | bp->common.chip_id); | ||
4903 | bnx2x_release_phy_lock(bp); | ||
4904 | } | ||
4905 | } else | 5742 | } else |
4906 | BNX2X_ERR("Bootcode is missing - can not initialize link\n"); | 5743 | BNX2X_ERR("Bootcode is missing - can not initialize link\n"); |
4907 | 5744 | ||
4908 | return 0; | 5745 | return 0; |
4909 | } | 5746 | } |
4910 | 5747 | ||
5748 | /** | ||
5749 | * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. | ||
5750 | * | ||
5751 | * @bp: driver handle | ||
5752 | */ | ||
5753 | static int bnx2x_init_hw_common_chip(struct bnx2x *bp) | ||
5754 | { | ||
5755 | int rc = bnx2x_init_hw_common(bp); | ||
5756 | |||
5757 | if (rc) | ||
5758 | return rc; | ||
5759 | |||
5760 | /* In E2 2-PORT mode, same ext phy is used for the two paths */ | ||
5761 | if (!BP_NOMCP(bp)) | ||
5762 | bnx2x__common_init_phy(bp); | ||
5763 | |||
5764 | return 0; | ||
5765 | } | ||
5766 | |||
4911 | static int bnx2x_init_hw_port(struct bnx2x *bp) | 5767 | static int bnx2x_init_hw_port(struct bnx2x *bp) |
4912 | { | 5768 | { |
4913 | int port = BP_PORT(bp); | 5769 | int port = BP_PORT(bp); |
4914 | int init_stage = port ? PORT1_STAGE : PORT0_STAGE; | 5770 | int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; |
4915 | u32 low, high; | 5771 | u32 low, high; |
4916 | u32 val; | 5772 | u32 val; |
4917 | 5773 | ||
5774 | bnx2x__link_reset(bp); | ||
5775 | |||
4918 | DP(BNX2X_MSG_MCP, "starting port init port %d\n", port); | 5776 | DP(BNX2X_MSG_MCP, "starting port init port %d\n", port); |
4919 | 5777 | ||
4920 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); | 5778 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); |
4921 | 5779 | ||
4922 | bnx2x_init_block(bp, PXP_BLOCK, init_stage); | 5780 | bnx2x_init_block(bp, BLOCK_MISC, init_phase); |
4923 | bnx2x_init_block(bp, PXP2_BLOCK, init_stage); | 5781 | bnx2x_init_block(bp, BLOCK_PXP, init_phase); |
5782 | bnx2x_init_block(bp, BLOCK_PXP2, init_phase); | ||
4924 | 5783 | ||
4925 | /* Timers bug workaround: disables the pf_master bit in pglue at | 5784 | /* Timers bug workaround: disables the pf_master bit in pglue at |
4926 | * common phase; we need to enable it here before any dmae accesses are | 5785 | * common phase; we need to enable it here before any dmae accesses are |
4927 | * attempted. Therefore we manually added the enable-master to the | 5786 | * attempted. Therefore we manually added the enable-master to the |
4928 | * port phase (it also happens in the function phase) | 5787 | * port phase (it also happens in the function phase) |
4929 | */ | 5788 | */ |
4930 | if (CHIP_IS_E2(bp)) | 5789 | if (!CHIP_IS_E1x(bp)) |
4931 | REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); | 5790 | REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); |
4932 | 5791 | ||
4933 | bnx2x_init_block(bp, TCM_BLOCK, init_stage); | 5792 | bnx2x_init_block(bp, BLOCK_ATC, init_phase); |
4934 | bnx2x_init_block(bp, UCM_BLOCK, init_stage); | 5793 | bnx2x_init_block(bp, BLOCK_DMAE, init_phase); |
4935 | bnx2x_init_block(bp, CCM_BLOCK, init_stage); | 5794 | bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); |
4936 | bnx2x_init_block(bp, XCM_BLOCK, init_stage); | 5795 | bnx2x_init_block(bp, BLOCK_QM, init_phase); |
5796 | |||
5797 | bnx2x_init_block(bp, BLOCK_TCM, init_phase); | ||
5798 | bnx2x_init_block(bp, BLOCK_UCM, init_phase); | ||
5799 | bnx2x_init_block(bp, BLOCK_CCM, init_phase); | ||
5800 | bnx2x_init_block(bp, BLOCK_XCM, init_phase); | ||
4937 | 5801 | ||
4938 | /* QM cid (connection) count */ | 5802 | /* QM cid (connection) count */ |
4939 | bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); | 5803 | bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); |
4940 | 5804 | ||
4941 | #ifdef BCM_CNIC | 5805 | #ifdef BCM_CNIC |
4942 | bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); | 5806 | bnx2x_init_block(bp, BLOCK_TM, init_phase); |
4943 | REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); | 5807 | REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); |
4944 | REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); | 5808 | REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); |
4945 | #endif | 5809 | #endif |
4946 | 5810 | ||
4947 | bnx2x_init_block(bp, DQ_BLOCK, init_stage); | 5811 | bnx2x_init_block(bp, BLOCK_DORQ, init_phase); |
4948 | |||
4949 | if (CHIP_MODE_IS_4_PORT(bp)) | ||
4950 | bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage); | ||
4951 | 5812 | ||
4952 | if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { | 5813 | if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { |
4953 | bnx2x_init_block(bp, BRB1_BLOCK, init_stage); | 5814 | bnx2x_init_block(bp, BLOCK_BRB1, init_phase); |
4954 | if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) { | 5815 | |
4955 | /* no pause for emulation and FPGA */ | 5816 | if (IS_MF(bp)) |
4956 | low = 0; | 5817 | low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); |
4957 | high = 513; | 5818 | else if (bp->dev->mtu > 4096) { |
4958 | } else { | 5819 | if (bp->flags & ONE_PORT_FLAG) |
4959 | if (IS_MF(bp)) | 5820 | low = 160; |
4960 | low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); | 5821 | else { |
4961 | else if (bp->dev->mtu > 4096) { | 5822 | val = bp->dev->mtu; |
4962 | if (bp->flags & ONE_PORT_FLAG) | 5823 | /* (24*1024 + val*4)/256 */ |
4963 | low = 160; | 5824 | low = 96 + (val/64) + |
4964 | else { | 5825 | ((val % 64) ? 1 : 0); |
4965 | val = bp->dev->mtu; | 5826 | } |
4966 | /* (24*1024 + val*4)/256 */ | 5827 | } else |
4967 | low = 96 + (val/64) + | 5828 | low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); |
4968 | ((val % 64) ? 1 : 0); | 5829 | high = low + 56; /* 14*1024/256 */ |
4969 | } | ||
4970 | } else | ||
4971 | low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); | ||
4972 | high = low + 56; /* 14*1024/256 */ | ||
4973 | } | ||
4974 | REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); | 5830 | REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); |
4975 | REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); | 5831 | REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); |
4976 | } | 5832 | } |
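The low/high values programmed just above are BRB pause thresholds counted in 256-byte blocks. As a minimal sketch of the MTU-dependent branch (the helper name below is illustrative only, not part of the driver), the low watermark reserves roughly 24 KB plus four MTU-sized buffers, rounded up to a whole block, and the high watermark sits 14 KB (56 blocks) above it:

    /* Illustrative only: mirrors the "(24*1024 + val*4)/256" arithmetic above;
     * the result is in 256-byte BRB blocks.
     */
    static u32 example_brb_low_threshold(u32 mtu)
    {
            return 96 + (mtu / 64) + ((mtu % 64) ? 1 : 0);
    }

    /* e.g. mtu = 9000: low = 96 + 140 + 1 = 237 blocks (~60 KB),
     *      high = low + 56 = 293 blocks
     */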
4977 | 5833 | ||
4978 | if (CHIP_MODE_IS_4_PORT(bp)) { | 5834 | if (CHIP_MODE_IS_4_PORT(bp)) |
4979 | REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248); | 5835 | REG_WR(bp, (BP_PORT(bp) ? |
4980 | REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328); | 5836 | BRB1_REG_MAC_GUARANTIED_1 : |
4981 | REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 : | 5837 | BRB1_REG_MAC_GUARANTIED_0), 40); |
4982 | BRB1_REG_MAC_GUARANTIED_0), 40); | ||
4983 | } | ||
4984 | 5838 | ||
4985 | bnx2x_init_block(bp, PRS_BLOCK, init_stage); | ||
4986 | 5839 | ||
4987 | bnx2x_init_block(bp, TSDM_BLOCK, init_stage); | 5840 | bnx2x_init_block(bp, BLOCK_PRS, init_phase); |
4988 | bnx2x_init_block(bp, CSDM_BLOCK, init_stage); | 5841 | if (CHIP_IS_E3B0(bp)) |
4989 | bnx2x_init_block(bp, USDM_BLOCK, init_stage); | 5842 | /* Ovlan exists only if we are in multi-function + |
4990 | bnx2x_init_block(bp, XSDM_BLOCK, init_stage); | 5843 | * switch-dependent mode, in switch-independent there |
5844 | * are no ovlan headers | ||
5845 | */ | ||
5846 | REG_WR(bp, BP_PORT(bp) ? | ||
5847 | PRS_REG_HDRS_AFTER_BASIC_PORT_1 : | ||
5848 | PRS_REG_HDRS_AFTER_BASIC_PORT_0, | ||
5849 | (bp->path_has_ovlan ? 7 : 6)); | ||
4991 | 5850 | ||
4992 | bnx2x_init_block(bp, TSEM_BLOCK, init_stage); | 5851 | bnx2x_init_block(bp, BLOCK_TSDM, init_phase); |
4993 | bnx2x_init_block(bp, USEM_BLOCK, init_stage); | 5852 | bnx2x_init_block(bp, BLOCK_CSDM, init_phase); |
4994 | bnx2x_init_block(bp, CSEM_BLOCK, init_stage); | 5853 | bnx2x_init_block(bp, BLOCK_USDM, init_phase); |
4995 | bnx2x_init_block(bp, XSEM_BLOCK, init_stage); | 5854 | bnx2x_init_block(bp, BLOCK_XSDM, init_phase); |
4996 | if (CHIP_MODE_IS_4_PORT(bp)) | 5855 | |
4997 | bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage); | 5856 | bnx2x_init_block(bp, BLOCK_TSEM, init_phase); |
5857 | bnx2x_init_block(bp, BLOCK_USEM, init_phase); | ||
5858 | bnx2x_init_block(bp, BLOCK_CSEM, init_phase); | ||
5859 | bnx2x_init_block(bp, BLOCK_XSEM, init_phase); | ||
4998 | 5860 | ||
4999 | bnx2x_init_block(bp, UPB_BLOCK, init_stage); | 5861 | bnx2x_init_block(bp, BLOCK_UPB, init_phase); |
5000 | bnx2x_init_block(bp, XPB_BLOCK, init_stage); | 5862 | bnx2x_init_block(bp, BLOCK_XPB, init_phase); |
5001 | 5863 | ||
5002 | bnx2x_init_block(bp, PBF_BLOCK, init_stage); | 5864 | bnx2x_init_block(bp, BLOCK_PBF, init_phase); |
5003 | 5865 | ||
5004 | if (!CHIP_IS_E2(bp)) { | 5866 | if (CHIP_IS_E1x(bp)) { |
5005 | /* configure PBF to work without PAUSE mtu 9000 */ | 5867 | /* configure PBF to work without PAUSE mtu 9000 */ |
5006 | REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); | 5868 | REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); |
5007 | 5869 | ||
@@ -5017,20 +5879,20 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) | |||
5017 | } | 5879 | } |
5018 | 5880 | ||
5019 | #ifdef BCM_CNIC | 5881 | #ifdef BCM_CNIC |
5020 | bnx2x_init_block(bp, SRCH_BLOCK, init_stage); | 5882 | bnx2x_init_block(bp, BLOCK_SRC, init_phase); |
5021 | #endif | 5883 | #endif |
5022 | bnx2x_init_block(bp, CDU_BLOCK, init_stage); | 5884 | bnx2x_init_block(bp, BLOCK_CDU, init_phase); |
5023 | bnx2x_init_block(bp, CFC_BLOCK, init_stage); | 5885 | bnx2x_init_block(bp, BLOCK_CFC, init_phase); |
5024 | 5886 | ||
5025 | if (CHIP_IS_E1(bp)) { | 5887 | if (CHIP_IS_E1(bp)) { |
5026 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); | 5888 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); |
5027 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); | 5889 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); |
5028 | } | 5890 | } |
5029 | bnx2x_init_block(bp, HC_BLOCK, init_stage); | 5891 | bnx2x_init_block(bp, BLOCK_HC, init_phase); |
5030 | 5892 | ||
5031 | bnx2x_init_block(bp, IGU_BLOCK, init_stage); | 5893 | bnx2x_init_block(bp, BLOCK_IGU, init_phase); |
5032 | 5894 | ||
5033 | bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage); | 5895 | bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); |
5034 | /* init aeu_mask_attn_func_0/1: | 5896 | /* init aeu_mask_attn_func_0/1: |
5035 | * - SF mode: bits 3-7 are masked. only bits 0-2 are in use | 5897 | * - SF mode: bits 3-7 are masked. only bits 0-2 are in use |
5036 | * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF | 5898 | * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF |
@@ -5040,22 +5902,31 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) | |||
5040 | val |= CHIP_IS_E1(bp) ? 0 : 0x10; | 5902 | val |= CHIP_IS_E1(bp) ? 0 : 0x10; |
5041 | REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); | 5903 | REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); |
5042 | 5904 | ||
5043 | bnx2x_init_block(bp, PXPCS_BLOCK, init_stage); | 5905 | bnx2x_init_block(bp, BLOCK_NIG, init_phase); |
5044 | bnx2x_init_block(bp, EMAC0_BLOCK, init_stage); | ||
5045 | bnx2x_init_block(bp, EMAC1_BLOCK, init_stage); | ||
5046 | bnx2x_init_block(bp, DBU_BLOCK, init_stage); | ||
5047 | bnx2x_init_block(bp, DBG_BLOCK, init_stage); | ||
5048 | 5906 | ||
5049 | bnx2x_init_block(bp, NIG_BLOCK, init_stage); | 5907 | if (!CHIP_IS_E1x(bp)) { |
5908 | /* Bit-map indicating which L2 hdrs may appear after the | ||
5909 | * basic Ethernet header | ||
5910 | */ | ||
5911 | REG_WR(bp, BP_PORT(bp) ? | ||
5912 | NIG_REG_P1_HDRS_AFTER_BASIC : | ||
5913 | NIG_REG_P0_HDRS_AFTER_BASIC, | ||
5914 | IS_MF_SD(bp) ? 7 : 6); | ||
5050 | 5915 | ||
5051 | REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); | 5916 | if (CHIP_IS_E3(bp)) |
5917 | REG_WR(bp, BP_PORT(bp) ? | ||
5918 | NIG_REG_LLH1_MF_MODE : | ||
5919 | NIG_REG_LLH_MF_MODE, IS_MF(bp)); | ||
5920 | } | ||
5921 | if (!CHIP_IS_E3(bp)) | ||
5922 | REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); | ||
5052 | 5923 | ||
5053 | if (!CHIP_IS_E1(bp)) { | 5924 | if (!CHIP_IS_E1(bp)) { |
5054 | /* 0x2 disable mf_ov, 0x1 enable */ | 5925 | /* 0x2 disable mf_ov, 0x1 enable */ |
5055 | REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, | 5926 | REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, |
5056 | (IS_MF_SD(bp) ? 0x1 : 0x2)); | 5927 | (IS_MF_SD(bp) ? 0x1 : 0x2)); |
5057 | 5928 | ||
5058 | if (CHIP_IS_E2(bp)) { | 5929 | if (!CHIP_IS_E1x(bp)) { |
5059 | val = 0; | 5930 | val = 0; |
5060 | switch (bp->mf_mode) { | 5931 | switch (bp->mf_mode) { |
5061 | case MULTI_FUNCTION_SD: | 5932 | case MULTI_FUNCTION_SD: |
@@ -5076,17 +5947,16 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) | |||
5076 | } | 5947 | } |
5077 | } | 5948 | } |
5078 | 5949 | ||
5079 | bnx2x_init_block(bp, MCP_BLOCK, init_stage); | 5950 | |
5080 | bnx2x_init_block(bp, DMAE_BLOCK, init_stage); | 5951 | /* If SPIO5 is set to generate interrupts, enable it for this port */ |
5081 | if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base, | 5952 | val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); |
5082 | bp->common.shmem2_base, port)) { | 5953 | if (val & (1 << MISC_REGISTERS_SPIO_5)) { |
5083 | u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 5954 | u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
5084 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | 5955 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); |
5085 | val = REG_RD(bp, reg_addr); | 5956 | val = REG_RD(bp, reg_addr); |
5086 | val |= AEU_INPUTS_ATTN_BITS_SPIO5; | 5957 | val |= AEU_INPUTS_ATTN_BITS_SPIO5; |
5087 | REG_WR(bp, reg_addr, val); | 5958 | REG_WR(bp, reg_addr, val); |
5088 | } | 5959 | } |
5089 | bnx2x__link_reset(bp); | ||
5090 | 5960 | ||
5091 | return 0; | 5961 | return 0; |
5092 | } | 5962 | } |
@@ -5105,7 +5975,7 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) | |||
5105 | 5975 | ||
5106 | static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) | 5976 | static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) |
5107 | { | 5977 | { |
5108 | bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/); | 5978 | bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); |
5109 | } | 5979 | } |
5110 | 5980 | ||
5111 | static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) | 5981 | static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) |
@@ -5119,6 +5989,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5119 | { | 5989 | { |
5120 | int port = BP_PORT(bp); | 5990 | int port = BP_PORT(bp); |
5121 | int func = BP_FUNC(bp); | 5991 | int func = BP_FUNC(bp); |
5992 | int init_phase = PHASE_PF0 + func; | ||
5122 | struct bnx2x_ilt *ilt = BP_ILT(bp); | 5993 | struct bnx2x_ilt *ilt = BP_ILT(bp); |
5123 | u16 cdu_ilt_start; | 5994 | u16 cdu_ilt_start; |
5124 | u32 addr, val; | 5995 | u32 addr, val; |
@@ -5127,6 +5998,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5127 | 5998 | ||
5128 | DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); | 5999 | DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); |
5129 | 6000 | ||
6001 | /* FLR cleanup - hmmm */ | ||
6002 | if (!CHIP_IS_E1x(bp)) | ||
6003 | bnx2x_pf_flr_clnup(bp); | ||
6004 | |||
5130 | /* set MSI reconfigure capability */ | 6005 | /* set MSI reconfigure capability */ |
5131 | if (bp->common.int_block == INT_BLOCK_HC) { | 6006 | if (bp->common.int_block == INT_BLOCK_HC) { |
5132 | addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); | 6007 | addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); |
@@ -5135,6 +6010,9 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5135 | REG_WR(bp, addr, val); | 6010 | REG_WR(bp, addr, val); |
5136 | } | 6011 | } |
5137 | 6012 | ||
6013 | bnx2x_init_block(bp, BLOCK_PXP, init_phase); | ||
6014 | bnx2x_init_block(bp, BLOCK_PXP2, init_phase); | ||
6015 | |||
5138 | ilt = BP_ILT(bp); | 6016 | ilt = BP_ILT(bp); |
5139 | cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; | 6017 | cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; |
5140 | 6018 | ||
@@ -5160,7 +6038,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5160 | REG_WR(bp, PRS_REG_NIC_MODE, 1); | 6038 | REG_WR(bp, PRS_REG_NIC_MODE, 1); |
5161 | #endif /* BCM_CNIC */ | 6039 | #endif /* BCM_CNIC */ |
5162 | 6040 | ||
5163 | if (CHIP_IS_E2(bp)) { | 6041 | if (!CHIP_IS_E1x(bp)) { |
5164 | u32 pf_conf = IGU_PF_CONF_FUNC_EN; | 6042 | u32 pf_conf = IGU_PF_CONF_FUNC_EN; |
5165 | 6043 | ||
5166 | /* Turn on a single ISR mode in IGU if driver is going to use | 6044 | /* Turn on a single ISR mode in IGU if driver is going to use |
@@ -5187,58 +6065,55 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5187 | 6065 | ||
5188 | bp->dmae_ready = 1; | 6066 | bp->dmae_ready = 1; |
5189 | 6067 | ||
5190 | bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func); | 6068 | bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); |
5191 | 6069 | ||
5192 | if (CHIP_IS_E2(bp)) | 6070 | if (!CHIP_IS_E1x(bp)) |
5193 | REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); | 6071 | REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); |
5194 | 6072 | ||
5195 | bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func); | 6073 | bnx2x_init_block(bp, BLOCK_ATC, init_phase); |
5196 | bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func); | 6074 | bnx2x_init_block(bp, BLOCK_DMAE, init_phase); |
5197 | bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func); | 6075 | bnx2x_init_block(bp, BLOCK_NIG, init_phase); |
5198 | bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func); | 6076 | bnx2x_init_block(bp, BLOCK_SRC, init_phase); |
5199 | bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func); | 6077 | bnx2x_init_block(bp, BLOCK_MISC, init_phase); |
5200 | bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func); | 6078 | bnx2x_init_block(bp, BLOCK_TCM, init_phase); |
5201 | bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func); | 6079 | bnx2x_init_block(bp, BLOCK_UCM, init_phase); |
5202 | bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func); | 6080 | bnx2x_init_block(bp, BLOCK_CCM, init_phase); |
5203 | bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func); | 6081 | bnx2x_init_block(bp, BLOCK_XCM, init_phase); |
5204 | 6082 | bnx2x_init_block(bp, BLOCK_TSEM, init_phase); | |
5205 | if (CHIP_IS_E2(bp)) { | 6083 | bnx2x_init_block(bp, BLOCK_USEM, init_phase); |
5206 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET, | 6084 | bnx2x_init_block(bp, BLOCK_CSEM, init_phase); |
5207 | BP_PATH(bp)); | 6085 | bnx2x_init_block(bp, BLOCK_XSEM, init_phase); |
5208 | REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET, | 6086 | |
5209 | BP_PATH(bp)); | 6087 | if (!CHIP_IS_E1x(bp)) |
5210 | } | ||
5211 | |||
5212 | if (CHIP_MODE_IS_4_PORT(bp)) | ||
5213 | bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func); | ||
5214 | |||
5215 | if (CHIP_IS_E2(bp)) | ||
5216 | REG_WR(bp, QM_REG_PF_EN, 1); | 6088 | REG_WR(bp, QM_REG_PF_EN, 1); |
5217 | 6089 | ||
5218 | bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func); | 6090 | if (!CHIP_IS_E1x(bp)) { |
5219 | 6091 | REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); | |
5220 | if (CHIP_MODE_IS_4_PORT(bp)) | 6092 | REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); |
5221 | bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func); | 6093 | REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); |
5222 | 6094 | REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); | |
5223 | bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func); | 6095 | } |
5224 | bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func); | 6096 | bnx2x_init_block(bp, BLOCK_QM, init_phase); |
5225 | bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func); | 6097 | |
5226 | bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func); | 6098 | bnx2x_init_block(bp, BLOCK_TM, init_phase); |
5227 | bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func); | 6099 | bnx2x_init_block(bp, BLOCK_DORQ, init_phase); |
5228 | bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func); | 6100 | bnx2x_init_block(bp, BLOCK_BRB1, init_phase); |
5229 | bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func); | 6101 | bnx2x_init_block(bp, BLOCK_PRS, init_phase); |
5230 | bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func); | 6102 | bnx2x_init_block(bp, BLOCK_TSDM, init_phase); |
5231 | bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func); | 6103 | bnx2x_init_block(bp, BLOCK_CSDM, init_phase); |
5232 | bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func); | 6104 | bnx2x_init_block(bp, BLOCK_USDM, init_phase); |
5233 | bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func); | 6105 | bnx2x_init_block(bp, BLOCK_XSDM, init_phase); |
5234 | if (CHIP_IS_E2(bp)) | 6106 | bnx2x_init_block(bp, BLOCK_UPB, init_phase); |
6107 | bnx2x_init_block(bp, BLOCK_XPB, init_phase); | ||
6108 | bnx2x_init_block(bp, BLOCK_PBF, init_phase); | ||
6109 | if (!CHIP_IS_E1x(bp)) | ||
5235 | REG_WR(bp, PBF_REG_DISABLE_PF, 0); | 6110 | REG_WR(bp, PBF_REG_DISABLE_PF, 0); |
5236 | 6111 | ||
5237 | bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func); | 6112 | bnx2x_init_block(bp, BLOCK_CDU, init_phase); |
5238 | 6113 | ||
5239 | bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func); | 6114 | bnx2x_init_block(bp, BLOCK_CFC, init_phase); |
5240 | 6115 | ||
5241 | if (CHIP_IS_E2(bp)) | 6116 | if (!CHIP_IS_E1x(bp)) |
5242 | REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); | 6117 | REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); |
5243 | 6118 | ||
5244 | if (IS_MF(bp)) { | 6119 | if (IS_MF(bp)) { |
@@ -5246,7 +6121,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5246 | REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); | 6121 | REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); |
5247 | } | 6122 | } |
5248 | 6123 | ||
5249 | bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func); | 6124 | bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); |
5250 | 6125 | ||
5251 | /* HC init per function */ | 6126 | /* HC init per function */ |
5252 | if (bp->common.int_block == INT_BLOCK_HC) { | 6127 | if (bp->common.int_block == INT_BLOCK_HC) { |
@@ -5256,21 +6131,21 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5256 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); | 6131 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); |
5257 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); | 6132 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); |
5258 | } | 6133 | } |
5259 | bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func); | 6134 | bnx2x_init_block(bp, BLOCK_HC, init_phase); |
5260 | 6135 | ||
5261 | } else { | 6136 | } else { |
5262 | int num_segs, sb_idx, prod_offset; | 6137 | int num_segs, sb_idx, prod_offset; |
5263 | 6138 | ||
5264 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); | 6139 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); |
5265 | 6140 | ||
5266 | if (CHIP_IS_E2(bp)) { | 6141 | if (!CHIP_IS_E1x(bp)) { |
5267 | REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); | 6142 | REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); |
5268 | REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); | 6143 | REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); |
5269 | } | 6144 | } |
5270 | 6145 | ||
5271 | bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func); | 6146 | bnx2x_init_block(bp, BLOCK_IGU, init_phase); |
5272 | 6147 | ||
5273 | if (CHIP_IS_E2(bp)) { | 6148 | if (!CHIP_IS_E1x(bp)) { |
5274 | int dsb_idx = 0; | 6149 | int dsb_idx = 0; |
5275 | /** | 6150 | /** |
5276 | * Producer memory: | 6151 | * Producer memory: |
@@ -5365,13 +6240,6 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5365 | REG_WR(bp, 0x2114, 0xffffffff); | 6240 | REG_WR(bp, 0x2114, 0xffffffff); |
5366 | REG_WR(bp, 0x2120, 0xffffffff); | 6241 | REG_WR(bp, 0x2120, 0xffffffff); |
5367 | 6242 | ||
5368 | bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func); | ||
5369 | bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func); | ||
5370 | bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func); | ||
5371 | bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func); | ||
5372 | bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func); | ||
5373 | bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func); | ||
5374 | |||
5375 | if (CHIP_IS_E1x(bp)) { | 6243 | if (CHIP_IS_E1x(bp)) { |
5376 | main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ | 6244 | main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ |
5377 | main_mem_base = HC_REG_MAIN_MEMORY + | 6245 | main_mem_base = HC_REG_MAIN_MEMORY + |
@@ -5397,65 +6265,26 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
5397 | REG_RD(bp, main_mem_prty_clr); | 6265 | REG_RD(bp, main_mem_prty_clr); |
5398 | } | 6266 | } |
5399 | 6267 | ||
6268 | #ifdef BNX2X_STOP_ON_ERROR | ||
6269 | /* Enable STORMs SP logging */ | ||
6270 | REG_WR8(bp, BAR_USTRORM_INTMEM + | ||
6271 | USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); | ||
6272 | REG_WR8(bp, BAR_TSTRORM_INTMEM + | ||
6273 | TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); | ||
6274 | REG_WR8(bp, BAR_CSTRORM_INTMEM + | ||
6275 | CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); | ||
6276 | REG_WR8(bp, BAR_XSTRORM_INTMEM + | ||
6277 | XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); | ||
6278 | #endif | ||
6279 | |||
5400 | bnx2x_phy_probe(&bp->link_params); | 6280 | bnx2x_phy_probe(&bp->link_params); |
5401 | 6281 | ||
5402 | return 0; | 6282 | return 0; |
5403 | } | 6283 | } |
5404 | 6284 | ||
5405 | int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) | ||
5406 | { | ||
5407 | int rc = 0; | ||
5408 | |||
5409 | DP(BNX2X_MSG_MCP, "function %d load_code %x\n", | ||
5410 | BP_ABS_FUNC(bp), load_code); | ||
5411 | |||
5412 | bp->dmae_ready = 0; | ||
5413 | spin_lock_init(&bp->dmae_lock); | ||
5414 | |||
5415 | switch (load_code) { | ||
5416 | case FW_MSG_CODE_DRV_LOAD_COMMON: | ||
5417 | case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: | ||
5418 | rc = bnx2x_init_hw_common(bp, load_code); | ||
5419 | if (rc) | ||
5420 | goto init_hw_err; | ||
5421 | /* no break */ | ||
5422 | |||
5423 | case FW_MSG_CODE_DRV_LOAD_PORT: | ||
5424 | rc = bnx2x_init_hw_port(bp); | ||
5425 | if (rc) | ||
5426 | goto init_hw_err; | ||
5427 | /* no break */ | ||
5428 | |||
5429 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: | ||
5430 | rc = bnx2x_init_hw_func(bp); | ||
5431 | if (rc) | ||
5432 | goto init_hw_err; | ||
5433 | break; | ||
5434 | |||
5435 | default: | ||
5436 | BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); | ||
5437 | break; | ||
5438 | } | ||
5439 | |||
5440 | if (!BP_NOMCP(bp)) { | ||
5441 | int mb_idx = BP_FW_MB_IDX(bp); | ||
5442 | |||
5443 | bp->fw_drv_pulse_wr_seq = | ||
5444 | (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) & | ||
5445 | DRV_PULSE_SEQ_MASK); | ||
5446 | DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); | ||
5447 | } | ||
5448 | |||
5449 | init_hw_err: | ||
5450 | bnx2x_gunzip_end(bp); | ||
5451 | |||
5452 | return rc; | ||
5453 | } | ||
5454 | 6285 | ||
5455 | void bnx2x_free_mem(struct bnx2x *bp) | 6286 | void bnx2x_free_mem(struct bnx2x *bp) |
5456 | { | 6287 | { |
5457 | bnx2x_gunzip_end(bp); | ||
5458 | |||
5459 | /* fastpath */ | 6288 | /* fastpath */ |
5460 | bnx2x_free_fp_mem(bp); | 6289 | bnx2x_free_fp_mem(bp); |
5461 | /* end of fastpath */ | 6290 | /* end of fastpath */ |
@@ -5463,6 +6292,9 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
5463 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, | 6292 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, |
5464 | sizeof(struct host_sp_status_block)); | 6293 | sizeof(struct host_sp_status_block)); |
5465 | 6294 | ||
6295 | BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, | ||
6296 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); | ||
6297 | |||
5466 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, | 6298 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, |
5467 | sizeof(struct bnx2x_slowpath)); | 6299 | sizeof(struct bnx2x_slowpath)); |
5468 | 6300 | ||
@@ -5474,7 +6306,7 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
5474 | BNX2X_FREE(bp->ilt->lines); | 6306 | BNX2X_FREE(bp->ilt->lines); |
5475 | 6307 | ||
5476 | #ifdef BCM_CNIC | 6308 | #ifdef BCM_CNIC |
5477 | if (CHIP_IS_E2(bp)) | 6309 | if (!CHIP_IS_E1x(bp)) |
5478 | BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, | 6310 | BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, |
5479 | sizeof(struct host_hc_status_block_e2)); | 6311 | sizeof(struct host_hc_status_block_e2)); |
5480 | else | 6312 | else |
@@ -5488,18 +6320,67 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
5488 | 6320 | ||
5489 | BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, | 6321 | BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, |
5490 | BCM_PAGE_SIZE * NUM_EQ_PAGES); | 6322 | BCM_PAGE_SIZE * NUM_EQ_PAGES); |
6323 | } | ||
6324 | |||
6325 | static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) | ||
6326 | { | ||
6327 | int num_groups; | ||
6328 | |||
6329 | /* number of eth_queues */ | ||
6330 | u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp); | ||
6331 | |||
6332 | /* Total number of FW statistics requests = | ||
6333 | * 1 for port stats + 1 for PF stats + num_eth_queues */ | ||
6334 | bp->fw_stats_num = 2 + num_queue_stats; | ||
6335 | |||
6336 | |||
6337 | /* Request is built from stats_query_header and an array of | ||
6338 | * stats_query_cmd_group each of which contains | ||
6339 | * STATS_QUERY_CMD_COUNT rules. The real number of requests is | ||
6340 | * configured in the stats_query_header. | ||
6341 | */ | ||
6342 | num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT + | ||
6343 | (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0); | ||
6344 | |||
6345 | bp->fw_stats_req_sz = sizeof(struct stats_query_header) + | ||
6346 | num_groups * sizeof(struct stats_query_cmd_group); | ||
5491 | 6347 | ||
5492 | BNX2X_FREE(bp->rx_indir_table); | 6348 | /* Data for statistics requests + stats_counter |
6349 | * | ||
6350 | * stats_counter holds per-STORM counters that are incremented | ||
6351 | * when STORM has finished with the current request. | ||
6352 | */ | ||
6353 | bp->fw_stats_data_sz = sizeof(struct per_port_stats) + | ||
6354 | sizeof(struct per_pf_stats) + | ||
6355 | sizeof(struct per_queue_stats) * num_queue_stats + | ||
6356 | sizeof(struct stats_counter); | ||
6357 | |||
6358 | BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, | ||
6359 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); | ||
6360 | |||
6361 | /* Set shortcuts */ | ||
6362 | bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; | ||
6363 | bp->fw_stats_req_mapping = bp->fw_stats_mapping; | ||
6364 | |||
6365 | bp->fw_stats_data = (struct bnx2x_fw_stats_data *) | ||
6366 | ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); | ||
6367 | |||
6368 | bp->fw_stats_data_mapping = bp->fw_stats_mapping + | ||
6369 | bp->fw_stats_req_sz; | ||
6370 | return 0; | ||
6371 | |||
6372 | alloc_mem_err: | ||
6373 | BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, | ||
6374 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); | ||
6375 | return -ENOMEM; | ||
5493 | } | 6376 | } |
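A rough worked example of the sizing above, assuming a PF with 8 Ethernet queues and, purely for illustration, a STATS_QUERY_CMD_COUNT of 16 (the real constant comes from the FW HSI headers):

    /* Hypothetical helper repeating the round-up arithmetic above. */
    static int example_num_groups(int num_queue_stats)
    {
            int fw_stats_num = 2 + num_queue_stats;     /* port + PF + queues */

            return fw_stats_num / 16 + ((fw_stats_num % 16) ? 1 : 0);
    }

    /* 8 queues -> fw_stats_num = 10 -> 1 group. The single DMA-coherent buffer
     * then holds the request (header + 1 cmd group) followed immediately by the
     * data (per-port + per-PF + 8 per-queue stats + the stats counter), which
     * is exactly how the fw_stats_req/fw_stats_data shortcuts are laid out.
     */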
5494 | 6377 | ||
5495 | 6378 | ||
5496 | int bnx2x_alloc_mem(struct bnx2x *bp) | 6379 | int bnx2x_alloc_mem(struct bnx2x *bp) |
5497 | { | 6380 | { |
5498 | if (bnx2x_gunzip_init(bp)) | ||
5499 | return -ENOMEM; | ||
5500 | |||
5501 | #ifdef BCM_CNIC | 6381 | #ifdef BCM_CNIC |
5502 | if (CHIP_IS_E2(bp)) | 6382 | if (!CHIP_IS_E1x(bp)) |
6383 | /* size = the status block + ramrod buffers */ | ||
5503 | BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, | 6384 | BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, |
5504 | sizeof(struct host_hc_status_block_e2)); | 6385 | sizeof(struct host_hc_status_block_e2)); |
5505 | else | 6386 | else |
@@ -5517,6 +6398,10 @@ int bnx2x_alloc_mem(struct bnx2x *bp) | |||
5517 | BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, | 6398 | BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, |
5518 | sizeof(struct bnx2x_slowpath)); | 6399 | sizeof(struct bnx2x_slowpath)); |
5519 | 6400 | ||
6401 | /* Allocate memory for FW statistics */ | ||
6402 | if (bnx2x_alloc_fw_stats_mem(bp)) | ||
6403 | goto alloc_mem_err; | ||
6404 | |||
5520 | bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count; | 6405 | bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count; |
5521 | 6406 | ||
5522 | BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, | 6407 | BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, |
@@ -5534,8 +6419,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp) | |||
5534 | BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, | 6419 | BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, |
5535 | BCM_PAGE_SIZE * NUM_EQ_PAGES); | 6420 | BCM_PAGE_SIZE * NUM_EQ_PAGES); |
5536 | 6421 | ||
5537 | BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) * | ||
5538 | TSTORM_INDIRECTION_TABLE_SIZE); | ||
5539 | 6422 | ||
5540 | /* fastpath */ | 6423 | /* fastpath */ |
5541 | /* need to be done at the end, since it's self adjusting to amount | 6424 | /* need to be done at the end, since it's self adjusting to amount |
@@ -5553,235 +6436,76 @@ alloc_mem_err: | |||
5553 | /* | 6436 | /* |
5554 | * Init service functions | 6437 | * Init service functions |
5555 | */ | 6438 | */ |
5556 | int bnx2x_func_start(struct bnx2x *bp) | ||
5557 | { | ||
5558 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1); | ||
5559 | |||
5560 | /* Wait for completion */ | ||
5561 | return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state), | ||
5562 | WAIT_RAMROD_COMMON); | ||
5563 | } | ||
5564 | 6439 | ||
5565 | static int bnx2x_func_stop(struct bnx2x *bp) | 6440 | int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, |
6441 | struct bnx2x_vlan_mac_obj *obj, bool set, | ||
6442 | int mac_type, unsigned long *ramrod_flags) | ||
5566 | { | 6443 | { |
5567 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1); | 6444 | int rc; |
5568 | 6445 | struct bnx2x_vlan_mac_ramrod_params ramrod_param; | |
5569 | /* Wait for completion */ | ||
5570 | return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD, | ||
5571 | 0, &(bp->state), WAIT_RAMROD_COMMON); | ||
5572 | } | ||
5573 | |||
5574 | int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | ||
5575 | int *state_p, int flags) | ||
5576 | { | ||
5577 | /* can take a while if any port is running */ | ||
5578 | int cnt = 5000; | ||
5579 | u8 poll = flags & WAIT_RAMROD_POLL; | ||
5580 | u8 common = flags & WAIT_RAMROD_COMMON; | ||
5581 | 6446 | ||
5582 | DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n", | 6447 | memset(&ramrod_param, 0, sizeof(ramrod_param)); |
5583 | poll ? "polling" : "waiting", state, idx); | ||
5584 | 6448 | ||
5585 | might_sleep(); | 6449 | /* Fill general parameters */ |
5586 | while (cnt--) { | 6450 | ramrod_param.vlan_mac_obj = obj; |
5587 | if (poll) { | 6451 | ramrod_param.ramrod_flags = *ramrod_flags; |
5588 | if (common) | ||
5589 | bnx2x_eq_int(bp); | ||
5590 | else { | ||
5591 | bnx2x_rx_int(bp->fp, 10); | ||
5592 | /* if index is different from 0 | ||
5593 | * the reply for some commands will | ||
5594 | * be on the non default queue | ||
5595 | */ | ||
5596 | if (idx) | ||
5597 | bnx2x_rx_int(&bp->fp[idx], 10); | ||
5598 | } | ||
5599 | } | ||
5600 | 6452 | ||
5601 | mb(); /* state is changed by bnx2x_sp_event() */ | 6453 | /* Fill a user request section if needed */ |
5602 | if (*state_p == state) { | 6454 | if (!test_bit(RAMROD_CONT, ramrod_flags)) { |
5603 | #ifdef BNX2X_STOP_ON_ERROR | 6455 | memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); |
5604 | DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt); | ||
5605 | #endif | ||
5606 | return 0; | ||
5607 | } | ||
5608 | 6456 | ||
5609 | msleep(1); | 6457 | __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); |
5610 | 6458 | ||
5611 | if (bp->panic) | 6459 | /* Set the command: ADD or DEL */ |
5612 | return -EIO; | 6460 | if (set) |
6461 | ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; | ||
6462 | else | ||
6463 | ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; | ||
5613 | } | 6464 | } |
5614 | 6465 | ||
5615 | /* timeout! */ | 6466 | rc = bnx2x_config_vlan_mac(bp, &ramrod_param); |
5616 | BNX2X_ERR("timeout %s for state %x on IDX [%d]\n", | 6467 | if (rc < 0) |
5617 | poll ? "polling" : "waiting", state, idx); | 6468 | BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del")); |
5618 | #ifdef BNX2X_STOP_ON_ERROR | 6469 | return rc; |
5619 | bnx2x_panic(); | ||
5620 | #endif | ||
5621 | |||
5622 | return -EBUSY; | ||
5623 | } | ||
5624 | |||
5625 | static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset) | ||
5626 | { | ||
5627 | if (CHIP_IS_E1H(bp)) | ||
5628 | return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp); | ||
5629 | else if (CHIP_MODE_IS_4_PORT(bp)) | ||
5630 | return E2_FUNC_MAX * rel_offset + BP_FUNC(bp); | ||
5631 | else | ||
5632 | return E2_FUNC_MAX * rel_offset + BP_VN(bp); | ||
5633 | } | 6470 | } |
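For reference, a caller of the helper above typically looks like the sketch below (the function name is hypothetical; bnx2x_set_eth_mac() further down is the in-tree example of the same pattern): remove one unicast MAC from the leading client's MAC object and block until the ramrod completes.

    /* Sketch only: delete a single ETH MAC via the new VLAN/MAC object API. */
    static int example_del_one_mac(struct bnx2x *bp, u8 *mac)
    {
            unsigned long ramrod_flags = 0;

            __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
            return bnx2x_set_mac_one(bp, mac, &bp->fp->mac_obj, false,
                                     BNX2X_ETH_MAC, &ramrod_flags);
    }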
5634 | 6471 | ||
5635 | /** | 6472 | int bnx2x_del_all_macs(struct bnx2x *bp, |
5636 | * LLH CAM line allocations: currently only iSCSI and ETH macs are | 6473 | struct bnx2x_vlan_mac_obj *mac_obj, |
5637 | * relevant. In addition, current implementation is tuned for a | 6474 | int mac_type, bool wait_for_comp) |
5638 | * single ETH MAC. | ||
5639 | */ | ||
5640 | enum { | ||
5641 | LLH_CAM_ISCSI_ETH_LINE = 0, | ||
5642 | LLH_CAM_ETH_LINE, | ||
5643 | LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE | ||
5644 | }; | ||
5645 | |||
5646 | static void bnx2x_set_mac_in_nig(struct bnx2x *bp, | ||
5647 | int set, | ||
5648 | unsigned char *dev_addr, | ||
5649 | int index) | ||
5650 | { | 6475 | { |
5651 | u32 wb_data[2]; | 6476 | int rc; |
5652 | u32 mem_offset, ena_offset, mem_index; | 6477 | unsigned long ramrod_flags = 0, vlan_mac_flags = 0; |
5653 | /** | ||
5654 | * indexes mapping: | ||
5655 | * 0..7 - goes to MEM | ||
5656 | * 8..15 - goes to MEM2 | ||
5657 | */ | ||
5658 | |||
5659 | if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE) | ||
5660 | return; | ||
5661 | |||
5662 | /* calculate memory start offset according to the mapping | ||
5663 | * and index in the memory */ | ||
5664 | if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) { | ||
5665 | mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : | ||
5666 | NIG_REG_LLH0_FUNC_MEM; | ||
5667 | ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : | ||
5668 | NIG_REG_LLH0_FUNC_MEM_ENABLE; | ||
5669 | mem_index = index; | ||
5670 | } else { | ||
5671 | mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 : | ||
5672 | NIG_REG_P0_LLH_FUNC_MEM2; | ||
5673 | ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE : | ||
5674 | NIG_REG_P0_LLH_FUNC_MEM2_ENABLE; | ||
5675 | mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET; | ||
5676 | } | ||
5677 | |||
5678 | if (set) { | ||
5679 | /* LLH_FUNC_MEM is a u64 WB register */ | ||
5680 | mem_offset += 8*mem_index; | ||
5681 | |||
5682 | wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | | ||
5683 | (dev_addr[4] << 8) | dev_addr[5]); | ||
5684 | wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); | ||
5685 | |||
5686 | REG_WR_DMAE(bp, mem_offset, wb_data, 2); | ||
5687 | } | ||
5688 | |||
5689 | /* enable/disable the entry */ | ||
5690 | REG_WR(bp, ena_offset + 4*mem_index, set); | ||
5691 | |||
5692 | } | ||
5693 | 6478 | ||
5694 | void bnx2x_set_eth_mac(struct bnx2x *bp, int set) | 6479 | /* Wait for completion of requested */ |
5695 | { | 6480 | if (wait_for_comp) |
5696 | u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) : | 6481 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); |
5697 | bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE)); | ||
5698 | 6482 | ||
5699 | /* networking MAC */ | 6483 | /* Set the mac type of addresses we want to clear */ |
5700 | bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr, | 6484 | __set_bit(mac_type, &vlan_mac_flags); |
5701 | (1 << bp->fp->cl_id), cam_offset , 0); | ||
5702 | 6485 | ||
5703 | bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE); | 6486 | rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); |
6487 | if (rc < 0) | ||
6488 | BNX2X_ERR("Failed to delete MACs: %d\n", rc); | ||
5704 | 6489 | ||
5705 | if (CHIP_IS_E1(bp)) { | 6490 | return rc; |
5706 | /* broadcast MAC */ | ||
5707 | static const u8 bcast[ETH_ALEN] = { | ||
5708 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff | ||
5709 | }; | ||
5710 | bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1); | ||
5711 | } | ||
5712 | } | 6491 | } |
5713 | 6492 | ||
5714 | #ifdef BCM_CNIC | 6493 | int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) |
5715 | /** | ||
5716 | * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). | ||
5717 | * | ||
5718 | * @bp: driver handle | ||
5719 | * @set: set or clear the CAM entry | ||
5720 | * | ||
5721 | * This function will wait until the ramrod completion returns. | ||
5722 | * Return 0 if success, -ENODEV if ramrod doesn't return. | ||
5723 | */ | ||
5724 | static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) | ||
5725 | { | 6494 | { |
5726 | u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) : | 6495 | unsigned long ramrod_flags = 0; |
5727 | bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE)); | ||
5728 | u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID + | ||
5729 | BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; | ||
5730 | u32 cl_bit_vec = (1 << iscsi_l2_cl_id); | ||
5731 | u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; | ||
5732 | |||
5733 | /* Send a SET_MAC ramrod */ | ||
5734 | bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec, | ||
5735 | cam_offset, 0); | ||
5736 | |||
5737 | bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE); | ||
5738 | |||
5739 | return 0; | ||
5740 | } | ||
5741 | 6496 | ||
5742 | /** | 6497 | DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); |
5743 | * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s) | ||
5744 | * | ||
5745 | * @bp: driver handle | ||
5746 | * @set: set or clear the CAM entry | ||
5747 | * | ||
5748 | * This function will wait until the ramrod completion returns. | ||
5749 | * Returns 0 if success, -ENODEV if ramrod doesn't return. | ||
5750 | */ | ||
5751 | int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set) | ||
5752 | { | ||
5753 | u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id)); | ||
5754 | /** | ||
5755 | * CAM allocation for E1H | ||
5756 | * eth unicasts: by func number | ||
5757 | * iscsi: by func number | ||
5758 | * fip unicast: by func number | ||
5759 | * fip multicast: by func number | ||
5760 | */ | ||
5761 | bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac, | ||
5762 | cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0); | ||
5763 | 6498 | ||
5764 | return 0; | 6499 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); |
6500 | /* Eth MAC is set on RSS leading client (fp[0]) */ | ||
6501 | return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set, | ||
6502 | BNX2X_ETH_MAC, &ramrod_flags); | ||
5765 | } | 6503 | } |
5766 | 6504 | ||
5767 | int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set) | 6505 | int bnx2x_setup_leading(struct bnx2x *bp) |
5768 | { | 6506 | { |
5769 | u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id)); | 6507 | return bnx2x_setup_queue(bp, &bp->fp[0], 1); |
5770 | |||
5771 | /** | ||
5772 | * CAM allocation for E1H | ||
5773 | * eth unicasts: by func number | ||
5774 | * iscsi: by func number | ||
5775 | * fip unicast: by func number | ||
5776 | * fip multicast: by func number | ||
5777 | */ | ||
5778 | bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec, | ||
5779 | bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0); | ||
5780 | |||
5781 | return 0; | ||
5782 | } | 6508 | } |
5783 | #endif | ||
5784 | |||
5785 | 6509 | ||
5786 | /** | 6510 | /** |
5787 | * bnx2x_set_int_mode - configure interrupt mode | 6511 | * bnx2x_set_int_mode - configure interrupt mode |
@@ -5792,7 +6516,6 @@ int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set) | |||
5792 | */ | 6516 | */ |
5793 | static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) | 6517 | static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) |
5794 | { | 6518 | { |
5795 | |||
5796 | switch (int_mode) { | 6519 | switch (int_mode) { |
5797 | case INT_MODE_MSI: | 6520 | case INT_MODE_MSI: |
5798 | bnx2x_enable_msi(bp); | 6521 | bnx2x_enable_msi(bp); |
@@ -5852,7 +6575,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) | |||
5852 | ilt_client->page_size = CDU_ILT_PAGE_SZ; | 6575 | ilt_client->page_size = CDU_ILT_PAGE_SZ; |
5853 | ilt_client->flags = ILT_CLIENT_SKIP_MEM; | 6576 | ilt_client->flags = ILT_CLIENT_SKIP_MEM; |
5854 | ilt_client->start = line; | 6577 | ilt_client->start = line; |
5855 | line += L2_ILT_LINES(bp); | 6578 | line += bnx2x_cid_ilt_lines(bp); |
5856 | #ifdef BCM_CNIC | 6579 | #ifdef BCM_CNIC |
5857 | line += CNIC_ILT_LINES; | 6580 | line += CNIC_ILT_LINES; |
5858 | #endif | 6581 | #endif |
@@ -5932,12 +6655,72 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) | |||
5932 | #else | 6655 | #else |
5933 | ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); | 6656 | ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); |
5934 | #endif | 6657 | #endif |
6658 | BUG_ON(line > ILT_MAX_LINES); | ||
6659 | } | ||
6660 | |||
6661 | /** | ||
6662 | * bnx2x_pf_q_prep_init - prepare INIT transition parameters | ||
6663 | * | ||
6664 | * @bp: driver handle | ||
6665 | * @fp: pointer to fastpath | ||
6666 | * @init_params: pointer to parameters structure | ||
6667 | * | ||
6668 | * parameters configured: | ||
6669 | * - HC configuration | ||
6670 | * - Queue's CDU context | ||
6671 | */ | ||
6672 | static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp, | ||
6673 | struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) | ||
6674 | { | ||
6675 | /* FCoE Queue uses Default SB, thus has no HC capabilities */ | ||
6676 | if (!IS_FCOE_FP(fp)) { | ||
6677 | __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); | ||
6678 | __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); | ||
6679 | |||
6680 | /* If HC is supported, enable host coalescing in the transition | ||
6681 | * to INIT state. | ||
6682 | */ | ||
6683 | __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); | ||
6684 | __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags); | ||
6685 | |||
6686 | /* HC rate */ | ||
6687 | init_params->rx.hc_rate = bp->rx_ticks ? | ||
6688 | (1000000 / bp->rx_ticks) : 0; | ||
6689 | init_params->tx.hc_rate = bp->tx_ticks ? | ||
6690 | (1000000 / bp->tx_ticks) : 0; | ||
6691 | |||
6692 | /* FW SB ID */ | ||
6693 | init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = | ||
6694 | fp->fw_sb_id; | ||
6695 | |||
6696 | /* | ||
6697 | * CQ index among the SB indices: FCoE clients use the default | ||
6698 | * SB, therefore it's different. | ||
6699 | */ | ||
6700 | init_params->rx.sb_cq_index = U_SB_ETH_RX_CQ_INDEX; | ||
6701 | init_params->tx.sb_cq_index = C_SB_ETH_TX_CQ_INDEX; | ||
6702 | } | ||
6703 | |||
6704 | init_params->cxt = &bp->context.vcxt[fp->cid].eth; | ||
5935 | } | 6705 | } |
5936 | 6706 | ||
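The hc_rate values prepared above are just the coalescing periods turned into an event rate: rx_ticks/tx_ticks hold the per-direction coalescing intervals in microseconds (the values exposed through ethtool), so 1000000/ticks gives events per second and 0 leaves coalescing disabled. A minimal sketch of that conversion, with an illustrative helper name:

    /* Illustrative only: microsecond coalescing period -> HC event rate. */
    static u32 example_hc_rate(u32 ticks_usec)
    {
            return ticks_usec ? (1000000 / ticks_usec) : 0;
    }

    /* e.g. rx_ticks = 25 us -> hc_rate = 40000 events/sec */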
5937 | int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp, | 6707 | /** |
5938 | int is_leading) | 6708 | * bnx2x_setup_queue - setup queue |
6709 | * | ||
6710 | * @bp: driver handle | ||
6711 | * @fp: pointer to fastpath | ||
6712 | * @leading: is leading | ||
6713 | * | ||
6714 | * This function performs 2 steps in a Queue state machine | ||
6715 | * actually: 1) RESET->INIT 2) INIT->SETUP | ||
6716 | */ | ||
6717 | |||
6718 | int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
6719 | bool leading) | ||
5939 | { | 6720 | { |
5940 | struct bnx2x_client_init_params params = { {0} }; | 6721 | struct bnx2x_queue_state_params q_params = {0}; |
6722 | struct bnx2x_queue_setup_params *setup_params = | ||
6723 | &q_params.params.setup; | ||
5941 | int rc; | 6724 | int rc; |
5942 | 6725 | ||
5943 | /* reset IGU state skip FCoE L2 queue */ | 6726 | /* reset IGU state skip FCoE L2 queue */ |
@@ -5945,79 +6728,73 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
5945 | bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, | 6728 | bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, |
5946 | IGU_INT_ENABLE, 0); | 6729 | IGU_INT_ENABLE, 0); |
5947 | 6730 | ||
5948 | params.ramrod_params.pstate = &fp->state; | 6731 | q_params.q_obj = &fp->q_obj; |
5949 | params.ramrod_params.state = BNX2X_FP_STATE_OPEN; | 6732 | /* We want to wait for completion in this context */ |
5950 | params.ramrod_params.index = fp->index; | 6733 | __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); |
5951 | params.ramrod_params.cid = fp->cid; | ||
5952 | 6734 | ||
5953 | #ifdef BCM_CNIC | 6735 | /* Prepare the INIT parameters */ |
5954 | if (IS_FCOE_FP(fp)) | 6736 | bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init); |
5955 | params.ramrod_params.flags |= CLIENT_IS_FCOE; | ||
5956 | 6737 | ||
5957 | #endif | 6738 | /* Set the command */ |
6739 | q_params.cmd = BNX2X_Q_CMD_INIT; | ||
5958 | 6740 | ||
5959 | if (is_leading) | 6741 | /* Change the state to INIT */ |
5960 | params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS; | 6742 | rc = bnx2x_queue_state_change(bp, &q_params); |
6743 | if (rc) { | ||
6744 | BNX2X_ERR("Queue INIT failed\n"); | ||
6745 | return rc; | ||
6746 | } | ||
6747 | |||
6748 | /* Now move the Queue to the SETUP state... */ | ||
6749 | memset(setup_params, 0, sizeof(*setup_params)); | ||
6750 | |||
6751 | /* Set QUEUE flags */ | ||
6752 | setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); | ||
5961 | 6753 | ||
5962 | bnx2x_pf_rx_cl_prep(bp, fp, ¶ms.pause, ¶ms.rxq_params); | 6754 | /* Set general SETUP parameters */ |
6755 | bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params); | ||
5963 | 6756 | ||
5964 | bnx2x_pf_tx_cl_prep(bp, fp, ¶ms.txq_params); | 6757 | bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause, |
6758 | &setup_params->rxq_params); | ||
6759 | |||
6760 | bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params); | ||
6761 | |||
6762 | /* Set the command */ | ||
6763 | q_params.cmd = BNX2X_Q_CMD_SETUP; | ||
6764 | |||
6765 | /* Change the state to SETUP */ | ||
6766 | rc = bnx2x_queue_state_change(bp, &q_params); | ||
6767 | if (rc) | ||
6768 | BNX2X_ERR("Queue SETUP failed\n"); | ||
5965 | 6769 | ||
5966 | rc = bnx2x_setup_fw_client(bp, ¶ms, 1, | ||
5967 | bnx2x_sp(bp, client_init_data), | ||
5968 | bnx2x_sp_mapping(bp, client_init_data)); | ||
5969 | return rc; | 6770 | return rc; |
5970 | } | 6771 | } |
5971 | 6772 | ||
5972 | static int bnx2x_stop_fw_client(struct bnx2x *bp, | 6773 | static int bnx2x_stop_queue(struct bnx2x *bp, int index) |
5973 | struct bnx2x_client_ramrod_params *p) | ||
5974 | { | 6774 | { |
6775 | struct bnx2x_fastpath *fp = &bp->fp[index]; | ||
6776 | struct bnx2x_queue_state_params q_params = {0}; | ||
5975 | int rc; | 6777 | int rc; |
5976 | 6778 | ||
5977 | int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0; | 6779 | q_params.q_obj = &fp->q_obj; |
6780 | /* We want to wait for completion in this context */ | ||
6781 | __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); | ||
5978 | 6782 | ||
5979 | /* halt the connection */ | 6783 | /* halt the connection */ |
5980 | *p->pstate = BNX2X_FP_STATE_HALTING; | 6784 | q_params.cmd = BNX2X_Q_CMD_HALT; |
5981 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0, | 6785 | rc = bnx2x_queue_state_change(bp, &q_params); |
5982 | p->cl_id, 0); | 6786 | if (rc) |
5983 | |||
5984 | /* Wait for completion */ | ||
5985 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index, | ||
5986 | p->pstate, poll_flag); | ||
5987 | if (rc) /* timeout */ | ||
5988 | return rc; | 6787 | return rc; |
5989 | 6788 | ||
5990 | *p->pstate = BNX2X_FP_STATE_TERMINATING; | 6789 | /* terminate the connection */ |
5991 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0, | 6790 | q_params.cmd = BNX2X_Q_CMD_TERMINATE; |
5992 | p->cl_id, 0); | 6791 | rc = bnx2x_queue_state_change(bp, &q_params); |
5993 | /* Wait for completion */ | 6792 | if (rc) |
5994 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index, | ||
5995 | p->pstate, poll_flag); | ||
5996 | if (rc) /* timeout */ | ||
5997 | return rc; | 6793 | return rc; |
5998 | 6794 | ||
5999 | |||
6000 | /* delete cfc entry */ | 6795 | /* delete cfc entry */ |
6001 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1); | 6796 | q_params.cmd = BNX2X_Q_CMD_CFC_DEL; |
6002 | 6797 | return bnx2x_queue_state_change(bp, &q_params); | |
6003 | /* Wait for completion */ | ||
6004 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index, | ||
6005 | p->pstate, WAIT_RAMROD_COMMON); | ||
6006 | return rc; | ||
6007 | } | ||
6008 | |||
6009 | static int bnx2x_stop_client(struct bnx2x *bp, int index) | ||
6010 | { | ||
6011 | struct bnx2x_client_ramrod_params client_stop = {0}; | ||
6012 | struct bnx2x_fastpath *fp = &bp->fp[index]; | ||
6013 | |||
6014 | client_stop.index = index; | ||
6015 | client_stop.cid = fp->cid; | ||
6016 | client_stop.cl_id = fp->cl_id; | ||
6017 | client_stop.pstate = &(fp->state); | ||
6018 | client_stop.poll = 0; | ||
6019 | |||
6020 | return bnx2x_stop_fw_client(bp, &client_stop); | ||
6021 | } | 6798 | } |
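The rewritten teardown above replaces the hand-rolled HALT/TERMINATE/CFC_DEL ramrod posting and polling with three commands issued through the generic queue state-change entry point, bailing out on the first failure. A minimal standalone C sketch of that sequencing pattern follows; the enum, the stop_queue() helper and the queue_state_change() stub are illustrative stand-ins, not the driver's types.

#include <stddef.h>
#include <stdio.h>

enum q_cmd { Q_CMD_HALT, Q_CMD_TERMINATE, Q_CMD_CFC_DEL };

/* stand-in for bnx2x_queue_state_change(): pretend every command completes */
static int queue_state_change(int queue_index, enum q_cmd cmd)
{
	printf("queue %d: command %d completed\n", queue_index, cmd);
	return 0;
}

static int stop_queue(int queue_index)
{
	static const enum q_cmd sequence[] = {
		Q_CMD_HALT, Q_CMD_TERMINATE, Q_CMD_CFC_DEL
	};
	size_t i;
	int rc;

	for (i = 0; i < sizeof(sequence) / sizeof(sequence[0]); i++) {
		rc = queue_state_change(queue_index, sequence[i]);
		if (rc)		/* the first failure aborts the whole teardown */
			return rc;
	}
	return 0;
}

int main(void)
{
	return stop_queue(0);
}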
6022 | 6799 | ||
6023 | 6800 | ||
@@ -6026,12 +6803,6 @@ static void bnx2x_reset_func(struct bnx2x *bp) | |||
6026 | int port = BP_PORT(bp); | 6803 | int port = BP_PORT(bp); |
6027 | int func = BP_FUNC(bp); | 6804 | int func = BP_FUNC(bp); |
6028 | int i; | 6805 | int i; |
6029 | int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) + | ||
6030 | (CHIP_IS_E2(bp) ? | ||
6031 | offsetof(struct hc_status_block_data_e2, common) : | ||
6032 | offsetof(struct hc_status_block_data_e1x, common)); | ||
6033 | int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func); | ||
6034 | int pfid_offset = offsetof(struct pci_entity, pf_id); | ||
6035 | 6806 | ||
6036 | /* Disable the function in the FW */ | 6807 | /* Disable the function in the FW */ |
6037 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); | 6808 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); |
@@ -6042,20 +6813,21 @@ static void bnx2x_reset_func(struct bnx2x *bp) | |||
6042 | /* FP SBs */ | 6813 | /* FP SBs */ |
6043 | for_each_eth_queue(bp, i) { | 6814 | for_each_eth_queue(bp, i) { |
6044 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 6815 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
6045 | REG_WR8(bp, | 6816 | REG_WR8(bp, BAR_CSTRORM_INTMEM + |
6046 | BAR_CSTRORM_INTMEM + | 6817 | CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), |
6047 | CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) | 6818 | SB_DISABLED); |
6048 | + pfunc_offset_fp + pfid_offset, | ||
6049 | HC_FUNCTION_DISABLED); | ||
6050 | } | 6819 | } |
6051 | 6820 | ||
6821 | #ifdef BCM_CNIC | ||
6822 | /* CNIC SB */ | ||
6823 | REG_WR8(bp, BAR_CSTRORM_INTMEM + | ||
6824 | CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)), | ||
6825 | SB_DISABLED); | ||
6826 | #endif | ||
6052 | /* SP SB */ | 6827 | /* SP SB */ |
6053 | REG_WR8(bp, | 6828 | REG_WR8(bp, BAR_CSTRORM_INTMEM + |
6054 | BAR_CSTRORM_INTMEM + | 6829 | CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), |
6055 | CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + | 6830 | SB_DISABLED); |
6056 | pfunc_offset_sp + pfid_offset, | ||
6057 | HC_FUNCTION_DISABLED); | ||
6058 | |||
6059 | 6831 | ||
6060 | for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) | 6832 | for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) |
6061 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), | 6833 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), |
@@ -6089,7 +6861,7 @@ static void bnx2x_reset_func(struct bnx2x *bp) | |||
6089 | /* Timers workaround bug for E2: if this is vnic-3, | 6861 | /* Timers workaround bug for E2: if this is vnic-3, |
6090 | * we need to set the entire ilt range for this timers. | 6862 | * we need to set the entire ilt range for this timers. |
6091 | */ | 6863 | */ |
6092 | if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) { | 6864 | if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) { |
6093 | struct ilt_client_info ilt_cli; | 6865 | struct ilt_client_info ilt_cli; |
6094 | /* use dummy TM client */ | 6866 | /* use dummy TM client */ |
6095 | memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); | 6867 | memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); |
@@ -6101,7 +6873,7 @@ static void bnx2x_reset_func(struct bnx2x *bp) | |||
6101 | } | 6873 | } |
6102 | 6874 | ||
6103 | /* this assumes that reset_port() called before reset_func()*/ | 6875 | /* this assumes that reset_port() called before reset_func()*/ |
6104 | if (CHIP_IS_E2(bp)) | 6876 | if (!CHIP_IS_E1x(bp)) |
6105 | bnx2x_pf_disable(bp); | 6877 | bnx2x_pf_disable(bp); |
6106 | 6878 | ||
6107 | bp->dmae_ready = 0; | 6879 | bp->dmae_ready = 0; |
@@ -6112,6 +6884,9 @@ static void bnx2x_reset_port(struct bnx2x *bp) | |||
6112 | int port = BP_PORT(bp); | 6884 | int port = BP_PORT(bp); |
6113 | u32 val; | 6885 | u32 val; |
6114 | 6886 | ||
6887 | /* Reset physical Link */ | ||
6888 | bnx2x__link_reset(bp); | ||
6889 | |||
6115 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); | 6890 | REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); |
6116 | 6891 | ||
6117 | /* Do not rcv packets to BRB */ | 6892 | /* Do not rcv packets to BRB */ |
@@ -6133,92 +6908,66 @@ static void bnx2x_reset_port(struct bnx2x *bp) | |||
6133 | /* TODO: Close Doorbell port? */ | 6908 | /* TODO: Close Doorbell port? */ |
6134 | } | 6909 | } |
6135 | 6910 | ||
6136 | static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) | 6911 | static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) |
6137 | { | 6912 | { |
6138 | DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", | 6913 | struct bnx2x_func_state_params func_params = {0}; |
6139 | BP_ABS_FUNC(bp), reset_code); | ||
6140 | 6914 | ||
6141 | switch (reset_code) { | 6915 | /* Prepare parameters for function state transitions */ |
6142 | case FW_MSG_CODE_DRV_UNLOAD_COMMON: | 6916 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); |
6143 | bnx2x_reset_port(bp); | ||
6144 | bnx2x_reset_func(bp); | ||
6145 | bnx2x_reset_common(bp); | ||
6146 | break; | ||
6147 | |||
6148 | case FW_MSG_CODE_DRV_UNLOAD_PORT: | ||
6149 | bnx2x_reset_port(bp); | ||
6150 | bnx2x_reset_func(bp); | ||
6151 | break; | ||
6152 | |||
6153 | case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: | ||
6154 | bnx2x_reset_func(bp); | ||
6155 | break; | ||
6156 | |||
6157 | default: | ||
6158 | BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code); | ||
6159 | break; | ||
6160 | } | ||
6161 | } | ||
6162 | 6917 | ||
6163 | #ifdef BCM_CNIC | 6918 | func_params.f_obj = &bp->func_obj; |
6164 | static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp) | 6919 | func_params.cmd = BNX2X_F_CMD_HW_RESET; |
6165 | { | ||
6166 | if (bp->flags & FCOE_MACS_SET) { | ||
6167 | if (!IS_MF_SD(bp)) | ||
6168 | bnx2x_set_fip_eth_mac_addr(bp, 0); | ||
6169 | 6920 | ||
6170 | bnx2x_set_all_enode_macs(bp, 0); | 6921 | func_params.params.hw_init.load_phase = load_code; |
6171 | 6922 | ||
6172 | bp->flags &= ~FCOE_MACS_SET; | 6923 | return bnx2x_func_state_change(bp, &func_params); |
6173 | } | ||
6174 | } | 6924 | } |
6175 | #endif | ||
6176 | 6925 | ||
6177 | void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) | 6926 | static inline int bnx2x_func_stop(struct bnx2x *bp) |
6178 | { | 6927 | { |
6179 | int port = BP_PORT(bp); | 6928 | struct bnx2x_func_state_params func_params = {0}; |
6180 | u32 reset_code = 0; | 6929 | int rc; |
6181 | int i, cnt, rc; | ||
6182 | |||
6183 | /* Wait until tx fastpath tasks complete */ | ||
6184 | for_each_tx_queue(bp, i) { | ||
6185 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
6186 | 6930 | ||
6187 | cnt = 1000; | 6931 | /* Prepare parameters for function state transitions */ |
6188 | while (bnx2x_has_tx_work_unload(fp)) { | 6932 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); |
6933 | func_params.f_obj = &bp->func_obj; | ||
6934 | func_params.cmd = BNX2X_F_CMD_STOP; | ||
6189 | 6935 | ||
6190 | if (!cnt) { | 6936 | /* |
6191 | BNX2X_ERR("timeout waiting for queue[%d]\n", | 6937 | * Try to stop the function the 'good way'. If it fails (in case |
6192 | i); | 6938 | * of a parity error during bnx2x_chip_cleanup()) and we are |
6939 | * not in a debug mode, perform a state transaction in order to | ||
6940 | * enable a further HW_RESET transaction. | ||
6941 | */ | ||
6942 | rc = bnx2x_func_state_change(bp, &func_params); | ||
6943 | if (rc) { | ||
6193 | #ifdef BNX2X_STOP_ON_ERROR | 6944 | #ifdef BNX2X_STOP_ON_ERROR |
6194 | bnx2x_panic(); | 6945 | return rc; |
6195 | return -EBUSY; | ||
6196 | #else | 6946 | #else |
6197 | break; | 6947 | BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry " |
6948 | "transaction\n"); | ||
6949 | __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); | ||
6950 | return bnx2x_func_state_change(bp, &func_params); | ||
6198 | #endif | 6951 | #endif |
6199 | } | ||
6200 | cnt--; | ||
6201 | msleep(1); | ||
6202 | } | ||
6203 | } | 6952 | } |
6204 | /* Give HW time to discard old tx messages */ | ||
6205 | msleep(1); | ||
6206 | |||
6207 | bnx2x_set_eth_mac(bp, 0); | ||
6208 | 6953 | ||
6209 | bnx2x_invalidate_uc_list(bp); | 6954 | return 0; |
6210 | 6955 | } | |
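bnx2x_func_stop() above introduces a fallback: when the FUNC_STOP ramrod fails outside of a debug build, the same transition is repeated with a driver-only flag so the software state machine still advances and a later HW_RESET remains possible. A small self-contained sketch of that retry-with-flag pattern, with made-up flag names and a func_state_change() stub standing in for the real state-change call:

#include <stdio.h>

#define FLAG_COMP_WAIT		(1u << 0)
#define FLAG_DRV_CLR_ONLY	(1u << 1)

/* stand-in for bnx2x_func_state_change(); fails unless the driver-only
 * flag is set, just to exercise the fallback path */
static int func_state_change(unsigned int flags)
{
	if (!(flags & FLAG_DRV_CLR_ONLY))
		return -1;	/* pretend the FUNC_STOP ramrod failed */
	return 0;
}

static int func_stop(void)
{
	unsigned int flags = FLAG_COMP_WAIT;
	int rc = func_state_change(flags);

	if (rc) {
		fprintf(stderr, "FUNC_STOP failed, running a dry transaction\n");
		flags |= FLAG_DRV_CLR_ONLY;
		rc = func_state_change(flags);
	}
	return rc;
}

int main(void)
{
	return func_stop();
}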
6211 | if (CHIP_IS_E1(bp)) | ||
6212 | bnx2x_invalidate_e1_mc_list(bp); | ||
6213 | else { | ||
6214 | bnx2x_invalidate_e1h_mc_list(bp); | ||
6215 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); | ||
6216 | } | ||
6217 | 6956 | ||
6218 | #ifdef BCM_CNIC | 6957 | /** |
6219 | bnx2x_del_fcoe_eth_macs(bp); | 6958 | * bnx2x_send_unload_req - request unload mode from the MCP. |
6220 | #endif | 6959 | * |
6960 | * @bp: driver handle | ||
6961 | * @unload_mode: requested function's unload mode | ||
6962 | * | ||
6963 | * Return unload mode returned by the MCP: COMMON, PORT or FUNC. | ||
6964 | */ | ||
6965 | u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) | ||
6966 | { | ||
6967 | u32 reset_code = 0; | ||
6968 | int port = BP_PORT(bp); | ||
6221 | 6969 | ||
6970 | /* Select the UNLOAD request mode */ | ||
6222 | if (unload_mode == UNLOAD_NORMAL) | 6971 | if (unload_mode == UNLOAD_NORMAL) |
6223 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | 6972 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
6224 | 6973 | ||
@@ -6245,54 +6994,135 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) | |||
6245 | } else | 6994 | } else |
6246 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | 6995 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
6247 | 6996 | ||
6997 | /* Send the request to the MCP */ | ||
6998 | if (!BP_NOMCP(bp)) | ||
6999 | reset_code = bnx2x_fw_command(bp, reset_code, 0); | ||
7000 | else { | ||
7001 | int path = BP_PATH(bp); | ||
7002 | |||
7003 | DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] " | ||
7004 | "%d, %d, %d\n", | ||
7005 | path, load_count[path][0], load_count[path][1], | ||
7006 | load_count[path][2]); | ||
7007 | load_count[path][0]--; | ||
7008 | load_count[path][1 + port]--; | ||
7009 | DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] " | ||
7010 | "%d, %d, %d\n", | ||
7011 | path, load_count[path][0], load_count[path][1], | ||
7012 | load_count[path][2]); | ||
7013 | if (load_count[path][0] == 0) | ||
7014 | reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; | ||
7015 | else if (load_count[path][1 + port] == 0) | ||
7016 | reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; | ||
7017 | else | ||
7018 | reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; | ||
7019 | } | ||
7020 | |||
7021 | return reset_code; | ||
7022 | } | ||
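In the no-MCP branch of bnx2x_send_unload_req() the unload scope is derived purely from per-path load counters: slot 0 counts functions still loaded on the path and slots 1-2 count functions per port, so the last function to go picks COMMON, the last one on its port picks PORT, and everything else only resets its own FUNCTION. A compact standalone model of that bookkeeping (the enum values here are illustrative, not the FW_MSG_CODE_* constants):

#include <stdio.h>

enum unload_resp { UNLOAD_FUNCTION, UNLOAD_PORT, UNLOAD_COMMON };

static int load_count[2][3];	/* [path][0 = path total, 1..2 = per port] */

static enum unload_resp send_unload_req(int path, int port)
{
	load_count[path][0]--;
	load_count[path][1 + port]--;

	if (load_count[path][0] == 0)
		return UNLOAD_COMMON;		/* last function on the path */
	if (load_count[path][1 + port] == 0)
		return UNLOAD_PORT;		/* last function on this port */
	return UNLOAD_FUNCTION;
}

int main(void)
{
	/* two functions loaded on path 0: one per port */
	load_count[0][0] = 2;
	load_count[0][1] = 1;
	load_count[0][2] = 1;

	printf("first unload (port 0): %d\n", send_unload_req(0, 0)); /* PORT */
	printf("last unload (port 1): %d\n", send_unload_req(0, 1));  /* COMMON */
	return 0;
}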
7023 | |||
7024 | /** | ||
7025 | * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. | ||
7026 | * | ||
7027 | * @bp: driver handle | ||
7028 | */ | ||
7029 | void bnx2x_send_unload_done(struct bnx2x *bp) | ||
7030 | { | ||
7031 | /* Report UNLOAD_DONE to MCP */ | ||
7032 | if (!BP_NOMCP(bp)) | ||
7033 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); | ||
7034 | } | ||
7035 | |||
7036 | void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) | ||
7037 | { | ||
7038 | int port = BP_PORT(bp); | ||
7039 | int i, rc; | ||
7040 | struct bnx2x_mcast_ramrod_params rparam = {0}; | ||
7041 | u32 reset_code; | ||
7042 | |||
7043 | /* Wait until tx fastpath tasks complete */ | ||
7044 | for_each_tx_queue(bp, i) { | ||
7045 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
7046 | |||
7047 | rc = bnx2x_clean_tx_queue(bp, fp); | ||
7048 | #ifdef BNX2X_STOP_ON_ERROR | ||
7049 | if (rc) | ||
7050 | return; | ||
7051 | #endif | ||
7052 | } | ||
7053 | |||
7054 | /* Give HW time to discard old tx messages */ | ||
7055 | usleep_range(1000, 1000); | ||
7056 | |||
7057 | /* Clean all ETH MACs */ | ||
7058 | rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false); | ||
7059 | if (rc < 0) | ||
7060 | BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); | ||
7061 | |||
7062 | /* Clean up UC list */ | ||
7063 | rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC, | ||
7064 | true); | ||
7065 | if (rc < 0) | ||
7066 | BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: " | ||
7067 | "%d\n", rc); | ||
7068 | |||
7069 | /* Disable LLH */ | ||
7070 | if (!CHIP_IS_E1(bp)) | ||
7071 | REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); | ||
7072 | |||
7073 | /* Set "drop all" (stop Rx). | ||
7074 | * We need to take a netif_addr_lock() here in order to prevent | ||
7075 | * a race between the completion code and this code. | ||
7076 | */ | ||
7077 | netif_addr_lock_bh(bp->dev); | ||
7078 | /* Schedule the rx_mode command */ | ||
7079 | if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) | ||
7080 | set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); | ||
7081 | else | ||
7082 | bnx2x_set_storm_rx_mode(bp); | ||
7083 | |||
7084 | /* Cleanup multicast configuration */ | ||
7085 | rparam.mcast_obj = &bp->mcast_obj; | ||
7086 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); | ||
7087 | if (rc < 0) | ||
7088 | BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc); | ||
7089 | |||
7090 | netif_addr_unlock_bh(bp->dev); | ||
7091 | |||
7092 | |||
6248 | /* Close multi and leading connections | 7093 | /* Close multi and leading connections |
6249 | Completions for ramrods are collected in a synchronous way */ | 7094 | * Completions for ramrods are collected in a synchronous way |
7095 | */ | ||
6250 | for_each_queue(bp, i) | 7096 | for_each_queue(bp, i) |
6251 | 7097 | if (bnx2x_stop_queue(bp, i)) | |
6252 | if (bnx2x_stop_client(bp, i)) | ||
6253 | #ifdef BNX2X_STOP_ON_ERROR | 7098 | #ifdef BNX2X_STOP_ON_ERROR |
6254 | return; | 7099 | return; |
6255 | #else | 7100 | #else |
6256 | goto unload_error; | 7101 | goto unload_error; |
6257 | #endif | 7102 | #endif |
7103 | /* If SP settings didn't get completed so far - something | ||
7104 | * very wrong has happened. | ||
7105 | */ | ||
7106 | if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) | ||
7107 | BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n"); | ||
6258 | 7108 | ||
7109 | #ifndef BNX2X_STOP_ON_ERROR | ||
7110 | unload_error: | ||
7111 | #endif | ||
6259 | rc = bnx2x_func_stop(bp); | 7112 | rc = bnx2x_func_stop(bp); |
6260 | if (rc) { | 7113 | if (rc) { |
6261 | BNX2X_ERR("Function stop failed!\n"); | 7114 | BNX2X_ERR("Function stop failed!\n"); |
6262 | #ifdef BNX2X_STOP_ON_ERROR | 7115 | #ifdef BNX2X_STOP_ON_ERROR |
6263 | return; | 7116 | return; |
6264 | #else | ||
6265 | goto unload_error; | ||
6266 | #endif | ||
6267 | } | ||
6268 | #ifndef BNX2X_STOP_ON_ERROR | ||
6269 | unload_error: | ||
6270 | #endif | 7117 | #endif |
6271 | if (!BP_NOMCP(bp)) | ||
6272 | reset_code = bnx2x_fw_command(bp, reset_code, 0); | ||
6273 | else { | ||
6274 | DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] " | ||
6275 | "%d, %d, %d\n", BP_PATH(bp), | ||
6276 | load_count[BP_PATH(bp)][0], | ||
6277 | load_count[BP_PATH(bp)][1], | ||
6278 | load_count[BP_PATH(bp)][2]); | ||
6279 | load_count[BP_PATH(bp)][0]--; | ||
6280 | load_count[BP_PATH(bp)][1 + port]--; | ||
6281 | DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] " | ||
6282 | "%d, %d, %d\n", BP_PATH(bp), | ||
6283 | load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1], | ||
6284 | load_count[BP_PATH(bp)][2]); | ||
6285 | if (load_count[BP_PATH(bp)][0] == 0) | ||
6286 | reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; | ||
6287 | else if (load_count[BP_PATH(bp)][1 + port] == 0) | ||
6288 | reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; | ||
6289 | else | ||
6290 | reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; | ||
6291 | } | 7118 | } |
6292 | 7119 | ||
6293 | if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) || | 7120 | /* |
6294 | (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT)) | 7121 | * Send the UNLOAD_REQUEST to the MCP. This will return if |
6295 | bnx2x__link_reset(bp); | 7122 | * this function should perform FUNC, PORT or COMMON HW |
7123 | * reset. | ||
7124 | */ | ||
7125 | reset_code = bnx2x_send_unload_req(bp, unload_mode); | ||
6296 | 7126 | ||
6297 | /* Disable HW interrupts, NAPI */ | 7127 | /* Disable HW interrupts, NAPI */ |
6298 | bnx2x_netif_stop(bp, 1); | 7128 | bnx2x_netif_stop(bp, 1); |
@@ -6301,12 +7131,13 @@ unload_error: | |||
6301 | bnx2x_free_irq(bp); | 7131 | bnx2x_free_irq(bp); |
6302 | 7132 | ||
6303 | /* Reset the chip */ | 7133 | /* Reset the chip */ |
6304 | bnx2x_reset_chip(bp, reset_code); | 7134 | rc = bnx2x_reset_hw(bp, reset_code); |
7135 | if (rc) | ||
7136 | BNX2X_ERR("HW_RESET failed\n"); | ||
6305 | 7137 | ||
6306 | /* Report UNLOAD_DONE to MCP */ | ||
6307 | if (!BP_NOMCP(bp)) | ||
6308 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); | ||
6309 | 7138 | ||
7139 | /* Report UNLOAD_DONE to MCP */ | ||
7140 | bnx2x_send_unload_done(bp); | ||
6310 | } | 7141 | } |
6311 | 7142 | ||
6312 | void bnx2x_disable_close_the_gate(struct bnx2x *bp) | 7143 | void bnx2x_disable_close_the_gate(struct bnx2x *bp) |
@@ -6323,7 +7154,7 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp) | |||
6323 | val = REG_RD(bp, addr); | 7154 | val = REG_RD(bp, addr); |
6324 | val &= ~(0x300); | 7155 | val &= ~(0x300); |
6325 | REG_WR(bp, addr, val); | 7156 | REG_WR(bp, addr, val); |
6326 | } else if (CHIP_IS_E1H(bp)) { | 7157 | } else { |
6327 | val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); | 7158 | val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); |
6328 | val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | | 7159 | val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | |
6329 | MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); | 7160 | MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); |
@@ -6820,8 +7651,8 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
6820 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | 7651 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
6821 | /* save our pf_num */ | 7652 | /* save our pf_num */ |
6822 | int orig_pf_num = bp->pf_num; | 7653 | int orig_pf_num = bp->pf_num; |
6823 | u32 swap_en; | 7654 | int port; |
6824 | u32 swap_val; | 7655 | u32 swap_en, swap_val, value; |
6825 | 7656 | ||
6826 | /* clear the UNDI indication */ | 7657 | /* clear the UNDI indication */ |
6827 | REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); | 7658 | REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); |
@@ -6856,21 +7687,19 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
6856 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | 7687 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); |
6857 | 7688 | ||
6858 | bnx2x_undi_int_disable(bp); | 7689 | bnx2x_undi_int_disable(bp); |
7690 | port = BP_PORT(bp); | ||
6859 | 7691 | ||
6860 | /* close input traffic and wait for it */ | 7692 | /* close input traffic and wait for it */ |
6861 | /* Do not rcv packets to BRB */ | 7693 | /* Do not rcv packets to BRB */ |
6862 | REG_WR(bp, | 7694 | REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK : |
6863 | (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK : | 7695 | NIG_REG_LLH0_BRB1_DRV_MASK), 0x0); |
6864 | NIG_REG_LLH0_BRB1_DRV_MASK), 0x0); | ||
6865 | /* Do not direct rcv packets that are not for MCP to | 7696 | /* Do not direct rcv packets that are not for MCP to |
6866 | * the BRB */ | 7697 | * the BRB */ |
6867 | REG_WR(bp, | 7698 | REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : |
6868 | (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP : | 7699 | NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); |
6869 | NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); | ||
6870 | /* clear AEU */ | 7700 | /* clear AEU */ |
6871 | REG_WR(bp, | 7701 | REG_WR(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : |
6872 | (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : | 7702 | MISC_REG_AEU_MASK_ATTN_FUNC_0), 0); |
6873 | MISC_REG_AEU_MASK_ATTN_FUNC_0), 0); | ||
6874 | msleep(10); | 7703 | msleep(10); |
6875 | 7704 | ||
6876 | /* save NIG port swap info */ | 7705 | /* save NIG port swap info */ |
@@ -6880,9 +7709,17 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
6880 | REG_WR(bp, | 7709 | REG_WR(bp, |
6881 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | 7710 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, |
6882 | 0xd3ffffff); | 7711 | 0xd3ffffff); |
7712 | |||
7713 | value = 0x1400; | ||
7714 | if (CHIP_IS_E3(bp)) { | ||
7715 | value |= MISC_REGISTERS_RESET_REG_2_MSTAT0; | ||
7716 | value |= MISC_REGISTERS_RESET_REG_2_MSTAT1; | ||
7717 | } | ||
7718 | |||
6883 | REG_WR(bp, | 7719 | REG_WR(bp, |
6884 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | 7720 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, |
6885 | 0x1403); | 7721 | value); |
7722 | |||
6886 | /* take the NIG out of reset and restore swap values */ | 7723 | /* take the NIG out of reset and restore swap values */ |
6887 | REG_WR(bp, | 7724 | REG_WR(bp, |
6888 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, | 7725 | GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, |
@@ -6923,7 +7760,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
6923 | /* Set doorbell size */ | 7760 | /* Set doorbell size */ |
6924 | bp->db_size = (1 << BNX2X_DB_SHIFT); | 7761 | bp->db_size = (1 << BNX2X_DB_SHIFT); |
6925 | 7762 | ||
6926 | if (CHIP_IS_E2(bp)) { | 7763 | if (!CHIP_IS_E1x(bp)) { |
6927 | val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); | 7764 | val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); |
6928 | if ((val & 1) == 0) | 7765 | if ((val & 1) == 0) |
6929 | val = REG_RD(bp, MISC_REG_PORT4MODE_EN); | 7766 | val = REG_RD(bp, MISC_REG_PORT4MODE_EN); |
@@ -6943,16 +7780,6 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
6943 | bp->pfid = bp->pf_num; /* 0..7 */ | 7780 | bp->pfid = bp->pf_num; /* 0..7 */ |
6944 | } | 7781 | } |
6945 | 7782 | ||
6946 | /* | ||
6947 | * set base FW non-default (fast path) status block id, this value is | ||
6948 | * used to initialize the fw_sb_id saved on the fp/queue structure to | ||
6949 | * determine the id used by the FW. | ||
6950 | */ | ||
6951 | if (CHIP_IS_E1x(bp)) | ||
6952 | bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x; | ||
6953 | else /* E2 */ | ||
6954 | bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2; | ||
6955 | |||
6956 | bp->link_params.chip_id = bp->common.chip_id; | 7783 | bp->link_params.chip_id = bp->common.chip_id; |
6957 | BNX2X_DEV_INFO("chip ID is 0x%x\n", id); | 7784 | BNX2X_DEV_INFO("chip ID is 0x%x\n", id); |
6958 | 7785 | ||
@@ -6971,6 +7798,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
6971 | 7798 | ||
6972 | bnx2x_init_shmem(bp); | 7799 | bnx2x_init_shmem(bp); |
6973 | 7800 | ||
7801 | |||
7802 | |||
6974 | bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? | 7803 | bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? |
6975 | MISC_REG_GENERIC_CR_1 : | 7804 | MISC_REG_GENERIC_CR_1 : |
6976 | MISC_REG_GENERIC_CR_0)); | 7805 | MISC_REG_GENERIC_CR_0)); |
@@ -7080,8 +7909,14 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) | |||
7080 | } | 7909 | } |
7081 | } | 7910 | } |
7082 | } | 7911 | } |
7083 | bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, | 7912 | |
7084 | NUM_IGU_SB_REQUIRED(bp->l2_cid_count)); | 7913 | /* It's expected that number of CAM entries for this |
7914 | * function is equal to the MSI-X table size (which was | ||
7915 | * used during the bp->l2_cid_count value calculation). | ||
7916 | * We want a harsh warning if these values are different! | ||
7917 | */ | ||
7918 | WARN_ON(bp->igu_sb_cnt != NUM_IGU_SB_REQUIRED(bp->l2_cid_count)); | ||
7919 | |||
7085 | if (bp->igu_sb_cnt == 0) | 7920 | if (bp->igu_sb_cnt == 0) |
7086 | BNX2X_ERR("CAM configuration error\n"); | 7921 | BNX2X_ERR("CAM configuration error\n"); |
7087 | } | 7922 | } |
@@ -7130,24 +7965,25 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, | |||
7130 | return; | 7965 | return; |
7131 | } | 7966 | } |
7132 | 7967 | ||
7133 | switch (switch_cfg) { | 7968 | if (CHIP_IS_E3(bp)) |
7134 | case SWITCH_CFG_1G: | 7969 | bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); |
7135 | bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + | 7970 | else { |
7136 | port*0x10); | 7971 | switch (switch_cfg) { |
7137 | BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); | 7972 | case SWITCH_CFG_1G: |
7138 | break; | 7973 | bp->port.phy_addr = REG_RD( |
7139 | 7974 | bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); | |
7140 | case SWITCH_CFG_10G: | 7975 | break; |
7141 | bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + | 7976 | case SWITCH_CFG_10G: |
7142 | port*0x18); | 7977 | bp->port.phy_addr = REG_RD( |
7143 | BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); | 7978 | bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); |
7144 | break; | 7979 | break; |
7145 | 7980 | default: | |
7146 | default: | 7981 | BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", |
7147 | BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", | 7982 | bp->port.link_config[0]); |
7148 | bp->port.link_config[0]); | 7983 | return; |
7149 | return; | 7984 | } |
7150 | } | 7985 | } |
7986 | BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); | ||
7151 | /* mask what we support according to speed_cap_mask per configuration */ | 7987 | /* mask what we support according to speed_cap_mask per configuration */ |
7152 | for (idx = 0; idx < cfg_size; idx++) { | 7988 | for (idx = 0; idx < cfg_size; idx++) { |
7153 | if (!(bp->link_params.speed_cap_mask[idx] & | 7989 | if (!(bp->link_params.speed_cap_mask[idx] & |
@@ -7331,8 +8167,6 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) | |||
7331 | break; | 8167 | break; |
7332 | 8168 | ||
7333 | case PORT_FEATURE_LINK_SPEED_10G_CX4: | 8169 | case PORT_FEATURE_LINK_SPEED_10G_CX4: |
7334 | case PORT_FEATURE_LINK_SPEED_10G_KX4: | ||
7335 | case PORT_FEATURE_LINK_SPEED_10G_KR: | ||
7336 | if (bp->port.supported[idx] & | 8170 | if (bp->port.supported[idx] & |
7337 | SUPPORTED_10000baseT_Full) { | 8171 | SUPPORTED_10000baseT_Full) { |
7338 | bp->link_params.req_line_speed[idx] = | 8172 | bp->link_params.req_line_speed[idx] = |
@@ -7503,6 +8337,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) | |||
7503 | u8 *fip_mac = bp->fip_mac; | 8337 | u8 *fip_mac = bp->fip_mac; |
7504 | #endif | 8338 | #endif |
7505 | 8339 | ||
8340 | /* Zero primary MAC configuration */ | ||
8341 | memset(bp->dev->dev_addr, 0, ETH_ALEN); | ||
8342 | |||
7506 | if (BP_NOMCP(bp)) { | 8343 | if (BP_NOMCP(bp)) { |
7507 | BNX2X_ERROR("warning: random MAC workaround active\n"); | 8344 | BNX2X_ERROR("warning: random MAC workaround active\n"); |
7508 | random_ether_addr(bp->dev->dev_addr); | 8345 | random_ether_addr(bp->dev->dev_addr); |
@@ -7524,9 +8361,10 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) | |||
7524 | iscsi_mac_addr_upper); | 8361 | iscsi_mac_addr_upper); |
7525 | val = MF_CFG_RD(bp, func_ext_config[func]. | 8362 | val = MF_CFG_RD(bp, func_ext_config[func]. |
7526 | iscsi_mac_addr_lower); | 8363 | iscsi_mac_addr_lower); |
7527 | BNX2X_DEV_INFO("Read iSCSI MAC: " | ||
7528 | "0x%x:0x%04x\n", val2, val); | ||
7529 | bnx2x_set_mac_buf(iscsi_mac, val, val2); | 8364 | bnx2x_set_mac_buf(iscsi_mac, val, val2); |
8365 | BNX2X_DEV_INFO("Read iSCSI MAC: " | ||
8366 | BNX2X_MAC_FMT"\n", | ||
8367 | BNX2X_MAC_PRN_LIST(iscsi_mac)); | ||
7530 | } else | 8368 | } else |
7531 | bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; | 8369 | bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; |
7532 | 8370 | ||
@@ -7535,9 +8373,10 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) | |||
7535 | fcoe_mac_addr_upper); | 8373 | fcoe_mac_addr_upper); |
7536 | val = MF_CFG_RD(bp, func_ext_config[func]. | 8374 | val = MF_CFG_RD(bp, func_ext_config[func]. |
7537 | fcoe_mac_addr_lower); | 8375 | fcoe_mac_addr_lower); |
7538 | BNX2X_DEV_INFO("Read FCoE MAC to " | ||
7539 | "0x%x:0x%04x\n", val2, val); | ||
7540 | bnx2x_set_mac_buf(fip_mac, val, val2); | 8376 | bnx2x_set_mac_buf(fip_mac, val, val2); |
8377 | BNX2X_DEV_INFO("Read FCoE L2 MAC to " | ||
8378 | BNX2X_MAC_FMT"\n", | ||
8379 | BNX2X_MAC_PRN_LIST(fip_mac)); | ||
7541 | 8380 | ||
7542 | } else | 8381 | } else |
7543 | bp->flags |= NO_FCOE_FLAG; | 8382 | bp->flags |= NO_FCOE_FLAG; |
@@ -7586,6 +8425,13 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) | |||
7586 | memset(bp->fip_mac, 0, ETH_ALEN); | 8425 | memset(bp->fip_mac, 0, ETH_ALEN); |
7587 | } | 8426 | } |
7588 | #endif | 8427 | #endif |
8428 | |||
8429 | if (!is_valid_ether_addr(bp->dev->dev_addr)) | ||
8430 | dev_err(&bp->pdev->dev, | ||
8431 | "bad Ethernet MAC address configuration: " | ||
8432 | BNX2X_MAC_FMT", change it manually before bringing up " | ||
8433 | "the appropriate network interface\n", | ||
8434 | BNX2X_MAC_PRN_LIST(bp->dev->dev_addr)); | ||
7589 | } | 8435 | } |
7590 | 8436 | ||
7591 | static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | 8437 | static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) |
@@ -7607,17 +8453,55 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
7607 | } else { | 8453 | } else { |
7608 | bp->common.int_block = INT_BLOCK_IGU; | 8454 | bp->common.int_block = INT_BLOCK_IGU; |
7609 | val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); | 8455 | val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); |
8456 | |||
8457 | if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { | ||
8458 | int tout = 5000; | ||
8459 | |||
8460 | BNX2X_DEV_INFO("FORCING Normal Mode\n"); | ||
8461 | |||
8462 | val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); | ||
8463 | REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); | ||
8464 | REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); | ||
8465 | |||
8466 | while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { | ||
8467 | tout--; | ||
8468 | usleep_range(1000, 1000); | ||
8469 | } | ||
8470 | |||
8471 | if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { | ||
8472 | dev_err(&bp->pdev->dev, | ||
8473 | "FORCING Normal Mode failed!!!\n"); | ||
8474 | return -EPERM; | ||
8475 | } | ||
8476 | } | ||
8477 | |||
7610 | if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { | 8478 | if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { |
7611 | DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n"); | 8479 | BNX2X_DEV_INFO("IGU Backward Compatible Mode\n"); |
7612 | bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; | 8480 | bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; |
7613 | } else | 8481 | } else |
7614 | DP(NETIF_MSG_PROBE, "IGU Normal Mode\n"); | 8482 | BNX2X_DEV_INFO("IGU Normal Mode\n"); |
7615 | 8483 | ||
7616 | bnx2x_get_igu_cam_info(bp); | 8484 | bnx2x_get_igu_cam_info(bp); |
7617 | 8485 | ||
7618 | } | 8486 | } |
7619 | DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n", | 8487 | |
7620 | bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt); | 8488 | /* |
8489 | * set base FW non-default (fast path) status block id, this value is | ||
8490 | * used to initialize the fw_sb_id saved on the fp/queue structure to | ||
8491 | * determine the id used by the FW. | ||
8492 | */ | ||
8493 | if (CHIP_IS_E1x(bp)) | ||
8494 | bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); | ||
8495 | else /* | ||
8496 | * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of | ||
8497 | * the same queue are indicated on the same IGU SB). So we prefer | ||
8498 | * FW and IGU SBs to be the same value. | ||
8499 | */ | ||
8500 | bp->base_fw_ndsb = bp->igu_base_sb; | ||
8501 | |||
8502 | BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n" | ||
8503 | "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb, | ||
8504 | bp->igu_sb_cnt, bp->base_fw_ndsb); | ||
7621 | 8505 | ||
7622 | /* | 8506 | /* |
7623 | * Initialize MF configuration | 8507 | * Initialize MF configuration |
@@ -7628,10 +8512,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
7628 | vn = BP_E1HVN(bp); | 8512 | vn = BP_E1HVN(bp); |
7629 | 8513 | ||
7630 | if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { | 8514 | if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { |
7631 | DP(NETIF_MSG_PROBE, | 8515 | BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", |
7632 | "shmem2base 0x%x, size %d, mfcfg offset %d\n", | 8516 | bp->common.shmem2_base, SHMEM2_RD(bp, size), |
7633 | bp->common.shmem2_base, SHMEM2_RD(bp, size), | 8517 | (u32)offsetof(struct shmem2_region, mf_cfg_addr)); |
7634 | (u32)offsetof(struct shmem2_region, mf_cfg_addr)); | 8518 | |
7635 | if (SHMEM2_HAS(bp, mf_cfg_addr)) | 8519 | if (SHMEM2_HAS(bp, mf_cfg_addr)) |
7636 | bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); | 8520 | bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); |
7637 | else | 8521 | else |
@@ -7662,8 +8546,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
7662 | bp->mf_config[vn] = MF_CFG_RD(bp, | 8546 | bp->mf_config[vn] = MF_CFG_RD(bp, |
7663 | func_mf_config[func].config); | 8547 | func_mf_config[func].config); |
7664 | } else | 8548 | } else |
7665 | DP(NETIF_MSG_PROBE, "illegal MAC " | 8549 | BNX2X_DEV_INFO("illegal MAC address " |
7666 | "address for SI\n"); | 8550 | "for SI\n"); |
7667 | break; | 8551 | break; |
7668 | case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: | 8552 | case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: |
7669 | /* get OV configuration */ | 8553 | /* get OV configuration */ |
@@ -7696,13 +8580,16 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
7696 | FUNC_MF_CFG_E1HOV_TAG_MASK; | 8580 | FUNC_MF_CFG_E1HOV_TAG_MASK; |
7697 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { | 8581 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { |
7698 | bp->mf_ov = val; | 8582 | bp->mf_ov = val; |
7699 | BNX2X_DEV_INFO("MF OV for func %d is %d" | 8583 | bp->path_has_ovlan = true; |
7700 | " (0x%04x)\n", func, | 8584 | |
7701 | bp->mf_ov, bp->mf_ov); | 8585 | BNX2X_DEV_INFO("MF OV for func %d is %d " |
8586 | "(0x%04x)\n", func, bp->mf_ov, | ||
8587 | bp->mf_ov); | ||
7702 | } else { | 8588 | } else { |
7703 | BNX2X_ERR("No valid MF OV for func %d," | 8589 | dev_err(&bp->pdev->dev, |
7704 | " aborting\n", func); | 8590 | "No valid MF OV for func %d, " |
7705 | rc = -EPERM; | 8591 | "aborting\n", func); |
8592 | return -EPERM; | ||
7706 | } | 8593 | } |
7707 | break; | 8594 | break; |
7708 | case MULTI_FUNCTION_SI: | 8595 | case MULTI_FUNCTION_SI: |
@@ -7711,31 +8598,40 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
7711 | break; | 8598 | break; |
7712 | default: | 8599 | default: |
7713 | if (vn) { | 8600 | if (vn) { |
7714 | BNX2X_ERR("VN %d in single function mode," | 8601 | dev_err(&bp->pdev->dev, |
7715 | " aborting\n", vn); | 8602 | "VN %d is in a single function mode, " |
7716 | rc = -EPERM; | 8603 | "aborting\n", vn); |
8604 | return -EPERM; | ||
7717 | } | 8605 | } |
7718 | break; | 8606 | break; |
7719 | } | 8607 | } |
7720 | 8608 | ||
8609 | /* check if the other port on the path needs ovlan: | ||
8610 | * since MF configuration is shared between ports, | ||
8611 | * the only possible mixed modes are | ||
8612 | * {SF, SI}, {SF, SD}, {SD, SF} and {SI, SF} | ||
8613 | */ | ||
8614 | if (CHIP_MODE_IS_4_PORT(bp) && | ||
8615 | !bp->path_has_ovlan && | ||
8616 | !IS_MF(bp) && | ||
8617 | bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { | ||
8618 | u8 other_port = !BP_PORT(bp); | ||
8619 | u8 other_func = BP_PATH(bp) + 2*other_port; | ||
8620 | val = MF_CFG_RD(bp, | ||
8621 | func_mf_config[other_func].e1hov_tag); | ||
8622 | if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) | ||
8623 | bp->path_has_ovlan = true; | ||
8624 | } | ||
7721 | } | 8625 | } |
7722 | 8626 | ||
7723 | /* adjust igu_sb_cnt to MF for E1x */ | 8627 | /* adjust igu_sb_cnt to MF for E1x */ |
7724 | if (CHIP_IS_E1x(bp) && IS_MF(bp)) | 8628 | if (CHIP_IS_E1x(bp) && IS_MF(bp)) |
7725 | bp->igu_sb_cnt /= E1HVN_MAX; | 8629 | bp->igu_sb_cnt /= E1HVN_MAX; |
7726 | 8630 | ||
7727 | /* | 8631 | /* port info */ |
7728 | * adjust E2 sb count: to be removed when FW will support | 8632 | bnx2x_get_port_hwinfo(bp); |
7729 | * more then 16 L2 clients | ||
7730 | */ | ||
7731 | #define MAX_L2_CLIENTS 16 | ||
7732 | if (CHIP_IS_E2(bp)) | ||
7733 | bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, | ||
7734 | MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1)); | ||
7735 | 8633 | ||
7736 | if (!BP_NOMCP(bp)) { | 8634 | if (!BP_NOMCP(bp)) { |
7737 | bnx2x_get_port_hwinfo(bp); | ||
7738 | |||
7739 | bp->fw_seq = | 8635 | bp->fw_seq = |
7740 | (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & | 8636 | (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & |
7741 | DRV_MSG_SEQ_NUMBER_MASK); | 8637 | DRV_MSG_SEQ_NUMBER_MASK); |
@@ -7749,6 +8645,16 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
7749 | bnx2x_get_cnic_info(bp); | 8645 | bnx2x_get_cnic_info(bp); |
7750 | #endif | 8646 | #endif |
7751 | 8647 | ||
8648 | /* Get current FW pulse sequence */ | ||
8649 | if (!BP_NOMCP(bp)) { | ||
8650 | int mb_idx = BP_FW_MB_IDX(bp); | ||
8651 | |||
8652 | bp->fw_drv_pulse_wr_seq = | ||
8653 | (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) & | ||
8654 | DRV_PULSE_SEQ_MASK); | ||
8655 | BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); | ||
8656 | } | ||
8657 | |||
7752 | return rc; | 8658 | return rc; |
7753 | } | 8659 | } |
7754 | 8660 | ||
@@ -7816,6 +8722,55 @@ out_not_found: | |||
7816 | return; | 8722 | return; |
7817 | } | 8723 | } |
7818 | 8724 | ||
8725 | static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp) | ||
8726 | { | ||
8727 | u32 flags = 0; | ||
8728 | |||
8729 | if (CHIP_REV_IS_FPGA(bp)) | ||
8730 | SET_FLAGS(flags, MODE_FPGA); | ||
8731 | else if (CHIP_REV_IS_EMUL(bp)) | ||
8732 | SET_FLAGS(flags, MODE_EMUL); | ||
8733 | else | ||
8734 | SET_FLAGS(flags, MODE_ASIC); | ||
8735 | |||
8736 | if (CHIP_MODE_IS_4_PORT(bp)) | ||
8737 | SET_FLAGS(flags, MODE_PORT4); | ||
8738 | else | ||
8739 | SET_FLAGS(flags, MODE_PORT2); | ||
8740 | |||
8741 | if (CHIP_IS_E2(bp)) | ||
8742 | SET_FLAGS(flags, MODE_E2); | ||
8743 | else if (CHIP_IS_E3(bp)) { | ||
8744 | SET_FLAGS(flags, MODE_E3); | ||
8745 | if (CHIP_REV(bp) == CHIP_REV_Ax) | ||
8746 | SET_FLAGS(flags, MODE_E3_A0); | ||
8747 | else {/*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ | ||
8748 | SET_FLAGS(flags, MODE_E3_B0); | ||
8749 | SET_FLAGS(flags, MODE_COS_BC); | ||
8750 | } | ||
8751 | } | ||
8752 | |||
8753 | if (IS_MF(bp)) { | ||
8754 | SET_FLAGS(flags, MODE_MF); | ||
8755 | switch (bp->mf_mode) { | ||
8756 | case MULTI_FUNCTION_SD: | ||
8757 | SET_FLAGS(flags, MODE_MF_SD); | ||
8758 | break; | ||
8759 | case MULTI_FUNCTION_SI: | ||
8760 | SET_FLAGS(flags, MODE_MF_SI); | ||
8761 | break; | ||
8762 | } | ||
8763 | } else | ||
8764 | SET_FLAGS(flags, MODE_SF); | ||
8765 | |||
8766 | #if defined(__LITTLE_ENDIAN) | ||
8767 | SET_FLAGS(flags, MODE_LITTLE_ENDIAN); | ||
8768 | #else /*(__BIG_ENDIAN)*/ | ||
8769 | SET_FLAGS(flags, MODE_BIG_ENDIAN); | ||
8770 | #endif | ||
8771 | INIT_MODE_FLAGS(bp) = flags; | ||
8772 | } | ||
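bnx2x_set_modes_bitmap() collapses the chip/port/MF/endianness classification into one flags word so later init code can test bits instead of re-deriving the configuration. A tiny sketch of that bitmap idea, with illustrative flag and macro names rather than the driver's MODE_* definitions:

#include <stdio.h>

#define MODE_ASIC	(1u << 0)
#define MODE_PORT4	(1u << 1)
#define MODE_MF		(1u << 2)

#define SET_FLAGS(word, f)	((word) |= (f))
#define TEST_FLAGS(word, f)	(!!((word) & (f)))

int main(void)
{
	unsigned int flags = 0;

	/* classify once, then only test bits afterwards */
	SET_FLAGS(flags, MODE_ASIC);
	SET_FLAGS(flags, MODE_PORT4);

	printf("4-port mode: %d, multi-function: %d\n",
	       TEST_FLAGS(flags, MODE_PORT4), TEST_FLAGS(flags, MODE_MF));
	return 0;
}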
8773 | |||
7819 | static int __devinit bnx2x_init_bp(struct bnx2x *bp) | 8774 | static int __devinit bnx2x_init_bp(struct bnx2x *bp) |
7820 | { | 8775 | { |
7821 | int func; | 8776 | int func; |
@@ -7833,9 +8788,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
7833 | INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task); | 8788 | INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task); |
7834 | 8789 | ||
7835 | rc = bnx2x_get_hwinfo(bp); | 8790 | rc = bnx2x_get_hwinfo(bp); |
8791 | if (rc) | ||
8792 | return rc; | ||
7836 | 8793 | ||
7837 | if (!rc) | 8794 | bnx2x_set_modes_bitmap(bp); |
7838 | rc = bnx2x_alloc_mem_bp(bp); | 8795 | |
8796 | rc = bnx2x_alloc_mem_bp(bp); | ||
8797 | if (rc) | ||
8798 | return rc; | ||
7839 | 8799 | ||
7840 | bnx2x_read_fwinfo(bp); | 8800 | bnx2x_read_fwinfo(bp); |
7841 | 8801 | ||
@@ -7888,6 +8848,13 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
7888 | bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); | 8848 | bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); |
7889 | bnx2x_dcbx_init_params(bp); | 8849 | bnx2x_dcbx_init_params(bp); |
7890 | 8850 | ||
8851 | #ifdef BCM_CNIC | ||
8852 | if (CHIP_IS_E1x(bp)) | ||
8853 | bp->cnic_base_cl_id = FP_SB_MAX_E1x; | ||
8854 | else | ||
8855 | bp->cnic_base_cl_id = FP_SB_MAX_E2; | ||
8856 | #endif | ||
8857 | |||
7891 | return rc; | 8858 | return rc; |
7892 | } | 8859 | } |
7893 | 8860 | ||
@@ -7896,6 +8863,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
7896 | * General service functions | 8863 | * General service functions |
7897 | ****************************************************************************/ | 8864 | ****************************************************************************/ |
7898 | 8865 | ||
8866 | /* | ||
8867 | * net_device service functions | ||
8868 | */ | ||
8869 | |||
7899 | /* called with rtnl_lock */ | 8870 | /* called with rtnl_lock */ |
7900 | static int bnx2x_open(struct net_device *dev) | 8871 | static int bnx2x_open(struct net_device *dev) |
7901 | { | 8872 | { |
@@ -7954,193 +8925,119 @@ static int bnx2x_close(struct net_device *dev) | |||
7954 | return 0; | 8925 | return 0; |
7955 | } | 8926 | } |
7956 | 8927 | ||
7957 | #define E1_MAX_UC_LIST 29 | 8928 | static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp, |
7958 | #define E1H_MAX_UC_LIST 30 | 8929 | struct bnx2x_mcast_ramrod_params *p) |
7959 | #define E2_MAX_UC_LIST 14 | ||
7960 | static inline u8 bnx2x_max_uc_list(struct bnx2x *bp) | ||
7961 | { | 8930 | { |
7962 | if (CHIP_IS_E1(bp)) | 8931 | int mc_count = netdev_mc_count(bp->dev); |
7963 | return E1_MAX_UC_LIST; | 8932 | struct bnx2x_mcast_list_elem *mc_mac = |
7964 | else if (CHIP_IS_E1H(bp)) | 8933 | kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC); |
7965 | return E1H_MAX_UC_LIST; | 8934 | struct netdev_hw_addr *ha; |
7966 | else | ||
7967 | return E2_MAX_UC_LIST; | ||
7968 | } | ||
7969 | 8935 | ||
8936 | if (!mc_mac) | ||
8937 | return -ENOMEM; | ||
7970 | 8938 | ||
7971 | static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp) | 8939 | INIT_LIST_HEAD(&p->mcast_list); |
7972 | { | ||
7973 | if (CHIP_IS_E1(bp)) | ||
7974 | /* CAM Entries for Port0: | ||
7975 | * 0 - prim ETH MAC | ||
7976 | * 1 - BCAST MAC | ||
7977 | * 2 - iSCSI L2 ring ETH MAC | ||
7978 | * 3-31 - UC MACs | ||
7979 | * | ||
7980 | * Port1 entries are allocated the same way starting from | ||
7981 | * entry 32. | ||
7982 | */ | ||
7983 | return 3 + 32 * BP_PORT(bp); | ||
7984 | else if (CHIP_IS_E1H(bp)) { | ||
7985 | /* CAM Entries: | ||
7986 | * 0-7 - prim ETH MAC for each function | ||
7987 | * 8-15 - iSCSI L2 ring ETH MAC for each function | ||
7988 | * 16 till 255 UC MAC lists for each function | ||
7989 | * | ||
7990 | * Remark: There is no FCoE support for E1H, thus FCoE related | ||
7991 | * MACs are not considered. | ||
7992 | */ | ||
7993 | return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) + | ||
7994 | bnx2x_max_uc_list(bp) * BP_FUNC(bp); | ||
7995 | } else { | ||
7996 | /* CAM Entries (there is a separate CAM per engine): | ||
7997 | * 0-4 - prim ETH MAC for each function | ||
7998 | * 4-7 - iSCSI L2 ring ETH MAC for each function | ||
7999 | * 8-11 - FIP ucast L2 MAC for each function | ||
8000 | * 12-15 - ALL_ENODE_MACS mcast MAC for each function | ||
8001 | * 16 till 71 UC MAC lists for each function | ||
8002 | */ | ||
8003 | u8 func_idx = | ||
8004 | (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp)); | ||
8005 | 8940 | ||
8006 | return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) + | 8941 | netdev_for_each_mc_addr(ha, bp->dev) { |
8007 | bnx2x_max_uc_list(bp) * func_idx; | 8942 | mc_mac->mac = bnx2x_mc_addr(ha); |
8943 | list_add_tail(&mc_mac->link, &p->mcast_list); | ||
8944 | mc_mac++; | ||
8008 | } | 8945 | } |
8946 | |||
8947 | p->mcast_list_len = mc_count; | ||
8948 | |||
8949 | return 0; | ||
8950 | } | ||
8951 | |||
8952 | static inline void bnx2x_free_mcast_macs_list( | ||
8953 | struct bnx2x_mcast_ramrod_params *p) | ||
8954 | { | ||
8955 | struct bnx2x_mcast_list_elem *mc_mac = | ||
8956 | list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem, | ||
8957 | link); | ||
8958 | |||
8959 | WARN_ON(!mc_mac); | ||
8960 | kfree(mc_mac); | ||
8009 | } | 8961 | } |
8010 | 8962 | ||
8011 | /* set uc list, do not wait as wait implies sleep and | 8963 | /** |
8012 | * set_rx_mode can be invoked from non-sleepable context. | 8964 | * bnx2x_set_uc_list - configure a new unicast MACs list. |
8965 | * | ||
8966 | * @bp: driver handle | ||
8013 | * | 8967 | * |
8014 | * Instead we use the same ramrod data buffer each time we need | 8968 | * We will use zero (0) as a MAC type for these MACs. |
8015 | * to configure a list of addresses, and use the fact that the | ||
8016 | * list of MACs is changed in an incremental way and that the | ||
8017 | * function is called under the netif_addr_lock. A temporary | ||
8018 | * inconsistent CAM configuration (possible in case of very fast | ||
8019 | * sequence of add/del/add on the host side) will shortly be | ||
8020 | * restored by the handler of the last ramrod. | ||
8021 | */ | 8969 | */ |
8022 | static int bnx2x_set_uc_list(struct bnx2x *bp) | 8970 | static inline int bnx2x_set_uc_list(struct bnx2x *bp) |
8023 | { | 8971 | { |
8024 | int i = 0, old; | 8972 | int rc; |
8025 | struct net_device *dev = bp->dev; | 8973 | struct net_device *dev = bp->dev; |
8026 | u8 offset = bnx2x_uc_list_cam_offset(bp); | ||
8027 | struct netdev_hw_addr *ha; | 8974 | struct netdev_hw_addr *ha; |
8028 | struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config); | 8975 | struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; |
8029 | dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config); | 8976 | unsigned long ramrod_flags = 0; |
8030 | 8977 | ||
8031 | if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp)) | 8978 | /* First schedule a cleanup up of old configuration */ |
8032 | return -EINVAL; | 8979 | rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false); |
8980 | if (rc < 0) { | ||
8981 | BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc); | ||
8982 | return rc; | ||
8983 | } | ||
8033 | 8984 | ||
8034 | netdev_for_each_uc_addr(ha, dev) { | 8985 | netdev_for_each_uc_addr(ha, dev) { |
8035 | /* copy mac */ | 8986 | rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true, |
8036 | config_cmd->config_table[i].msb_mac_addr = | 8987 | BNX2X_UC_LIST_MAC, &ramrod_flags); |
8037 | swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]); | 8988 | if (rc < 0) { |
8038 | config_cmd->config_table[i].middle_mac_addr = | 8989 | BNX2X_ERR("Failed to schedule ADD operations: %d\n", |
8039 | swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]); | 8990 | rc); |
8040 | config_cmd->config_table[i].lsb_mac_addr = | 8991 | return rc; |
8041 | swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]); | ||
8042 | |||
8043 | config_cmd->config_table[i].vlan_id = 0; | ||
8044 | config_cmd->config_table[i].pf_id = BP_FUNC(bp); | ||
8045 | config_cmd->config_table[i].clients_bit_vector = | ||
8046 | cpu_to_le32(1 << BP_L_ID(bp)); | ||
8047 | |||
8048 | SET_FLAG(config_cmd->config_table[i].flags, | ||
8049 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
8050 | T_ETH_MAC_COMMAND_SET); | ||
8051 | |||
8052 | DP(NETIF_MSG_IFUP, | ||
8053 | "setting UCAST[%d] (%04x:%04x:%04x)\n", i, | ||
8054 | config_cmd->config_table[i].msb_mac_addr, | ||
8055 | config_cmd->config_table[i].middle_mac_addr, | ||
8056 | config_cmd->config_table[i].lsb_mac_addr); | ||
8057 | |||
8058 | i++; | ||
8059 | |||
8060 | /* Set uc MAC in NIG */ | ||
8061 | bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha), | ||
8062 | LLH_CAM_ETH_LINE + i); | ||
8063 | } | ||
8064 | old = config_cmd->hdr.length; | ||
8065 | if (old > i) { | ||
8066 | for (; i < old; i++) { | ||
8067 | if (CAM_IS_INVALID(config_cmd-> | ||
8068 | config_table[i])) { | ||
8069 | /* already invalidated */ | ||
8070 | break; | ||
8071 | } | ||
8072 | /* invalidate */ | ||
8073 | SET_FLAG(config_cmd->config_table[i].flags, | ||
8074 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
8075 | T_ETH_MAC_COMMAND_INVALIDATE); | ||
8076 | } | 8992 | } |
8077 | } | 8993 | } |
8078 | 8994 | ||
8079 | wmb(); | 8995 | /* Execute the pending commands */ |
8080 | 8996 | __set_bit(RAMROD_CONT, &ramrod_flags); | |
8081 | config_cmd->hdr.length = i; | 8997 | return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */, |
8082 | config_cmd->hdr.offset = offset; | 8998 | BNX2X_UC_LIST_MAC, &ramrod_flags); |
8083 | config_cmd->hdr.client_id = 0xff; | ||
8084 | /* Mark that this ramrod doesn't use bp->set_mac_pending for | ||
8085 | * synchronization. | ||
8086 | */ | ||
8087 | config_cmd->hdr.echo = 0; | ||
8088 | |||
8089 | mb(); | ||
8090 | |||
8091 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, | ||
8092 | U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); | ||
8093 | |||
8094 | } | 8999 | } |
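The new bnx2x_set_uc_list() no longer rebuilds a CAM table in place: it schedules a delete of the whole previous unicast configuration, schedules one ADD per address reported by the stack, and only then kicks off execution of the pending batch (the RAMROD_CONT step). A standalone sketch of that schedule-then-execute pattern; the command queue and helper names below are invented for illustration:

#include <stdio.h>
#include <string.h>

enum op { OP_DEL_ALL, OP_ADD };

struct cmd {
	enum op op;
	unsigned char mac[6];
};

static struct cmd pending[32];
static int n_pending;

static void schedule_cmd(enum op op, const unsigned char *mac)
{
	pending[n_pending].op = op;
	if (mac)
		memcpy(pending[n_pending].mac, mac, 6);
	n_pending++;
}

static void execute_pending(void)
{
	int i;

	for (i = 0; i < n_pending; i++)
		printf("cmd %d: %s\n", i,
		       pending[i].op == OP_DEL_ALL ? "delete all" : "add MAC");
	n_pending = 0;
}

int main(void)
{
	const unsigned char mac[6] = { 0x00, 0x10, 0x18, 0x00, 0x00, 0x01 };

	schedule_cmd(OP_DEL_ALL, NULL);	/* drop the old UC configuration */
	schedule_cmd(OP_ADD, mac);	/* re-add what the stack reports */
	execute_pending();		/* equivalent of the RAMROD_CONT step */
	return 0;
}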
8095 | 9000 | ||
8096 | void bnx2x_invalidate_uc_list(struct bnx2x *bp) | 9001 | static inline int bnx2x_set_mc_list(struct bnx2x *bp) |
8097 | { | 9002 | { |
8098 | int i; | 9003 | struct net_device *dev = bp->dev; |
8099 | struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config); | 9004 | struct bnx2x_mcast_ramrod_params rparam = {0}; |
8100 | dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config); | 9005 | int rc = 0; |
8101 | int ramrod_flags = WAIT_RAMROD_COMMON; | ||
8102 | u8 offset = bnx2x_uc_list_cam_offset(bp); | ||
8103 | u8 max_list_size = bnx2x_max_uc_list(bp); | ||
8104 | |||
8105 | for (i = 0; i < max_list_size; i++) { | ||
8106 | SET_FLAG(config_cmd->config_table[i].flags, | ||
8107 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
8108 | T_ETH_MAC_COMMAND_INVALIDATE); | ||
8109 | bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i); | ||
8110 | } | ||
8111 | |||
8112 | wmb(); | ||
8113 | 9006 | ||
8114 | config_cmd->hdr.length = max_list_size; | 9007 | rparam.mcast_obj = &bp->mcast_obj; |
8115 | config_cmd->hdr.offset = offset; | ||
8116 | config_cmd->hdr.client_id = 0xff; | ||
8117 | /* We'll wait for a completion this time... */ | ||
8118 | config_cmd->hdr.echo = 1; | ||
8119 | 9008 | ||
8120 | bp->set_mac_pending = 1; | 9009 | /* first, clear all configured multicast MACs */ |
9010 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); | ||
9011 | if (rc < 0) { | ||
9012 | BNX2X_ERR("Failed to clear multicast " | ||
9013 | "configuration: %d\n", rc); | ||
9014 | return rc; | ||
9015 | } | ||
8121 | 9016 | ||
8122 | mb(); | 9017 | /* then, configure a new MACs list */ |
9018 | if (netdev_mc_count(dev)) { | ||
9019 | rc = bnx2x_init_mcast_macs_list(bp, &rparam); | ||
9020 | if (rc) { | ||
9021 | BNX2X_ERR("Failed to create multicast MACs " | ||
9022 | "list: %d\n", rc); | ||
9023 | return rc; | ||
9024 | } | ||
8123 | 9025 | ||
8124 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, | 9026 | /* Now add the new MACs */ |
8125 | U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); | 9027 | rc = bnx2x_config_mcast(bp, &rparam, |
9028 | BNX2X_MCAST_CMD_ADD); | ||
9029 | if (rc < 0) | ||
9030 | BNX2X_ERR("Failed to set a new multicast " | ||
9031 | "configuration: %d\n", rc); | ||
8126 | 9032 | ||
8127 | /* Wait for a completion */ | 9033 | bnx2x_free_mcast_macs_list(&rparam); |
8128 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, | 9034 | } |
8129 | ramrod_flags); | ||
8130 | 9035 | ||
9036 | return rc; | ||
8131 | } | 9037 | } |
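The multicast path above first wipes the old configuration and then re-adds the current list, which bnx2x_init_mcast_macs_list() builds from a single kzalloc() of mc_count elements; the matching free routine can release the whole list by freeing just the first element precisely because every element lives in that one allocation. A self-contained sketch of the single-allocation linked list, using a minimal hand-rolled list instead of the kernel's list_head:

#include <stdio.h>
#include <stdlib.h>

struct mc_elem {
	struct mc_elem *next;
	unsigned char mac[6];
};

int main(void)
{
	int i, mc_count = 4;
	struct mc_elem *head = NULL, *tail = NULL;
	struct mc_elem *block = calloc(mc_count, sizeof(*block));

	if (!block)
		return 1;

	/* link the pre-allocated elements, exactly one per multicast MAC */
	for (i = 0; i < mc_count; i++) {
		block[i].next = NULL;
		if (tail)
			tail->next = &block[i];
		else
			head = &block[i];
		tail = &block[i];
	}

	for (i = 0, tail = head; tail; tail = tail->next, i++)
		printf("element %d linked\n", i);

	free(block);	/* one free() releases every element at once */
	return 0;
}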
8132 | 9038 | ||
8133 | static inline int bnx2x_set_mc_list(struct bnx2x *bp) | ||
8134 | { | ||
8135 | /* some multicasts */ | ||
8136 | if (CHIP_IS_E1(bp)) { | ||
8137 | return bnx2x_set_e1_mc_list(bp); | ||
8138 | } else { /* E1H and newer */ | ||
8139 | return bnx2x_set_e1h_mc_list(bp); | ||
8140 | } | ||
8141 | } | ||
8142 | 9039 | ||
8143 | /* called with netif_tx_lock from dev_mcast.c */ | 9040 | /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ |
8144 | void bnx2x_set_rx_mode(struct net_device *dev) | 9041 | void bnx2x_set_rx_mode(struct net_device *dev) |
8145 | { | 9042 | { |
8146 | struct bnx2x *bp = netdev_priv(dev); | 9043 | struct bnx2x *bp = netdev_priv(dev); |
@@ -8151,23 +9048,31 @@ void bnx2x_set_rx_mode(struct net_device *dev) | |||
8151 | return; | 9048 | return; |
8152 | } | 9049 | } |
8153 | 9050 | ||
8154 | DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags); | 9051 | DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); |
8155 | 9052 | ||
8156 | if (dev->flags & IFF_PROMISC) | 9053 | if (dev->flags & IFF_PROMISC) |
8157 | rx_mode = BNX2X_RX_MODE_PROMISC; | 9054 | rx_mode = BNX2X_RX_MODE_PROMISC; |
8158 | else if (dev->flags & IFF_ALLMULTI) | 9055 | else if ((dev->flags & IFF_ALLMULTI) || |
9056 | ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && | ||
9057 | CHIP_IS_E1(bp))) | ||
8159 | rx_mode = BNX2X_RX_MODE_ALLMULTI; | 9058 | rx_mode = BNX2X_RX_MODE_ALLMULTI; |
8160 | else { | 9059 | else { |
8161 | /* some multicasts */ | 9060 | /* some multicasts */ |
8162 | if (bnx2x_set_mc_list(bp)) | 9061 | if (bnx2x_set_mc_list(bp) < 0) |
8163 | rx_mode = BNX2X_RX_MODE_ALLMULTI; | 9062 | rx_mode = BNX2X_RX_MODE_ALLMULTI; |
8164 | 9063 | ||
8165 | /* some unicasts */ | 9064 | if (bnx2x_set_uc_list(bp) < 0) |
8166 | if (bnx2x_set_uc_list(bp)) | ||
8167 | rx_mode = BNX2X_RX_MODE_PROMISC; | 9065 | rx_mode = BNX2X_RX_MODE_PROMISC; |
8168 | } | 9066 | } |
8169 | 9067 | ||
8170 | bp->rx_mode = rx_mode; | 9068 | bp->rx_mode = rx_mode; |
9069 | |||
9070 | /* Schedule the rx_mode command */ | ||
9071 | if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { | ||
9072 | set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); | ||
9073 | return; | ||
9074 | } | ||
9075 | |||
8171 | bnx2x_set_storm_rx_mode(bp); | 9076 | bnx2x_set_storm_rx_mode(bp); |
8172 | } | 9077 | } |
8173 | 9078 | ||
@@ -8258,8 +9163,28 @@ static const struct net_device_ops bnx2x_netdev_ops = { | |||
8258 | #endif | 9163 | #endif |
8259 | }; | 9164 | }; |
8260 | 9165 | ||
9166 | static inline int bnx2x_set_coherency_mask(struct bnx2x *bp) | ||
9167 | { | ||
9168 | struct device *dev = &bp->pdev->dev; | ||
9169 | |||
9170 | if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { | ||
9171 | bp->flags |= USING_DAC_FLAG; | ||
9172 | if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { | ||
9173 | dev_err(dev, "dma_set_coherent_mask failed, " | ||
9174 | "aborting\n"); | ||
9175 | return -EIO; | ||
9176 | } | ||
9177 | } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { | ||
9178 | dev_err(dev, "System does not support DMA, aborting\n"); | ||
9179 | return -EIO; | ||
9180 | } | ||
9181 | |||
9182 | return 0; | ||
9183 | } | ||
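bnx2x_set_coherency_mask() factors out the usual 64-bit-then-32-bit DMA mask negotiation: try the wide mask first, remember that in a flag, and only fail when even the 32-bit mask is rejected. A standalone sketch of that fallback logic, where try_set_mask() is a stand-in for dma_set_mask() and the flag name is illustrative:

#include <stdio.h>

#define USING_DAC 0x1

/* pretend the platform supports DMA masks up to 'supported_bits' wide */
static int try_set_mask(int requested_bits, int supported_bits)
{
	return requested_bits <= supported_bits ? 0 : -1;
}

static int set_coherency_mask(int supported_bits, unsigned int *flags)
{
	if (try_set_mask(64, supported_bits) == 0) {
		*flags |= USING_DAC;		/* 64-bit addressing is usable */
		return 0;
	}
	if (try_set_mask(32, supported_bits) != 0)
		return -1;			/* no usable DMA at all */
	return 0;				/* fall back to 32-bit DMA */
}

int main(void)
{
	unsigned int flags = 0;
	int rc = set_coherency_mask(32, &flags);

	printf("rc=%d, using DAC: %d\n", rc, !!(flags & USING_DAC));
	return rc ? 1 : 0;
}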
9184 | |||
8261 | static int __devinit bnx2x_init_dev(struct pci_dev *pdev, | 9185 | static int __devinit bnx2x_init_dev(struct pci_dev *pdev, |
8262 | struct net_device *dev) | 9186 | struct net_device *dev, |
9187 | unsigned long board_type) | ||
8263 | { | 9188 | { |
8264 | struct bnx2x *bp; | 9189 | struct bnx2x *bp; |
8265 | int rc; | 9190 | int rc; |
@@ -8321,21 +9246,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, | |||
8321 | goto err_out_release; | 9246 | goto err_out_release; |
8322 | } | 9247 | } |
8323 | 9248 | ||
8324 | if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) { | 9249 | rc = bnx2x_set_coherency_mask(bp); |
8325 | bp->flags |= USING_DAC_FLAG; | 9250 | if (rc) |
8326 | if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) { | ||
8327 | dev_err(&bp->pdev->dev, "dma_set_coherent_mask" | ||
8328 | " failed, aborting\n"); | ||
8329 | rc = -EIO; | ||
8330 | goto err_out_release; | ||
8331 | } | ||
8332 | |||
8333 | } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) { | ||
8334 | dev_err(&bp->pdev->dev, | ||
8335 | "System does not support DMA, aborting\n"); | ||
8336 | rc = -EIO; | ||
8337 | goto err_out_release; | 9251 | goto err_out_release; |
8338 | } | ||
8339 | 9252 | ||
8340 | dev->mem_start = pci_resource_start(pdev, 0); | 9253 | dev->mem_start = pci_resource_start(pdev, 0); |
8341 | dev->base_addr = dev->mem_start; | 9254 | dev->base_addr = dev->mem_start; |
@@ -8371,6 +9284,12 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, | |||
8371 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); | 9284 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); |
8372 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); | 9285 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); |
8373 | 9286 | ||
9287 | /** | ||
9288 | * Enable internal target-read (in case we are probed after PF FLR). | ||
9289 | * Must be done prior to any BAR read access | ||
9290 | */ | ||
9291 | REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); | ||
9292 | |||
8374 | /* Reset the load counter */ | 9293 | /* Reset the load counter */ |
8375 | bnx2x_clear_load_cnt(bp); | 9294 | bnx2x_clear_load_cnt(bp); |
8376 | 9295 | ||
@@ -8585,7 +9504,7 @@ int bnx2x_init_firmware(struct bnx2x *bp) | |||
8585 | fw_file_name = FW_FILE_NAME_E1; | 9504 | fw_file_name = FW_FILE_NAME_E1; |
8586 | else if (CHIP_IS_E1H(bp)) | 9505 | else if (CHIP_IS_E1H(bp)) |
8587 | fw_file_name = FW_FILE_NAME_E1H; | 9506 | fw_file_name = FW_FILE_NAME_E1H; |
8588 | else if (CHIP_IS_E2(bp)) | 9507 | else if (!CHIP_IS_E1x(bp)) |
8589 | fw_file_name = FW_FILE_NAME_E2; | 9508 | fw_file_name = FW_FILE_NAME_E2; |
8590 | else { | 9509 | else { |
8591 | BNX2X_ERR("Unsupported chip revision\n"); | 9510 | BNX2X_ERR("Unsupported chip revision\n"); |
@@ -8653,6 +9572,44 @@ request_firmware_exit: | |||
8653 | return rc; | 9572 | return rc; |
8654 | } | 9573 | } |
8655 | 9574 | ||
9575 | static void bnx2x_release_firmware(struct bnx2x *bp) | ||
9576 | { | ||
9577 | kfree(bp->init_ops_offsets); | ||
9578 | kfree(bp->init_ops); | ||
9579 | kfree(bp->init_data); | ||
9580 | release_firmware(bp->firmware); | ||
9581 | } | ||
9582 | |||
9583 | |||
9584 | static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = { | ||
9585 | .init_hw_cmn_chip = bnx2x_init_hw_common_chip, | ||
9586 | .init_hw_cmn = bnx2x_init_hw_common, | ||
9587 | .init_hw_port = bnx2x_init_hw_port, | ||
9588 | .init_hw_func = bnx2x_init_hw_func, | ||
9589 | |||
9590 | .reset_hw_cmn = bnx2x_reset_common, | ||
9591 | .reset_hw_port = bnx2x_reset_port, | ||
9592 | .reset_hw_func = bnx2x_reset_func, | ||
9593 | |||
9594 | .gunzip_init = bnx2x_gunzip_init, | ||
9595 | .gunzip_end = bnx2x_gunzip_end, | ||
9596 | |||
9597 | .init_fw = bnx2x_init_firmware, | ||
9598 | .release_fw = bnx2x_release_firmware, | ||
9599 | }; | ||
9600 | |||
9601 | void bnx2x__init_func_obj(struct bnx2x *bp) | ||
9602 | { | ||
9603 | /* Prepare DMAE related driver resources */ | ||
9604 | bnx2x_setup_dmae(bp); | ||
9605 | |||
9606 | bnx2x_init_func_obj(bp, &bp->func_obj, | ||
9607 | bnx2x_sp(bp, func_rdata), | ||
9608 | bnx2x_sp_mapping(bp, func_rdata), | ||
9609 | &bnx2x_func_sp_drv); | ||
9610 | } | ||
9611 | |||
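The bnx2x_func_sp_drv table and bnx2x__init_func_obj() above plug chip-specific HW init/reset and firmware handlers into a chip-independent function object: callers always go through the object, and the concrete behaviour is selected by the ops table bound at init time. A small self-contained sketch of that callback-table pattern; every name below is illustrative, not part of the bnx2x API.

/* A device object carries a pointer to an ops table; callers never invoke
 * chip-specific functions directly.
 */
struct example_drv_ops {
	int  (*init_hw)(void *ctx);
	void (*reset_hw)(void *ctx);
};

struct example_func_obj {
	const struct example_drv_ops *drv;
	void *ctx;
};

static void example_init_func_obj(struct example_func_obj *o,
				  const struct example_drv_ops *drv, void *ctx)
{
	o->drv = drv;
	o->ctx = ctx;
}

/* One chip's implementation; another chip supplies its own table with the
 * same layout, leaving all call sites unchanged.
 */
static int  chip_a_init_hw(void *ctx)  { return 0; }
static void chip_a_reset_hw(void *ctx) { }

static const struct example_drv_ops chip_a_ops = {
	.init_hw  = chip_a_init_hw,
	.reset_hw = chip_a_reset_hw,
};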
9612 | /* must be called after sriov-enable */ | ||
8656 | static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count) | 9613 | static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count) |
8657 | { | 9614 | { |
8658 | int cid_count = L2_FP_COUNT(l2_cid_count); | 9615 | int cid_count = L2_FP_COUNT(l2_cid_count); |
@@ -8663,6 +9620,25 @@ static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count) | |||
8663 | return roundup(cid_count, QM_CID_ROUND); | 9620 | return roundup(cid_count, QM_CID_ROUND); |
8664 | } | 9621 | } |
8665 | 9622 | ||
9623 | /** | ||
9624 | * bnx2x_pci_msix_table_size - get the size of the MSI-X table. | ||
9625 | * | ||
9626 | * @pdev: pci device | ||
9627 | * | ||
9628 | */ | ||
9629 | static inline int bnx2x_pci_msix_table_size(struct pci_dev *pdev) | ||
9630 | { | ||
9631 | int pos; | ||
9632 | u16 control; | ||
9633 | |||
9634 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); | ||
9635 | if (!pos) | ||
9636 | return 0; | ||
9637 | |||
9638 | pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control); | ||
9639 | return (control & PCI_MSIX_FLAGS_QSIZE) + 1; | ||
9640 | } | ||
9641 | |||
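bnx2x_pci_msix_table_size() above walks to the MSI-X capability and reads its Message Control word; the PCI_MSIX_FLAGS_QSIZE field encodes the table size minus one, hence the '+ 1'. A short illustrative wrapper showing how the probe path consumes the value; the '- 1' drops the vector reserved for the default status block, the wrapper name is hypothetical, and the helper is the one added above.

/* Number of non-default status blocks, i.e. usable fastpath vectors. */
static int example_non_default_sb_count(struct pci_dev *pdev)
{
	int msix_table_size = bnx2x_pci_msix_table_size(pdev);

	return msix_table_size ? msix_table_size - 1 : 0;
}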
8666 | static int __devinit bnx2x_init_one(struct pci_dev *pdev, | 9642 | static int __devinit bnx2x_init_one(struct pci_dev *pdev, |
8667 | const struct pci_device_id *ent) | 9643 | const struct pci_device_id *ent) |
8668 | { | 9644 | { |
@@ -8675,12 +9651,28 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
8675 | case BCM57710: | 9651 | case BCM57710: |
8676 | case BCM57711: | 9652 | case BCM57711: |
8677 | case BCM57711E: | 9653 | case BCM57711E: |
8678 | cid_count = FP_SB_MAX_E1x; | ||
8679 | break; | ||
8680 | |||
8681 | case BCM57712: | 9654 | case BCM57712: |
8682 | case BCM57712E: | 9655 | case BCM57712_MF: |
8683 | cid_count = FP_SB_MAX_E2; | 9656 | case BCM57800: |
9657 | case BCM57800_MF: | ||
9658 | case BCM57810: | ||
9659 | case BCM57810_MF: | ||
9660 | case BCM57840: | ||
9661 | case BCM57840_MF: | ||
9662 | /* The size requested for the MSI-X table corresponds to the | ||
9663 | * actual number of available IGU/HC status blocks. It includes | ||
9664 | * the default SB vector but we want cid_count to contain the | ||
9665 | * number of non-default SBs only; that's what the '-1' stands for. | ||
9666 | */ | ||
9667 | cid_count = bnx2x_pci_msix_table_size(pdev) - 1; | ||
9668 | |||
9669 | /* do not allow the initial cid_count to grow above 16, | ||
9670 | * since Special CIDs start from that number; | ||
9671 | * use the old FP_SB_MAX_E1x define for this purpose | ||
9672 | */ | ||
9673 | cid_count = min_t(int, FP_SB_MAX_E1x, cid_count); | ||
9674 | |||
9675 | WARN_ON(!cid_count); | ||
8684 | break; | 9676 | break; |
8685 | 9677 | ||
8686 | default: | 9678 | default: |
@@ -8689,7 +9681,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
8689 | return -ENODEV; | 9681 | return -ENODEV; |
8690 | } | 9682 | } |
8691 | 9683 | ||
8692 | cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE; | 9684 | cid_count += FCOE_CONTEXT_USE; |
8693 | 9685 | ||
8694 | /* dev zeroed in init_etherdev */ | 9686 | /* dev zeroed in init_etherdev */ |
8695 | dev = alloc_etherdev_mq(sizeof(*bp), cid_count); | 9687 | dev = alloc_etherdev_mq(sizeof(*bp), cid_count); |
@@ -8698,6 +9690,11 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
8698 | return -ENOMEM; | 9690 | return -ENOMEM; |
8699 | } | 9691 | } |
8700 | 9692 | ||
9693 | /* We don't need a Tx queue for the CNIC and the OOO Rx-only ring, | ||
9694 | * so update the cid_count after the netdev allocation. | ||
9695 | */ | ||
9696 | cid_count += CNIC_CONTEXT_USE; | ||
9697 | |||
8701 | bp = netdev_priv(dev); | 9698 | bp = netdev_priv(dev); |
8702 | bp->msg_enable = debug; | 9699 | bp->msg_enable = debug; |
8703 | 9700 | ||
@@ -8705,12 +9702,14 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
8705 | 9702 | ||
8706 | bp->l2_cid_count = cid_count; | 9703 | bp->l2_cid_count = cid_count; |
8707 | 9704 | ||
8708 | rc = bnx2x_init_dev(pdev, dev); | 9705 | rc = bnx2x_init_dev(pdev, dev, ent->driver_data); |
8709 | if (rc < 0) { | 9706 | if (rc < 0) { |
8710 | free_netdev(dev); | 9707 | free_netdev(dev); |
8711 | return rc; | 9708 | return rc; |
8712 | } | 9709 | } |
8713 | 9710 | ||
9711 | BNX2X_DEV_INFO("cid_count=%d\n", cid_count); | ||
9712 | |||
8714 | rc = bnx2x_init_bp(bp); | 9713 | rc = bnx2x_init_bp(bp); |
8715 | if (rc) | 9714 | if (rc) |
8716 | goto init_one_exit; | 9715 | goto init_one_exit; |
@@ -8847,12 +9846,17 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) | |||
8847 | 9846 | ||
8848 | bp->rx_mode = BNX2X_RX_MODE_NONE; | 9847 | bp->rx_mode = BNX2X_RX_MODE_NONE; |
8849 | 9848 | ||
9849 | #ifdef BCM_CNIC | ||
9850 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); | ||
9851 | #endif | ||
9852 | /* Stop Tx */ | ||
9853 | bnx2x_tx_disable(bp); | ||
9854 | |||
8850 | bnx2x_netif_stop(bp, 0); | 9855 | bnx2x_netif_stop(bp, 0); |
8851 | netif_carrier_off(bp->dev); | ||
8852 | 9856 | ||
8853 | del_timer_sync(&bp->timer); | 9857 | del_timer_sync(&bp->timer); |
8854 | bp->stats_state = STATS_STATE_DISABLED; | 9858 | |
8855 | DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); | 9859 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); |
8856 | 9860 | ||
8857 | /* Release IRQs */ | 9861 | /* Release IRQs */ |
8858 | bnx2x_free_irq(bp); | 9862 | bnx2x_free_irq(bp); |
@@ -8867,6 +9871,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) | |||
8867 | 9871 | ||
8868 | bp->state = BNX2X_STATE_CLOSED; | 9872 | bp->state = BNX2X_STATE_CLOSED; |
8869 | 9873 | ||
9874 | netif_carrier_off(bp->dev); | ||
9875 | |||
8870 | return 0; | 9876 | return 0; |
8871 | } | 9877 | } |
8872 | 9878 | ||
@@ -9043,6 +10049,24 @@ module_init(bnx2x_init); | |||
9043 | module_exit(bnx2x_cleanup); | 10049 | module_exit(bnx2x_cleanup); |
9044 | 10050 | ||
9045 | #ifdef BCM_CNIC | 10051 | #ifdef BCM_CNIC |
10052 | /** | ||
10053 | * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). | ||
10054 | * | ||
10055 | * @bp: driver handle | ||
10056 | * @set: set or clear the CAM entry | ||
10057 | * | ||
10058 | * This function will wait until the ramrod completion returns. | ||
10059 | * Returns 0 on success, -ENODEV if the ramrod doesn't return. | ||
10060 | */ | ||
10061 | static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) | ||
10062 | { | ||
10063 | unsigned long ramrod_flags = 0; | ||
10064 | |||
10065 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); | ||
10066 | return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac, | ||
10067 | &bp->iscsi_l2_mac_obj, true, | ||
10068 | BNX2X_ISCSI_ETH_MAC, &ramrod_flags); | ||
10069 | } | ||
9046 | 10070 | ||
9047 | /* count denotes the number of new completions we have seen */ | 10071 | /* count denotes the number of new completions we have seen */ |
9048 | static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) | 10072 | static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) |
@@ -9063,23 +10087,22 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) | |||
9063 | u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) | 10087 | u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) |
9064 | & SPE_HDR_CONN_TYPE) >> | 10088 | & SPE_HDR_CONN_TYPE) >> |
9065 | SPE_HDR_CONN_TYPE_SHIFT; | 10089 | SPE_HDR_CONN_TYPE_SHIFT; |
10090 | u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data) | ||
10091 | >> SPE_HDR_CMD_ID_SHIFT) & 0xff; | ||
9066 | 10092 | ||
9067 | /* Set validation for iSCSI L2 client before sending SETUP | 10093 | /* Set validation for iSCSI L2 client before sending SETUP |
9068 | * ramrod | 10094 | * ramrod |
9069 | */ | 10095 | */ |
9070 | if (type == ETH_CONNECTION_TYPE) { | 10096 | if (type == ETH_CONNECTION_TYPE) { |
9071 | u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons-> | ||
9072 | hdr.conn_and_cmd_data) >> | ||
9073 | SPE_HDR_CMD_ID_SHIFT) & 0xff; | ||
9074 | |||
9075 | if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) | 10097 | if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) |
9076 | bnx2x_set_ctx_validation(&bp->context. | 10098 | bnx2x_set_ctx_validation(bp, &bp->context. |
9077 | vcxt[BNX2X_ISCSI_ETH_CID].eth, | 10099 | vcxt[BNX2X_ISCSI_ETH_CID].eth, |
9078 | HW_CID(bp, BNX2X_ISCSI_ETH_CID)); | 10100 | BNX2X_ISCSI_ETH_CID); |
9079 | } | 10101 | } |
9080 | 10102 | ||
9081 | /* There may be not more than 8 L2 and not more than 8 L5 SPEs | 10103 | /* |
9082 | * We also check that the number of outstanding | 10104 | * There may be not more than 8 L2, not more than 8 L5 SPEs |
10105 | * in the air. We also check that the number of outstanding | ||
9083 | * COMMON ramrods is not more than the EQ and SPQ can | 10106 | * COMMON ramrods is not more than the EQ and SPQ can |
9084 | * accommodate. | 10107 | * accommodate. |
9085 | */ | 10108 | */ |
@@ -9205,18 +10228,61 @@ int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) | |||
9205 | return bnx2x_cnic_ctl_send(bp, &ctl); | 10228 | return bnx2x_cnic_ctl_send(bp, &ctl); |
9206 | } | 10229 | } |
9207 | 10230 | ||
9208 | static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid) | 10231 | static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) |
9209 | { | 10232 | { |
9210 | struct cnic_ctl_info ctl; | 10233 | struct cnic_ctl_info ctl = {0}; |
9211 | 10234 | ||
9212 | /* first we tell CNIC and only then we count this as a completion */ | 10235 | /* first we tell CNIC and only then we count this as a completion */ |
9213 | ctl.cmd = CNIC_CTL_COMPLETION_CMD; | 10236 | ctl.cmd = CNIC_CTL_COMPLETION_CMD; |
9214 | ctl.data.comp.cid = cid; | 10237 | ctl.data.comp.cid = cid; |
10238 | ctl.data.comp.error = err; | ||
9215 | 10239 | ||
9216 | bnx2x_cnic_ctl_send_bh(bp, &ctl); | 10240 | bnx2x_cnic_ctl_send_bh(bp, &ctl); |
9217 | bnx2x_cnic_sp_post(bp, 0); | 10241 | bnx2x_cnic_sp_post(bp, 0); |
9218 | } | 10242 | } |
9219 | 10243 | ||
10244 | |||
10245 | /* Called with netif_addr_lock_bh() taken. | ||
10246 | * Sets an rx_mode config for an iSCSI ETH client. | ||
10247 | * Doesn't block. | ||
10248 | * Completion should be checked outside. | ||
10249 | */ | ||
10250 | static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) | ||
10251 | { | ||
10252 | unsigned long accept_flags = 0, ramrod_flags = 0; | ||
10253 | u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); | ||
10254 | int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED; | ||
10255 | |||
10256 | if (start) { | ||
10257 | /* Start accepting on iSCSI L2 ring. Accept all multicasts | ||
10258 | * because it's the only way for UIO Queue to accept | ||
10259 | * multicasts (in non-promiscuous mode only one Queue per | ||
10260 | * function, the leading one in our case, will receive | ||
10261 | * multicast packets). | ||
10262 | */ | ||
10263 | __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags); | ||
10264 | __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags); | ||
10265 | __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags); | ||
10266 | __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); | ||
10267 | |||
10268 | /* Clear STOP_PENDING bit if START is requested */ | ||
10269 | clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); | ||
10270 | |||
10271 | sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED; | ||
10272 | } else | ||
10273 | /* Clear START_PENDING bit if STOP is requested */ | ||
10274 | clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); | ||
10275 | |||
10276 | if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) | ||
10277 | set_bit(sched_state, &bp->sp_state); | ||
10278 | else { | ||
10279 | __set_bit(RAMROD_RX, &ramrod_flags); | ||
10280 | bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0, | ||
10281 | ramrod_flags); | ||
10282 | } | ||
10283 | } | ||
10284 | |||
10285 | |||
9220 | static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) | 10286 | static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) |
9221 | { | 10287 | { |
9222 | struct bnx2x *bp = netdev_priv(dev); | 10288 | struct bnx2x *bp = netdev_priv(dev); |
@@ -9240,45 +10306,65 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) | |||
9240 | 10306 | ||
9241 | /* rtnl_lock is held. */ | 10307 | /* rtnl_lock is held. */ |
9242 | case DRV_CTL_START_L2_CMD: { | 10308 | case DRV_CTL_START_L2_CMD: { |
9243 | u32 cli = ctl->data.ring.client_id; | 10309 | struct cnic_eth_dev *cp = &bp->cnic_eth_dev; |
9244 | 10310 | unsigned long sp_bits = 0; | |
9245 | /* Clear FCoE FIP and ALL ENODE MACs addresses first */ | 10311 | |
9246 | bnx2x_del_fcoe_eth_macs(bp); | 10312 | /* Configure the iSCSI classification object */ |
10313 | bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, | ||
10314 | cp->iscsi_l2_client_id, | ||
10315 | cp->iscsi_l2_cid, BP_FUNC(bp), | ||
10316 | bnx2x_sp(bp, mac_rdata), | ||
10317 | bnx2x_sp_mapping(bp, mac_rdata), | ||
10318 | BNX2X_FILTER_MAC_PENDING, | ||
10319 | &bp->sp_state, BNX2X_OBJ_TYPE_RX, | ||
10320 | &bp->macs_pool); | ||
9247 | 10321 | ||
9248 | /* Set iSCSI MAC address */ | 10322 | /* Set iSCSI MAC address */ |
9249 | bnx2x_set_iscsi_eth_mac_addr(bp, 1); | 10323 | rc = bnx2x_set_iscsi_eth_mac_addr(bp); |
10324 | if (rc) | ||
10325 | break; | ||
9250 | 10326 | ||
9251 | mmiowb(); | 10327 | mmiowb(); |
9252 | barrier(); | 10328 | barrier(); |
9253 | 10329 | ||
9254 | /* Start accepting on iSCSI L2 ring. Accept all multicasts | 10330 | /* Start accepting on iSCSI L2 ring */ |
9255 | * because it's the only way for UIO Client to accept | 10331 | |
9256 | * multicasts (in non-promiscuous mode only one Client per | 10332 | netif_addr_lock_bh(dev); |
9257 | * function will receive multicast packets (leading in our | 10333 | bnx2x_set_iscsi_eth_rx_mode(bp, true); |
9258 | * case). | 10334 | netif_addr_unlock_bh(dev); |
9259 | */ | 10335 | |
9260 | bnx2x_rxq_set_mac_filters(bp, cli, | 10336 | /* bits to wait on */ |
9261 | BNX2X_ACCEPT_UNICAST | | 10337 | __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); |
9262 | BNX2X_ACCEPT_BROADCAST | | 10338 | __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits); |
9263 | BNX2X_ACCEPT_ALL_MULTICAST); | 10339 | |
9264 | storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp)); | 10340 | if (!bnx2x_wait_sp_comp(bp, sp_bits)) |
10341 | BNX2X_ERR("rx_mode completion timed out!\n"); | ||
9265 | 10342 | ||
9266 | break; | 10343 | break; |
9267 | } | 10344 | } |
9268 | 10345 | ||
9269 | /* rtnl_lock is held. */ | 10346 | /* rtnl_lock is held. */ |
9270 | case DRV_CTL_STOP_L2_CMD: { | 10347 | case DRV_CTL_STOP_L2_CMD: { |
9271 | u32 cli = ctl->data.ring.client_id; | 10348 | unsigned long sp_bits = 0; |
9272 | 10349 | ||
9273 | /* Stop accepting on iSCSI L2 ring */ | 10350 | /* Stop accepting on iSCSI L2 ring */ |
9274 | bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE); | 10351 | netif_addr_lock_bh(dev); |
9275 | storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp)); | 10352 | bnx2x_set_iscsi_eth_rx_mode(bp, false); |
10353 | netif_addr_unlock_bh(dev); | ||
10354 | |||
10355 | /* bits to wait on */ | ||
10356 | __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); | ||
10357 | __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits); | ||
10358 | |||
10359 | if (!bnx2x_wait_sp_comp(bp, sp_bits)) | ||
10360 | BNX2X_ERR("rx_mode completion timed out!\n"); | ||
9276 | 10361 | ||
9277 | mmiowb(); | 10362 | mmiowb(); |
9278 | barrier(); | 10363 | barrier(); |
9279 | 10364 | ||
9280 | /* Unset iSCSI L2 MAC */ | 10365 | /* Unset iSCSI L2 MAC */ |
9281 | bnx2x_set_iscsi_eth_mac_addr(bp, 0); | 10366 | rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, |
10367 | BNX2X_ISCSI_ETH_MAC, true); | ||
9282 | break; | 10368 | break; |
9283 | } | 10369 | } |
9284 | case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { | 10370 | case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { |
@@ -9290,11 +10376,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) | |||
9290 | break; | 10376 | break; |
9291 | } | 10377 | } |
9292 | 10378 | ||
9293 | case DRV_CTL_ISCSI_STOPPED_CMD: { | ||
9294 | bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED); | ||
9295 | break; | ||
9296 | } | ||
9297 | |||
9298 | default: | 10379 | default: |
9299 | BNX2X_ERR("unknown command %x\n", ctl->cmd); | 10380 | BNX2X_ERR("unknown command %x\n", ctl->cmd); |
9300 | rc = -EINVAL; | 10381 | rc = -EINVAL; |
@@ -9315,13 +10396,13 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) | |||
9315 | cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; | 10396 | cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; |
9316 | cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; | 10397 | cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; |
9317 | } | 10398 | } |
9318 | if (CHIP_IS_E2(bp)) | 10399 | if (!CHIP_IS_E1x(bp)) |
9319 | cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; | 10400 | cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; |
9320 | else | 10401 | else |
9321 | cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; | 10402 | cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; |
9322 | 10403 | ||
9323 | cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp); | 10404 | cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); |
9324 | cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp); | 10405 | cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); |
9325 | cp->irq_arr[1].status_blk = bp->def_status_blk; | 10406 | cp->irq_arr[1].status_blk = bp->def_status_blk; |
9326 | cp->irq_arr[1].status_blk_num = DEF_SB_ID; | 10407 | cp->irq_arr[1].status_blk_num = DEF_SB_ID; |
9327 | cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; | 10408 | cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; |
@@ -9352,7 +10433,7 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, | |||
9352 | bp->cnic_data = data; | 10433 | bp->cnic_data = data; |
9353 | 10434 | ||
9354 | cp->num_irq = 0; | 10435 | cp->num_irq = 0; |
9355 | cp->drv_state = CNIC_DRV_STATE_REGD; | 10436 | cp->drv_state |= CNIC_DRV_STATE_REGD; |
9356 | cp->iro_arr = bp->iro_arr; | 10437 | cp->iro_arr = bp->iro_arr; |
9357 | 10438 | ||
9358 | bnx2x_setup_cnic_irq_info(bp); | 10439 | bnx2x_setup_cnic_irq_info(bp); |
@@ -9406,8 +10487,8 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) | |||
9406 | cp->drv_register_cnic = bnx2x_register_cnic; | 10487 | cp->drv_register_cnic = bnx2x_register_cnic; |
9407 | cp->drv_unregister_cnic = bnx2x_unregister_cnic; | 10488 | cp->drv_unregister_cnic = bnx2x_unregister_cnic; |
9408 | cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID; | 10489 | cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID; |
9409 | cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID + | 10490 | cp->iscsi_l2_client_id = |
9410 | BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; | 10491 | bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); |
9411 | cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; | 10492 | cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; |
9412 | 10493 | ||
9413 | if (NO_ISCSI_OOO(bp)) | 10494 | if (NO_ISCSI_OOO(bp)) |
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 18ac2ab08f3d..9868cb0270a4 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h | |||
@@ -422,6 +422,7 @@ | |||
422 | #define CFC_REG_NUM_LCIDS_ALLOC 0x104020 | 422 | #define CFC_REG_NUM_LCIDS_ALLOC 0x104020 |
423 | /* [R 9] Number of Arriving LCIDs in Link List Block */ | 423 | /* [R 9] Number of Arriving LCIDs in Link List Block */ |
424 | #define CFC_REG_NUM_LCIDS_ARRIVING 0x104004 | 424 | #define CFC_REG_NUM_LCIDS_ARRIVING 0x104004 |
425 | #define CFC_REG_NUM_LCIDS_INSIDE_PF 0x104120 | ||
425 | /* [R 9] Number of Leaving LCIDs in Link List Block */ | 426 | /* [R 9] Number of Leaving LCIDs in Link List Block */ |
426 | #define CFC_REG_NUM_LCIDS_LEAVING 0x104018 | 427 | #define CFC_REG_NUM_LCIDS_LEAVING 0x104018 |
427 | #define CFC_REG_WEAK_ENABLE_PF 0x104124 | 428 | #define CFC_REG_WEAK_ENABLE_PF 0x104124 |
@@ -783,6 +784,7 @@ | |||
783 | /* [RW 3] The number of simultaneous outstanding requests to Context Fetch | 784 | /* [RW 3] The number of simultaneous outstanding requests to Context Fetch |
784 | Interface. */ | 785 | Interface. */ |
785 | #define DORQ_REG_OUTST_REQ 0x17003c | 786 | #define DORQ_REG_OUTST_REQ 0x17003c |
787 | #define DORQ_REG_PF_USAGE_CNT 0x1701d0 | ||
786 | #define DORQ_REG_REGN 0x170038 | 788 | #define DORQ_REG_REGN 0x170038 |
787 | /* [R 4] Current value of response A counter credit. Initial credit is | 789 | /* [R 4] Current value of response A counter credit. Initial credit is |
788 | configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd | 790 | configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd |
@@ -1645,6 +1647,17 @@ | |||
1645 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4) | 1647 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4) |
1646 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2) | 1648 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2) |
1647 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3) | 1649 | #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3) |
1650 | /* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or | ||
1651 | * not it is the recipient of the message on the MDIO interface. The value | ||
1652 | * is compared to the value on ctrl_md_devad. Drives output | ||
1653 | * misc_xgxs0_phy_addr. Global register. */ | ||
1654 | #define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc | ||
1655 | /* [RW 32] 1 [47] Packet Size = 64 Write to this register write bits 31:0. | ||
1656 | * Reads from this register will clear bits 31:0. */ | ||
1657 | #define MSTAT_REG_RX_STAT_GR64_LO 0x200 | ||
1658 | /* [RW 32] 1 [00] Tx Good Packet Count Write to this register write bits | ||
1659 | * 31:0. Reads from this register will clear bits 31:0. */ | ||
1660 | #define MSTAT_REG_TX_STAT_GTXPOK_LO 0 | ||
1648 | #define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0) | 1661 | #define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0) |
1649 | #define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0) | 1662 | #define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0) |
1650 | #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0) | 1663 | #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0) |
@@ -1838,6 +1851,10 @@ | |||
1838 | #define NIG_REG_LLH1_FUNC_MEM 0x161c0 | 1851 | #define NIG_REG_LLH1_FUNC_MEM 0x161c0 |
1839 | #define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160 | 1852 | #define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160 |
1840 | #define NIG_REG_LLH1_FUNC_MEM_SIZE 16 | 1853 | #define NIG_REG_LLH1_FUNC_MEM_SIZE 16 |
1854 | /* [RW 1] When this bit is set; the LLH will classify the packet before | ||
1855 | * sending it to the BRB or calculating WoL on it. This bit controls port 1 | ||
1856 | * only. The legacy llh_multi_function_mode bit controls port 0. */ | ||
1857 | #define NIG_REG_LLH1_MF_MODE 0x18614 | ||
1841 | /* [RW 8] init credit counter for port1 in LLH */ | 1858 | /* [RW 8] init credit counter for port1 in LLH */ |
1842 | #define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564 | 1859 | #define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564 |
1843 | #define NIG_REG_LLH1_XCM_MASK 0x10134 | 1860 | #define NIG_REG_LLH1_XCM_MASK 0x10134 |
@@ -1889,6 +1906,26 @@ | |||
1889 | * than one bit may be set; allowing multiple priorities to be mapped to one | 1906 | * than one bit may be set; allowing multiple priorities to be mapped to one |
1890 | * COS. */ | 1907 | * COS. */ |
1891 | #define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c | 1908 | #define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c |
1909 | /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A | ||
1910 | * priority is mapped to COS 2 when the corresponding mask bit is 1. More | ||
1911 | * than one bit may be set; allowing multiple priorities to be mapped to one | ||
1912 | * COS. */ | ||
1913 | #define NIG_REG_P0_RX_COS2_PRIORITY_MASK 0x186b0 | ||
1914 | /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A | ||
1915 | * priority is mapped to COS 3 when the corresponding mask bit is 1. More | ||
1916 | * than one bit may be set; allowing multiple priorities to be mapped to one | ||
1917 | * COS. */ | ||
1918 | #define NIG_REG_P0_RX_COS3_PRIORITY_MASK 0x186b4 | ||
1919 | /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A | ||
1920 | * priority is mapped to COS 4 when the corresponding mask bit is 1. More | ||
1921 | * than one bit may be set; allowing multiple priorities to be mapped to one | ||
1922 | * COS. */ | ||
1923 | #define NIG_REG_P0_RX_COS4_PRIORITY_MASK 0x186b8 | ||
1924 | /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A | ||
1925 | * priority is mapped to COS 5 when the corresponding mask bit is 1. More | ||
1926 | * than one bit may be set; allowing multiple priorities to be mapped to one | ||
1927 | * COS. */ | ||
1928 | #define NIG_REG_P0_RX_COS5_PRIORITY_MASK 0x186bc | ||
1892 | /* [RW 15] Specify which of the credit registers the client is to be mapped | 1929 | /* [RW 15] Specify which of the credit registers the client is to be mapped |
1893 | * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For | 1930 | * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For |
1894 | * clients that are not subject to WFQ credit blocking - their | 1931 | * clients that are not subject to WFQ credit blocking - their |
@@ -1926,6 +1963,9 @@ | |||
1926 | * for management at priority 0; debug traffic at priorities 1 and 2; COS0 | 1963 | * for management at priority 0; debug traffic at priorities 1 and 2; COS0 |
1927 | * traffic at priority 3; and COS1 traffic at priority 4. */ | 1964 | * traffic at priority 3; and COS1 traffic at priority 4. */ |
1928 | #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4 | 1965 | #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4 |
1966 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic | ||
1967 | * Ethernet header. */ | ||
1968 | #define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c | ||
1929 | #define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0 | 1969 | #define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0 |
1930 | #define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460 | 1970 | #define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460 |
1931 | /* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for | 1971 | /* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for |
@@ -1944,6 +1984,11 @@ | |||
1944 | * than one bit may be set; allowing multiple priorities to be mapped to one | 1984 | * than one bit may be set; allowing multiple priorities to be mapped to one |
1945 | * COS. */ | 1985 | * COS. */ |
1946 | #define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0 | 1986 | #define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0 |
1987 | /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A | ||
1988 | * priority is mapped to COS 2 when the corresponding mask bit is 1. More | ||
1989 | * than one bit may be set; allowing multiple priorities to be mapped to one | ||
1990 | * COS. */ | ||
1991 | #define NIG_REG_P1_RX_COS2_PRIORITY_MASK 0x186f8 | ||
1947 | /* [RW 1] Pause enable for port0. This register may get 1 only when | 1992 | /* [RW 1] Pause enable for port0. This register may get 1 only when |
1948 | ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same | 1993 | ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same |
1949 | port */ | 1994 | port */ |
@@ -2033,6 +2078,15 @@ | |||
2033 | #define PBF_REG_COS1_UPPER_BOUND 0x15c060 | 2078 | #define PBF_REG_COS1_UPPER_BOUND 0x15c060 |
2034 | /* [RW 31] The weight of COS1 in the ETS command arbiter. */ | 2079 | /* [RW 31] The weight of COS1 in the ETS command arbiter. */ |
2035 | #define PBF_REG_COS1_WEIGHT 0x15c058 | 2080 | #define PBF_REG_COS1_WEIGHT 0x15c058 |
2081 | /* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte | ||
2082 | * lines. */ | ||
2083 | #define PBF_REG_CREDIT_LB_Q 0x140338 | ||
2084 | /* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte | ||
2085 | * lines. */ | ||
2086 | #define PBF_REG_CREDIT_Q0 0x14033c | ||
2087 | /* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte | ||
2088 | * lines. */ | ||
2089 | #define PBF_REG_CREDIT_Q1 0x140340 | ||
2036 | /* [RW 1] Disable processing further tasks from port 0 (after ending the | 2090 | /* [RW 1] Disable processing further tasks from port 0 (after ending the |
2037 | current task in process). */ | 2091 | current task in process). */ |
2038 | #define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c | 2092 | #define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c |
@@ -2050,14 +2104,25 @@ | |||
2050 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic | 2104 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic |
2051 | * Ethernet header. */ | 2105 | * Ethernet header. */ |
2052 | #define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8 | 2106 | #define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8 |
2053 | /* [RW 1] Indicates which COS is conncted to the highest priority in the | 2107 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */ |
2054 | * command arbiter. */ | 2108 | #define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8 |
2109 | /* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest | ||
2110 | * priority in the command arbiter. */ | ||
2055 | #define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c | 2111 | #define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c |
2056 | #define PBF_REG_IF_ENABLE_REG 0x140044 | 2112 | #define PBF_REG_IF_ENABLE_REG 0x140044 |
2057 | /* [RW 1] Init bit. When set the initial credits are copied to the credit | 2113 | /* [RW 1] Init bit. When set the initial credits are copied to the credit |
2058 | registers (except the port credits). Should be set and then reset after | 2114 | registers (except the port credits). Should be set and then reset after |
2059 | the configuration of the block has ended. */ | 2115 | the configuration of the block has ended. */ |
2060 | #define PBF_REG_INIT 0x140000 | 2116 | #define PBF_REG_INIT 0x140000 |
2117 | /* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte | ||
2118 | * lines. */ | ||
2119 | #define PBF_REG_INIT_CRD_LB_Q 0x15c248 | ||
2120 | /* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte | ||
2121 | * lines. */ | ||
2122 | #define PBF_REG_INIT_CRD_Q0 0x15c230 | ||
2123 | /* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte | ||
2124 | * lines. */ | ||
2125 | #define PBF_REG_INIT_CRD_Q1 0x15c234 | ||
2061 | /* [RW 1] Init bit for port 0. When set the initial credit of port 0 is | 2126 | /* [RW 1] Init bit for port 0. When set the initial credit of port 0 is |
2062 | copied to the credit register. Should be set and then reset after the | 2127 | copied to the credit register. Should be set and then reset after the |
2063 | configuration of the port has ended. */ | 2128 | configuration of the port has ended. */ |
@@ -2070,6 +2135,15 @@ | |||
2070 | copied to the credit register. Should be set and then reset after the | 2135 | copied to the credit register. Should be set and then reset after the |
2071 | configuration of the port has ended. */ | 2136 | configuration of the port has ended. */ |
2072 | #define PBF_REG_INIT_P4 0x14000c | 2137 | #define PBF_REG_INIT_P4 0x14000c |
2138 | /* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for | ||
2139 | * the LB queue. Reset upon init. */ | ||
2140 | #define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q 0x140354 | ||
2141 | /* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for | ||
2142 | * queue 0. Reset upon init. */ | ||
2143 | #define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 0x140358 | ||
2144 | /* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for | ||
2145 | * queue 1. Reset upon init. */ | ||
2146 | #define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 0x14035c | ||
2073 | /* [RW 1] Enable for mac interface 0. */ | 2147 | /* [RW 1] Enable for mac interface 0. */ |
2074 | #define PBF_REG_MAC_IF0_ENABLE 0x140030 | 2148 | #define PBF_REG_MAC_IF0_ENABLE 0x140030 |
2075 | /* [RW 1] Enable for mac interface 1. */ | 2149 | /* [RW 1] Enable for mac interface 1. */ |
@@ -2090,24 +2164,49 @@ | |||
2090 | /* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte | 2164 | /* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte |
2091 | lines. */ | 2165 | lines. */ |
2092 | #define PBF_REG_P0_INIT_CRD 0x1400d0 | 2166 | #define PBF_REG_P0_INIT_CRD 0x1400d0 |
2093 | /* [RW 1] Indication that pause is enabled for port 0. */ | 2167 | /* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for |
2094 | #define PBF_REG_P0_PAUSE_ENABLE 0x140014 | 2168 | * port 0. Reset upon init. */ |
2095 | /* [R 8] Number of tasks in port 0 task queue. */ | 2169 | #define PBF_REG_P0_INTERNAL_CRD_FREED_CNT 0x140308 |
2170 | /* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */ | ||
2171 | #define PBF_REG_P0_PAUSE_ENABLE 0x140014 | ||
2172 | /* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */ | ||
2096 | #define PBF_REG_P0_TASK_CNT 0x140204 | 2173 | #define PBF_REG_P0_TASK_CNT 0x140204 |
2097 | /* [R 11] Current credit for port 1 in the tx port buffers in 16 byte lines. */ | 2174 | /* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines |
2175 | * freed from the task queue of port 0. Reset upon init. */ | ||
2176 | #define PBF_REG_P0_TQ_LINES_FREED_CNT 0x1402f0 | ||
2177 | /* [R 12] Number of 8 bytes lines occupied in the task queue of port 0. */ | ||
2178 | #define PBF_REG_P0_TQ_OCCUPANCY 0x1402fc | ||
2179 | /* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port | ||
2180 | * buffers in 16 byte lines. */ | ||
2098 | #define PBF_REG_P1_CREDIT 0x140208 | 2181 | #define PBF_REG_P1_CREDIT 0x140208 |
2099 | /* [RW 11] Initial credit for port 1 in the tx port buffers in 16 byte | 2182 | /* [R 11] Removed for E3 B0 - Initial credit for port 0 in the tx port |
2100 | lines. */ | 2183 | * buffers in 16 byte lines. */ |
2101 | #define PBF_REG_P1_INIT_CRD 0x1400d4 | 2184 | #define PBF_REG_P1_INIT_CRD 0x1400d4 |
2102 | /* [R 8] Number of tasks in port 1 task queue. */ | 2185 | /* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for |
2186 | * port 1. Reset upon init. */ | ||
2187 | #define PBF_REG_P1_INTERNAL_CRD_FREED_CNT 0x14030c | ||
2188 | /* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */ | ||
2103 | #define PBF_REG_P1_TASK_CNT 0x14020c | 2189 | #define PBF_REG_P1_TASK_CNT 0x14020c |
2190 | /* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines | ||
2191 | * freed from the task queue of port 1. Reset upon init. */ | ||
2192 | #define PBF_REG_P1_TQ_LINES_FREED_CNT 0x1402f4 | ||
2193 | /* [R 12] Number of 8 bytes lines occupied in the task queue of port 1. */ | ||
2194 | #define PBF_REG_P1_TQ_OCCUPANCY 0x140300 | ||
2104 | /* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */ | 2195 | /* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */ |
2105 | #define PBF_REG_P4_CREDIT 0x140210 | 2196 | #define PBF_REG_P4_CREDIT 0x140210 |
2106 | /* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte | 2197 | /* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte |
2107 | lines. */ | 2198 | lines. */ |
2108 | #define PBF_REG_P4_INIT_CRD 0x1400e0 | 2199 | #define PBF_REG_P4_INIT_CRD 0x1400e0 |
2109 | /* [R 8] Number of tasks in port 4 task queue. */ | 2200 | /* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for |
2201 | * port 4. Reset upon init. */ | ||
2202 | #define PBF_REG_P4_INTERNAL_CRD_FREED_CNT 0x140310 | ||
2203 | /* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */ | ||
2110 | #define PBF_REG_P4_TASK_CNT 0x140214 | 2204 | #define PBF_REG_P4_TASK_CNT 0x140214 |
2205 | /* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines | ||
2206 | * freed from the task queue of port 4. Reset upon init. */ | ||
2207 | #define PBF_REG_P4_TQ_LINES_FREED_CNT 0x1402f8 | ||
2208 | /* [R 12] Number of 8 bytes lines occupied in the task queue of port 4. */ | ||
2209 | #define PBF_REG_P4_TQ_OCCUPANCY 0x140304 | ||
2111 | /* [RW 5] Interrupt mask register #0 read/write */ | 2210 | /* [RW 5] Interrupt mask register #0 read/write */ |
2112 | #define PBF_REG_PBF_INT_MASK 0x1401d4 | 2211 | #define PBF_REG_PBF_INT_MASK 0x1401d4 |
2113 | /* [R 5] Interrupt register #0 read */ | 2212 | /* [R 5] Interrupt register #0 read */ |
@@ -2116,6 +2215,27 @@ | |||
2116 | #define PBF_REG_PBF_PRTY_MASK 0x1401e4 | 2215 | #define PBF_REG_PBF_PRTY_MASK 0x1401e4 |
2117 | /* [RC 20] Parity register #0 read clear */ | 2216 | /* [RC 20] Parity register #0 read clear */ |
2118 | #define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc | 2217 | #define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc |
2218 | /* [RW 16] The Ethernet type value for L2 tag 0 */ | ||
2219 | #define PBF_REG_TAG_ETHERTYPE_0 0x15c090 | ||
2220 | /* [RW 4] The length of the info field for L2 tag 0. The length is between | ||
2221 | * 2B and 14B; in 2B granularity */ | ||
2222 | #define PBF_REG_TAG_LEN_0 0x15c09c | ||
2223 | /* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task | ||
2224 | * queue. Reset upon init. */ | ||
2225 | #define PBF_REG_TQ_LINES_FREED_CNT_LB_Q 0x14038c | ||
2226 | /* [R 32] Cyclic counter for number of 8 byte lines freed from the task | ||
2227 | * queue 0. Reset upon init. */ | ||
2228 | #define PBF_REG_TQ_LINES_FREED_CNT_Q0 0x140390 | ||
2229 | /* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1. | ||
2230 | * Reset upon init. */ | ||
2231 | #define PBF_REG_TQ_LINES_FREED_CNT_Q1 0x140394 | ||
2232 | /* [R 13] Number of 8 bytes lines occupied in the task queue of the LB | ||
2233 | * queue. */ | ||
2234 | #define PBF_REG_TQ_OCCUPANCY_LB_Q 0x1403a8 | ||
2235 | /* [R 13] Number of 8 bytes lines occupied in the task queue of queue 0. */ | ||
2236 | #define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac | ||
2237 | /* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */ | ||
2238 | #define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0 | ||
2119 | #define PB_REG_CONTROL 0 | 2239 | #define PB_REG_CONTROL 0 |
2120 | /* [RW 2] Interrupt mask register #0 read/write */ | 2240 | /* [RW 2] Interrupt mask register #0 read/write */ |
2121 | #define PB_REG_PB_INT_MASK 0x28 | 2241 | #define PB_REG_PB_INT_MASK 0x28 |
@@ -2445,10 +2565,24 @@ | |||
2445 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic | 2565 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic |
2446 | * Ethernet header. */ | 2566 | * Ethernet header. */ |
2447 | #define PRS_REG_HDRS_AFTER_BASIC 0x40238 | 2567 | #define PRS_REG_HDRS_AFTER_BASIC 0x40238 |
2568 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic | ||
2569 | * Ethernet header for port 0 packets. */ | ||
2570 | #define PRS_REG_HDRS_AFTER_BASIC_PORT_0 0x40270 | ||
2571 | #define PRS_REG_HDRS_AFTER_BASIC_PORT_1 0x40290 | ||
2572 | /* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */ | ||
2573 | #define PRS_REG_HDRS_AFTER_TAG_0 0x40248 | ||
2574 | /* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for | ||
2575 | * port 0 packets */ | ||
2576 | #define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 0x40280 | ||
2577 | #define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 0x402a0 | ||
2448 | /* [RW 4] The increment value to send in the CFC load request message */ | 2578 | /* [RW 4] The increment value to send in the CFC load request message */ |
2449 | #define PRS_REG_INC_VALUE 0x40048 | 2579 | #define PRS_REG_INC_VALUE 0x40048 |
2450 | /* [RW 6] Bit-map indicating which headers must appear in the packet */ | 2580 | /* [RW 6] Bit-map indicating which headers must appear in the packet */ |
2451 | #define PRS_REG_MUST_HAVE_HDRS 0x40254 | 2581 | #define PRS_REG_MUST_HAVE_HDRS 0x40254 |
2582 | /* [RW 6] Bit-map indicating which headers must appear in the packet for | ||
2583 | * port 0 packets */ | ||
2584 | #define PRS_REG_MUST_HAVE_HDRS_PORT_0 0x4028c | ||
2585 | #define PRS_REG_MUST_HAVE_HDRS_PORT_1 0x402ac | ||
2452 | #define PRS_REG_NIC_MODE 0x40138 | 2586 | #define PRS_REG_NIC_MODE 0x40138 |
2453 | /* [RW 8] The 8-bit event ID for cases where there is no match on the | 2587 | /* [RW 8] The 8-bit event ID for cases where there is no match on the |
2454 | connection. Used in packet start message to TCM. */ | 2588 | connection. Used in packet start message to TCM. */ |
@@ -2497,6 +2631,11 @@ | |||
2497 | #define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158 | 2631 | #define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158 |
2498 | /* [R 4] debug only: SRC current credit. Transaction based. */ | 2632 | /* [R 4] debug only: SRC current credit. Transaction based. */ |
2499 | #define PRS_REG_SRC_CURRENT_CREDIT 0x4016c | 2633 | #define PRS_REG_SRC_CURRENT_CREDIT 0x4016c |
2634 | /* [RW 16] The Ethernet type value for L2 tag 0 */ | ||
2635 | #define PRS_REG_TAG_ETHERTYPE_0 0x401d4 | ||
2636 | /* [RW 4] The length of the info field for L2 tag 0. The length is between | ||
2637 | * 2B and 14B; in 2B granularity */ | ||
2638 | #define PRS_REG_TAG_LEN_0 0x4022c | ||
2500 | /* [R 8] debug only: TCM current credit. Cycle based. */ | 2639 | /* [R 8] debug only: TCM current credit. Cycle based. */ |
2501 | #define PRS_REG_TCM_CURRENT_CREDIT 0x40160 | 2640 | #define PRS_REG_TCM_CURRENT_CREDIT 0x40160 |
2502 | /* [R 8] debug only: TSDM current credit. Transaction based. */ | 2641 | /* [R 8] debug only: TSDM current credit. Transaction based. */ |
@@ -3081,6 +3220,7 @@ | |||
3081 | #define QM_REG_BYTECREDITAFULLTHR 0x168094 | 3220 | #define QM_REG_BYTECREDITAFULLTHR 0x168094 |
3082 | /* [RW 4] The initial credit for interface */ | 3221 | /* [RW 4] The initial credit for interface */ |
3083 | #define QM_REG_CMINITCRD_0 0x1680cc | 3222 | #define QM_REG_CMINITCRD_0 0x1680cc |
3223 | #define QM_REG_BYTECRDCMDQ_0 0x16e6e8 | ||
3084 | #define QM_REG_CMINITCRD_1 0x1680d0 | 3224 | #define QM_REG_CMINITCRD_1 0x1680d0 |
3085 | #define QM_REG_CMINITCRD_2 0x1680d4 | 3225 | #define QM_REG_CMINITCRD_2 0x1680d4 |
3086 | #define QM_REG_CMINITCRD_3 0x1680d8 | 3226 | #define QM_REG_CMINITCRD_3 0x1680d8 |
@@ -3171,7 +3311,10 @@ | |||
3171 | /* [RW 2] The PCI attributes field used in the PCI request. */ | 3311 | /* [RW 2] The PCI attributes field used in the PCI request. */ |
3172 | #define QM_REG_PCIREQAT 0x168054 | 3312 | #define QM_REG_PCIREQAT 0x168054 |
3173 | #define QM_REG_PF_EN 0x16e70c | 3313 | #define QM_REG_PF_EN 0x16e70c |
3174 | /* [R 16] The byte credit of port 0 */ | 3314 | /* [R 24] The number of tasks stored in the QM for the PF. only even |
3315 | * functions are valid in E2 (odd I registers will be hard wired to 0) */ | ||
3316 | #define QM_REG_PF_USG_CNT_0 0x16e040 | ||
3317 | /* [R 16] NOT USED */ | ||
3175 | #define QM_REG_PORT0BYTECRD 0x168300 | 3318 | #define QM_REG_PORT0BYTECRD 0x168300 |
3176 | /* [R 16] The byte credit of port 1 */ | 3319 | /* [R 16] The byte credit of port 1 */ |
3177 | #define QM_REG_PORT1BYTECRD 0x168304 | 3320 | #define QM_REG_PORT1BYTECRD 0x168304 |
@@ -3783,6 +3926,8 @@ | |||
3783 | #define TM_REG_LIN0_LOGIC_ADDR 0x164240 | 3926 | #define TM_REG_LIN0_LOGIC_ADDR 0x164240 |
3784 | /* [RW 18] Linear0 Max active cid (in banks of 32 entries). */ | 3927 | /* [RW 18] Linear0 Max active cid (in banks of 32 entries). */ |
3785 | #define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048 | 3928 | #define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048 |
3929 | /* [ST 16] Linear0 Number of scans counter. */ | ||
3930 | #define TM_REG_LIN0_NUM_SCANS 0x1640a0 | ||
3786 | /* [WB 64] Linear0 phy address. */ | 3931 | /* [WB 64] Linear0 phy address. */ |
3787 | #define TM_REG_LIN0_PHY_ADDR 0x164270 | 3932 | #define TM_REG_LIN0_PHY_ADDR 0x164270 |
3788 | /* [RW 1] Linear0 physical address valid. */ | 3933 | /* [RW 1] Linear0 physical address valid. */ |
@@ -3790,6 +3935,7 @@ | |||
3790 | #define TM_REG_LIN0_SCAN_ON 0x1640d0 | 3935 | #define TM_REG_LIN0_SCAN_ON 0x1640d0 |
3791 | /* [RW 24] Linear0 array scan timeout. */ | 3936 | /* [RW 24] Linear0 array scan timeout. */ |
3792 | #define TM_REG_LIN0_SCAN_TIME 0x16403c | 3937 | #define TM_REG_LIN0_SCAN_TIME 0x16403c |
3938 | #define TM_REG_LIN0_VNIC_UC 0x164128 | ||
3793 | /* [RW 32] Linear1 logic address. */ | 3939 | /* [RW 32] Linear1 logic address. */ |
3794 | #define TM_REG_LIN1_LOGIC_ADDR 0x164250 | 3940 | #define TM_REG_LIN1_LOGIC_ADDR 0x164250 |
3795 | /* [WB 64] Linear1 phy address. */ | 3941 | /* [WB 64] Linear1 phy address. */ |
@@ -4845,8 +4991,10 @@ | |||
4845 | #define XSDM_REG_NUM_OF_Q8_CMD 0x166264 | 4991 | #define XSDM_REG_NUM_OF_Q8_CMD 0x166264 |
4846 | /* [ST 32] The number of commands received in queue 9 */ | 4992 | /* [ST 32] The number of commands received in queue 9 */ |
4847 | #define XSDM_REG_NUM_OF_Q9_CMD 0x166268 | 4993 | #define XSDM_REG_NUM_OF_Q9_CMD 0x166268 |
4848 | /* [RW 13] The start address in the internal RAM for queue counters */ | 4994 | /* [W 17] Generate an operation after completion; bit-16 is |
4849 | #define XSDM_REG_Q_COUNTER_START_ADDR 0x166010 | 4995 | * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and |
4996 | * bits 4:0 are the T124Param[4:0] */ | ||
4997 | #define XSDM_REG_OPERATION_GEN 0x1664c4 | ||
4850 | /* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ | 4998 | /* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ |
4851 | #define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548 | 4999 | #define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548 |
4852 | /* [R 1] parser fifo empty in sdm_sync block */ | 5000 | /* [R 1] parser fifo empty in sdm_sync block */ |
@@ -5129,6 +5277,8 @@ | |||
5129 | #define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27) | 5277 | #define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27) |
5130 | #define MISC_REGISTERS_RESET_REG_1_SET 0x584 | 5278 | #define MISC_REGISTERS_RESET_REG_1_SET 0x584 |
5131 | #define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 | 5279 | #define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 |
5280 | #define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24) | ||
5281 | #define MISC_REGISTERS_RESET_REG_2_MSTAT1 (0x1<<25) | ||
5132 | #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) | 5282 | #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) |
5133 | #define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14) | 5283 | #define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14) |
5134 | #define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15) | 5284 | #define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15) |
@@ -5161,6 +5311,7 @@ | |||
5161 | #define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1 | 5311 | #define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1 |
5162 | #define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 | 5312 | #define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 |
5163 | #define MISC_REGISTERS_SPIO_SET_POS 8 | 5313 | #define MISC_REGISTERS_SPIO_SET_POS 8 |
5314 | #define HW_LOCK_DRV_FLAGS 10 | ||
5164 | #define HW_LOCK_MAX_RESOURCE_VALUE 31 | 5315 | #define HW_LOCK_MAX_RESOURCE_VALUE 31 |
5165 | #define HW_LOCK_RESOURCE_GPIO 1 | 5316 | #define HW_LOCK_RESOURCE_GPIO 1 |
5166 | #define HW_LOCK_RESOURCE_MDIO 0 | 5317 | #define HW_LOCK_RESOURCE_MDIO 0 |
@@ -5168,7 +5319,6 @@ | |||
5168 | #define HW_LOCK_RESOURCE_RESERVED_08 8 | 5319 | #define HW_LOCK_RESOURCE_RESERVED_08 8 |
5169 | #define HW_LOCK_RESOURCE_SPIO 2 | 5320 | #define HW_LOCK_RESOURCE_SPIO 2 |
5170 | #define HW_LOCK_RESOURCE_UNDI 5 | 5321 | #define HW_LOCK_RESOURCE_UNDI 5 |
5171 | #define PRS_FLAG_OVERETH_IPV4 1 | ||
5172 | #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) | 5322 | #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) |
5173 | #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) | 5323 | #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) |
5174 | #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18) | 5324 | #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18) |
@@ -5320,6 +5470,8 @@ | |||
5320 | #define GRCBASE_PXP2 0x120000 | 5470 | #define GRCBASE_PXP2 0x120000 |
5321 | #define GRCBASE_PBF 0x140000 | 5471 | #define GRCBASE_PBF 0x140000 |
5322 | #define GRCBASE_XPB 0x161000 | 5472 | #define GRCBASE_XPB 0x161000 |
5473 | #define GRCBASE_MSTAT0 0x162000 | ||
5474 | #define GRCBASE_MSTAT1 0x162800 | ||
5323 | #define GRCBASE_TIMERS 0x164000 | 5475 | #define GRCBASE_TIMERS 0x164000 |
5324 | #define GRCBASE_XSDM 0x166000 | 5476 | #define GRCBASE_XSDM 0x166000 |
5325 | #define GRCBASE_QM 0x168000 | 5477 | #define GRCBASE_QM 0x168000 |
@@ -6243,11 +6395,6 @@ Theotherbitsarereservedandshouldbezero*/ | |||
6243 | #define IGU_ADDR_MSI_ADDR_HI 0x0212 | 6395 | #define IGU_ADDR_MSI_ADDR_HI 0x0212 |
6244 | #define IGU_ADDR_MSI_DATA 0x0213 | 6396 | #define IGU_ADDR_MSI_DATA 0x0213 |
6245 | 6397 | ||
6246 | #define IGU_INT_ENABLE 0 | ||
6247 | #define IGU_INT_DISABLE 1 | ||
6248 | #define IGU_INT_NOP 2 | ||
6249 | #define IGU_INT_NOP2 3 | ||
6250 | |||
6251 | #define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0 | 6398 | #define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0 |
6252 | #define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1 | 6399 | #define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1 |
6253 | #define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2 | 6400 | #define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2 |
@@ -6318,15 +6465,6 @@ Theotherbitsarereservedandshouldbezero*/ | |||
6318 | #define IGU_BC_BASE_DSB_PROD 128 | 6465 | #define IGU_BC_BASE_DSB_PROD 128 |
6319 | #define IGU_NORM_BASE_DSB_PROD 136 | 6466 | #define IGU_NORM_BASE_DSB_PROD 136 |
6320 | 6467 | ||
6321 | #define IGU_CTRL_CMD_TYPE_WR\ | ||
6322 | 1 | ||
6323 | #define IGU_CTRL_CMD_TYPE_RD\ | ||
6324 | 0 | ||
6325 | |||
6326 | #define IGU_SEG_ACCESS_NORM 0 | ||
6327 | #define IGU_SEG_ACCESS_DEF 1 | ||
6328 | #define IGU_SEG_ACCESS_ATTN 2 | ||
6329 | |||
6330 | /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \ | 6468 | /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \ |
6331 | [5:2] = 0; [1:0] = PF number) */ | 6469 | [5:2] = 0; [1:0] = PF number) */ |
6332 | #define IGU_FID_ENCODE_IS_PF (0x1<<6) | 6470 | #define IGU_FID_ENCODE_IS_PF (0x1<<6) |
diff --git a/drivers/net/bnx2x/bnx2x_sp.c b/drivers/net/bnx2x/bnx2x_sp.c index 7c876a06b779..5bdf09459a08 100644 --- a/drivers/net/bnx2x/bnx2x_sp.c +++ b/drivers/net/bnx2x/bnx2x_sp.c | |||
@@ -1,3 +1,21 @@ | |||
1 | /* bnx2x_sp.c: Broadcom Everest network driver. | ||
2 | * | ||
3 | * Copyright 2011 Broadcom Corporation | ||
4 | * | ||
5 | * Unless you and Broadcom execute a separate written software license | ||
6 | * agreement governing use of this software, this software is licensed to you | ||
7 | * under the terms of the GNU General Public License version 2, available | ||
8 | * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). | ||
9 | * | ||
10 | * Notwithstanding the above, under no circumstances may you combine this | ||
11 | * software in any way with any other Broadcom software provided under a | ||
12 | * license other than the GPL, without Broadcom's express prior written | ||
13 | * consent. | ||
14 | * | ||
15 | * Maintained by: Eilon Greenstein <eilong@broadcom.com> | ||
16 | * Written by: Vladislav Zolotarov | ||
17 | * | ||
18 | */ | ||
1 | #include <linux/version.h> | 19 | #include <linux/version.h> |
2 | #include <linux/module.h> | 20 | #include <linux/module.h> |
3 | #include <linux/crc32.h> | 21 | #include <linux/crc32.h> |
@@ -8,691 +26,4277 @@ | |||
8 | #include "bnx2x_cmn.h" | 26 | #include "bnx2x_cmn.h" |
9 | #include "bnx2x_sp.h" | 27 | #include "bnx2x_sp.h" |
10 | 28 | ||
29 | #define BNX2X_MAX_EMUL_MULTI 16 | ||
30 | |||
31 | /**** Exe Queue interfaces ****/ | ||
11 | 32 | ||
12 | /** | 33 | /** |
13 | * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips | 34 | * bnx2x_exe_queue_init - init the Exe Queue object |
35 | * | ||
36 | * @o: pointer to the object | ||
37 | * @exe_len: length | ||
38 | * @owner: pointer to the owner | ||
39 | * @validate: validate function pointer | ||
40 | * @optimize: optimize function pointer | ||
41 | * @exec: execute function pointer | ||
42 | * @get: get function pointer | ||
43 | */ | ||
44 | static inline void bnx2x_exe_queue_init(struct bnx2x *bp, | ||
45 | struct bnx2x_exe_queue_obj *o, | ||
46 | int exe_len, | ||
47 | union bnx2x_qable_obj *owner, | ||
48 | exe_q_validate validate, | ||
49 | exe_q_optimize optimize, | ||
50 | exe_q_execute exec, | ||
51 | exe_q_get get) | ||
52 | { | ||
53 | memset(o, 0, sizeof(*o)); | ||
54 | |||
55 | INIT_LIST_HEAD(&o->exe_queue); | ||
56 | INIT_LIST_HEAD(&o->pending_comp); | ||
57 | |||
58 | spin_lock_init(&o->lock); | ||
59 | |||
60 | o->exe_chunk_len = exe_len; | ||
61 | o->owner = owner; | ||
62 | |||
63 | /* Owner specific callbacks */ | ||
64 | o->validate = validate; | ||
65 | o->optimize = optimize; | ||
66 | o->execute = exec; | ||
67 | o->get = get; | ||
68 | |||
69 | DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk " | ||
70 | "length of %d\n", exe_len); | ||
71 | } | ||
72 | |||
73 | static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp, | ||
74 | struct bnx2x_exeq_elem *elem) | ||
75 | { | ||
76 | DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n"); | ||
77 | kfree(elem); | ||
78 | } | ||
79 | |||
80 | static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o) | ||
81 | { | ||
82 | struct bnx2x_exeq_elem *elem; | ||
83 | int cnt = 0; | ||
84 | |||
85 | spin_lock_bh(&o->lock); | ||
86 | |||
87 | list_for_each_entry(elem, &o->exe_queue, link) | ||
88 | cnt++; | ||
89 | |||
90 | spin_unlock_bh(&o->lock); | ||
91 | |||
92 | return cnt; | ||
93 | } | ||
94 | |||
95 | /** | ||
96 | * bnx2x_exe_queue_add - add a new element to the execution queue | ||
14 | * | 97 | * |
15 | * @bp: driver handle | 98 | * @bp: driver handle |
16 | * @set: set or clear an entry (1 or 0) | 99 | * @o: queue |
17 | * @mac: pointer to a buffer containing a MAC | 100 | * @elem: new element to add
18 | * @cl_bit_vec: bit vector of clients to register a MAC for | 101 | * @restore: true - do not optimize the command |
19 | * @cam_offset: offset in a CAM to use | 102 | * |
20 | * @is_bcast: is the set MAC a broadcast address (for E1 only) | 103 | * If the element is optimized or is illegal, frees it. |
21 | */ | 104 | */ |
22 | void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac, | 105 | static inline int bnx2x_exe_queue_add(struct bnx2x *bp, |
23 | u32 cl_bit_vec, u8 cam_offset, | 106 | struct bnx2x_exe_queue_obj *o, |
24 | u8 is_bcast) | 107 | struct bnx2x_exeq_elem *elem, |
108 | bool restore) | ||
25 | { | 109 | { |
26 | struct mac_configuration_cmd *config = | 110 | int rc; |
27 | (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config); | 111 | |
28 | int ramrod_flags = WAIT_RAMROD_COMMON; | 112 | spin_lock_bh(&o->lock); |
29 | 113 | ||
30 | bp->set_mac_pending = 1; | 114 | if (!restore) { |
31 | 115 | /* Try to cancel this element queue */ | |
32 | config->hdr.length = 1; | 116 | rc = o->optimize(bp, o->owner, elem); |
33 | config->hdr.offset = cam_offset; | 117 | if (rc) |
34 | config->hdr.client_id = 0xff; | 118 | goto free_and_exit; |
35 | /* Mark the single MAC configuration ramrod (as opposed to a | 118 | goto free_and_exit; |
36 | * UC/MC list configuration). | 120 | /* Check if this request is ok */ |
37 | */ | 121 | rc = o->validate(bp, o->owner, elem); |
38 | config->hdr.echo = 1; | 122 | if (rc) { |
39 | 123 | BNX2X_ERR("Preamble failed: %d\n", rc); | |
40 | /* primary MAC */ | 124 | goto free_and_exit; |
41 | config->config_table[0].msb_mac_addr = | 125 | } |
42 | swab16(*(u16 *)&mac[0]); | 126 | } |
43 | config->config_table[0].middle_mac_addr = | 127 | |
44 | swab16(*(u16 *)&mac[2]); | 128 | /* If so, add it to the execution queue */ |
45 | config->config_table[0].lsb_mac_addr = | 129 | list_add_tail(&elem->link, &o->exe_queue); |
46 | swab16(*(u16 *)&mac[4]); | ||
47 | config->config_table[0].clients_bit_vector = | ||
48 | cpu_to_le32(cl_bit_vec); | ||
49 | config->config_table[0].vlan_id = 0; | ||
50 | config->config_table[0].pf_id = BP_FUNC(bp); | ||
51 | if (set) | ||
52 | SET_FLAG(config->config_table[0].flags, | ||
53 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
54 | T_ETH_MAC_COMMAND_SET); | ||
55 | else | ||
56 | SET_FLAG(config->config_table[0].flags, | ||
57 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
58 | T_ETH_MAC_COMMAND_INVALIDATE); | ||
59 | 130 | ||
60 | if (is_bcast) | 131 | spin_unlock_bh(&o->lock); |
61 | SET_FLAG(config->config_table[0].flags, | ||
62 | MAC_CONFIGURATION_ENTRY_BROADCAST, 1); | ||
63 | 132 | ||
64 | DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n", | 133 | return 0; |
65 | (set ? "setting" : "clearing"), | 134 | |
66 | config->config_table[0].msb_mac_addr, | 135 | free_and_exit: |
67 | config->config_table[0].middle_mac_addr, | 136 | bnx2x_exe_queue_free_elem(bp, elem); |
68 | config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec); | 137 | |
138 | spin_unlock_bh(&o->lock); | ||
139 | |||
140 | return rc; | ||
141 | |||
142 | } | ||
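A new command therefore passes through optimize() and validate() before it is ever linked into the queue, and a command that is optimized away or rejected is freed on the spot. The fragment below is a simplified illustration of that ordering in plain C; the hooks and the enqueue step are stand-ins, not the bnx2x implementations:

#include <stdio.h>
#include <stdlib.h>

struct elem { int cmd; };

/* Hypothetical hooks standing in for o->optimize() and o->validate(). */
static int optimize(struct elem *e) { (void)e; return 0; }  /* 0 = keep   */
static int validate(struct elem *e) { return e->cmd < 0 ? -1 : 0; }

/* Mirrors the add path: a command that is optimized away or fails
 * validation is freed immediately and never reaches the queue.          */
static int exe_queue_add(struct elem *e, int restore)
{
        int rc = 0;

        if (!restore) {
                rc = optimize(e);
                if (!rc)
                        rc = validate(e);
        }
        if (rc) {
                free(e);                /* illegal or optimized: drop it  */
                return rc;
        }
        printf("queued cmd %d\n", e->cmd);  /* real code: list_add_tail() */
        return 0;
}

int main(void)
{
        struct elem *ok  = malloc(sizeof(*ok));
        struct elem *bad = malloc(sizeof(*bad));

        ok->cmd = 1;
        bad->cmd = -1;
        exe_queue_add(ok, 0);
        exe_queue_add(bad, 0);          /* fails validation, freed        */
        free(ok);
        return 0;
}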
143 | |||
144 | static inline void __bnx2x_exe_queue_reset_pending( | ||
145 | struct bnx2x *bp, | ||
146 | struct bnx2x_exe_queue_obj *o) | ||
147 | { | ||
148 | struct bnx2x_exeq_elem *elem; | ||
69 | 149 | ||
150 | while (!list_empty(&o->pending_comp)) { | ||
151 | elem = list_first_entry(&o->pending_comp, | ||
152 | struct bnx2x_exeq_elem, link); | ||
153 | |||
154 | list_del(&elem->link); | ||
155 | bnx2x_exe_queue_free_elem(bp, elem); | ||
156 | } | ||
157 | } | ||
158 | |||
159 | static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, | ||
160 | struct bnx2x_exe_queue_obj *o) | ||
161 | { | ||
162 | |||
163 | spin_lock_bh(&o->lock); | ||
164 | |||
165 | __bnx2x_exe_queue_reset_pending(bp, o); | ||
166 | |||
167 | spin_unlock_bh(&o->lock); | ||
168 | |||
169 | } | ||
170 | |||
171 | /** | ||
172 | * bnx2x_exe_queue_step - execute one execution chunk atomically | ||
173 | * | ||
174 | * @bp: driver handle | ||
175 | * @o: queue | ||
176 | * @ramrod_flags: flags | ||
177 | * | ||
178 | * (Atomicity is ensured using the exe_queue->lock). | ||
179 | */ | ||
180 | static inline int bnx2x_exe_queue_step(struct bnx2x *bp, | ||
181 | struct bnx2x_exe_queue_obj *o, | ||
182 | unsigned long *ramrod_flags) | ||
183 | { | ||
184 | struct bnx2x_exeq_elem *elem, spacer; | ||
185 | int cur_len = 0, rc; | ||
186 | |||
187 | memset(&spacer, 0, sizeof(spacer)); | ||
188 | |||
189 | spin_lock_bh(&o->lock); | ||
190 | |||
191 | /* | ||
192 | * Next step should not be performed until the current is finished, | ||
193 | * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to | ||
194 | * properly clear object internals without sending any command to the FW | ||
195 | * which also implies there won't be any completion to clear the | ||
196 | * 'pending' list. | ||
197 | */ | ||
198 | if (!list_empty(&o->pending_comp)) { | ||
199 | if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { | ||
200 | DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: " | ||
201 | "resetting pending_comp\n"); | ||
202 | __bnx2x_exe_queue_reset_pending(bp, o); | ||
203 | } else { | ||
204 | spin_unlock_bh(&o->lock); | ||
205 | return 1; | ||
206 | } | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * Run through the pending commands list and create a next | ||
211 | * execution chunk. | ||
212 | */ | ||
213 | while (!list_empty(&o->exe_queue)) { | ||
214 | elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem, | ||
215 | link); | ||
216 | WARN_ON(!elem->cmd_len); | ||
217 | |||
218 | if (cur_len + elem->cmd_len <= o->exe_chunk_len) { | ||
219 | cur_len += elem->cmd_len; | ||
220 | /* | ||
221 | * Prevent from both lists being empty when moving an | ||
222 | * element. This will allow the call of | ||
223 | * bnx2x_exe_queue_empty() without locking. | ||
224 | */ | ||
225 | list_add_tail(&spacer.link, &o->pending_comp); | ||
226 | mb(); | ||
227 | list_del(&elem->link); | ||
228 | list_add_tail(&elem->link, &o->pending_comp); | ||
229 | list_del(&spacer.link); | ||
230 | } else | ||
231 | break; | ||
232 | } | ||
233 | |||
234 | /* Sanity check */ | ||
235 | if (!cur_len) { | ||
236 | spin_unlock_bh(&o->lock); | ||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); | ||
241 | if (rc < 0) | ||
242 | /* | ||
243 | * In case of an error return the commands back to the queue | ||
244 | * and reset the pending_comp. | ||
245 | */ | ||
246 | list_splice_init(&o->pending_comp, &o->exe_queue); | ||
247 | else if (!rc) | ||
248 | /* | ||
249 | * If zero is returned, means there are no outstanding pending | ||
250 | * completions and we may dismiss the pending list. | ||
251 | */ | ||
252 | __bnx2x_exe_queue_reset_pending(bp, o); | ||
253 | |||
254 | spin_unlock_bh(&o->lock); | ||
255 | return rc; | ||
256 | } | ||
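The spacer element used in the loop above is what keeps the lock-free emptiness check honest: while a command migrates from exe_queue to pending_comp there is never an instant at which both lists look empty. The toy model below (plain counters instead of list_head lists, illustrative only) preserves the same ordering of operations:

#include <stdio.h>

/* Toy model of the "spacer" trick: a lock-free reader that tests
 * "exe_queue empty && pending_comp empty" must never see both empty
 * while an element is in flight between them.                           */
static int exe_queue_len, pending_len;

static void move_one_with_spacer(void)
{
        pending_len++;          /* list_add_tail(&spacer.link, pending)   */
        /* mb() in the real code orders the spacer insertion before the
         * removal below for any concurrent reader.                       */
        exe_queue_len--;        /* list_del(&elem->link)                  */
        pending_len++;          /* list_add_tail(&elem->link, pending)    */
        pending_len--;          /* list_del(&spacer.link)                 */
}

static int both_empty(void)     /* bnx2x_exe_queue_empty() analogue       */
{
        return exe_queue_len == 0 && pending_len == 0;
}

int main(void)
{
        exe_queue_len = 1;
        move_one_with_spacer();
        printf("both empty? %d (pending=%d)\n", both_empty(), pending_len);
        return 0;
}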
257 | |||
258 | static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o) | ||
259 | { | ||
260 | bool empty = list_empty(&o->exe_queue); | ||
261 | |||
262 | /* Don't reorder!!! */ | ||
70 | mb(); | 263 | mb(); |
71 | 264 | ||
72 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, | 265 | return empty && list_empty(&o->pending_comp); |
73 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), | 266 | } |
74 | U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1); | 267 | |
268 | static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem( | ||
269 | struct bnx2x *bp) | ||
270 | { | ||
271 | DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n"); | ||
272 | return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC); | ||
273 | } | ||
75 | 274 | ||
76 | /* Wait for a completion */ | 275 | /************************ raw_obj functions ***********************************/ |
77 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags); | 276 | static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o) |
277 | { | ||
278 | return !!test_bit(o->state, o->pstate); | ||
78 | } | 279 | } |
79 | 280 | ||
281 | static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o) | ||
282 | { | ||
283 | smp_mb__before_clear_bit(); | ||
284 | clear_bit(o->state, o->pstate); | ||
285 | smp_mb__after_clear_bit(); | ||
286 | } | ||
80 | 287 | ||
81 | static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp) | 288 | static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o) |
82 | { | 289 | { |
83 | return CHIP_REV_IS_SLOW(bp) ? | 290 | smp_mb__before_clear_bit(); |
84 | (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) : | 291 | set_bit(o->state, o->pstate); |
85 | (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp))); | 292 | smp_mb__after_clear_bit(); |
86 | } | 293 | } |
87 | 294 | ||
88 | /* set mc list, do not wait as wait implies sleep and | 295 | /** |
89 | * set_rx_mode can be invoked from non-sleepable context. | 296 | * bnx2x_state_wait - wait until the given bit(state) is cleared |
297 | * | ||
298 | * @bp: device handle | ||
299 | * @state: state which is to be cleared | ||
300 | * @state_p: state buffer | ||
90 | * | 301 | * |
91 | * Instead we use the same ramrod data buffer each time we need | ||
92 | * to configure a list of addresses, and use the fact that the | ||
93 | * list of MACs is changed in an incremental way and that the | ||
94 | * function is called under the netif_addr_lock. A temporary | ||
95 | * inconsistent CAM configuration (possible in case of a very fast | ||
96 | * sequence of add/del/add on the host side) will shortly be | ||
97 | * restored by the handler of the last ramrod. | ||
98 | */ | 302 | */ |
99 | int bnx2x_set_e1_mc_list(struct bnx2x *bp) | 303 | static inline int bnx2x_state_wait(struct bnx2x *bp, int state, |
304 | unsigned long *pstate) | ||
100 | { | 305 | { |
101 | int i = 0, old; | 306 | /* can take a while if any port is running */ |
102 | struct net_device *dev = bp->dev; | 307 | int cnt = 5000; |
103 | u8 offset = bnx2x_e1_cam_mc_offset(bp); | 308 | |
104 | struct netdev_hw_addr *ha; | ||
105 | struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); | ||
106 | dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); | ||
107 | 309 | ||
108 | if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) | 310 | if (CHIP_REV_IS_EMUL(bp)) |
311 | cnt *= 20; | ||
312 | |||
313 | DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state); | ||
314 | |||
315 | might_sleep(); | ||
316 | while (cnt--) { | ||
317 | if (!test_bit(state, pstate)) { | ||
318 | #ifdef BNX2X_STOP_ON_ERROR | ||
319 | DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt); | ||
320 | #endif | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | usleep_range(1000, 1000); | ||
325 | |||
326 | if (bp->panic) | ||
327 | return -EIO; | ||
328 | } | ||
329 | |||
330 | /* timeout! */ | ||
331 | BNX2X_ERR("timeout waiting for state %d\n", state); | ||
332 | #ifdef BNX2X_STOP_ON_ERROR | ||
333 | bnx2x_panic(); | ||
334 | #endif | ||
335 | |||
336 | return -EBUSY; | ||
337 | } | ||
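The pattern here is a bounded poll: sleep roughly a millisecond per iteration and give up with -EBUSY after a fixed number of tries. A userspace analogue, with a plain int standing in for the atomic state bit, might look like this (illustrative sketch, not driver code):

#include <errno.h>
#include <stdio.h>
#include <time.h>

/* Poll a flag until it clears, sleeping ~1 ms per iteration, and give
 * up after a bounded number of tries.                                   */
static int state_wait(volatile int *pending, int max_iters)
{
        struct timespec ms = { 0, 1000 * 1000 };

        while (max_iters--) {
                if (!*pending)
                        return 0;       /* bit cleared: done              */
                nanosleep(&ms, NULL);
        }
        return -EBUSY;                  /* timeout                        */
}

int main(void)
{
        int pending = 0;                /* already clear                  */

        printf("rc=%d\n", state_wait(&pending, 5000));
        return 0;
}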
338 | |||
339 | static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw) | ||
340 | { | ||
341 | return bnx2x_state_wait(bp, raw->state, raw->pstate); | ||
342 | } | ||
343 | |||
344 | /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ | ||
345 | /* credit handling callbacks */ | ||
346 | static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset) | ||
347 | { | ||
348 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; | ||
349 | |||
350 | WARN_ON(!mp); | ||
351 | |||
352 | return mp->get_entry(mp, offset); | ||
353 | } | ||
354 | |||
355 | static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o) | ||
356 | { | ||
357 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; | ||
358 | |||
359 | WARN_ON(!mp); | ||
360 | |||
361 | return mp->get(mp, 1); | ||
362 | } | ||
363 | |||
364 | static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset) | ||
365 | { | ||
366 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; | ||
367 | |||
368 | WARN_ON(!vp); | ||
369 | |||
370 | return vp->get_entry(vp, offset); | ||
371 | } | ||
372 | |||
373 | static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o) | ||
374 | { | ||
375 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; | ||
376 | |||
377 | WARN_ON(!vp); | ||
378 | |||
379 | return vp->get(vp, 1); | ||
380 | } | ||
381 | |||
382 | static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) | ||
383 | { | ||
384 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; | ||
385 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; | ||
386 | |||
387 | if (!mp->get(mp, 1)) | ||
388 | return false; | ||
389 | |||
390 | if (!vp->get(vp, 1)) { | ||
391 | mp->put(mp, 1); | ||
392 | return false; | ||
393 | } | ||
394 | |||
395 | return true; | ||
396 | } | ||
397 | |||
398 | static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset) | ||
399 | { | ||
400 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; | ||
401 | |||
402 | return mp->put_entry(mp, offset); | ||
403 | } | ||
404 | |||
405 | static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o) | ||
406 | { | ||
407 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; | ||
408 | |||
409 | return mp->put(mp, 1); | ||
410 | } | ||
411 | |||
412 | static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset) | ||
413 | { | ||
414 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; | ||
415 | |||
416 | return vp->put_entry(vp, offset); | ||
417 | } | ||
418 | |||
419 | static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o) | ||
420 | { | ||
421 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; | ||
422 | |||
423 | return vp->put(vp, 1); | ||
424 | } | ||
425 | |||
426 | static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) | ||
427 | { | ||
428 | struct bnx2x_credit_pool_obj *mp = o->macs_pool; | ||
429 | struct bnx2x_credit_pool_obj *vp = o->vlans_pool; | ||
430 | |||
431 | if (!mp->put(mp, 1)) | ||
432 | return false; | ||
433 | |||
434 | if (!vp->put(vp, 1)) { | ||
435 | mp->get(mp, 1); | ||
436 | return false; | ||
437 | } | ||
438 | |||
439 | return true; | ||
440 | } | ||
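For the combined VLAN-MAC classification both pools have to be charged, and the callbacks above roll the first pool back whenever the second one runs dry (and symmetrically on put). A minimal sketch of that acquire-with-rollback pattern, using toy integer pools rather than the driver's credit pool objects:

#include <stdbool.h>
#include <stdio.h>

struct pool { int credits; };

static bool pool_get(struct pool *p)
{
        return p->credits > 0 ? (p->credits--, true) : false;
}

static void pool_put(struct pool *p) { p->credits++; }

/* Take one credit from each pool; undo the first if the second fails.   */
static bool get_credit_vlan_mac(struct pool *macs, struct pool *vlans)
{
        if (!pool_get(macs))
                return false;
        if (!pool_get(vlans)) {
                pool_put(macs);         /* roll back the MAC credit       */
                return false;
        }
        return true;
}

int main(void)
{
        struct pool macs = { 1 }, vlans = { 0 };
        bool got = get_credit_vlan_mac(&macs, &vlans);

        printf("got=%d macs left=%d\n", got, macs.credits);  /* 0, 1 left */
        return 0;
}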
441 | |||
442 | /* check_add() callbacks */ | ||
443 | static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o, | ||
444 | union bnx2x_classification_ramrod_data *data) | ||
445 | { | ||
446 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
447 | |||
448 | if (!is_valid_ether_addr(data->mac.mac)) | ||
109 | return -EINVAL; | 449 | return -EINVAL; |
110 | 450 | ||
111 | netdev_for_each_mc_addr(ha, dev) { | 451 | /* Check if a requested MAC already exists */ |
112 | /* copy mac */ | 452 | list_for_each_entry(pos, &o->head, link) |
113 | config_cmd->config_table[i].msb_mac_addr = | 453 | if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) |
114 | swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]); | 454 | return -EEXIST; |
115 | config_cmd->config_table[i].middle_mac_addr = | 455 | |
116 | swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]); | 456 | return 0; |
117 | config_cmd->config_table[i].lsb_mac_addr = | 457 | } |
118 | swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]); | 458 | |
119 | 459 | static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o, | |
120 | config_cmd->config_table[i].vlan_id = 0; | 460 | union bnx2x_classification_ramrod_data *data) |
121 | config_cmd->config_table[i].pf_id = BP_FUNC(bp); | 461 | { |
122 | config_cmd->config_table[i].clients_bit_vector = | 462 | struct bnx2x_vlan_mac_registry_elem *pos; |
123 | cpu_to_le32(1 << BP_L_ID(bp)); | 463 | |
124 | 464 | list_for_each_entry(pos, &o->head, link) | |
125 | SET_FLAG(config_cmd->config_table[i].flags, | 465 | if (data->vlan.vlan == pos->u.vlan.vlan) |
126 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | 466 | return -EEXIST; |
127 | T_ETH_MAC_COMMAND_SET); | 467 | |
128 | 468 | return 0; | |
129 | DP(NETIF_MSG_IFUP, | 469 | } |
130 | "setting MCAST[%d] (%04x:%04x:%04x)\n", i, | 470 | |
131 | config_cmd->config_table[i].msb_mac_addr, | 471 | static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o, |
132 | config_cmd->config_table[i].middle_mac_addr, | 472 | union bnx2x_classification_ramrod_data *data) |
133 | config_cmd->config_table[i].lsb_mac_addr); | 473 | { |
134 | i++; | 474 | struct bnx2x_vlan_mac_registry_elem *pos; |
475 | |||
476 | list_for_each_entry(pos, &o->head, link) | ||
477 | if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && | ||
478 | (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, | ||
479 | ETH_ALEN))) | ||
480 | return -EEXIST; | ||
481 | |||
482 | return 0; | ||
483 | } | ||
484 | |||
485 | |||
486 | /* check_del() callbacks */ | ||
487 | static struct bnx2x_vlan_mac_registry_elem * | ||
488 | bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o, | ||
489 | union bnx2x_classification_ramrod_data *data) | ||
490 | { | ||
491 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
492 | |||
493 | list_for_each_entry(pos, &o->head, link) | ||
494 | if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) | ||
495 | return pos; | ||
496 | |||
497 | return NULL; | ||
498 | } | ||
499 | |||
500 | static struct bnx2x_vlan_mac_registry_elem * | ||
501 | bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o, | ||
502 | union bnx2x_classification_ramrod_data *data) | ||
503 | { | ||
504 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
505 | |||
506 | list_for_each_entry(pos, &o->head, link) | ||
507 | if (data->vlan.vlan == pos->u.vlan.vlan) | ||
508 | return pos; | ||
509 | |||
510 | return NULL; | ||
511 | } | ||
512 | |||
513 | static struct bnx2x_vlan_mac_registry_elem * | ||
514 | bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o, | ||
515 | union bnx2x_classification_ramrod_data *data) | ||
516 | { | ||
517 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
518 | |||
519 | list_for_each_entry(pos, &o->head, link) | ||
520 | if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && | ||
521 | (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, | ||
522 | ETH_ALEN))) | ||
523 | return pos; | ||
524 | |||
525 | return NULL; | ||
526 | } | ||
527 | |||
528 | /* check_move() callback */ | ||
529 | static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o, | ||
530 | struct bnx2x_vlan_mac_obj *dst_o, | ||
531 | union bnx2x_classification_ramrod_data *data) | ||
532 | { | ||
533 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
534 | int rc; | ||
535 | |||
536 | /* Check if we can delete the requested configuration from the first | ||
537 | * object. | ||
538 | */ | ||
539 | pos = src_o->check_del(src_o, data); | ||
540 | |||
541 | /* check if configuration can be added */ | ||
542 | rc = dst_o->check_add(dst_o, data); | ||
543 | |||
544 | /* If this classification can not be added (is already set) | ||
545 | * or can't be deleted - return an error. | ||
546 | */ | ||
547 | if (rc || !pos) | ||
548 | return false; | ||
549 | |||
550 | return true; | ||
551 | } | ||
552 | |||
553 | static bool bnx2x_check_move_always_err( | ||
554 | struct bnx2x_vlan_mac_obj *src_o, | ||
555 | struct bnx2x_vlan_mac_obj *dst_o, | ||
556 | union bnx2x_classification_ramrod_data *data) | ||
557 | { | ||
558 | return false; | ||
559 | } | ||
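bnx2x_check_move() therefore boils down to two registry lookups: the entry must be deletable from the source object and addable to the destination object. The standalone sketch below models the same rule with a fixed-size table per object (hypothetical structures, illustrative only):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define SLOTS 4

struct obj { unsigned char macs[SLOTS][6]; bool used[SLOTS]; };

static int find(struct obj *o, const unsigned char *mac)
{
        for (int i = 0; i < SLOTS; i++)
                if (o->used[i] && !memcmp(o->macs[i], mac, 6))
                        return i;
        return -1;
}

/* A MOVE is legal only when the entry exists in the source registry and
 * is not yet present in the destination one.                            */
static bool check_move(struct obj *src, struct obj *dst,
                       const unsigned char *mac)
{
        return find(src, mac) >= 0 &&   /* deletable from source          */
               find(dst, mac) < 0;      /* addable to destination         */
}

int main(void)
{
        struct obj a = { 0 }, b = { 0 };
        unsigned char mac[6] = { 0, 1, 2, 3, 4, 5 };

        memcpy(a.macs[0], mac, 6);
        a.used[0] = true;
        printf("move ok? %d\n", check_move(&a, &b, mac));    /* 1          */
        return 0;
}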
560 | |||
561 | |||
562 | static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) | ||
563 | { | ||
564 | struct bnx2x_raw_obj *raw = &o->raw; | ||
565 | u8 rx_tx_flag = 0; | ||
566 | |||
567 | if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || | ||
568 | (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) | ||
569 | rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD; | ||
570 | |||
571 | if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || | ||
572 | (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) | ||
573 | rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD; | ||
574 | |||
575 | return rx_tx_flag; | ||
576 | } | ||
577 | |||
578 | /* LLH CAM line allocations */ | ||
579 | enum { | ||
580 | LLH_CAM_ISCSI_ETH_LINE = 0, | ||
581 | LLH_CAM_ETH_LINE, | ||
582 | LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 | ||
583 | }; | ||
584 | |||
585 | static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp, | ||
586 | bool add, unsigned char *dev_addr, int index) | ||
587 | { | ||
588 | u32 wb_data[2]; | ||
589 | u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : | ||
590 | NIG_REG_LLH0_FUNC_MEM; | ||
591 | |||
592 | if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE) | ||
593 | return; | ||
594 | |||
595 | DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n", | ||
596 | (add ? "ADD" : "DELETE"), index); | ||
597 | |||
598 | if (add) { | ||
599 | /* LLH_FUNC_MEM is a u64 WB register */ | ||
600 | reg_offset += 8*index; | ||
601 | |||
602 | wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | | ||
603 | (dev_addr[4] << 8) | dev_addr[5]); | ||
604 | wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); | ||
605 | |||
606 | REG_WR_DMAE(bp, reg_offset, wb_data, 2); | ||
135 | } | 607 | } |
136 | old = config_cmd->hdr.length; | 608 | |
137 | if (old > i) { | 609 | REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : |
138 | for (; i < old; i++) { | 610 | NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add); |
139 | if (CAM_IS_INVALID(config_cmd-> | 611 | } |
140 | config_table[i])) { | 612 | |
141 | /* already invalidated */ | 613 | /** |
142 | break; | 614 | * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod |
143 | } | 615 | * |
144 | /* invalidate */ | 616 | * @bp: device handle |
145 | SET_FLAG(config_cmd->config_table[i].flags, | 617 | * @o: queue for which we want to configure this rule |
146 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | 618 | * @add: if true the command is an ADD command, DEL otherwise |
147 | T_ETH_MAC_COMMAND_INVALIDATE); | 619 | * @opcode: CLASSIFY_RULE_OPCODE_XXX |
148 | } | 620 | * @hdr: pointer to a header to setup |
621 | * | ||
622 | */ | ||
623 | static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, | ||
624 | struct bnx2x_vlan_mac_obj *o, bool add, int opcode, | ||
625 | struct eth_classify_cmd_header *hdr) | ||
626 | { | ||
627 | struct bnx2x_raw_obj *raw = &o->raw; | ||
628 | |||
629 | hdr->client_id = raw->cl_id; | ||
630 | hdr->func_id = raw->func_id; | ||
631 | |||
632 | /* Rx and/or Tx (internal switching) configuration? */ | ||
633 | hdr->cmd_general_data |= | ||
634 | bnx2x_vlan_mac_get_rx_tx_flag(o); | ||
635 | |||
636 | if (add) | ||
637 | hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD; | ||
638 | |||
639 | hdr->cmd_general_data |= | ||
640 | (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT); | ||
641 | } | ||
642 | |||
643 | /** | ||
644 | * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header | ||
645 | * | ||
646 | * @cid: connection id | ||
647 | * @type: BNX2X_FILTER_XXX_PENDING | ||
648 | * @hdr: pointer to a header to setup | ||
649 | * @rule_cnt: | ||
650 | * | ||
651 | * Currently we always configure one rule, and the echo field is set to | ||
652 | * contain a CID and an opcode type. | ||
653 | */ | ||
654 | static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type, | ||
655 | struct eth_classify_header *hdr, int rule_cnt) | ||
656 | { | ||
657 | hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT); | ||
658 | hdr->rule_cnt = (u8)rule_cnt; | ||
659 | } | ||
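Because the hardware echoes this field back in the completion, packing both the software CID and the pending-command type into it lets the SP event handler route a CQE to the right object without extra bookkeeping. The sketch below shows the packing idea with illustrative mask/shift values; the real BNX2X_SWCID_MASK/BNX2X_SWCID_SHIFT constants live in the driver headers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only, not the driver's actual constants.          */
#define SWCID_MASK  0x1ffff
#define SWCID_SHIFT 17

static uint32_t pack_echo(uint32_t cid, uint32_t type)
{
        return (cid & SWCID_MASK) | (type << SWCID_SHIFT);
}

int main(void)
{
        uint32_t echo = pack_echo(0x123, 5);

        printf("cid=%#x type=%u\n",
               echo & SWCID_MASK, echo >> SWCID_SHIFT);      /* 0x123 5   */
        return 0;
}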
660 | |||
661 | |||
662 | /* hw_config() callbacks */ | ||
663 | static void bnx2x_set_one_mac_e2(struct bnx2x *bp, | ||
664 | struct bnx2x_vlan_mac_obj *o, | ||
665 | struct bnx2x_exeq_elem *elem, int rule_idx, | ||
666 | int cam_offset) | ||
667 | { | ||
668 | struct bnx2x_raw_obj *raw = &o->raw; | ||
669 | struct eth_classify_rules_ramrod_data *data = | ||
670 | (struct eth_classify_rules_ramrod_data *)(raw->rdata); | ||
671 | int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; | ||
672 | union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; | ||
673 | bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; | ||
674 | unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; | ||
675 | u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; | ||
676 | |||
677 | /* | ||
678 | * Set LLH CAM entry: currently only iSCSI and ETH macs are | ||
679 | * relevant. In addition, current implementation is tuned for a | ||
680 | * single ETH MAC. | ||
681 | * | ||
682 | * When configuration of multiple unicast ETH MACs for a PF in | ||
683 | * switch independent mode is required (NetQ, multiple netdev MACs, | ||
684 | * etc.), consider better utilisation of the 8 per-function MAC | ||
685 | * entries in the LLH register. There is also | ||
686 | * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the | ||
687 | * total number of CAM entries to 16. | ||
688 | * | ||
689 | * Currently we won't configure NIG for MACs other than a primary ETH | ||
690 | * MAC and iSCSI L2 MAC. | ||
691 | * | ||
692 | * If this MAC is moving from one Queue to another, no need to change | ||
693 | * NIG configuration. | ||
694 | */ | ||
695 | if (cmd != BNX2X_VLAN_MAC_MOVE) { | ||
696 | if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags)) | ||
697 | bnx2x_set_mac_in_nig(bp, add, mac, | ||
698 | LLH_CAM_ISCSI_ETH_LINE); | ||
699 | else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags)) | ||
700 | bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE); | ||
149 | } | 701 | } |
150 | 702 | ||
151 | wmb(); | 703 | /* Reset the ramrod data buffer for the first rule */ |
704 | if (rule_idx == 0) | ||
705 | memset(data, 0, sizeof(*data)); | ||
706 | |||
707 | /* Setup a command header */ | ||
708 | bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC, | ||
709 | &rule_entry->mac.header); | ||
710 | |||
711 | DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for " | ||
712 | "Queue %d\n", (add ? "add" : "delete"), | ||
713 | BNX2X_MAC_PRN_LIST(mac), raw->cl_id); | ||
714 | |||
715 | /* Set a MAC itself */ | ||
716 | bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, | ||
717 | &rule_entry->mac.mac_mid, | ||
718 | &rule_entry->mac.mac_lsb, mac); | ||
719 | |||
720 | /* MOVE: Add a rule that will add this MAC to the target Queue */ | ||
721 | if (cmd == BNX2X_VLAN_MAC_MOVE) { | ||
722 | rule_entry++; | ||
723 | rule_cnt++; | ||
724 | |||
725 | /* Setup ramrod data */ | ||
726 | bnx2x_vlan_mac_set_cmd_hdr_e2(bp, | ||
727 | elem->cmd_data.vlan_mac.target_obj, | ||
728 | true, CLASSIFY_RULE_OPCODE_MAC, | ||
729 | &rule_entry->mac.header); | ||
730 | |||
731 | /* Set a MAC itself */ | ||
732 | bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, | ||
733 | &rule_entry->mac.mac_mid, | ||
734 | &rule_entry->mac.mac_lsb, mac); | ||
735 | } | ||
736 | |||
737 | /* Set the ramrod data header */ | ||
738 | /* TODO: take this to the higher level in order to prevent multiple | ||
739 | writing */ | ||
740 | bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, | ||
741 | rule_cnt); | ||
742 | } | ||
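Note that the ramrod does not carry the MAC as a byte array: bnx2x_set_fw_mac_addr() splits it into three 16-bit fields (msb, mid, lsb). The sketch below shows one plausible way such a split looks; it makes no claim about the firmware's exact byte-ordering rules:

#include <stdint.h>
#include <stdio.h>

/* Split a 6-byte MAC into three 16-bit halves: bytes 0..1 into msb,
 * 2..3 into mid, 4..5 into lsb (illustrative packing only).             */
static void split_mac(const uint8_t mac[6],
                      uint16_t *msb, uint16_t *mid, uint16_t *lsb)
{
        *msb = (uint16_t)((mac[0] << 8) | mac[1]);
        *mid = (uint16_t)((mac[2] << 8) | mac[3]);
        *lsb = (uint16_t)((mac[4] << 8) | mac[5]);
}

int main(void)
{
        uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
        uint16_t msb, mid, lsb;

        split_mac(mac, &msb, &mid, &lsb);
        printf("%04x:%04x:%04x\n", msb, mid, lsb);  /* 0010:18ab:cdef     */
        return 0;
}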
743 | |||
744 | /** | ||
745 | * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod | ||
746 | * | ||
747 | * @bp: device handle | ||
748 | * @o: queue | ||
749 | * @type: | ||
750 | * @cam_offset: offset in cam memory | ||
751 | * @hdr: pointer to a header to setup | ||
752 | * | ||
753 | * E1/E1H | ||
754 | */ | ||
755 | static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, | ||
756 | struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, | ||
757 | struct mac_configuration_hdr *hdr) | ||
758 | { | ||
759 | struct bnx2x_raw_obj *r = &o->raw; | ||
760 | |||
761 | hdr->length = 1; | ||
762 | hdr->offset = (u8)cam_offset; | ||
763 | hdr->client_id = 0xff; | ||
764 | hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT)); | ||
765 | } | ||
152 | 766 | ||
153 | config_cmd->hdr.length = i; | 767 | static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp, |
154 | config_cmd->hdr.offset = offset; | 768 | struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac, |
155 | config_cmd->hdr.client_id = 0xff; | 769 | u16 vlan_id, struct mac_configuration_entry *cfg_entry) |
156 | /* Mark that this ramrod doesn't use bp->set_mac_pending for | 770 | { |
157 | * synchronization. | 771 | struct bnx2x_raw_obj *r = &o->raw; |
772 | u32 cl_bit_vec = (1 << r->cl_id); | ||
773 | |||
774 | cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec); | ||
775 | cfg_entry->pf_id = r->func_id; | ||
776 | cfg_entry->vlan_id = cpu_to_le16(vlan_id); | ||
777 | |||
778 | if (add) { | ||
779 | SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
780 | T_ETH_MAC_COMMAND_SET); | ||
781 | SET_FLAG(cfg_entry->flags, | ||
782 | MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode); | ||
783 | |||
784 | /* Set a MAC in a ramrod data */ | ||
785 | bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr, | ||
786 | &cfg_entry->middle_mac_addr, | ||
787 | &cfg_entry->lsb_mac_addr, mac); | ||
788 | } else | ||
789 | SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
790 | T_ETH_MAC_COMMAND_INVALIDATE); | ||
791 | } | ||
792 | |||
793 | static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp, | ||
794 | struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add, | ||
795 | u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config) | ||
796 | { | ||
797 | struct mac_configuration_entry *cfg_entry = &config->config_table[0]; | ||
798 | struct bnx2x_raw_obj *raw = &o->raw; | ||
799 | |||
800 | bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset, | ||
801 | &config->hdr); | ||
802 | bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id, | ||
803 | cfg_entry); | ||
804 | |||
805 | DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n", | ||
806 | (add ? "setting" : "clearing"), | ||
807 | BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset); | ||
808 | } | ||
809 | |||
810 | /** | ||
811 | * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data | ||
812 | * | ||
813 | * @bp: device handle | ||
814 | * @o: bnx2x_vlan_mac_obj | ||
815 | * @elem: bnx2x_exeq_elem | ||
816 | * @rule_idx: rule_idx | ||
817 | * @cam_offset: cam_offset | ||
818 | */ | ||
819 | static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, | ||
820 | struct bnx2x_vlan_mac_obj *o, | ||
821 | struct bnx2x_exeq_elem *elem, int rule_idx, | ||
822 | int cam_offset) | ||
823 | { | ||
824 | struct bnx2x_raw_obj *raw = &o->raw; | ||
825 | struct mac_configuration_cmd *config = | ||
826 | (struct mac_configuration_cmd *)(raw->rdata); | ||
827 | /* | ||
828 | * 57710 and 57711 do not support MOVE command, | ||
829 | * so it's either ADD or DEL | ||
158 | */ | 830 | */ |
159 | config_cmd->hdr.echo = 0; | 831 | bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? |
832 | true : false; | ||
160 | 833 | ||
161 | mb(); | 834 | /* Reset the ramrod data buffer */ |
835 | memset(config, 0, sizeof(*config)); | ||
162 | 836 | ||
163 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, | 837 | bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING, |
164 | U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); | 838 | cam_offset, add, |
839 | elem->cmd_data.vlan_mac.u.mac.mac, 0, | ||
840 | ETH_VLAN_FILTER_ANY_VLAN, config); | ||
165 | } | 841 | } |
166 | 842 | ||
167 | void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp) | 843 | static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, |
844 | struct bnx2x_vlan_mac_obj *o, | ||
845 | struct bnx2x_exeq_elem *elem, int rule_idx, | ||
846 | int cam_offset) | ||
168 | { | 847 | { |
169 | int i; | 848 | struct bnx2x_raw_obj *raw = &o->raw; |
170 | struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); | 849 | struct eth_classify_rules_ramrod_data *data = |
171 | dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); | 850 | (struct eth_classify_rules_ramrod_data *)(raw->rdata); |
172 | int ramrod_flags = WAIT_RAMROD_COMMON; | 851 | int rule_cnt = rule_idx + 1; |
173 | u8 offset = bnx2x_e1_cam_mc_offset(bp); | 852 | union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; |
853 | int cmd = elem->cmd_data.vlan_mac.cmd; | ||
854 | bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; | ||
855 | u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; | ||
856 | |||
857 | /* Reset the ramrod data buffer for the first rule */ | ||
858 | if (rule_idx == 0) | ||
859 | memset(data, 0, sizeof(*data)); | ||
860 | |||
861 | /* Set a rule header */ | ||
862 | bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN, | ||
863 | &rule_entry->vlan.header); | ||
864 | |||
865 | DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"), | ||
866 | vlan); | ||
867 | |||
868 | /* Set a VLAN itself */ | ||
869 | rule_entry->vlan.vlan = cpu_to_le16(vlan); | ||
870 | |||
871 | /* MOVE: Add a rule that will add this MAC to the target Queue */ | ||
872 | if (cmd == BNX2X_VLAN_MAC_MOVE) { | ||
873 | rule_entry++; | ||
874 | rule_cnt++; | ||
875 | |||
876 | /* Setup ramrod data */ | ||
877 | bnx2x_vlan_mac_set_cmd_hdr_e2(bp, | ||
878 | elem->cmd_data.vlan_mac.target_obj, | ||
879 | true, CLASSIFY_RULE_OPCODE_VLAN, | ||
880 | &rule_entry->vlan.header); | ||
881 | |||
882 | /* Set a VLAN itself */ | ||
883 | rule_entry->vlan.vlan = cpu_to_le16(vlan); | ||
884 | } | ||
174 | 885 | ||
175 | for (i = 0; i < BNX2X_MAX_MULTICAST; i++) | 886 | /* Set the ramrod data header */ |
176 | SET_FLAG(config_cmd->config_table[i].flags, | 887 | /* TODO: take this to the higher level in order to prevent multiple |
177 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | 888 | writing */ |
178 | T_ETH_MAC_COMMAND_INVALIDATE); | 889 | bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, |
890 | rule_cnt); | ||
891 | } | ||
179 | 892 | ||
180 | wmb(); | 893 | static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, |
894 | struct bnx2x_vlan_mac_obj *o, | ||
895 | struct bnx2x_exeq_elem *elem, | ||
896 | int rule_idx, int cam_offset) | ||
897 | { | ||
898 | struct bnx2x_raw_obj *raw = &o->raw; | ||
899 | struct eth_classify_rules_ramrod_data *data = | ||
900 | (struct eth_classify_rules_ramrod_data *)(raw->rdata); | ||
901 | int rule_cnt = rule_idx + 1; | ||
902 | union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; | ||
903 | int cmd = elem->cmd_data.vlan_mac.cmd; | ||
904 | bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; | ||
905 | u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; | ||
906 | u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; | ||
907 | |||
908 | |||
909 | /* Reset the ramrod data buffer for the first rule */ | ||
910 | if (rule_idx == 0) | ||
911 | memset(data, 0, sizeof(*data)); | ||
912 | |||
913 | /* Set a rule header */ | ||
914 | bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, | ||
915 | &rule_entry->pair.header); | ||
916 | |||
917 | /* Set VLAN and MAC themselves */ | ||
918 | rule_entry->pair.vlan = cpu_to_le16(vlan); | ||
919 | bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, | ||
920 | &rule_entry->pair.mac_mid, | ||
921 | &rule_entry->pair.mac_lsb, mac); | ||
922 | |||
923 | /* MOVE: Add a rule that will add this MAC to the target Queue */ | ||
924 | if (cmd == BNX2X_VLAN_MAC_MOVE) { | ||
925 | rule_entry++; | ||
926 | rule_cnt++; | ||
927 | |||
928 | /* Setup ramrod data */ | ||
929 | bnx2x_vlan_mac_set_cmd_hdr_e2(bp, | ||
930 | elem->cmd_data.vlan_mac.target_obj, | ||
931 | true, CLASSIFY_RULE_OPCODE_PAIR, | ||
932 | &rule_entry->pair.header); | ||
933 | |||
934 | /* Set a VLAN itself */ | ||
935 | rule_entry->pair.vlan = cpu_to_le16(vlan); | ||
936 | bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, | ||
937 | &rule_entry->pair.mac_mid, | ||
938 | &rule_entry->pair.mac_lsb, mac); | ||
939 | } | ||
940 | |||
941 | /* Set the ramrod data header */ | ||
942 | /* TODO: take this to the higher level in order to prevent multiple | ||
943 | writing */ | ||
944 | bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, | ||
945 | rule_cnt); | ||
946 | } | ||
947 | |||
948 | /** | ||
949 | * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data | ||
950 | * | ||
951 | * @bp: device handle | ||
952 | * @o: bnx2x_vlan_mac_obj | ||
953 | * @elem: bnx2x_exeq_elem | ||
954 | * @rule_idx: rule_idx | ||
955 | * @cam_offset: cam_offset | ||
956 | */ | ||
957 | static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, | ||
958 | struct bnx2x_vlan_mac_obj *o, | ||
959 | struct bnx2x_exeq_elem *elem, | ||
960 | int rule_idx, int cam_offset) | ||
961 | { | ||
962 | struct bnx2x_raw_obj *raw = &o->raw; | ||
963 | struct mac_configuration_cmd *config = | ||
964 | (struct mac_configuration_cmd *)(raw->rdata); | ||
965 | /* | ||
966 | * 57710 and 57711 do not support MOVE command, | ||
967 | * so it's either ADD or DEL | ||
968 | */ | ||
969 | bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? | ||
970 | true : false; | ||
971 | |||
972 | /* Reset the ramrod data buffer */ | ||
973 | memset(config, 0, sizeof(*config)); | ||
181 | 974 | ||
182 | config_cmd->hdr.length = BNX2X_MAX_MULTICAST; | 975 | bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING, |
183 | config_cmd->hdr.offset = offset; | 976 | cam_offset, add, |
184 | config_cmd->hdr.client_id = 0xff; | 977 | elem->cmd_data.vlan_mac.u.vlan_mac.mac, |
185 | /* We'll wait for a completion this time... */ | 978 | elem->cmd_data.vlan_mac.u.vlan_mac.vlan, |
186 | config_cmd->hdr.echo = 1; | 979 | ETH_VLAN_FILTER_CLASSIFY, config); |
980 | } | ||
187 | 981 | ||
188 | bp->set_mac_pending = 1; | 982 | #define list_next_entry(pos, member) \ |
983 | list_entry((pos)->member.next, typeof(*(pos)), member) | ||
189 | 984 | ||
190 | mb(); | 985 | /** |
986 | * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element | ||
987 | * | ||
988 | * @bp: device handle | ||
989 | * @p: command parameters | ||
990 | * @ppos: pointer to the cookie | ||
991 | * | ||
992 | * reconfigure next MAC/VLAN/VLAN-MAC element from the | ||
993 | * previously configured elements list. | ||
994 | * | ||
995 | * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken | ||
996 | * into an account | ||
997 | * | ||
998 | * pointer to the cooky - that should be given back in the next call to make | ||
999 | * function handle the next element. If *ppos is set to NULL it will restart the | ||
1000 | * iterator. If returned *ppos == NULL this means that the last element has been | ||
1001 | * handled. | ||
1002 | * | ||
1003 | */ | ||
1004 | static int bnx2x_vlan_mac_restore(struct bnx2x *bp, | ||
1005 | struct bnx2x_vlan_mac_ramrod_params *p, | ||
1006 | struct bnx2x_vlan_mac_registry_elem **ppos) | ||
1007 | { | ||
1008 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
1009 | struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; | ||
1010 | |||
1011 | /* If list is empty - there is nothing to do here */ | ||
1012 | if (list_empty(&o->head)) { | ||
1013 | *ppos = NULL; | ||
1014 | return 0; | ||
1015 | } | ||
1016 | |||
1017 | /* make a step... */ | ||
1018 | if (*ppos == NULL) | ||
1019 | *ppos = list_first_entry(&o->head, | ||
1020 | struct bnx2x_vlan_mac_registry_elem, | ||
1021 | link); | ||
1022 | else | ||
1023 | *ppos = list_next_entry(*ppos, link); | ||
1024 | |||
1025 | pos = *ppos; | ||
1026 | |||
1027 | /* If it's the last step - return NULL */ | ||
1028 | if (list_is_last(&pos->link, &o->head)) | ||
1029 | *ppos = NULL; | ||
1030 | |||
1031 | /* Prepare a 'user_req' */ | ||
1032 | memcpy(&p->user_req.u, &pos->u, sizeof(pos->u)); | ||
1033 | |||
1034 | /* Set the command */ | ||
1035 | p->user_req.cmd = BNX2X_VLAN_MAC_ADD; | ||
191 | 1036 | ||
192 | bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, | 1037 | /* Set vlan_mac_flags */ |
193 | U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); | 1038 | p->user_req.vlan_mac_flags = pos->vlan_mac_flags; |
194 | 1039 | ||
195 | /* Wait for a completion */ | 1040 | /* Set a restore bit */ |
196 | bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, | 1041 | __set_bit(RAMROD_RESTORE, &p->ramrod_flags); |
197 | ramrod_flags); | ||
198 | 1042 | ||
1043 | return bnx2x_config_vlan_mac(bp, p); | ||
199 | } | 1044 | } |
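In other words, a caller restores a registry by repeatedly handing the same cookie back until NULL is returned. Below is a self-contained illustration of that calling convention with an array-backed registry and made-up names, not the driver's types:

#include <stdio.h>

struct elem { int id; };

static struct elem registry[] = { { 1 }, { 2 }, { 3 } };
#define N_ELEMS (sizeof(registry) / sizeof(registry[0]))

/* Step the cursor, re-issue the element it now points at, and hand the
 * cursor back; NULL on return means the last element has been handled.  */
static int restore_next(struct elem **ppos)
{
        struct elem *pos = (*ppos == NULL) ? &registry[0] : *ppos + 1;

        *ppos = (pos == &registry[N_ELEMS - 1]) ? NULL : pos;
        printf("re-adding element %d\n", pos->id);
        return 0;                       /* rc of the re-issued ADD command */
}

int main(void)
{
        struct elem *pos = NULL;        /* NULL (re)starts the iteration   */

        do {
                restore_next(&pos);
        } while (pos);                  /* NULL back => registry replayed  */
        return 0;
}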
200 | 1045 | ||
201 | /* Accept one or more multicasts */ | 1046 | /* |
202 | int bnx2x_set_e1h_mc_list(struct bnx2x *bp) | 1047 | * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a |
1048 | * pointer to an element with specific criteria and NULL if such an element | ||
1049 | * hasn't been found. | ||
1050 | */ | ||
1051 | static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac( | ||
1052 | struct bnx2x_exe_queue_obj *o, | ||
1053 | struct bnx2x_exeq_elem *elem) | ||
203 | { | 1054 | { |
204 | struct net_device *dev = bp->dev; | 1055 | struct bnx2x_exeq_elem *pos; |
205 | struct netdev_hw_addr *ha; | 1056 | struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac; |
206 | u32 mc_filter[MC_HASH_SIZE]; | 1057 | |
207 | u32 crc, bit, regidx; | 1058 | /* Check pending for execution commands */ |
208 | int i; | 1059 | list_for_each_entry(pos, &o->exe_queue, link) |
1060 | if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data, | ||
1061 | sizeof(*data)) && | ||
1062 | (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) | ||
1063 | return pos; | ||
1064 | |||
1065 | return NULL; | ||
1066 | } | ||
1067 | |||
1068 | static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan( | ||
1069 | struct bnx2x_exe_queue_obj *o, | ||
1070 | struct bnx2x_exeq_elem *elem) | ||
1071 | { | ||
1072 | struct bnx2x_exeq_elem *pos; | ||
1073 | struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan; | ||
1074 | |||
1075 | /* Check pending for execution commands */ | ||
1076 | list_for_each_entry(pos, &o->exe_queue, link) | ||
1077 | if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data, | ||
1078 | sizeof(*data)) && | ||
1079 | (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) | ||
1080 | return pos; | ||
1081 | |||
1082 | return NULL; | ||
1083 | } | ||
209 | 1084 | ||
210 | memset(mc_filter, 0, 4 * MC_HASH_SIZE); | 1085 | static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac( |
1086 | struct bnx2x_exe_queue_obj *o, | ||
1087 | struct bnx2x_exeq_elem *elem) | ||
1088 | { | ||
1089 | struct bnx2x_exeq_elem *pos; | ||
1090 | struct bnx2x_vlan_mac_ramrod_data *data = | ||
1091 | &elem->cmd_data.vlan_mac.u.vlan_mac; | ||
1092 | |||
1093 | /* Check pending for execution commands */ | ||
1094 | list_for_each_entry(pos, &o->exe_queue, link) | ||
1095 | if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data, | ||
1096 | sizeof(*data)) && | ||
1097 | (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) | ||
1098 | return pos; | ||
1099 | |||
1100 | return NULL; | ||
1101 | } | ||
211 | 1102 | ||
212 | netdev_for_each_mc_addr(ha, dev) { | 1103 | /** |
213 | DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", | 1104 | * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed |
214 | bnx2x_mc_addr(ha)); | 1105 | * |
1106 | * @bp: device handle | ||
1107 | * @qo: bnx2x_qable_obj | ||
1108 | * @elem: bnx2x_exeq_elem | ||
1109 | * | ||
1110 | * Checks that the requested configuration can be added. If yes and if | ||
1111 | * requested, consumes a CAM credit. | ||
1112 | * | ||
1113 | * The 'validate' is run after the 'optimize'. | ||
1114 | * | ||
1115 | */ | ||
1116 | static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, | ||
1117 | union bnx2x_qable_obj *qo, | ||
1118 | struct bnx2x_exeq_elem *elem) | ||
1119 | { | ||
1120 | struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; | ||
1121 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; | ||
1122 | int rc; | ||
1123 | |||
1124 | /* Check the registry */ | ||
1125 | rc = o->check_add(o, &elem->cmd_data.vlan_mac.u); | ||
1126 | if (rc) { | ||
1127 | DP(BNX2X_MSG_SP, "ADD command is not allowed considering " | ||
1128 | "current registry state\n"); | ||
1129 | return rc; | ||
1130 | } | ||
215 | 1131 | ||
216 | crc = crc32c_le(0, bnx2x_mc_addr(ha), | 1132 | /* |
217 | ETH_ALEN); | 1133 | * Check if there is a pending ADD command for this |
218 | bit = (crc >> 24) & 0xff; | 1134 | * MAC/VLAN/VLAN-MAC. Return an error if there is. |
219 | regidx = bit >> 5; | 1135 | */ |
220 | bit &= 0x1f; | 1136 | if (exeq->get(exeq, elem)) { |
221 | mc_filter[regidx] |= (1 << bit); | 1137 | DP(BNX2X_MSG_SP, "There is a pending ADD command already\n"); |
1138 | return -EEXIST; | ||
222 | } | 1139 | } |
223 | 1140 | ||
224 | for (i = 0; i < MC_HASH_SIZE; i++) | 1141 | /* |
225 | REG_WR(bp, MC_HASH_OFFSET(bp, i), | 1142 | * TODO: Check the pending MOVE from other objects where this |
226 | mc_filter[i]); | 1143 | * object is a destination object. |
1144 | */ | ||
1145 | |||
1146 | /* Consume the credit if not requested not to */ | ||
1147 | if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, | ||
1148 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || | ||
1149 | o->get_credit(o))) | ||
1150 | return -EINVAL; | ||
227 | 1151 | ||
228 | return 0; | 1152 | return 0; |
229 | } | 1153 | } |
230 | 1154 | ||
231 | void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp) | 1155 | /** |
1156 | * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed | ||
1157 | * | ||
1158 | * @bp: device handle | ||
1159 | * @qo: queueable object to check | ||
1160 | * @elem: element that needs to be deleted | ||
1161 | * | ||
1162 | * Checks that the requested configuration can be deleted. If yes and if | ||
1163 | * requested, returns a CAM credit. | ||
1164 | * | ||
1165 | * The 'validate' is run after the 'optimize'. | ||
1166 | */ | ||
1167 | static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, | ||
1168 | union bnx2x_qable_obj *qo, | ||
1169 | struct bnx2x_exeq_elem *elem) | ||
232 | { | 1170 | { |
233 | int i; | 1171 | struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; |
1172 | struct bnx2x_vlan_mac_registry_elem *pos; | ||
1173 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; | ||
1174 | struct bnx2x_exeq_elem query_elem; | ||
1175 | |||
1176 | /* If this classification cannot be deleted (doesn't exist) | ||
1177 | * - return -EEXIST. | ||
1178 | */ | ||
1179 | pos = o->check_del(o, &elem->cmd_data.vlan_mac.u); | ||
1180 | if (!pos) { | ||
1181 | DP(BNX2X_MSG_SP, "DEL command is not allowed considering " | ||
1182 | "current registry state\n"); | ||
1183 | return -EEXIST; | ||
1184 | } | ||
1185 | |||
1186 | /* | ||
1187 | * Check if there are pending DEL or MOVE commands for this | ||
1188 | * MAC/VLAN/VLAN-MAC. Return an error if so. | ||
1189 | */ | ||
1190 | memcpy(&query_elem, elem, sizeof(query_elem)); | ||
1191 | |||
1192 | /* Check for MOVE commands */ | ||
1193 | query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE; | ||
1194 | if (exeq->get(exeq, &query_elem)) { | ||
1195 | BNX2X_ERR("There is a pending MOVE command already\n"); | ||
1196 | return -EINVAL; | ||
1197 | } | ||
1198 | |||
1199 | /* Check for DEL commands */ | ||
1200 | if (exeq->get(exeq, elem)) { | ||
1201 | DP(BNX2X_MSG_SP, "There is a pending DEL command already\n"); | ||
1202 | return -EEXIST; | ||
1203 | } | ||
1204 | |||
1205 | /* Return the credit to the credit pool if not requested not to */ | ||
1206 | if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, | ||
1207 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || | ||
1208 | o->put_credit(o))) { | ||
1209 | BNX2X_ERR("Failed to return a credit\n"); | ||
1210 | return -EINVAL; | ||
1211 | } | ||
234 | 1212 | ||
235 | for (i = 0; i < MC_HASH_SIZE; i++) | 1213 | return 0; |
236 | REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); | ||
237 | } | 1214 | } |
238 | 1215 | ||
239 | /* must be called under rtnl_lock */ | 1216 | /** |
240 | void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters) | 1217 | * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed |
1218 | * | ||
1219 | * @bp: device handle | ||
1220 | * @qo: queueable object to check (source) | ||
1221 | * @elem: element that needs to be moved | ||
1222 | * | ||
1223 | * Checks that the requested configuration can be moved. If yes and if | ||
1224 | * requested, returns a CAM credit. | ||
1225 | * | ||
1226 | * The 'validate' is run after the 'optimize'. | ||
1227 | */ | ||
1228 | static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, | ||
1229 | union bnx2x_qable_obj *qo, | ||
1230 | struct bnx2x_exeq_elem *elem) | ||
241 | { | 1231 | { |
242 | u32 mask = (1 << cl_id); | 1232 | struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac; |
1233 | struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj; | ||
1234 | struct bnx2x_exeq_elem query_elem; | ||
1235 | struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; | ||
1236 | struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; | ||
1237 | |||
1238 | /* | ||
1239 | * Check if we can perform this operation based on the current registry | ||
1240 | * state. | ||
1241 | */ | ||
1242 | if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) { | ||
1243 | DP(BNX2X_MSG_SP, "MOVE command is not allowed considering " | ||
1244 | "current registry state\n"); | ||
1245 | return -EINVAL; | ||
1246 | } | ||
243 | 1247 | ||
244 | /* initial setting is BNX2X_ACCEPT_NONE */ | 1248 | /* |
245 | u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1; | 1249 | * Check if there is an already pending DEL or MOVE command for the |
246 | u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; | 1250 | * source object or ADD command for a destination object. Return an |
247 | u8 unmatched_unicast = 0; | 1251 | * error if so. |
1252 | */ | ||
1253 | memcpy(&query_elem, elem, sizeof(query_elem)); | ||
248 | 1254 | ||
249 | if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST) | 1255 | /* Check DEL on source */ |
250 | unmatched_unicast = 1; | 1256 | query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; |
1257 | if (src_exeq->get(src_exeq, &query_elem)) { | ||
1258 | BNX2X_ERR("There is a pending DEL command on the source " | ||
1259 | "queue already\n"); | ||
1260 | return -EINVAL; | ||
1261 | } | ||
1262 | |||
1263 | /* Check MOVE on source */ | ||
1264 | if (src_exeq->get(src_exeq, elem)) { | ||
1265 | DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n"); | ||
1266 | return -EEXIST; | ||
1267 | } | ||
1268 | |||
1269 | /* Check ADD on destination */ | ||
1270 | query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; | ||
1271 | if (dest_exeq->get(dest_exeq, &query_elem)) { | ||
1272 | BNX2X_ERR("There is a pending ADD command on the " | ||
1273 | "destination queue already\n"); | ||
1274 | return -EINVAL; | ||
1275 | } | ||
1276 | |||
1277 | /* Consume the credit if not requested not to */ | ||
1278 | if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, | ||
1279 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || | ||
1280 | dest_o->get_credit(dest_o))) | ||
1281 | return -EINVAL; | ||
1282 | |||
1283 | if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, | ||
1284 | &elem->cmd_data.vlan_mac.vlan_mac_flags) || | ||
1285 | src_o->put_credit(src_o))) { | ||
1286 | /* return the credit taken from dest... */ | ||
1287 | dest_o->put_credit(dest_o); | ||
1288 | return -EINVAL; | ||
1289 | } | ||
1290 | |||
1291 | return 0; | ||
1292 | } | ||
1293 | |||
1294 | static int bnx2x_validate_vlan_mac(struct bnx2x *bp, | ||
1295 | union bnx2x_qable_obj *qo, | ||
1296 | struct bnx2x_exeq_elem *elem) | ||
1297 | { | ||
1298 | switch (elem->cmd_data.vlan_mac.cmd) { | ||
1299 | case BNX2X_VLAN_MAC_ADD: | ||
1300 | return bnx2x_validate_vlan_mac_add(bp, qo, elem); | ||
1301 | case BNX2X_VLAN_MAC_DEL: | ||
1302 | return bnx2x_validate_vlan_mac_del(bp, qo, elem); | ||
1303 | case BNX2X_VLAN_MAC_MOVE: | ||
1304 | return bnx2x_validate_vlan_mac_move(bp, qo, elem); | ||
1305 | default: | ||
1306 | return -EINVAL; | ||
1307 | } | ||
1308 | } | ||
251 | 1309 | ||
252 | if (filters & BNX2X_PROMISCUOUS_MODE) { | 1310 | /** |
253 | /* promiscious - accept all, drop none */ | 1311 | * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes. |
254 | drop_all_ucast = drop_all_bcast = drop_all_mcast = 0; | 1312 | * |
255 | accp_all_ucast = accp_all_bcast = accp_all_mcast = 1; | 1313 | * @bp: device handle |
256 | if (IS_MF_SI(bp)) { | 1314 | * @o: bnx2x_vlan_mac_obj |
1315 | * | ||
1316 | */ | ||
1317 | static int bnx2x_wait_vlan_mac(struct bnx2x *bp, | ||
1318 | struct bnx2x_vlan_mac_obj *o) | ||
1319 | { | ||
1320 | int cnt = 5000, rc; | ||
1321 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; | ||
1322 | struct bnx2x_raw_obj *raw = &o->raw; | ||
1323 | |||
1324 | while (cnt--) { | ||
1325 | /* Wait for the current command to complete */ | ||
1326 | rc = raw->wait_comp(bp, raw); | ||
1327 | if (rc) | ||
1328 | return rc; | ||
1329 | |||
1330 | /* Wait until there are no pending commands */ | ||
1331 | if (!bnx2x_exe_queue_empty(exeq)) | ||
1332 | usleep_range(1000, 1000); | ||
1333 | else | ||
1334 | return 0; | ||
1335 | } | ||
1336 | |||
1337 | return -EBUSY; | ||
1338 | } | ||
1339 | |||
1340 | /** | ||
1341 | * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod | ||
1342 | * | ||
1343 | * @bp: device handle | ||
1344 | * @o: bnx2x_vlan_mac_obj | ||
1345 | * @cqe: completion element | ||
1346 | * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk | ||
1347 | * | ||
1348 | */ | ||
1349 | static int bnx2x_complete_vlan_mac(struct bnx2x *bp, | ||
1350 | struct bnx2x_vlan_mac_obj *o, | ||
1351 | union event_ring_elem *cqe, | ||
1352 | unsigned long *ramrod_flags) | ||
1353 | { | ||
1354 | struct bnx2x_raw_obj *r = &o->raw; | ||
1355 | int rc; | ||
1356 | |||
1357 | /* Reset pending list */ | ||
1358 | bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); | ||
1359 | |||
1360 | /* Clear pending */ | ||
1361 | r->clear_pending(r); | ||
1362 | |||
1363 | /* If ramrod failed this is most likely a SW bug */ | ||
1364 | if (cqe->message.error) | ||
1365 | return -EINVAL; | ||
1366 | |||
1367 | /* Run the next bulk of pending commands if requested */ | ||
1368 | if (test_bit(RAMROD_CONT, ramrod_flags)) { | ||
1369 | rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); | ||
1370 | if (rc < 0) | ||
1371 | return rc; | ||
1372 | } | ||
1373 | |||
1374 | /* If there is more work to do return PENDING */ | ||
1375 | if (!bnx2x_exe_queue_empty(&o->exe_queue)) | ||
1376 | return 1; | ||
1377 | |||
1378 | return 0; | ||
1379 | } | ||
1380 | |||
1381 | /** | ||
1382 | * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands. | ||
1383 | * | ||
1384 | * @bp: device handle | ||
1385 | * @o: bnx2x_qable_obj | ||
1386 | * @elem: bnx2x_exeq_elem | ||
1387 | */ | ||
1388 | static int bnx2x_optimize_vlan_mac(struct bnx2x *bp, | ||
1389 | union bnx2x_qable_obj *qo, | ||
1390 | struct bnx2x_exeq_elem *elem) | ||
1391 | { | ||
1392 | struct bnx2x_exeq_elem query, *pos; | ||
1393 | struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; | ||
1394 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; | ||
1395 | |||
1396 | memcpy(&query, elem, sizeof(query)); | ||
1397 | |||
1398 | switch (elem->cmd_data.vlan_mac.cmd) { | ||
1399 | case BNX2X_VLAN_MAC_ADD: | ||
1400 | query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; | ||
1401 | break; | ||
1402 | case BNX2X_VLAN_MAC_DEL: | ||
1403 | query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; | ||
1404 | break; | ||
1405 | default: | ||
1406 | /* Don't handle anything other than ADD or DEL */ | ||
1407 | return 0; | ||
1408 | } | ||
1409 | |||
1410 | /* If we found the appropriate element - delete it */ | ||
1411 | pos = exeq->get(exeq, &query); | ||
1412 | if (pos) { | ||
1413 | |||
1414 | /* Return the credit of the optimized command */ | ||
1415 | if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, | ||
1416 | &pos->cmd_data.vlan_mac.vlan_mac_flags)) { | ||
1417 | if ((query.cmd_data.vlan_mac.cmd == | ||
1418 | BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) { | ||
1419 | BNX2X_ERR("Failed to return the credit for the " | ||
1420 | "optimized ADD command\n"); | ||
1421 | return -EINVAL; | ||
1422 | } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ | ||
1423 | BNX2X_ERR("Failed to recover the credit from " | ||
1424 | "the optimized DEL command\n"); | ||
1425 | return -EINVAL; | ||
1426 | } | ||
1427 | } | ||
1428 | |||
1429 | DP(BNX2X_MSG_SP, "Optimizing %s command\n", | ||
1430 | (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? | ||
1431 | "ADD" : "DEL"); | ||
1432 | |||
1433 | list_del(&pos->link); | ||
1434 | bnx2x_exe_queue_free_elem(bp, pos); | ||
1435 | return 1; | ||
1436 | } | ||
1437 | |||
1438 | return 0; | ||
1439 | } | ||
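The net effect of the optimizer is that an ADD followed by its matching DEL (or the other way round) never reaches the firmware at all; only the credit accounting has to be patched up. A toy model of that cancellation, with an integer counter instead of the CAM credit pool and illustrative names throughout:

#include <stdio.h>

enum { CMD_ADD, CMD_DEL };

struct pending { int cmd; int key; int valid; };

static int credits = 8;

/* If the new command is the exact inverse of a queued one, cancel the
 * queued command instead of sending either to the firmware.  A cancelled
 * queued ADD gives its credit back; a cancelled queued DEL re-takes a
 * credit so the registry entry it would have freed stays accounted for. */
static int optimize(struct pending *q, int n, int cmd, int key)
{
        int inverse = (cmd == CMD_ADD) ? CMD_DEL : CMD_ADD;

        for (int i = 0; i < n; i++) {
                if (!q[i].valid || q[i].key != key || q[i].cmd != inverse)
                        continue;
                if (q[i].cmd == CMD_ADD)
                        credits++;      /* return the queued ADD's credit  */
                else
                        credits--;      /* recover credit for the kept entry */
                q[i].valid = 0;         /* drop the queued command         */
                return 1;               /* new command optimized away      */
        }
        return 0;
}

int main(void)
{
        struct pending q[1] = { { CMD_ADD, 42, 1 } };

        credits--;                      /* the queued ADD consumed a credit */
        printf("optimized=%d credits=%d\n",
               optimize(q, 1, CMD_DEL, 42), credits);        /* 1 8         */
        return 0;
}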
1440 | |||
1441 | /** | ||
1442 | * bnx2x_vlan_mac_get_registry_elem - prepare a registry element | ||
1443 | * | ||
1444 | * @bp: device handle | ||
1445 | * @o: | ||
1446 | * @elem: | ||
1447 | * @restore: | ||
1448 | * @re: | ||
1449 | * | ||
1450 | * prepare a registry element according to the current command request. | ||
1451 | */ | ||
1452 | static inline int bnx2x_vlan_mac_get_registry_elem( | ||
1453 | struct bnx2x *bp, | ||
1454 | struct bnx2x_vlan_mac_obj *o, | ||
1455 | struct bnx2x_exeq_elem *elem, | ||
1456 | bool restore, | ||
1457 | struct bnx2x_vlan_mac_registry_elem **re) | ||
1458 | { | ||
1459 | int cmd = elem->cmd_data.vlan_mac.cmd; | ||
1460 | struct bnx2x_vlan_mac_registry_elem *reg_elem; | ||
1461 | |||
1462 | /* Allocate a new registry element if needed. */ | ||
1463 | if (!restore && | ||
1464 | ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) { | ||
1465 | reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC); | ||
1466 | if (!reg_elem) | ||
1467 | return -ENOMEM; | ||
1468 | |||
1469 | /* Get a new CAM offset */ | ||
1470 | if (!o->get_cam_offset(o, ®_elem->cam_offset)) { | ||
1471 | /* | ||
1472 | * This should never happen, because we have checked the | ||
1473 | * CAM availability in the 'validate'. | ||
1474 | */ | ||
1475 | WARN_ON(1); | ||
1476 | kfree(reg_elem); | ||
1477 | return -EINVAL; | ||
1478 | } | ||
1479 | |||
1480 | DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset); | ||
1481 | |||
1482 | /* Set a VLAN-MAC data */ | ||
1483 | memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u, | ||
1484 | sizeof(reg_elem->u)); | ||
1485 | |||
1486 | /* Copy the flags (needed for DEL and RESTORE flows) */ | ||
1487 | reg_elem->vlan_mac_flags = | ||
1488 | elem->cmd_data.vlan_mac.vlan_mac_flags; | ||
1489 | } else /* DEL, RESTORE */ | ||
1490 | reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u); | ||
1491 | |||
1492 | *re = reg_elem; | ||
1493 | return 0; | ||
1494 | } | ||
1495 | |||
1496 | /** | ||
1497 | * bnx2x_execute_vlan_mac - execute vlan mac command | ||
1498 | * | ||
1499 | * @bp: device handle | ||
1500 | * @qo:		queueable object (contains the vlan_mac object) | ||
1501 | * @exe_chunk:	list of commands to execute in this step | ||
1502 | * @ramrod_flags: RAMROD_XX execution flags | ||
1503 | * | ||
1504 | * go and send a ramrod! | ||
1505 | */ | ||
1506 | static int bnx2x_execute_vlan_mac(struct bnx2x *bp, | ||
1507 | union bnx2x_qable_obj *qo, | ||
1508 | struct list_head *exe_chunk, | ||
1509 | unsigned long *ramrod_flags) | ||
1510 | { | ||
1511 | struct bnx2x_exeq_elem *elem; | ||
1512 | struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; | ||
1513 | struct bnx2x_raw_obj *r = &o->raw; | ||
1514 | int rc, idx = 0; | ||
1515 | bool restore = test_bit(RAMROD_RESTORE, ramrod_flags); | ||
1516 | bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags); | ||
1517 | struct bnx2x_vlan_mac_registry_elem *reg_elem; | ||
1518 | int cmd; | ||
1519 | |||
1520 | /* | ||
1521 | * If DRIVER_ONLY execution is requested, cleanup a registry | ||
1522 | * and exit. Otherwise send a ramrod to FW. | ||
1523 | */ | ||
1524 | if (!drv_only) { | ||
1525 | WARN_ON(r->check_pending(r)); | ||
1526 | |||
1527 | /* Set pending */ | ||
1528 | r->set_pending(r); | ||
1529 | |||
1530 | /* Fill the ramrod data */ | ||
1531 | list_for_each_entry(elem, exe_chunk, link) { | ||
1532 | cmd = elem->cmd_data.vlan_mac.cmd; | ||
257 | /* | 1533 | /* |
258 | * SI mode defines to accept in promiscuos mode | 1534 | * We will add to the target object in MOVE command, so |
259 | * only unmatched packets | 1535 | * change the object for a CAM search. |
260 | */ | 1536 | */ |
261 | unmatched_unicast = 1; | 1537 | if (cmd == BNX2X_VLAN_MAC_MOVE) |
262 | accp_all_ucast = 0; | 1538 | cam_obj = elem->cmd_data.vlan_mac.target_obj; |
1539 | else | ||
1540 | cam_obj = o; | ||
1541 | |||
1542 | rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj, | ||
1543 | elem, restore, | ||
1544 | ®_elem); | ||
1545 | if (rc) | ||
1546 | goto error_exit; | ||
1547 | |||
1548 | WARN_ON(!reg_elem); | ||
1549 | |||
1550 | /* Push a new entry into the registry */ | ||
1551 | if (!restore && | ||
1552 | ((cmd == BNX2X_VLAN_MAC_ADD) || | ||
1553 | (cmd == BNX2X_VLAN_MAC_MOVE))) | ||
1554 | list_add(®_elem->link, &cam_obj->head); | ||
1555 | |||
1556 | /* Configure a single command in a ramrod data buffer */ | ||
1557 | o->set_one_rule(bp, o, elem, idx, | ||
1558 | reg_elem->cam_offset); | ||
1559 | |||
1560 | /* MOVE command consumes 2 entries in the ramrod data */ | ||
1561 | if (cmd == BNX2X_VLAN_MAC_MOVE) | ||
1562 | idx += 2; | ||
1563 | else | ||
1564 | idx++; | ||
1565 | } | ||
1566 | |||
1567 | /* Commit the data writes towards the memory */ | ||
1568 | mb(); | ||
1569 | |||
1570 | rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, | ||
1571 | U64_HI(r->rdata_mapping), | ||
1572 | U64_LO(r->rdata_mapping), | ||
1573 | ETH_CONNECTION_TYPE); | ||
1574 | if (rc) | ||
1575 | goto error_exit; | ||
1576 | } | ||
1577 | |||
1578 | /* Now, when we are done with the ramrod - clean up the registry */ | ||
1579 | list_for_each_entry(elem, exe_chunk, link) { | ||
1580 | cmd = elem->cmd_data.vlan_mac.cmd; | ||
1581 | if ((cmd == BNX2X_VLAN_MAC_DEL) || | ||
1582 | (cmd == BNX2X_VLAN_MAC_MOVE)) { | ||
1583 | reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u); | ||
1584 | |||
1585 | WARN_ON(!reg_elem); | ||
1586 | |||
1587 | o->put_cam_offset(o, reg_elem->cam_offset); | ||
1588 | list_del(®_elem->link); | ||
1589 | kfree(reg_elem); | ||
1590 | } | ||
1591 | } | ||
1592 | |||
1593 | if (!drv_only) | ||
1594 | return 1; | ||
1595 | else | ||
1596 | return 0; | ||
1597 | |||
1598 | error_exit: | ||
1599 | r->clear_pending(r); | ||
1600 | |||
1601 | /* Cleanup a registry in case of a failure */ | ||
1602 | list_for_each_entry(elem, exe_chunk, link) { | ||
1603 | cmd = elem->cmd_data.vlan_mac.cmd; | ||
1604 | |||
1605 | if (cmd == BNX2X_VLAN_MAC_MOVE) | ||
1606 | cam_obj = elem->cmd_data.vlan_mac.target_obj; | ||
1607 | else | ||
1608 | cam_obj = o; | ||
1609 | |||
1610 | /* Delete all newly added above entries */ | ||
1611 | if (!restore && | ||
1612 | ((cmd == BNX2X_VLAN_MAC_ADD) || | ||
1613 | (cmd == BNX2X_VLAN_MAC_MOVE))) { | ||
1614 | reg_elem = o->check_del(cam_obj, | ||
1615 | &elem->cmd_data.vlan_mac.u); | ||
1616 | if (reg_elem) { | ||
1617 | list_del(®_elem->link); | ||
1618 | kfree(reg_elem); | ||
1619 | } | ||
263 | } | 1620 | } |
264 | } | 1621 | } |
265 | if (filters & BNX2X_ACCEPT_UNICAST) { | 1622 | |
1623 | return rc; | ||
1624 | } | ||
1625 | |||
1626 | static inline int bnx2x_vlan_mac_push_new_cmd( | ||
1627 | struct bnx2x *bp, | ||
1628 | struct bnx2x_vlan_mac_ramrod_params *p) | ||
1629 | { | ||
1630 | struct bnx2x_exeq_elem *elem; | ||
1631 | struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; | ||
1632 | bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags); | ||
1633 | |||
1634 | /* Allocate the execution queue element */ | ||
1635 | elem = bnx2x_exe_queue_alloc_elem(bp); | ||
1636 | if (!elem) | ||
1637 | return -ENOMEM; | ||
1638 | |||
1639 | /* Set the command 'length' */ | ||
1640 | switch (p->user_req.cmd) { | ||
1641 | case BNX2X_VLAN_MAC_MOVE: | ||
1642 | elem->cmd_len = 2; | ||
1643 | break; | ||
1644 | default: | ||
1645 | elem->cmd_len = 1; | ||
1646 | } | ||
1647 | |||
1648 | /* Fill the object specific info */ | ||
1649 | memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req)); | ||
1650 | |||
1651 | /* Try to add a new command to the pending list */ | ||
1652 | return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore); | ||
1653 | } | ||
1654 | |||
1655 | /** | ||
1656 | * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules. | ||
1657 | * | ||
1658 | * @bp: device handle | ||
1659 | * @p:		ramrod parameters carrying the user request and flags | ||
1660 | * | ||
1661 | */ | ||
1662 | int bnx2x_config_vlan_mac( | ||
1663 | struct bnx2x *bp, | ||
1664 | struct bnx2x_vlan_mac_ramrod_params *p) | ||
1665 | { | ||
1666 | int rc = 0; | ||
1667 | struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; | ||
1668 | unsigned long *ramrod_flags = &p->ramrod_flags; | ||
1669 | bool cont = test_bit(RAMROD_CONT, ramrod_flags); | ||
1670 | struct bnx2x_raw_obj *raw = &o->raw; | ||
1671 | |||
1672 | /* | ||
1673 | * Add new elements to the execution list for commands that require it. | ||
1674 | */ | ||
1675 | if (!cont) { | ||
1676 | rc = bnx2x_vlan_mac_push_new_cmd(bp, p); | ||
1677 | if (rc) | ||
1678 | return rc; | ||
1679 | } | ||
1680 | |||
1681 | /* | ||
1682 | * If nothing more is going to be executed in this iteration, we want to | ||
1683 | * return PENDING if there are pending commands in the queue | ||
1684 | */ | ||
1685 | if (!bnx2x_exe_queue_empty(&o->exe_queue)) | ||
1686 | rc = 1; | ||
1687 | |||
1688 | /* Execute commands if required */ | ||
1689 | if (cont || test_bit(RAMROD_EXEC, ramrod_flags) || | ||
1690 | test_bit(RAMROD_COMP_WAIT, ramrod_flags)) { | ||
1691 | rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); | ||
1692 | if (rc < 0) | ||
1693 | return rc; | ||
1694 | } | ||
1695 | |||
1696 | /* | ||
1697 | * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it is set, | ||
1698 | * the user wants to wait until the last command is done. | ||
1699 | */ | ||
1700 | if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { | ||
1701 | /* | ||
1702 | * Wait for at most the current exe_queue length iterations plus | ||
1703 | * one (for the current pending command). | ||
1704 | */ | ||
1705 | int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1; | ||
1706 | |||
1707 | while (!bnx2x_exe_queue_empty(&o->exe_queue) && | ||
1708 | max_iterations--) { | ||
1709 | |||
1710 | /* Wait for the current command to complete */ | ||
1711 | rc = raw->wait_comp(bp, raw); | ||
1712 | if (rc) | ||
1713 | return rc; | ||
1714 | |||
1715 | /* Make a next step */ | ||
1716 | rc = bnx2x_exe_queue_step(bp, &o->exe_queue, | ||
1717 | ramrod_flags); | ||
1718 | if (rc < 0) | ||
1719 | return rc; | ||
1720 | } | ||
1721 | |||
1722 | return 0; | ||
1723 | } | ||
1724 | |||
1725 | return rc; | ||
1726 | } | ||
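For orientation, a hedged sketch of how a caller might push a single ADD through this interface and wait for it; the classification union member (u.mac.mac) and the surrounding wiring are assumptions based on the structures used in this file, not a verbatim caller from the patch:

/* Sketch only: add one MAC and wait for completion. 'bp' and the
 * initialized 'mac_obj' come from the caller; the ADD consumes a CAM
 * credit, which is validated earlier in this file. */
static int example_add_mac(struct bnx2x *bp,
                           struct bnx2x_vlan_mac_obj *mac_obj, const u8 *mac)
{
        struct bnx2x_vlan_mac_ramrod_params p;

        memset(&p, 0, sizeof(p));
        p.vlan_mac_obj = mac_obj;
        p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
        memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);    /* assumed field name */

        __set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);   /* block until done */

        return bnx2x_config_vlan_mac(bp, &p);
}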
1727 | |||
1728 | |||
1729 | |||
1730 | /** | ||
1731 | * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec | ||
1732 | * | ||
1733 | * @bp: device handle | ||
1734 | * @o:		vlan_mac object to delete the entries from | ||
1735 | * @vlan_mac_flags: vlan_mac_flags specification of the entries to delete | ||
1736 | * @ramrod_flags: execution flags to be used for this deletion | ||
1737 | * | ||
1738 | * Returns 0 if the last operation has completed successfully and there are | ||
1739 | * no more elements left, a positive value if the last operation has completed | ||
1740 | * successfully and there are more previously configured elements, and a | ||
1741 | * negative value if the current operation has failed. | ||
1742 | */ | ||
1743 | static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, | ||
1744 | struct bnx2x_vlan_mac_obj *o, | ||
1745 | unsigned long *vlan_mac_flags, | ||
1746 | unsigned long *ramrod_flags) | ||
1747 | { | ||
1748 | struct bnx2x_vlan_mac_registry_elem *pos = NULL; | ||
1749 | int rc = 0; | ||
1750 | struct bnx2x_vlan_mac_ramrod_params p; | ||
1751 | struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; | ||
1752 | struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; | ||
1753 | |||
1754 | /* Clear pending commands first */ | ||
1755 | |||
1756 | spin_lock_bh(&exeq->lock); | ||
1757 | |||
1758 | list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { | ||
1759 | if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == | ||
1760 | *vlan_mac_flags) | ||
1761 | list_del(&exeq_pos->link); | ||
1762 | } | ||
1763 | |||
1764 | spin_unlock_bh(&exeq->lock); | ||
1765 | |||
1766 | /* Prepare a command request */ | ||
1767 | memset(&p, 0, sizeof(p)); | ||
1768 | p.vlan_mac_obj = o; | ||
1769 | p.ramrod_flags = *ramrod_flags; | ||
1770 | p.user_req.cmd = BNX2X_VLAN_MAC_DEL; | ||
1771 | |||
1772 | /* | ||
1773 | * Add all but the last VLAN-MAC to the execution queue without actually | ||
1774 | * executing anything. | ||
1775 | */ | ||
1776 | __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); | ||
1777 | __clear_bit(RAMROD_EXEC, &p.ramrod_flags); | ||
1778 | __clear_bit(RAMROD_CONT, &p.ramrod_flags); | ||
1779 | |||
1780 | list_for_each_entry(pos, &o->head, link) { | ||
1781 | if (pos->vlan_mac_flags == *vlan_mac_flags) { | ||
1782 | p.user_req.vlan_mac_flags = pos->vlan_mac_flags; | ||
1783 | memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); | ||
1784 | rc = bnx2x_config_vlan_mac(bp, &p); | ||
1785 | if (rc < 0) { | ||
1786 | BNX2X_ERR("Failed to add a new DEL command\n"); | ||
1787 | return rc; | ||
1788 | } | ||
1789 | } | ||
1790 | } | ||
1791 | |||
1792 | p.ramrod_flags = *ramrod_flags; | ||
1793 | __set_bit(RAMROD_CONT, &p.ramrod_flags); | ||
1794 | |||
1795 | return bnx2x_config_vlan_mac(bp, &p); | ||
1796 | } | ||
1797 | |||
1798 | static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id, | ||
1799 | u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state, | ||
1800 | unsigned long *pstate, bnx2x_obj_type type) | ||
1801 | { | ||
1802 | raw->func_id = func_id; | ||
1803 | raw->cid = cid; | ||
1804 | raw->cl_id = cl_id; | ||
1805 | raw->rdata = rdata; | ||
1806 | raw->rdata_mapping = rdata_mapping; | ||
1807 | raw->state = state; | ||
1808 | raw->pstate = pstate; | ||
1809 | raw->obj_type = type; | ||
1810 | raw->check_pending = bnx2x_raw_check_pending; | ||
1811 | raw->clear_pending = bnx2x_raw_clear_pending; | ||
1812 | raw->set_pending = bnx2x_raw_set_pending; | ||
1813 | raw->wait_comp = bnx2x_raw_wait; | ||
1814 | } | ||
1815 | |||
1816 | static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, | ||
1817 | u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, | ||
1818 | int state, unsigned long *pstate, bnx2x_obj_type type, | ||
1819 | struct bnx2x_credit_pool_obj *macs_pool, | ||
1820 | struct bnx2x_credit_pool_obj *vlans_pool) | ||
1821 | { | ||
1822 | INIT_LIST_HEAD(&o->head); | ||
1823 | |||
1824 | o->macs_pool = macs_pool; | ||
1825 | o->vlans_pool = vlans_pool; | ||
1826 | |||
1827 | o->delete_all = bnx2x_vlan_mac_del_all; | ||
1828 | o->restore = bnx2x_vlan_mac_restore; | ||
1829 | o->complete = bnx2x_complete_vlan_mac; | ||
1830 | o->wait = bnx2x_wait_vlan_mac; | ||
1831 | |||
1832 | bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping, | ||
1833 | state, pstate, type); | ||
1834 | } | ||
1835 | |||
1836 | |||
1837 | void bnx2x_init_mac_obj(struct bnx2x *bp, | ||
1838 | struct bnx2x_vlan_mac_obj *mac_obj, | ||
1839 | u8 cl_id, u32 cid, u8 func_id, void *rdata, | ||
1840 | dma_addr_t rdata_mapping, int state, | ||
1841 | unsigned long *pstate, bnx2x_obj_type type, | ||
1842 | struct bnx2x_credit_pool_obj *macs_pool) | ||
1843 | { | ||
1844 | union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj; | ||
1845 | |||
1846 | bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata, | ||
1847 | rdata_mapping, state, pstate, type, | ||
1848 | macs_pool, NULL); | ||
1849 | |||
1850 | /* CAM credit pool handling */ | ||
1851 | mac_obj->get_credit = bnx2x_get_credit_mac; | ||
1852 | mac_obj->put_credit = bnx2x_put_credit_mac; | ||
1853 | mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; | ||
1854 | mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; | ||
1855 | |||
1856 | if (CHIP_IS_E1x(bp)) { | ||
1857 | mac_obj->set_one_rule = bnx2x_set_one_mac_e1x; | ||
1858 | mac_obj->check_del = bnx2x_check_mac_del; | ||
1859 | mac_obj->check_add = bnx2x_check_mac_add; | ||
1860 | mac_obj->check_move = bnx2x_check_move_always_err; | ||
1861 | mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; | ||
1862 | |||
1863 | /* Exe Queue */ | ||
1864 | bnx2x_exe_queue_init(bp, | ||
1865 | &mac_obj->exe_queue, 1, qable_obj, | ||
1866 | bnx2x_validate_vlan_mac, | ||
1867 | bnx2x_optimize_vlan_mac, | ||
1868 | bnx2x_execute_vlan_mac, | ||
1869 | bnx2x_exeq_get_mac); | ||
1870 | } else { | ||
1871 | mac_obj->set_one_rule = bnx2x_set_one_mac_e2; | ||
1872 | mac_obj->check_del = bnx2x_check_mac_del; | ||
1873 | mac_obj->check_add = bnx2x_check_mac_add; | ||
1874 | mac_obj->check_move = bnx2x_check_move; | ||
1875 | mac_obj->ramrod_cmd = | ||
1876 | RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; | ||
1877 | |||
1878 | /* Exe Queue */ | ||
1879 | bnx2x_exe_queue_init(bp, | ||
1880 | &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, | ||
1881 | qable_obj, bnx2x_validate_vlan_mac, | ||
1882 | bnx2x_optimize_vlan_mac, | ||
1883 | bnx2x_execute_vlan_mac, | ||
1884 | bnx2x_exeq_get_mac); | ||
1885 | } | ||
1886 | } | ||
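The init routines above bind chip-specific behaviour (rule encoding, ramrod command, queue depth) through function pointers, so the rest of the driver calls one interface regardless of the chip generation. A generic, driver-independent sketch of that dispatch pattern (all names illustrative):

#include <stdio.h>

struct classifier_ops {
        int (*set_one_rule)(int idx);   /* chip-specific rule encoder */
        int max_rules_per_ramrod;       /* 1 on older parts, N on newer */
};

static int set_rule_old_chip(int idx) { return printf("legacy rule %d\n", idx); }
static int set_rule_new_chip(int idx) { return printf("rules-list rule %d\n", idx); }

/* Pick the implementation once at init; callers never check the chip again. */
static void classifier_init(struct classifier_ops *ops, int is_old_chip)
{
        if (is_old_chip) {
                ops->set_one_rule = set_rule_old_chip;
                ops->max_rules_per_ramrod = 1;
        } else {
                ops->set_one_rule = set_rule_new_chip;
                ops->max_rules_per_ramrod = 16;
        }
}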
1887 | |||
1888 | void bnx2x_init_vlan_obj(struct bnx2x *bp, | ||
1889 | struct bnx2x_vlan_mac_obj *vlan_obj, | ||
1890 | u8 cl_id, u32 cid, u8 func_id, void *rdata, | ||
1891 | dma_addr_t rdata_mapping, int state, | ||
1892 | unsigned long *pstate, bnx2x_obj_type type, | ||
1893 | struct bnx2x_credit_pool_obj *vlans_pool) | ||
1894 | { | ||
1895 | union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj; | ||
1896 | |||
1897 | bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata, | ||
1898 | rdata_mapping, state, pstate, type, NULL, | ||
1899 | vlans_pool); | ||
1900 | |||
1901 | vlan_obj->get_credit = bnx2x_get_credit_vlan; | ||
1902 | vlan_obj->put_credit = bnx2x_put_credit_vlan; | ||
1903 | vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan; | ||
1904 | vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan; | ||
1905 | |||
1906 | if (CHIP_IS_E1x(bp)) { | ||
1907 | BNX2X_ERR("Do not support chips others than E2 and newer\n"); | ||
1908 | BUG(); | ||
1909 | } else { | ||
1910 | vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2; | ||
1911 | vlan_obj->check_del = bnx2x_check_vlan_del; | ||
1912 | vlan_obj->check_add = bnx2x_check_vlan_add; | ||
1913 | vlan_obj->check_move = bnx2x_check_move; | ||
1914 | vlan_obj->ramrod_cmd = | ||
1915 | RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; | ||
1916 | |||
1917 | /* Exe Queue */ | ||
1918 | bnx2x_exe_queue_init(bp, | ||
1919 | &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT, | ||
1920 | qable_obj, bnx2x_validate_vlan_mac, | ||
1921 | bnx2x_optimize_vlan_mac, | ||
1922 | bnx2x_execute_vlan_mac, | ||
1923 | bnx2x_exeq_get_vlan); | ||
1924 | } | ||
1925 | } | ||
1926 | |||
1927 | void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, | ||
1928 | struct bnx2x_vlan_mac_obj *vlan_mac_obj, | ||
1929 | u8 cl_id, u32 cid, u8 func_id, void *rdata, | ||
1930 | dma_addr_t rdata_mapping, int state, | ||
1931 | unsigned long *pstate, bnx2x_obj_type type, | ||
1932 | struct bnx2x_credit_pool_obj *macs_pool, | ||
1933 | struct bnx2x_credit_pool_obj *vlans_pool) | ||
1934 | { | ||
1935 | union bnx2x_qable_obj *qable_obj = | ||
1936 | (union bnx2x_qable_obj *)vlan_mac_obj; | ||
1937 | |||
1938 | bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata, | ||
1939 | rdata_mapping, state, pstate, type, | ||
1940 | macs_pool, vlans_pool); | ||
1941 | |||
1942 | /* CAM pool handling */ | ||
1943 | vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; | ||
1944 | vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; | ||
1945 | /* | ||
1946 | * CAM offset is relevant for 57710 and 57711 chips only which have a | ||
1947 | * single CAM for both MACs and VLAN-MAC pairs. So the offset | ||
1948 | * will be taken from MACs' pool object only. | ||
1949 | */ | ||
1950 | vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; | ||
1951 | vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; | ||
1952 | |||
1953 | if (CHIP_IS_E1(bp)) { | ||
1954 | BNX2X_ERR("Do not support chips others than E2\n"); | ||
1955 | BUG(); | ||
1956 | } else if (CHIP_IS_E1H(bp)) { | ||
1957 | vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h; | ||
1958 | vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; | ||
1959 | vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; | ||
1960 | vlan_mac_obj->check_move = bnx2x_check_move_always_err; | ||
1961 | vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; | ||
1962 | |||
1963 | /* Exe Queue */ | ||
1964 | bnx2x_exe_queue_init(bp, | ||
1965 | &vlan_mac_obj->exe_queue, 1, qable_obj, | ||
1966 | bnx2x_validate_vlan_mac, | ||
1967 | bnx2x_optimize_vlan_mac, | ||
1968 | bnx2x_execute_vlan_mac, | ||
1969 | bnx2x_exeq_get_vlan_mac); | ||
1970 | } else { | ||
1971 | vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2; | ||
1972 | vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; | ||
1973 | vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; | ||
1974 | vlan_mac_obj->check_move = bnx2x_check_move; | ||
1975 | vlan_mac_obj->ramrod_cmd = | ||
1976 | RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; | ||
1977 | |||
1978 | /* Exe Queue */ | ||
1979 | bnx2x_exe_queue_init(bp, | ||
1980 | &vlan_mac_obj->exe_queue, | ||
1981 | CLASSIFY_RULES_COUNT, | ||
1982 | qable_obj, bnx2x_validate_vlan_mac, | ||
1983 | bnx2x_optimize_vlan_mac, | ||
1984 | bnx2x_execute_vlan_mac, | ||
1985 | bnx2x_exeq_get_vlan_mac); | ||
1986 | } | ||
1987 | |||
1988 | } | ||
1989 | |||
1990 | /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ | ||
1991 | static inline void __storm_memset_mac_filters(struct bnx2x *bp, | ||
1992 | struct tstorm_eth_mac_filter_config *mac_filters, | ||
1993 | u16 pf_id) | ||
1994 | { | ||
1995 | size_t size = sizeof(struct tstorm_eth_mac_filter_config); | ||
1996 | |||
1997 | u32 addr = BAR_TSTRORM_INTMEM + | ||
1998 | TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id); | ||
1999 | |||
2000 | __storm_memset_struct(bp, addr, size, (u32 *)mac_filters); | ||
2001 | } | ||
2002 | |||
2003 | static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, | ||
2004 | struct bnx2x_rx_mode_ramrod_params *p) | ||
2005 | { | ||
2006 | /* update the bp MAC filter structure */ | ||
2007 | u32 mask = (1 << p->cl_id); | ||
2008 | |||
2009 | struct tstorm_eth_mac_filter_config *mac_filters = | ||
2010 | (struct tstorm_eth_mac_filter_config *)p->rdata; | ||
2011 | |||
2012 | /* initial setting is drop-all */ | ||
2013 | u8 drop_all_ucast = 1, drop_all_mcast = 1; | ||
2014 | u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; | ||
2015 | u8 unmatched_unicast = 0; | ||
2016 | |||
2017 | /* In E1x we only take into account the Rx accept flags, since Tx switching | ||
2018 | * isn't enabled. */ | ||
2019 | if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) | ||
266 | /* accept matched ucast */ | 2020 | /* accept matched ucast */ |
267 | drop_all_ucast = 0; | 2021 | drop_all_ucast = 0; |
268 | } | 2022 | |
269 | if (filters & BNX2X_ACCEPT_MULTICAST) | 2023 | if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags)) |
270 | /* accept matched mcast */ | 2024 | /* accept matched mcast */ |
271 | drop_all_mcast = 0; | 2025 | drop_all_mcast = 0; |
272 | 2026 | ||
273 | if (filters & BNX2X_ACCEPT_ALL_UNICAST) { | 2027 | if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) { |
274 | /* accept all mcast */ | 2028 | /* accept all mcast */ |
275 | drop_all_ucast = 0; | 2029 | drop_all_ucast = 0; |
276 | accp_all_ucast = 1; | 2030 | accp_all_ucast = 1; |
277 | } | 2031 | } |
278 | if (filters & BNX2X_ACCEPT_ALL_MULTICAST) { | 2032 | if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) { |
279 | /* accept all mcast */ | 2033 | /* accept all mcast */ |
280 | drop_all_mcast = 0; | 2034 | drop_all_mcast = 0; |
281 | accp_all_mcast = 1; | 2035 | accp_all_mcast = 1; |
282 | } | 2036 | } |
283 | if (filters & BNX2X_ACCEPT_BROADCAST) { | 2037 | if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags)) |
284 | /* accept (all) bcast */ | 2038 | /* accept (all) bcast */ |
285 | drop_all_bcast = 0; | ||
286 | accp_all_bcast = 1; | 2039 | accp_all_bcast = 1; |
2040 | if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags)) | ||
2041 | /* accept unmatched unicasts */ | ||
2042 | unmatched_unicast = 1; | ||
2043 | |||
2044 | mac_filters->ucast_drop_all = drop_all_ucast ? | ||
2045 | mac_filters->ucast_drop_all | mask : | ||
2046 | mac_filters->ucast_drop_all & ~mask; | ||
2047 | |||
2048 | mac_filters->mcast_drop_all = drop_all_mcast ? | ||
2049 | mac_filters->mcast_drop_all | mask : | ||
2050 | mac_filters->mcast_drop_all & ~mask; | ||
2051 | |||
2052 | mac_filters->ucast_accept_all = accp_all_ucast ? | ||
2053 | mac_filters->ucast_accept_all | mask : | ||
2054 | mac_filters->ucast_accept_all & ~mask; | ||
2055 | |||
2056 | mac_filters->mcast_accept_all = accp_all_mcast ? | ||
2057 | mac_filters->mcast_accept_all | mask : | ||
2058 | mac_filters->mcast_accept_all & ~mask; | ||
2059 | |||
2060 | mac_filters->bcast_accept_all = accp_all_bcast ? | ||
2061 | mac_filters->bcast_accept_all | mask : | ||
2062 | mac_filters->bcast_accept_all & ~mask; | ||
2063 | |||
2064 | mac_filters->unmatched_unicast = unmatched_unicast ? | ||
2065 | mac_filters->unmatched_unicast | mask : | ||
2066 | mac_filters->unmatched_unicast & ~mask; | ||
2067 | |||
2068 | DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n" | ||
2069 | "accp_mcast 0x%x\naccp_bcast 0x%x\n", | ||
2070 | mac_filters->ucast_drop_all, | ||
2071 | mac_filters->mcast_drop_all, | ||
2072 | mac_filters->ucast_accept_all, | ||
2073 | mac_filters->mcast_accept_all, | ||
2074 | mac_filters->bcast_accept_all); | ||
2075 | |||
2076 | /* write the MAC filter structure*/ | ||
2077 | __storm_memset_mac_filters(bp, mac_filters, p->func_id); | ||
2078 | |||
2079 | /* The operation is completed */ | ||
2080 | clear_bit(p->state, p->pstate); | ||
2081 | smp_mb__after_clear_bit(); | ||
2082 | |||
2083 | return 0; | ||
2084 | } | ||
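Every field of tstorm_eth_mac_filter_config is a per-client bitmap, and the chain of ternaries above either sets or clears the bit owned by this client. The same update factored into a helper, shown only as a sketch (the driver keeps the open-coded form):

#include <stdint.h>

/* Set or clear this client's bit in a per-client filter bitmap. */
static uint32_t update_client_bit(uint32_t field, uint32_t client_mask, int on)
{
        return on ? (field | client_mask) : (field & ~client_mask);
}

/* e.g.: mac_filters->ucast_drop_all =
 *           update_client_bit(mac_filters->ucast_drop_all,
 *                             1u << cl_id, drop_all_ucast);
 */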
2085 | |||
2086 | /* Setup ramrod data */ | ||
2087 | static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid, | ||
2088 | struct eth_classify_header *hdr, | ||
2089 | u8 rule_cnt) | ||
2090 | { | ||
2091 | hdr->echo = cid; | ||
2092 | hdr->rule_cnt = rule_cnt; | ||
2093 | } | ||
2094 | |||
2095 | static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, | ||
2096 | unsigned long accept_flags, | ||
2097 | struct eth_filter_rules_cmd *cmd, | ||
2098 | bool clear_accept_all) | ||
2099 | { | ||
2100 | u16 state; | ||
2101 | |||
2102 | /* start with 'drop-all' */ | ||
2103 | state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | | ||
2104 | ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; | ||
2105 | |||
2106 | if (accept_flags) { | ||
2107 | if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags)) | ||
2108 | state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; | ||
2109 | |||
2110 | if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags)) | ||
2111 | state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; | ||
2112 | |||
2113 | if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) { | ||
2114 | state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; | ||
2115 | state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; | ||
2116 | } | ||
2117 | |||
2118 | if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) { | ||
2119 | state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; | ||
2120 | state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; | ||
2121 | } | ||
2122 | if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags)) | ||
2123 | state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; | ||
2124 | |||
2125 | if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) { | ||
2126 | state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; | ||
2127 | state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; | ||
2128 | } | ||
2129 | if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags)) | ||
2130 | state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; | ||
2131 | } | ||
2132 | |||
2133 | /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ | ||
2134 | if (clear_accept_all) { | ||
2135 | state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; | ||
2136 | state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; | ||
2137 | state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; | ||
2138 | state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; | ||
2139 | } | ||
2140 | |||
2141 | cmd->state = cpu_to_le16(state); | ||
2142 | |||
2143 | } | ||
2144 | |||
2145 | static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, | ||
2146 | struct bnx2x_rx_mode_ramrod_params *p) | ||
2147 | { | ||
2148 | struct eth_filter_rules_ramrod_data *data = p->rdata; | ||
2149 | int rc; | ||
2150 | u8 rule_idx = 0; | ||
2151 | |||
2152 | /* Reset the ramrod data buffer */ | ||
2153 | memset(data, 0, sizeof(*data)); | ||
2154 | |||
2155 | /* Setup ramrod data */ | ||
2156 | |||
2157 | /* Tx (internal switching) */ | ||
2158 | if (test_bit(RAMROD_TX, &p->ramrod_flags)) { | ||
2159 | data->rules[rule_idx].client_id = p->cl_id; | ||
2160 | data->rules[rule_idx].func_id = p->func_id; | ||
2161 | |||
2162 | data->rules[rule_idx].cmd_general_data = | ||
2163 | ETH_FILTER_RULES_CMD_TX_CMD; | ||
2164 | |||
2165 | bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, | ||
2166 | &(data->rules[rule_idx++]), false); | ||
2167 | } | ||
2168 | |||
2169 | /* Rx */ | ||
2170 | if (test_bit(RAMROD_RX, &p->ramrod_flags)) { | ||
2171 | data->rules[rule_idx].client_id = p->cl_id; | ||
2172 | data->rules[rule_idx].func_id = p->func_id; | ||
2173 | |||
2174 | data->rules[rule_idx].cmd_general_data = | ||
2175 | ETH_FILTER_RULES_CMD_RX_CMD; | ||
2176 | |||
2177 | bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, | ||
2178 | &(data->rules[rule_idx++]), false); | ||
2179 | } | ||
2180 | |||
2181 | |||
2182 | /* | ||
2183 | * If FCoE Queue configuration has been requested configure the Rx and | ||
2184 | * internal switching modes for this queue in separate rules. | ||
2185 | * | ||
2186 | * The FCoE queue should never be set to ACCEPT_ALL packets of any sort: | ||
2187 | * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED. | ||
2188 | */ | ||
2189 | if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) { | ||
2190 | /* Tx (internal switching) */ | ||
2191 | if (test_bit(RAMROD_TX, &p->ramrod_flags)) { | ||
2192 | data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); | ||
2193 | data->rules[rule_idx].func_id = p->func_id; | ||
2194 | |||
2195 | data->rules[rule_idx].cmd_general_data = | ||
2196 | ETH_FILTER_RULES_CMD_TX_CMD; | ||
2197 | |||
2198 | bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, | ||
2199 | &(data->rules[rule_idx++]), | ||
2200 | true); | ||
2201 | } | ||
2202 | |||
2203 | /* Rx */ | ||
2204 | if (test_bit(RAMROD_RX, &p->ramrod_flags)) { | ||
2205 | data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); | ||
2206 | data->rules[rule_idx].func_id = p->func_id; | ||
2207 | |||
2208 | data->rules[rule_idx].cmd_general_data = | ||
2209 | ETH_FILTER_RULES_CMD_RX_CMD; | ||
2210 | |||
2211 | bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, | ||
2212 | &(data->rules[rule_idx++]), | ||
2213 | true); | ||
2214 | } | ||
287 | } | 2215 | } |
288 | 2216 | ||
289 | bp->mac_filters.ucast_drop_all = drop_all_ucast ? | 2217 | /* |
290 | bp->mac_filters.ucast_drop_all | mask : | 2218 | * Set the ramrod header (most importantly - number of rules to |
291 | bp->mac_filters.ucast_drop_all & ~mask; | 2219 | * configure). |
2220 | */ | ||
2221 | bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); | ||
2222 | |||
2223 | DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, " | ||
2224 | "tx_accept_flags 0x%lx\n", | ||
2225 | data->header.rule_cnt, p->rx_accept_flags, | ||
2226 | p->tx_accept_flags); | ||
2227 | |||
2228 | /* Commit writes towards the memory before sending a ramrod */ | ||
2229 | mb(); | ||
2230 | |||
2231 | /* Send a ramrod */ | ||
2232 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid, | ||
2233 | U64_HI(p->rdata_mapping), | ||
2234 | U64_LO(p->rdata_mapping), | ||
2235 | ETH_CONNECTION_TYPE); | ||
2236 | if (rc) | ||
2237 | return rc; | ||
292 | 2238 | ||
293 | bp->mac_filters.mcast_drop_all = drop_all_mcast ? | 2239 | /* Ramrod completion is pending */ |
294 | bp->mac_filters.mcast_drop_all | mask : | 2240 | return 1; |
295 | bp->mac_filters.mcast_drop_all & ~mask; | 2241 | } |
2242 | |||
2243 | static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp, | ||
2244 | struct bnx2x_rx_mode_ramrod_params *p) | ||
2245 | { | ||
2246 | return bnx2x_state_wait(bp, p->state, p->pstate); | ||
2247 | } | ||
2248 | |||
2249 | static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp, | ||
2250 | struct bnx2x_rx_mode_ramrod_params *p) | ||
2251 | { | ||
2252 | /* Do nothing */ | ||
2253 | return 0; | ||
2254 | } | ||
2255 | |||
2256 | int bnx2x_config_rx_mode(struct bnx2x *bp, | ||
2257 | struct bnx2x_rx_mode_ramrod_params *p) | ||
2258 | { | ||
2259 | int rc; | ||
2260 | |||
2261 | /* Configure the new classification in the chip */ | ||
2262 | rc = p->rx_mode_obj->config_rx_mode(bp, p); | ||
2263 | if (rc < 0) | ||
2264 | return rc; | ||
2265 | |||
2266 | /* Wait for a ramrod completion if it was requested */ | ||
2267 | if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { | ||
2268 | rc = p->rx_mode_obj->wait_comp(bp, p); | ||
2269 | if (rc) | ||
2270 | return rc; | ||
2271 | } | ||
296 | 2272 | ||
297 | bp->mac_filters.bcast_drop_all = drop_all_bcast ? | 2273 | return rc; |
298 | bp->mac_filters.bcast_drop_all | mask : | 2274 | } |
299 | bp->mac_filters.bcast_drop_all & ~mask; | ||
300 | 2275 | ||
301 | bp->mac_filters.ucast_accept_all = accp_all_ucast ? | 2276 | void bnx2x_init_rx_mode_obj(struct bnx2x *bp, |
302 | bp->mac_filters.ucast_accept_all | mask : | 2277 | struct bnx2x_rx_mode_obj *o) |
303 | bp->mac_filters.ucast_accept_all & ~mask; | 2278 | { |
2279 | if (CHIP_IS_E1x(bp)) { | ||
2280 | o->wait_comp = bnx2x_empty_rx_mode_wait; | ||
2281 | o->config_rx_mode = bnx2x_set_rx_mode_e1x; | ||
2282 | } else { | ||
2283 | o->wait_comp = bnx2x_wait_rx_mode_comp_e2; | ||
2284 | o->config_rx_mode = bnx2x_set_rx_mode_e2; | ||
2285 | } | ||
2286 | } | ||
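A hedged usage sketch for the rx_mode object initialized above: set the accept flags, request both Rx and Tx (internal switching) rules and wait for the ramrod to complete. The wiring of cid, cl_id, rdata and the state bits belongs to the caller's queue/function setup code, which is outside this file:

/* Sketch only: accept matched unicast/multicast plus broadcast. The
 * identifiers are the ones used by this file; everything else in the
 * ramrod params is assumed to be filled in by the real caller. */
static int example_set_rx_mode(struct bnx2x *bp,
                               struct bnx2x_rx_mode_ramrod_params *p)
{
        __set_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags);
        __set_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags);
        __set_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags);

        /* Mirror the Rx policy on the Tx (internal switching) side */
        p->tx_accept_flags = p->rx_accept_flags;

        __set_bit(RAMROD_RX, &p->ramrod_flags);
        __set_bit(RAMROD_TX, &p->ramrod_flags);
        __set_bit(RAMROD_COMP_WAIT, &p->ramrod_flags);

        return bnx2x_config_rx_mode(bp, p);
}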
304 | 2287 | ||
305 | bp->mac_filters.mcast_accept_all = accp_all_mcast ? | 2288 | /********************* Multicast verbs: SET, CLEAR ****************************/ |
306 | bp->mac_filters.mcast_accept_all | mask : | 2289 | static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac) |
307 | bp->mac_filters.mcast_accept_all & ~mask; | 2290 | { |
2291 | return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff; | ||
2292 | } | ||
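The approximate multicast match folds every MAC into one of 256 bins using the top byte of its CRC32C. A user-space sketch of the same computation, assuming crc32c_le() behaves as a plain reflected CRC-32C seeded with the given value and with no final inversion:

#include <stdint.h>
#include <stddef.h>

/* Bitwise CRC32C (Castagnoli, reflected polynomial 0x82F63B78). */
static uint32_t crc32c_sw(uint32_t crc, const uint8_t *buf, size_t len)
{
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78u : 0);
        }
        return crc;
}

/* Same bin selection idea as bnx2x_mcast_bin_from_mac(): one of 256 bins. */
static uint8_t mcast_bin(const uint8_t mac[6])
{
        return (crc32c_sw(0, mac, 6) >> 24) & 0xff;
}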
308 | 2293 | ||
309 | bp->mac_filters.bcast_accept_all = accp_all_bcast ? | 2294 | struct bnx2x_mcast_mac_elem { |
310 | bp->mac_filters.bcast_accept_all | mask : | 2295 | struct list_head link; |
311 | bp->mac_filters.bcast_accept_all & ~mask; | 2296 | u8 mac[ETH_ALEN]; |
2297 | u8 pad[2]; /* For a natural alignment of the following buffer */ | ||
2298 | }; | ||
2299 | |||
2300 | struct bnx2x_pending_mcast_cmd { | ||
2301 | struct list_head link; | ||
2302 | int type; /* BNX2X_MCAST_CMD_X */ | ||
2303 | union { | ||
2304 | struct list_head macs_head; | ||
2305 | u32 macs_num; /* Needed for DEL command */ | ||
2306 | int next_bin; /* Needed for RESTORE flow with aprox match */ | ||
2307 | } data; | ||
2308 | |||
2309 | bool done; /* set to true when the command has been handled; | ||
2310 | * used in practice for 57712 handling only, where one pending | ||
2311 | * command may be handled in several operations. Since on | ||
2312 | * other chips every operation is completed in a | ||
2313 | * single ramrod, there is no need for this field there. | ||
2314 | */ | ||
2315 | }; | ||
2316 | |||
2317 | static int bnx2x_mcast_wait(struct bnx2x *bp, | ||
2318 | struct bnx2x_mcast_obj *o) | ||
2319 | { | ||
2320 | if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) || | ||
2321 | o->raw.wait_comp(bp, &o->raw)) | ||
2322 | return -EBUSY; | ||
312 | 2323 | ||
313 | bp->mac_filters.unmatched_unicast = unmatched_unicast ? | 2324 | return 0; |
314 | bp->mac_filters.unmatched_unicast | mask : | ||
315 | bp->mac_filters.unmatched_unicast & ~mask; | ||
316 | } | 2325 | } |
317 | 2326 | ||
318 | void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | 2327 | static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, |
2328 | struct bnx2x_mcast_obj *o, | ||
2329 | struct bnx2x_mcast_ramrod_params *p, | ||
2330 | int cmd) | ||
319 | { | 2331 | { |
320 | int mode = bp->rx_mode; | 2332 | int total_sz; |
321 | int port = BP_PORT(bp); | 2333 | struct bnx2x_pending_mcast_cmd *new_cmd; |
322 | u16 cl_id; | 2334 | struct bnx2x_mcast_mac_elem *cur_mac = NULL; |
323 | u32 def_q_filters = 0; | 2335 | struct bnx2x_mcast_list_elem *pos; |
2336 | int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ? | ||
2337 | p->mcast_list_len : 0); | ||
2338 | |||
2339 | /* If the command is empty ("handle pending commands only"), break */ | ||
2340 | if (!p->mcast_list_len) | ||
2341 | return 0; | ||
2342 | |||
2343 | total_sz = sizeof(*new_cmd) + | ||
2344 | macs_list_len * sizeof(struct bnx2x_mcast_mac_elem); | ||
2345 | |||
2346 | /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ | ||
2347 | new_cmd = kzalloc(total_sz, GFP_ATOMIC); | ||
2348 | |||
2349 | if (!new_cmd) | ||
2350 | return -ENOMEM; | ||
324 | 2351 | ||
325 | /* All but management unicast packets should pass to the host as well */ | 2352 | DP(BNX2X_MSG_SP, "About to enqueue a new %d command. " |
326 | u32 llh_mask = | 2353 | "macs_list_len=%d\n", cmd, macs_list_len); |
327 | NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST | | ||
328 | NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST | | ||
329 | NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN | | ||
330 | NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN; | ||
331 | 2354 | ||
332 | switch (mode) { | 2355 | INIT_LIST_HEAD(&new_cmd->data.macs_head); |
333 | case BNX2X_RX_MODE_NONE: /* no Rx */ | 2356 | |
334 | def_q_filters = BNX2X_ACCEPT_NONE; | 2357 | new_cmd->type = cmd; |
335 | #ifdef BCM_CNIC | 2358 | new_cmd->done = false; |
336 | if (!NO_FCOE(bp)) { | 2359 | |
337 | cl_id = bnx2x_fcoe(bp, cl_id); | 2360 | switch (cmd) { |
338 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE); | 2361 | case BNX2X_MCAST_CMD_ADD: |
2362 | cur_mac = (struct bnx2x_mcast_mac_elem *) | ||
2363 | ((u8 *)new_cmd + sizeof(*new_cmd)); | ||
2364 | |||
2365 | /* Push the MACs of the current command into the pending command | ||
2366 | * MACs list: FIFO | ||
2367 | */ | ||
2368 | list_for_each_entry(pos, &p->mcast_list, link) { | ||
2369 | memcpy(cur_mac->mac, pos->mac, ETH_ALEN); | ||
2370 | list_add_tail(&cur_mac->link, &new_cmd->data.macs_head); | ||
2371 | cur_mac++; | ||
339 | } | 2372 | } |
340 | #endif | 2373 | |
341 | break; | 2374 | break; |
342 | 2375 | ||
343 | case BNX2X_RX_MODE_NORMAL: | 2376 | case BNX2X_MCAST_CMD_DEL: |
344 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | | 2377 | new_cmd->data.macs_num = p->mcast_list_len; |
345 | BNX2X_ACCEPT_MULTICAST; | ||
346 | #ifdef BCM_CNIC | ||
347 | if (!NO_FCOE(bp)) { | ||
348 | cl_id = bnx2x_fcoe(bp, cl_id); | ||
349 | bnx2x_rxq_set_mac_filters(bp, cl_id, | ||
350 | BNX2X_ACCEPT_UNICAST | | ||
351 | BNX2X_ACCEPT_MULTICAST); | ||
352 | } | ||
353 | #endif | ||
354 | break; | 2378 | break; |
355 | 2379 | ||
356 | case BNX2X_RX_MODE_ALLMULTI: | 2380 | case BNX2X_MCAST_CMD_RESTORE: |
357 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | | 2381 | new_cmd->data.next_bin = 0; |
358 | BNX2X_ACCEPT_ALL_MULTICAST; | 2382 | break; |
359 | #ifdef BCM_CNIC | 2383 | |
360 | /* | 2384 | default: |
361 | * Prevent duplication of multicast packets by configuring FCoE | 2385 | BNX2X_ERR("Unknown command: %d\n", cmd); |
362 | * L2 Client to receive only matched unicast frames. | 2386 | return -EINVAL; |
2387 | } | ||
2388 | |||
2389 | /* Push the new pending command to the tail of the pending list: FIFO */ | ||
2390 | list_add_tail(&new_cmd->link, &o->pending_cmds_head); | ||
2391 | |||
2392 | o->set_sched(o); | ||
2393 | |||
2394 | return 1; | ||
2395 | } | ||
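bnx2x_mcast_enqueue_cmd() above allocates the command header and its MAC array in one kzalloc(); the element cursor starts right past the struct and is advanced per copied MAC. A standalone sketch of that single-allocation layout (user-space names, not the driver's):

#include <stdlib.h>
#include <string.h>

struct mac_elem {
        unsigned char mac[6];
};

struct pending_cmd {
        int type;
        int nmacs;
        /* MAC array follows the struct in the same allocation */
};

static struct pending_cmd *alloc_cmd(int type, const unsigned char (*macs)[6],
                                     int nmacs)
{
        struct pending_cmd *cmd;
        struct mac_elem *cur;
        int i;

        cmd = calloc(1, sizeof(*cmd) + nmacs * sizeof(struct mac_elem));
        if (!cmd)
                return NULL;

        cmd->type = type;
        cmd->nmacs = nmacs;

        /* Cursor starts right past the header, then advances per MAC */
        cur = (struct mac_elem *)((unsigned char *)cmd + sizeof(*cmd));
        for (i = 0; i < nmacs; i++, cur++)
                memcpy(cur->mac, macs[i], 6);

        return cmd;
}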
2396 | |||
2397 | /** | ||
2398 | * bnx2x_mcast_get_next_bin - get the next set bin (index) | ||
2399 | * | ||
2400 | * @o:		multicast object | ||
2401 | * @last: index to start looking from (inclusive) | ||
2402 | * | ||
2403 | * returns the next found (set) bin or a negative value if none is found. | ||
2404 | */ | ||
2405 | static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last) | ||
2406 | { | ||
2407 | int i, j, inner_start = last % BIT_VEC64_ELEM_SZ; | ||
2408 | |||
2409 | for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) { | ||
2410 | if (o->registry.aprox_match.vec[i]) | ||
2411 | for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) { | ||
2412 | int cur_bit = j + BIT_VEC64_ELEM_SZ * i; | ||
2413 | if (BIT_VEC64_TEST_BIT(o->registry.aprox_match. | ||
2414 | vec, cur_bit)) { | ||
2415 | return cur_bit; | ||
2416 | } | ||
2417 | } | ||
2418 | inner_start = 0; | ||
2419 | } | ||
2420 | |||
2421 | /* None found */ | ||
2422 | return -1; | ||
2423 | } | ||
2424 | |||
2425 | /** | ||
2426 | * bnx2x_mcast_clear_first_bin - find the first set bin and clear it | ||
2427 | * | ||
2428 | * @o:		multicast object | ||
2429 | * | ||
2430 | * returns the index of the found bin or -1 if none is found | ||
2431 | */ | ||
2432 | static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o) | ||
2433 | { | ||
2434 | int cur_bit = bnx2x_mcast_get_next_bin(o, 0); | ||
2435 | |||
2436 | if (cur_bit >= 0) | ||
2437 | BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); | ||
2438 | |||
2439 | return cur_bit; | ||
2440 | } | ||
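Both helpers walk a vector of 64-bit words and treat each bit as a bin. A standalone sketch of the "next set bin at or after an index" scan, with the same contract as bnx2x_mcast_get_next_bin() (returns -1 when nothing is set):

#include <stdint.h>

#define ELEM_SZ 64

/* Find the next set bit at or after 'last' in a vector of 64-bit words;
 * returns the bit index or -1 if no bit is set from 'last' onwards. */
static int next_set_bin(const uint64_t *vec, int nelems, int last)
{
        int i, j, inner_start = last % ELEM_SZ;

        for (i = last / ELEM_SZ; i < nelems; i++) {
                if (vec[i])
                        for (j = inner_start; j < ELEM_SZ; j++)
                                if (vec[i] & (1ULL << j))
                                        return i * ELEM_SZ + j;
                inner_start = 0;
        }
        return -1;
}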
2441 | |||
2442 | static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o) | ||
2443 | { | ||
2444 | struct bnx2x_raw_obj *raw = &o->raw; | ||
2445 | u8 rx_tx_flag = 0; | ||
2446 | |||
2447 | if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || | ||
2448 | (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) | ||
2449 | rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; | ||
2450 | |||
2451 | if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || | ||
2452 | (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) | ||
2453 | rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; | ||
2454 | |||
2455 | return rx_tx_flag; | ||
2456 | } | ||
2457 | |||
2458 | static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, | ||
2459 | struct bnx2x_mcast_obj *o, int idx, | ||
2460 | union bnx2x_mcast_config_data *cfg_data, | ||
2461 | int cmd) | ||
2462 | { | ||
2463 | struct bnx2x_raw_obj *r = &o->raw; | ||
2464 | struct eth_multicast_rules_ramrod_data *data = | ||
2465 | (struct eth_multicast_rules_ramrod_data *)(r->rdata); | ||
2466 | u8 func_id = r->func_id; | ||
2467 | u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o); | ||
2468 | int bin; | ||
2469 | |||
2470 | if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) | ||
2471 | rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; | ||
2472 | |||
2473 | data->rules[idx].cmd_general_data |= rx_tx_add_flag; | ||
2474 | |||
2475 | /* Get a bin and update a bins' vector */ | ||
2476 | switch (cmd) { | ||
2477 | case BNX2X_MCAST_CMD_ADD: | ||
2478 | bin = bnx2x_mcast_bin_from_mac(cfg_data->mac); | ||
2479 | BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); | ||
2480 | break; | ||
2481 | |||
2482 | case BNX2X_MCAST_CMD_DEL: | ||
2483 | /* If there were no more bins to clear | ||
2484 | * (bnx2x_mcast_clear_first_bin() returns -1) then we would | ||
2485 | * clear any (0xff) bin. | ||
2486 | * See bnx2x_mcast_validate_e2() for explanation when it may | ||
2487 | * happen. | ||
363 | */ | 2488 | */ |
364 | if (!NO_FCOE(bp)) { | 2489 | bin = bnx2x_mcast_clear_first_bin(o); |
365 | cl_id = bnx2x_fcoe(bp, cl_id); | ||
366 | bnx2x_rxq_set_mac_filters(bp, cl_id, | ||
367 | BNX2X_ACCEPT_UNICAST); | ||
368 | } | ||
369 | #endif | ||
370 | break; | 2490 | break; |
371 | 2491 | ||
372 | case BNX2X_RX_MODE_PROMISC: | 2492 | case BNX2X_MCAST_CMD_RESTORE: |
373 | def_q_filters |= BNX2X_PROMISCUOUS_MODE; | 2493 | bin = cfg_data->bin; |
374 | #ifdef BCM_CNIC | 2494 | break; |
375 | /* | 2495 | |
376 | * Prevent packets duplication by configuring DROP_ALL for FCoE | 2496 | default: |
377 | * L2 Client. | 2497 | BNX2X_ERR("Unknown command: %d\n", cmd); |
2498 | return; | ||
2499 | } | ||
2500 | |||
2501 | DP(BNX2X_MSG_SP, "%s bin %d\n", | ||
2502 | ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ? | ||
2503 | "Setting" : "Clearing"), bin); | ||
2504 | |||
2505 | data->rules[idx].bin_id = (u8)bin; | ||
2506 | data->rules[idx].func_id = func_id; | ||
2507 | data->rules[idx].engine_id = o->engine_id; | ||
2508 | } | ||
2509 | |||
2510 | /** | ||
2511 | * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry | ||
2512 | * | ||
2513 | * @bp: device handle | ||
2514 | * @o:		multicast object | ||
2515 | * @start_bin: index in the registry to start from (inclusive) | ||
2516 | * @rdata_idx: index in the ramrod data to start from | ||
2517 | * | ||
2518 | * returns last handled bin index or -1 if all bins have been handled | ||
2519 | */ | ||
2520 | static inline int bnx2x_mcast_handle_restore_cmd_e2( | ||
2521 | struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin, | ||
2522 | int *rdata_idx) | ||
2523 | { | ||
2524 | int cur_bin, cnt = *rdata_idx; | ||
2525 | union bnx2x_mcast_config_data cfg_data = {0}; | ||
2526 | |||
2527 | /* go through the registry and configure the bins from it */ | ||
2528 | for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0; | ||
2529 | cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) { | ||
2530 | |||
2531 | cfg_data.bin = (u8)cur_bin; | ||
2532 | o->set_one_rule(bp, o, cnt, &cfg_data, | ||
2533 | BNX2X_MCAST_CMD_RESTORE); | ||
2534 | |||
2535 | cnt++; | ||
2536 | |||
2537 | DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin); | ||
2538 | |||
2539 | /* Break if we reached the maximum number | ||
2540 | * of rules. | ||
2541 | */ | ||
2542 | if (cnt >= o->max_cmd_len) | ||
2543 | break; | ||
2544 | } | ||
2545 | |||
2546 | *rdata_idx = cnt; | ||
2547 | |||
2548 | return cur_bin; | ||
2549 | } | ||
2550 | |||
2551 | static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, | ||
2552 | struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, | ||
2553 | int *line_idx) | ||
2554 | { | ||
2555 | struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; | ||
2556 | int cnt = *line_idx; | ||
2557 | union bnx2x_mcast_config_data cfg_data = {0}; | ||
2558 | |||
2559 | list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, | ||
2560 | link) { | ||
2561 | |||
2562 | cfg_data.mac = &pmac_pos->mac[0]; | ||
2563 | o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); | ||
2564 | |||
2565 | cnt++; | ||
2566 | |||
2567 | DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT | ||
2568 | " mcast MAC\n", | ||
2569 | BNX2X_MAC_PRN_LIST(pmac_pos->mac)); | ||
2570 | |||
2571 | list_del(&pmac_pos->link); | ||
2572 | |||
2573 | /* Break if we reached the maximum number | ||
2574 | * of rules. | ||
2575 | */ | ||
2576 | if (cnt >= o->max_cmd_len) | ||
2577 | break; | ||
2578 | } | ||
2579 | |||
2580 | *line_idx = cnt; | ||
2581 | |||
2582 | /* if no more MACs to configure - we are done */ | ||
2583 | if (list_empty(&cmd_pos->data.macs_head)) | ||
2584 | cmd_pos->done = true; | ||
2585 | } | ||
2586 | |||
2587 | static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp, | ||
2588 | struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, | ||
2589 | int *line_idx) | ||
2590 | { | ||
2591 | int cnt = *line_idx; | ||
2592 | |||
2593 | while (cmd_pos->data.macs_num) { | ||
2594 | o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type); | ||
2595 | |||
2596 | cnt++; | ||
2597 | |||
2598 | cmd_pos->data.macs_num--; | ||
2599 | |||
2600 | DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n", | ||
2601 | cmd_pos->data.macs_num, cnt); | ||
2602 | |||
2603 | /* Break if we reached the maximum | ||
2604 | * number of rules. | ||
2605 | */ | ||
2606 | if (cnt >= o->max_cmd_len) | ||
2607 | break; | ||
2608 | } | ||
2609 | |||
2610 | *line_idx = cnt; | ||
2611 | |||
2612 | /* If we cleared all bins - we are done */ | ||
2613 | if (!cmd_pos->data.macs_num) | ||
2614 | cmd_pos->done = true; | ||
2615 | } | ||
2616 | |||
2617 | static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp, | ||
2618 | struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, | ||
2619 | int *line_idx) | ||
2620 | { | ||
2621 | cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin, | ||
2622 | line_idx); | ||
2623 | |||
2624 | if (cmd_pos->data.next_bin < 0) | ||
2625 | /* If o->set_restore returned -1 we are done */ | ||
2626 | cmd_pos->done = true; | ||
2627 | else | ||
2628 | /* Start from the next bin next time */ | ||
2629 | cmd_pos->data.next_bin++; | ||
2630 | } | ||
2631 | |||
2632 | static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, | ||
2633 | struct bnx2x_mcast_ramrod_params *p) | ||
2634 | { | ||
2635 | struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n; | ||
2636 | int cnt = 0; | ||
2637 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
2638 | |||
2639 | list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head, | ||
2640 | link) { | ||
2641 | switch (cmd_pos->type) { | ||
2642 | case BNX2X_MCAST_CMD_ADD: | ||
2643 | bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt); | ||
2644 | break; | ||
2645 | |||
2646 | case BNX2X_MCAST_CMD_DEL: | ||
2647 | bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt); | ||
2648 | break; | ||
2649 | |||
2650 | case BNX2X_MCAST_CMD_RESTORE: | ||
2651 | bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos, | ||
2652 | &cnt); | ||
2653 | break; | ||
2654 | |||
2655 | default: | ||
2656 | BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); | ||
2657 | return -EINVAL; | ||
2658 | } | ||
2659 | |||
2660 | /* If the command has been completed - remove it from the list | ||
2661 | * and free the memory | ||
378 | */ | 2662 | */ |
379 | if (!NO_FCOE(bp)) { | 2663 | if (cmd_pos->done) { |
380 | cl_id = bnx2x_fcoe(bp, cl_id); | 2664 | list_del(&cmd_pos->link); |
381 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE); | 2665 | kfree(cmd_pos); |
382 | } | 2666 | } |
383 | #endif | 2667 | |
384 | /* pass management unicast packets as well */ | 2668 | /* Break if we reached the maximum number of rules */ |
385 | llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; | 2669 | if (cnt >= o->max_cmd_len) |
2670 | break; | ||
2671 | } | ||
2672 | |||
2673 | return cnt; | ||
2674 | } | ||
2675 | |||
2676 | static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp, | ||
2677 | struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, | ||
2678 | int *line_idx) | ||
2679 | { | ||
2680 | struct bnx2x_mcast_list_elem *mlist_pos; | ||
2681 | union bnx2x_mcast_config_data cfg_data = {0}; | ||
2682 | int cnt = *line_idx; | ||
2683 | |||
2684 | list_for_each_entry(mlist_pos, &p->mcast_list, link) { | ||
2685 | cfg_data.mac = mlist_pos->mac; | ||
2686 | o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD); | ||
2687 | |||
2688 | cnt++; | ||
2689 | |||
2690 | DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT | ||
2691 | " mcast MAC\n", | ||
2692 | BNX2X_MAC_PRN_LIST(mlist_pos->mac)); | ||
2693 | } | ||
2694 | |||
2695 | *line_idx = cnt; | ||
2696 | } | ||
2697 | |||
2698 | static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp, | ||
2699 | struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, | ||
2700 | int *line_idx) | ||
2701 | { | ||
2702 | int cnt = *line_idx, i; | ||
2703 | |||
2704 | for (i = 0; i < p->mcast_list_len; i++) { | ||
2705 | o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL); | ||
2706 | |||
2707 | cnt++; | ||
2708 | |||
2709 | DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n", | ||
2710 | p->mcast_list_len - i - 1); | ||
2711 | } | ||
2712 | |||
2713 | *line_idx = cnt; | ||
2714 | } | ||
2715 | |||
2716 | /** | ||
2717 | * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command | ||
2718 | * | ||
2719 | * @bp: device handle | ||
2720 | * @p:		multicast ramrod parameters | ||
2721 | * @cmd:	command to handle (BNX2X_MCAST_CMD_X) | ||
2722 | * @start_cnt: first line in the ramrod data that may be used | ||
2723 | * | ||
2724 | * This function is called iff there is enough room for the current command in | ||
2725 | * the ramrod data. | ||
2726 | * Returns number of lines filled in the ramrod data in total. | ||
2727 | */ | ||
2728 | static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp, | ||
2729 | struct bnx2x_mcast_ramrod_params *p, int cmd, | ||
2730 | int start_cnt) | ||
2731 | { | ||
2732 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
2733 | int cnt = start_cnt; | ||
2734 | |||
2735 | DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len); | ||
2736 | |||
2737 | switch (cmd) { | ||
2738 | case BNX2X_MCAST_CMD_ADD: | ||
2739 | bnx2x_mcast_hdl_add(bp, o, p, &cnt); | ||
2740 | break; | ||
2741 | |||
2742 | case BNX2X_MCAST_CMD_DEL: | ||
2743 | bnx2x_mcast_hdl_del(bp, o, p, &cnt); | ||
2744 | break; | ||
2745 | |||
2746 | case BNX2X_MCAST_CMD_RESTORE: | ||
2747 | o->hdl_restore(bp, o, 0, &cnt); | ||
386 | break; | 2748 | break; |
387 | 2749 | ||
388 | default: | 2750 | default: |
389 | BNX2X_ERR("BAD rx mode (%d)\n", mode); | 2751 | BNX2X_ERR("Unknown command: %d\n", cmd); |
2752 | return -EINVAL; | ||
2753 | } | ||
2754 | |||
2755 | /* The current command has been handled */ | ||
2756 | p->mcast_list_len = 0; | ||
2757 | |||
2758 | return cnt; | ||
2759 | } | ||
2760 | |||
2761 | static int bnx2x_mcast_validate_e2(struct bnx2x *bp, | ||
2762 | struct bnx2x_mcast_ramrod_params *p, | ||
2763 | int cmd) | ||
2764 | { | ||
2765 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
2766 | int reg_sz = o->get_registry_size(o); | ||
2767 | |||
2768 | switch (cmd) { | ||
2769 | /* DEL command deletes all currently configured MACs */ | ||
2770 | case BNX2X_MCAST_CMD_DEL: | ||
2771 | o->set_registry_size(o, 0); | ||
2772 | /* Don't break */ | ||
2773 | |||
2774 | /* RESTORE command will restore the entire multicast configuration */ | ||
2775 | case BNX2X_MCAST_CMD_RESTORE: | ||
2776 | * Here we set the approximate amount of work to do, which may in | ||
2777 | * fact turn out to be less: some MACs in postponed ADD | ||
2778 | * command(s) scheduled before this command may fall into | ||
2779 | * the same bin, so the actual number of bins set in the | ||
2780 | * registry would be less than we estimate here. See | ||
2781 | * bnx2x_mcast_set_one_rule_e2() for further details. | ||
2782 | */ | ||
2783 | p->mcast_list_len = reg_sz; | ||
2784 | break; | ||
2785 | |||
2786 | case BNX2X_MCAST_CMD_ADD: | ||
2787 | case BNX2X_MCAST_CMD_CONT: | ||
2788 | /* Here we assume that all new MACs will fall into new bins. | ||
2789 | * However we will correct the real registry size after we | ||
2790 | * handle all pending commands. | ||
2791 | */ | ||
2792 | o->set_registry_size(o, reg_sz + p->mcast_list_len); | ||
390 | break; | 2793 | break; |
2794 | |||
2795 | default: | ||
2796 | BNX2X_ERR("Unknown command: %d\n", cmd); | ||
2797 | return -EINVAL; | ||
2798 | |||
391 | } | 2799 | } |
392 | 2800 | ||
393 | cl_id = BP_L_ID(bp); | 2801 | /* Increase the total number of MACs pending to be configured */ |
394 | bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters); | 2802 | o->total_pending_num += p->mcast_list_len; |
395 | 2803 | ||
396 | REG_WR(bp, | 2804 | return 0; |
397 | (port ? NIG_REG_LLH1_BRB1_DRV_MASK : | 2805 | } |
398 | NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask); | ||
399 | 2806 | ||
400 | DP(NETIF_MSG_IFUP, "rx mode %d\n" | 2807 | static void bnx2x_mcast_revert_e2(struct bnx2x *bp, |
401 | "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n" | 2808 | struct bnx2x_mcast_ramrod_params *p, |
402 | "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n" | 2809 | int old_num_bins) |
403 | "unmatched_ucast 0x%x\n", mode, | 2810 | { |
404 | bp->mac_filters.ucast_drop_all, | 2811 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
405 | bp->mac_filters.mcast_drop_all, | ||
406 | bp->mac_filters.bcast_drop_all, | ||
407 | bp->mac_filters.ucast_accept_all, | ||
408 | bp->mac_filters.mcast_accept_all, | ||
409 | bp->mac_filters.bcast_accept_all, | ||
410 | bp->mac_filters.unmatched_unicast | ||
411 | ); | ||
412 | 2812 | ||
413 | storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp)); | 2813 | o->set_registry_size(o, old_num_bins); |
2814 | o->total_pending_num -= p->mcast_list_len; | ||
414 | } | 2815 | } |
415 | 2816 | ||
416 | /* RSS configuration */ | 2817 | /** |
417 | static inline void __storm_memset_dma_mapping(struct bnx2x *bp, | 2818 | * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values |
418 | u32 addr, dma_addr_t mapping) | 2819 | * |
2820 | * @bp: device handle | ||
2821 | * @p:		multicast ramrod parameters | ||
2822 | * @len: number of rules to handle | ||
2823 | */ | ||
2824 | static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp, | ||
2825 | struct bnx2x_mcast_ramrod_params *p, | ||
2826 | u8 len) | ||
419 | { | 2827 | { |
420 | REG_WR(bp, addr, U64_LO(mapping)); | 2828 | struct bnx2x_raw_obj *r = &p->mcast_obj->raw; |
421 | REG_WR(bp, addr + 4, U64_HI(mapping)); | 2829 | struct eth_multicast_rules_ramrod_data *data = |
2830 | (struct eth_multicast_rules_ramrod_data *)(r->rdata); | ||
2831 | |||
2832 | data->header.echo = ((r->cid & BNX2X_SWCID_MASK) | | ||
2833 | (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); | ||
2834 | data->header.rule_cnt = len; | ||
422 | } | 2835 | } |
423 | 2836 | ||
424 | static inline void __storm_fill(struct bnx2x *bp, | 2837 | /** |
425 | u32 addr, size_t size, u32 val) | 2838 | * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins |
2839 | * | ||
2840 | * @bp: device handle | ||
2841 | * @o: | ||
2842 | * | ||
2843 | * Recalculate the actual number of set bins in the registry using Brian | ||
2844 | * Kernighan's algorithm: its execution complexity is proportional to the number of set bins. | ||
2845 | * | ||
2846 | * Returns 0 for compliance with bnx2x_mcast_refresh_registry_e1(). | ||
2847 | */ | ||
2848 | static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp, | ||
2849 | struct bnx2x_mcast_obj *o) | ||
2850 | { | ||
2851 | int i, cnt = 0; | ||
2852 | u64 elem; | ||
2853 | |||
2854 | for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) { | ||
2855 | elem = o->registry.aprox_match.vec[i]; | ||
2856 | for (; elem; cnt++) | ||
2857 | elem &= elem - 1; | ||
2858 | } | ||
2859 | |||
2860 | o->set_registry_size(o, cnt); | ||
2861 | |||
2862 | return 0; | ||
2863 | } | ||
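The registry refresh above counts set bins with Brian Kernighan's trick: clearing the lowest set bit on every iteration, so the loop cost is proportional to the number of set bins rather than the vector width. A tiny standalone equivalent:

#include <stdint.h>

/* Count set bins: 'elem &= elem - 1' clears the lowest set bit, so the
 * inner loop runs once per set bin. */
static int count_set_bins(const uint64_t *vec, int nelems)
{
        int i, cnt = 0;
        uint64_t elem;

        for (i = 0; i < nelems; i++)
                for (elem = vec[i]; elem; cnt++)
                        elem &= elem - 1;

        return cnt;
}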
2864 | |||
2865 | static int bnx2x_mcast_setup_e2(struct bnx2x *bp, | ||
2866 | struct bnx2x_mcast_ramrod_params *p, | ||
2867 | int cmd) | ||
2868 | { | ||
2869 | struct bnx2x_raw_obj *raw = &p->mcast_obj->raw; | ||
2870 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
2871 | struct eth_multicast_rules_ramrod_data *data = | ||
2872 | (struct eth_multicast_rules_ramrod_data *)(raw->rdata); | ||
2873 | int cnt = 0, rc; | ||
2874 | |||
2875 | /* Reset the ramrod data buffer */ | ||
2876 | memset(data, 0, sizeof(*data)); | ||
2877 | |||
2878 | cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p); | ||
2879 | |||
2880 | /* If there are no more pending commands - clear SCHEDULED state */ | ||
2881 | if (list_empty(&o->pending_cmds_head)) | ||
2882 | o->clear_sched(o); | ||
2883 | |||
2884 | /* The below may be true iff there was enough room in ramrod | ||
2885 | * data for all pending commands and for the current | ||
2886 | * command. Otherwise the current command would have been added | ||
2887 | * to the pending commands and p->mcast_list_len would have been | ||
2888 | * zeroed. | ||
2889 | */ | ||
2890 | if (p->mcast_list_len > 0) | ||
2891 | cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt); | ||
2892 | |||
2893 | /* We've pulled out some MACs - update the total number of | ||
2894 | * outstanding. | ||
2895 | */ | ||
2896 | o->total_pending_num -= cnt; | ||
2897 | |||
2898 | /* send a ramrod */ | ||
2899 | WARN_ON(o->total_pending_num < 0); | ||
2900 | WARN_ON(cnt > o->max_cmd_len); | ||
2901 | |||
2902 | bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt); | ||
2903 | |||
2904 | /* Update the registry size if there are no more pending operations. | ||
2905 | * | ||
2906 | * We don't want to change the value of the registry size if there are | ||
2907 | * pending operations because we want it to always be equal to the | ||
2908 | * exact or the approximate number (see bnx2x_mcast_validate_e2()) of | ||
2909 | * set bins after the last requested operation in order to properly | ||
2910 | * evaluate the size of the next DEL/RESTORE operation. | ||
2911 | * | ||
2912 | * Note that we update the registry itself during command(s) handling | ||
2913 | * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we | ||
2914 | * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but | ||
2915 | * with a limited amount of update commands (per MAC/bin) and we don't | ||
2916 | * know in this scope what the actual state of bins configuration is | ||
2917 | * going to be after this ramrod. | ||
2918 | */ | ||
2919 | if (!o->total_pending_num) | ||
2920 | bnx2x_mcast_refresh_registry_e2(bp, o); | ||
2921 | |||
2922 | /* Commit writes towards the memory before sending a ramrod */ | ||
2923 | mb(); | ||
2924 | |||
2925 | /* If CLEAR_ONLY was requested - don't send a ramrod and clear | ||
2926 | * RAMROD_PENDING status immediately. | ||
2927 | */ | ||
2928 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { | ||
2929 | raw->clear_pending(raw); | ||
2930 | return 0; | ||
2931 | } else { | ||
2932 | /* Send a ramrod */ | ||
2933 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES, | ||
2934 | raw->cid, U64_HI(raw->rdata_mapping), | ||
2935 | U64_LO(raw->rdata_mapping), | ||
2936 | ETH_CONNECTION_TYPE); | ||
2937 | if (rc) | ||
2938 | return rc; | ||
2939 | |||
2940 | /* Ramrod completion is pending */ | ||
2941 | return 1; | ||
2942 | } | ||
2943 | } | ||
2944 | |||
2945 | static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, | ||
2946 | struct bnx2x_mcast_ramrod_params *p, | ||
2947 | int cmd) | ||
2948 | { | ||
2949 | /* Mark that there is work to do */ | ||
2950 | if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE)) | ||
2951 | p->mcast_list_len = 1; | ||
2952 | |||
2953 | return 0; | ||
2954 | } | ||
2955 | |||
2956 | static void bnx2x_mcast_revert_e1h(struct bnx2x *bp, | ||
2957 | struct bnx2x_mcast_ramrod_params *p, | ||
2958 | int old_num_bins) | ||
2959 | { | ||
2960 | /* Do nothing */ | ||
2961 | } | ||
2962 | |||
2963 | #define BNX2X_57711_SET_MC_FILTER(filter, bit) \ | ||
2964 | do { \ | ||
2965 | (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \ | ||
2966 | } while (0) | ||
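The macro above treats mc_filter[] as one flat bit vector: bit >> 5 selects the 32-bit word and bit & 0x1f the bit inside that word. A stand-alone sketch of the same arithmetic (the 8-word filter size below is only an illustrative assumption, not taken from the driver headers):

#include <stdint.h>
#include <stdio.h>

#define SET_MC_FILTER(filter, bit) \
        ((filter)[(bit) >> 5] |= (1u << ((bit) & 0x1f)))

int main(void)
{
        uint32_t mc_filter[8] = { 0 };  /* assumed size: 8 x 32 = 256 bins */
        int bin = 77;                   /* e.g. a bin index produced by the MAC hash */

        SET_MC_FILTER(mc_filter, bin);
        printf("word %d, mask 0x%08x\n", bin >> 5, mc_filter[bin >> 5]);
        /* prints: word 2, mask 0x00002000 */
        return 0;
}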
2967 | |||
2968 | static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp, | ||
2969 | struct bnx2x_mcast_obj *o, | ||
2970 | struct bnx2x_mcast_ramrod_params *p, | ||
2971 | u32 *mc_filter) | ||
2972 | { | ||
2973 | struct bnx2x_mcast_list_elem *mlist_pos; | ||
2974 | int bit; | ||
2975 | |||
2976 | list_for_each_entry(mlist_pos, &p->mcast_list, link) { | ||
2977 | bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac); | ||
2978 | BNX2X_57711_SET_MC_FILTER(mc_filter, bit); | ||
2979 | |||
2980 | DP(BNX2X_MSG_SP, "About to configure " | ||
2981 | BNX2X_MAC_FMT" mcast MAC, bin %d\n", | ||
2982 | BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit); | ||
2983 | |||
2984 | /* bookkeeping... */ | ||
2985 | BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, | ||
2986 | bit); | ||
2987 | } | ||
2988 | } | ||
2989 | |||
2990 | static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, | ||
2991 | struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, | ||
2992 | u32 *mc_filter) | ||
2993 | { | ||
2994 | int bit; | ||
2995 | |||
2996 | for (bit = bnx2x_mcast_get_next_bin(o, 0); | ||
2997 | bit >= 0; | ||
2998 | bit = bnx2x_mcast_get_next_bin(o, bit + 1)) { | ||
2999 | BNX2X_57711_SET_MC_FILTER(mc_filter, bit); | ||
3000 | DP(BNX2X_MSG_SP, "About to set bin %d\n", bit); | ||
3001 | } | ||
3002 | } | ||
3003 | |||
3004 | /* On 57711 we write the multicast MACs' approximate match | ||
3005 | * table directly into the TSTORM's internal RAM, so we don't | ||
3006 | * need any special tricks to make it work. | ||
3007 | */ | ||
3008 | static int bnx2x_mcast_setup_e1h(struct bnx2x *bp, | ||
3009 | struct bnx2x_mcast_ramrod_params *p, | ||
3010 | int cmd) | ||
426 | { | 3011 | { |
427 | int i; | 3012 | int i; |
428 | for (i = 0; i < size/4; i++) | 3013 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
429 | REG_WR(bp, addr + (i * 4), val); | 3014 | struct bnx2x_raw_obj *r = &o->raw; |
3015 | |||
3016 | /* If CLEAR_ONLY has been requested - clear the registry | ||
3017 | * and clear a pending bit. | ||
3018 | */ | ||
3019 | if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { | ||
3020 | u32 mc_filter[MC_HASH_SIZE] = {0}; | ||
3021 | |||
3022 | /* Set the multicast filter bits before writing it into | ||
3023 | * the internal memory. | ||
3024 | */ | ||
3025 | switch (cmd) { | ||
3026 | case BNX2X_MCAST_CMD_ADD: | ||
3027 | bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter); | ||
3028 | break; | ||
3029 | |||
3030 | case BNX2X_MCAST_CMD_DEL: | ||
3031 | DP(BNX2X_MSG_SP, "Invalidating multicast " | ||
3032 | "MACs configuration\n"); | ||
3033 | |||
3034 | /* clear the registry */ | ||
3035 | memset(o->registry.aprox_match.vec, 0, | ||
3036 | sizeof(o->registry.aprox_match.vec)); | ||
3037 | break; | ||
3038 | |||
3039 | case BNX2X_MCAST_CMD_RESTORE: | ||
3040 | bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter); | ||
3041 | break; | ||
3042 | |||
3043 | default: | ||
3044 | BNX2X_ERR("Unknown command: %d\n", cmd); | ||
3045 | return -EINVAL; | ||
3046 | } | ||
3047 | |||
3048 | /* Set the mcast filter in the internal memory */ | ||
3049 | for (i = 0; i < MC_HASH_SIZE; i++) | ||
3050 | REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]); | ||
3051 | } else | ||
3052 | /* clear the registry */ | ||
3053 | memset(o->registry.aprox_match.vec, 0, | ||
3054 | sizeof(o->registry.aprox_match.vec)); | ||
3055 | |||
3056 | /* We are done */ | ||
3057 | r->clear_pending(r); | ||
3058 | |||
3059 | return 0; | ||
430 | } | 3060 | } |
431 | 3061 | ||
432 | static inline void storm_memset_ustats_zero(struct bnx2x *bp, | 3062 | static int bnx2x_mcast_validate_e1(struct bnx2x *bp, |
433 | u8 port, u16 stat_id) | 3063 | struct bnx2x_mcast_ramrod_params *p, |
3064 | int cmd) | ||
434 | { | 3065 | { |
435 | size_t size = sizeof(struct ustorm_per_client_stats); | 3066 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
3067 | int reg_sz = o->get_registry_size(o); | ||
3068 | |||
3069 | switch (cmd) { | ||
3070 | /* DEL command deletes all currently configured MACs */ | ||
3071 | case BNX2X_MCAST_CMD_DEL: | ||
3072 | o->set_registry_size(o, 0); | ||
3073 | /* Don't break */ | ||
3074 | |||
3075 | /* RESTORE command will restore the entire multicast configuration */ | ||
3076 | case BNX2X_MCAST_CMD_RESTORE: | ||
3077 | p->mcast_list_len = reg_sz; | ||
3078 | DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n", | ||
3079 | cmd, p->mcast_list_len); | ||
3080 | break; | ||
3081 | |||
3082 | case BNX2X_MCAST_CMD_ADD: | ||
3083 | case BNX2X_MCAST_CMD_CONT: | ||
3084 | /* Multicast MACs on 57710 are configured as unicast MACs and | ||
3085 | * there is only a limited number of CAM entries for that | ||
3086 | * matter. | ||
3087 | */ | ||
3088 | if (p->mcast_list_len > o->max_cmd_len) { | ||
3089 | BNX2X_ERR("Can't configure more than %d multicast MACs " | ||
3090 | "on 57710\n", o->max_cmd_len); | ||
3091 | return -EINVAL; | ||
3092 | } | ||
3093 | /* Every configured MAC should be cleared if DEL command is | ||
3094 | * called. Only the last ADD command is relevant, since | ||
3095 | * every ADD command overrides the previous configuration. | ||
3096 | */ | ||
3097 | DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len); | ||
3098 | if (p->mcast_list_len > 0) | ||
3099 | o->set_registry_size(o, p->mcast_list_len); | ||
3100 | |||
3101 | break; | ||
3102 | |||
3103 | default: | ||
3104 | BNX2X_ERR("Unknown command: %d\n", cmd); | ||
3105 | return -EINVAL; | ||
3106 | |||
3107 | } | ||
436 | 3108 | ||
437 | u32 addr = BAR_USTRORM_INTMEM + | 3109 | /* We want to ensure that commands are executed one by one for 57710. |
438 | USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id); | 3110 | * Therefore each non-empty command will consume o->max_cmd_len. |
3111 | */ | ||
3112 | if (p->mcast_list_len) | ||
3113 | o->total_pending_num += o->max_cmd_len; | ||
439 | 3114 | ||
440 | __storm_fill(bp, addr, size, 0); | 3115 | return 0; |
441 | } | 3116 | } |
442 | 3117 | ||
443 | static inline void storm_memset_tstats_zero(struct bnx2x *bp, | 3118 | static void bnx2x_mcast_revert_e1(struct bnx2x *bp, |
444 | u8 port, u16 stat_id) | 3119 | struct bnx2x_mcast_ramrod_params *p, |
3120 | int old_num_macs) | ||
445 | { | 3121 | { |
446 | size_t size = sizeof(struct tstorm_per_client_stats); | 3122 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
447 | 3123 | ||
448 | u32 addr = BAR_TSTRORM_INTMEM + | 3124 | o->set_registry_size(o, old_num_macs); |
449 | TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id); | 3125 | |
3126 | /* If the current command hasn't been handled yet and we are | ||
3127 | * here, it means that it's meant to be dropped and we have to | ||
3128 | * update the number of outstanding MACs accordingly. | ||
3129 | */ | ||
3130 | if (p->mcast_list_len) | ||
3131 | o->total_pending_num -= o->max_cmd_len; | ||
3132 | } | ||
450 | 3133 | ||
451 | __storm_fill(bp, addr, size, 0); | 3134 | static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp, |
3135 | struct bnx2x_mcast_obj *o, int idx, | ||
3136 | union bnx2x_mcast_config_data *cfg_data, | ||
3137 | int cmd) | ||
3138 | { | ||
3139 | struct bnx2x_raw_obj *r = &o->raw; | ||
3140 | struct mac_configuration_cmd *data = | ||
3141 | (struct mac_configuration_cmd *)(r->rdata); | ||
3142 | |||
3143 | /* copy mac */ | ||
3144 | if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) { | ||
3145 | bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr, | ||
3146 | &data->config_table[idx].middle_mac_addr, | ||
3147 | &data->config_table[idx].lsb_mac_addr, | ||
3148 | cfg_data->mac); | ||
3149 | |||
3150 | data->config_table[idx].vlan_id = 0; | ||
3151 | data->config_table[idx].pf_id = r->func_id; | ||
3152 | data->config_table[idx].clients_bit_vector = | ||
3153 | cpu_to_le32(1 << r->cl_id); | ||
3154 | |||
3155 | SET_FLAG(data->config_table[idx].flags, | ||
3156 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
3157 | T_ETH_MAC_COMMAND_SET); | ||
3158 | } | ||
3159 | } | ||
3160 | |||
3161 | /** | ||
3162 | * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd | ||
3163 | * | ||
3164 | * @bp: device handle | ||
3165 | * @p: | ||
3166 | * @len: number of rules to handle | ||
3167 | */ | ||
3168 | static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp, | ||
3169 | struct bnx2x_mcast_ramrod_params *p, | ||
3170 | u8 len) | ||
3171 | { | ||
3172 | struct bnx2x_raw_obj *r = &p->mcast_obj->raw; | ||
3173 | struct mac_configuration_cmd *data = | ||
3174 | (struct mac_configuration_cmd *)(r->rdata); | ||
3175 | |||
3176 | u8 offset = (CHIP_REV_IS_SLOW(bp) ? | ||
3177 | BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) : | ||
3178 | BNX2X_MAX_MULTICAST*(1 + r->func_id)); | ||
3179 | |||
3180 | data->hdr.offset = offset; | ||
3181 | data->hdr.client_id = 0xff; | ||
3182 | data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) | | ||
3183 | (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); | ||
3184 | data->hdr.length = len; | ||
452 | } | 3185 | } |
453 | 3186 | ||
454 | static inline void storm_memset_xstats_zero(struct bnx2x *bp, | 3187 | /** |
455 | u8 port, u16 stat_id) | 3188 | * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710 |
3189 | * | ||
3190 | * @bp: device handle | ||
3191 | * @o: | ||
3192 | * @start_idx: index in the registry to start from | ||
3193 | * @rdata_idx: index in the ramrod data to start from | ||
3194 | * | ||
3195 | * The restore command for 57710 is, like all other commands, always a stand-alone | ||
3196 | * command - start_idx and rdata_idx will always be 0. This function will always | ||
3197 | * succeed. | ||
3198 | * Returns -1 to comply with the 57712 variant. | ||
3199 | */ | ||
3200 | static inline int bnx2x_mcast_handle_restore_cmd_e1( | ||
3201 | struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx, | ||
3202 | int *rdata_idx) | ||
456 | { | 3203 | { |
457 | size_t size = sizeof(struct xstorm_per_client_stats); | 3204 | struct bnx2x_mcast_mac_elem *elem; |
3205 | int i = 0; | ||
3206 | union bnx2x_mcast_config_data cfg_data = {0}; | ||
458 | 3207 | ||
459 | u32 addr = BAR_XSTRORM_INTMEM + | 3208 | /* go through the registry and configure the MACs from it. */ |
460 | XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id); | 3209 | list_for_each_entry(elem, &o->registry.exact_match.macs, link) { |
3210 | cfg_data.mac = &elem->mac[0]; | ||
3211 | o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE); | ||
461 | 3212 | ||
462 | __storm_fill(bp, addr, size, 0); | 3213 | i++; |
3214 | |||
3215 | DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT | ||
3216 | " mcast MAC\n", | ||
3217 | BNX2X_MAC_PRN_LIST(cfg_data.mac)); | ||
3218 | } | ||
3219 | |||
3220 | *rdata_idx = i; | ||
3221 | |||
3222 | return -1; | ||
463 | } | 3223 | } |
464 | 3224 | ||
465 | 3225 | ||
466 | static inline void storm_memset_spq_addr(struct bnx2x *bp, | 3226 | static inline int bnx2x_mcast_handle_pending_cmds_e1( |
467 | dma_addr_t mapping, u16 abs_fid) | 3227 | struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) |
468 | { | 3228 | { |
469 | u32 addr = XSEM_REG_FAST_MEMORY + | 3229 | struct bnx2x_pending_mcast_cmd *cmd_pos; |
470 | XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid); | 3230 | struct bnx2x_mcast_mac_elem *pmac_pos; |
3231 | struct bnx2x_mcast_obj *o = p->mcast_obj; | ||
3232 | union bnx2x_mcast_config_data cfg_data = {0}; | ||
3233 | int cnt = 0; | ||
3234 | |||
471 | 3235 | ||
472 | __storm_memset_dma_mapping(bp, addr, mapping); | 3236 | /* If nothing to be done - return */ |
3237 | if (list_empty(&o->pending_cmds_head)) | ||
3238 | return 0; | ||
3239 | |||
3240 | /* Handle the first command */ | ||
3241 | cmd_pos = list_first_entry(&o->pending_cmds_head, | ||
3242 | struct bnx2x_pending_mcast_cmd, link); | ||
3243 | |||
3244 | switch (cmd_pos->type) { | ||
3245 | case BNX2X_MCAST_CMD_ADD: | ||
3246 | list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) { | ||
3247 | cfg_data.mac = &pmac_pos->mac[0]; | ||
3248 | o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); | ||
3249 | |||
3250 | cnt++; | ||
3251 | |||
3252 | DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT | ||
3253 | " mcast MAC\n", | ||
3254 | BNX2X_MAC_PRN_LIST(pmac_pos->mac)); | ||
3255 | } | ||
3256 | break; | ||
3257 | |||
3258 | case BNX2X_MCAST_CMD_DEL: | ||
3259 | cnt = cmd_pos->data.macs_num; | ||
3260 | DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt); | ||
3261 | break; | ||
3262 | |||
3263 | case BNX2X_MCAST_CMD_RESTORE: | ||
3264 | o->hdl_restore(bp, o, 0, &cnt); | ||
3265 | break; | ||
3266 | |||
3267 | default: | ||
3268 | BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); | ||
3269 | return -EINVAL; | ||
3270 | } | ||
3271 | |||
3272 | list_del(&cmd_pos->link); | ||
3273 | kfree(cmd_pos); | ||
3274 | |||
3275 | return cnt; | ||
3276 | } | ||
3277 | |||
3278 | /** | ||
3279 | * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr(). | ||
3280 | * | ||
3281 | * @fw_hi: | ||
3282 | * @fw_mid: | ||
3283 | * @fw_lo: | ||
3284 | * @mac: | ||
3285 | */ | ||
3286 | static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, | ||
3287 | __le16 *fw_lo, u8 *mac) | ||
3288 | { | ||
3289 | mac[1] = ((u8 *)fw_hi)[0]; | ||
3290 | mac[0] = ((u8 *)fw_hi)[1]; | ||
3291 | mac[3] = ((u8 *)fw_mid)[0]; | ||
3292 | mac[2] = ((u8 *)fw_mid)[1]; | ||
3293 | mac[5] = ((u8 *)fw_lo)[0]; | ||
3294 | mac[4] = ((u8 *)fw_lo)[1]; | ||
473 | } | 3295 | } |
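bnx2x_get_fw_mac_addr() above undoes the byte swizzling of bnx2x_set_fw_mac_addr(): each 16-bit firmware field carries one MAC byte pair with the two bytes swapped in memory. A stand-alone round-trip sketch of that layout (the packing helper below is derived from the unpacking code above; it is not the driver's own bnx2x_set_fw_mac_addr()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack a MAC byte pair the way the unpacking code above expects it. */
static void pack_pair(uint16_t *fw, const uint8_t *two_bytes)
{
        uint8_t *b = (uint8_t *)fw;

        b[0] = two_bytes[1];
        b[1] = two_bytes[0];
}

static void unpack_pair(const uint16_t *fw, uint8_t *two_bytes)
{
        const uint8_t *b = (const uint8_t *)fw;

        two_bytes[1] = b[0];
        two_bytes[0] = b[1];
}

int main(void)
{
        uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc }, out[6];
        uint16_t hi, mid, lo;

        pack_pair(&hi, &mac[0]);
        pack_pair(&mid, &mac[2]);
        pack_pair(&lo, &mac[4]);

        unpack_pair(&hi, &out[0]);
        unpack_pair(&mid, &out[2]);
        unpack_pair(&lo, &out[4]);

        printf("round trip %s\n", memcmp(mac, out, 6) ? "failed" : "ok");
        return 0;
}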
474 | 3296 | ||
475 | static inline void storm_memset_xstats_flags(struct bnx2x *bp, | 3297 | /** |
476 | struct stats_indication_flags *flags, | 3298 | * bnx2x_mcast_refresh_registry_e1 - update the exact match mcast registry |
477 | u16 abs_fid) | 3299 | * |
3300 | * @bp: device handle | ||
3301 | * @o: multicast object | ||
3302 | * | ||
3303 | * Check the ramrod data first entry flag to see if it's a DELETE or ADD command | ||
3304 | * and update the registry correspondingly: if ADD - allocate memory and add | ||
3305 | * the entries to the registry (list), if DELETE - clear the registry and free | ||
3306 | * the memory. | ||
3307 | */ | ||
3308 | static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, | ||
3309 | struct bnx2x_mcast_obj *o) | ||
478 | { | 3310 | { |
479 | size_t size = sizeof(struct stats_indication_flags); | 3311 | struct bnx2x_raw_obj *raw = &o->raw; |
3312 | struct bnx2x_mcast_mac_elem *elem; | ||
3313 | struct mac_configuration_cmd *data = | ||
3314 | (struct mac_configuration_cmd *)(raw->rdata); | ||
3315 | |||
3316 | /* If first entry contains a SET bit - the command was ADD, | ||
3317 | * otherwise - DEL_ALL | ||
3318 | */ | ||
3319 | if (GET_FLAG(data->config_table[0].flags, | ||
3320 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) { | ||
3321 | int i, len = data->hdr.length; | ||
3322 | |||
3323 | /* Break if it was a RESTORE command */ | ||
3324 | if (!list_empty(&o->registry.exact_match.macs)) | ||
3325 | return 0; | ||
3326 | |||
3327 | elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC); | ||
3328 | if (!elem) { | ||
3329 | BNX2X_ERR("Failed to allocate registry memory\n"); | ||
3330 | return -ENOMEM; | ||
3331 | } | ||
480 | 3332 | ||
481 | u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid); | 3333 | for (i = 0; i < len; i++, elem++) { |
3334 | bnx2x_get_fw_mac_addr( | ||
3335 | &data->config_table[i].msb_mac_addr, | ||
3336 | &data->config_table[i].middle_mac_addr, | ||
3337 | &data->config_table[i].lsb_mac_addr, | ||
3338 | elem->mac); | ||
3339 | DP(BNX2X_MSG_SP, "Adding registry entry for [" | ||
3340 | BNX2X_MAC_FMT"]\n", | ||
3341 | BNX2X_MAC_PRN_LIST(elem->mac)); | ||
3342 | list_add_tail(&elem->link, | ||
3343 | &o->registry.exact_match.macs); | ||
3344 | } | ||
3345 | } else { | ||
3346 | elem = list_first_entry(&o->registry.exact_match.macs, | ||
3347 | struct bnx2x_mcast_mac_elem, link); | ||
3348 | DP(BNX2X_MSG_SP, "Deleting a registry\n"); | ||
3349 | kfree(elem); | ||
3350 | INIT_LIST_HEAD(&o->registry.exact_match.macs); | ||
3351 | } | ||
482 | 3352 | ||
483 | __storm_memset_struct(bp, addr, size, (u32 *)flags); | 3353 | return 0; |
484 | } | 3354 | } |
485 | 3355 | ||
486 | static inline void storm_memset_tstats_flags(struct bnx2x *bp, | 3356 | static int bnx2x_mcast_setup_e1(struct bnx2x *bp, |
487 | struct stats_indication_flags *flags, | 3357 | struct bnx2x_mcast_ramrod_params *p, |
488 | u16 abs_fid) | 3358 | int cmd) |
489 | { | 3359 | { |
490 | size_t size = sizeof(struct stats_indication_flags); | 3360 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
3361 | struct bnx2x_raw_obj *raw = &o->raw; | ||
3362 | struct mac_configuration_cmd *data = | ||
3363 | (struct mac_configuration_cmd *)(raw->rdata); | ||
3364 | int cnt = 0, i, rc; | ||
3365 | |||
3366 | /* Reset the ramrod data buffer */ | ||
3367 | memset(data, 0, sizeof(*data)); | ||
3368 | |||
3369 | /* First set all entries as invalid */ | ||
3370 | for (i = 0; i < o->max_cmd_len ; i++) | ||
3371 | SET_FLAG(data->config_table[i].flags, | ||
3372 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE, | ||
3373 | T_ETH_MAC_COMMAND_INVALIDATE); | ||
3374 | |||
3375 | /* Handle pending commands first */ | ||
3376 | cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p); | ||
491 | 3377 | ||
492 | u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid); | 3378 | /* If there are no more pending commands - clear SCHEDULED state */ |
3379 | if (list_empty(&o->pending_cmds_head)) | ||
3380 | o->clear_sched(o); | ||
493 | 3381 | ||
494 | __storm_memset_struct(bp, addr, size, (u32 *)flags); | 3382 | /* The below may be true iff there were no pending commands */ |
3383 | if (!cnt) | ||
3384 | cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0); | ||
3385 | |||
3386 | /* For 57710 every command has o->max_cmd_len length to ensure that | ||
3387 | * commands are done one at a time. | ||
3388 | */ | ||
3389 | o->total_pending_num -= o->max_cmd_len; | ||
3390 | |||
3391 | /* send a ramrod */ | ||
3392 | |||
3393 | WARN_ON(cnt > o->max_cmd_len); | ||
3394 | |||
3395 | /* Set ramrod header (in particular, a number of entries to update) */ | ||
3396 | bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt); | ||
3397 | |||
3398 | /* Update the registry: we need the registry contents to be always up | ||
3399 | * to date in order to be able to execute a RESTORE opcode. Here | ||
3400 | * we use the fact that for 57710 we send one command at a time | ||
3401 | * hence we may take the registry update out of the command handling | ||
3402 | * and do it in a simpler way here. | ||
3403 | */ | ||
3404 | rc = bnx2x_mcast_refresh_registry_e1(bp, o); | ||
3405 | if (rc) | ||
3406 | return rc; | ||
3407 | |||
3408 | /* Commit writes towards the memory before sending a ramrod */ | ||
3409 | mb(); | ||
3410 | |||
3411 | /* If CLEAR_ONLY was requested - don't send a ramrod and clear | ||
3412 | * RAMROD_PENDING status immediately. | ||
3413 | */ | ||
3414 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { | ||
3415 | raw->clear_pending(raw); | ||
3416 | return 0; | ||
3417 | } else { | ||
3418 | /* Send a ramrod */ | ||
3419 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid, | ||
3420 | U64_HI(raw->rdata_mapping), | ||
3421 | U64_LO(raw->rdata_mapping), | ||
3422 | ETH_CONNECTION_TYPE); | ||
3423 | if (rc) | ||
3424 | return rc; | ||
3425 | |||
3426 | /* Ramrod completion is pending */ | ||
3427 | return 1; | ||
3428 | } | ||
3429 | |||
3430 | } | ||
3431 | |||
3432 | static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o) | ||
3433 | { | ||
3434 | return o->registry.exact_match.num_macs_set; | ||
495 | } | 3435 | } |
496 | 3436 | ||
497 | static inline void storm_memset_ustats_flags(struct bnx2x *bp, | 3437 | static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o) |
498 | struct stats_indication_flags *flags, | ||
499 | u16 abs_fid) | ||
500 | { | 3438 | { |
501 | size_t size = sizeof(struct stats_indication_flags); | 3439 | return o->registry.aprox_match.num_bins_set; |
3440 | } | ||
502 | 3441 | ||
503 | u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid); | 3442 | static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o, |
3443 | int n) | ||
3444 | { | ||
3445 | o->registry.exact_match.num_macs_set = n; | ||
3446 | } | ||
504 | 3447 | ||
505 | __storm_memset_struct(bp, addr, size, (u32 *)flags); | 3448 | static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o, |
3449 | int n) | ||
3450 | { | ||
3451 | o->registry.aprox_match.num_bins_set = n; | ||
506 | } | 3452 | } |
507 | 3453 | ||
508 | static inline void storm_memset_cstats_flags(struct bnx2x *bp, | 3454 | int bnx2x_config_mcast(struct bnx2x *bp, |
509 | struct stats_indication_flags *flags, | 3455 | struct bnx2x_mcast_ramrod_params *p, |
510 | u16 abs_fid) | 3456 | int cmd) |
511 | { | 3457 | { |
512 | size_t size = sizeof(struct stats_indication_flags); | 3458 | struct bnx2x_mcast_obj *o = p->mcast_obj; |
3459 | struct bnx2x_raw_obj *r = &o->raw; | ||
3460 | int rc = 0, old_reg_size; | ||
3461 | |||
3462 | /* This is needed to recover number of currently configured mcast macs | ||
3463 | * in case of failure. | ||
3464 | */ | ||
3465 | old_reg_size = o->get_registry_size(o); | ||
3466 | |||
3467 | /* Do some calculations and checks */ | ||
3468 | rc = o->validate(bp, p, cmd); | ||
3469 | if (rc) | ||
3470 | return rc; | ||
3471 | |||
3472 | /* Return if there is no work to do */ | ||
3473 | if ((!p->mcast_list_len) && (!o->check_sched(o))) | ||
3474 | return 0; | ||
3475 | |||
3476 | DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d " | ||
3477 | "o->max_cmd_len=%d\n", o->total_pending_num, | ||
3478 | p->mcast_list_len, o->max_cmd_len); | ||
3479 | |||
3480 | /* Enqueue the current command to the pending list if we can't complete | ||
3481 | * it in the current iteration | ||
3482 | */ | ||
3483 | if (r->check_pending(r) || | ||
3484 | ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { | ||
3485 | rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd); | ||
3486 | if (rc < 0) | ||
3487 | goto error_exit1; | ||
3488 | |||
3489 | /* As long as the current command is in a command list we | ||
3490 | * don't need to handle it separately. | ||
3491 | */ | ||
3492 | p->mcast_list_len = 0; | ||
3493 | } | ||
3494 | |||
3495 | if (!r->check_pending(r)) { | ||
3496 | |||
3497 | /* Set 'pending' state */ | ||
3498 | r->set_pending(r); | ||
3499 | |||
3500 | /* Configure the new classification in the chip */ | ||
3501 | rc = o->config_mcast(bp, p, cmd); | ||
3502 | if (rc < 0) | ||
3503 | goto error_exit2; | ||
3504 | |||
3505 | /* Wait for a ramrod completion if was requested */ | ||
3506 | if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) | ||
3507 | rc = o->wait_comp(bp, o); | ||
3508 | } | ||
3509 | |||
3510 | return rc; | ||
513 | 3511 | ||
514 | u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid); | 3512 | error_exit2: |
3513 | r->clear_pending(r); | ||
515 | 3514 | ||
516 | __storm_memset_struct(bp, addr, size, (u32 *)flags); | 3515 | error_exit1: |
3516 | o->revert(bp, p, old_reg_size); | ||
3517 | |||
3518 | return rc; | ||
517 | } | 3519 | } |
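A minimal caller-side sketch of how the new interface might be driven for an ADD command; the field and flag names are the ones used in the code above, while building the bnx2x_mcast_list_elem list from the netdev multicast list is assumed to happen elsewhere (the function itself is only an illustration, not part of the patch):

static int example_set_mcast_list(struct bnx2x *bp,
                                  struct bnx2x_mcast_obj *mcast_obj,
                                  struct list_head *macs, int mac_cnt)
{
        struct bnx2x_mcast_ramrod_params rparam = {0};

        rparam.mcast_obj = mcast_obj;

        /* Hand over the pre-built list of bnx2x_mcast_list_elem entries */
        INIT_LIST_HEAD(&rparam.mcast_list);
        list_splice_init(macs, &rparam.mcast_list);
        rparam.mcast_list_len = mac_cnt;

        /* Block until the ramrod completes */
        __set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);

        return bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
}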
518 | 3520 | ||
519 | static inline void storm_memset_xstats_addr(struct bnx2x *bp, | 3521 | static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o) |
520 | dma_addr_t mapping, u16 abs_fid) | ||
521 | { | 3522 | { |
522 | u32 addr = BAR_XSTRORM_INTMEM + | 3523 | smp_mb__before_clear_bit(); |
523 | XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid); | 3524 | clear_bit(o->sched_state, o->raw.pstate); |
3525 | smp_mb__after_clear_bit(); | ||
3526 | } | ||
524 | 3527 | ||
525 | __storm_memset_dma_mapping(bp, addr, mapping); | 3528 | static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o) |
3529 | { | ||
3530 | smp_mb__before_clear_bit(); | ||
3531 | set_bit(o->sched_state, o->raw.pstate); | ||
3532 | smp_mb__after_clear_bit(); | ||
526 | } | 3533 | } |
527 | 3534 | ||
528 | static inline void storm_memset_tstats_addr(struct bnx2x *bp, | 3535 | static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o) |
529 | dma_addr_t mapping, u16 abs_fid) | ||
530 | { | 3536 | { |
531 | u32 addr = BAR_TSTRORM_INTMEM + | 3537 | return !!test_bit(o->sched_state, o->raw.pstate); |
532 | TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid); | 3538 | } |
3539 | |||
3540 | static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o) | ||
3541 | { | ||
3542 | return o->raw.check_pending(&o->raw) || o->check_sched(o); | ||
3543 | } | ||
3544 | |||
3545 | void bnx2x_init_mcast_obj(struct bnx2x *bp, | ||
3546 | struct bnx2x_mcast_obj *mcast_obj, | ||
3547 | u8 mcast_cl_id, u32 mcast_cid, u8 func_id, | ||
3548 | u8 engine_id, void *rdata, dma_addr_t rdata_mapping, | ||
3549 | int state, unsigned long *pstate, bnx2x_obj_type type) | ||
3550 | { | ||
3551 | memset(mcast_obj, 0, sizeof(*mcast_obj)); | ||
3552 | |||
3553 | bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id, | ||
3554 | rdata, rdata_mapping, state, pstate, type); | ||
3555 | |||
3556 | mcast_obj->engine_id = engine_id; | ||
3557 | |||
3558 | INIT_LIST_HEAD(&mcast_obj->pending_cmds_head); | ||
3559 | |||
3560 | mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED; | ||
3561 | mcast_obj->check_sched = bnx2x_mcast_check_sched; | ||
3562 | mcast_obj->set_sched = bnx2x_mcast_set_sched; | ||
3563 | mcast_obj->clear_sched = bnx2x_mcast_clear_sched; | ||
3564 | |||
3565 | if (CHIP_IS_E1(bp)) { | ||
3566 | mcast_obj->config_mcast = bnx2x_mcast_setup_e1; | ||
3567 | mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; | ||
3568 | mcast_obj->hdl_restore = | ||
3569 | bnx2x_mcast_handle_restore_cmd_e1; | ||
3570 | mcast_obj->check_pending = bnx2x_mcast_check_pending; | ||
3571 | |||
3572 | if (CHIP_REV_IS_SLOW(bp)) | ||
3573 | mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI; | ||
3574 | else | ||
3575 | mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST; | ||
3576 | |||
3577 | mcast_obj->wait_comp = bnx2x_mcast_wait; | ||
3578 | mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1; | ||
3579 | mcast_obj->validate = bnx2x_mcast_validate_e1; | ||
3580 | mcast_obj->revert = bnx2x_mcast_revert_e1; | ||
3581 | mcast_obj->get_registry_size = | ||
3582 | bnx2x_mcast_get_registry_size_exact; | ||
3583 | mcast_obj->set_registry_size = | ||
3584 | bnx2x_mcast_set_registry_size_exact; | ||
3585 | |||
3586 | /* 57710 is the only chip that uses the exact match for mcast | ||
3587 | * at the moment. | ||
3588 | */ | ||
3589 | INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs); | ||
3590 | |||
3591 | } else if (CHIP_IS_E1H(bp)) { | ||
3592 | mcast_obj->config_mcast = bnx2x_mcast_setup_e1h; | ||
3593 | mcast_obj->enqueue_cmd = NULL; | ||
3594 | mcast_obj->hdl_restore = NULL; | ||
3595 | mcast_obj->check_pending = bnx2x_mcast_check_pending; | ||
3596 | |||
3597 | /* 57711 doesn't send a ramrod, so it has unlimited credit | ||
3598 | * for one command. | ||
3599 | */ | ||
3600 | mcast_obj->max_cmd_len = -1; | ||
3601 | mcast_obj->wait_comp = bnx2x_mcast_wait; | ||
3602 | mcast_obj->set_one_rule = NULL; | ||
3603 | mcast_obj->validate = bnx2x_mcast_validate_e1h; | ||
3604 | mcast_obj->revert = bnx2x_mcast_revert_e1h; | ||
3605 | mcast_obj->get_registry_size = | ||
3606 | bnx2x_mcast_get_registry_size_aprox; | ||
3607 | mcast_obj->set_registry_size = | ||
3608 | bnx2x_mcast_set_registry_size_aprox; | ||
3609 | } else { | ||
3610 | mcast_obj->config_mcast = bnx2x_mcast_setup_e2; | ||
3611 | mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; | ||
3612 | mcast_obj->hdl_restore = | ||
3613 | bnx2x_mcast_handle_restore_cmd_e2; | ||
3614 | mcast_obj->check_pending = bnx2x_mcast_check_pending; | ||
3615 | /* TODO: There should be a proper HSI define for this number!!! | ||
3616 | */ | ||
3617 | mcast_obj->max_cmd_len = 16; | ||
3618 | mcast_obj->wait_comp = bnx2x_mcast_wait; | ||
3619 | mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2; | ||
3620 | mcast_obj->validate = bnx2x_mcast_validate_e2; | ||
3621 | mcast_obj->revert = bnx2x_mcast_revert_e2; | ||
3622 | mcast_obj->get_registry_size = | ||
3623 | bnx2x_mcast_get_registry_size_aprox; | ||
3624 | mcast_obj->set_registry_size = | ||
3625 | bnx2x_mcast_set_registry_size_aprox; | ||
3626 | } | ||
3627 | } | ||
3628 | |||
3629 | /*************************** Credit handling **********************************/ | ||
3630 | |||
3631 | /** | ||
3632 | * atomic_add_ifless - add if the result is less than a given value. | ||
3633 | * | ||
3634 | * @v: pointer of type atomic_t | ||
3635 | * @a: the amount to add to v... | ||
3636 | * @u: ...if (v + a) is less than u. | ||
3637 | * | ||
3638 | * returns true if (v + a) was less than u, and false otherwise. | ||
3639 | * | ||
3640 | */ | ||
3641 | static inline bool __atomic_add_ifless(atomic_t *v, int a, int u) | ||
3642 | { | ||
3643 | int c, old; | ||
3644 | |||
3645 | c = atomic_read(v); | ||
3646 | for (;;) { | ||
3647 | if (unlikely(c + a >= u)) | ||
3648 | return false; | ||
533 | 3649 | ||
534 | __storm_memset_dma_mapping(bp, addr, mapping); | 3650 | old = atomic_cmpxchg((v), c, c + a); |
3651 | if (likely(old == c)) | ||
3652 | break; | ||
3653 | c = old; | ||
3654 | } | ||
3655 | |||
3656 | return true; | ||
535 | } | 3657 | } |
536 | 3658 | ||
537 | static inline void storm_memset_ustats_addr(struct bnx2x *bp, | 3659 | /** |
538 | dma_addr_t mapping, u16 abs_fid) | 3660 | * atomic_dec_ifmoe - dec if the result is greater than or equal to a given value. |
3661 | * | ||
3662 | * @v: pointer of type atomic_t | ||
3663 | * @a: the amount to dec from v... | ||
3664 | * @u: ...if (v - a) is greater than or equal to u. | ||
3665 | * | ||
3666 | * returns true if (v - a) was greater than or equal to u, and false | ||
3667 | * otherwise. | ||
3668 | */ | ||
3669 | static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u) | ||
539 | { | 3670 | { |
540 | u32 addr = BAR_USTRORM_INTMEM + | 3671 | int c, old; |
541 | USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid); | 3672 | |
3673 | c = atomic_read(v); | ||
3674 | for (;;) { | ||
3675 | if (unlikely(c - a < u)) | ||
3676 | return false; | ||
3677 | |||
3678 | old = atomic_cmpxchg((v), c, c - a); | ||
3679 | if (likely(old == c)) | ||
3680 | break; | ||
3681 | c = old; | ||
3682 | } | ||
542 | 3683 | ||
543 | __storm_memset_dma_mapping(bp, addr, mapping); | 3684 | return true; |
544 | } | 3685 | } |
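The two compare-and-swap helpers give the credit pool its semantics: get() succeeds only while enough credit remains (dec_ifmoe against 0) and put() refuses to overfill (add_ifless against pool_sz + 1). A user-space illustration of the same pattern with C11 atomics (not driver code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool add_ifless(atomic_int *v, int a, int u)
{
        int c = atomic_load(v);

        while (c + a < u)
                if (atomic_compare_exchange_weak(v, &c, c + a))
                        return true;
        return false;
}

static bool dec_ifmoe(atomic_int *v, int a, int u)
{
        int c = atomic_load(v);

        while (c - a >= u)
                if (atomic_compare_exchange_weak(v, &c, c - a))
                        return true;
        return false;
}

int main(void)
{
        atomic_int credit = 4;                                  /* pool_sz == 4 */

        printf("get 3: %d\n", dec_ifmoe(&credit, 3, 0));        /* 1 */
        printf("get 2: %d\n", dec_ifmoe(&credit, 2, 0));        /* 0: only 1 left */
        printf("put 3: %d\n", add_ifless(&credit, 3, 4 + 1));   /* 1 */
        printf("put 2: %d\n", add_ifless(&credit, 2, 4 + 1));   /* 0: would overfill */
        return 0;
}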
545 | 3686 | ||
546 | static inline void storm_memset_cstats_addr(struct bnx2x *bp, | 3687 | static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt) |
547 | dma_addr_t mapping, u16 abs_fid) | ||
548 | { | 3688 | { |
549 | u32 addr = BAR_CSTRORM_INTMEM + | 3689 | bool rc; |
550 | CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid); | ||
551 | 3690 | ||
552 | __storm_memset_dma_mapping(bp, addr, mapping); | 3691 | smp_mb(); |
3692 | rc = __atomic_dec_ifmoe(&o->credit, cnt, 0); | ||
3693 | smp_mb(); | ||
3694 | |||
3695 | return rc; | ||
553 | } | 3696 | } |
554 | 3697 | ||
555 | static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, | 3698 | static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt) |
556 | u16 pf_id) | ||
557 | { | 3699 | { |
558 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), | 3700 | bool rc; |
559 | pf_id); | 3701 | |
560 | REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), | 3702 | smp_mb(); |
561 | pf_id); | 3703 | |
562 | REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), | 3704 | /* Don't allow a refill if credit + cnt > pool_sz */ |
563 | pf_id); | 3705 | rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1); |
564 | REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), | 3706 | |
565 | pf_id); | 3707 | smp_mb(); |
3708 | |||
3709 | return rc; | ||
566 | } | 3710 | } |
567 | 3711 | ||
568 | static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, | 3712 | static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o) |
569 | u8 enable) | ||
570 | { | 3713 | { |
571 | REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), | 3714 | int cur_credit; |
572 | enable); | 3715 | |
573 | REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), | 3716 | smp_mb(); |
574 | enable); | 3717 | cur_credit = atomic_read(&o->credit); |
575 | REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), | 3718 | |
576 | enable); | 3719 | return cur_credit; |
577 | REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), | ||
578 | enable); | ||
579 | } | 3720 | } |
580 | 3721 | ||
581 | static inline void storm_memset_func_cfg(struct bnx2x *bp, | 3722 | static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o, |
582 | struct tstorm_eth_function_common_config *tcfg, | 3723 | int cnt) |
583 | u16 abs_fid) | ||
584 | { | 3724 | { |
585 | size_t size = sizeof(struct tstorm_eth_function_common_config); | 3725 | return true; |
3726 | } | ||
586 | 3727 | ||
587 | u32 addr = BAR_TSTRORM_INTMEM + | ||
588 | TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); | ||
589 | 3728 | ||
590 | __storm_memset_struct(bp, addr, size, (u32 *)tcfg); | 3729 | static bool bnx2x_credit_pool_get_entry( |
3730 | struct bnx2x_credit_pool_obj *o, | ||
3731 | int *offset) | ||
3732 | { | ||
3733 | int idx, vec, i; | ||
3734 | |||
3735 | *offset = -1; | ||
3736 | |||
3737 | /* Find "internal cam-offset" then add to base for this object... */ | ||
3738 | for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) { | ||
3739 | |||
3740 | /* Skip the current vector if there are no free entries in it */ | ||
3741 | if (!o->pool_mirror[vec]) | ||
3742 | continue; | ||
3743 | |||
3744 | /* If we've got here we are going to find a free entry */ | ||
3745 | for (idx = vec * BNX2X_POOL_VEC_SIZE, i = 0; | ||
3746 | i < BIT_VEC64_ELEM_SZ; idx++, i++) | ||
3747 | |||
3748 | if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { | ||
3749 | /* Got one!! */ | ||
3750 | BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx); | ||
3751 | *offset = o->base_pool_offset + idx; | ||
3752 | return true; | ||
3753 | } | ||
3754 | } | ||
3755 | |||
3756 | return false; | ||
3757 | } | ||
3758 | |||
3759 | static bool bnx2x_credit_pool_put_entry( | ||
3760 | struct bnx2x_credit_pool_obj *o, | ||
3761 | int offset) | ||
3762 | { | ||
3763 | if (offset < o->base_pool_offset) | ||
3764 | return false; | ||
3765 | |||
3766 | offset -= o->base_pool_offset; | ||
3767 | |||
3768 | if (offset >= o->pool_sz) | ||
3769 | return false; | ||
3770 | |||
3771 | /* Return the entry to the pool */ | ||
3772 | BIT_VEC64_SET_BIT(o->pool_mirror, offset); | ||
3773 | |||
3774 | return true; | ||
3775 | } | ||
3776 | |||
3777 | static bool bnx2x_credit_pool_put_entry_always_true( | ||
3778 | struct bnx2x_credit_pool_obj *o, | ||
3779 | int offset) | ||
3780 | { | ||
3781 | return true; | ||
591 | } | 3782 | } |
592 | 3783 | ||
593 | void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) | 3784 | static bool bnx2x_credit_pool_get_entry_always_true( |
3785 | struct bnx2x_credit_pool_obj *o, | ||
3786 | int *offset) | ||
3787 | { | ||
3788 | *offset = -1; | ||
3789 | return true; | ||
3790 | } | ||
3791 | /** | ||
3792 | * bnx2x_init_credit_pool - initialize credit pool internals. | ||
3793 | * | ||
3794 | * @p: | ||
3795 | * @base: Base entry in the CAM to use. | ||
3796 | * @credit: pool size. | ||
3797 | * | ||
3798 | * If base is negative no CAM entries handling will be performed. | ||
3799 | * If credit is negative pool operations will always succeed (unlimited pool). | ||
3800 | * | ||
3801 | */ | ||
3802 | static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, | ||
3803 | int base, int credit) | ||
594 | { | 3804 | { |
595 | struct tstorm_eth_function_common_config tcfg = {0}; | 3805 | /* Zero the object first */ |
596 | u16 rss_flgs; | 3806 | memset(p, 0, sizeof(*p)); |
3807 | |||
3808 | /* Set the table to all 1s */ | ||
3809 | memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror)); | ||
3810 | |||
3811 | /* Init a pool as full */ | ||
3812 | atomic_set(&p->credit, credit); | ||
3813 | |||
3814 | /* The total pool size */ | ||
3815 | p->pool_sz = credit; | ||
3816 | |||
3817 | p->base_pool_offset = base; | ||
3818 | |||
3819 | /* Commit the change */ | ||
3820 | smp_mb(); | ||
597 | 3821 | ||
598 | /* tpa */ | 3822 | p->check = bnx2x_credit_pool_check; |
599 | if (p->func_flgs & FUNC_FLG_TPA) | ||
600 | tcfg.config_flags |= | ||
601 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA; | ||
602 | 3823 | ||
603 | /* set rss flags */ | 3824 | /* if pool credit is negative - disable the checks */ |
604 | rss_flgs = (p->rss->mode << | 3825 | if (credit >= 0) { |
605 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT); | 3826 | p->put = bnx2x_credit_pool_put; |
3827 | p->get = bnx2x_credit_pool_get; | ||
3828 | p->put_entry = bnx2x_credit_pool_put_entry; | ||
3829 | p->get_entry = bnx2x_credit_pool_get_entry; | ||
3830 | } else { | ||
3831 | p->put = bnx2x_credit_pool_always_true; | ||
3832 | p->get = bnx2x_credit_pool_always_true; | ||
3833 | p->put_entry = bnx2x_credit_pool_put_entry_always_true; | ||
3834 | p->get_entry = bnx2x_credit_pool_get_entry_always_true; | ||
3835 | } | ||
3836 | |||
3837 | /* If base is negative - disable entries handling */ | ||
3838 | if (base < 0) { | ||
3839 | p->put_entry = bnx2x_credit_pool_put_entry_always_true; | ||
3840 | p->get_entry = bnx2x_credit_pool_get_entry_always_true; | ||
3841 | } | ||
3842 | } | ||
3843 | |||
3844 | void bnx2x_init_mac_credit_pool(struct bnx2x *bp, | ||
3845 | struct bnx2x_credit_pool_obj *p, u8 func_id, | ||
3846 | u8 func_num) | ||
3847 | { | ||
3848 | /* TODO: this will be defined in consts as well... */ | ||
3849 | #define BNX2X_CAM_SIZE_EMUL 5 | ||
606 | 3850 | ||
607 | if (p->rss->cap & RSS_IPV4_CAP) | 3851 | int cam_sz; |
608 | rss_flgs |= RSS_IPV4_CAP_MASK; | ||
609 | if (p->rss->cap & RSS_IPV4_TCP_CAP) | ||
610 | rss_flgs |= RSS_IPV4_TCP_CAP_MASK; | ||
611 | if (p->rss->cap & RSS_IPV6_CAP) | ||
612 | rss_flgs |= RSS_IPV6_CAP_MASK; | ||
613 | if (p->rss->cap & RSS_IPV6_TCP_CAP) | ||
614 | rss_flgs |= RSS_IPV6_TCP_CAP_MASK; | ||
615 | 3852 | ||
616 | tcfg.config_flags |= rss_flgs; | 3853 | if (CHIP_IS_E1(bp)) { |
617 | tcfg.rss_result_mask = p->rss->result_mask; | 3854 | /* In E1, Multicast is saved in cam... */ |
3855 | if (!CHIP_REV_IS_SLOW(bp)) | ||
3856 | cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST; | ||
3857 | else | ||
3858 | cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI; | ||
618 | 3859 | ||
619 | storm_memset_func_cfg(bp, &tcfg, p->func_id); | 3860 | bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); |
620 | 3861 | ||
621 | /* Enable the function in the FW */ | 3862 | } else if (CHIP_IS_E1H(bp)) { |
622 | storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); | 3863 | /* CAM credit is equally divided between all active functions |
623 | storm_memset_func_en(bp, p->func_id, 1); | 3864 | * on the PORT. |
3865 | */ | ||
3866 | if ((func_num > 0)) { | ||
3867 | if (!CHIP_REV_IS_SLOW(bp)) | ||
3868 | cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num)); | ||
3869 | else | ||
3870 | cam_sz = BNX2X_CAM_SIZE_EMUL; | ||
3871 | bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); | ||
3872 | } else { | ||
3873 | /* this should never happen! Block MAC operations. */ | ||
3874 | bnx2x_init_credit_pool(p, 0, 0); | ||
3875 | } | ||
624 | 3876 | ||
625 | /* statistics */ | 3877 | } else { |
626 | if (p->func_flgs & FUNC_FLG_STATS) { | ||
627 | struct stats_indication_flags stats_flags = {0}; | ||
628 | stats_flags.collect_eth = 1; | ||
629 | 3878 | ||
630 | storm_memset_xstats_flags(bp, &stats_flags, p->func_id); | 3879 | /* |
631 | storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id); | 3880 | * CAM credit is equally divided between all active functions |
3881 | * on the PATH. | ||
3882 | */ | ||
3883 | if ((func_num > 0)) { | ||
3884 | if (!CHIP_REV_IS_SLOW(bp)) | ||
3885 | cam_sz = (MAX_MAC_CREDIT_E2 / func_num); | ||
3886 | else | ||
3887 | cam_sz = BNX2X_CAM_SIZE_EMUL; | ||
632 | 3888 | ||
633 | storm_memset_tstats_flags(bp, &stats_flags, p->func_id); | 3889 | /* |
634 | storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id); | 3890 | * No need for CAM entries handling for 57712 and |
3891 | * newer. | ||
3892 | */ | ||
3893 | bnx2x_init_credit_pool(p, -1, cam_sz); | ||
3894 | } else { | ||
3895 | /* this should never happen! Block MAC operations. */ | ||
3896 | bnx2x_init_credit_pool(p, 0, 0); | ||
3897 | } | ||
635 | 3898 | ||
636 | storm_memset_ustats_flags(bp, &stats_flags, p->func_id); | 3899 | } |
637 | storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id); | 3900 | } |
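For 57711 the CAM is shared by the functions on a port, so each of func_num active functions gets cam_sz = MAX_MAC_CREDIT_E1H / (2 * func_num) entries starting at func_id * cam_sz; from 57712 on the split is per path and the base is passed as -1 because CAM offsets no longer need tracking. A quick stand-alone sketch of the E1H arithmetic (the credit constant below is an assumed example value, not the driver's define):

#include <stdio.h>

int main(void)
{
        const int max_mac_credit_e1h = 256;     /* assumed example value */
        const int func_num = 2;                 /* active functions on the port */
        int cam_sz = max_mac_credit_e1h / (2 * func_num);
        int func_id;

        for (func_id = 0; func_id < func_num; func_id++)
                printf("func %d: base %d, credit %d\n",
                       func_id, func_id * cam_sz, cam_sz);

        /* func 0: base 0,  credit 64
         * func 1: base 64, credit 64
         */
        return 0;
}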
638 | 3901 | ||
639 | storm_memset_cstats_flags(bp, &stats_flags, p->func_id); | 3902 | void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, |
640 | storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id); | 3903 | struct bnx2x_credit_pool_obj *p, |
3904 | u8 func_id, | ||
3905 | u8 func_num) | ||
3906 | { | ||
3907 | if (CHIP_IS_E1x(bp)) { | ||
3908 | /* | ||
3909 | * There is no VLAN credit in HW on 57710 and 57711; only | ||
3910 | * MAC / MAC-VLAN can be set | ||
3911 | */ | ||
3912 | bnx2x_init_credit_pool(p, 0, -1); | ||
3913 | } else { | ||
3914 | /* | ||
3915 | * CAM credit is equally divided between all active functions | ||
3916 | * on the PATH. | ||
3917 | */ | ||
3918 | if (func_num > 0) { | ||
3919 | int credit = MAX_VLAN_CREDIT_E2 / func_num; | ||
3920 | bnx2x_init_credit_pool(p, func_id * credit, credit); | ||
3921 | } else | ||
3922 | /* this should never happen! Block VLAN operations. */ | ||
3923 | bnx2x_init_credit_pool(p, 0, 0); | ||
641 | } | 3924 | } |
3925 | } | ||
3926 | |||
3927 | /****************** RSS Configuration ******************/ | ||
3928 | /** | ||
3929 | * bnx2x_debug_print_ind_table - prints the indirection table configuration. | ||
3930 | * | ||
3931 | * @bp: driver handle | ||
3932 | * @p: pointer to rss configuration | ||
3933 | * | ||
3934 | * Prints it when NETIF_MSG_IFUP debug level is configured. | ||
3935 | */ | ||
3936 | static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp, | ||
3937 | struct bnx2x_config_rss_params *p) | ||
3938 | { | ||
3939 | int i; | ||
3940 | |||
3941 | DP(BNX2X_MSG_SP, "Setting indirection table to:\n"); | ||
3942 | DP(BNX2X_MSG_SP, "0x0000: "); | ||
3943 | for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { | ||
3944 | DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]); | ||
642 | 3945 | ||
643 | /* spq */ | 3946 | /* Print 4 bytes in a line */ |
644 | if (p->func_flgs & FUNC_FLG_SPQ) { | 3947 | if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && |
645 | storm_memset_spq_addr(bp, p->spq_map, p->func_id); | 3948 | (((i + 1) & 0x3) == 0)) { |
646 | REG_WR(bp, XSEM_REG_FAST_MEMORY + | 3949 | DP_CONT(BNX2X_MSG_SP, "\n"); |
647 | XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); | 3950 | DP(BNX2X_MSG_SP, "0x%04x: ", i + 1); |
3951 | } | ||
648 | } | 3952 | } |
3953 | |||
3954 | DP_CONT(BNX2X_MSG_SP, "\n"); | ||
649 | } | 3955 | } |
650 | 3956 | ||
651 | static void bnx2x_fill_cl_init_data(struct bnx2x *bp, | 3957 | /** |
652 | struct bnx2x_client_init_params *params, | 3958 | * bnx2x_setup_rss - configure RSS |
653 | u8 activate, | 3959 | * |
654 | struct client_init_ramrod_data *data) | 3960 | * @bp: device handle |
3961 | * @p: rss configuration | ||
3962 | * | ||
3963 | * Sends an UPDATE ramrod for that matter. | ||
3964 | */ | ||
3965 | static int bnx2x_setup_rss(struct bnx2x *bp, | ||
3966 | struct bnx2x_config_rss_params *p) | ||
655 | { | 3967 | { |
656 | /* Clear the buffer */ | 3968 | struct bnx2x_rss_config_obj *o = p->rss_obj; |
3969 | struct bnx2x_raw_obj *r = &o->raw; | ||
3970 | struct eth_rss_update_ramrod_data *data = | ||
3971 | (struct eth_rss_update_ramrod_data *)(r->rdata); | ||
3972 | u8 rss_mode = 0; | ||
3973 | int rc; | ||
3974 | |||
657 | memset(data, 0, sizeof(*data)); | 3975 | memset(data, 0, sizeof(*data)); |
658 | 3976 | ||
3977 | DP(BNX2X_MSG_SP, "Configuring RSS\n"); | ||
3978 | |||
3979 | /* Set an echo field */ | ||
3980 | data->echo = (r->cid & BNX2X_SWCID_MASK) | | ||
3981 | (r->state << BNX2X_SWCID_SHIFT); | ||
3982 | |||
3983 | /* RSS mode */ | ||
3984 | if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags)) | ||
3985 | rss_mode = ETH_RSS_MODE_DISABLED; | ||
3986 | else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags)) | ||
3987 | rss_mode = ETH_RSS_MODE_REGULAR; | ||
3988 | else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags)) | ||
3989 | rss_mode = ETH_RSS_MODE_VLAN_PRI; | ||
3990 | else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags)) | ||
3991 | rss_mode = ETH_RSS_MODE_E1HOV_PRI; | ||
3992 | else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags)) | ||
3993 | rss_mode = ETH_RSS_MODE_IP_DSCP; | ||
3994 | |||
3995 | data->rss_mode = rss_mode; | ||
3996 | |||
3997 | DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode); | ||
3998 | |||
3999 | /* RSS capabilities */ | ||
4000 | if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags)) | ||
4001 | data->capabilities |= | ||
4002 | ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; | ||
4003 | |||
4004 | if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags)) | ||
4005 | data->capabilities |= | ||
4006 | ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; | ||
4007 | |||
4008 | if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) | ||
4009 | data->capabilities |= | ||
4010 | ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; | ||
4011 | |||
4012 | if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags)) | ||
4013 | data->capabilities |= | ||
4014 | ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; | ||
4015 | |||
4016 | /* Hashing mask */ | ||
4017 | data->rss_result_mask = p->rss_result_mask; | ||
4018 | |||
4019 | /* RSS engine ID */ | ||
4020 | data->rss_engine_id = o->engine_id; | ||
4021 | |||
4022 | DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id); | ||
4023 | |||
4024 | /* Indirection table */ | ||
4025 | memcpy(data->indirection_table, p->ind_table, | ||
4026 | T_ETH_INDIRECTION_TABLE_SIZE); | ||
4027 | |||
4028 | /* Remember the last configuration */ | ||
4029 | memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); | ||
4030 | |||
4031 | /* Print the indirection table */ | ||
4032 | if (netif_msg_ifup(bp)) | ||
4033 | bnx2x_debug_print_ind_table(bp, p); | ||
4034 | |||
4035 | /* RSS keys */ | ||
4036 | if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { | ||
4037 | memcpy(&data->rss_key[0], &p->rss_key[0], | ||
4038 | sizeof(data->rss_key)); | ||
4039 | data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; | ||
4040 | } | ||
4041 | |||
4042 | /* Commit writes towards the memory before sending a ramrod */ | ||
4043 | mb(); | ||
4044 | |||
4045 | /* Send a ramrod */ | ||
4046 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid, | ||
4047 | U64_HI(r->rdata_mapping), | ||
4048 | U64_LO(r->rdata_mapping), | ||
4049 | ETH_CONNECTION_TYPE); | ||
4050 | |||
4051 | if (rc < 0) | ||
4052 | return rc; | ||
4053 | |||
4054 | return 1; | ||
4055 | } | ||
4056 | |||
4057 | void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, | ||
4058 | u8 *ind_table) | ||
4059 | { | ||
4060 | memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table)); | ||
4061 | } | ||
4062 | |||
4063 | int bnx2x_config_rss(struct bnx2x *bp, | ||
4064 | struct bnx2x_config_rss_params *p) | ||
4065 | { | ||
4066 | int rc; | ||
4067 | struct bnx2x_rss_config_obj *o = p->rss_obj; | ||
4068 | struct bnx2x_raw_obj *r = &o->raw; | ||
4069 | |||
4070 | /* Do nothing if only driver cleanup was requested */ | ||
4071 | if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) | ||
4072 | return 0; | ||
4073 | |||
4074 | r->set_pending(r); | ||
4075 | |||
4076 | rc = o->config_rss(bp, p); | ||
4077 | if (rc < 0) { | ||
4078 | r->clear_pending(r); | ||
4079 | return rc; | ||
4080 | } | ||
4081 | |||
4082 | if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) | ||
4083 | rc = r->wait_comp(bp, r); | ||
4084 | |||
4085 | return rc; | ||
4086 | } | ||
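A minimal caller-side sketch of driving the RSS object: fill a bnx2x_config_rss_params with the flags and table the code above consumes and hand it to bnx2x_config_rss(). The rss_result_mask value is a placeholder, and ind_table is assumed to be an inline array, as the memcpy in bnx2x_setup_rss() suggests (illustration only, not part of the patch):

static int example_enable_rss(struct bnx2x *bp,
                              struct bnx2x_rss_config_obj *rss_obj,
                              const u8 *ind_table)
{
        struct bnx2x_config_rss_params params = {0};

        params.rss_obj = rss_obj;
        __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

        /* Regular RSS over IPv4/IPv6, TCP included */
        __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
        __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
        __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
        __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
        __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);

        params.rss_result_mask = 0x7f;  /* placeholder hashing mask */
        memcpy(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

        return bnx2x_config_rss(bp, &params);
}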
4087 | |||
4088 | |||
4089 | void bnx2x_init_rss_config_obj(struct bnx2x *bp, | ||
4090 | struct bnx2x_rss_config_obj *rss_obj, | ||
4091 | u8 cl_id, u32 cid, u8 func_id, u8 engine_id, | ||
4092 | void *rdata, dma_addr_t rdata_mapping, | ||
4093 | int state, unsigned long *pstate, | ||
4094 | bnx2x_obj_type type) | ||
4095 | { | ||
4096 | bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata, | ||
4097 | rdata_mapping, state, pstate, type); | ||
4098 | |||
4099 | rss_obj->engine_id = engine_id; | ||
4100 | rss_obj->config_rss = bnx2x_setup_rss; | ||
4101 | } | ||
4102 | |||
4103 | /********************** Queue state object ***********************************/ | ||
4104 | |||
4105 | /** | ||
4106 | * bnx2x_queue_state_change - perform Queue state change transition | ||
4107 | * | ||
4108 | * @bp: device handle | ||
4109 | * @params: parameters to perform the transition | ||
4110 | * | ||
4111 | * returns 0 in case of successfully completed transition, negative error | ||
4112 | * code in case of failure, positive (EBUSY) value if there is a completion | ||
4113 | * that is still pending (possible only if RAMROD_COMP_WAIT is | ||
4114 | * not set in params->ramrod_flags for asynchronous commands). | ||
4115 | * | ||
4116 | */ | ||
4117 | int bnx2x_queue_state_change(struct bnx2x *bp, | ||
4118 | struct bnx2x_queue_state_params *params) | ||
4119 | { | ||
4120 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4121 | int rc, pending_bit; | ||
4122 | unsigned long *pending = &o->pending; | ||
4123 | |||
4124 | /* Check that the requested transition is legal */ | ||
4125 | if (o->check_transition(bp, o, params)) | ||
4126 | return -EINVAL; | ||
4127 | |||
4128 | /* Set "pending" bit */ | ||
4129 | pending_bit = o->set_pending(o, params); | ||
4130 | |||
4131 | /* Don't send a command if only driver cleanup was requested */ | ||
4132 | if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) | ||
4133 | o->complete_cmd(bp, o, pending_bit); | ||
4134 | else { | ||
4135 | /* Send a ramrod */ | ||
4136 | rc = o->send_cmd(bp, params); | ||
4137 | if (rc) { | ||
4138 | o->next_state = BNX2X_Q_STATE_MAX; | ||
4139 | clear_bit(pending_bit, pending); | ||
4140 | smp_mb__after_clear_bit(); | ||
4141 | return rc; | ||
4142 | } | ||
4143 | |||
4144 | if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { | ||
4145 | rc = o->wait_comp(bp, o, pending_bit); | ||
4146 | if (rc) | ||
4147 | return rc; | ||
4148 | |||
4149 | return 0; | ||
4150 | } | ||
4151 | } | ||
4152 | |||
4153 | return !!test_bit(pending_bit, pending); | ||
4154 | } | ||
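A minimal caller-side sketch of a synchronous state transition; the queue object initialization and the per-command parameters in params.params are assumed to be handled by the surrounding driver code (illustration only, not part of the patch):

static int example_queue_cmd(struct bnx2x *bp,
                             struct bnx2x_queue_sp_obj *q_obj,
                             enum bnx2x_queue_cmd cmd)
{
        struct bnx2x_queue_state_params params = {0};

        params.q_obj = q_obj;
        params.cmd = cmd;

        /* Block until the ramrod completes: 0 then means the transition
         * finished and a negative value is an error. Without
         * RAMROD_COMP_WAIT a positive return would mean the completion
         * is still pending.
         */
        __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

        return bnx2x_queue_state_change(bp, &params);
}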
4155 | |||
4156 | |||
4157 | static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj, | ||
4158 | struct bnx2x_queue_state_params *params) | ||
4159 | { | ||
4160 | enum bnx2x_queue_cmd cmd = params->cmd, bit; | ||
4161 | |||
4162 | /* ACTIVATE and DEACTIVATE commands are implemented on top of | ||
4163 | * UPDATE command. | ||
4164 | */ | ||
4165 | if ((cmd == BNX2X_Q_CMD_ACTIVATE) || | ||
4166 | (cmd == BNX2X_Q_CMD_DEACTIVATE)) | ||
4167 | bit = BNX2X_Q_CMD_UPDATE; | ||
4168 | else | ||
4169 | bit = cmd; | ||
4170 | |||
4171 | set_bit(bit, &obj->pending); | ||
4172 | return bit; | ||
4173 | } | ||
4174 | |||
4175 | static int bnx2x_queue_wait_comp(struct bnx2x *bp, | ||
4176 | struct bnx2x_queue_sp_obj *o, | ||
4177 | enum bnx2x_queue_cmd cmd) | ||
4178 | { | ||
4179 | return bnx2x_state_wait(bp, cmd, &o->pending); | ||
4180 | } | ||
4181 | |||
4182 | /** | ||
4183 | * bnx2x_queue_comp_cmd - complete the state change command. | ||
4184 | * | ||
4185 | * @bp: device handle | ||
4186 | * @o: | ||
4187 | * @cmd: | ||
4188 | * | ||
4189 | * Checks that the arrived completion is expected. | ||
4190 | */ | ||
4191 | static int bnx2x_queue_comp_cmd(struct bnx2x *bp, | ||
4192 | struct bnx2x_queue_sp_obj *o, | ||
4193 | enum bnx2x_queue_cmd cmd) | ||
4194 | { | ||
4195 | unsigned long cur_pending = o->pending; | ||
4196 | |||
4197 | if (!test_and_clear_bit(cmd, &cur_pending)) { | ||
4198 | BNX2X_ERR("Bad MC reply %d for queue %d in state %d " | ||
4199 | "pending 0x%lx, next_state %d\n", cmd, o->cid, | ||
4200 | o->state, cur_pending, o->next_state); | ||
4201 | return -EINVAL; | ||
4202 | } | ||
4203 | |||
4204 | DP(BNX2X_MSG_SP, "Completing command %d for queue %d, " | ||
4205 | "setting state to %d\n", cmd, o->cid, o->next_state); | ||
4206 | |||
4207 | o->state = o->next_state; | ||
4208 | o->next_state = BNX2X_Q_STATE_MAX; | ||
4209 | |||
4210 | /* It's important that o->state and o->next_state are | ||
4211 | * updated before o->pending. | ||
4212 | */ | ||
4213 | wmb(); | ||
4214 | |||
4215 | clear_bit(cmd, &o->pending); | ||
4216 | smp_mb__after_clear_bit(); | ||
4217 | |||
4218 | return 0; | ||
4219 | } | ||
4220 | |||
4221 | static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp, | ||
4222 | struct bnx2x_queue_state_params *cmd_params, | ||
4223 | struct client_init_ramrod_data *data) | ||
4224 | { | ||
4225 | struct bnx2x_queue_setup_params *params = &cmd_params->params.setup; | ||
4226 | |||
4227 | /* Rx data */ | ||
4228 | |||
4229 | /* IPv6 TPA supported for E2 and above only */ | ||
4230 | data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA, ¶ms->flags) * | ||
4231 | CLIENT_INIT_RX_DATA_TPA_EN_IPV6; | ||
4232 | } | ||
4233 | |||
4234 | static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp, | ||
4235 | struct bnx2x_queue_state_params *cmd_params, | ||
4236 | struct client_init_ramrod_data *data) | ||
4237 | { | ||
4238 | struct bnx2x_queue_sp_obj *o = cmd_params->q_obj; | ||
4239 | struct bnx2x_queue_setup_params *params = &cmd_params->params.setup; | ||
4240 | |||
4241 | |||
659 | /* general */ | 4242 | /* general */ |
660 | data->general.client_id = params->rxq_params.cl_id; | 4243 | data->general.client_id = o->cl_id; |
661 | data->general.statistics_counter_id = params->rxq_params.stat_id; | 4244 | |
662 | data->general.statistics_en_flg = | 4245 | if (test_bit(BNX2X_Q_FLG_STATS, ¶ms->flags)) { |
663 | (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0; | 4246 | data->general.statistics_counter_id = |
664 | data->general.is_fcoe_flg = | 4247 | params->gen_params.stat_id; |
665 | (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0; | 4248 | data->general.statistics_en_flg = 1; |
666 | data->general.activate_flg = activate; | 4249 | data->general.statistics_zero_flg = |
667 | data->general.sp_client_id = params->rxq_params.spcl_id; | 4250 | test_bit(BNX2X_Q_FLG_ZERO_STATS, ¶ms->flags); |
4251 | } else | ||
4252 | data->general.statistics_counter_id = | ||
4253 | DISABLE_STATISTIC_COUNTER_ID_VALUE; | ||
4254 | |||
4255 | data->general.is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, ¶ms->flags); | ||
4256 | data->general.activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, | ||
4257 | ¶ms->flags); | ||
4258 | data->general.sp_client_id = params->gen_params.spcl_id; | ||
4259 | data->general.mtu = cpu_to_le16(params->gen_params.mtu); | ||
4260 | data->general.func_id = o->func_id; | ||
4261 | |||
4262 | |||
4263 | data->general.cos = params->txq_params.cos; | ||
4264 | |||
4265 | data->general.traffic_type = | ||
4266 | test_bit(BNX2X_Q_FLG_FCOE, ¶ms->flags) ? | ||
4267 | LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; | ||
668 | 4268 | ||
669 | /* Rx data */ | 4269 | /* Rx data */ |
670 | data->rx.tpa_en_flg = | 4270 | data->rx.tpa_en = test_bit(BNX2X_Q_FLG_TPA, ¶ms->flags) * |
671 | (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0; | 4271 | CLIENT_INIT_RX_DATA_TPA_EN_IPV4; |
672 | data->rx.vmqueue_mode_en_flg = 0; | 4272 | data->rx.vmqueue_mode_en_flg = 0; |
4273 | |||
673 | data->rx.cache_line_alignment_log_size = | 4274 | data->rx.cache_line_alignment_log_size = |
674 | params->rxq_params.cache_line_log; | 4275 | params->rxq_params.cache_line_log; |
675 | data->rx.enable_dynamic_hc = | 4276 | data->rx.enable_dynamic_hc = |
676 | (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0; | 4277 | test_bit(BNX2X_Q_FLG_DHC, ¶ms->flags); |
677 | data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt; | 4278 | data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt; |
678 | data->rx.client_qzone_id = params->rxq_params.cl_qzone_id; | 4279 | data->rx.client_qzone_id = params->rxq_params.cl_qzone_id; |
679 | data->rx.max_agg_size = params->rxq_params.tpa_agg_sz; | 4280 | data->rx.max_agg_size = cpu_to_le16(params->rxq_params.tpa_agg_sz); |
4281 | |||
4282 | /* Always start in DROP_ALL mode */ | ||
4283 | data->rx.state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | | ||
4284 | CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); | ||
680 | 4285 | ||
681 | /* We don't set drop flags */ | 4286 | /* We don't set drop flags */ |
682 | data->rx.drop_ip_cs_err_flg = 0; | 4287 | data->rx.drop_ip_cs_err_flg = 0; |
683 | data->rx.drop_tcp_cs_err_flg = 0; | 4288 | data->rx.drop_tcp_cs_err_flg = 0; |
684 | data->rx.drop_ttl0_flg = 0; | 4289 | data->rx.drop_ttl0_flg = 0; |
685 | data->rx.drop_udp_cs_err_flg = 0; | 4290 | data->rx.drop_udp_cs_err_flg = 0; |
686 | |||
687 | data->rx.inner_vlan_removal_enable_flg = | 4291 | data->rx.inner_vlan_removal_enable_flg = |
688 | (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0; | 4292 | test_bit(BNX2X_Q_FLG_VLAN, ¶ms->flags); |
689 | data->rx.outer_vlan_removal_enable_flg = | 4293 | data->rx.outer_vlan_removal_enable_flg = |
690 | (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0; | 4294 | test_bit(BNX2X_Q_FLG_OV, ¶ms->flags); |
691 | data->rx.status_block_id = params->rxq_params.fw_sb_id; | 4295 | data->rx.status_block_id = params->rxq_params.fw_sb_id; |
692 | data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index; | 4296 | data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index; |
693 | data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz); | 4297 | data->rx.max_tpa_queues = params->rxq_params.max_tpa_queues; |
4298 | data->rx.max_bytes_on_bd = cpu_to_le16(params->rxq_params.buf_sz); | ||
694 | data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz); | 4299 | data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz); |
695 | data->rx.mtu = cpu_to_le16(params->rxq_params.mtu); | ||
696 | data->rx.bd_page_base.lo = | 4300 | data->rx.bd_page_base.lo = |
697 | cpu_to_le32(U64_LO(params->rxq_params.dscr_map)); | 4301 | cpu_to_le32(U64_LO(params->rxq_params.dscr_map)); |
698 | data->rx.bd_page_base.hi = | 4302 | data->rx.bd_page_base.hi = |
@@ -705,115 +4309,1025 @@ static void bnx2x_fill_cl_init_data(struct bnx2x *bp, | |||
705 | cpu_to_le32(U64_LO(params->rxq_params.rcq_map)); | 4309 | cpu_to_le32(U64_LO(params->rxq_params.rcq_map)); |
706 | data->rx.cqe_page_base.hi = | 4310 | data->rx.cqe_page_base.hi = |
707 | cpu_to_le32(U64_HI(params->rxq_params.rcq_map)); | 4311 | cpu_to_le32(U64_HI(params->rxq_params.rcq_map)); |
708 | data->rx.is_leading_rss = | 4312 | data->rx.is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, |
709 | (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0; | 4313 | ¶ms->flags); |
710 | data->rx.is_approx_mcast = data->rx.is_leading_rss; | 4314 | |
4315 | if (test_bit(BNX2X_Q_FLG_MCAST, ¶ms->flags)) { | ||
4316 | data->rx.approx_mcast_engine_id = o->func_id; | ||
4317 | data->rx.is_approx_mcast = 1; | ||
4318 | } | ||
4319 | |||
4320 | data->rx.rss_engine_id = params->rxq_params.rss_engine_id; | ||
4321 | |||
4322 | /* flow control data */ | ||
4323 | data->rx.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo); | ||
4324 | data->rx.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi); | ||
4325 | data->rx.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo); | ||
4326 | data->rx.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi); | ||
4327 | data->rx.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo); | ||
4328 | data->rx.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi); | ||
4329 | data->rx.rx_cos_mask = cpu_to_le16(params->pause.pri_map); | ||
4330 | |||
4331 | /* silent vlan removal */ | ||
4332 | data->rx.silent_vlan_removal_flg = | ||
4333 | test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, ¶ms->flags); | ||
4334 | data->rx.silent_vlan_value = | ||
4335 | cpu_to_le16(params->rxq_params.silent_removal_value); | ||
4336 | data->rx.silent_vlan_mask = | ||
4337 | cpu_to_le16(params->rxq_params.silent_removal_mask); | ||
711 | 4338 | ||
712 | /* Tx data */ | 4339 | /* Tx data */ |
713 | data->tx.enforce_security_flg = 0; /* VF specific */ | 4340 | data->tx.enforce_security_flg = |
4341 | test_bit(BNX2X_Q_FLG_TX_SEC, ¶ms->flags); | ||
4342 | data->tx.default_vlan = | ||
4343 | cpu_to_le16(params->txq_params.default_vlan); | ||
4344 | data->tx.default_vlan_flg = | ||
4345 | test_bit(BNX2X_Q_FLG_DEF_VLAN, ¶ms->flags); | ||
4346 | data->tx.tx_switching_flg = | ||
4347 | test_bit(BNX2X_Q_FLG_TX_SWITCH, ¶ms->flags); | ||
4348 | data->tx.anti_spoofing_flg = | ||
4349 | test_bit(BNX2X_Q_FLG_ANTI_SPOOF, ¶ms->flags); | ||
714 | data->tx.tx_status_block_id = params->txq_params.fw_sb_id; | 4350 | data->tx.tx_status_block_id = params->txq_params.fw_sb_id; |
715 | data->tx.tx_sb_index_number = params->txq_params.sb_cq_index; | 4351 | data->tx.tx_sb_index_number = params->txq_params.sb_cq_index; |
716 | data->tx.mtu = 0; /* VF specific */ | 4352 | data->tx.tss_leading_client_id = params->txq_params.tss_leading_cl_id; |
4353 | |||
717 | data->tx.tx_bd_page_base.lo = | 4354 | data->tx.tx_bd_page_base.lo = |
718 | cpu_to_le32(U64_LO(params->txq_params.dscr_map)); | 4355 | cpu_to_le32(U64_LO(params->txq_params.dscr_map)); |
719 | data->tx.tx_bd_page_base.hi = | 4356 | data->tx.tx_bd_page_base.hi = |
720 | cpu_to_le32(U64_HI(params->txq_params.dscr_map)); | 4357 | cpu_to_le32(U64_HI(params->txq_params.dscr_map)); |
721 | 4358 | ||
722 | /* flow control data */ | 4359 | /* Don't configure any Tx switching mode during queue SETUP */ |
723 | data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo); | 4360 | data->tx.state = 0; |
724 | data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi); | ||
725 | data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo); | ||
726 | data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi); | ||
727 | data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo); | ||
728 | data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi); | ||
729 | data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map); | ||
730 | |||
731 | data->fc.safc_group_num = params->txq_params.cos; | ||
732 | data->fc.safc_group_en_flg = | ||
733 | (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0; | ||
734 | data->fc.traffic_type = | ||
735 | (params->ramrod_params.flags & CLIENT_IS_FCOE) ? | ||
736 | LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; | ||
737 | } | 4361 | } |
738 | 4362 | ||
739 | 4363 | ||
740 | int bnx2x_setup_fw_client(struct bnx2x *bp, | 4364 | /** |
741 | struct bnx2x_client_init_params *params, | 4365 | * bnx2x_q_init - init HW/FW queue |
742 | u8 activate, | 4366 | * |
743 | struct client_init_ramrod_data *data, | 4367 | * @bp: device handle |
744 | dma_addr_t data_mapping) | 4368 | * @params: queue state change parameters |
4369 | * | ||
4370 | * HW/FW initial Queue configuration: | ||
4371 | * - HC: Rx and Tx | ||
4372 | * - CDU context validation | ||
4373 | * | ||
4374 | */ | ||
4375 | static inline int bnx2x_q_init(struct bnx2x *bp, | ||
4376 | struct bnx2x_queue_state_params *params) | ||
745 | { | 4377 | { |
4378 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4379 | struct bnx2x_queue_init_params *init = ¶ms->params.init; | ||
746 | u16 hc_usec; | 4380 | u16 hc_usec; |
747 | int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; | 4381 | |
748 | int ramrod_flags = 0, rc; | 4382 | /* Tx HC configuration */ |
749 | 4383 | if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) && | |
750 | /* HC and context validation values */ | 4384 | test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) { |
751 | hc_usec = params->txq_params.hc_rate ? | 4385 | hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0; |
752 | 1000000 / params->txq_params.hc_rate : 0; | 4386 | |
753 | bnx2x_update_coalesce_sb_index(bp, | 4387 | bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id, |
754 | params->txq_params.fw_sb_id, | 4388 | init->tx.sb_cq_index, |
755 | params->txq_params.sb_cq_index, | 4389 | !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags), |
756 | !(params->txq_params.flags & QUEUE_FLG_HC), | ||
757 | hc_usec); | 4390 | hc_usec); |
4391 | } | ||
758 | 4392 | ||
759 | *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING; | 4393 | /* Rx HC configuration */ |
4394 | if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) && | ||
4395 | test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) { | ||
4396 | hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0; | ||
760 | 4397 | ||
761 | hc_usec = params->rxq_params.hc_rate ? | 4398 | bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id, |
762 | 1000000 / params->rxq_params.hc_rate : 0; | 4399 | init->rx.sb_cq_index, |
763 | bnx2x_update_coalesce_sb_index(bp, | 4400 | !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags), |
764 | params->rxq_params.fw_sb_id, | ||
765 | params->rxq_params.sb_cq_index, | ||
766 | !(params->rxq_params.flags & QUEUE_FLG_HC), | ||
767 | hc_usec); | 4401 | hc_usec); |
4402 | } | ||
768 | 4403 | ||
769 | bnx2x_set_ctx_validation(params->rxq_params.cxt, | 4404 | /* Set CDU context validation values */ |
770 | params->rxq_params.cid); | 4405 | bnx2x_set_ctx_validation(bp, init->cxt, o->cid); |
771 | 4406 | ||
772 | /* zero stats */ | 4407 | /* As no ramrod is sent, complete the command immediately */ |
773 | if (params->txq_params.flags & QUEUE_FLG_STATS) | 4408 | o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT); |
774 | storm_memset_xstats_zero(bp, BP_PORT(bp), | ||
775 | params->txq_params.stat_id); | ||
776 | 4409 | ||
777 | if (params->rxq_params.flags & QUEUE_FLG_STATS) { | 4410 | mmiowb(); |
778 | storm_memset_ustats_zero(bp, BP_PORT(bp), | 4411 | smp_mb(); |
779 | params->rxq_params.stat_id); | 4412 | |
780 | storm_memset_tstats_zero(bp, BP_PORT(bp), | 4413 | return 0; |
781 | params->rxq_params.stat_id); | 4414 | } |
782 | } | 4415 | |
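For clarity on the coalescing setup in bnx2x_q_init() above: hc_rate is a requested interrupt rate in interrupts per second, and the driver turns it into a timeout in microseconds (hc_usec = 1000000 / hc_rate, so 8000 int/s maps to 125 us; a rate of 0 disables the timeout). The helper below is a hypothetical illustration of just that conversion and is not part of the patch.

/* Hypothetical helper mirroring the conversion in bnx2x_q_init(); the
 * driver stores the result in a u16, so very low rates (< 16 int/s)
 * would not fit and are not expected here.
 */
static u16 example_hc_rate_to_usec(u16 hc_rate)
{
	return hc_rate ? 1000000 / hc_rate : 0;	/* 8000 int/s -> 125 us */
}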
4416 | static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, | ||
4417 | struct bnx2x_queue_state_params *params) | ||
4418 | { | ||
4419 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4420 | struct client_init_ramrod_data *rdata = | ||
4421 | (struct client_init_ramrod_data *)o->rdata; | ||
4422 | dma_addr_t data_mapping = o->rdata_mapping; | ||
4423 | int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; | ||
4424 | |||
4425 | /* Clear the ramrod data */ | ||
4426 | memset(rdata, 0, sizeof(*rdata)); | ||
783 | 4427 | ||
784 | /* Fill the ramrod data */ | 4428 | /* Fill the ramrod data */ |
785 | bnx2x_fill_cl_init_data(bp, params, activate, data); | 4429 | bnx2x_q_fill_setup_data_cmn(bp, params, rdata); |
786 | 4430 | ||
787 | /* SETUP ramrod. | 4431 | mb(); |
788 | * | 4432 | |
789 | * bnx2x_sp_post() takes a spin_lock thus no other explict memory | 4433 | return bnx2x_sp_post(bp, ramrod, o->cid, U64_HI(data_mapping), |
790 | * barrier except from mmiowb() is needed to impose a | 4434 | U64_LO(data_mapping), ETH_CONNECTION_TYPE); |
791 | * proper ordering of memory operations. | 4435 | } |
4436 | |||
4437 | static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, | ||
4438 | struct bnx2x_queue_state_params *params) | ||
4439 | { | ||
4440 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4441 | struct client_init_ramrod_data *rdata = | ||
4442 | (struct client_init_ramrod_data *)o->rdata; | ||
4443 | dma_addr_t data_mapping = o->rdata_mapping; | ||
4444 | int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; | ||
4445 | |||
4446 | /* Clear the ramrod data */ | ||
4447 | memset(rdata, 0, sizeof(*rdata)); | ||
4448 | |||
4449 | /* Fill the ramrod data */ | ||
4450 | bnx2x_q_fill_setup_data_cmn(bp, params, rdata); | ||
4451 | bnx2x_q_fill_setup_data_e2(bp, params, rdata); | ||
4452 | |||
4453 | mb(); | ||
4454 | |||
4455 | return bnx2x_sp_post(bp, ramrod, o->cid, U64_HI(data_mapping), | ||
4456 | U64_LO(data_mapping), ETH_CONNECTION_TYPE); | ||
4457 | } | ||
4458 | |||
4459 | static void bnx2x_q_fill_update_data(struct bnx2x *bp, | ||
4460 | struct bnx2x_queue_sp_obj *obj, | ||
4461 | struct bnx2x_queue_update_params *params, | ||
4462 | struct client_update_ramrod_data *data) | ||
4463 | { | ||
4464 | /* Client ID of the client to update */ | ||
4465 | data->client_id = obj->cl_id; | ||
4466 | |||
4467 | /* Function ID of the client to update */ | ||
4468 | data->func_id = obj->func_id; | ||
4469 | |||
4470 | /* Default VLAN value */ | ||
4471 | data->default_vlan = cpu_to_le16(params->def_vlan); | ||
4472 | |||
4473 | /* Inner VLAN stripping */ | ||
4474 | data->inner_vlan_removal_enable_flg = | ||
4475 | test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, ¶ms->update_flags); | ||
4476 | data->inner_vlan_removal_change_flg = | ||
4477 | test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, | ||
4478 | ¶ms->update_flags); | ||
4479 | |||
4480 | /* Outer VLAN stripping */ | ||
4481 | data->outer_vlan_removal_enable_flg = | ||
4482 | test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags); | ||
4483 | data->outer_vlan_removal_change_flg = | ||
4484 | test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG, | ||
4485 | ¶ms->update_flags); | ||
4486 | |||
4487 | /* Drop packets that have a source MAC that doesn't belong to this | ||
4488 | * Queue. | ||
792 | */ | 4489 | */ |
793 | mmiowb(); | 4490 | data->anti_spoofing_enable_flg = |
4491 | test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, ¶ms->update_flags); | ||
4492 | data->anti_spoofing_change_flg = | ||
4493 | test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, ¶ms->update_flags); | ||
4494 | |||
4495 | /* Activate/Deactivate */ | ||
4496 | data->activate_flg = | ||
4497 | test_bit(BNX2X_Q_UPDATE_ACTIVATE, ¶ms->update_flags); | ||
4498 | data->activate_change_flg = | ||
4499 | test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, ¶ms->update_flags); | ||
4500 | |||
4501 | /* Enable default VLAN */ | ||
4502 | data->default_vlan_enable_flg = | ||
4503 | test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, ¶ms->update_flags); | ||
4504 | data->default_vlan_change_flg = | ||
4505 | test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, | ||
4506 | ¶ms->update_flags); | ||
4507 | |||
4508 | /* silent vlan removal */ | ||
4509 | data->silent_vlan_change_flg = | ||
4510 | test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, | ||
4511 | ¶ms->update_flags); | ||
4512 | data->silent_vlan_removal_flg = | ||
4513 | test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, ¶ms->update_flags); | ||
4514 | data->silent_vlan_value = cpu_to_le16(params->silent_removal_value); | ||
4515 | data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask); | ||
4516 | } | ||
4517 | |||
4518 | static inline int bnx2x_q_send_update(struct bnx2x *bp, | ||
4519 | struct bnx2x_queue_state_params *params) | ||
4520 | { | ||
4521 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4522 | struct client_update_ramrod_data *rdata = | ||
4523 | (struct client_update_ramrod_data *)o->rdata; | ||
4524 | dma_addr_t data_mapping = o->rdata_mapping; | ||
794 | 4525 | ||
4526 | /* Clear the ramrod data */ | ||
4527 | memset(rdata, 0, sizeof(*rdata)); | ||
795 | 4528 | ||
796 | bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid, | 4529 | /* Fill the ramrod data */ |
797 | U64_HI(data_mapping), U64_LO(data_mapping), 0); | 4530 | bnx2x_q_fill_update_data(bp, o, ¶ms->params.update, rdata); |
4531 | |||
4532 | mb(); | ||
4533 | |||
4534 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, o->cid, | ||
4535 | U64_HI(data_mapping), | ||
4536 | U64_LO(data_mapping), ETH_CONNECTION_TYPE); | ||
4537 | } | ||
4538 | |||
4539 | /** | ||
4540 | * bnx2x_q_send_deactivate - send DEACTIVATE command | ||
4541 | * | ||
4542 | * @bp: device handle | ||
4543 | * @params: queue state change parameters | ||
4544 | * | ||
4545 | * Implemented using the UPDATE command. | ||
4546 | */ | ||
4547 | static inline int bnx2x_q_send_deactivate(struct bnx2x *bp, | ||
4548 | struct bnx2x_queue_state_params *params) | ||
4549 | { | ||
4550 | struct bnx2x_queue_update_params *update = ¶ms->params.update; | ||
4551 | |||
4552 | memset(update, 0, sizeof(*update)); | ||
4553 | |||
4554 | __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); | ||
4555 | |||
4556 | return bnx2x_q_send_update(bp, params); | ||
4557 | } | ||
4558 | |||
4559 | /** | ||
4560 | * bnx2x_q_send_activate - send ACTIVATE command | ||
4561 | * | ||
4562 | * @bp: device handle | ||
4563 | * @params: queue state change parameters | ||
4564 | * | ||
4565 | * Implemented using the UPDATE command. | ||
4566 | */ | ||
4567 | static inline int bnx2x_q_send_activate(struct bnx2x *bp, | ||
4568 | struct bnx2x_queue_state_params *params) | ||
4569 | { | ||
4570 | struct bnx2x_queue_update_params *update = ¶ms->params.update; | ||
4571 | |||
4572 | memset(update, 0, sizeof(*update)); | ||
4573 | |||
4574 | __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags); | ||
4575 | __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); | ||
4576 | |||
4577 | return bnx2x_q_send_update(bp, params); | ||
4578 | } | ||
4579 | |||
4580 | static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, | ||
4581 | struct bnx2x_queue_state_params *params) | ||
4582 | { | ||
4583 | /* TODO: Not implemented yet. */ | ||
4584 | return -1; | ||
4585 | } | ||
4586 | |||
4587 | static inline int bnx2x_q_send_halt(struct bnx2x *bp, | ||
4588 | struct bnx2x_queue_state_params *params) | ||
4589 | { | ||
4590 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4591 | |||
4592 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, o->cid, 0, o->cl_id, | ||
4593 | ETH_CONNECTION_TYPE); | ||
4594 | } | ||
4595 | |||
4596 | static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp, | ||
4597 | struct bnx2x_queue_state_params *params) | ||
4598 | { | ||
4599 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4600 | |||
4601 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, o->cid, 0, 0, | ||
4602 | NONE_CONNECTION_TYPE); | ||
4603 | } | ||
4604 | |||
4605 | static inline int bnx2x_q_send_terminate(struct bnx2x *bp, | ||
4606 | struct bnx2x_queue_state_params *params) | ||
4607 | { | ||
4608 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4609 | |||
4610 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, o->cid, 0, 0, | ||
4611 | ETH_CONNECTION_TYPE); | ||
4612 | } | ||
4613 | |||
4614 | static inline int bnx2x_q_send_empty(struct bnx2x *bp, | ||
4615 | struct bnx2x_queue_state_params *params) | ||
4616 | { | ||
4617 | struct bnx2x_queue_sp_obj *o = params->q_obj; | ||
4618 | |||
4619 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, o->cid, 0, 0, | ||
4620 | ETH_CONNECTION_TYPE); | ||
4621 | } | ||
4622 | |||
4623 | static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp, | ||
4624 | struct bnx2x_queue_state_params *params) | ||
4625 | { | ||
4626 | switch (params->cmd) { | ||
4627 | case BNX2X_Q_CMD_INIT: | ||
4628 | return bnx2x_q_init(bp, params); | ||
4629 | case BNX2X_Q_CMD_DEACTIVATE: | ||
4630 | return bnx2x_q_send_deactivate(bp, params); | ||
4631 | case BNX2X_Q_CMD_ACTIVATE: | ||
4632 | return bnx2x_q_send_activate(bp, params); | ||
4633 | case BNX2X_Q_CMD_UPDATE: | ||
4634 | return bnx2x_q_send_update(bp, params); | ||
4635 | case BNX2X_Q_CMD_UPDATE_TPA: | ||
4636 | return bnx2x_q_send_update_tpa(bp, params); | ||
4637 | case BNX2X_Q_CMD_HALT: | ||
4638 | return bnx2x_q_send_halt(bp, params); | ||
4639 | case BNX2X_Q_CMD_CFC_DEL: | ||
4640 | return bnx2x_q_send_cfc_del(bp, params); | ||
4641 | case BNX2X_Q_CMD_TERMINATE: | ||
4642 | return bnx2x_q_send_terminate(bp, params); | ||
4643 | case BNX2X_Q_CMD_EMPTY: | ||
4644 | return bnx2x_q_send_empty(bp, params); | ||
4645 | default: | ||
4646 | BNX2X_ERR("Unknown command: %d\n", params->cmd); | ||
4647 | return -EINVAL; | ||
4648 | } | ||
4649 | } | ||
4650 | |||
4651 | static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp, | ||
4652 | struct bnx2x_queue_state_params *params) | ||
4653 | { | ||
4654 | switch (params->cmd) { | ||
4655 | case BNX2X_Q_CMD_SETUP: | ||
4656 | return bnx2x_q_send_setup_e1x(bp, params); | ||
4657 | case BNX2X_Q_CMD_INIT: | ||
4658 | case BNX2X_Q_CMD_DEACTIVATE: | ||
4659 | case BNX2X_Q_CMD_ACTIVATE: | ||
4660 | case BNX2X_Q_CMD_UPDATE: | ||
4661 | case BNX2X_Q_CMD_UPDATE_TPA: | ||
4662 | case BNX2X_Q_CMD_HALT: | ||
4663 | case BNX2X_Q_CMD_CFC_DEL: | ||
4664 | case BNX2X_Q_CMD_TERMINATE: | ||
4665 | case BNX2X_Q_CMD_EMPTY: | ||
4666 | return bnx2x_queue_send_cmd_cmn(bp, params); | ||
4667 | default: | ||
4668 | BNX2X_ERR("Unknown command: %d\n", params->cmd); | ||
4669 | return -EINVAL; | ||
4670 | } | ||
4671 | } | ||
4672 | |||
4673 | static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp, | ||
4674 | struct bnx2x_queue_state_params *params) | ||
4675 | { | ||
4676 | switch (params->cmd) { | ||
4677 | case BNX2X_Q_CMD_SETUP: | ||
4678 | return bnx2x_q_send_setup_e2(bp, params); | ||
4679 | case BNX2X_Q_CMD_INIT: | ||
4680 | case BNX2X_Q_CMD_DEACTIVATE: | ||
4681 | case BNX2X_Q_CMD_ACTIVATE: | ||
4682 | case BNX2X_Q_CMD_UPDATE: | ||
4683 | case BNX2X_Q_CMD_UPDATE_TPA: | ||
4684 | case BNX2X_Q_CMD_HALT: | ||
4685 | case BNX2X_Q_CMD_CFC_DEL: | ||
4686 | case BNX2X_Q_CMD_TERMINATE: | ||
4687 | case BNX2X_Q_CMD_EMPTY: | ||
4688 | return bnx2x_queue_send_cmd_cmn(bp, params); | ||
4689 | default: | ||
4690 | BNX2X_ERR("Unknown command: %d\n", params->cmd); | ||
4691 | return -EINVAL; | ||
4692 | } | ||
4693 | } | ||
4694 | |||
4695 | /** | ||
4696 | * bnx2x_queue_chk_transition - check state machine of a regular Queue | ||
4697 | * | ||
4698 | * @bp: device handle | ||
4699 | * @o: queue state object | ||
4700 | * @params: queue state change parameters | ||
4701 | * | ||
4702 | * Applies to regular Queues only (not to the Forwarding Queue). | ||
4703 | * It both checks if the requested command is legal in the current | ||
4704 | * state and, if it's legal, sets a `next_state' in the object | ||
4705 | * that will be used in the completion flow to set the `state' | ||
4706 | * of the object. | ||
4707 | * | ||
4708 | * returns 0 if a requested command is a legal transition, | ||
4709 | * -EINVAL otherwise. | ||
4710 | */ | ||
4711 | static int bnx2x_queue_chk_transition(struct bnx2x *bp, | ||
4712 | struct bnx2x_queue_sp_obj *o, | ||
4713 | struct bnx2x_queue_state_params *params) | ||
4714 | { | ||
4715 | enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX; | ||
4716 | enum bnx2x_queue_cmd cmd = params->cmd; | ||
4717 | |||
4718 | switch (state) { | ||
4719 | case BNX2X_Q_STATE_RESET: | ||
4720 | if (cmd == BNX2X_Q_CMD_INIT) | ||
4721 | next_state = BNX2X_Q_STATE_INITIALIZED; | ||
4722 | |||
4723 | break; | ||
4724 | case BNX2X_Q_STATE_INITIALIZED: | ||
4725 | if (cmd == BNX2X_Q_CMD_SETUP) { | ||
4726 | if (test_bit(BNX2X_Q_FLG_ACTIVE, | ||
4727 | ¶ms->params.setup.flags)) | ||
4728 | next_state = BNX2X_Q_STATE_ACTIVE; | ||
4729 | else | ||
4730 | next_state = BNX2X_Q_STATE_INACTIVE; | ||
4731 | } | ||
4732 | |||
4733 | break; | ||
4734 | case BNX2X_Q_STATE_ACTIVE: | ||
4735 | if (cmd == BNX2X_Q_CMD_DEACTIVATE) | ||
4736 | next_state = BNX2X_Q_STATE_INACTIVE; | ||
4737 | |||
4738 | else if ((cmd == BNX2X_Q_CMD_EMPTY) || | ||
4739 | (cmd == BNX2X_Q_CMD_UPDATE_TPA)) | ||
4740 | next_state = BNX2X_Q_STATE_ACTIVE; | ||
4741 | |||
4742 | else if (cmd == BNX2X_Q_CMD_HALT) | ||
4743 | next_state = BNX2X_Q_STATE_STOPPED; | ||
4744 | |||
4745 | else if (cmd == BNX2X_Q_CMD_UPDATE) { | ||
4746 | struct bnx2x_queue_update_params *update_params = | ||
4747 | ¶ms->params.update; | ||
4748 | |||
4749 | /* If "active" state change is requested, update the | ||
4750 | * state accordingly. | ||
4751 | */ | ||
4752 | if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, | ||
4753 | &update_params->update_flags) && | ||
4754 | !test_bit(BNX2X_Q_UPDATE_ACTIVATE, | ||
4755 | &update_params->update_flags)) | ||
4756 | next_state = BNX2X_Q_STATE_INACTIVE; | ||
4757 | else | ||
4758 | next_state = BNX2X_Q_STATE_ACTIVE; | ||
4759 | } | ||
4760 | |||
4761 | break; | ||
4762 | case BNX2X_Q_STATE_INACTIVE: | ||
4763 | if (cmd == BNX2X_Q_CMD_ACTIVATE) | ||
4764 | next_state = BNX2X_Q_STATE_ACTIVE; | ||
4765 | |||
4766 | else if ((cmd == BNX2X_Q_CMD_EMPTY) || | ||
4767 | (cmd == BNX2X_Q_CMD_UPDATE_TPA)) | ||
4768 | next_state = BNX2X_Q_STATE_INACTIVE; | ||
4769 | |||
4770 | else if (cmd == BNX2X_Q_CMD_HALT) | ||
4771 | next_state = BNX2X_Q_STATE_STOPPED; | ||
4772 | |||
4773 | else if (cmd == BNX2X_Q_CMD_UPDATE) { | ||
4774 | struct bnx2x_queue_update_params *update_params = | ||
4775 | ¶ms->params.update; | ||
4776 | |||
4777 | /* If "active" state change is requested, update the | ||
4778 | * state accordingly. | ||
4779 | */ | ||
4780 | if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, | ||
4781 | &update_params->update_flags) && | ||
4782 | test_bit(BNX2X_Q_UPDATE_ACTIVATE, | ||
4783 | &update_params->update_flags)) | ||
4784 | next_state = BNX2X_Q_STATE_ACTIVE; | ||
4785 | else | ||
4786 | next_state = BNX2X_Q_STATE_INACTIVE; | ||
4787 | } | ||
4788 | |||
4789 | break; | ||
4790 | case BNX2X_Q_STATE_STOPPED: | ||
4791 | if (cmd == BNX2X_Q_CMD_TERMINATE) | ||
4792 | next_state = BNX2X_Q_STATE_TERMINATED; | ||
4793 | |||
4794 | break; | ||
4795 | case BNX2X_Q_STATE_TERMINATED: | ||
4796 | if (cmd == BNX2X_Q_CMD_CFC_DEL) | ||
4797 | next_state = BNX2X_Q_STATE_RESET; | ||
4798 | |||
4799 | break; | ||
4800 | default: | ||
4801 | BNX2X_ERR("Illegal state: %d\n", state); | ||
4802 | } | ||
4803 | |||
4804 | /* Transition is assured */ | ||
4805 | if (next_state != BNX2X_Q_STATE_MAX) { | ||
4806 | DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n", | ||
4807 | state, cmd, next_state); | ||
4808 | o->next_state = next_state; | ||
4809 | return 0; | ||
4810 | } | ||
798 | 4811 | ||
799 | /* Wait for completion */ | 4812 | DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd); |
800 | rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state, | 4813 | |
801 | params->ramrod_params.index, | 4814 | return -EINVAL; |
802 | params->ramrod_params.pstate, | 4815 | } |
803 | ramrod_flags); | 4816 | |
4817 | void bnx2x_init_queue_obj(struct bnx2x *bp, | ||
4818 | struct bnx2x_queue_sp_obj *obj, | ||
4819 | u8 cl_id, u32 cid, u8 func_id, void *rdata, | ||
4820 | dma_addr_t rdata_mapping, unsigned long type) | ||
4821 | { | ||
4822 | memset(obj, 0, sizeof(*obj)); | ||
4823 | |||
4824 | obj->cid = cid; | ||
4825 | obj->cl_id = cl_id; | ||
4826 | obj->func_id = func_id; | ||
4827 | obj->rdata = rdata; | ||
4828 | obj->rdata_mapping = rdata_mapping; | ||
4829 | obj->type = type; | ||
4830 | obj->next_state = BNX2X_Q_STATE_MAX; | ||
4831 | |||
4832 | if (CHIP_IS_E1x(bp)) | ||
4833 | obj->send_cmd = bnx2x_queue_send_cmd_e1x; | ||
4834 | else | ||
4835 | obj->send_cmd = bnx2x_queue_send_cmd_e2; | ||
4836 | |||
4837 | obj->check_transition = bnx2x_queue_chk_transition; | ||
4838 | |||
4839 | obj->complete_cmd = bnx2x_queue_comp_cmd; | ||
4840 | obj->wait_comp = bnx2x_queue_wait_comp; | ||
4841 | obj->set_pending = bnx2x_queue_set_pending; | ||
4842 | } | ||
4843 | |||
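To tie the verbs above together, a caller first initializes the queue object and then drives it through the state machine one command at a time. The sketch below is hypothetical and not part of the hunk: bnx2x_queue_state_change() is assumed to be the queue-level analogue of bnx2x_func_state_change() shown later in this file, and the ramrod_flags field of bnx2x_queue_state_params is assumed to carry the generic RAMROD_* bits from bnx2x_sp.h.

/* Hypothetical caller-side sketch: bring a freshly created queue object
 * from RESET to INITIALIZED (BNX2X_Q_CMD_INIT sends no ramrod, see
 * bnx2x_q_init() above).
 */
static int example_queue_init(struct bnx2x *bp,
			      struct bnx2x_queue_sp_obj *q_obj,
			      u8 cl_id, u32 cid,
			      void *rdata, dma_addr_t rdata_mapping)
{
	struct bnx2x_queue_state_params q_params;
	unsigned long q_type = 0;

	memset(&q_params, 0, sizeof(q_params));

	/* A regular L2 queue has both an Rx and a Tx side */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	bnx2x_init_queue_obj(bp, q_obj, cl_id, cid, BP_FUNC(bp),
			     rdata, rdata_mapping, q_type);

	q_params.q_obj = q_obj;
	q_params.cmd = BNX2X_Q_CMD_INIT;
	/* q_params.params.init (HC rates, fw_sb_id, CDU context pointer)
	 * would be filled here before issuing the command.
	 */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);	/* assumed field */

	return bnx2x_queue_state_change(bp, &q_params);		/* assumed entry point */
}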
4844 | /********************** Function state object *********************************/ | ||
4845 | |||
4846 | static int bnx2x_func_wait_comp(struct bnx2x *bp, | ||
4847 | struct bnx2x_func_sp_obj *o, | ||
4848 | enum bnx2x_func_cmd cmd) | ||
4849 | { | ||
4850 | return bnx2x_state_wait(bp, cmd, &o->pending); | ||
4851 | } | ||
4852 | |||
4853 | /** | ||
4854 | * bnx2x_func_state_change_comp - complete the state machine transition | ||
4855 | * | ||
4856 | * @bp: device handle | ||
4857 | * @o: function state object | ||
4858 | * @cmd: command that has completed | ||
4859 | * | ||
4860 | * Called on state change transition. Completes the state | ||
4861 | * machine transition only - no HW interaction. | ||
4862 | */ | ||
4863 | static inline int bnx2x_func_state_change_comp(struct bnx2x *bp, | ||
4864 | struct bnx2x_func_sp_obj *o, | ||
4865 | enum bnx2x_func_cmd cmd) | ||
4866 | { | ||
4867 | unsigned long cur_pending = o->pending; | ||
4868 | |||
4869 | if (!test_and_clear_bit(cmd, &cur_pending)) { | ||
4870 | BNX2X_ERR("Bad MC reply %d for func %d in state %d " | ||
4871 | "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp), | ||
4872 | o->state, cur_pending, o->next_state); | ||
4873 | return -EINVAL; | ||
4874 | } | ||
4875 | |||
4876 | DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to " | ||
4877 | "%d\n", cmd, BP_FUNC(bp), o->next_state); | ||
4878 | |||
4879 | o->state = o->next_state; | ||
4880 | o->next_state = BNX2X_F_STATE_MAX; | ||
4881 | |||
4882 | /* It's important that o->state and o->next_state are | ||
4883 | * updated before o->pending. | ||
4884 | */ | ||
4885 | wmb(); | ||
4886 | |||
4887 | clear_bit(cmd, &o->pending); | ||
4888 | smp_mb__after_clear_bit(); | ||
4889 | |||
4890 | return 0; | ||
4891 | } | ||
4892 | |||
4893 | /** | ||
4894 | * bnx2x_func_comp_cmd - complete the state change command | ||
4895 | * | ||
4896 | * @bp: device handle | ||
4897 | * @o: function state object | ||
4898 | * @cmd: command that has completed | ||
4899 | * | ||
4900 | * Checks that the arrived completion is expected. | ||
4901 | */ | ||
4902 | static int bnx2x_func_comp_cmd(struct bnx2x *bp, | ||
4903 | struct bnx2x_func_sp_obj *o, | ||
4904 | enum bnx2x_func_cmd cmd) | ||
4905 | { | ||
4906 | /* Complete the state machine part first, check if it's a | ||
4907 | * legal completion. | ||
4908 | */ | ||
4909 | int rc = bnx2x_func_state_change_comp(bp, o, cmd); | ||
804 | return rc; | 4910 | return rc; |
805 | } | 4911 | } |
806 | 4912 | ||
807 | void bnx2x_push_indir_table(struct bnx2x *bp) | 4913 | /** |
4914 | * bnx2x_func_chk_transition - check the function state machine transition | ||
4915 | * | ||
4916 | * @bp: device handle | ||
4917 | * @o: function state object | ||
4918 | * @params: function state change parameters | ||
4919 | * | ||
4920 | * It both checks if the requested command is legal in the current | ||
4921 | * state and, if it's legal, sets a `next_state' in the object | ||
4922 | * that will be used in the completion flow to set the `state' | ||
4923 | * of the object. | ||
4924 | * | ||
4925 | * returns 0 if a requested command is a legal transition, | ||
4926 | * -EINVAL otherwise. | ||
4927 | */ | ||
4928 | static int bnx2x_func_chk_transition(struct bnx2x *bp, | ||
4929 | struct bnx2x_func_sp_obj *o, | ||
4930 | struct bnx2x_func_state_params *params) | ||
808 | { | 4931 | { |
809 | int func = BP_FUNC(bp); | 4932 | enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; |
810 | int i; | 4933 | enum bnx2x_func_cmd cmd = params->cmd; |
811 | 4934 | ||
812 | if (bp->multi_mode == ETH_RSS_MODE_DISABLED) | 4935 | switch (state) { |
813 | return; | 4936 | case BNX2X_F_STATE_RESET: |
4937 | if (cmd == BNX2X_F_CMD_HW_INIT) | ||
4938 | next_state = BNX2X_F_STATE_INITIALIZED; | ||
4939 | |||
4940 | break; | ||
4941 | case BNX2X_F_STATE_INITIALIZED: | ||
4942 | if (cmd == BNX2X_F_CMD_START) | ||
4943 | next_state = BNX2X_F_STATE_STARTED; | ||
4944 | |||
4945 | else if (cmd == BNX2X_F_CMD_HW_RESET) | ||
4946 | next_state = BNX2X_F_STATE_RESET; | ||
4947 | |||
4948 | break; | ||
4949 | case BNX2X_F_STATE_STARTED: | ||
4950 | if (cmd == BNX2X_F_CMD_STOP) | ||
4951 | next_state = BNX2X_F_STATE_INITIALIZED; | ||
4952 | |||
4953 | break; | ||
4954 | default: | ||
4955 | BNX2X_ERR("Unknown state: %d\n", state); | ||
4956 | } | ||
4957 | |||
4958 | /* Transition is assured */ | ||
4959 | if (next_state != BNX2X_F_STATE_MAX) { | ||
4960 | DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n", | ||
4961 | state, cmd, next_state); | ||
4962 | o->next_state = next_state; | ||
4963 | return 0; | ||
4964 | } | ||
4965 | |||
4966 | DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n", | ||
4967 | state, cmd); | ||
4968 | |||
4969 | return -EINVAL; | ||
4970 | } | ||
4971 | |||
4972 | /** | ||
4973 | * bnx2x_func_init_func - performs HW init at function stage | ||
4974 | * | ||
4975 | * @bp: device handle | ||
4976 | * @drv: driver specific operations | ||
4977 | * | ||
4978 | * Init HW when the current phase is | ||
4979 | * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only | ||
4980 | * HW blocks. | ||
4981 | */ | ||
4982 | static inline int bnx2x_func_init_func(struct bnx2x *bp, | ||
4983 | const struct bnx2x_func_sp_drv_ops *drv) | ||
4984 | { | ||
4985 | return drv->init_hw_func(bp); | ||
4986 | } | ||
4987 | |||
4988 | /** | ||
4989 | * bnx2x_func_init_port - performs HW init at port stage | ||
4990 | * | ||
4991 | * @bp: device handle | ||
4992 | * @drv: driver specific operations | ||
4993 | * | ||
4994 | * Init HW when the current phase is | ||
4995 | * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and | ||
4996 | * FUNCTION-only HW blocks. | ||
4997 | * | ||
4998 | */ | ||
4999 | static inline int bnx2x_func_init_port(struct bnx2x *bp, | ||
5000 | const struct bnx2x_func_sp_drv_ops *drv) | ||
5001 | { | ||
5002 | int rc = drv->init_hw_port(bp); | ||
5003 | if (rc) | ||
5004 | return rc; | ||
5005 | |||
5006 | return bnx2x_func_init_func(bp, drv); | ||
5007 | } | ||
5008 | |||
5009 | /** | ||
5010 | * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage | ||
5011 | * | ||
5012 | * @bp: device handle | ||
5013 | * @drv: driver specific operations | ||
5014 | * | ||
5015 | * Init HW when the current phase is | ||
5016 | * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP, | ||
5017 | * PORT-only and FUNCTION-only HW blocks. | ||
5018 | */ | ||
5019 | static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp, | ||
5020 | const struct bnx2x_func_sp_drv_ops *drv) | ||
5021 | { | ||
5022 | int rc = drv->init_hw_cmn_chip(bp); | ||
5023 | if (rc) | ||
5024 | return rc; | ||
5025 | |||
5026 | return bnx2x_func_init_port(bp, drv); | ||
5027 | } | ||
5028 | |||
5029 | /** | ||
5030 | * bnx2x_func_init_cmn - performs HW init at common stage | ||
5031 | * | ||
5032 | * @bp: device handle | ||
5033 | * @drv: driver specific operations | ||
5034 | * | ||
5035 | * Init HW when the current phase is | ||
5036 | * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON, | ||
5037 | * PORT-only and FUNCTION-only HW blocks. | ||
5038 | */ | ||
5039 | static inline int bnx2x_func_init_cmn(struct bnx2x *bp, | ||
5040 | const struct bnx2x_func_sp_drv_ops *drv) | ||
5041 | { | ||
5042 | int rc = drv->init_hw_cmn(bp); | ||
5043 | if (rc) | ||
5044 | return rc; | ||
5045 | |||
5046 | return bnx2x_func_init_port(bp, drv); | ||
5047 | } | ||
5048 | |||
5049 | static int bnx2x_func_hw_init(struct bnx2x *bp, | ||
5050 | struct bnx2x_func_state_params *params) | ||
5051 | { | ||
5052 | u32 load_code = params->params.hw_init.load_phase; | ||
5053 | struct bnx2x_func_sp_obj *o = params->f_obj; | ||
5054 | const struct bnx2x_func_sp_drv_ops *drv = o->drv; | ||
5055 | int rc = 0; | ||
5056 | |||
5057 | DP(BNX2X_MSG_SP, "function %d load_code %x\n", | ||
5058 | BP_ABS_FUNC(bp), load_code); | ||
5059 | |||
5060 | /* Prepare buffers for unzipping the FW */ | ||
5061 | rc = drv->gunzip_init(bp); | ||
5062 | if (rc) | ||
5063 | return rc; | ||
5064 | |||
5065 | /* Prepare FW */ | ||
5066 | rc = drv->init_fw(bp); | ||
5067 | if (rc) { | ||
5068 | BNX2X_ERR("Error loading firmware\n"); | ||
5069 | goto fw_init_err; | ||
5070 | } | ||
5071 | |||
5072 | /* Handle the beginning of COMMON_XXX phases separately... */ | ||
5073 | switch (load_code) { | ||
5074 | case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: | ||
5075 | rc = bnx2x_func_init_cmn_chip(bp, drv); | ||
5076 | if (rc) | ||
5077 | goto init_hw_err; | ||
5078 | |||
5079 | break; | ||
5080 | case FW_MSG_CODE_DRV_LOAD_COMMON: | ||
5081 | rc = bnx2x_func_init_cmn(bp, drv); | ||
5082 | if (rc) | ||
5083 | goto init_hw_err; | ||
5084 | |||
5085 | break; | ||
5086 | case FW_MSG_CODE_DRV_LOAD_PORT: | ||
5087 | rc = bnx2x_func_init_port(bp, drv); | ||
5088 | if (rc) | ||
5089 | goto init_hw_err; | ||
5090 | |||
5091 | break; | ||
5092 | case FW_MSG_CODE_DRV_LOAD_FUNCTION: | ||
5093 | rc = bnx2x_func_init_func(bp, drv); | ||
5094 | if (rc) | ||
5095 | goto init_hw_err; | ||
5096 | |||
5097 | break; | ||
5098 | default: | ||
5099 | BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); | ||
5100 | rc = -EINVAL; | ||
5101 | } | ||
5102 | |||
5103 | init_hw_err: | ||
5104 | drv->release_fw(bp); | ||
5105 | |||
5106 | fw_init_err: | ||
5107 | drv->gunzip_end(bp); | ||
5108 | |||
5109 | /* In case of success, complete the command immediately: no ramrods | ||
5110 | * have been sent. | ||
5111 | */ | ||
5112 | if (!rc) | ||
5113 | o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT); | ||
5114 | |||
5115 | return rc; | ||
5116 | } | ||
5117 | |||
5118 | /** | ||
5119 | * bnx2x_func_reset_func - reset HW at function stage | ||
5120 | * | ||
5121 | * @bp: device handle | ||
5122 | * @drv: driver specific operations | ||
5123 | * | ||
5124 | * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only | ||
5125 | * FUNCTION-only HW blocks. | ||
5126 | */ | ||
5127 | static inline void bnx2x_func_reset_func(struct bnx2x *bp, | ||
5128 | const struct bnx2x_func_sp_drv_ops *drv) | ||
5129 | { | ||
5130 | drv->reset_hw_func(bp); | ||
5131 | } | ||
5132 | |||
5133 | /** | ||
5134 | * bnx2x_func_reset_port - reset HW at port stage | ||
5135 | * | ||
5136 | * @bp: device handle | ||
5137 | * @drv: driver specific operations | ||
5138 | * | ||
5139 | * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset | ||
5140 | * FUNCTION-only and PORT-only HW blocks. | ||
5141 | * | ||
5142 | * !!!IMPORTANT!!! | ||
5143 | * | ||
5144 | * It's important to call reset_port before reset_func() as the last thing | ||
5145 | * reset_func does is pf_disable() thus disabling PGLUE_B, which | ||
5146 | * makes any DMAE transactions impossible. | ||
5147 | */ | ||
5148 | static inline void bnx2x_func_reset_port(struct bnx2x *bp, | ||
5149 | const struct bnx2x_func_sp_drv_ops *drv) | ||
5150 | { | ||
5151 | drv->reset_hw_port(bp); | ||
5152 | bnx2x_func_reset_func(bp, drv); | ||
5153 | } | ||
5154 | |||
5155 | /** | ||
5156 | * bnx2x_func_reset_cmn - reset HW at common stage | ||
5157 | * | ||
5158 | * @bp: device handle | ||
5159 | * @drv: driver specific operations | ||
5160 | * | ||
5161 | * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and | ||
5162 | * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON, | ||
5163 | * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks. | ||
5164 | */ | ||
5165 | static inline void bnx2x_func_reset_cmn(struct bnx2x *bp, | ||
5166 | const struct bnx2x_func_sp_drv_ops *drv) | ||
5167 | { | ||
5168 | bnx2x_func_reset_port(bp, drv); | ||
5169 | drv->reset_hw_cmn(bp); | ||
5170 | } | ||
5171 | |||
5172 | |||
5173 | static inline int bnx2x_func_hw_reset(struct bnx2x *bp, | ||
5174 | struct bnx2x_func_state_params *params) | ||
5175 | { | ||
5176 | u32 reset_phase = params->params.hw_reset.reset_phase; | ||
5177 | struct bnx2x_func_sp_obj *o = params->f_obj; | ||
5178 | const struct bnx2x_func_sp_drv_ops *drv = o->drv; | ||
5179 | |||
5180 | DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp), | ||
5181 | reset_phase); | ||
5182 | |||
5183 | switch (reset_phase) { | ||
5184 | case FW_MSG_CODE_DRV_UNLOAD_COMMON: | ||
5185 | bnx2x_func_reset_cmn(bp, drv); | ||
5186 | break; | ||
5187 | case FW_MSG_CODE_DRV_UNLOAD_PORT: | ||
5188 | bnx2x_func_reset_port(bp, drv); | ||
5189 | break; | ||
5190 | case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: | ||
5191 | bnx2x_func_reset_func(bp, drv); | ||
5192 | break; | ||
5193 | default: | ||
5194 | BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n", | ||
5195 | reset_phase); | ||
5196 | break; | ||
5197 | } | ||
5198 | |||
5199 | /* Complete the command immediately: no ramrods have been sent. */ | ||
5200 | o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); | ||
5201 | |||
5202 | return 0; | ||
5203 | } | ||
5204 | |||
5205 | static inline int bnx2x_func_send_start(struct bnx2x *bp, | ||
5206 | struct bnx2x_func_state_params *params) | ||
5207 | { | ||
5208 | struct bnx2x_func_sp_obj *o = params->f_obj; | ||
5209 | struct function_start_data *rdata = | ||
5210 | (struct function_start_data *)o->rdata; | ||
5211 | dma_addr_t data_mapping = o->rdata_mapping; | ||
5212 | struct bnx2x_func_start_params *start_params = ¶ms->params.start; | ||
5213 | |||
5214 | memset(rdata, 0, sizeof(*rdata)); | ||
5215 | |||
5216 | /* Fill the ramrod data with provided parameters */ | ||
5217 | rdata->function_mode = cpu_to_le16(start_params->mf_mode); | ||
5218 | rdata->sd_vlan_tag = start_params->sd_vlan_tag; | ||
5219 | rdata->path_id = BP_PATH(bp); | ||
5220 | rdata->network_cos_mode = start_params->network_cos_mode; | ||
5221 | |||
5222 | mb(); | ||
5223 | |||
5224 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, | ||
5225 | U64_HI(data_mapping), | ||
5226 | U64_LO(data_mapping), NONE_CONNECTION_TYPE); | ||
5227 | } | ||
5228 | |||
5229 | static inline int bnx2x_func_send_stop(struct bnx2x *bp, | ||
5230 | struct bnx2x_func_state_params *params) | ||
5231 | { | ||
5232 | return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, | ||
5233 | NONE_CONNECTION_TYPE); | ||
5234 | } | ||
5235 | |||
5236 | static int bnx2x_func_send_cmd(struct bnx2x *bp, | ||
5237 | struct bnx2x_func_state_params *params) | ||
5238 | { | ||
5239 | switch (params->cmd) { | ||
5240 | case BNX2X_F_CMD_HW_INIT: | ||
5241 | return bnx2x_func_hw_init(bp, params); | ||
5242 | case BNX2X_F_CMD_START: | ||
5243 | return bnx2x_func_send_start(bp, params); | ||
5244 | case BNX2X_F_CMD_STOP: | ||
5245 | return bnx2x_func_send_stop(bp, params); | ||
5246 | case BNX2X_F_CMD_HW_RESET: | ||
5247 | return bnx2x_func_hw_reset(bp, params); | ||
5248 | default: | ||
5249 | BNX2X_ERR("Unknown command: %d\n", params->cmd); | ||
5250 | return -EINVAL; | ||
5251 | } | ||
5252 | } | ||
5253 | |||
5254 | void bnx2x_init_func_obj(struct bnx2x *bp, | ||
5255 | struct bnx2x_func_sp_obj *obj, | ||
5256 | void *rdata, dma_addr_t rdata_mapping, | ||
5257 | struct bnx2x_func_sp_drv_ops *drv_iface) | ||
5258 | { | ||
5259 | memset(obj, 0, sizeof(*obj)); | ||
5260 | |||
5261 | mutex_init(&obj->one_pending_mutex); | ||
5262 | |||
5263 | obj->rdata = rdata; | ||
5264 | obj->rdata_mapping = rdata_mapping; | ||
5265 | |||
5266 | obj->send_cmd = bnx2x_func_send_cmd; | ||
5267 | obj->check_transition = bnx2x_func_chk_transition; | ||
5268 | obj->complete_cmd = bnx2x_func_comp_cmd; | ||
5269 | obj->wait_comp = bnx2x_func_wait_comp; | ||
5270 | |||
5271 | obj->drv = drv_iface; | ||
5272 | } | ||
5273 | |||
5274 | /** | ||
5275 | * bnx2x_func_state_change - perform Function state change transition | ||
5276 | * | ||
5277 | * @bp: device handle | ||
5278 | * @params: parameters to perform the transaction | ||
5279 | * | ||
5280 | * returns 0 in case of successfully completed transition, | ||
5281 | * negative error code in case of failure, positive | ||
5282 | * (EBUSY) value if there is a completion that is | ||
5283 | * still pending (possible only if RAMROD_COMP_WAIT is | ||
5284 | * not set in params->ramrod_flags for asynchronous | ||
5285 | * commands). | ||
5286 | */ | ||
5287 | int bnx2x_func_state_change(struct bnx2x *bp, | ||
5288 | struct bnx2x_func_state_params *params) | ||
5289 | { | ||
5290 | struct bnx2x_func_sp_obj *o = params->f_obj; | ||
5291 | int rc; | ||
5292 | enum bnx2x_func_cmd cmd = params->cmd; | ||
5293 | unsigned long *pending = &o->pending; | ||
5294 | |||
5295 | mutex_lock(&o->one_pending_mutex); | ||
5296 | |||
5297 | /* Check that the requested transition is legal */ | ||
5298 | if (o->check_transition(bp, o, params)) { | ||
5299 | mutex_unlock(&o->one_pending_mutex); | ||
5300 | return -EINVAL; | ||
5301 | } | ||
5302 | |||
5303 | /* Set "pending" bit */ | ||
5304 | set_bit(cmd, pending); | ||
5305 | |||
5306 | /* Don't send a command if only driver cleanup was requested */ | ||
5307 | if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { | ||
5308 | bnx2x_func_state_change_comp(bp, o, cmd); | ||
5309 | mutex_unlock(&o->one_pending_mutex); | ||
5310 | } else { | ||
5311 | /* Send a ramrod */ | ||
5312 | rc = o->send_cmd(bp, params); | ||
5313 | |||
5314 | mutex_unlock(&o->one_pending_mutex); | ||
5315 | |||
5316 | if (rc) { | ||
5317 | o->next_state = BNX2X_F_STATE_MAX; | ||
5318 | clear_bit(cmd, pending); | ||
5319 | smp_mb__after_clear_bit(); | ||
5320 | return rc; | ||
5321 | } | ||
5322 | |||
5323 | if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { | ||
5324 | rc = o->wait_comp(bp, o, cmd); | ||
5325 | if (rc) | ||
5326 | return rc; | ||
5327 | |||
5328 | return 0; | ||
5329 | } | ||
5330 | } | ||
814 | 5331 | ||
815 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) | 5332 | return !!test_bit(cmd, pending); |
816 | REG_WR8(bp, BAR_TSTRORM_INTMEM + | ||
817 | TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, | ||
818 | bp->fp->cl_id + bp->rx_indir_table[i]); | ||
819 | } | 5333 | } |
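As a usage illustration of bnx2x_func_state_change() above: a caller fills a bnx2x_func_state_params structure, picks a command, and sets RAMROD_COMP_WAIT when synchronous behaviour is wanted. The sketch below is hypothetical and not part of the patch; the function object is simply passed in as a parameter, since its location inside struct bnx2x is not shown in this hunk. All field and flag names used here (f_obj, cmd, params.hw_init.load_phase, ramrod_flags) appear in the code above.

/* Hypothetical caller-side sketch: run the HW_INIT command for the given
 * load phase and wait for its completion (HW_INIT sends no ramrod, so
 * the command completes immediately).
 */
static int example_func_hw_init(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *func_obj,
				u32 load_code)
{
	struct bnx2x_func_state_params func_params;

	memset(&func_params, 0, sizeof(func_params));

	func_params.f_obj = func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;
	func_params.params.hw_init.load_phase = load_code;

	/* Block until the command is completed */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	return bnx2x_func_state_change(bp, &func_params);
}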
diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/bnx2x/bnx2x_sp.h index f9b755e4a108..86eaa80721ea 100644 --- a/drivers/net/bnx2x/bnx2x_sp.h +++ b/drivers/net/bnx2x/bnx2x_sp.h | |||
@@ -1,43 +1,1235 @@ | |||
1 | #ifndef BNX2X_SP | 1 | /* bnx2x_sp.h: Broadcom Everest network driver. |
2 | #define BNX2X_SP | 2 | * |
3 | 3 | * Copyright 2011 Broadcom Corporation | |
4 | #include "bnx2x_reg.h" | 4 | * |
5 | 5 | * Unless you and Broadcom execute a separate written software license | |
6 | /* MAC configuration */ | 6 | * agreement governing use of this software, this software is licensed to you |
7 | void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac, | 7 | * under the terms of the GNU General Public License version 2, available |
8 | u32 cl_bit_vec, u8 cam_offset, | 8 | * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). |
9 | u8 is_bcast); | 9 | * |
10 | 10 | * Notwithstanding the above, under no circumstances may you combine this | |
11 | /* Multicast */ | 11 | * software in any way with any other Broadcom software provided under a |
12 | void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp); | 12 | * license other than the GPL, without Broadcom's express prior written |
13 | void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp); | 13 | * consent. |
14 | int bnx2x_set_e1_mc_list(struct bnx2x *bp); | 14 | * |
15 | int bnx2x_set_e1h_mc_list(struct bnx2x *bp); | 15 | * Maintained by: Eilon Greenstein <eilong@broadcom.com> |
16 | 16 | * Written by: Vladislav Zolotarov | |
17 | /* Rx mode */ | 17 | * |
18 | void bnx2x_set_storm_rx_mode(struct bnx2x *bp); | 18 | */ |
19 | void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters); | 19 | #ifndef BNX2X_SP_VERBS |
20 | 20 | #define BNX2X_SP_VERBS | |
21 | /* RSS configuration */ | 21 | |
22 | void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p); | 22 | struct bnx2x; |
23 | void bnx2x_push_indir_table(struct bnx2x *bp); | 23 | struct eth_context; |
24 | 24 | ||
25 | /* Queue configuration */ | 25 | /* Bits representing general command's configuration */ |
26 | static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid) | 26 | enum { |
27 | { | 27 | RAMROD_TX, |
28 | /* ustorm cxt validation */ | 28 | RAMROD_RX, |
29 | cxt->ustorm_ag_context.cdu_usage = | 29 | /* Wait until all pending commands complete */ |
30 | CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG, | 30 | RAMROD_COMP_WAIT, |
31 | ETH_CONNECTION_TYPE); | 31 | /* Don't send a ramrod, only update a registry */ |
32 | /* xcontext validation */ | 32 | RAMROD_DRV_CLR_ONLY, |
33 | cxt->xstorm_ag_context.cdu_reserved = | 33 | /* Configure HW according to the current object state */ |
34 | CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG, | 34 | RAMROD_RESTORE, |
35 | ETH_CONNECTION_TYPE); | 35 | /* Execute the next command now */ |
36 | } | 36 | RAMROD_EXEC, |
37 | 37 | /* | |
38 | int bnx2x_setup_fw_client(struct bnx2x *bp, | 38 | * Don't add a new command and continue execution of postponed |
39 | struct bnx2x_client_init_params *params, | 39 | * commands. If not set, a new command will be added to the |
40 | u8 activate, | 40 | * pending commands list. |
41 | struct client_init_ramrod_data *data, | 41 | */ |
42 | dma_addr_t data_mapping); | 42 | RAMROD_CONT, |
43 | #endif /* BNX2X_SP */ | 43 | }; |
44 | |||
45 | typedef enum { | ||
46 | BNX2X_OBJ_TYPE_RX, | ||
47 | BNX2X_OBJ_TYPE_TX, | ||
48 | BNX2X_OBJ_TYPE_RX_TX, | ||
49 | } bnx2x_obj_type; | ||
50 | |||
51 | /* Filtering states */ | ||
52 | enum { | ||
53 | BNX2X_FILTER_MAC_PENDING, | ||
54 | BNX2X_FILTER_VLAN_PENDING, | ||
55 | BNX2X_FILTER_VLAN_MAC_PENDING, | ||
56 | BNX2X_FILTER_RX_MODE_PENDING, | ||
57 | BNX2X_FILTER_RX_MODE_SCHED, | ||
58 | BNX2X_FILTER_ISCSI_ETH_START_SCHED, | ||
59 | BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, | ||
60 | BNX2X_FILTER_FCOE_ETH_START_SCHED, | ||
61 | BNX2X_FILTER_FCOE_ETH_STOP_SCHED, | ||
62 | BNX2X_FILTER_MCAST_PENDING, | ||
63 | BNX2X_FILTER_MCAST_SCHED, | ||
64 | BNX2X_FILTER_RSS_CONF_PENDING, | ||
65 | }; | ||
66 | |||
67 | struct bnx2x_raw_obj { | ||
68 | u8 func_id; | ||
69 | |||
70 | /* Queue params */ | ||
71 | u8 cl_id; | ||
72 | u32 cid; | ||
73 | |||
74 | /* Ramrod data buffer params */ | ||
75 | void *rdata; | ||
76 | dma_addr_t rdata_mapping; | ||
77 | |||
78 | /* Ramrod state params */ | ||
79 | int state; /* "ramrod is pending" state bit */ | ||
80 | unsigned long *pstate; /* pointer to state buffer */ | ||
81 | |||
82 | bnx2x_obj_type obj_type; | ||
83 | |||
84 | int (*wait_comp)(struct bnx2x *bp, | ||
85 | struct bnx2x_raw_obj *o); | ||
86 | |||
87 | bool (*check_pending)(struct bnx2x_raw_obj *o); | ||
88 | void (*clear_pending)(struct bnx2x_raw_obj *o); | ||
89 | void (*set_pending)(struct bnx2x_raw_obj *o); | ||
90 | }; | ||
91 | |||
92 | /************************* VLAN-MAC commands related parameters ***************/ | ||
93 | struct bnx2x_mac_ramrod_data { | ||
94 | u8 mac[ETH_ALEN]; | ||
95 | }; | ||
96 | |||
97 | struct bnx2x_vlan_ramrod_data { | ||
98 | u16 vlan; | ||
99 | }; | ||
100 | |||
101 | struct bnx2x_vlan_mac_ramrod_data { | ||
102 | u8 mac[ETH_ALEN]; | ||
103 | u16 vlan; | ||
104 | }; | ||
105 | |||
106 | union bnx2x_classification_ramrod_data { | ||
107 | struct bnx2x_mac_ramrod_data mac; | ||
108 | struct bnx2x_vlan_ramrod_data vlan; | ||
109 | struct bnx2x_vlan_mac_ramrod_data vlan_mac; | ||
110 | }; | ||
111 | |||
112 | /* VLAN_MAC commands */ | ||
113 | enum bnx2x_vlan_mac_cmd { | ||
114 | BNX2X_VLAN_MAC_ADD, | ||
115 | BNX2X_VLAN_MAC_DEL, | ||
116 | BNX2X_VLAN_MAC_MOVE, | ||
117 | }; | ||
118 | |||
119 | struct bnx2x_vlan_mac_data { | ||
120 | /* Requested command: BNX2X_VLAN_MAC_XX */ | ||
121 | enum bnx2x_vlan_mac_cmd cmd; | ||
122 | /* | ||
123 | * Used to hold the vlan_mac_flags bits related to this data, | ||
124 | * taken from the ramrod parameters. | ||
125 | */ | ||
126 | unsigned long vlan_mac_flags; | ||
127 | |||
128 | /* Needed for MOVE command */ | ||
129 | struct bnx2x_vlan_mac_obj *target_obj; | ||
130 | |||
131 | union bnx2x_classification_ramrod_data u; | ||
132 | }; | ||
133 | |||
134 | /*************************** Exe Queue obj ************************************/ | ||
135 | union bnx2x_exe_queue_cmd_data { | ||
136 | struct bnx2x_vlan_mac_data vlan_mac; | ||
137 | |||
138 | struct { | ||
139 | /* TODO */ | ||
140 | } mcast; | ||
141 | }; | ||
142 | |||
143 | struct bnx2x_exeq_elem { | ||
144 | struct list_head link; | ||
145 | |||
146 | /* Length of this element in the exe_chunk. */ | ||
147 | int cmd_len; | ||
148 | |||
149 | union bnx2x_exe_queue_cmd_data cmd_data; | ||
150 | }; | ||
151 | |||
152 | union bnx2x_qable_obj; | ||
153 | |||
154 | union bnx2x_exeq_comp_elem { | ||
155 | union event_ring_elem *elem; | ||
156 | }; | ||
157 | |||
158 | struct bnx2x_exe_queue_obj; | ||
159 | |||
160 | typedef int (*exe_q_validate)(struct bnx2x *bp, | ||
161 | union bnx2x_qable_obj *o, | ||
162 | struct bnx2x_exeq_elem *elem); | ||
163 | |||
164 | /** | ||
165 | * @return positive if the entry was optimized, 0 if not, negative | ||
166 | * in case of an error. | ||
167 | */ | ||
168 | typedef int (*exe_q_optimize)(struct bnx2x *bp, | ||
169 | union bnx2x_qable_obj *o, | ||
170 | struct bnx2x_exeq_elem *elem); | ||
171 | typedef int (*exe_q_execute)(struct bnx2x *bp, | ||
172 | union bnx2x_qable_obj *o, | ||
173 | struct list_head *exe_chunk, | ||
174 | unsigned long *ramrod_flags); | ||
175 | typedef struct bnx2x_exeq_elem * | ||
176 | (*exe_q_get)(struct bnx2x_exe_queue_obj *o, | ||
177 | struct bnx2x_exeq_elem *elem); | ||
178 | |||
179 | struct bnx2x_exe_queue_obj { | ||
180 | /* | ||
181 | * Commands pending for execution. | ||
182 | */ | ||
183 | struct list_head exe_queue; | ||
184 | |||
185 | /* | ||
186 | * Commands pending for completion. | ||
187 | */ | ||
188 | struct list_head pending_comp; | ||
189 | |||
190 | spinlock_t lock; | ||
191 | |||
192 | /* Maximum length of commands' list for one execution */ | ||
193 | int exe_chunk_len; | ||
194 | |||
195 | union bnx2x_qable_obj *owner; | ||
196 | |||
197 | /****** Virtual functions ******/ | ||
198 | /** | ||
199 | * Called before commands execution for commands that are really | ||
200 | * going to be executed (after 'optimize'). | ||
201 | * | ||
202 | * Must run under exe_queue->lock | ||
203 | */ | ||
204 | exe_q_validate validate; | ||
205 | |||
206 | |||
207 | /** | ||
208 | * This will try to cancel the current pending commands list | ||
209 | * considering the new command. | ||
210 | * | ||
211 | * Must run under exe_queue->lock | ||
212 | */ | ||
213 | exe_q_optimize optimize; | ||
214 | |||
215 | /** | ||
216 | * Run the next commands chunk (owner specific). | ||
217 | */ | ||
218 | exe_q_execute execute; | ||
219 | |||
220 | /** | ||
221 | * Return the exe_queue element containing the specific command | ||
222 | * if any. Otherwise return NULL. | ||
223 | */ | ||
224 | exe_q_get get; | ||
225 | }; | ||
226 | /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ | ||
227 | /* | ||
228 | * Element in the VLAN_MAC registry list having all currently configured | ||
229 | * rules. | ||
230 | */ | ||
231 | struct bnx2x_vlan_mac_registry_elem { | ||
232 | struct list_head link; | ||
233 | |||
234 | /* | ||
235 | * Used to store the cam offset used for the mac/vlan/vlan-mac. | ||
236 | * Relevant for 57710 and 57711 only. VLANs and MACs share the | ||
237 | * same CAM for these chips. | ||
238 | */ | ||
239 | int cam_offset; | ||
240 | |||
241 | /* Needed for DEL and RESTORE flows */ | ||
242 | unsigned long vlan_mac_flags; | ||
243 | |||
244 | union bnx2x_classification_ramrod_data u; | ||
245 | }; | ||
246 | |||
247 | /* Bits representing VLAN_MAC commands specific flags */ | ||
248 | enum { | ||
249 | BNX2X_UC_LIST_MAC, | ||
250 | BNX2X_ETH_MAC, | ||
251 | BNX2X_ISCSI_ETH_MAC, | ||
252 | BNX2X_NETQ_ETH_MAC, | ||
253 | BNX2X_DONT_CONSUME_CAM_CREDIT, | ||
254 | BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, | ||
255 | }; | ||
256 | |||
257 | struct bnx2x_vlan_mac_ramrod_params { | ||
258 | /* Object to run the command from */ | ||
259 | struct bnx2x_vlan_mac_obj *vlan_mac_obj; | ||
260 | |||
261 | /* General command flags: COMP_WAIT, etc. */ | ||
262 | unsigned long ramrod_flags; | ||
263 | |||
264 | /* Command specific configuration request */ | ||
265 | struct bnx2x_vlan_mac_data user_req; | ||
266 | }; | ||
267 | |||
268 | struct bnx2x_vlan_mac_obj { | ||
269 | struct bnx2x_raw_obj raw; | ||
270 | |||
271 | /* Bookkeeping list: will prevent the addition of already existing | ||
272 | * entries. | ||
273 | */ | ||
274 | struct list_head head; | ||
275 | |||
276 | /* TODO: Add its initialization in the init functions */ | ||
277 | struct bnx2x_exe_queue_obj exe_queue; | ||
278 | |||
279 | /* MACs credit pool */ | ||
280 | struct bnx2x_credit_pool_obj *macs_pool; | ||
281 | |||
282 | /* VLANs credit pool */ | ||
283 | struct bnx2x_credit_pool_obj *vlans_pool; | ||
284 | |||
285 | /* RAMROD command to be used */ | ||
286 | int ramrod_cmd; | ||
287 | |||
288 | /** | ||
289 | * Checks if ADD-ramrod with the given params may be performed. | ||
290 | * | ||
291 | * @return zero if the element may be added | ||
292 | */ | ||
293 | |||
294 | int (*check_add)(struct bnx2x_vlan_mac_obj *o, | ||
295 | union bnx2x_classification_ramrod_data *data); | ||
296 | |||
297 | /** | ||
298 | * Checks if DEL-ramrod with the given params may be performed. | ||
299 | * | ||
300 | * @return the registry element if it may be deleted, NULL otherwise | ||
301 | */ | ||
302 | struct bnx2x_vlan_mac_registry_elem * | ||
303 | (*check_del)(struct bnx2x_vlan_mac_obj *o, | ||
304 | union bnx2x_classification_ramrod_data *data); | ||
305 | |||
306 | /** | ||
307 | * Checks if a MOVE-ramrod with the given params may be performed. | ||
308 | * | ||
309 | * @return true if the element may be moved | ||
310 | */ | ||
311 | bool (*check_move)(struct bnx2x_vlan_mac_obj *src_o, | ||
312 | struct bnx2x_vlan_mac_obj *dst_o, | ||
313 | union bnx2x_classification_ramrod_data *data); | ||
314 | |||
315 | /** | ||
316 | * Update the relevant credit object(s) (consume/return | ||
317 | * correspondingly). | ||
318 | */ | ||
319 | bool (*get_credit)(struct bnx2x_vlan_mac_obj *o); | ||
320 | bool (*put_credit)(struct bnx2x_vlan_mac_obj *o); | ||
321 | bool (*get_cam_offset)(struct bnx2x_vlan_mac_obj *o, int *offset); | ||
322 | bool (*put_cam_offset)(struct bnx2x_vlan_mac_obj *o, int offset); | ||
323 | |||
324 | /** | ||
325 | * Configures one rule in the ramrod data buffer. | ||
326 | */ | ||
327 | void (*set_one_rule)(struct bnx2x *bp, | ||
328 | struct bnx2x_vlan_mac_obj *o, | ||
329 | struct bnx2x_exeq_elem *elem, int rule_idx, | ||
330 | int cam_offset); | ||
331 | |||
332 | /** | ||
333 | * Delete all configured elements having the given | ||
334 | * vlan_mac_flags specification. Assumes there are no commands | ||
335 | * pending for execution. Will schedule all currently | ||
336 | * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags | ||
337 | * specification for deletion and will use the given | ||
338 | * ramrod_flags for the last DEL operation. | ||
339 | * | ||
340 | * @param bp | ||
341 | * @param o | ||
342 | * @param ramrod_flags RAMROD_XX flags | ||
343 | * | ||
344 | * @return 0 if the last operation has completed successfully | ||
345 | * and there are no more elements left, positive value | ||
346 | * if there are pending for completion commands, | ||
347 | * negative value in case of failure. | ||
348 | */ | ||
349 | int (*delete_all)(struct bnx2x *bp, | ||
350 | struct bnx2x_vlan_mac_obj *o, | ||
351 | unsigned long *vlan_mac_flags, | ||
352 | unsigned long *ramrod_flags); | ||
353 | |||
354 | /** | ||
355 | * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously | ||
356 | * configured elements list. | ||
357 | * | ||
358 | * @param bp | ||
359 | * @param p Command parameters (only the RAMROD_COMP_WAIT bit in | ||
360 | * ramrod_flags is taken into account) | ||
361 | * @param ppos a pointer to the cookie that should be passed back in the | ||
362 | * next call so that the function handles the next element. If | ||
363 | * *ppos is set to NULL the iterator is restarted. | ||
364 | * If *ppos == NULL on return, the last element has been | ||
365 | * handled (see the iteration sketch after this structure). | ||
366 | * | ||
367 | * @return int | ||
368 | */ | ||
369 | int (*restore)(struct bnx2x *bp, | ||
370 | struct bnx2x_vlan_mac_ramrod_params *p, | ||
371 | struct bnx2x_vlan_mac_registry_elem **ppos); | ||
372 | |||
373 | /** | ||
374 | * Should be called on a completion arrival. | ||
375 | * | ||
376 | * @param bp | ||
377 | * @param o | ||
378 | * @param cqe Completion element we are handling | ||
379 | * @param ramrod_flags if RAMROD_CONT is set the next bulk of | ||
380 | * pending commands will be executed. | ||
381 | * RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE | ||
382 | * may also be set if needed. | ||
383 | * | ||
384 | * @return 0 if there are neither pending nor waiting for | ||
385 | * completion commands. Positive value if there are | ||
386 | * pending for execution or for completion commands. | ||
387 | * Negative value in case of an error (including an | ||
388 | * error in the cqe). | ||
389 | */ | ||
390 | int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, | ||
391 | union event_ring_elem *cqe, | ||
392 | unsigned long *ramrod_flags); | ||
393 | |||
394 | /** | ||
395 | * Wait for completion of all commands. Don't schedule new ones, | ||
396 | * just wait. It assumes that the completion code will schedule | ||
397 | * for new commands. | ||
398 | */ | ||
399 | int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o); | ||
400 | }; | ||
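The restore() cookie contract documented above reduces to a simple loop on the caller's side. Below is a hypothetical caller sketch (not part of this patch): example_restore_all() is an invented name, and it assumes RAMROD_COMP_WAIT is defined earlier in this header as a bit index within ramrod_flags, as the surrounding comments suggest.

static int example_restore_all(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vlan_mac_ramrod_params p = {0};
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
	int rc;

	p.vlan_mac_obj = obj;
	/* wait for each element's completion before moving on (assumption:
	 * RAMROD_COMP_WAIT is a ramrod_flags bit index)
	 */
	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);

	/* a NULL cookie (re)starts the iterator; NULL on return means the
	 * last element has been handled
	 */
	do {
		rc = obj->restore(bp, &p, &pos);
		if (rc < 0)
			return rc;
	} while (pos);

	return 0;
}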
401 | |||
402 | /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ | ||
403 | |||
404 | /* RX_MODE ramrod special flags: set in the rx_mode_flags field of | ||
405 | * a bnx2x_rx_mode_ramrod_params. | ||
406 | */ | ||
407 | enum { | ||
408 | BNX2X_RX_MODE_FCOE_ETH, | ||
409 | BNX2X_RX_MODE_ISCSI_ETH, | ||
410 | }; | ||
411 | |||
412 | enum { | ||
413 | BNX2X_ACCEPT_UNICAST, | ||
414 | BNX2X_ACCEPT_MULTICAST, | ||
415 | BNX2X_ACCEPT_ALL_UNICAST, | ||
416 | BNX2X_ACCEPT_ALL_MULTICAST, | ||
417 | BNX2X_ACCEPT_BROADCAST, | ||
418 | BNX2X_ACCEPT_UNMATCHED, | ||
419 | BNX2X_ACCEPT_ANY_VLAN | ||
420 | }; | ||
421 | |||
422 | struct bnx2x_rx_mode_ramrod_params { | ||
423 | struct bnx2x_rx_mode_obj *rx_mode_obj; | ||
424 | unsigned long *pstate; | ||
425 | int state; | ||
426 | u8 cl_id; | ||
427 | u32 cid; | ||
428 | u8 func_id; | ||
429 | unsigned long ramrod_flags; | ||
430 | unsigned long rx_mode_flags; | ||
431 | |||
432 | /* | ||
433 | * rdata is either a pointer to an eth_filter_rules_ramrod_data (e2) or to | ||
434 | * a tstorm_eth_mac_filter_config (e1x). | ||
435 | */ | ||
436 | void *rdata; | ||
437 | dma_addr_t rdata_mapping; | ||
438 | |||
439 | /* Rx mode settings */ | ||
440 | unsigned long rx_accept_flags; | ||
441 | |||
442 | /* internal switching settings */ | ||
443 | unsigned long tx_accept_flags; | ||
444 | }; | ||
445 | |||
446 | struct bnx2x_rx_mode_obj { | ||
447 | int (*config_rx_mode)(struct bnx2x *bp, | ||
448 | struct bnx2x_rx_mode_ramrod_params *p); | ||
449 | |||
450 | int (*wait_comp)(struct bnx2x *bp, | ||
451 | struct bnx2x_rx_mode_ramrod_params *p); | ||
452 | }; | ||
453 | |||
454 | /********************** Set multicast group ***********************************/ | ||
455 | |||
456 | struct bnx2x_mcast_list_elem { | ||
457 | struct list_head link; | ||
458 | u8 *mac; | ||
459 | }; | ||
460 | |||
461 | union bnx2x_mcast_config_data { | ||
462 | u8 *mac; | ||
463 | u8 bin; /* used in a RESTORE flow */ | ||
464 | }; | ||
465 | |||
466 | struct bnx2x_mcast_ramrod_params { | ||
467 | struct bnx2x_mcast_obj *mcast_obj; | ||
468 | |||
469 | /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */ | ||
470 | unsigned long ramrod_flags; | ||
471 | |||
472 | struct list_head mcast_list; /* list of struct bnx2x_mcast_list_elem */ | ||
473 | /** TODO: | ||
474 | * - rename it to macs_num. | ||
475 | * - Add a new command type for handling pending commands | ||
476 | * (remove "zero semantics"). | ||
477 | * | ||
478 | * Length of mcast_list. If zero and ADD_CONT command - post | ||
479 | * pending commands. | ||
480 | */ | ||
481 | int mcast_list_len; | ||
482 | }; | ||
483 | |||
484 | enum { | ||
485 | BNX2X_MCAST_CMD_ADD, | ||
486 | BNX2X_MCAST_CMD_CONT, | ||
487 | BNX2X_MCAST_CMD_DEL, | ||
488 | BNX2X_MCAST_CMD_RESTORE, | ||
489 | }; | ||
490 | |||
491 | struct bnx2x_mcast_obj { | ||
492 | struct bnx2x_raw_obj raw; | ||
493 | |||
494 | union { | ||
495 | struct { | ||
496 | #define BNX2X_MCAST_BINS_NUM 256 | ||
497 | #define BNX2X_MCAST_VEC_SZ (BNX2X_MCAST_BINS_NUM / 64) | ||
498 | u64 vec[BNX2X_MCAST_VEC_SZ]; | ||
499 | |||
500 | /** Number of BINs to clear. Should be updated | ||
501 | * immediately when a command arrives in order to | ||
502 | * properly create DEL commands. | ||
503 | */ | ||
504 | int num_bins_set; | ||
505 | } aprox_match; | ||
506 | |||
507 | struct { | ||
508 | struct list_head macs; | ||
509 | int num_macs_set; | ||
510 | } exact_match; | ||
511 | } registry; | ||
512 | |||
513 | /* Pending commands */ | ||
514 | struct list_head pending_cmds_head; | ||
515 | |||
516 | /* A state that is set in raw.pstate, when there are pending commands */ | ||
517 | int sched_state; | ||
518 | |||
519 | /* Maximal number of mcast MACs configured in one command */ | ||
520 | int max_cmd_len; | ||
521 | |||
522 | /* Total number of currently pending MACs to configure: both | ||
523 | * in the pending commands list and in the current command. | ||
524 | */ | ||
525 | int total_pending_num; | ||
526 | |||
527 | u8 engine_id; | ||
528 | |||
529 | /** | ||
530 | * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above) | ||
531 | */ | ||
532 | int (*config_mcast)(struct bnx2x *bp, | ||
533 | struct bnx2x_mcast_ramrod_params *p, int cmd); | ||
534 | |||
535 | /** | ||
536 | * Fills the ramrod data during the RESTORE flow. | ||
537 | * | ||
538 | * @param bp | ||
539 | * @param o | ||
540 | * @param start_bin Registry index to start from | ||
541 | * @param rdata_idx Index in the ramrod data to start from | ||
542 | * | ||
543 | * @return -1 if the whole registry was handled, otherwise the index | ||
544 | * of the last handled registry element. | ||
545 | */ | ||
546 | int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, | ||
547 | int start_bin, int *rdata_idx); | ||
548 | |||
549 | int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, | ||
550 | struct bnx2x_mcast_ramrod_params *p, int cmd); | ||
551 | |||
552 | void (*set_one_rule)(struct bnx2x *bp, | ||
553 | struct bnx2x_mcast_obj *o, int idx, | ||
554 | union bnx2x_mcast_config_data *cfg_data, int cmd); | ||
555 | |||
556 | /** Checks if there are more mcast MACs to be set or a previous | ||
557 | * command is still pending. | ||
558 | */ | ||
559 | bool (*check_pending)(struct bnx2x_mcast_obj *o); | ||
560 | |||
561 | /** | ||
562 | * Set/Clear/Check SCHEDULED state of the object | ||
563 | */ | ||
564 | void (*set_sched)(struct bnx2x_mcast_obj *o); | ||
565 | void (*clear_sched)(struct bnx2x_mcast_obj *o); | ||
566 | bool (*check_sched)(struct bnx2x_mcast_obj *o); | ||
567 | |||
568 | /* Wait until all pending commands complete */ | ||
569 | int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o); | ||
570 | |||
571 | /** | ||
572 | * Handle the internal object counters needed for proper | ||
573 | * command handling. Checks that the provided parameters are | ||
574 | * feasible. | ||
575 | */ | ||
576 | int (*validate)(struct bnx2x *bp, | ||
577 | struct bnx2x_mcast_ramrod_params *p, int cmd); | ||
578 | |||
579 | /** | ||
580 | * Restore the values of internal counters in case of a failure. | ||
581 | */ | ||
582 | void (*revert)(struct bnx2x *bp, | ||
583 | struct bnx2x_mcast_ramrod_params *p, | ||
584 | int old_num_bins); | ||
585 | |||
586 | int (*get_registry_size)(struct bnx2x_mcast_obj *o); | ||
587 | void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n); | ||
588 | }; | ||
589 | |||
590 | /*************************** Credit handling **********************************/ | ||
591 | struct bnx2x_credit_pool_obj { | ||
592 | |||
593 | /* Current amount of credit in the pool */ | ||
594 | atomic_t credit; | ||
595 | |||
596 | /* Maximum allowed credit. put() will check against it. */ | ||
597 | int pool_sz; | ||
598 | |||
599 | /* | ||
600 | * Allocate a pool table statically. | ||
601 | * | ||
602 | * Currently the maximum allowed size is MAX_MAC_CREDIT_E2 (272). | ||
603 | * | ||
604 | * The set bit in the table will mean that the entry is available. | ||
605 | */ | ||
606 | #define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64) | ||
607 | u64 pool_mirror[BNX2X_POOL_VEC_SIZE]; | ||
608 | |||
609 | /* Base pool offset (initialized differently) */ | ||
610 | int base_pool_offset; | ||
611 | |||
612 | /** | ||
613 | * Get the next free pool entry. | ||
614 | * | ||
615 | * @return true if there was a free entry in the pool | ||
616 | */ | ||
617 | bool (*get_entry)(struct bnx2x_credit_pool_obj *o, int *entry); | ||
618 | |||
619 | /** | ||
620 | * Return the entry back to the pool. | ||
621 | * | ||
622 | * @return true if entry is legal and has been successfully | ||
623 | * returned to the pool. | ||
624 | */ | ||
625 | bool (*put_entry)(struct bnx2x_credit_pool_obj *o, int entry); | ||
626 | |||
627 | /** | ||
628 | * Get the requested amount of credit from the pool. | ||
629 | * | ||
630 | * @param cnt Amount of requested credit | ||
631 | * @return true if the operation is successful | ||
632 | */ | ||
633 | bool (*get)(struct bnx2x_credit_pool_obj *o, int cnt); | ||
634 | |||
635 | /** | ||
636 | * Returns the credit to the pool. | ||
637 | * | ||
638 | * @param cnt Amount of credit to return | ||
639 | * @return true if the operation is successful | ||
640 | */ | ||
641 | bool (*put)(struct bnx2x_credit_pool_obj *o, int cnt); | ||
642 | |||
643 | /** | ||
644 | * Reads the current amount of credit. | ||
645 | */ | ||
646 | int (*check)(struct bnx2x_credit_pool_obj *o); | ||
647 | }; | ||
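As an illustration of how the two credit interfaces above are meant to compose, here is a hypothetical helper (the function name is invented) that first reserves one unit of credit and then, for pools that also manage concrete CAM entries (the 57710/57711 case mentioned earlier), claims an offset; on failure the credit is returned.

static bool example_take_one_mac_credit(struct bnx2x_credit_pool_obj *pool,
					int *cam_offset)
{
	/* reserve one unit of credit */
	if (!pool->get(pool, 1))
		return false;

	/* claim a concrete CAM entry as well */
	if (!pool->get_entry(pool, cam_offset)) {
		pool->put(pool, 1);	/* roll back the credit */
		return false;
	}

	return true;
}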
648 | |||
649 | /*************************** RSS configuration ********************************/ | ||
650 | enum { | ||
651 | /* RSS_MODE bits are mutually exclusive */ | ||
652 | BNX2X_RSS_MODE_DISABLED, | ||
653 | BNX2X_RSS_MODE_REGULAR, | ||
654 | BNX2X_RSS_MODE_VLAN_PRI, | ||
655 | BNX2X_RSS_MODE_E1HOV_PRI, | ||
656 | BNX2X_RSS_MODE_IP_DSCP, | ||
657 | |||
658 | BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */ | ||
659 | |||
660 | BNX2X_RSS_IPV4, | ||
661 | BNX2X_RSS_IPV4_TCP, | ||
662 | BNX2X_RSS_IPV6, | ||
663 | BNX2X_RSS_IPV6_TCP, | ||
664 | }; | ||
665 | |||
666 | struct bnx2x_config_rss_params { | ||
667 | struct bnx2x_rss_config_obj *rss_obj; | ||
668 | |||
669 | /* only RAMROD_COMP_WAIT may be set */ | ||
670 | unsigned long ramrod_flags; | ||
671 | |||
672 | /* BNX2X_RSS_X bits */ | ||
673 | unsigned long rss_flags; | ||
674 | |||
675 | /* Number of hash bits to take into account */ | ||
676 | u8 rss_result_mask; | ||
677 | |||
678 | /* Indirection table */ | ||
679 | u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; | ||
680 | |||
681 | /* RSS hash values */ | ||
682 | u32 rss_key[10]; | ||
683 | |||
684 | /* valid only if BNX2X_RSS_UPDATE_TOE is set */ | ||
685 | u16 toe_rss_bitmap; | ||
686 | }; | ||
687 | |||
688 | struct bnx2x_rss_config_obj { | ||
689 | struct bnx2x_raw_obj raw; | ||
690 | |||
691 | /* RSS engine to use */ | ||
692 | u8 engine_id; | ||
693 | |||
694 | /* Last configured indirection table */ | ||
695 | u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; | ||
696 | |||
697 | int (*config_rss)(struct bnx2x *bp, | ||
698 | struct bnx2x_config_rss_params *p); | ||
699 | }; | ||
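A hypothetical caller sketch for the RSS object follows: example_config_rss() and fp_cl_id are invented names, the 7-bit result mask and the single-client indirection table are purely illustrative, and a real caller would also fill rss_key[]. It assumes RAMROD_COMP_WAIT is a bit index in ramrod_flags, as the comments in this header suggest; bnx2x_config_rss() itself is prototyped later in this file.

static int example_config_rss(struct bnx2x *bp,
			      struct bnx2x_rss_config_obj *rss_obj,
			      u8 fp_cl_id)
{
	struct bnx2x_config_rss_params params = {0};
	int i;

	params.rss_obj = rss_obj;
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	/* regular RSS over IPv4/IPv6, TCP included */
	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);

	/* illustrative: 7 hash bits taken into account */
	params.rss_result_mask = 7;

	/* trivial indirection table: send everything to one client */
	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
		params.ind_table[i] = fp_cl_id;

	/* a real caller would also populate params.rss_key[] */

	return bnx2x_config_rss(bp, &params);
}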
700 | |||
701 | /*********************** Queue state update ***********************************/ | ||
702 | |||
703 | /* UPDATE command options */ | ||
704 | enum { | ||
705 | BNX2X_Q_UPDATE_IN_VLAN_REM, | ||
706 | BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, | ||
707 | BNX2X_Q_UPDATE_OUT_VLAN_REM, | ||
708 | BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG, | ||
709 | BNX2X_Q_UPDATE_ANTI_SPOOF, | ||
710 | BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, | ||
711 | BNX2X_Q_UPDATE_ACTIVATE, | ||
712 | BNX2X_Q_UPDATE_ACTIVATE_CHNG, | ||
713 | BNX2X_Q_UPDATE_DEF_VLAN_EN, | ||
714 | BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, | ||
715 | BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, | ||
716 | BNX2X_Q_UPDATE_SILENT_VLAN_REM | ||
717 | }; | ||
718 | |||
719 | /* Allowed Queue states */ | ||
720 | enum bnx2x_q_state { | ||
721 | BNX2X_Q_STATE_RESET, | ||
722 | BNX2X_Q_STATE_INITIALIZED, | ||
723 | BNX2X_Q_STATE_ACTIVE, | ||
724 | BNX2X_Q_STATE_INACTIVE, | ||
725 | BNX2X_Q_STATE_STOPPED, | ||
726 | BNX2X_Q_STATE_TERMINATED, | ||
727 | BNX2X_Q_STATE_FLRED, | ||
728 | BNX2X_Q_STATE_MAX, | ||
729 | }; | ||
730 | |||
731 | /* Allowed commands */ | ||
732 | enum bnx2x_queue_cmd { | ||
733 | BNX2X_Q_CMD_INIT, | ||
734 | BNX2X_Q_CMD_SETUP, | ||
735 | BNX2X_Q_CMD_DEACTIVATE, | ||
736 | BNX2X_Q_CMD_ACTIVATE, | ||
737 | BNX2X_Q_CMD_UPDATE, | ||
738 | BNX2X_Q_CMD_UPDATE_TPA, | ||
739 | BNX2X_Q_CMD_HALT, | ||
740 | BNX2X_Q_CMD_CFC_DEL, | ||
741 | BNX2X_Q_CMD_TERMINATE, | ||
742 | BNX2X_Q_CMD_EMPTY, | ||
743 | BNX2X_Q_CMD_MAX, | ||
744 | }; | ||
745 | |||
746 | /* queue SETUP + INIT flags */ | ||
747 | enum { | ||
748 | BNX2X_Q_FLG_TPA, | ||
749 | BNX2X_Q_FLG_STATS, | ||
750 | BNX2X_Q_FLG_ZERO_STATS, | ||
751 | BNX2X_Q_FLG_ACTIVE, | ||
752 | BNX2X_Q_FLG_OV, | ||
753 | BNX2X_Q_FLG_VLAN, | ||
754 | BNX2X_Q_FLG_COS, | ||
755 | BNX2X_Q_FLG_HC, | ||
756 | BNX2X_Q_FLG_HC_EN, | ||
757 | BNX2X_Q_FLG_DHC, | ||
758 | BNX2X_Q_FLG_FCOE, | ||
759 | BNX2X_Q_FLG_LEADING_RSS, | ||
760 | BNX2X_Q_FLG_MCAST, | ||
761 | BNX2X_Q_FLG_DEF_VLAN, | ||
762 | BNX2X_Q_FLG_TX_SWITCH, | ||
763 | BNX2X_Q_FLG_TX_SEC, | ||
764 | BNX2X_Q_FLG_ANTI_SPOOF, | ||
765 | BNX2X_Q_FLG_SILENT_VLAN_REM | ||
766 | }; | ||
767 | |||
768 | /* Queue type options: queue type may be a combination of the below. */ | ||
769 | enum bnx2x_q_type { | ||
770 | /** TODO: Consider moving both these flags into the init() | ||
771 | * ramrod params. | ||
772 | */ | ||
773 | BNX2X_Q_TYPE_HAS_RX, | ||
774 | BNX2X_Q_TYPE_HAS_TX, | ||
775 | }; | ||
776 | |||
777 | struct bnx2x_queue_init_params { | ||
778 | struct { | ||
779 | unsigned long flags; | ||
780 | u16 hc_rate; | ||
781 | u8 fw_sb_id; | ||
782 | u8 sb_cq_index; | ||
783 | } tx; | ||
784 | |||
785 | struct { | ||
786 | unsigned long flags; | ||
787 | u16 hc_rate; | ||
788 | u8 fw_sb_id; | ||
789 | u8 sb_cq_index; | ||
790 | } rx; | ||
791 | |||
792 | /* CID context in the host memory */ | ||
793 | struct eth_context *cxt; | ||
794 | }; | ||
795 | |||
796 | struct bnx2x_queue_update_params { | ||
797 | unsigned long update_flags; /* BNX2X_Q_UPDATE_XX bits */ | ||
798 | u16 def_vlan; | ||
799 | u16 silent_removal_value; | ||
800 | u16 silent_removal_mask; | ||
801 | }; | ||
802 | |||
803 | struct rxq_pause_params { | ||
804 | u16 bd_th_lo; | ||
805 | u16 bd_th_hi; | ||
806 | u16 rcq_th_lo; | ||
807 | u16 rcq_th_hi; | ||
808 | u16 sge_th_lo; /* valid iff BNX2X_Q_FLG_TPA */ | ||
809 | u16 sge_th_hi; /* valid iff BNX2X_Q_FLG_TPA */ | ||
810 | u16 pri_map; | ||
811 | }; | ||
812 | |||
813 | /* general */ | ||
814 | struct bnx2x_general_setup_params { | ||
815 | /* valid iff BNX2X_Q_FLG_STATS */ | ||
816 | u8 stat_id; | ||
817 | |||
818 | u8 spcl_id; | ||
819 | u16 mtu; | ||
820 | }; | ||
821 | |||
822 | struct bnx2x_rxq_setup_params { | ||
823 | /* dma */ | ||
824 | dma_addr_t dscr_map; | ||
825 | dma_addr_t sge_map; | ||
826 | dma_addr_t rcq_map; | ||
827 | dma_addr_t rcq_np_map; | ||
828 | |||
829 | u16 drop_flags; | ||
830 | u16 buf_sz; | ||
831 | u8 fw_sb_id; | ||
832 | u8 cl_qzone_id; | ||
833 | |||
834 | /* valid iff BNX2X_Q_FLG_TPA */ | ||
835 | u16 tpa_agg_sz; | ||
836 | u16 sge_buf_sz; | ||
837 | u8 max_sges_pkt; | ||
838 | u8 max_tpa_queues; | ||
839 | u8 rss_engine_id; | ||
840 | |||
841 | u8 cache_line_log; | ||
842 | |||
843 | u8 sb_cq_index; | ||
844 | |||
845 | /* valid iff BNX2X_Q_FLG_SILENT_VLAN_REM */ | ||
846 | u16 silent_removal_value; | ||
847 | u16 silent_removal_mask; | ||
848 | }; | ||
849 | |||
850 | struct bnx2x_txq_setup_params { | ||
851 | /* dma */ | ||
852 | dma_addr_t dscr_map; | ||
853 | |||
854 | u8 fw_sb_id; | ||
855 | u8 sb_cq_index; | ||
856 | u8 cos; /* valid iff BNX2X_Q_FLG_COS */ | ||
857 | u16 traffic_type; | ||
858 | /* equals the leading rss client id, used for TX classification */ | ||
859 | u8 tss_leading_cl_id; | ||
860 | |||
861 | /* valid iff BNX2X_Q_FLG_DEF_VLAN */ | ||
862 | u16 default_vlan; | ||
863 | }; | ||
864 | |||
865 | struct bnx2x_queue_setup_params { | ||
866 | struct rxq_pause_params pause; | ||
867 | struct bnx2x_general_setup_params gen_params; | ||
868 | struct bnx2x_rxq_setup_params rxq_params; | ||
869 | struct bnx2x_txq_setup_params txq_params; | ||
870 | unsigned long flags; | ||
871 | }; | ||
872 | |||
873 | |||
874 | struct bnx2x_queue_state_params { | ||
875 | struct bnx2x_queue_sp_obj *q_obj; | ||
876 | |||
877 | /* Current command */ | ||
878 | enum bnx2x_queue_cmd cmd; | ||
879 | |||
880 | /* only RAMROD_COMP_WAIT may be set */ | ||
881 | unsigned long ramrod_flags; | ||
882 | |||
883 | /* Params according to the current command */ | ||
884 | union { | ||
885 | struct bnx2x_queue_update_params update; | ||
886 | struct bnx2x_queue_setup_params setup; | ||
887 | struct bnx2x_queue_init_params init; | ||
888 | } params; | ||
889 | }; | ||
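To show how the UPDATE options, the command enum and the parameter union fit together, here is a hypothetical caller sketch (invented function name) that activates a queue via an UPDATE transition; it assumes RAMROD_COMP_WAIT is a ramrod_flags bit index, and bnx2x_queue_state_change() is prototyped later in this header.

static int example_activate_queue(struct bnx2x *bp,
				  struct bnx2x_queue_sp_obj *q_obj)
{
	struct bnx2x_queue_state_params q_params = {0};

	q_params.q_obj = q_obj;
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* request the ACTIVATE change and mark the field as changed */
	__set_bit(BNX2X_Q_UPDATE_ACTIVATE,
		  &q_params.params.update.update_flags);
	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
		  &q_params.params.update.update_flags);

	return bnx2x_queue_state_change(bp, &q_params);
}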
890 | |||
891 | struct bnx2x_queue_sp_obj { | ||
892 | u32 cid; | ||
893 | u8 cl_id; | ||
894 | u8 func_id; | ||
895 | |||
896 | enum bnx2x_q_state state, next_state; | ||
897 | |||
898 | /* bits from enum bnx2x_q_type */ | ||
899 | unsigned long type; | ||
900 | |||
901 | /* BNX2X_Q_CMD_XX bits. This object implements the "one | ||
902 | * pending" paradigm, but for debug and tracing purposes it's | ||
903 | * more convenient to have different bits for different | ||
904 | * commands. | ||
905 | */ | ||
906 | unsigned long pending; | ||
907 | |||
908 | /* Buffer to use as a ramrod data and its mapping */ | ||
909 | void *rdata; | ||
910 | dma_addr_t rdata_mapping; | ||
911 | |||
912 | /** | ||
913 | * Performs one state change according to the given parameters. | ||
914 | * | ||
915 | * @return 0 in case of success and negative value otherwise. | ||
916 | */ | ||
917 | int (*send_cmd)(struct bnx2x *bp, | ||
918 | struct bnx2x_queue_state_params *params); | ||
919 | |||
920 | /** | ||
921 | * Sets the pending bit according to the requested transition. | ||
922 | */ | ||
923 | int (*set_pending)(struct bnx2x_queue_sp_obj *o, | ||
924 | struct bnx2x_queue_state_params *params); | ||
925 | |||
926 | /** | ||
927 | * Checks that the requested state transition is legal. | ||
928 | */ | ||
929 | int (*check_transition)(struct bnx2x *bp, | ||
930 | struct bnx2x_queue_sp_obj *o, | ||
931 | struct bnx2x_queue_state_params *params); | ||
932 | |||
933 | /** | ||
934 | * Completes the pending command. | ||
935 | */ | ||
936 | int (*complete_cmd)(struct bnx2x *bp, | ||
937 | struct bnx2x_queue_sp_obj *o, | ||
938 | enum bnx2x_queue_cmd); | ||
939 | |||
940 | int (*wait_comp)(struct bnx2x *bp, | ||
941 | struct bnx2x_queue_sp_obj *o, | ||
942 | enum bnx2x_queue_cmd cmd); | ||
943 | }; | ||
944 | |||
945 | /********************** Function state update *********************************/ | ||
946 | /* Allowed Function states */ | ||
947 | enum bnx2x_func_state { | ||
948 | BNX2X_F_STATE_RESET, | ||
949 | BNX2X_F_STATE_INITIALIZED, | ||
950 | BNX2X_F_STATE_STARTED, | ||
951 | BNX2X_F_STATE_MAX, | ||
952 | }; | ||
953 | |||
954 | /* Allowed Function commands */ | ||
955 | enum bnx2x_func_cmd { | ||
956 | BNX2X_F_CMD_HW_INIT, | ||
957 | BNX2X_F_CMD_START, | ||
958 | BNX2X_F_CMD_STOP, | ||
959 | BNX2X_F_CMD_HW_RESET, | ||
960 | BNX2X_F_CMD_MAX, | ||
961 | }; | ||
962 | |||
963 | struct bnx2x_func_hw_init_params { | ||
964 | /* A load phase returned by MCP. | ||
965 | * | ||
966 | * May be: | ||
967 | * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP | ||
968 | * FW_MSG_CODE_DRV_LOAD_COMMON | ||
969 | * FW_MSG_CODE_DRV_LOAD_PORT | ||
970 | * FW_MSG_CODE_DRV_LOAD_FUNCTION | ||
971 | */ | ||
972 | u32 load_phase; | ||
973 | }; | ||
974 | |||
975 | struct bnx2x_func_hw_reset_params { | ||
976 | /* A load phase returned by MCP. | ||
977 | * | ||
978 | * May be: | ||
979 | * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP | ||
980 | * FW_MSG_CODE_DRV_LOAD_COMMON | ||
981 | * FW_MSG_CODE_DRV_LOAD_PORT | ||
982 | * FW_MSG_CODE_DRV_LOAD_FUNCTION | ||
983 | */ | ||
984 | u32 reset_phase; | ||
985 | }; | ||
986 | |||
987 | struct bnx2x_func_start_params { | ||
988 | /* Multi Function mode: | ||
989 | * - Single Function | ||
990 | * - Switch Dependent | ||
991 | * - Switch Independent | ||
992 | */ | ||
993 | u16 mf_mode; | ||
994 | |||
995 | /* Switch Dependent mode outer VLAN tag */ | ||
996 | u16 sd_vlan_tag; | ||
997 | |||
998 | /* Function cos mode */ | ||
999 | u8 network_cos_mode; | ||
1000 | }; | ||
1001 | |||
1002 | struct bnx2x_func_state_params { | ||
1003 | struct bnx2x_func_sp_obj *f_obj; | ||
1004 | |||
1005 | /* Current command */ | ||
1006 | enum bnx2x_func_cmd cmd; | ||
1007 | |||
1008 | /* only RAMROD_COMP_WAIT may be set */ | ||
1009 | unsigned long ramrod_flags; | ||
1010 | |||
1011 | /* Params according to the current command */ | ||
1012 | union { | ||
1013 | struct bnx2x_func_hw_init_params hw_init; | ||
1014 | struct bnx2x_func_hw_reset_params hw_reset; | ||
1015 | struct bnx2x_func_start_params start; | ||
1016 | } params; | ||
1017 | }; | ||
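A hypothetical caller sketch for the function state machine (invented function name): it drives the HW_INIT transition with a load phase obtained from the MCP, assumes RAMROD_COMP_WAIT is a ramrod_flags bit index, and relies on bnx2x_func_state_change(), which is prototyped later in this header.

static int example_func_hw_init(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *f_obj,
				u32 load_code)
{
	struct bnx2x_func_state_params func_params = {0};

	func_params.f_obj = f_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	/* e.g. FW_MSG_CODE_DRV_LOAD_COMMON, as listed in the comment above */
	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}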
1018 | |||
1019 | struct bnx2x_func_sp_drv_ops { | ||
1020 | /* Init tool + runtime initialization: | ||
1021 | * - Common Chip | ||
1022 | * - Common (per Path) | ||
1023 | * - Port | ||
1024 | * - Function phases | ||
1025 | */ | ||
1026 | int (*init_hw_cmn_chip)(struct bnx2x *bp); | ||
1027 | int (*init_hw_cmn)(struct bnx2x *bp); | ||
1028 | int (*init_hw_port)(struct bnx2x *bp); | ||
1029 | int (*init_hw_func)(struct bnx2x *bp); | ||
1030 | |||
1031 | /* Reset Function HW: Common, Port, Function phases. */ | ||
1032 | void (*reset_hw_cmn)(struct bnx2x *bp); | ||
1033 | void (*reset_hw_port)(struct bnx2x *bp); | ||
1034 | void (*reset_hw_func)(struct bnx2x *bp); | ||
1035 | |||
1036 | /* Init/Free GUNZIP resources */ | ||
1037 | int (*gunzip_init)(struct bnx2x *bp); | ||
1038 | void (*gunzip_end)(struct bnx2x *bp); | ||
1039 | |||
1040 | /* Prepare/Release FW resources */ | ||
1041 | int (*init_fw)(struct bnx2x *bp); | ||
1042 | void (*release_fw)(struct bnx2x *bp); | ||
1043 | }; | ||
1044 | |||
1045 | struct bnx2x_func_sp_obj { | ||
1046 | enum bnx2x_func_state state, next_state; | ||
1047 | |||
1048 | /* BNX2X_FUNC_CMD_XX bits. This object implements the "one | ||
1049 | * pending" paradigm, but for debug and tracing purposes it's | ||
1050 | * more convenient to have different bits for different | ||
1051 | * commands. | ||
1052 | */ | ||
1053 | unsigned long pending; | ||
1054 | |||
1055 | /* Buffer to use as a ramrod data and its mapping */ | ||
1056 | void *rdata; | ||
1057 | dma_addr_t rdata_mapping; | ||
1058 | |||
1059 | /* This mutex ensures that when the pending flag is taken, the next | ||
1060 | * ramrod to be sent will be the one that set the pending bit. | ||
1061 | */ | ||
1062 | struct mutex one_pending_mutex; | ||
1063 | |||
1064 | /* Driver interface */ | ||
1065 | struct bnx2x_func_sp_drv_ops *drv; | ||
1066 | |||
1067 | /** | ||
1068 | * Performs one state change according to the given parameters. | ||
1069 | * | ||
1070 | * @return 0 in case of success and negative value otherwise. | ||
1071 | */ | ||
1072 | int (*send_cmd)(struct bnx2x *bp, | ||
1073 | struct bnx2x_func_state_params *params); | ||
1074 | |||
1075 | /** | ||
1076 | * Checks that the requested state transition is legal. | ||
1077 | */ | ||
1078 | int (*check_transition)(struct bnx2x *bp, | ||
1079 | struct bnx2x_func_sp_obj *o, | ||
1080 | struct bnx2x_func_state_params *params); | ||
1081 | |||
1082 | /** | ||
1083 | * Completes the pending command. | ||
1084 | */ | ||
1085 | int (*complete_cmd)(struct bnx2x *bp, | ||
1086 | struct bnx2x_func_sp_obj *o, | ||
1087 | enum bnx2x_func_cmd cmd); | ||
1088 | |||
1089 | int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o, | ||
1090 | enum bnx2x_func_cmd cmd); | ||
1091 | }; | ||
1092 | |||
1093 | /********************** Interfaces ********************************************/ | ||
1094 | /* Queueable objects set */ | ||
1095 | union bnx2x_qable_obj { | ||
1096 | struct bnx2x_vlan_mac_obj vlan_mac; | ||
1097 | }; | ||
1098 | /************** Function state update *********/ | ||
1099 | void bnx2x_init_func_obj(struct bnx2x *bp, | ||
1100 | struct bnx2x_func_sp_obj *obj, | ||
1101 | void *rdata, dma_addr_t rdata_mapping, | ||
1102 | struct bnx2x_func_sp_drv_ops *drv_iface); | ||
1103 | |||
1104 | int bnx2x_func_state_change(struct bnx2x *bp, | ||
1105 | struct bnx2x_func_state_params *params); | ||
1106 | |||
1107 | /******************* Queue State **************/ | ||
1108 | void bnx2x_init_queue_obj(struct bnx2x *bp, | ||
1109 | struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 cid, | ||
1110 | u8 func_id, void *rdata, dma_addr_t rdata_mapping, | ||
1111 | unsigned long type); | ||
1112 | |||
1113 | int bnx2x_queue_state_change(struct bnx2x *bp, | ||
1114 | struct bnx2x_queue_state_params *params); | ||
1115 | |||
1116 | /********************* VLAN-MAC ****************/ | ||
1117 | void bnx2x_init_mac_obj(struct bnx2x *bp, | ||
1118 | struct bnx2x_vlan_mac_obj *mac_obj, | ||
1119 | u8 cl_id, u32 cid, u8 func_id, void *rdata, | ||
1120 | dma_addr_t rdata_mapping, int state, | ||
1121 | unsigned long *pstate, bnx2x_obj_type type, | ||
1122 | struct bnx2x_credit_pool_obj *macs_pool); | ||
1123 | |||
1124 | void bnx2x_init_vlan_obj(struct bnx2x *bp, | ||
1125 | struct bnx2x_vlan_mac_obj *vlan_obj, | ||
1126 | u8 cl_id, u32 cid, u8 func_id, void *rdata, | ||
1127 | dma_addr_t rdata_mapping, int state, | ||
1128 | unsigned long *pstate, bnx2x_obj_type type, | ||
1129 | struct bnx2x_credit_pool_obj *vlans_pool); | ||
1130 | |||
1131 | void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, | ||
1132 | struct bnx2x_vlan_mac_obj *vlan_mac_obj, | ||
1133 | u8 cl_id, u32 cid, u8 func_id, void *rdata, | ||
1134 | dma_addr_t rdata_mapping, int state, | ||
1135 | unsigned long *pstate, bnx2x_obj_type type, | ||
1136 | struct bnx2x_credit_pool_obj *macs_pool, | ||
1137 | struct bnx2x_credit_pool_obj *vlans_pool); | ||
1138 | |||
1139 | int bnx2x_config_vlan_mac(struct bnx2x *bp, | ||
1140 | struct bnx2x_vlan_mac_ramrod_params *p); | ||
1141 | |||
1142 | int bnx2x_vlan_mac_move(struct bnx2x *bp, | ||
1143 | struct bnx2x_vlan_mac_ramrod_params *p, | ||
1144 | struct bnx2x_vlan_mac_obj *dest_o); | ||
1145 | |||
1146 | /********************* RX MODE ****************/ | ||
1147 | |||
1148 | void bnx2x_init_rx_mode_obj(struct bnx2x *bp, | ||
1149 | struct bnx2x_rx_mode_obj *o); | ||
1150 | |||
1151 | /** | ||
1152 | * Send an RX_MODE ramrod according to the provided parameters. | ||
1153 | * | ||
1154 | * @param bp | ||
1155 | * @param p Command parameters | ||
1156 | * | ||
1157 | * @return 0 - if the operation was successful and there are no pending | ||
1158 | * completions, positive - if there are pending completions, | ||
1159 | * negative - if there were errors | ||
1160 | */ | ||
1161 | int bnx2x_config_rx_mode(struct bnx2x *bp, | ||
1162 | struct bnx2x_rx_mode_ramrod_params *p); | ||
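A hypothetical caller sketch (invented function name): it only shows the accept-flag side of an RX_MODE request and assumes the caller has already filled the object, client/function ids, the rdata buffer and the state/pstate bookkeeping in *p; RAMROD_COMP_WAIT is again assumed to be a ramrod_flags bit index.

static int example_set_rx_mode(struct bnx2x *bp,
			       struct bnx2x_rx_mode_ramrod_params *p)
{
	/* p->rx_mode_obj, p->cl_id, p->cid, p->func_id, p->rdata,
	 * p->rdata_mapping, p->state and p->pstate are assumed to be
	 * filled by the caller.
	 */
	__set_bit(RAMROD_COMP_WAIT, &p->ramrod_flags);

	/* Rx side: plain unicast/multicast/broadcast */
	__set_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags);
	__set_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags);
	__set_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags);

	/* internal switching (Tx) side, chosen here purely for illustration */
	__set_bit(BNX2X_ACCEPT_UNICAST, &p->tx_accept_flags);
	__set_bit(BNX2X_ACCEPT_BROADCAST, &p->tx_accept_flags);

	return bnx2x_config_rx_mode(bp, p);
}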
1163 | |||
1164 | /****************** MULTICASTS ****************/ | ||
1165 | |||
1166 | void bnx2x_init_mcast_obj(struct bnx2x *bp, | ||
1167 | struct bnx2x_mcast_obj *mcast_obj, | ||
1168 | u8 mcast_cl_id, u32 mcast_cid, u8 func_id, | ||
1169 | u8 engine_id, void *rdata, dma_addr_t rdata_mapping, | ||
1170 | int state, unsigned long *pstate, | ||
1171 | bnx2x_obj_type type); | ||
1172 | |||
1173 | /** | ||
1174 | * Configure a multicast MACs list. May configure a new list | ||
1175 | * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up | ||
1176 | * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) the current | ||
1177 | * configuration, or continue executing the pending commands | ||
1178 | * (BNX2X_MCAST_CMD_CONT). | ||
1179 | * | ||
1180 | * If a previous command is still pending, or if the number of MACs to | ||
1181 | * configure is more than the maximum number of MACs in one command, | ||
1182 | * the current command will be enqueued at the tail of the | ||
1183 | * pending commands list. | ||
1184 | * | ||
1185 | * @param bp | ||
1186 | * @param p | ||
1187 | * @param cmd command to execute: BNX2X_MCAST_CMD_X | ||
1188 | * | ||
1189 | * @return 0 if the operation was successful and there are no pending completions, | ||
1190 | * negative if there were errors, positive if there are pending | ||
1191 | * completions. | ||
1192 | */ | ||
1193 | int bnx2x_config_mcast(struct bnx2x *bp, | ||
1194 | struct bnx2x_mcast_ramrod_params *p, int cmd); | ||
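A hypothetical caller sketch for bnx2x_config_mcast() (invented function name): it builds a one-element MAC list and issues an ADD. If the command ends up enqueued as pending, the list storage must stay alive until it completes; with RAMROD_COMP_WAIT (assumed to be a ramrod_flags bit index) the on-stack element shown here is only safe because the call waits.

static int example_add_mcast_mac(struct bnx2x *bp,
				 struct bnx2x_mcast_obj *mcast_obj,
				 u8 *mac)
{
	struct bnx2x_mcast_ramrod_params rparam = {0};
	struct bnx2x_mcast_list_elem elem;

	rparam.mcast_obj = mcast_obj;
	__set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);

	/* single-entry list of MACs to add */
	INIT_LIST_HEAD(&rparam.mcast_list);
	elem.mac = mac;
	list_add_tail(&elem.link, &rparam.mcast_list);
	rparam.mcast_list_len = 1;

	return bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
}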
1195 | |||
1196 | /****************** CREDIT POOL ****************/ | ||
1197 | void bnx2x_init_mac_credit_pool(struct bnx2x *bp, | ||
1198 | struct bnx2x_credit_pool_obj *p, u8 func_id, | ||
1199 | u8 func_num); | ||
1200 | void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, | ||
1201 | struct bnx2x_credit_pool_obj *p, u8 func_id, | ||
1202 | u8 func_num); | ||
1203 | |||
1204 | |||
1205 | /****************** RSS CONFIGURATION ****************/ | ||
1206 | void bnx2x_init_rss_config_obj(struct bnx2x *bp, | ||
1207 | struct bnx2x_rss_config_obj *rss_obj, | ||
1208 | u8 cl_id, u32 cid, u8 func_id, u8 engine_id, | ||
1209 | void *rdata, dma_addr_t rdata_mapping, | ||
1210 | int state, unsigned long *pstate, | ||
1211 | bnx2x_obj_type type); | ||
1212 | |||
1213 | /** | ||
1214 | * Updates RSS configuration according to provided parameters. | ||
1215 | * | ||
1216 | * @param bp | ||
1217 | * @param p | ||
1218 | * | ||
1219 | * @return 0 in case of success | ||
1220 | */ | ||
1221 | int bnx2x_config_rss(struct bnx2x *bp, | ||
1222 | struct bnx2x_config_rss_params *p); | ||
1223 | |||
1224 | /** | ||
1225 | * Return the current ind_table configuration. | ||
1226 | * | ||
1227 | * @param bp | ||
1228 | * @param ind_table buffer to fill with the current indirection | ||
1229 | * table content. Should be at least | ||
1230 | * T_ETH_INDIRECTION_TABLE_SIZE bytes long. | ||
1231 | */ | ||
1232 | void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, | ||
1233 | u8 *ind_table); | ||
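A minimal, hypothetical usage sketch (invented function name), e.g. for an ethtool-style query of the last programmed indirection table:

static void example_dump_ind_table(struct bnx2x_rss_config_obj *rss_obj)
{
	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];

	bnx2x_get_rss_ind_table(rss_obj, ind_table);
	/* ind_table[] now mirrors rss_obj->ind_table */
}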
1234 | |||
1235 | #endif /* BNX2X_SP_VERBS */ | ||
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index e535bfa08945..54c07f557ad4 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c | |||
@@ -14,120 +14,11 @@ | |||
14 | * Statistics and Link management by Yitchak Gertner | 14 | * Statistics and Link management by Yitchak Gertner |
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | #include "bnx2x_cmn.h" | ||
18 | #include "bnx2x_stats.h" | 17 | #include "bnx2x_stats.h" |
18 | #include "bnx2x_cmn.h" | ||
19 | 19 | ||
20 | /* Statistics */ | ||
21 | 20 | ||
22 | /**************************************************************************** | 21 | /* Statistics */ |
23 | * Macros | ||
24 | ****************************************************************************/ | ||
25 | |||
26 | /* sum[hi:lo] += add[hi:lo] */ | ||
27 | #define ADD_64(s_hi, a_hi, s_lo, a_lo) \ | ||
28 | do { \ | ||
29 | s_lo += a_lo; \ | ||
30 | s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ | ||
31 | } while (0) | ||
32 | |||
33 | /* difference = minuend - subtrahend */ | ||
34 | #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ | ||
35 | do { \ | ||
36 | if (m_lo < s_lo) { \ | ||
37 | /* underflow */ \ | ||
38 | d_hi = m_hi - s_hi; \ | ||
39 | if (d_hi > 0) { \ | ||
40 | /* we can 'loan' 1 */ \ | ||
41 | d_hi--; \ | ||
42 | d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ | ||
43 | } else { \ | ||
44 | /* m_hi <= s_hi */ \ | ||
45 | d_hi = 0; \ | ||
46 | d_lo = 0; \ | ||
47 | } \ | ||
48 | } else { \ | ||
49 | /* m_lo >= s_lo */ \ | ||
50 | if (m_hi < s_hi) { \ | ||
51 | d_hi = 0; \ | ||
52 | d_lo = 0; \ | ||
53 | } else { \ | ||
54 | /* m_hi >= s_hi */ \ | ||
55 | d_hi = m_hi - s_hi; \ | ||
56 | d_lo = m_lo - s_lo; \ | ||
57 | } \ | ||
58 | } \ | ||
59 | } while (0) | ||
60 | |||
61 | #define UPDATE_STAT64(s, t) \ | ||
62 | do { \ | ||
63 | DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \ | ||
64 | diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \ | ||
65 | pstats->mac_stx[0].t##_hi = new->s##_hi; \ | ||
66 | pstats->mac_stx[0].t##_lo = new->s##_lo; \ | ||
67 | ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \ | ||
68 | pstats->mac_stx[1].t##_lo, diff.lo); \ | ||
69 | } while (0) | ||
70 | |||
71 | #define UPDATE_STAT64_NIG(s, t) \ | ||
72 | do { \ | ||
73 | DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \ | ||
74 | diff.lo, new->s##_lo, old->s##_lo); \ | ||
75 | ADD_64(estats->t##_hi, diff.hi, \ | ||
76 | estats->t##_lo, diff.lo); \ | ||
77 | } while (0) | ||
78 | |||
79 | /* sum[hi:lo] += add */ | ||
80 | #define ADD_EXTEND_64(s_hi, s_lo, a) \ | ||
81 | do { \ | ||
82 | s_lo += a; \ | ||
83 | s_hi += (s_lo < a) ? 1 : 0; \ | ||
84 | } while (0) | ||
85 | |||
86 | #define UPDATE_EXTEND_STAT(s) \ | ||
87 | do { \ | ||
88 | ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \ | ||
89 | pstats->mac_stx[1].s##_lo, \ | ||
90 | new->s); \ | ||
91 | } while (0) | ||
92 | |||
93 | #define UPDATE_EXTEND_TSTAT(s, t) \ | ||
94 | do { \ | ||
95 | diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \ | ||
96 | old_tclient->s = tclient->s; \ | ||
97 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
98 | } while (0) | ||
99 | |||
100 | #define UPDATE_EXTEND_USTAT(s, t) \ | ||
101 | do { \ | ||
102 | diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ | ||
103 | old_uclient->s = uclient->s; \ | ||
104 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
105 | } while (0) | ||
106 | |||
107 | #define UPDATE_EXTEND_XSTAT(s, t) \ | ||
108 | do { \ | ||
109 | diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \ | ||
110 | old_xclient->s = xclient->s; \ | ||
111 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
112 | } while (0) | ||
113 | |||
114 | /* minuend -= subtrahend */ | ||
115 | #define SUB_64(m_hi, s_hi, m_lo, s_lo) \ | ||
116 | do { \ | ||
117 | DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \ | ||
118 | } while (0) | ||
119 | |||
120 | /* minuend[hi:lo] -= subtrahend */ | ||
121 | #define SUB_EXTEND_64(m_hi, m_lo, s) \ | ||
122 | do { \ | ||
123 | SUB_64(m_hi, 0, m_lo, s); \ | ||
124 | } while (0) | ||
125 | |||
126 | #define SUB_EXTEND_USTAT(s, t) \ | ||
127 | do { \ | ||
128 | diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ | ||
129 | SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
130 | } while (0) | ||
131 | 22 | ||
132 | /* | 23 | /* |
133 | * General service functions | 24 | * General service functions |
@@ -149,12 +40,16 @@ static inline long bnx2x_hilo(u32 *hiref) | |||
149 | * Init service functions | 40 | * Init service functions |
150 | */ | 41 | */ |
151 | 42 | ||
152 | 43 | /* Post the next statistics ramrod. Protect it with the spin lock
44 | * in order to ensure the strict order between statistics ramrods | ||
45 | * (each ramrod has a sequence number passed in | ||
46 | * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be | ||
47 | * sent in order). | ||
48 | */ | ||
153 | static void bnx2x_storm_stats_post(struct bnx2x *bp) | 49 | static void bnx2x_storm_stats_post(struct bnx2x *bp) |
154 | { | 50 | { |
155 | if (!bp->stats_pending) { | 51 | if (!bp->stats_pending) { |
156 | struct common_query_ramrod_data ramrod_data = {0}; | 52 | int rc; |
157 | int i, rc; | ||
158 | 53 | ||
159 | spin_lock_bh(&bp->stats_lock); | 54 | spin_lock_bh(&bp->stats_lock); |
160 | 55 | ||
@@ -163,14 +58,19 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp) | |||
163 | return; | 58 | return; |
164 | } | 59 | } |
165 | 60 | ||
166 | ramrod_data.drv_counter = bp->stats_counter++; | 61 | bp->fw_stats_req->hdr.drv_stats_counter = |
167 | ramrod_data.collect_port = bp->port.pmf ? 1 : 0; | 62 | cpu_to_le16(bp->stats_counter++); |
168 | for_each_eth_queue(bp, i) | ||
169 | ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id); | ||
170 | 63 | ||
64 | DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n", | ||
65 | bp->fw_stats_req->hdr.drv_stats_counter); | ||
66 | |||
67 | |||
68 | |||
69 | /* send FW stats ramrod */ | ||
171 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, | 70 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, |
172 | ((u32 *)&ramrod_data)[1], | 71 | U64_HI(bp->fw_stats_req_mapping), |
173 | ((u32 *)&ramrod_data)[0], 1); | 72 | U64_LO(bp->fw_stats_req_mapping), |
73 | NONE_CONNECTION_TYPE); | ||
174 | if (rc == 0) | 74 | if (rc == 0) |
175 | bp->stats_pending = 1; | 75 | bp->stats_pending = 1; |
176 | 76 | ||
@@ -230,7 +130,7 @@ static int bnx2x_stats_comp(struct bnx2x *bp) | |||
230 | break; | 130 | break; |
231 | } | 131 | } |
232 | cnt--; | 132 | cnt--; |
233 | msleep(1); | 133 | usleep_range(1000, 1000); |
234 | } | 134 | } |
235 | return 1; | 135 | return 1; |
236 | } | 136 | } |
@@ -338,69 +238,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp) | |||
338 | opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, | 238 | opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, |
339 | true, DMAE_COMP_GRC); | 239 | true, DMAE_COMP_GRC); |
340 | 240 | ||
341 | if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { | 241 | /* EMAC is special */ |
342 | 242 | if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { | |
343 | mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : | ||
344 | NIG_REG_INGRESS_BMAC0_MEM); | ||
345 | |||
346 | /* BIGMAC_REGISTER_TX_STAT_GTPKT .. | ||
347 | BIGMAC_REGISTER_TX_STAT_GTBYT */ | ||
348 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
349 | dmae->opcode = opcode; | ||
350 | if (CHIP_IS_E1x(bp)) { | ||
351 | dmae->src_addr_lo = (mac_addr + | ||
352 | BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; | ||
353 | dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - | ||
354 | BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; | ||
355 | } else { | ||
356 | dmae->src_addr_lo = (mac_addr + | ||
357 | BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; | ||
358 | dmae->len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT - | ||
359 | BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; | ||
360 | } | ||
361 | |||
362 | dmae->src_addr_hi = 0; | ||
363 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); | ||
364 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); | ||
365 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
366 | dmae->comp_addr_hi = 0; | ||
367 | dmae->comp_val = 1; | ||
368 | |||
369 | /* BIGMAC_REGISTER_RX_STAT_GR64 .. | ||
370 | BIGMAC_REGISTER_RX_STAT_GRIPJ */ | ||
371 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
372 | dmae->opcode = opcode; | ||
373 | dmae->src_addr_hi = 0; | ||
374 | if (CHIP_IS_E1x(bp)) { | ||
375 | dmae->src_addr_lo = (mac_addr + | ||
376 | BIGMAC_REGISTER_RX_STAT_GR64) >> 2; | ||
377 | dmae->dst_addr_lo = | ||
378 | U64_LO(bnx2x_sp_mapping(bp, mac_stats) + | ||
379 | offsetof(struct bmac1_stats, rx_stat_gr64_lo)); | ||
380 | dmae->dst_addr_hi = | ||
381 | U64_HI(bnx2x_sp_mapping(bp, mac_stats) + | ||
382 | offsetof(struct bmac1_stats, rx_stat_gr64_lo)); | ||
383 | dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - | ||
384 | BIGMAC_REGISTER_RX_STAT_GR64) >> 2; | ||
385 | } else { | ||
386 | dmae->src_addr_lo = | ||
387 | (mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; | ||
388 | dmae->dst_addr_lo = | ||
389 | U64_LO(bnx2x_sp_mapping(bp, mac_stats) + | ||
390 | offsetof(struct bmac2_stats, rx_stat_gr64_lo)); | ||
391 | dmae->dst_addr_hi = | ||
392 | U64_HI(bnx2x_sp_mapping(bp, mac_stats) + | ||
393 | offsetof(struct bmac2_stats, rx_stat_gr64_lo)); | ||
394 | dmae->len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ - | ||
395 | BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; | ||
396 | } | ||
397 | |||
398 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
399 | dmae->comp_addr_hi = 0; | ||
400 | dmae->comp_val = 1; | ||
401 | |||
402 | } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { | ||
403 | |||
404 | mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0); | 243 | mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0); |
405 | 244 | ||
406 | /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ | 245 | /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ |
@@ -445,46 +284,122 @@ static void bnx2x_port_stats_init(struct bnx2x *bp) | |||
445 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | 284 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; |
446 | dmae->comp_addr_hi = 0; | 285 | dmae->comp_addr_hi = 0; |
447 | dmae->comp_val = 1; | 286 | dmae->comp_val = 1; |
287 | } else { | ||
288 | u32 tx_src_addr_lo, rx_src_addr_lo; | ||
289 | u16 rx_len, tx_len; | ||
290 | |||
291 | /* configure the params according to MAC type */ | ||
292 | switch (bp->link_vars.mac_type) { | ||
293 | case MAC_TYPE_BMAC: | ||
294 | mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : | ||
295 | NIG_REG_INGRESS_BMAC0_MEM); | ||
296 | |||
297 | /* BIGMAC_REGISTER_TX_STAT_GTPKT .. | ||
298 | BIGMAC_REGISTER_TX_STAT_GTBYT */ | ||
299 | if (CHIP_IS_E1x(bp)) { | ||
300 | tx_src_addr_lo = (mac_addr + | ||
301 | BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; | ||
302 | tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - | ||
303 | BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; | ||
304 | rx_src_addr_lo = (mac_addr + | ||
305 | BIGMAC_REGISTER_RX_STAT_GR64) >> 2; | ||
306 | rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - | ||
307 | BIGMAC_REGISTER_RX_STAT_GR64) >> 2; | ||
308 | } else { | ||
309 | tx_src_addr_lo = (mac_addr + | ||
310 | BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; | ||
311 | tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT - | ||
312 | BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; | ||
313 | rx_src_addr_lo = (mac_addr + | ||
314 | BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; | ||
315 | rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ - | ||
316 | BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; | ||
317 | } | ||
318 | break; | ||
319 | |||
320 | case MAC_TYPE_UMAC: /* handled by MSTAT */ | ||
321 | case MAC_TYPE_XMAC: /* handled by MSTAT */ | ||
322 | default: | ||
323 | mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0; | ||
324 | tx_src_addr_lo = (mac_addr + | ||
325 | MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2; | ||
326 | rx_src_addr_lo = (mac_addr + | ||
327 | MSTAT_REG_RX_STAT_GR64_LO) >> 2; | ||
328 | tx_len = sizeof(bp->slowpath-> | ||
329 | mac_stats.mstat_stats.stats_tx) >> 2; | ||
330 | rx_len = sizeof(bp->slowpath-> | ||
331 | mac_stats.mstat_stats.stats_rx) >> 2; | ||
332 | break; | ||
333 | } | ||
334 | |||
335 | /* TX stats */ | ||
336 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
337 | dmae->opcode = opcode; | ||
338 | dmae->src_addr_lo = tx_src_addr_lo; | ||
339 | dmae->src_addr_hi = 0; | ||
340 | dmae->len = tx_len; | ||
341 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); | ||
342 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); | ||
343 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
344 | dmae->comp_addr_hi = 0; | ||
345 | dmae->comp_val = 1; | ||
346 | |||
347 | /* RX stats */ | ||
348 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
349 | dmae->opcode = opcode; | ||
350 | dmae->src_addr_hi = 0; | ||
351 | dmae->src_addr_lo = rx_src_addr_lo; | ||
352 | dmae->dst_addr_lo = | ||
353 | U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); | ||
354 | dmae->dst_addr_hi = | ||
355 | U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); | ||
356 | dmae->len = rx_len; | ||
357 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
358 | dmae->comp_addr_hi = 0; | ||
359 | dmae->comp_val = 1; | ||
448 | } | 360 | } |
449 | 361 | ||
450 | /* NIG */ | 362 | /* NIG */ |
363 | if (!CHIP_IS_E3(bp)) { | ||
364 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
365 | dmae->opcode = opcode; | ||
366 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : | ||
367 | NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; | ||
368 | dmae->src_addr_hi = 0; | ||
369 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + | ||
370 | offsetof(struct nig_stats, egress_mac_pkt0_lo)); | ||
371 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + | ||
372 | offsetof(struct nig_stats, egress_mac_pkt0_lo)); | ||
373 | dmae->len = (2*sizeof(u32)) >> 2; | ||
374 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
375 | dmae->comp_addr_hi = 0; | ||
376 | dmae->comp_val = 1; | ||
377 | |||
378 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
379 | dmae->opcode = opcode; | ||
380 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : | ||
381 | NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; | ||
382 | dmae->src_addr_hi = 0; | ||
383 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + | ||
384 | offsetof(struct nig_stats, egress_mac_pkt1_lo)); | ||
385 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + | ||
386 | offsetof(struct nig_stats, egress_mac_pkt1_lo)); | ||
387 | dmae->len = (2*sizeof(u32)) >> 2; | ||
388 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
389 | dmae->comp_addr_hi = 0; | ||
390 | dmae->comp_val = 1; | ||
391 | } | ||
392 | |||
451 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | 393 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); |
452 | dmae->opcode = opcode; | 394 | dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, |
395 | true, DMAE_COMP_PCI); | ||
453 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD : | 396 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD : |
454 | NIG_REG_STAT0_BRB_DISCARD) >> 2; | 397 | NIG_REG_STAT0_BRB_DISCARD) >> 2; |
455 | dmae->src_addr_hi = 0; | 398 | dmae->src_addr_hi = 0; |
456 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); | 399 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); |
457 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); | 400 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); |
458 | dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; | 401 | dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; |
459 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
460 | dmae->comp_addr_hi = 0; | ||
461 | dmae->comp_val = 1; | ||
462 | 402 | ||
463 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
464 | dmae->opcode = opcode; | ||
465 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : | ||
466 | NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; | ||
467 | dmae->src_addr_hi = 0; | ||
468 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + | ||
469 | offsetof(struct nig_stats, egress_mac_pkt0_lo)); | ||
470 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + | ||
471 | offsetof(struct nig_stats, egress_mac_pkt0_lo)); | ||
472 | dmae->len = (2*sizeof(u32)) >> 2; | ||
473 | dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; | ||
474 | dmae->comp_addr_hi = 0; | ||
475 | dmae->comp_val = 1; | ||
476 | |||
477 | dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); | ||
478 | dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, | ||
479 | true, DMAE_COMP_PCI); | ||
480 | dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : | ||
481 | NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; | ||
482 | dmae->src_addr_hi = 0; | ||
483 | dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + | ||
484 | offsetof(struct nig_stats, egress_mac_pkt1_lo)); | ||
485 | dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + | ||
486 | offsetof(struct nig_stats, egress_mac_pkt1_lo)); | ||
487 | dmae->len = (2*sizeof(u32)) >> 2; | ||
488 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); | 403 | dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); |
489 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); | 404 | dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); |
490 | dmae->comp_val = DMAE_COMP_VAL; | 405 | dmae->comp_val = DMAE_COMP_VAL; |
@@ -566,7 +481,8 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) | |||
566 | UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); | 481 | UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); |
567 | UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); | 482 | UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); |
568 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); | 483 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); |
569 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); | 484 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); |
485 | |||
570 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); | 486 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); |
571 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); | 487 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); |
572 | UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); | 488 | UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); |
@@ -580,13 +496,13 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) | |||
580 | tx_stat_etherstatspkts512octetsto1023octets); | 496 | tx_stat_etherstatspkts512octetsto1023octets); |
581 | UPDATE_STAT64(tx_stat_gt1518, | 497 | UPDATE_STAT64(tx_stat_gt1518, |
582 | tx_stat_etherstatspkts1024octetsto1522octets); | 498 | tx_stat_etherstatspkts1024octetsto1522octets); |
583 | UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047); | 499 | UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); |
584 | UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095); | 500 | UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); |
585 | UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216); | 501 | UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); |
586 | UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383); | 502 | UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); |
587 | UPDATE_STAT64(tx_stat_gterr, | 503 | UPDATE_STAT64(tx_stat_gterr, |
588 | tx_stat_dot3statsinternalmactransmiterrors); | 504 | tx_stat_dot3statsinternalmactransmiterrors); |
589 | UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl); | 505 | UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); |
590 | 506 | ||
591 | } else { | 507 | } else { |
592 | struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats); | 508 | struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats); |
@@ -600,7 +516,7 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) | |||
600 | UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); | 516 | UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); |
601 | UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); | 517 | UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); |
602 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); | 518 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); |
603 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); | 519 | UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); |
604 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); | 520 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); |
605 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); | 521 | UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); |
606 | UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); | 522 | UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); |
@@ -614,19 +530,96 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) | |||
614 | tx_stat_etherstatspkts512octetsto1023octets); | 530 | tx_stat_etherstatspkts512octetsto1023octets); |
615 | UPDATE_STAT64(tx_stat_gt1518, | 531 | UPDATE_STAT64(tx_stat_gt1518, |
616 | tx_stat_etherstatspkts1024octetsto1522octets); | 532 | tx_stat_etherstatspkts1024octetsto1522octets); |
617 | UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047); | 533 | UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); |
618 | UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095); | 534 | UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); |
619 | UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216); | 535 | UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); |
620 | UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383); | 536 | UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); |
621 | UPDATE_STAT64(tx_stat_gterr, | 537 | UPDATE_STAT64(tx_stat_gterr, |
622 | tx_stat_dot3statsinternalmactransmiterrors); | 538 | tx_stat_dot3statsinternalmactransmiterrors); |
623 | UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl); | 539 | UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); |
624 | } | 540 | } |
625 | 541 | ||
626 | estats->pause_frames_received_hi = | 542 | estats->pause_frames_received_hi = |
627 | pstats->mac_stx[1].rx_stat_bmac_xpf_hi; | 543 | pstats->mac_stx[1].rx_stat_mac_xpf_hi; |
544 | estats->pause_frames_received_lo = | ||
545 | pstats->mac_stx[1].rx_stat_mac_xpf_lo; | ||
546 | |||
547 | estats->pause_frames_sent_hi = | ||
548 | pstats->mac_stx[1].tx_stat_outxoffsent_hi; | ||
549 | estats->pause_frames_sent_lo = | ||
550 | pstats->mac_stx[1].tx_stat_outxoffsent_lo; | ||
551 | } | ||
552 | |||
553 | static void bnx2x_mstat_stats_update(struct bnx2x *bp) | ||
554 | { | ||
555 | struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); | ||
556 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | ||
557 | |||
558 | struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats); | ||
559 | |||
560 | ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets); | ||
561 | ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors); | ||
562 | ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts); | ||
563 | ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong); | ||
564 | ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments); | ||
565 | ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived); | ||
566 | ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered); | ||
567 | ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf); | ||
568 | ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent); | ||
569 | ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone); | ||
570 | |||
571 | |||
572 | ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets); | ||
573 | ADD_STAT64(stats_tx.tx_gt127, | ||
574 | tx_stat_etherstatspkts65octetsto127octets); | ||
575 | ADD_STAT64(stats_tx.tx_gt255, | ||
576 | tx_stat_etherstatspkts128octetsto255octets); | ||
577 | ADD_STAT64(stats_tx.tx_gt511, | ||
578 | tx_stat_etherstatspkts256octetsto511octets); | ||
579 | ADD_STAT64(stats_tx.tx_gt1023, | ||
580 | tx_stat_etherstatspkts512octetsto1023octets); | ||
581 | ADD_STAT64(stats_tx.tx_gt1518, | ||
582 | tx_stat_etherstatspkts1024octetsto1522octets); | ||
583 | ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047); | ||
584 | |||
585 | ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095); | ||
586 | ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216); | ||
587 | ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383); | ||
588 | |||
589 | ADD_STAT64(stats_tx.tx_gterr, | ||
590 | tx_stat_dot3statsinternalmactransmiterrors); | ||
591 | ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl); | ||
592 | |||
593 | ADD_64(estats->etherstatspkts1024octetsto1522octets_hi, | ||
594 | new->stats_tx.tx_gt1518_hi, | ||
595 | estats->etherstatspkts1024octetsto1522octets_lo, | ||
596 | new->stats_tx.tx_gt1518_lo); | ||
597 | |||
598 | ADD_64(estats->etherstatspktsover1522octets_hi, | ||
599 | new->stats_tx.tx_gt2047_hi, | ||
600 | estats->etherstatspktsover1522octets_lo, | ||
601 | new->stats_tx.tx_gt2047_lo); | ||
602 | |||
603 | ADD_64(estats->etherstatspktsover1522octets_hi, | ||
604 | new->stats_tx.tx_gt4095_hi, | ||
605 | estats->etherstatspktsover1522octets_lo, | ||
606 | new->stats_tx.tx_gt4095_lo); | ||
607 | |||
608 | ADD_64(estats->etherstatspktsover1522octets_hi, | ||
609 | new->stats_tx.tx_gt9216_hi, | ||
610 | estats->etherstatspktsover1522octets_lo, | ||
611 | new->stats_tx.tx_gt9216_lo); | ||
612 | |||
613 | |||
614 | ADD_64(estats->etherstatspktsover1522octets_hi, | ||
615 | new->stats_tx.tx_gt16383_hi, | ||
616 | estats->etherstatspktsover1522octets_lo, | ||
617 | new->stats_tx.tx_gt16383_lo); | ||
618 | |||
619 | estats->pause_frames_received_hi = | ||
620 | pstats->mac_stx[1].rx_stat_mac_xpf_hi; | ||
628 | estats->pause_frames_received_lo = | 621 | estats->pause_frames_received_lo = |
629 | pstats->mac_stx[1].rx_stat_bmac_xpf_lo; | 622 | pstats->mac_stx[1].rx_stat_mac_xpf_lo; |
630 | 623 | ||
631 | estats->pause_frames_sent_hi = | 624 | estats->pause_frames_sent_hi = |
632 | pstats->mac_stx[1].tx_stat_outxoffsent_hi; | 625 | pstats->mac_stx[1].tx_stat_outxoffsent_hi; |
@@ -702,15 +695,26 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) | |||
702 | u32 hi; | 695 | u32 hi; |
703 | } diff; | 696 | } diff; |
704 | 697 | ||
705 | if (bp->link_vars.mac_type == MAC_TYPE_BMAC) | 698 | switch (bp->link_vars.mac_type) { |
699 | case MAC_TYPE_BMAC: | ||
706 | bnx2x_bmac_stats_update(bp); | 700 | bnx2x_bmac_stats_update(bp); |
701 | break; | ||
707 | 702 | ||
708 | else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) | 703 | case MAC_TYPE_EMAC: |
709 | bnx2x_emac_stats_update(bp); | 704 | bnx2x_emac_stats_update(bp); |
705 | break; | ||
706 | |||
707 | case MAC_TYPE_UMAC: | ||
708 | case MAC_TYPE_XMAC: | ||
709 | bnx2x_mstat_stats_update(bp); | ||
710 | break; | ||
710 | 711 | ||
711 | else { /* unreached */ | 712 | case MAC_TYPE_NONE: /* unreached */ |
712 | BNX2X_ERR("stats updated by DMAE but no MAC active\n"); | 713 | BNX2X_ERR("stats updated by DMAE but no MAC active\n"); |
713 | return -1; | 714 | return -1; |
715 | |||
716 | default: /* unreached */ | ||
717 | BNX2X_ERR("Unknown MAC type\n"); | ||
714 | } | 718 | } |
715 | 719 | ||
716 | ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, | 720 | ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, |
@@ -718,9 +722,12 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) | |||
718 | ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, | 722 | ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, |
719 | new->brb_truncate - old->brb_truncate); | 723 | new->brb_truncate - old->brb_truncate); |
720 | 724 | ||
721 | UPDATE_STAT64_NIG(egress_mac_pkt0, | 725 | if (!CHIP_IS_E3(bp)) { |
726 | UPDATE_STAT64_NIG(egress_mac_pkt0, | ||
722 | etherstatspkts1024octetsto1522octets); | 727 | etherstatspkts1024octetsto1522octets); |
723 | UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets); | 728 | UPDATE_STAT64_NIG(egress_mac_pkt1, |
729 | etherstatspktsover1522octets); | ||
730 | } | ||
724 | 731 | ||
725 | memcpy(old, new, sizeof(struct nig_stats)); | 732 | memcpy(old, new, sizeof(struct nig_stats)); |
726 | 733 | ||
@@ -746,11 +753,13 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) | |||
746 | 753 | ||
747 | static int bnx2x_storm_stats_update(struct bnx2x *bp) | 754 | static int bnx2x_storm_stats_update(struct bnx2x *bp) |
748 | { | 755 | { |
749 | struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats); | ||
750 | struct tstorm_per_port_stats *tport = | 756 | struct tstorm_per_port_stats *tport = |
751 | &stats->tstorm_common.port_statistics; | 757 | &bp->fw_stats_data->port.tstorm_port_statistics; |
758 | struct tstorm_per_pf_stats *tfunc = | ||
759 | &bp->fw_stats_data->pf.tstorm_pf_statistics; | ||
752 | struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); | 760 | struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); |
753 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | 761 | struct bnx2x_eth_stats *estats = &bp->eth_stats; |
762 | struct stats_counter *counters = &bp->fw_stats_data->storm_counters; | ||
754 | int i; | 763 | int i; |
755 | u16 cur_stats_counter; | 764 | u16 cur_stats_counter; |
756 | 765 | ||
@@ -761,6 +770,35 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
761 | cur_stats_counter = bp->stats_counter - 1; | 770 | cur_stats_counter = bp->stats_counter - 1; |
762 | spin_unlock_bh(&bp->stats_lock); | 771 | spin_unlock_bh(&bp->stats_lock); |
763 | 772 | ||
773 | /* are storm stats valid? */ | ||
774 | if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { | ||
775 | DP(BNX2X_MSG_STATS, "stats not updated by xstorm" | ||
776 | " xstorm counter (0x%x) != stats_counter (0x%x)\n", | ||
777 | le16_to_cpu(counters->xstats_counter), bp->stats_counter); | ||
778 | return -EAGAIN; | ||
779 | } | ||
780 | |||
781 | if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { | ||
782 | DP(BNX2X_MSG_STATS, "stats not updated by ustorm" | ||
783 | " ustorm counter (0x%x) != stats_counter (0x%x)\n", | ||
784 | le16_to_cpu(counters->ustats_counter), bp->stats_counter); | ||
785 | return -EAGAIN; | ||
786 | } | ||
787 | |||
788 | if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { | ||
789 | DP(BNX2X_MSG_STATS, "stats not updated by cstorm" | ||
790 | " cstorm counter (0x%x) != stats_counter (0x%x)\n", | ||
791 | le16_to_cpu(counters->cstats_counter), bp->stats_counter); | ||
792 | return -EAGAIN; | ||
793 | } | ||
794 | |||
795 | if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { | ||
796 | DP(BNX2X_MSG_STATS, "stats not updated by tstorm" | ||
797 | " tstorm counter (0x%x) != stats_counter (0x%x)\n", | ||
798 | le16_to_cpu(counters->tstats_counter), bp->stats_counter); | ||
799 | return -EAGAIN; | ||
800 | } | ||
801 | |||
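The four per-storm checks above repeat the same comparison of a firmware completion counter against the driver's current counter. A condensed sketch of that pattern only (hypothetical helper, not part of the patch, which keeps the explicit checks so each storm gets its own debug message; the types are the HSI ones already used above):

    static bool bnx2x_storm_counters_valid(const struct stats_counter *c,
                                           u16 cur_stats_counter)
    {
            /* a mismatch means the firmware has not yet completed the
             * statistics ramrod matching the driver's counter */
            return le16_to_cpu(c->xstats_counter) == cur_stats_counter &&
                   le16_to_cpu(c->ustats_counter) == cur_stats_counter &&
                   le16_to_cpu(c->cstats_counter) == cur_stats_counter &&
                   le16_to_cpu(c->tstats_counter) == cur_stats_counter;
    }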
764 | memcpy(&(fstats->total_bytes_received_hi), | 802 | memcpy(&(fstats->total_bytes_received_hi), |
765 | &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), | 803 | &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), |
766 | sizeof(struct host_func_stats) - 2*sizeof(u32)); | 804 | sizeof(struct host_func_stats) - 2*sizeof(u32)); |
@@ -770,94 +808,84 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
770 | estats->etherstatsoverrsizepkts_lo = 0; | 808 | estats->etherstatsoverrsizepkts_lo = 0; |
771 | estats->no_buff_discard_hi = 0; | 809 | estats->no_buff_discard_hi = 0; |
772 | estats->no_buff_discard_lo = 0; | 810 | estats->no_buff_discard_lo = 0; |
811 | estats->total_tpa_aggregations_hi = 0; | ||
812 | estats->total_tpa_aggregations_lo = 0; | ||
813 | estats->total_tpa_aggregated_frames_hi = 0; | ||
814 | estats->total_tpa_aggregated_frames_lo = 0; | ||
815 | estats->total_tpa_bytes_hi = 0; | ||
816 | estats->total_tpa_bytes_lo = 0; | ||
773 | 817 | ||
774 | for_each_eth_queue(bp, i) { | 818 | for_each_eth_queue(bp, i) { |
775 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 819 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
776 | int cl_id = fp->cl_id; | 820 | struct tstorm_per_queue_stats *tclient = |
777 | struct tstorm_per_client_stats *tclient = | 821 | &bp->fw_stats_data->queue_stats[i]. |
778 | &stats->tstorm_common.client_statistics[cl_id]; | 822 | tstorm_queue_statistics; |
779 | struct tstorm_per_client_stats *old_tclient = &fp->old_tclient; | 823 | struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; |
780 | struct ustorm_per_client_stats *uclient = | 824 | struct ustorm_per_queue_stats *uclient = |
781 | &stats->ustorm_common.client_statistics[cl_id]; | 825 | &bp->fw_stats_data->queue_stats[i]. |
782 | struct ustorm_per_client_stats *old_uclient = &fp->old_uclient; | 826 | ustorm_queue_statistics; |
783 | struct xstorm_per_client_stats *xclient = | 827 | struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; |
784 | &stats->xstorm_common.client_statistics[cl_id]; | 828 | struct xstorm_per_queue_stats *xclient = |
785 | struct xstorm_per_client_stats *old_xclient = &fp->old_xclient; | 829 | &bp->fw_stats_data->queue_stats[i]. |
830 | xstorm_queue_statistics; | ||
831 | struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; | ||
786 | struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; | 832 | struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; |
787 | u32 diff; | 833 | u32 diff; |
788 | 834 | ||
789 | /* are storm stats valid? */ | 835 | DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, " |
790 | if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) { | 836 | "bcast_sent 0x%x mcast_sent 0x%x\n", |
791 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm" | 837 | i, xclient->ucast_pkts_sent, |
792 | " xstorm counter (0x%x) != stats_counter (0x%x)\n", | 838 | xclient->bcast_pkts_sent, xclient->mcast_pkts_sent); |
793 | i, xclient->stats_counter, cur_stats_counter + 1); | 839 | |
794 | return -1; | 840 | DP(BNX2X_MSG_STATS, "---------------\n"); |
795 | } | ||
796 | if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) { | ||
797 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm" | ||
798 | " tstorm counter (0x%x) != stats_counter (0x%x)\n", | ||
799 | i, tclient->stats_counter, cur_stats_counter + 1); | ||
800 | return -2; | ||
801 | } | ||
802 | if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) { | ||
803 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm" | ||
804 | " ustorm counter (0x%x) != stats_counter (0x%x)\n", | ||
805 | i, uclient->stats_counter, cur_stats_counter + 1); | ||
806 | return -4; | ||
807 | } | ||
808 | 841 | ||
842 | qstats->total_broadcast_bytes_received_hi = | ||
843 | le32_to_cpu(tclient->rcv_bcast_bytes.hi); | ||
844 | qstats->total_broadcast_bytes_received_lo = | ||
845 | le32_to_cpu(tclient->rcv_bcast_bytes.lo); | ||
846 | |||
847 | qstats->total_multicast_bytes_received_hi = | ||
848 | le32_to_cpu(tclient->rcv_mcast_bytes.hi); | ||
849 | qstats->total_multicast_bytes_received_lo = | ||
850 | le32_to_cpu(tclient->rcv_mcast_bytes.lo); | ||
851 | |||
852 | qstats->total_unicast_bytes_received_hi = | ||
853 | le32_to_cpu(tclient->rcv_ucast_bytes.hi); | ||
854 | qstats->total_unicast_bytes_received_lo = | ||
855 | le32_to_cpu(tclient->rcv_ucast_bytes.lo); | ||
856 | |||
857 | /* | ||
858 | * sum unicast/multicast/broadcast | ||
859 | * into total_bytes_received | ||
860 | */ | ||
809 | qstats->total_bytes_received_hi = | 861 | qstats->total_bytes_received_hi = |
810 | le32_to_cpu(tclient->rcv_broadcast_bytes.hi); | 862 | qstats->total_broadcast_bytes_received_hi; |
811 | qstats->total_bytes_received_lo = | 863 | qstats->total_bytes_received_lo = |
812 | le32_to_cpu(tclient->rcv_broadcast_bytes.lo); | 864 | qstats->total_broadcast_bytes_received_lo; |
813 | 865 | ||
814 | ADD_64(qstats->total_bytes_received_hi, | 866 | ADD_64(qstats->total_bytes_received_hi, |
815 | le32_to_cpu(tclient->rcv_multicast_bytes.hi), | 867 | qstats->total_multicast_bytes_received_hi, |
816 | qstats->total_bytes_received_lo, | 868 | qstats->total_bytes_received_lo, |
817 | le32_to_cpu(tclient->rcv_multicast_bytes.lo)); | 869 | qstats->total_multicast_bytes_received_lo); |
818 | 870 | ||
819 | ADD_64(qstats->total_bytes_received_hi, | 871 | ADD_64(qstats->total_bytes_received_hi, |
820 | le32_to_cpu(tclient->rcv_unicast_bytes.hi), | 872 | qstats->total_unicast_bytes_received_hi, |
821 | qstats->total_bytes_received_lo, | ||
822 | le32_to_cpu(tclient->rcv_unicast_bytes.lo)); | ||
823 | |||
824 | SUB_64(qstats->total_bytes_received_hi, | ||
825 | le32_to_cpu(uclient->bcast_no_buff_bytes.hi), | ||
826 | qstats->total_bytes_received_lo, | 873 | qstats->total_bytes_received_lo, |
827 | le32_to_cpu(uclient->bcast_no_buff_bytes.lo)); | 874 | qstats->total_unicast_bytes_received_lo); |
828 | |||
829 | SUB_64(qstats->total_bytes_received_hi, | ||
830 | le32_to_cpu(uclient->mcast_no_buff_bytes.hi), | ||
831 | qstats->total_bytes_received_lo, | ||
832 | le32_to_cpu(uclient->mcast_no_buff_bytes.lo)); | ||
833 | |||
834 | SUB_64(qstats->total_bytes_received_hi, | ||
835 | le32_to_cpu(uclient->ucast_no_buff_bytes.hi), | ||
836 | qstats->total_bytes_received_lo, | ||
837 | le32_to_cpu(uclient->ucast_no_buff_bytes.lo)); | ||
838 | 875 | ||
839 | qstats->valid_bytes_received_hi = | 876 | qstats->valid_bytes_received_hi = |
840 | qstats->total_bytes_received_hi; | 877 | qstats->total_bytes_received_hi; |
841 | qstats->valid_bytes_received_lo = | 878 | qstats->valid_bytes_received_lo = |
842 | qstats->total_bytes_received_lo; | 879 | qstats->total_bytes_received_lo; |
843 | 880 | ||
844 | qstats->error_bytes_received_hi = | ||
845 | le32_to_cpu(tclient->rcv_error_bytes.hi); | ||
846 | qstats->error_bytes_received_lo = | ||
847 | le32_to_cpu(tclient->rcv_error_bytes.lo); | ||
848 | |||
849 | ADD_64(qstats->total_bytes_received_hi, | ||
850 | qstats->error_bytes_received_hi, | ||
851 | qstats->total_bytes_received_lo, | ||
852 | qstats->error_bytes_received_lo); | ||
853 | 881 | ||
854 | UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, | 882 | UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, |
855 | total_unicast_packets_received); | 883 | total_unicast_packets_received); |
856 | UPDATE_EXTEND_TSTAT(rcv_multicast_pkts, | 884 | UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, |
857 | total_multicast_packets_received); | 885 | total_multicast_packets_received); |
858 | UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts, | 886 | UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, |
859 | total_broadcast_packets_received); | 887 | total_broadcast_packets_received); |
860 | UPDATE_EXTEND_TSTAT(packets_too_big_discard, | 888 | UPDATE_EXTEND_TSTAT(pkts_too_big_discard, |
861 | etherstatsoverrsizepkts); | 889 | etherstatsoverrsizepkts); |
862 | UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard); | 890 | UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard); |
863 | 891 | ||
@@ -871,30 +899,78 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
871 | UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard); | 899 | UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard); |
872 | UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); | 900 | UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); |
873 | 901 | ||
902 | qstats->total_broadcast_bytes_transmitted_hi = | ||
903 | le32_to_cpu(xclient->bcast_bytes_sent.hi); | ||
904 | qstats->total_broadcast_bytes_transmitted_lo = | ||
905 | le32_to_cpu(xclient->bcast_bytes_sent.lo); | ||
906 | |||
907 | qstats->total_multicast_bytes_transmitted_hi = | ||
908 | le32_to_cpu(xclient->mcast_bytes_sent.hi); | ||
909 | qstats->total_multicast_bytes_transmitted_lo = | ||
910 | le32_to_cpu(xclient->mcast_bytes_sent.lo); | ||
911 | |||
912 | qstats->total_unicast_bytes_transmitted_hi = | ||
913 | le32_to_cpu(xclient->ucast_bytes_sent.hi); | ||
914 | qstats->total_unicast_bytes_transmitted_lo = | ||
915 | le32_to_cpu(xclient->ucast_bytes_sent.lo); | ||
916 | /* | ||
917 | * sum unicast/multicast/broadcast | ||
918 | * into total_bytes_transmitted | ||
919 | */ | ||
874 | qstats->total_bytes_transmitted_hi = | 920 | qstats->total_bytes_transmitted_hi = |
875 | le32_to_cpu(xclient->unicast_bytes_sent.hi); | 921 | qstats->total_unicast_bytes_transmitted_hi; |
876 | qstats->total_bytes_transmitted_lo = | 922 | qstats->total_bytes_transmitted_lo = |
877 | le32_to_cpu(xclient->unicast_bytes_sent.lo); | 923 | qstats->total_unicast_bytes_transmitted_lo; |
878 | 924 | ||
879 | ADD_64(qstats->total_bytes_transmitted_hi, | 925 | ADD_64(qstats->total_bytes_transmitted_hi, |
880 | le32_to_cpu(xclient->multicast_bytes_sent.hi), | 926 | qstats->total_broadcast_bytes_transmitted_hi, |
881 | qstats->total_bytes_transmitted_lo, | 927 | qstats->total_bytes_transmitted_lo, |
882 | le32_to_cpu(xclient->multicast_bytes_sent.lo)); | 928 | qstats->total_broadcast_bytes_transmitted_lo); |
883 | 929 | ||
884 | ADD_64(qstats->total_bytes_transmitted_hi, | 930 | ADD_64(qstats->total_bytes_transmitted_hi, |
885 | le32_to_cpu(xclient->broadcast_bytes_sent.hi), | 931 | qstats->total_multicast_bytes_transmitted_hi, |
886 | qstats->total_bytes_transmitted_lo, | 932 | qstats->total_bytes_transmitted_lo, |
887 | le32_to_cpu(xclient->broadcast_bytes_sent.lo)); | 933 | qstats->total_multicast_bytes_transmitted_lo); |
888 | 934 | ||
889 | UPDATE_EXTEND_XSTAT(unicast_pkts_sent, | 935 | UPDATE_EXTEND_XSTAT(ucast_pkts_sent, |
890 | total_unicast_packets_transmitted); | 936 | total_unicast_packets_transmitted); |
891 | UPDATE_EXTEND_XSTAT(multicast_pkts_sent, | 937 | UPDATE_EXTEND_XSTAT(mcast_pkts_sent, |
892 | total_multicast_packets_transmitted); | 938 | total_multicast_packets_transmitted); |
893 | UPDATE_EXTEND_XSTAT(broadcast_pkts_sent, | 939 | UPDATE_EXTEND_XSTAT(bcast_pkts_sent, |
894 | total_broadcast_packets_transmitted); | 940 | total_broadcast_packets_transmitted); |
895 | 941 | ||
896 | old_tclient->checksum_discard = tclient->checksum_discard; | 942 | UPDATE_EXTEND_TSTAT(checksum_discard, |
897 | old_tclient->ttl0_discard = tclient->ttl0_discard; | 943 | total_packets_received_checksum_discarded); |
944 | UPDATE_EXTEND_TSTAT(ttl0_discard, | ||
945 | total_packets_received_ttl0_discarded); | ||
946 | |||
947 | UPDATE_EXTEND_XSTAT(error_drop_pkts, | ||
948 | total_transmitted_dropped_packets_error); | ||
949 | |||
950 | /* TPA aggregations completed */ | ||
951 | UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations); | ||
952 | /* Number of network frames aggregated by TPA */ | ||
953 | UPDATE_EXTEND_USTAT(coalesced_pkts, | ||
954 | total_tpa_aggregated_frames); | ||
955 | /* Total number of bytes in completed TPA aggregations */ | ||
956 | qstats->total_tpa_bytes_lo = | ||
957 | le32_to_cpu(uclient->coalesced_bytes.lo); | ||
958 | qstats->total_tpa_bytes_hi = | ||
959 | le32_to_cpu(uclient->coalesced_bytes.hi); | ||
960 | |||
961 | /* TPA stats per-function */ | ||
962 | ADD_64(estats->total_tpa_aggregations_hi, | ||
963 | qstats->total_tpa_aggregations_hi, | ||
964 | estats->total_tpa_aggregations_lo, | ||
965 | qstats->total_tpa_aggregations_lo); | ||
966 | ADD_64(estats->total_tpa_aggregated_frames_hi, | ||
967 | qstats->total_tpa_aggregated_frames_hi, | ||
968 | estats->total_tpa_aggregated_frames_lo, | ||
969 | qstats->total_tpa_aggregated_frames_lo); | ||
970 | ADD_64(estats->total_tpa_bytes_hi, | ||
971 | qstats->total_tpa_bytes_hi, | ||
972 | estats->total_tpa_bytes_lo, | ||
973 | qstats->total_tpa_bytes_lo); | ||
898 | 974 | ||
899 | ADD_64(fstats->total_bytes_received_hi, | 975 | ADD_64(fstats->total_bytes_received_hi, |
900 | qstats->total_bytes_received_hi, | 976 | qstats->total_bytes_received_hi, |
@@ -933,10 +1009,6 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
933 | fstats->valid_bytes_received_lo, | 1009 | fstats->valid_bytes_received_lo, |
934 | qstats->valid_bytes_received_lo); | 1010 | qstats->valid_bytes_received_lo); |
935 | 1011 | ||
936 | ADD_64(estats->error_bytes_received_hi, | ||
937 | qstats->error_bytes_received_hi, | ||
938 | estats->error_bytes_received_lo, | ||
939 | qstats->error_bytes_received_lo); | ||
940 | ADD_64(estats->etherstatsoverrsizepkts_hi, | 1012 | ADD_64(estats->etherstatsoverrsizepkts_hi, |
941 | qstats->etherstatsoverrsizepkts_hi, | 1013 | qstats->etherstatsoverrsizepkts_hi, |
942 | estats->etherstatsoverrsizepkts_lo, | 1014 | estats->etherstatsoverrsizepkts_lo, |
@@ -950,9 +1022,19 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
950 | fstats->total_bytes_received_lo, | 1022 | fstats->total_bytes_received_lo, |
951 | estats->rx_stat_ifhcinbadoctets_lo); | 1023 | estats->rx_stat_ifhcinbadoctets_lo); |
952 | 1024 | ||
1025 | ADD_64(fstats->total_bytes_received_hi, | ||
1026 | tfunc->rcv_error_bytes.hi, | ||
1027 | fstats->total_bytes_received_lo, | ||
1028 | tfunc->rcv_error_bytes.lo); | ||
1029 | |||
953 | memcpy(estats, &(fstats->total_bytes_received_hi), | 1030 | memcpy(estats, &(fstats->total_bytes_received_hi), |
954 | sizeof(struct host_func_stats) - 2*sizeof(u32)); | 1031 | sizeof(struct host_func_stats) - 2*sizeof(u32)); |
955 | 1032 | ||
1033 | ADD_64(estats->error_bytes_received_hi, | ||
1034 | tfunc->rcv_error_bytes.hi, | ||
1035 | estats->error_bytes_received_lo, | ||
1036 | tfunc->rcv_error_bytes.lo); | ||
1037 | |||
956 | ADD_64(estats->etherstatsoverrsizepkts_hi, | 1038 | ADD_64(estats->etherstatsoverrsizepkts_hi, |
957 | estats->rx_stat_dot3statsframestoolong_hi, | 1039 | estats->rx_stat_dot3statsframestoolong_hi, |
958 | estats->etherstatsoverrsizepkts_lo, | 1040 | estats->etherstatsoverrsizepkts_lo, |
@@ -965,8 +1047,8 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
965 | if (bp->port.pmf) { | 1047 | if (bp->port.pmf) { |
966 | estats->mac_filter_discard = | 1048 | estats->mac_filter_discard = |
967 | le32_to_cpu(tport->mac_filter_discard); | 1049 | le32_to_cpu(tport->mac_filter_discard); |
968 | estats->xxoverflow_discard = | 1050 | estats->mf_tag_discard = |
969 | le32_to_cpu(tport->xxoverflow_discard); | 1051 | le32_to_cpu(tport->mf_tag_discard); |
970 | estats->brb_truncate_discard = | 1052 | estats->brb_truncate_discard = |
971 | le32_to_cpu(tport->brb_truncate_discard); | 1053 | le32_to_cpu(tport->brb_truncate_discard); |
972 | estats->mac_discard = le32_to_cpu(tport->mac_discard); | 1054 | estats->mac_discard = le32_to_cpu(tport->mac_discard); |
@@ -1023,7 +1105,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp) | |||
1023 | nstats->rx_frame_errors = | 1105 | nstats->rx_frame_errors = |
1024 | bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi); | 1106 | bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi); |
1025 | nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi); | 1107 | nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi); |
1026 | nstats->rx_missed_errors = estats->xxoverflow_discard; | 1108 | nstats->rx_missed_errors = 0; |
1027 | 1109 | ||
1028 | nstats->rx_errors = nstats->rx_length_errors + | 1110 | nstats->rx_errors = nstats->rx_length_errors + |
1029 | nstats->rx_over_errors + | 1111 | nstats->rx_over_errors + |
@@ -1065,10 +1147,27 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp) | |||
1065 | } | 1147 | } |
1066 | } | 1148 | } |
1067 | 1149 | ||
1150 | static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp) | ||
1151 | { | ||
1152 | u32 val; | ||
1153 | |||
1154 | if (SHMEM2_HAS(bp, edebug_driver_if[1])) { | ||
1155 | val = SHMEM2_RD(bp, edebug_driver_if[1]); | ||
1156 | |||
1157 | if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) | ||
1158 | return true; | ||
1159 | } | ||
1160 | |||
1161 | return false; | ||
1162 | } | ||
1163 | |||
1068 | static void bnx2x_stats_update(struct bnx2x *bp) | 1164 | static void bnx2x_stats_update(struct bnx2x *bp) |
1069 | { | 1165 | { |
1070 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | 1166 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); |
1071 | 1167 | ||
1168 | if (bnx2x_edebug_stats_stopped(bp)) | ||
1169 | return; | ||
1170 | |||
1072 | if (*stats_comp != DMAE_COMP_VAL) | 1171 | if (*stats_comp != DMAE_COMP_VAL) |
1073 | return; | 1172 | return; |
1074 | 1173 | ||
@@ -1088,8 +1187,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1088 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | 1187 | struct bnx2x_eth_stats *estats = &bp->eth_stats; |
1089 | int i; | 1188 | int i; |
1090 | 1189 | ||
1091 | printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n", | 1190 | netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n", |
1092 | bp->dev->name, | ||
1093 | estats->brb_drop_lo, estats->brb_truncate_lo); | 1191 | estats->brb_drop_lo, estats->brb_truncate_lo); |
1094 | 1192 | ||
1095 | for_each_eth_queue(bp, i) { | 1193 | for_each_eth_queue(bp, i) { |
@@ -1149,6 +1247,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp) | |||
1149 | else | 1247 | else |
1150 | dmae->opcode = bnx2x_dmae_opcode_add_comp( | 1248 | dmae->opcode = bnx2x_dmae_opcode_add_comp( |
1151 | opcode, DMAE_COMP_PCI); | 1249 | opcode, DMAE_COMP_PCI); |
1250 | |||
1152 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); | 1251 | dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); |
1153 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); | 1252 | dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); |
1154 | dmae->dst_addr_lo = bp->port.port_stx >> 2; | 1253 | dmae->dst_addr_lo = bp->port.port_stx >> 2; |
@@ -1235,13 +1334,9 @@ static const struct { | |||
1235 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | 1334 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) |
1236 | { | 1335 | { |
1237 | enum bnx2x_stats_state state; | 1336 | enum bnx2x_stats_state state; |
1238 | |||
1239 | if (unlikely(bp->panic)) | 1337 | if (unlikely(bp->panic)) |
1240 | return; | 1338 | return; |
1241 | |||
1242 | bnx2x_stats_stm[bp->stats_state][event].action(bp); | 1339 | bnx2x_stats_stm[bp->stats_state][event].action(bp); |
1243 | |||
1244 | /* Protect a state change flow */ | ||
1245 | spin_lock_bh(&bp->stats_lock); | 1340 | spin_lock_bh(&bp->stats_lock); |
1246 | state = bp->stats_state; | 1341 | state = bp->stats_state; |
1247 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | 1342 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; |
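The dispatch above indexes a two-dimensional (state x event) table whose entries pair an action callback with the next state. A sketch of the table's shape as implied by this hunk (the actual definition and its initializer live earlier in bnx2x_stats.c and are not part of this patch; the STATS_STATE_MAX/STATS_EVENT_MAX bounds are assumed from the driver's stats enums):

    static const struct {
            void (*action)(struct bnx2x *bp);
            enum bnx2x_stats_state next_state;
    } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX];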
@@ -1297,7 +1392,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp) | |||
1297 | func_stx = bp->func_stx; | 1392 | func_stx = bp->func_stx; |
1298 | 1393 | ||
1299 | for (vn = VN_0; vn < vn_max; vn++) { | 1394 | for (vn = VN_0; vn < vn_max; vn++) { |
1300 | int mb_idx = !CHIP_IS_E2(bp) ? 2*vn + BP_PORT(bp) : vn; | 1395 | int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn; |
1301 | 1396 | ||
1302 | bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); | 1397 | bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); |
1303 | bnx2x_func_stats_init(bp); | 1398 | bnx2x_func_stats_init(bp); |
@@ -1339,12 +1434,97 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp) | |||
1339 | bnx2x_stats_comp(bp); | 1434 | bnx2x_stats_comp(bp); |
1340 | } | 1435 | } |
1341 | 1436 | ||
1437 | /** | ||
1438 | * This function prepares the statistics ramrod data so that later we | ||
1439 | * only have to increment the statistics counter and send the ramrod | ||
1440 | * each time statistics are requested. | ||
1441 | * | ||
1442 | * @param bp | ||
1443 | */ | ||
1444 | static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) | ||
1445 | { | ||
1446 | int i; | ||
1447 | struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr; | ||
1448 | |||
1449 | dma_addr_t cur_data_offset; | ||
1450 | struct stats_query_entry *cur_query_entry; | ||
1451 | |||
1452 | stats_hdr->cmd_num = bp->fw_stats_num; | ||
1453 | stats_hdr->drv_stats_counter = 0; | ||
1454 | |||
1455 | /* storm_counters struct contains the counters of completed | ||
1456 | * statistics requests per storm which are incremented by FW | ||
1457 | * each time it completes handling a statistics ramrod. We will | ||
1458 | * check these counters in the timer handler and discard a | ||
1459 | * (statistics) ramrod completion. | ||
1460 | */ | ||
1461 | cur_data_offset = bp->fw_stats_data_mapping + | ||
1462 | offsetof(struct bnx2x_fw_stats_data, storm_counters); | ||
1463 | |||
1464 | stats_hdr->stats_counters_addrs.hi = | ||
1465 | cpu_to_le32(U64_HI(cur_data_offset)); | ||
1466 | stats_hdr->stats_counters_addrs.lo = | ||
1467 | cpu_to_le32(U64_LO(cur_data_offset)); | ||
1468 | |||
1469 | /* prepare for the first stats ramrod (will be completed with | ||
1470 | * the counters equal to zero) - init counters to something different. | ||
1471 | */ | ||
1472 | memset(&bp->fw_stats_data->storm_counters, 0xff, | ||
1473 | sizeof(struct stats_counter)); | ||
1474 | |||
1475 | /**** Port FW statistics data ****/ | ||
1476 | cur_data_offset = bp->fw_stats_data_mapping + | ||
1477 | offsetof(struct bnx2x_fw_stats_data, port); | ||
1478 | |||
1479 | cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX]; | ||
1480 | |||
1481 | cur_query_entry->kind = STATS_TYPE_PORT; | ||
1482 | /* For port query index is a DONT CARE */ | ||
1483 | cur_query_entry->index = BP_PORT(bp); | ||
1484 | /* For port query funcID is a DONT CARE */ | ||
1485 | cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); | ||
1486 | cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); | ||
1487 | cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); | ||
1488 | |||
1489 | /**** PF FW statistics data ****/ | ||
1490 | cur_data_offset = bp->fw_stats_data_mapping + | ||
1491 | offsetof(struct bnx2x_fw_stats_data, pf); | ||
1492 | |||
1493 | cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX]; | ||
1494 | |||
1495 | cur_query_entry->kind = STATS_TYPE_PF; | ||
1496 | /* For PF query index is a DONT CARE */ | ||
1497 | cur_query_entry->index = BP_PORT(bp); | ||
1498 | cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); | ||
1499 | cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); | ||
1500 | cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); | ||
1501 | |||
1502 | /**** Clients' queries ****/ | ||
1503 | cur_data_offset = bp->fw_stats_data_mapping + | ||
1504 | offsetof(struct bnx2x_fw_stats_data, queue_stats); | ||
1505 | |||
1506 | for_each_eth_queue(bp, i) { | ||
1507 | cur_query_entry = | ||
1508 | &bp->fw_stats_req-> | ||
1509 | query[BNX2X_FIRST_QUEUE_QUERY_IDX + i]; | ||
1510 | |||
1511 | cur_query_entry->kind = STATS_TYPE_QUEUE; | ||
1512 | cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]); | ||
1513 | cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); | ||
1514 | cur_query_entry->address.hi = | ||
1515 | cpu_to_le32(U64_HI(cur_data_offset)); | ||
1516 | cur_query_entry->address.lo = | ||
1517 | cpu_to_le32(U64_LO(cur_data_offset)); | ||
1518 | |||
1519 | cur_data_offset += sizeof(struct per_queue_stats); | ||
1520 | } | ||
1521 | } | ||
1522 | |||
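The offsetof() calls in bnx2x_prep_fw_stats_req() imply that the DMA-able firmware statistics buffer is laid out roughly as sketched below. The per_port_stats/per_pf_stats member types are assumed from the field accesses seen earlier (->port.tstorm_port_statistics, ->pf.tstorm_pf_statistics); the real definition in bnx2x.h may differ in detail:

    struct bnx2x_fw_stats_data {
            struct stats_counter    storm_counters; /* per-storm completion counters */
            struct per_port_stats   port;           /* target of the STATS_TYPE_PORT query */
            struct per_pf_stats     pf;             /* target of the STATS_TYPE_PF query */
            struct per_queue_stats  queue_stats[1]; /* one entry per ETH queue */
    };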
1342 | void bnx2x_stats_init(struct bnx2x *bp) | 1523 | void bnx2x_stats_init(struct bnx2x *bp) |
1343 | { | 1524 | { |
1344 | int port = BP_PORT(bp); | 1525 | int /*abs*/port = BP_PORT(bp); |
1345 | int mb_idx = BP_FW_MB_IDX(bp); | 1526 | int mb_idx = BP_FW_MB_IDX(bp); |
1346 | int i; | 1527 | int i; |
1347 | struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats); | ||
1348 | 1528 | ||
1349 | bp->stats_pending = 0; | 1529 | bp->stats_pending = 0; |
1350 | bp->executer_idx = 0; | 1530 | bp->executer_idx = 0; |
@@ -1362,45 +1542,35 @@ void bnx2x_stats_init(struct bnx2x *bp) | |||
1362 | DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n", | 1542 | DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n", |
1363 | bp->port.port_stx, bp->func_stx); | 1543 | bp->port.port_stx, bp->func_stx); |
1364 | 1544 | ||
1545 | port = BP_PORT(bp); | ||
1365 | /* port stats */ | 1546 | /* port stats */ |
1366 | memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); | 1547 | memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); |
1367 | bp->port.old_nig_stats.brb_discard = | 1548 | bp->port.old_nig_stats.brb_discard = |
1368 | REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); | 1549 | REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); |
1369 | bp->port.old_nig_stats.brb_truncate = | 1550 | bp->port.old_nig_stats.brb_truncate = |
1370 | REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); | 1551 | REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); |
1371 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, | 1552 | if (!CHIP_IS_E3(bp)) { |
1372 | &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); | 1553 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, |
1373 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, | 1554 | &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); |
1374 | &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); | 1555 | REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, |
1556 | &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); | ||
1557 | } | ||
1375 | 1558 | ||
1376 | /* function stats */ | 1559 | /* function stats */ |
1377 | for_each_queue(bp, i) { | 1560 | for_each_queue(bp, i) { |
1378 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 1561 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
1379 | 1562 | ||
1380 | memset(&fp->old_tclient, 0, | 1563 | memset(&fp->old_tclient, 0, sizeof(fp->old_tclient)); |
1381 | sizeof(struct tstorm_per_client_stats)); | 1564 | memset(&fp->old_uclient, 0, sizeof(fp->old_uclient)); |
1382 | memset(&fp->old_uclient, 0, | 1565 | memset(&fp->old_xclient, 0, sizeof(fp->old_xclient)); |
1383 | sizeof(struct ustorm_per_client_stats)); | 1566 | memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats)); |
1384 | memset(&fp->old_xclient, 0, | ||
1385 | sizeof(struct xstorm_per_client_stats)); | ||
1386 | memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats)); | ||
1387 | } | 1567 | } |
1388 | 1568 | ||
1389 | /* FW stats are currently collected for ETH clients only */ | 1569 | /* Prepare statistics ramrod data */ |
1390 | for_each_eth_queue(bp, i) { | 1570 | bnx2x_prep_fw_stats_req(bp); |
1391 | /* Set initial stats counter in the stats ramrod data to -1 */ | ||
1392 | int cl_id = bp->fp[i].cl_id; | ||
1393 | |||
1394 | stats->xstorm_common.client_statistics[cl_id]. | ||
1395 | stats_counter = 0xffff; | ||
1396 | stats->ustorm_common.client_statistics[cl_id]. | ||
1397 | stats_counter = 0xffff; | ||
1398 | stats->tstorm_common.client_statistics[cl_id]. | ||
1399 | stats_counter = 0xffff; | ||
1400 | } | ||
1401 | 1571 | ||
1402 | memset(&bp->dev->stats, 0, sizeof(struct net_device_stats)); | 1572 | memset(&bp->dev->stats, 0, sizeof(bp->dev->stats)); |
1403 | memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats)); | 1573 | memset(&bp->eth_stats, 0, sizeof(bp->eth_stats)); |
1404 | 1574 | ||
1405 | bp->stats_state = STATS_STATE_DISABLED; | 1575 | bp->stats_state = STATS_STATE_DISABLED; |
1406 | 1576 | ||
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h index 45d14d8bc1aa..5d8ce2f6afef 100644 --- a/drivers/net/bnx2x/bnx2x_stats.h +++ b/drivers/net/bnx2x/bnx2x_stats.h | |||
@@ -14,48 +14,11 @@ | |||
14 | * Statistics and Link management by Yitchak Gertner | 14 | * Statistics and Link management by Yitchak Gertner |
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | |||
18 | #ifndef BNX2X_STATS_H | 17 | #ifndef BNX2X_STATS_H |
19 | #define BNX2X_STATS_H | 18 | #define BNX2X_STATS_H |
20 | 19 | ||
21 | #include <linux/types.h> | 20 | #include <linux/types.h> |
22 | 21 | ||
23 | struct bnx2x_eth_q_stats { | ||
24 | u32 total_bytes_received_hi; | ||
25 | u32 total_bytes_received_lo; | ||
26 | u32 total_bytes_transmitted_hi; | ||
27 | u32 total_bytes_transmitted_lo; | ||
28 | u32 total_unicast_packets_received_hi; | ||
29 | u32 total_unicast_packets_received_lo; | ||
30 | u32 total_multicast_packets_received_hi; | ||
31 | u32 total_multicast_packets_received_lo; | ||
32 | u32 total_broadcast_packets_received_hi; | ||
33 | u32 total_broadcast_packets_received_lo; | ||
34 | u32 total_unicast_packets_transmitted_hi; | ||
35 | u32 total_unicast_packets_transmitted_lo; | ||
36 | u32 total_multicast_packets_transmitted_hi; | ||
37 | u32 total_multicast_packets_transmitted_lo; | ||
38 | u32 total_broadcast_packets_transmitted_hi; | ||
39 | u32 total_broadcast_packets_transmitted_lo; | ||
40 | u32 valid_bytes_received_hi; | ||
41 | u32 valid_bytes_received_lo; | ||
42 | |||
43 | u32 error_bytes_received_hi; | ||
44 | u32 error_bytes_received_lo; | ||
45 | u32 etherstatsoverrsizepkts_hi; | ||
46 | u32 etherstatsoverrsizepkts_lo; | ||
47 | u32 no_buff_discard_hi; | ||
48 | u32 no_buff_discard_lo; | ||
49 | |||
50 | u32 driver_xoff; | ||
51 | u32 rx_err_discard_pkt; | ||
52 | u32 rx_skb_alloc_failed; | ||
53 | u32 hw_csum_err; | ||
54 | }; | ||
55 | |||
56 | #define Q_STATS_OFFSET32(stat_name) \ | ||
57 | (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) | ||
58 | |||
59 | struct nig_stats { | 22 | struct nig_stats { |
60 | u32 brb_discard; | 23 | u32 brb_discard; |
61 | u32 brb_packet; | 24 | u32 brb_packet; |
@@ -212,7 +175,7 @@ struct bnx2x_eth_stats { | |||
212 | u32 brb_truncate_lo; | 175 | u32 brb_truncate_lo; |
213 | 176 | ||
214 | u32 mac_filter_discard; | 177 | u32 mac_filter_discard; |
215 | u32 xxoverflow_discard; | 178 | u32 mf_tag_discard; |
216 | u32 brb_truncate_discard; | 179 | u32 brb_truncate_discard; |
217 | u32 mac_discard; | 180 | u32 mac_discard; |
218 | 181 | ||
@@ -222,16 +185,197 @@ struct bnx2x_eth_stats { | |||
222 | u32 hw_csum_err; | 185 | u32 hw_csum_err; |
223 | 186 | ||
224 | u32 nig_timer_max; | 187 | u32 nig_timer_max; |
188 | |||
189 | /* TPA */ | ||
190 | u32 total_tpa_aggregations_hi; | ||
191 | u32 total_tpa_aggregations_lo; | ||
192 | u32 total_tpa_aggregated_frames_hi; | ||
193 | u32 total_tpa_aggregated_frames_lo; | ||
194 | u32 total_tpa_bytes_hi; | ||
195 | u32 total_tpa_bytes_lo; | ||
196 | }; | ||
197 | |||
198 | |||
199 | struct bnx2x_eth_q_stats { | ||
200 | u32 total_unicast_bytes_received_hi; | ||
201 | u32 total_unicast_bytes_received_lo; | ||
202 | u32 total_broadcast_bytes_received_hi; | ||
203 | u32 total_broadcast_bytes_received_lo; | ||
204 | u32 total_multicast_bytes_received_hi; | ||
205 | u32 total_multicast_bytes_received_lo; | ||
206 | u32 total_bytes_received_hi; | ||
207 | u32 total_bytes_received_lo; | ||
208 | u32 total_unicast_bytes_transmitted_hi; | ||
209 | u32 total_unicast_bytes_transmitted_lo; | ||
210 | u32 total_broadcast_bytes_transmitted_hi; | ||
211 | u32 total_broadcast_bytes_transmitted_lo; | ||
212 | u32 total_multicast_bytes_transmitted_hi; | ||
213 | u32 total_multicast_bytes_transmitted_lo; | ||
214 | u32 total_bytes_transmitted_hi; | ||
215 | u32 total_bytes_transmitted_lo; | ||
216 | u32 total_unicast_packets_received_hi; | ||
217 | u32 total_unicast_packets_received_lo; | ||
218 | u32 total_multicast_packets_received_hi; | ||
219 | u32 total_multicast_packets_received_lo; | ||
220 | u32 total_broadcast_packets_received_hi; | ||
221 | u32 total_broadcast_packets_received_lo; | ||
222 | u32 total_unicast_packets_transmitted_hi; | ||
223 | u32 total_unicast_packets_transmitted_lo; | ||
224 | u32 total_multicast_packets_transmitted_hi; | ||
225 | u32 total_multicast_packets_transmitted_lo; | ||
226 | u32 total_broadcast_packets_transmitted_hi; | ||
227 | u32 total_broadcast_packets_transmitted_lo; | ||
228 | u32 valid_bytes_received_hi; | ||
229 | u32 valid_bytes_received_lo; | ||
230 | |||
231 | u32 etherstatsoverrsizepkts_hi; | ||
232 | u32 etherstatsoverrsizepkts_lo; | ||
233 | u32 no_buff_discard_hi; | ||
234 | u32 no_buff_discard_lo; | ||
235 | |||
236 | u32 driver_xoff; | ||
237 | u32 rx_err_discard_pkt; | ||
238 | u32 rx_skb_alloc_failed; | ||
239 | u32 hw_csum_err; | ||
240 | |||
241 | u32 total_packets_received_checksum_discarded_hi; | ||
242 | u32 total_packets_received_checksum_discarded_lo; | ||
243 | u32 total_packets_received_ttl0_discarded_hi; | ||
244 | u32 total_packets_received_ttl0_discarded_lo; | ||
245 | u32 total_transmitted_dropped_packets_error_hi; | ||
246 | u32 total_transmitted_dropped_packets_error_lo; | ||
247 | |||
248 | /* TPA */ | ||
249 | u32 total_tpa_aggregations_hi; | ||
250 | u32 total_tpa_aggregations_lo; | ||
251 | u32 total_tpa_aggregated_frames_hi; | ||
252 | u32 total_tpa_aggregated_frames_lo; | ||
253 | u32 total_tpa_bytes_hi; | ||
254 | u32 total_tpa_bytes_lo; | ||
225 | }; | 255 | }; |
226 | 256 | ||
227 | #define STATS_OFFSET32(stat_name) \ | 257 | /**************************************************************************** |
228 | (offsetof(struct bnx2x_eth_stats, stat_name) / 4) | 258 | * Macros |
259 | ****************************************************************************/ | ||
260 | |||
261 | /* sum[hi:lo] += add[hi:lo] */ | ||
262 | #define ADD_64(s_hi, a_hi, s_lo, a_lo) \ | ||
263 | do { \ | ||
264 | s_lo += a_lo; \ | ||
265 | s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ | ||
266 | } while (0) | ||
267 | |||
268 | /* difference = minuend - subtrahend */ | ||
269 | #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ | ||
270 | do { \ | ||
271 | if (m_lo < s_lo) { \ | ||
272 | /* underflow */ \ | ||
273 | d_hi = m_hi - s_hi; \ | ||
274 | if (d_hi > 0) { \ | ||
275 | /* we can 'loan' 1 */ \ | ||
276 | d_hi--; \ | ||
277 | d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ | ||
278 | } else { \ | ||
279 | /* m_hi <= s_hi */ \ | ||
280 | d_hi = 0; \ | ||
281 | d_lo = 0; \ | ||
282 | } \ | ||
283 | } else { \ | ||
284 | /* m_lo >= s_lo */ \ | ||
285 | if (m_hi < s_hi) { \ | ||
286 | d_hi = 0; \ | ||
287 | d_lo = 0; \ | ||
288 | } else { \ | ||
289 | /* m_hi >= s_hi */ \ | ||
290 | d_hi = m_hi - s_hi; \ | ||
291 | d_lo = m_lo - s_lo; \ | ||
292 | } \ | ||
293 | } \ | ||
294 | } while (0) | ||
295 | |||
296 | #define UPDATE_STAT64(s, t) \ | ||
297 | do { \ | ||
298 | DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \ | ||
299 | diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \ | ||
300 | pstats->mac_stx[0].t##_hi = new->s##_hi; \ | ||
301 | pstats->mac_stx[0].t##_lo = new->s##_lo; \ | ||
302 | ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \ | ||
303 | pstats->mac_stx[1].t##_lo, diff.lo); \ | ||
304 | } while (0) | ||
305 | |||
306 | #define UPDATE_STAT64_NIG(s, t) \ | ||
307 | do { \ | ||
308 | DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \ | ||
309 | diff.lo, new->s##_lo, old->s##_lo); \ | ||
310 | ADD_64(estats->t##_hi, diff.hi, \ | ||
311 | estats->t##_lo, diff.lo); \ | ||
312 | } while (0) | ||
313 | |||
314 | /* sum[hi:lo] += add */ | ||
315 | #define ADD_EXTEND_64(s_hi, s_lo, a) \ | ||
316 | do { \ | ||
317 | s_lo += a; \ | ||
318 | s_hi += (s_lo < a) ? 1 : 0; \ | ||
319 | } while (0) | ||
320 | |||
321 | #define ADD_STAT64(diff, t) \ | ||
322 | do { \ | ||
323 | ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \ | ||
324 | pstats->mac_stx[1].t##_lo, new->diff##_lo); \ | ||
325 | } while (0) | ||
326 | |||
327 | #define UPDATE_EXTEND_STAT(s) \ | ||
328 | do { \ | ||
329 | ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \ | ||
330 | pstats->mac_stx[1].s##_lo, \ | ||
331 | new->s); \ | ||
332 | } while (0) | ||
333 | |||
334 | #define UPDATE_EXTEND_TSTAT(s, t) \ | ||
335 | do { \ | ||
336 | diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \ | ||
337 | old_tclient->s = tclient->s; \ | ||
338 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
339 | } while (0) | ||
340 | |||
341 | #define UPDATE_EXTEND_USTAT(s, t) \ | ||
342 | do { \ | ||
343 | diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ | ||
344 | old_uclient->s = uclient->s; \ | ||
345 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
346 | } while (0) | ||
347 | |||
348 | #define UPDATE_EXTEND_XSTAT(s, t) \ | ||
349 | do { \ | ||
350 | diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \ | ||
351 | old_xclient->s = xclient->s; \ | ||
352 | ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
353 | } while (0) | ||
354 | |||
355 | /* minuend -= subtrahend */ | ||
356 | #define SUB_64(m_hi, s_hi, m_lo, s_lo) \ | ||
357 | do { \ | ||
358 | DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \ | ||
359 | } while (0) | ||
360 | |||
361 | /* minuend[hi:lo] -= subtrahend */ | ||
362 | #define SUB_EXTEND_64(m_hi, m_lo, s) \ | ||
363 | do { \ | ||
364 | SUB_64(m_hi, 0, m_lo, s); \ | ||
365 | } while (0) | ||
366 | |||
367 | #define SUB_EXTEND_USTAT(s, t) \ | ||
368 | do { \ | ||
369 | diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ | ||
370 | SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ | ||
371 | } while (0) | ||
372 | |||
229 | 373 | ||
230 | /* Forward declaration */ | 374 | /* forward */ |
231 | struct bnx2x; | 375 | struct bnx2x; |
232 | 376 | ||
233 | void bnx2x_stats_init(struct bnx2x *bp); | 377 | void bnx2x_stats_init(struct bnx2x *bp); |
234 | 378 | ||
235 | extern const u32 dmae_reg_go_c[]; | 379 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); |
236 | 380 | ||
237 | #endif /* BNX2X_STATS_H */ | 381 | #endif /* BNX2X_STATS_H */ |
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 07f1b13c8dce..e66c3d9ab2c6 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* cnic.c: Broadcom CNIC core network driver. | 1 | /* cnic.c: Broadcom CNIC core network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2006-2010 Broadcom Corporation | 3 | * Copyright (c) 2006-2011 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -836,7 +836,6 @@ static void cnic_free_resc(struct cnic_dev *dev) | |||
836 | cp->ctx_blks = 0; | 836 | cp->ctx_blks = 0; |
837 | 837 | ||
838 | cnic_free_dma(dev, &cp->gbl_buf_info); | 838 | cnic_free_dma(dev, &cp->gbl_buf_info); |
839 | cnic_free_dma(dev, &cp->conn_buf_info); | ||
840 | cnic_free_dma(dev, &cp->kwq_info); | 839 | cnic_free_dma(dev, &cp->kwq_info); |
841 | cnic_free_dma(dev, &cp->kwq_16_data_info); | 840 | cnic_free_dma(dev, &cp->kwq_16_data_info); |
842 | cnic_free_dma(dev, &cp->kcq2.dma); | 841 | cnic_free_dma(dev, &cp->kcq2.dma); |
@@ -1176,7 +1175,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) | |||
1176 | cp->iscsi_start_cid = start_cid; | 1175 | cp->iscsi_start_cid = start_cid; |
1177 | cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ; | 1176 | cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ; |
1178 | 1177 | ||
1179 | if (BNX2X_CHIP_IS_E2(cp->chip_id)) { | 1178 | if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { |
1180 | cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS; | 1179 | cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS; |
1181 | cp->fcoe_init_cid = ethdev->fcoe_init_cid; | 1180 | cp->fcoe_init_cid = ethdev->fcoe_init_cid; |
1182 | if (!cp->fcoe_init_cid) | 1181 | if (!cp->fcoe_init_cid) |
@@ -1232,18 +1231,12 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) | |||
1232 | if (ret) | 1231 | if (ret) |
1233 | goto error; | 1232 | goto error; |
1234 | 1233 | ||
1235 | if (BNX2X_CHIP_IS_E2(cp->chip_id)) { | 1234 | if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { |
1236 | ret = cnic_alloc_kcq(dev, &cp->kcq2, false); | 1235 | ret = cnic_alloc_kcq(dev, &cp->kcq2, true); |
1237 | if (ret) | 1236 | if (ret) |
1238 | goto error; | 1237 | goto error; |
1239 | } | 1238 | } |
1240 | 1239 | ||
1241 | pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS * | ||
1242 | BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE; | ||
1243 | ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1); | ||
1244 | if (ret) | ||
1245 | goto error; | ||
1246 | |||
1247 | pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; | 1240 | pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; |
1248 | ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); | 1241 | ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); |
1249 | if (ret) | 1242 | if (ret) |
@@ -1610,6 +1603,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], | |||
1610 | struct iscsi_context *ictx; | 1603 | struct iscsi_context *ictx; |
1611 | struct regpair context_addr; | 1604 | struct regpair context_addr; |
1612 | int i, j, n = 2, n_max; | 1605 | int i, j, n = 2, n_max; |
1606 | u8 port = CNIC_PORT(cp); | ||
1613 | 1607 | ||
1614 | ctx->ctx_flags = 0; | 1608 | ctx->ctx_flags = 0; |
1615 | if (!req2->num_additional_wqes) | 1609 | if (!req2->num_additional_wqes) |
@@ -1661,6 +1655,17 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], | |||
1661 | XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA; | 1655 | XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA; |
1662 | ictx->xstorm_st_context.iscsi.flags.flags |= | 1656 | ictx->xstorm_st_context.iscsi.flags.flags |= |
1663 | XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T; | 1657 | XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T; |
1658 | ictx->xstorm_st_context.common.ethernet.reserved_vlan_type = | ||
1659 | ETH_P_8021Q; | ||
1660 | if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && | ||
1661 | cp->port_mode == CHIP_2_PORT_MODE) { | ||
1662 | |||
1663 | port = 0; | ||
1664 | } | ||
1665 | ictx->xstorm_st_context.common.flags = | ||
1666 | 1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT; | ||
1667 | ictx->xstorm_st_context.common.flags = | ||
1668 | port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT; | ||
1664 | 1669 | ||
1665 | ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; | 1670 | ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; |
1666 | /* TSTORM requires the base address of RQ DB & not PTE */ | 1671 | /* TSTORM requires the base address of RQ DB & not PTE */ |
@@ -1876,8 +1881,11 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid) | |||
1876 | ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, | 1881 | ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, |
1877 | hw_cid, NONE_CONNECTION_TYPE, &l5_data); | 1882 | hw_cid, NONE_CONNECTION_TYPE, &l5_data); |
1878 | 1883 | ||
1879 | if (ret == 0) | 1884 | if (ret == 0) { |
1880 | wait_event(ctx->waitq, ctx->wait_cond); | 1885 | wait_event(ctx->waitq, ctx->wait_cond); |
1886 | if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags))) | ||
1887 | return -EBUSY; | ||
1888 | } | ||
1881 | 1889 | ||
1882 | return ret; | 1890 | return ret; |
1883 | } | 1891 | } |
@@ -1912,8 +1920,10 @@ static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe) | |||
1912 | skip_cfc_delete: | 1920 | skip_cfc_delete: |
1913 | cnic_free_bnx2x_conn_resc(dev, l5_cid); | 1921 | cnic_free_bnx2x_conn_resc(dev, l5_cid); |
1914 | 1922 | ||
1915 | atomic_dec(&cp->iscsi_conn); | 1923 | if (!ret) { |
1916 | clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); | 1924 | atomic_dec(&cp->iscsi_conn); |
1925 | clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); | ||
1926 | } | ||
1917 | 1927 | ||
1918 | destroy_reply: | 1928 | destroy_reply: |
1919 | memset(&kcqe, 0, sizeof(kcqe)); | 1929 | memset(&kcqe, 0, sizeof(kcqe)); |
@@ -1972,8 +1982,6 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, | |||
1972 | tstorm_buf->ka_interval = kwqe3->ka_interval; | 1982 | tstorm_buf->ka_interval = kwqe3->ka_interval; |
1973 | tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count; | 1983 | tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count; |
1974 | } | 1984 | } |
1975 | tstorm_buf->rcv_buf = kwqe3->rcv_buf; | ||
1976 | tstorm_buf->snd_buf = kwqe3->snd_buf; | ||
1977 | tstorm_buf->max_rt_time = 0xffffffff; | 1985 | tstorm_buf->max_rt_time = 0xffffffff; |
1978 | } | 1986 | } |
1979 | 1987 | ||
@@ -2002,15 +2010,14 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev) | |||
2002 | TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, | 2010 | TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, |
2003 | mac[4]); | 2011 | mac[4]); |
2004 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | 2012 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + |
2005 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]); | 2013 | TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]); |
2006 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | 2014 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + |
2007 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, | 2015 | TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, |
2008 | mac[2]); | 2016 | mac[2]); |
2009 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | 2017 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + |
2010 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2, | 2018 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]); |
2011 | mac[1]); | ||
2012 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | 2019 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + |
2013 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3, | 2020 | TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, |
2014 | mac[0]); | 2021 | mac[0]); |
2015 | } | 2022 | } |
2016 | 2023 | ||
@@ -2189,7 +2196,7 @@ static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe) | |||
2189 | memset(fcoe_stat, 0, sizeof(*fcoe_stat)); | 2196 | memset(fcoe_stat, 0, sizeof(*fcoe_stat)); |
2190 | memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req)); | 2197 | memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req)); |
2191 | 2198 | ||
2192 | ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid, | 2199 | ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid, |
2193 | FCOE_CONNECTION_TYPE, &l5_data); | 2200 | FCOE_CONNECTION_TYPE, &l5_data); |
2194 | return ret; | 2201 | return ret; |
2195 | } | 2202 | } |
@@ -2234,12 +2241,9 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[], | |||
2234 | memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1)); | 2241 | memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1)); |
2235 | memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2)); | 2242 | memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2)); |
2236 | memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3)); | 2243 | memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3)); |
2237 | fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff; | 2244 | fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff; |
2238 | fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32; | 2245 | fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32; |
2239 | fcoe_init->eq_next_page_addr.lo = | 2246 | fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages; |
2240 | cp->kcq2.dma.pg_map_arr[1] & 0xffffffff; | ||
2241 | fcoe_init->eq_next_page_addr.hi = | ||
2242 | (u64) cp->kcq2.dma.pg_map_arr[1] >> 32; | ||
2243 | 2247 | ||
2244 | fcoe_init->sb_num = cp->status_blk_num; | 2248 | fcoe_init->sb_num = cp->status_blk_num; |
2245 | fcoe_init->eq_prod = MAX_KCQ_IDX; | 2249 | fcoe_init->eq_prod = MAX_KCQ_IDX; |
@@ -2247,7 +2251,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[], | |||
2247 | cp->kcq2.sw_prod_idx = 0; | 2251 | cp->kcq2.sw_prod_idx = 0; |
2248 | 2252 | ||
2249 | cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); | 2253 | cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); |
2250 | ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid, | 2254 | ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid, |
2251 | FCOE_CONNECTION_TYPE, &l5_data); | 2255 | FCOE_CONNECTION_TYPE, &l5_data); |
2252 | *work = 3; | 2256 | *work = 3; |
2253 | return ret; | 2257 | return ret; |
@@ -2463,7 +2467,7 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) | |||
2463 | cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); | 2467 | cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); |
2464 | 2468 | ||
2465 | memset(&l5_data, 0, sizeof(l5_data)); | 2469 | memset(&l5_data, 0, sizeof(l5_data)); |
2466 | ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid, | 2470 | ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid, |
2467 | FCOE_CONNECTION_TYPE, &l5_data); | 2471 | FCOE_CONNECTION_TYPE, &l5_data); |
2468 | return ret; | 2472 | return ret; |
2469 | } | 2473 | } |
@@ -2544,7 +2548,7 @@ static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev, | |||
2544 | if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) | 2548 | if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) |
2545 | return -EAGAIN; /* bnx2 is down */ | 2549 | return -EAGAIN; /* bnx2 is down */ |
2546 | 2550 | ||
2547 | if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710) | 2551 | if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) |
2548 | return -EINVAL; | 2552 | return -EINVAL; |
2549 | 2553 | ||
2550 | for (i = 0; i < num_wqes; ) { | 2554 | for (i = 0; i < num_wqes; ) { |
@@ -2935,7 +2939,7 @@ static void cnic_service_bnx2x_bh(unsigned long data) | |||
2935 | CNIC_WR16(dev, cp->kcq1.io_addr, | 2939 | CNIC_WR16(dev, cp->kcq1.io_addr, |
2936 | cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); | 2940 | cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); |
2937 | 2941 | ||
2938 | if (!BNX2X_CHIP_IS_E2(cp->chip_id)) { | 2942 | if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { |
2939 | cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, | 2943 | cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, |
2940 | status_idx, IGU_INT_ENABLE, 1); | 2944 | status_idx, IGU_INT_ENABLE, 1); |
2941 | break; | 2945 | break; |
@@ -3054,13 +3058,21 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info) | |||
3054 | break; | 3058 | break; |
3055 | } | 3059 | } |
3056 | case CNIC_CTL_COMPLETION_CMD: { | 3060 | case CNIC_CTL_COMPLETION_CMD: { |
3057 | u32 cid = BNX2X_SW_CID(info->data.comp.cid); | 3061 | struct cnic_ctl_completion *comp = &info->data.comp; |
3062 | u32 cid = BNX2X_SW_CID(comp->cid); | ||
3058 | u32 l5_cid; | 3063 | u32 l5_cid; |
3059 | struct cnic_local *cp = dev->cnic_priv; | 3064 | struct cnic_local *cp = dev->cnic_priv; |
3060 | 3065 | ||
3061 | if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { | 3066 | if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { |
3062 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; | 3067 | struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; |
3063 | 3068 | ||
3069 | if (unlikely(comp->error)) { | ||
3070 | set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags); | ||
3071 | netdev_err(dev->netdev, | ||
3072 | "CID %x CFC delete comp error %x\n", | ||
3073 | cid, comp->error); | ||
3074 | } | ||
3075 | |||
3064 | ctx->wait_cond = 1; | 3076 | ctx->wait_cond = 1; |
3065 | wake_up(&ctx->waitq); | 3077 | wake_up(&ctx->waitq); |
3066 | } | 3078 | } |
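Illustrative note: the hunk above makes the CFC-delete completion path record firmware errors in the context flags (CTX_FL_CID_ERROR) before waking whoever sleeps on ctx->waitq. Below is a minimal, hypothetical userspace C sketch of that hand-off; plain integers stand in for the kernel's atomic bit operations and wait queue, and the names are simplified stand-ins rather than the driver's real API.

/* Hypothetical, simplified sketch of the error propagation shown above.
 * Real cnic uses set_bit()/test_bit() on ctx->ctx_flags and a waitqueue;
 * here plain fields stand in so the sketch compiles as userspace C. */
#include <stdio.h>

#define CTX_FL_OFFLD_START 0
#define CTX_FL_DELETE_WAIT 1
#define CTX_FL_CID_ERROR   2

struct sketch_ctx {
	unsigned long ctx_flags;
	int wait_cond;
};

/* completion path: record the error, then wake the waiter */
static void complete_cfc_delete(struct sketch_ctx *ctx, unsigned int cid,
				unsigned int error)
{
	if (error) {
		ctx->ctx_flags |= 1UL << CTX_FL_CID_ERROR;
		fprintf(stderr, "CID %x CFC delete comp error %x\n", cid, error);
	}
	ctx->wait_cond = 1;	/* real code: wake_up(&ctx->waitq) */
}

/* waiter side: after the wait, success is the absence of CTX_FL_CID_ERROR */
static int cfc_delete_succeeded(const struct sketch_ctx *ctx)
{
	return !(ctx->ctx_flags & (1UL << CTX_FL_CID_ERROR));
}

int main(void)
{
	struct sketch_ctx ok = { 0, 0 }, bad = { 0, 0 };

	complete_cfc_delete(&ok, 0x80, 0);
	complete_cfc_delete(&bad, 0x81, 3);
	printf("clean delete ok: %d, errored delete ok: %d\n",
	       cfc_delete_succeeded(&ok), cfc_delete_succeeded(&bad));
	return 0;
}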
@@ -3935,10 +3947,17 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) | |||
3935 | 3947 | ||
3936 | for (i = 0; i < cp->max_cid_space; i++) { | 3948 | for (i = 0; i < cp->max_cid_space; i++) { |
3937 | struct cnic_context *ctx = &cp->ctx_tbl[i]; | 3949 | struct cnic_context *ctx = &cp->ctx_tbl[i]; |
3950 | int j; | ||
3938 | 3951 | ||
3939 | while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) | 3952 | while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) |
3940 | msleep(10); | 3953 | msleep(10); |
3941 | 3954 | ||
3955 | for (j = 0; j < 5; j++) { | ||
3956 | if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) | ||
3957 | break; | ||
3958 | msleep(20); | ||
3959 | } | ||
3960 | |||
3942 | if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) | 3961 | if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) |
3943 | netdev_warn(dev->netdev, "CID %x not deleted\n", | 3962 | netdev_warn(dev->netdev, "CID %x not deleted\n", |
3944 | ctx->cid); | 3963 | ctx->cid); |
@@ -4005,6 +4024,7 @@ static void cnic_delete_task(struct work_struct *work) | |||
4005 | 4024 | ||
4006 | for (i = 0; i < cp->max_cid_space; i++) { | 4025 | for (i = 0; i < cp->max_cid_space; i++) { |
4007 | struct cnic_context *ctx = &cp->ctx_tbl[i]; | 4026 | struct cnic_context *ctx = &cp->ctx_tbl[i]; |
4027 | int err; | ||
4008 | 4028 | ||
4009 | if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) || | 4029 | if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) || |
4010 | !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) | 4030 | !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) |
@@ -4018,13 +4038,15 @@ static void cnic_delete_task(struct work_struct *work) | |||
4018 | if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) | 4038 | if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) |
4019 | continue; | 4039 | continue; |
4020 | 4040 | ||
4021 | cnic_bnx2x_destroy_ramrod(dev, i); | 4041 | err = cnic_bnx2x_destroy_ramrod(dev, i); |
4022 | 4042 | ||
4023 | cnic_free_bnx2x_conn_resc(dev, i); | 4043 | cnic_free_bnx2x_conn_resc(dev, i); |
4024 | if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) | 4044 | if (!err) { |
4025 | atomic_dec(&cp->iscsi_conn); | 4045 | if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) |
4046 | atomic_dec(&cp->iscsi_conn); | ||
4026 | 4047 | ||
4027 | clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); | 4048 | clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); |
4049 | } | ||
4028 | } | 4050 | } |
4029 | 4051 | ||
4030 | if (need_resched) | 4052 | if (need_resched) |
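Illustrative note: with the change above, cnic_delete_task() only decrements the iSCSI connection count and clears CTX_FL_OFFLD_START when the destroy ramrod (including its CFC delete) completed cleanly, so a failed delete leaves the context marked as offloaded. A hypothetical, simplified sketch of that control flow; plain variables replace the driver's atomic_t and bit flags.

/* Hypothetical sketch of the control flow added above: resources are always
 * freed, but the connection counter and CTX_FL_OFFLD_START are only touched
 * when the destroy completed without error. */
#include <stdbool.h>
#include <stdio.h>

static int iscsi_conn = 1;	/* stand-in for atomic_t cp->iscsi_conn */
static bool offld_start = true;	/* stand-in for CTX_FL_OFFLD_START     */

static void delete_one(int destroy_err)
{
	/* cnic_free_bnx2x_conn_resc() equivalent would run unconditionally */
	if (!destroy_err) {
		iscsi_conn--;		/* atomic_dec(&cp->iscsi_conn)        */
		offld_start = false;	/* clear_bit(CTX_FL_OFFLD_START, ...) */
	}
}

int main(void)
{
	delete_one(-16);	/* e.g. a busy/error completion: keep the mark */
	printf("after failed destroy: conn=%d offld=%d\n", iscsi_conn, offld_start);
	delete_one(0);
	printf("after clean destroy:  conn=%d offld=%d\n", iscsi_conn, offld_start);
	return 0;
}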
@@ -4620,7 +4642,7 @@ static void cnic_enable_bnx2x_int(struct cnic_dev *dev) | |||
4620 | CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) + | 4642 | CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) + |
4621 | offsetof(struct hc_status_block_data_e1x, index_data) + | 4643 | offsetof(struct hc_status_block_data_e1x, index_data) + |
4622 | sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS + | 4644 | sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS + |
4623 | offsetof(struct hc_index_data, timeout), 64 / 12); | 4645 | offsetof(struct hc_index_data, timeout), 64 / 4); |
4624 | cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0); | 4646 | cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0); |
4625 | } | 4647 | } |
4626 | 4648 | ||
@@ -4636,7 +4658,6 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, | |||
4636 | union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring; | 4658 | union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring; |
4637 | dma_addr_t buf_map, ring_map = udev->l2_ring_map; | 4659 | dma_addr_t buf_map, ring_map = udev->l2_ring_map; |
4638 | struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; | 4660 | struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; |
4639 | int port = CNIC_PORT(cp); | ||
4640 | int i; | 4661 | int i; |
4641 | u32 cli = cp->ethdev->iscsi_l2_client_id; | 4662 | u32 cli = cp->ethdev->iscsi_l2_client_id; |
4642 | u32 val; | 4663 | u32 val; |
@@ -4677,10 +4698,9 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, | |||
4677 | 4698 | ||
4678 | /* reset xstorm per client statistics */ | 4699 | /* reset xstorm per client statistics */ |
4679 | if (cli < MAX_STAT_COUNTER_ID) { | 4700 | if (cli < MAX_STAT_COUNTER_ID) { |
4680 | val = BAR_XSTRORM_INTMEM + | 4701 | data->general.statistics_zero_flg = 1; |
4681 | XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); | 4702 | data->general.statistics_en_flg = 1; |
4682 | for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) | 4703 | data->general.statistics_counter_id = cli; |
4683 | CNIC_WR(dev, val + i * 4, 0); | ||
4684 | } | 4704 | } |
4685 | 4705 | ||
4686 | cp->tx_cons_ptr = | 4706 | cp->tx_cons_ptr = |
@@ -4698,7 +4718,6 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, | |||
4698 | (udev->l2_ring + (2 * BCM_PAGE_SIZE)); | 4718 | (udev->l2_ring + (2 * BCM_PAGE_SIZE)); |
4699 | struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; | 4719 | struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; |
4700 | int i; | 4720 | int i; |
4701 | int port = CNIC_PORT(cp); | ||
4702 | u32 cli = cp->ethdev->iscsi_l2_client_id; | 4721 | u32 cli = cp->ethdev->iscsi_l2_client_id; |
4703 | int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); | 4722 | int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); |
4704 | u32 val; | 4723 | u32 val; |
@@ -4706,10 +4725,10 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, | |||
4706 | 4725 | ||
4707 | /* General data */ | 4726 | /* General data */ |
4708 | data->general.client_id = cli; | 4727 | data->general.client_id = cli; |
4709 | data->general.statistics_en_flg = 1; | ||
4710 | data->general.statistics_counter_id = cli; | ||
4711 | data->general.activate_flg = 1; | 4728 | data->general.activate_flg = 1; |
4712 | data->general.sp_client_id = cli; | 4729 | data->general.sp_client_id = cli; |
4730 | data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14); | ||
4731 | data->general.func_id = cp->pfid; | ||
4713 | 4732 | ||
4714 | for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { | 4733 | for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { |
4715 | dma_addr_t buf_map; | 4734 | dma_addr_t buf_map; |
@@ -4743,23 +4762,12 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, | |||
4743 | data->rx.status_block_id = BNX2X_DEF_SB_ID; | 4762 | data->rx.status_block_id = BNX2X_DEF_SB_ID; |
4744 | 4763 | ||
4745 | data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT; | 4764 | data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT; |
4746 | data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size); | ||
4747 | 4765 | ||
4748 | data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14); | 4766 | data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size); |
4749 | data->rx.outer_vlan_removal_enable_flg = 1; | 4767 | data->rx.outer_vlan_removal_enable_flg = 1; |
4750 | 4768 | data->rx.silent_vlan_removal_flg = 1; | |
4751 | /* reset tstorm and ustorm per client statistics */ | 4769 | data->rx.silent_vlan_value = 0; |
4752 | if (cli < MAX_STAT_COUNTER_ID) { | 4770 | data->rx.silent_vlan_mask = 0xffff; |
4753 | val = BAR_TSTRORM_INTMEM + | ||
4754 | TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); | ||
4755 | for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) | ||
4756 | CNIC_WR(dev, val + i * 4, 0); | ||
4757 | |||
4758 | val = BAR_USTRORM_INTMEM + | ||
4759 | USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); | ||
4760 | for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++) | ||
4761 | CNIC_WR(dev, val + i * 4, 0); | ||
4762 | } | ||
4763 | 4771 | ||
4764 | cp->rx_cons_ptr = | 4772 | cp->rx_cons_ptr = |
4765 | &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS]; | 4773 | &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS]; |
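Illustrative note: the two ring-setup hunks above move per-client configuration out of direct storm internal-memory writes and into the client setup data handed to the 7.0 firmware: statistics are enabled and zeroed via statistics_zero_flg/statistics_en_flg/statistics_counter_id, the MTU and func_id travel in the general section, and VLAN stripping is requested through the silent_vlan_* fields. A hypothetical sketch of filling such a structure follows; the struct is a simplified stand-in, not the real HSI layout.

/* Hypothetical sketch: populate a simplified "client init" structure the way
 * the hunks above now do, instead of writing the storm INTMEM directly.
 * Field names mirror the diff; the struct itself is a stand-in. */
#include <stdint.h>
#include <stdio.h>

struct sketch_client_init {
	uint8_t  client_id;
	uint8_t  statistics_en_flg;
	uint8_t  statistics_zero_flg;
	uint8_t  statistics_counter_id;
	uint16_t mtu;
	uint8_t  silent_vlan_removal_flg;
	uint16_t silent_vlan_mask;
};

static void fill_client_init(struct sketch_client_init *data,
			     unsigned int cli, unsigned int buf_size,
			     unsigned int max_stat_counter_id)
{
	data->client_id = (uint8_t)cli;
	if (cli < max_stat_counter_id) {
		data->statistics_en_flg = 1;
		data->statistics_zero_flg = 1;	/* firmware zeroes the counters */
		data->statistics_counter_id = (uint8_t)cli;
	}
	data->mtu = (uint16_t)(buf_size - 14);	/* L2 buffer minus Ethernet header */
	data->silent_vlan_removal_flg = 1;
	data->silent_vlan_mask = 0xffff;
}

int main(void)
{
	struct sketch_client_init init = { 0 };

	fill_client_init(&init, 17, 1514, 64 /* arbitrary bound for the sketch */);
	printf("cli=%u mtu=%u stats_id=%u\n", (unsigned)init.client_id,
	       (unsigned)init.mtu, (unsigned)init.statistics_counter_id);
	return 0;
}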
@@ -4775,7 +4783,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) | |||
4775 | CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); | 4783 | CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); |
4776 | cp->kcq1.sw_prod_idx = 0; | 4784 | cp->kcq1.sw_prod_idx = 0; |
4777 | 4785 | ||
4778 | if (BNX2X_CHIP_IS_E2(cp->chip_id)) { | 4786 | if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { |
4779 | struct host_hc_status_block_e2 *sb = cp->status_blk.gen; | 4787 | struct host_hc_status_block_e2 *sb = cp->status_blk.gen; |
4780 | 4788 | ||
4781 | cp->kcq1.hw_prod_idx_ptr = | 4789 | cp->kcq1.hw_prod_idx_ptr = |
@@ -4791,7 +4799,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) | |||
4791 | &sb->sb.running_index[SM_RX_ID]; | 4799 | &sb->sb.running_index[SM_RX_ID]; |
4792 | } | 4800 | } |
4793 | 4801 | ||
4794 | if (BNX2X_CHIP_IS_E2(cp->chip_id)) { | 4802 | if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { |
4795 | struct host_hc_status_block_e2 *sb = cp->status_blk.gen; | 4803 | struct host_hc_status_block_e2 *sb = cp->status_blk.gen; |
4796 | 4804 | ||
4797 | cp->kcq2.io_addr = BAR_USTRORM_INTMEM + | 4805 | cp->kcq2.io_addr = BAR_USTRORM_INTMEM + |
@@ -4808,10 +4816,12 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) | |||
4808 | { | 4816 | { |
4809 | struct cnic_local *cp = dev->cnic_priv; | 4817 | struct cnic_local *cp = dev->cnic_priv; |
4810 | struct cnic_eth_dev *ethdev = cp->ethdev; | 4818 | struct cnic_eth_dev *ethdev = cp->ethdev; |
4811 | int func = CNIC_FUNC(cp), ret, i; | 4819 | int func = CNIC_FUNC(cp), ret; |
4812 | u32 pfid; | 4820 | u32 pfid; |
4813 | 4821 | ||
4814 | if (BNX2X_CHIP_IS_E2(cp->chip_id)) { | 4822 | cp->port_mode = CHIP_PORT_MODE_NONE; |
4823 | |||
4824 | if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { | ||
4815 | u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR); | 4825 | u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR); |
4816 | 4826 | ||
4817 | if (!(val & 1)) | 4827 | if (!(val & 1)) |
@@ -4819,10 +4829,13 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) | |||
4819 | else | 4829 | else |
4820 | val = (val >> 1) & 1; | 4830 | val = (val >> 1) & 1; |
4821 | 4831 | ||
4822 | if (val) | 4832 | if (val) { |
4833 | cp->port_mode = CHIP_4_PORT_MODE; | ||
4823 | cp->pfid = func >> 1; | 4834 | cp->pfid = func >> 1; |
4824 | else | 4835 | } else { |
4836 | cp->port_mode = CHIP_2_PORT_MODE; | ||
4825 | cp->pfid = func & 0x6; | 4837 | cp->pfid = func & 0x6; |
4838 | } | ||
4826 | } else { | 4839 | } else { |
4827 | cp->pfid = func; | 4840 | cp->pfid = func; |
4828 | } | 4841 | } |
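Illustrative note: the hunk above derives both cp->port_mode and cp->pfid on E2+ chips from the 4-port-mode override register, falling back to the plain enable register when the override bit is not set. A hypothetical sketch of just that decision follows; register reads are replaced by plain parameters, and the else branch is taken as 2-port mode, matching the CHIP_2_PORT_MODE definition added in cnic.h.

/* Hypothetical sketch of the E2+ port-mode/PF-id derivation shown above.
 * `ovwr` stands in for the MISC_REG_PORT4MODE_EN_OVWR read and `en4` for
 * the MISC_REG_PORT4MODE_EN fallback; register access itself is omitted. */
#include <stdio.h>

#define CHIP_4_PORT_MODE	0
#define CHIP_2_PORT_MODE	1
#define CHIP_PORT_MODE_NONE	2

static void derive_port_mode(unsigned int ovwr, unsigned int en4, int func,
			     unsigned int *port_mode, unsigned int *pfid)
{
	unsigned int val;

	if (!(ovwr & 1))	/* override not valid: use the plain enable reg */
		val = en4;
	else			/* override valid: bit 1 carries the mode */
		val = (ovwr >> 1) & 1;

	if (val) {
		*port_mode = CHIP_4_PORT_MODE;
		*pfid = (unsigned int)func >> 1;	/* two PFs per port */
	} else {
		*port_mode = CHIP_2_PORT_MODE;
		*pfid = (unsigned int)func & 0x6;	/* even PF of the pair */
	}
}

int main(void)
{
	unsigned int mode, pfid;

	derive_port_mode(0x3, 0, 5, &mode, &pfid);
	printf("4-port case: mode=%u pfid=%u\n", mode, pfid);
	derive_port_mode(0x1, 0, 4, &mode, &pfid);
	printf("2-port case: mode=%u pfid=%u\n", mode, pfid);
	return 0;
}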
@@ -4834,7 +4847,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) | |||
4834 | if (ret) | 4847 | if (ret) |
4835 | return -ENOMEM; | 4848 | return -ENOMEM; |
4836 | 4849 | ||
4837 | if (BNX2X_CHIP_IS_E2(cp->chip_id)) { | 4850 | if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { |
4838 | ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, | 4851 | ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, |
4839 | BNX2X_FCOE_NUM_CONNECTIONS, | 4852 | BNX2X_FCOE_NUM_CONNECTIONS, |
4840 | cp->fcoe_start_cid, 0); | 4853 | cp->fcoe_start_cid, 0); |
@@ -4871,15 +4884,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) | |||
4871 | CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0), | 4884 | CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0), |
4872 | HC_INDEX_ISCSI_EQ_CONS); | 4885 | HC_INDEX_ISCSI_EQ_CONS); |
4873 | 4886 | ||
4874 | for (i = 0; i < cp->conn_buf_info.num_pages; i++) { | ||
4875 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
4876 | TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i), | ||
4877 | cp->conn_buf_info.pgtbl[2 * i]); | ||
4878 | CNIC_WR(dev, BAR_TSTRORM_INTMEM + | ||
4879 | TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4, | ||
4880 | cp->conn_buf_info.pgtbl[(2 * i) + 1]); | ||
4881 | } | ||
4882 | |||
4883 | CNIC_WR(dev, BAR_USTRORM_INTMEM + | 4887 | CNIC_WR(dev, BAR_USTRORM_INTMEM + |
4884 | USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid), | 4888 | USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid), |
4885 | cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); | 4889 | cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); |
@@ -4927,7 +4931,7 @@ static void cnic_init_rings(struct cnic_dev *dev) | |||
4927 | cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); | 4931 | cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); |
4928 | 4932 | ||
4929 | off = BAR_USTRORM_INTMEM + | 4933 | off = BAR_USTRORM_INTMEM + |
4930 | (BNX2X_CHIP_IS_E2(cp->chip_id) ? | 4934 | (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? |
4931 | USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) : | 4935 | USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) : |
4932 | USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli)); | 4936 | USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli)); |
4933 | 4937 | ||
@@ -5277,7 +5281,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) | |||
5277 | 5281 | ||
5278 | if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) | 5282 | if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) |
5279 | cdev->max_iscsi_conn = ethdev->max_iscsi_conn; | 5283 | cdev->max_iscsi_conn = ethdev->max_iscsi_conn; |
5280 | if (BNX2X_CHIP_IS_E2(cp->chip_id) && | 5284 | if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && |
5281 | !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE)) | 5285 | !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE)) |
5282 | cdev->max_fcoe_conn = ethdev->max_fcoe_conn; | 5286 | cdev->max_fcoe_conn = ethdev->max_fcoe_conn; |
5283 | 5287 | ||
@@ -5293,7 +5297,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) | |||
5293 | cp->stop_cm = cnic_cm_stop_bnx2x_hw; | 5297 | cp->stop_cm = cnic_cm_stop_bnx2x_hw; |
5294 | cp->enable_int = cnic_enable_bnx2x_int; | 5298 | cp->enable_int = cnic_enable_bnx2x_int; |
5295 | cp->disable_int_sync = cnic_disable_bnx2x_int_sync; | 5299 | cp->disable_int_sync = cnic_disable_bnx2x_int_sync; |
5296 | if (BNX2X_CHIP_IS_E2(cp->chip_id)) | 5300 | if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) |
5297 | cp->ack_int = cnic_ack_bnx2x_e2_msix; | 5301 | cp->ack_int = cnic_ack_bnx2x_e2_msix; |
5298 | else | 5302 | else |
5299 | cp->ack_int = cnic_ack_bnx2x_msix; | 5303 | cp->ack_int = cnic_ack_bnx2x_msix; |
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h index eb11821108a0..330ef9350413 100644 --- a/drivers/net/cnic.h +++ b/drivers/net/cnic.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* cnic.h: Broadcom CNIC core network driver. | 1 | /* cnic.h: Broadcom CNIC core network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2006-2010 Broadcom Corporation | 3 | * Copyright (c) 2006-2011 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -68,11 +68,6 @@ | |||
68 | #define BNX2_PG_CTX_MAP 0x1a0034 | 68 | #define BNX2_PG_CTX_MAP 0x1a0034 |
69 | #define BNX2_ISCSI_CTX_MAP 0x1a0074 | 69 | #define BNX2_ISCSI_CTX_MAP 0x1a0074 |
70 | 70 | ||
71 | struct cnic_redirect_entry { | ||
72 | struct dst_entry *old_dst; | ||
73 | struct dst_entry *new_dst; | ||
74 | }; | ||
75 | |||
76 | #define MAX_COMPLETED_KCQE 64 | 71 | #define MAX_COMPLETED_KCQE 64 |
77 | 72 | ||
78 | #define MAX_CNIC_L5_CONTEXT 256 | 73 | #define MAX_CNIC_L5_CONTEXT 256 |
@@ -171,6 +166,7 @@ struct cnic_context { | |||
171 | unsigned long ctx_flags; | 166 | unsigned long ctx_flags; |
172 | #define CTX_FL_OFFLD_START 0 | 167 | #define CTX_FL_OFFLD_START 0 |
173 | #define CTX_FL_DELETE_WAIT 1 | 168 | #define CTX_FL_DELETE_WAIT 1 |
169 | #define CTX_FL_CID_ERROR 2 | ||
174 | u8 ulp_proto_id; | 170 | u8 ulp_proto_id; |
175 | union { | 171 | union { |
176 | struct cnic_iscsi *iscsi; | 172 | struct cnic_iscsi *iscsi; |
@@ -245,7 +241,7 @@ struct cnic_local { | |||
245 | u16 rx_cons; | 241 | u16 rx_cons; |
246 | u16 tx_cons; | 242 | u16 tx_cons; |
247 | 243 | ||
248 | struct iro *iro_arr; | 244 | const struct iro *iro_arr; |
249 | #define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr) | 245 | #define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr) |
250 | 246 | ||
251 | struct cnic_dma kwq_info; | 247 | struct cnic_dma kwq_info; |
@@ -286,7 +282,6 @@ struct cnic_local { | |||
286 | struct cnic_sock *csk_tbl; | 282 | struct cnic_sock *csk_tbl; |
287 | struct cnic_id_tbl csk_port_tbl; | 283 | struct cnic_id_tbl csk_port_tbl; |
288 | 284 | ||
289 | struct cnic_dma conn_buf_info; | ||
290 | struct cnic_dma gbl_buf_info; | 285 | struct cnic_dma gbl_buf_info; |
291 | 286 | ||
292 | struct cnic_iscsi *iscsi_tbl; | 287 | struct cnic_iscsi *iscsi_tbl; |
@@ -320,6 +315,11 @@ struct cnic_local { | |||
320 | u32 chip_id; | 315 | u32 chip_id; |
321 | int func; | 316 | int func; |
322 | u32 pfid; | 317 | u32 pfid; |
318 | u8 port_mode; | ||
319 | #define CHIP_4_PORT_MODE 0 | ||
320 | #define CHIP_2_PORT_MODE 1 | ||
321 | #define CHIP_PORT_MODE_NONE 2 | ||
322 | |||
323 | u32 shmem_base; | 323 | u32 shmem_base; |
324 | 324 | ||
325 | struct cnic_ops *cnic_ops; | 325 | struct cnic_ops *cnic_ops; |
@@ -369,7 +369,6 @@ struct bnx2x_bd_chain_next { | |||
369 | #define BNX2X_ISCSI_MAX_PENDING_R2TS 4 | 369 | #define BNX2X_ISCSI_MAX_PENDING_R2TS 4 |
370 | #define BNX2X_ISCSI_R2TQE_SIZE 8 | 370 | #define BNX2X_ISCSI_R2TQE_SIZE 8 |
371 | #define BNX2X_ISCSI_HQ_BD_SIZE 64 | 371 | #define BNX2X_ISCSI_HQ_BD_SIZE 64 |
372 | #define BNX2X_ISCSI_CONN_BUF_SIZE 64 | ||
373 | #define BNX2X_ISCSI_GLB_BUF_SIZE 64 | 372 | #define BNX2X_ISCSI_GLB_BUF_SIZE 64 |
374 | #define BNX2X_ISCSI_PBL_NOT_CACHED 0xff | 373 | #define BNX2X_ISCSI_PBL_NOT_CACHED 0xff |
375 | #define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff | 374 | #define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff |
@@ -406,6 +405,7 @@ struct bnx2x_bd_chain_next { | |||
406 | #define BNX2X_CHIP_IS_E2(x) \ | 405 | #define BNX2X_CHIP_IS_E2(x) \ |
407 | (BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \ | 406 | (BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \ |
408 | BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x)) | 407 | BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x)) |
408 | #define BNX2X_CHIP_IS_E2_PLUS(x) BNX2X_CHIP_IS_E2(x) | ||
409 | 409 | ||
410 | #define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id) | 410 | #define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id) |
411 | 411 | ||
@@ -442,8 +442,8 @@ struct bnx2x_bd_chain_next { | |||
442 | 442 | ||
443 | #define CNIC_PORT(cp) ((cp)->pfid & 1) | 443 | #define CNIC_PORT(cp) ((cp)->pfid & 1) |
444 | #define CNIC_FUNC(cp) ((cp)->func) | 444 | #define CNIC_FUNC(cp) ((cp)->func) |
445 | #define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2(cp->chip_id) ? 0 :\ | 445 | #define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \ |
446 | (CNIC_FUNC(cp) & 1)) | 446 | 0 : (CNIC_FUNC(cp) & 1)) |
447 | #define CNIC_E1HVN(cp) ((cp)->pfid >> 1) | 447 | #define CNIC_E1HVN(cp) ((cp)->pfid >> 1) |
448 | 448 | ||
449 | #define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \ | 449 | #define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \ |
@@ -452,10 +452,15 @@ struct bnx2x_bd_chain_next { | |||
452 | #define BNX2X_SW_CID(x) (x & 0x1ffff) | 452 | #define BNX2X_SW_CID(x) (x & 0x1ffff) |
453 | 453 | ||
454 | #define BNX2X_CL_QZONE_ID(cp, cli) \ | 454 | #define BNX2X_CL_QZONE_ID(cp, cli) \ |
455 | (cli + (CNIC_PORT(cp) * (BNX2X_CHIP_IS_E2(cp->chip_id) ?\ | 455 | (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli : \ |
456 | ETH_MAX_RX_CLIENTS_E2 : \ | 456 | cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H)) |
457 | ETH_MAX_RX_CLIENTS_E1H))) | 457 | |
458 | #ifndef MAX_STAT_COUNTER_ID | ||
459 | #define MAX_STAT_COUNTER_ID \ | ||
460 | (BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H : \ | ||
461 | ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\ | ||
462 | MAX_STAT_COUNTER_ID_E1)) | ||
463 | #endif | ||
458 | 464 | ||
459 | #define TCP_TSTORM_OOO_DROP_AND_PROC_ACK (0<<4) | ||
460 | #endif | 465 | #endif |
461 | 466 | ||
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h index fdbc00415603..e47d21076767 100644 --- a/drivers/net/cnic_defs.h +++ b/drivers/net/cnic_defs.h | |||
@@ -1,7 +1,7 @@ | |||
1 | 1 | ||
2 | /* cnic.c: Broadcom CNIC core network driver. | 2 | /* cnic.c: Broadcom CNIC core network driver. |
3 | * | 3 | * |
4 | * Copyright (c) 2006-2010 Broadcom Corporation | 4 | * Copyright (c) 2006-2009 Broadcom Corporation |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
@@ -45,13 +45,13 @@ | |||
45 | #define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20) | 45 | #define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20) |
46 | #define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21) | 46 | #define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21) |
47 | 47 | ||
48 | #define FCOE_RAMROD_CMD_ID_INIT (FCOE_KCQE_OPCODE_INIT_FUNC) | 48 | #define FCOE_RAMROD_CMD_ID_INIT_FUNC (FCOE_KCQE_OPCODE_INIT_FUNC) |
49 | #define FCOE_RAMROD_CMD_ID_DESTROY (FCOE_KCQE_OPCODE_DESTROY_FUNC) | 49 | #define FCOE_RAMROD_CMD_ID_DESTROY_FUNC (FCOE_KCQE_OPCODE_DESTROY_FUNC) |
50 | #define FCOE_RAMROD_CMD_ID_STAT_FUNC (FCOE_KCQE_OPCODE_STAT_FUNC) | ||
50 | #define FCOE_RAMROD_CMD_ID_OFFLOAD_CONN (FCOE_KCQE_OPCODE_OFFLOAD_CONN) | 51 | #define FCOE_RAMROD_CMD_ID_OFFLOAD_CONN (FCOE_KCQE_OPCODE_OFFLOAD_CONN) |
51 | #define FCOE_RAMROD_CMD_ID_ENABLE_CONN (FCOE_KCQE_OPCODE_ENABLE_CONN) | 52 | #define FCOE_RAMROD_CMD_ID_ENABLE_CONN (FCOE_KCQE_OPCODE_ENABLE_CONN) |
52 | #define FCOE_RAMROD_CMD_ID_DISABLE_CONN (FCOE_KCQE_OPCODE_DISABLE_CONN) | 53 | #define FCOE_RAMROD_CMD_ID_DISABLE_CONN (FCOE_KCQE_OPCODE_DISABLE_CONN) |
53 | #define FCOE_RAMROD_CMD_ID_DESTROY_CONN (FCOE_KCQE_OPCODE_DESTROY_CONN) | 54 | #define FCOE_RAMROD_CMD_ID_DESTROY_CONN (FCOE_KCQE_OPCODE_DESTROY_CONN) |
54 | #define FCOE_RAMROD_CMD_ID_STAT (FCOE_KCQE_OPCODE_STAT_FUNC) | ||
55 | #define FCOE_RAMROD_CMD_ID_TERMINATE_CONN (0x81) | 55 | #define FCOE_RAMROD_CMD_ID_TERMINATE_CONN (0x81) |
56 | 56 | ||
57 | #define FCOE_KWQE_OPCODE_INIT1 (0) | 57 | #define FCOE_KWQE_OPCODE_INIT1 (0) |
@@ -641,20 +641,20 @@ struct cstorm_iscsi_ag_context { | |||
641 | #define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12 | 641 | #define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12 |
642 | #define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13) | 642 | #define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13) |
643 | #define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13 | 643 | #define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13 |
644 | #define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF (0x3<<14) | 644 | #define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<14) |
645 | #define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_SHIFT 14 | 645 | #define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 14 |
646 | #define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16) | 646 | #define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16) |
647 | #define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16 | 647 | #define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16 |
648 | #define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18) | 648 | #define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18) |
649 | #define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18 | 649 | #define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18 |
650 | #define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN (0x1<<19) | 650 | #define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<19) |
651 | #define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN_SHIFT 19 | 651 | #define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 19 |
652 | #define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN (0x1<<20) | 652 | #define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN (0x1<<20) |
653 | #define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN_SHIFT 20 | 653 | #define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN_SHIFT 20 |
654 | #define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN (0x1<<21) | 654 | #define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<21) |
655 | #define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN_SHIFT 21 | 655 | #define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 21 |
656 | #define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN (0x1<<22) | 656 | #define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<22) |
657 | #define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN_SHIFT 22 | 657 | #define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 22 |
658 | #define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23) | 658 | #define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23) |
659 | #define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23 | 659 | #define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23 |
660 | #define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26) | 660 | #define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26) |
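Illustrative note: the renamed CSTORM aggregative-context bits above (PENDING_COMPLETIONn becoming AUXn) keep the usual HSI convention of a value mask paired with a _SHIFT companion macro. A hypothetical helper showing how such pairs are typically read and written, using the AUX4_CF field as an example; this is illustrative only, not a driver API.

/* Hypothetical helper showing how the MASK/_SHIFT macro pairs in these HSI
 * headers are normally used; the field values below are illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_AUX4_CF		(0x3 << 14)
#define SKETCH_AUX4_CF_SHIFT	14

static uint32_t set_aux4_cf(uint32_t agg_vars, uint32_t cf_state)
{
	agg_vars &= ~(uint32_t)SKETCH_AUX4_CF;	/* clear the 2-bit field */
	agg_vars |= (cf_state << SKETCH_AUX4_CF_SHIFT) & SKETCH_AUX4_CF;
	return agg_vars;
}

static uint32_t get_aux4_cf(uint32_t agg_vars)
{
	return (agg_vars & SKETCH_AUX4_CF) >> SKETCH_AUX4_CF_SHIFT;
}

int main(void)
{
	uint32_t v = set_aux4_cf(0, 2);

	printf("aux4_cf=%u raw=0x%x\n", (unsigned)get_aux4_cf(v), (unsigned)v);
	return 0;
}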
@@ -694,573 +694,667 @@ struct cstorm_iscsi_ag_context { | |||
694 | #endif | 694 | #endif |
695 | #if defined(__BIG_ENDIAN) | 695 | #if defined(__BIG_ENDIAN) |
696 | u16 __reserved64; | 696 | u16 __reserved64; |
697 | u16 __cq_u_prod0; | 697 | u16 cq_u_prod; |
698 | #elif defined(__LITTLE_ENDIAN) | 698 | #elif defined(__LITTLE_ENDIAN) |
699 | u16 __cq_u_prod0; | 699 | u16 cq_u_prod; |
700 | u16 __reserved64; | 700 | u16 __reserved64; |
701 | #endif | 701 | #endif |
702 | u32 __cq_u_prod1; | 702 | u32 __cq_u_prod1; |
703 | #if defined(__BIG_ENDIAN) | 703 | #if defined(__BIG_ENDIAN) |
704 | u16 __agg_vars3; | 704 | u16 __agg_vars3; |
705 | u16 __cq_u_prod2; | 705 | u16 cq_u_pend; |
706 | #elif defined(__LITTLE_ENDIAN) | 706 | #elif defined(__LITTLE_ENDIAN) |
707 | u16 __cq_u_prod2; | 707 | u16 cq_u_pend; |
708 | u16 __agg_vars3; | 708 | u16 __agg_vars3; |
709 | #endif | 709 | #endif |
710 | #if defined(__BIG_ENDIAN) | 710 | #if defined(__BIG_ENDIAN) |
711 | u16 __aux2_th; | 711 | u16 __aux2_th; |
712 | u16 __cq_u_prod3; | 712 | u16 aux2_val; |
713 | #elif defined(__LITTLE_ENDIAN) | 713 | #elif defined(__LITTLE_ENDIAN) |
714 | u16 __cq_u_prod3; | 714 | u16 aux2_val; |
715 | u16 __aux2_th; | 715 | u16 __aux2_th; |
716 | #endif | 716 | #endif |
717 | }; | 717 | }; |
718 | 718 | ||
719 | /* | 719 | /* |
720 | * Parameters initialized during offloaded according to FLOGI/PLOGI/PRLI and used in FCoE context section | 720 | * The fcoe extra aggregative context section of Tstorm |
721 | */ | 721 | */ |
722 | struct ustorm_fcoe_params { | 722 | struct tstorm_fcoe_extra_ag_context_section { |
723 | #if defined(__BIG_ENDIAN) | 723 | u32 __agg_val1; |
724 | u16 fcoe_conn_id; | ||
725 | u16 flags; | ||
726 | #define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0) | ||
727 | #define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0 | ||
728 | #define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1) | ||
729 | #define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1 | ||
730 | #define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2) | ||
731 | #define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2 | ||
732 | #define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3) | ||
733 | #define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3 | ||
734 | #define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4) | ||
735 | #define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4 | ||
736 | #define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5) | ||
737 | #define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5 | ||
738 | #define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6) | ||
739 | #define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6 | ||
740 | #define USTORM_FCOE_PARAMS_B_C2_VALID (0x1<<7) | ||
741 | #define USTORM_FCOE_PARAMS_B_C2_VALID_SHIFT 7 | ||
742 | #define USTORM_FCOE_PARAMS_B_ACK_0 (0x1<<8) | ||
743 | #define USTORM_FCOE_PARAMS_B_ACK_0_SHIFT 8 | ||
744 | #define USTORM_FCOE_PARAMS_RSRV0 (0x7F<<9) | ||
745 | #define USTORM_FCOE_PARAMS_RSRV0_SHIFT 9 | ||
746 | #elif defined(__LITTLE_ENDIAN) | ||
747 | u16 flags; | ||
748 | #define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0) | ||
749 | #define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0 | ||
750 | #define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1) | ||
751 | #define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1 | ||
752 | #define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2) | ||
753 | #define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2 | ||
754 | #define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3) | ||
755 | #define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3 | ||
756 | #define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4) | ||
757 | #define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4 | ||
758 | #define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5) | ||
759 | #define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5 | ||
760 | #define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6) | ||
761 | #define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6 | ||
762 | #define USTORM_FCOE_PARAMS_B_C2_VALID (0x1<<7) | ||
763 | #define USTORM_FCOE_PARAMS_B_C2_VALID_SHIFT 7 | ||
764 | #define USTORM_FCOE_PARAMS_B_ACK_0 (0x1<<8) | ||
765 | #define USTORM_FCOE_PARAMS_B_ACK_0_SHIFT 8 | ||
766 | #define USTORM_FCOE_PARAMS_RSRV0 (0x7F<<9) | ||
767 | #define USTORM_FCOE_PARAMS_RSRV0_SHIFT 9 | ||
768 | u16 fcoe_conn_id; | ||
769 | #endif | ||
770 | #if defined(__BIG_ENDIAN) | ||
771 | u8 hc_csdm_byte_en; | ||
772 | u8 func_id; | ||
773 | u8 port_id; | ||
774 | u8 vnic_id; | ||
775 | #elif defined(__LITTLE_ENDIAN) | ||
776 | u8 vnic_id; | ||
777 | u8 port_id; | ||
778 | u8 func_id; | ||
779 | u8 hc_csdm_byte_en; | ||
780 | #endif | ||
781 | #if defined(__BIG_ENDIAN) | 724 | #if defined(__BIG_ENDIAN) |
782 | u16 rx_total_conc_seqs; | 725 | u8 __tcp_agg_vars2; |
783 | u16 rx_max_fc_pay_len; | 726 | u8 __agg_val3; |
727 | u16 __agg_val2; | ||
784 | #elif defined(__LITTLE_ENDIAN) | 728 | #elif defined(__LITTLE_ENDIAN) |
785 | u16 rx_max_fc_pay_len; | 729 | u16 __agg_val2; |
786 | u16 rx_total_conc_seqs; | 730 | u8 __agg_val3; |
731 | u8 __tcp_agg_vars2; | ||
787 | #endif | 732 | #endif |
788 | #if defined(__BIG_ENDIAN) | 733 | #if defined(__BIG_ENDIAN) |
789 | u16 ox_id; | 734 | u16 __agg_val5; |
790 | u16 rx_max_conc_seqs; | 735 | u8 __agg_val6; |
736 | u8 __tcp_agg_vars3; | ||
791 | #elif defined(__LITTLE_ENDIAN) | 737 | #elif defined(__LITTLE_ENDIAN) |
792 | u16 rx_max_conc_seqs; | 738 | u8 __tcp_agg_vars3; |
793 | u16 ox_id; | 739 | u8 __agg_val6; |
740 | u16 __agg_val5; | ||
794 | #endif | 741 | #endif |
742 | u32 __lcq_prod; | ||
743 | u32 rtt_seq; | ||
744 | u32 rtt_time; | ||
745 | u32 __reserved66; | ||
746 | u32 wnd_right_edge; | ||
747 | u32 tcp_agg_vars1; | ||
748 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0) | ||
749 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0 | ||
750 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1) | ||
751 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1 | ||
752 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2) | ||
753 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2 | ||
754 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4) | ||
755 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4 | ||
756 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6) | ||
757 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6 | ||
758 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7) | ||
759 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7 | ||
760 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8) | ||
761 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8 | ||
762 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN (0x1<<9) | ||
763 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN_SHIFT 9 | ||
764 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10) | ||
765 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10 | ||
766 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11) | ||
767 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11 | ||
768 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12) | ||
769 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12 | ||
770 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13) | ||
771 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13 | ||
772 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14) | ||
773 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14 | ||
774 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16) | ||
775 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16 | ||
776 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18) | ||
777 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18 | ||
778 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19) | ||
779 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19 | ||
780 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20) | ||
781 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20 | ||
782 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21) | ||
783 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21 | ||
784 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22) | ||
785 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22 | ||
786 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24) | ||
787 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24 | ||
788 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28) | ||
789 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28 | ||
790 | u32 snd_max; | ||
791 | u32 __lcq_cons; | ||
792 | u32 __reserved2; | ||
795 | }; | 793 | }; |
796 | 794 | ||
797 | /* | 795 | /* |
798 | * FCoE 16-bits index structure | 796 | * The fcoe aggregative context of Tstorm |
799 | */ | ||
800 | struct fcoe_idx16_fields { | ||
801 | u16 fields; | ||
802 | #define FCOE_IDX16_FIELDS_IDX (0x7FFF<<0) | ||
803 | #define FCOE_IDX16_FIELDS_IDX_SHIFT 0 | ||
804 | #define FCOE_IDX16_FIELDS_MSB (0x1<<15) | ||
805 | #define FCOE_IDX16_FIELDS_MSB_SHIFT 15 | ||
806 | }; | ||
807 | |||
808 | /* | ||
809 | * FCoE 16-bits index union | ||
810 | */ | ||
811 | union fcoe_idx16_field_union { | ||
812 | struct fcoe_idx16_fields fields; | ||
813 | u16 val; | ||
814 | }; | ||
815 | |||
816 | /* | ||
817 | * 4 regs size | ||
818 | */ | 797 | */ |
819 | struct fcoe_bd_ctx { | 798 | struct tstorm_fcoe_ag_context { |
820 | u32 buf_addr_hi; | ||
821 | u32 buf_addr_lo; | ||
822 | #if defined(__BIG_ENDIAN) | 799 | #if defined(__BIG_ENDIAN) |
823 | u16 rsrv0; | 800 | u16 ulp_credit; |
824 | u16 buf_len; | 801 | u8 agg_vars1; |
802 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | ||
803 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | ||
804 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | ||
805 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | ||
806 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | ||
807 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | ||
808 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | ||
809 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | ||
810 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4) | ||
811 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4 | ||
812 | #define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6) | ||
813 | #define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6 | ||
814 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7) | ||
815 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7 | ||
816 | u8 state; | ||
825 | #elif defined(__LITTLE_ENDIAN) | 817 | #elif defined(__LITTLE_ENDIAN) |
826 | u16 buf_len; | 818 | u8 state; |
827 | u16 rsrv0; | 819 | u8 agg_vars1; |
820 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | ||
821 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | ||
822 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | ||
823 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | ||
824 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | ||
825 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | ||
826 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | ||
827 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | ||
828 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4) | ||
829 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4 | ||
830 | #define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6) | ||
831 | #define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6 | ||
832 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7) | ||
833 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7 | ||
834 | u16 ulp_credit; | ||
828 | #endif | 835 | #endif |
829 | #if defined(__BIG_ENDIAN) | 836 | #if defined(__BIG_ENDIAN) |
830 | u16 rsrv1; | 837 | u16 __agg_val4; |
831 | u16 flags; | 838 | u16 agg_vars2; |
839 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0) | ||
840 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0 | ||
841 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1) | ||
842 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1 | ||
843 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2) | ||
844 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2 | ||
845 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4) | ||
846 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4 | ||
847 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6) | ||
848 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6 | ||
849 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8) | ||
850 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8 | ||
851 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10) | ||
852 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10 | ||
853 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11) | ||
854 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11 | ||
855 | #define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12) | ||
856 | #define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12 | ||
857 | #define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13) | ||
858 | #define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13 | ||
859 | #define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14) | ||
860 | #define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 | ||
861 | #define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15) | ||
862 | #define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 | ||
832 | #elif defined(__LITTLE_ENDIAN) | 863 | #elif defined(__LITTLE_ENDIAN) |
833 | u16 flags; | 864 | u16 agg_vars2; |
834 | u16 rsrv1; | 865 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0) |
866 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0 | ||
867 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1) | ||
868 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1 | ||
869 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2) | ||
870 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2 | ||
871 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4) | ||
872 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4 | ||
873 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6) | ||
874 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6 | ||
875 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8) | ||
876 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8 | ||
877 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10) | ||
878 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10 | ||
879 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11) | ||
880 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11 | ||
881 | #define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12) | ||
882 | #define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12 | ||
883 | #define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13) | ||
884 | #define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13 | ||
885 | #define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14) | ||
886 | #define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 | ||
887 | #define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15) | ||
888 | #define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 | ||
889 | u16 __agg_val4; | ||
835 | #endif | 890 | #endif |
891 | struct tstorm_fcoe_extra_ag_context_section __extra_section; | ||
836 | }; | 892 | }; |
837 | 893 | ||
894 | |||
895 | |||
838 | /* | 896 | /* |
839 | * Parameters required for placement according to SGL | 897 | * The tcp aggregative context section of Tstorm |
840 | */ | 898 | */ |
841 | struct ustorm_fcoe_data_place { | 899 | struct tstorm_tcp_tcp_ag_context_section { |
900 | u32 __agg_val1; | ||
842 | #if defined(__BIG_ENDIAN) | 901 | #if defined(__BIG_ENDIAN) |
843 | u16 cached_sge_off; | 902 | u8 __tcp_agg_vars2; |
844 | u8 cached_num_sges; | 903 | u8 __agg_val3; |
845 | u8 cached_sge_idx; | 904 | u16 __agg_val2; |
846 | #elif defined(__LITTLE_ENDIAN) | 905 | #elif defined(__LITTLE_ENDIAN) |
847 | u8 cached_sge_idx; | 906 | u16 __agg_val2; |
848 | u8 cached_num_sges; | 907 | u8 __agg_val3; |
849 | u16 cached_sge_off; | 908 | u8 __tcp_agg_vars2; |
850 | #endif | 909 | #endif |
851 | struct fcoe_bd_ctx cached_sge[3]; | ||
852 | }; | ||
853 | |||
854 | struct fcoe_task_ctx_entry_txwr_rxrd { | ||
855 | #if defined(__BIG_ENDIAN) | 910 | #if defined(__BIG_ENDIAN) |
856 | u16 verify_tx_seq; | 911 | u16 __agg_val5; |
857 | u8 init_flags; | 912 | u8 __agg_val6; |
858 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0) | 913 | u8 __tcp_agg_vars3; |
859 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0 | ||
860 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3) | ||
861 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3 | ||
862 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4) | ||
863 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4 | ||
864 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5) | ||
865 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5 | ||
866 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6) | ||
867 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6 | ||
868 | u8 tx_flags; | ||
869 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0) | ||
870 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0 | ||
871 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4) | ||
872 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4 | ||
873 | #elif defined(__LITTLE_ENDIAN) | 914 | #elif defined(__LITTLE_ENDIAN) |
874 | u8 tx_flags; | 915 | u8 __tcp_agg_vars3; |
875 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0) | 916 | u8 __agg_val6; |
876 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0 | 917 | u16 __agg_val5; |
877 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4) | ||
878 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4 | ||
879 | u8 init_flags; | ||
880 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0) | ||
881 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0 | ||
882 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3) | ||
883 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3 | ||
884 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4) | ||
885 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4 | ||
886 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5) | ||
887 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5 | ||
888 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6) | ||
889 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6 | ||
890 | u16 verify_tx_seq; | ||
891 | #endif | 918 | #endif |
919 | u32 snd_nxt; | ||
920 | u32 rtt_seq; | ||
921 | u32 rtt_time; | ||
922 | u32 __reserved66; | ||
923 | u32 wnd_right_edge; | ||
924 | u32 tcp_agg_vars1; | ||
925 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0) | ||
926 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0 | ||
927 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1) | ||
928 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1 | ||
929 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2) | ||
930 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2 | ||
931 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4) | ||
932 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4 | ||
933 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6) | ||
934 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6 | ||
935 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7) | ||
936 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7 | ||
937 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8) | ||
938 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8 | ||
939 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9) | ||
940 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9 | ||
941 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10) | ||
942 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10 | ||
943 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11) | ||
944 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11 | ||
945 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12) | ||
946 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12 | ||
947 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13) | ||
948 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13 | ||
949 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14) | ||
950 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14 | ||
951 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16) | ||
952 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16 | ||
953 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18) | ||
954 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18 | ||
955 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19) | ||
956 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19 | ||
957 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20) | ||
958 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20 | ||
959 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21) | ||
960 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21 | ||
961 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22) | ||
962 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22 | ||
963 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24) | ||
964 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24 | ||
965 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28) | ||
966 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28 | ||
967 | u32 snd_max; | ||
968 | u32 snd_una; | ||
969 | u32 __reserved2; | ||
892 | }; | 970 | }; |
893 | 971 | ||
894 | struct fcoe_fcp_cmd_payload { | 972 | /* |
895 | u32 opaque[8]; | 973 | * The iscsi aggregative context of Tstorm |
896 | }; | 974 | */ |
897 | 975 | struct tstorm_iscsi_ag_context { | |
898 | struct fcoe_fc_hdr { | ||
899 | #if defined(__BIG_ENDIAN) | ||
900 | u8 cs_ctl; | ||
901 | u8 s_id[3]; | ||
902 | #elif defined(__LITTLE_ENDIAN) | ||
903 | u8 s_id[3]; | ||
904 | u8 cs_ctl; | ||
905 | #endif | ||
906 | #if defined(__BIG_ENDIAN) | ||
907 | u8 r_ctl; | ||
908 | u8 d_id[3]; | ||
909 | #elif defined(__LITTLE_ENDIAN) | ||
910 | u8 d_id[3]; | ||
911 | u8 r_ctl; | ||
912 | #endif | ||
913 | #if defined(__BIG_ENDIAN) | ||
914 | u8 seq_id; | ||
915 | u8 df_ctl; | ||
916 | u16 seq_cnt; | ||
917 | #elif defined(__LITTLE_ENDIAN) | ||
918 | u16 seq_cnt; | ||
919 | u8 df_ctl; | ||
920 | u8 seq_id; | ||
921 | #endif | ||
922 | #if defined(__BIG_ENDIAN) | 976 | #if defined(__BIG_ENDIAN) |
923 | u8 type; | 977 | u16 ulp_credit; |
924 | u8 f_ctl[3]; | 978 | u8 agg_vars1; |
979 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | ||
980 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | ||
981 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | ||
982 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | ||
983 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | ||
984 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | ||
985 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | ||
986 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | ||
987 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4) | ||
988 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4 | ||
989 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) | ||
990 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 | ||
991 | #define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7) | ||
992 | #define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7 | ||
993 | u8 state; | ||
925 | #elif defined(__LITTLE_ENDIAN) | 994 | #elif defined(__LITTLE_ENDIAN) |
926 | u8 f_ctl[3]; | 995 | u8 state; |
927 | u8 type; | 996 | u8 agg_vars1; |
997 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | ||
998 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | ||
999 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | ||
1000 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | ||
1001 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | ||
1002 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | ||
1003 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | ||
1004 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | ||
1005 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4) | ||
1006 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4 | ||
1007 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) | ||
1008 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 | ||
1009 | #define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7) | ||
1010 | #define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7 | ||
1011 | u16 ulp_credit; | ||
928 | #endif | 1012 | #endif |
929 | u32 parameters; | ||
930 | #if defined(__BIG_ENDIAN) | 1013 | #if defined(__BIG_ENDIAN) |
931 | u16 ox_id; | 1014 | u16 __agg_val4; |
932 | u16 rx_id; | 1015 | u16 agg_vars2; |
1016 | #define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0) | ||
1017 | #define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0 | ||
1018 | #define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1) | ||
1019 | #define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1 | ||
1020 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2) | ||
1021 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2 | ||
1022 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4) | ||
1023 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4 | ||
1024 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) | ||
1025 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 | ||
1026 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) | ||
1027 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 | ||
1028 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) | ||
1029 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 | ||
1030 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11) | ||
1031 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11 | ||
1032 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12) | ||
1033 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12 | ||
1034 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13) | ||
1035 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13 | ||
1036 | #define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) | ||
1037 | #define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 | ||
1038 | #define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) | ||
1039 | #define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 | ||
933 | #elif defined(__LITTLE_ENDIAN) | 1040 | #elif defined(__LITTLE_ENDIAN) |
934 | u16 rx_id; | 1041 | u16 agg_vars2; |
935 | u16 ox_id; | 1042 | #define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0) |
1043 | #define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0 | ||
1044 | #define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1) | ||
1045 | #define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1 | ||
1046 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2) | ||
1047 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2 | ||
1048 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4) | ||
1049 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4 | ||
1050 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) | ||
1051 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 | ||
1052 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) | ||
1053 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 | ||
1054 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) | ||
1055 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 | ||
1056 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11) | ||
1057 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11 | ||
1058 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12) | ||
1059 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12 | ||
1060 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13) | ||
1061 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13 | ||
1062 | #define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) | ||
1063 | #define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 | ||
1064 | #define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) | ||
1065 | #define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 | ||
1066 | u16 __agg_val4; | ||
936 | #endif | 1067 | #endif |
1068 | struct tstorm_tcp_tcp_ag_context_section tcp; | ||
937 | }; | 1069 | }; |
938 | 1070 | ||
939 | struct fcoe_fc_frame { | ||
940 | struct fcoe_fc_hdr fc_hdr; | ||
941 | u32 reserved0[2]; | ||
942 | }; | ||
943 | |||
944 | union fcoe_cmd_flow_info { | ||
945 | struct fcoe_fcp_cmd_payload fcp_cmd_payload; | ||
946 | struct fcoe_fc_frame mp_fc_frame; | ||
947 | }; | ||
948 | |||
949 | struct fcoe_read_flow_info { | ||
950 | struct fcoe_fc_hdr fc_data_in_hdr; | ||
951 | u32 reserved[2]; | ||
952 | }; | ||
953 | |||
954 | struct fcoe_fcp_xfr_rdy_payload { | ||
955 | u32 burst_len; | ||
956 | u32 data_ro; | ||
957 | }; | ||
958 | |||
959 | struct fcoe_write_flow_info { | ||
960 | struct fcoe_fc_hdr fc_data_out_hdr; | ||
961 | struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload; | ||
962 | }; | ||
963 | |||
964 | struct fcoe_fcp_rsp_flags { | ||
965 | u8 flags; | ||
966 | #define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0) | ||
967 | #define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0 | ||
968 | #define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1) | ||
969 | #define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1 | ||
970 | #define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2) | ||
971 | #define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2 | ||
972 | #define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3) | ||
973 | #define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3 | ||
974 | #define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4) | ||
975 | #define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4 | ||
976 | #define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5) | ||
977 | #define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5 | ||
978 | }; | ||
979 | |||
980 | struct fcoe_fcp_rsp_payload { | ||
981 | struct regpair reserved0; | ||
982 | u32 fcp_resid; | ||
983 | #if defined(__BIG_ENDIAN) | ||
984 | u16 retry_delay_timer; | ||
985 | struct fcoe_fcp_rsp_flags fcp_flags; | ||
986 | u8 scsi_status_code; | ||
987 | #elif defined(__LITTLE_ENDIAN) | ||
988 | u8 scsi_status_code; | ||
989 | struct fcoe_fcp_rsp_flags fcp_flags; | ||
990 | u16 retry_delay_timer; | ||
991 | #endif | ||
992 | u32 fcp_rsp_len; | ||
993 | u32 fcp_sns_len; | ||
994 | }; | ||
995 | 1071 | ||
996 | /* | ||
997 | * Fixed size structure in order to plant it in Union structure | ||
998 | */ | ||
999 | struct fcoe_fcp_rsp_union { | ||
1000 | struct fcoe_fcp_rsp_payload payload; | ||
1001 | struct regpair reserved0; | ||
1002 | }; | ||
1003 | 1072 | ||
1004 | /* | 1073 | /* |
1005 | * Fixed size structure in order to plant it in Union structure | 1074 | * The fcoe aggregative context of Ustorm |
1006 | */ | 1075 | */ |
1007 | struct fcoe_abts_rsp_union { | 1076 | struct ustorm_fcoe_ag_context { |
1008 | u32 r_ctl; | ||
1009 | u32 abts_rsp_payload[7]; | ||
1010 | }; | ||
1011 | |||
1012 | union fcoe_rsp_flow_info { | ||
1013 | struct fcoe_fcp_rsp_union fcp_rsp; | ||
1014 | struct fcoe_abts_rsp_union abts_rsp; | ||
1015 | }; | ||
1016 | |||
1017 | struct fcoe_cleanup_flow_info { | ||
1018 | #if defined(__BIG_ENDIAN) | 1077 | #if defined(__BIG_ENDIAN) |
1019 | u16 reserved1; | 1078 | u8 __aux_counter_flags; |
1020 | u16 task_id; | 1079 | u8 agg_vars2; |
1080 | #define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0) | ||
1081 | #define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0 | ||
1082 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2) | ||
1083 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2 | ||
1084 | #define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) | ||
1085 | #define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 | ||
1086 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) | ||
1087 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 | ||
1088 | u8 agg_vars1; | ||
1089 | #define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | ||
1090 | #define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | ||
1091 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | ||
1092 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | ||
1093 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | ||
1094 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | ||
1095 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | ||
1096 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | ||
1097 | #define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4) | ||
1098 | #define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4 | ||
1099 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6) | ||
1100 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6 | ||
1101 | u8 state; | ||
1021 | #elif defined(__LITTLE_ENDIAN) | 1102 | #elif defined(__LITTLE_ENDIAN) |
1022 | u16 task_id; | 1103 | u8 state; |
1023 | u16 reserved1; | 1104 | u8 agg_vars1; |
1105 | #define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | ||
1106 | #define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | ||
1107 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | ||
1108 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | ||
1109 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | ||
1110 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | ||
1111 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | ||
1112 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | ||
1113 | #define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4) | ||
1114 | #define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4 | ||
1115 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6) | ||
1116 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6 | ||
1117 | u8 agg_vars2; | ||
1118 | #define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0) | ||
1119 | #define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0 | ||
1120 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2) | ||
1121 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2 | ||
1122 | #define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) | ||
1123 | #define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 | ||
1124 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) | ||
1125 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 | ||
1126 | u8 __aux_counter_flags; | ||
1024 | #endif | 1127 | #endif |
1025 | u32 reserved2[7]; | 1128 | #if defined(__BIG_ENDIAN) |
1026 | }; | 1129 | u8 cdu_usage; |
1027 | 1130 | u8 agg_misc2; | |
1028 | /* | 1131 | u16 pbf_tx_seq_ack; |
1029 | * 32 bytes used for general purposes | ||
1030 | */ | ||
1031 | union fcoe_general_task_ctx { | ||
1032 | union fcoe_cmd_flow_info cmd_info; | ||
1033 | struct fcoe_read_flow_info read_info; | ||
1034 | struct fcoe_write_flow_info write_info; | ||
1035 | union fcoe_rsp_flow_info rsp_info; | ||
1036 | struct fcoe_cleanup_flow_info cleanup_info; | ||
1037 | u32 comp_info[8]; | ||
1038 | }; | ||
1039 | |||
1040 | struct fcoe_s_stat_ctx { | ||
1041 | u8 flags; | ||
1042 | #define FCOE_S_STAT_CTX_ACTIVE (0x1<<0) | ||
1043 | #define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0 | ||
1044 | #define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1) | ||
1045 | #define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1 | ||
1046 | #define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2) | ||
1047 | #define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2 | ||
1048 | #define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3) | ||
1049 | #define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3 | ||
1050 | #define FCOE_S_STAT_CTX_P_RJT (0x1<<4) | ||
1051 | #define FCOE_S_STAT_CTX_P_RJT_SHIFT 4 | ||
1052 | #define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5) | ||
1053 | #define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5 | ||
1054 | #define FCOE_S_STAT_CTX_RSRV1 (0x3<<6) | ||
1055 | #define FCOE_S_STAT_CTX_RSRV1_SHIFT 6 | ||
1056 | }; | ||
1057 | |||
1058 | /* | ||
1059 | * Common section. Both TX and RX processing might write and read from it in different flows | ||
1060 | */ | ||
1061 | struct fcoe_task_ctx_entry_tx_rx_cmn { | ||
1062 | u32 data_2_trns; | ||
1063 | union fcoe_general_task_ctx general; | ||
1064 | #if defined(__BIG_ENDIAN) | ||
1065 | u16 tx_low_seq_cnt; | ||
1066 | struct fcoe_s_stat_ctx tx_s_stat; | ||
1067 | u8 tx_seq_id; | ||
1068 | #elif defined(__LITTLE_ENDIAN) | ||
1069 | u8 tx_seq_id; | ||
1070 | struct fcoe_s_stat_ctx tx_s_stat; | ||
1071 | u16 tx_low_seq_cnt; | ||
1072 | #endif | ||
1073 | u32 common_flags; | ||
1074 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0) | ||
1075 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0 | ||
1076 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24) | ||
1077 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24 | ||
1078 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25) | ||
1079 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25 | ||
1080 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26) | ||
1081 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26 | ||
1082 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27) | ||
1083 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27 | ||
1084 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28) | ||
1085 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28 | ||
1086 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29) | ||
1087 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29 | ||
1088 | }; | ||
1089 | |||
1090 | struct fcoe_task_ctx_entry_rxwr_txrd { | ||
1091 | #if defined(__BIG_ENDIAN) | ||
1092 | u16 rx_id; | ||
1093 | u16 rx_flags; | ||
1094 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0) | ||
1095 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0 | ||
1096 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4) | ||
1097 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4 | ||
1098 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7) | ||
1099 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7 | ||
1100 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8) | ||
1101 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8 | ||
1102 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9) | ||
1103 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9 | ||
1104 | #elif defined(__LITTLE_ENDIAN) | ||
1105 | u16 rx_flags; | ||
1106 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0) | ||
1107 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0 | ||
1108 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4) | ||
1109 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4 | ||
1110 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7) | ||
1111 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7 | ||
1112 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8) | ||
1113 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8 | ||
1114 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9) | ||
1115 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9 | ||
1116 | u16 rx_id; | ||
1117 | #endif | ||
1118 | }; | ||
1119 | |||
1120 | struct fcoe_seq_ctx { | ||
1121 | #if defined(__BIG_ENDIAN) | ||
1122 | u16 low_seq_cnt; | ||
1123 | struct fcoe_s_stat_ctx s_stat; | ||
1124 | u8 seq_id; | ||
1125 | #elif defined(__LITTLE_ENDIAN) | 1132 | #elif defined(__LITTLE_ENDIAN) |
1126 | u8 seq_id; | 1133 | u16 pbf_tx_seq_ack; |
1127 | struct fcoe_s_stat_ctx s_stat; | 1134 | u8 agg_misc2; |
1128 | u16 low_seq_cnt; | 1135 | u8 cdu_usage; |
1129 | #endif | 1136 | #endif |
1137 | u32 agg_misc4; | ||
1130 | #if defined(__BIG_ENDIAN) | 1138 | #if defined(__BIG_ENDIAN) |
1131 | u16 err_seq_cnt; | 1139 | u8 agg_val3_th; |
1132 | u16 high_seq_cnt; | 1140 | u8 agg_val3; |
1141 | u16 agg_misc3; | ||
1133 | #elif defined(__LITTLE_ENDIAN) | 1142 | #elif defined(__LITTLE_ENDIAN) |
1134 | u16 high_seq_cnt; | 1143 | u16 agg_misc3; |
1135 | u16 err_seq_cnt; | 1144 | u8 agg_val3; |
1145 | u8 agg_val3_th; | ||
1136 | #endif | 1146 | #endif |
1137 | u32 low_exp_ro; | 1147 | u32 expired_task_id; |
1138 | u32 high_exp_ro; | 1148 | u32 agg_misc4_th; |
1139 | }; | ||
1140 | |||
1141 | struct fcoe_single_sge_ctx { | ||
1142 | struct regpair cur_buf_addr; | ||
1143 | #if defined(__BIG_ENDIAN) | 1149 | #if defined(__BIG_ENDIAN) |
1144 | u16 reserved0; | 1150 | u16 cq_prod; |
1145 | u16 cur_buf_rem; | 1151 | u16 cq_cons; |
1146 | #elif defined(__LITTLE_ENDIAN) | 1152 | #elif defined(__LITTLE_ENDIAN) |
1147 | u16 cur_buf_rem; | 1153 | u16 cq_cons; |
1148 | u16 reserved0; | 1154 | u16 cq_prod; |
1149 | #endif | 1155 | #endif |
1150 | }; | ||
1151 | |||
1152 | struct fcoe_mul_sges_ctx { | ||
1153 | struct regpair cur_sge_addr; | ||
1154 | #if defined(__BIG_ENDIAN) | 1156 | #if defined(__BIG_ENDIAN) |
1155 | u8 sgl_size; | 1157 | u16 __reserved2; |
1156 | u8 cur_sge_idx; | 1158 | u8 decision_rules; |
1157 | u16 cur_sge_off; | 1159 | #define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0) |
1160 | #define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0 | ||
1161 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) | ||
1162 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 | ||
1163 | #define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6) | ||
1164 | #define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6 | ||
1165 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7) | ||
1166 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7 | ||
1167 | u8 decision_rule_enable_bits; | ||
1168 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0) | ||
1169 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0 | ||
1170 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) | ||
1171 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 | ||
1172 | #define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2) | ||
1173 | #define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2 | ||
1174 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3) | ||
1175 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 | ||
1176 | #define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4) | ||
1177 | #define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4 | ||
1178 | #define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5) | ||
1179 | #define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5 | ||
1180 | #define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6) | ||
1181 | #define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 | ||
1182 | #define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) | ||
1183 | #define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 | ||
1158 | #elif defined(__LITTLE_ENDIAN) | 1184 | #elif defined(__LITTLE_ENDIAN) |
1159 | u16 cur_sge_off; | 1185 | u8 decision_rule_enable_bits; |
1160 | u8 cur_sge_idx; | 1186 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0) |
1161 | u8 sgl_size; | 1187 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0 |
1188 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) | ||
1189 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 | ||
1190 | #define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2) | ||
1191 | #define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2 | ||
1192 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3) | ||
1193 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 | ||
1194 | #define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4) | ||
1195 | #define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4 | ||
1196 | #define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5) | ||
1197 | #define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5 | ||
1198 | #define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6) | ||
1199 | #define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 | ||
1200 | #define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) | ||
1201 | #define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 | ||
1202 | u8 decision_rules; | ||
1203 | #define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0) | ||
1204 | #define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0 | ||
1205 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) | ||
1206 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 | ||
1207 | #define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6) | ||
1208 | #define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6 | ||
1209 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7) | ||
1210 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7 | ||
1211 | u16 __reserved2; | ||
1162 | #endif | 1212 | #endif |
1163 | }; | 1213 | }; |
1164 | 1214 | ||
1165 | union fcoe_sgl_ctx { | ||
1166 | struct fcoe_single_sge_ctx single_sge; | ||
1167 | struct fcoe_mul_sges_ctx mul_sges; | ||
1168 | }; | ||
1169 | |||
1170 | struct fcoe_task_ctx_entry_rx_only { | ||
1171 | struct fcoe_seq_ctx seq_ctx; | ||
1172 | struct fcoe_seq_ctx ooo_seq_ctx; | ||
1173 | u32 rsrv3; | ||
1174 | union fcoe_sgl_ctx sgl_ctx; | ||
1175 | }; | ||
1176 | |||
1177 | struct ustorm_fcoe_task_ctx_entry_rd { | ||
1178 | struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd; | ||
1179 | struct fcoe_task_ctx_entry_tx_rx_cmn cmn; | ||
1180 | struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd; | ||
1181 | struct fcoe_task_ctx_entry_rx_only rx_wr; | ||
1182 | u32 reserved; | ||
1183 | }; | ||
1184 | 1215 | ||
1185 | /* | 1216 | /* |
1186 | * Ustorm FCoE Storm Context | 1217 | * The iscsi aggregative context of Ustorm |
1187 | */ | 1218 | */ |
1188 | struct ustorm_fcoe_st_context { | 1219 | struct ustorm_iscsi_ag_context { |
1189 | struct ustorm_fcoe_params fcoe_params; | ||
1190 | struct regpair task_addr; | ||
1191 | struct regpair cq_base_addr; | ||
1192 | struct regpair rq_pbl_base; | ||
1193 | struct regpair rq_cur_page_addr; | ||
1194 | struct regpair confq_pbl_base_addr; | ||
1195 | struct regpair conn_db_base; | ||
1196 | struct regpair xfrq_base_addr; | ||
1197 | struct regpair lcq_base_addr; | ||
1198 | #if defined(__BIG_ENDIAN) | 1220 | #if defined(__BIG_ENDIAN) |
1199 | union fcoe_idx16_field_union rq_cons; | 1221 | u8 __aux_counter_flags; |
1200 | union fcoe_idx16_field_union rq_prod; | 1222 | u8 agg_vars2; |
1223 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0) | ||
1224 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0 | ||
1225 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2) | ||
1226 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2 | ||
1227 | #define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) | ||
1228 | #define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 | ||
1229 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) | ||
1230 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 | ||
1231 | u8 agg_vars1; | ||
1232 | #define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | ||
1233 | #define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | ||
1234 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | ||
1235 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | ||
1236 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | ||
1237 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | ||
1238 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | ||
1239 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | ||
1240 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4) | ||
1241 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4 | ||
1242 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6) | ||
1243 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6 | ||
1244 | u8 state; | ||
1201 | #elif defined(__LITTLE_ENDIAN) | 1245 | #elif defined(__LITTLE_ENDIAN) |
1202 | union fcoe_idx16_field_union rq_prod; | 1246 | u8 state; |
1203 | union fcoe_idx16_field_union rq_cons; | 1247 | u8 agg_vars1; |
1248 | #define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | ||
1249 | #define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | ||
1250 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | ||
1251 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | ||
1252 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | ||
1253 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | ||
1254 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | ||
1255 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | ||
1256 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4) | ||
1257 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4 | ||
1258 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6) | ||
1259 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6 | ||
1260 | u8 agg_vars2; | ||
1261 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0) | ||
1262 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0 | ||
1263 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2) | ||
1264 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2 | ||
1265 | #define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) | ||
1266 | #define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 | ||
1267 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) | ||
1268 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 | ||
1269 | u8 __aux_counter_flags; | ||
1204 | #endif | 1270 | #endif |
1205 | #if defined(__BIG_ENDIAN) | 1271 | #if defined(__BIG_ENDIAN) |
1206 | u16 xfrq_prod; | 1272 | u8 cdu_usage; |
1207 | u16 cq_cons; | 1273 | u8 agg_misc2; |
1274 | u16 __cq_local_comp_itt_val; | ||
1208 | #elif defined(__LITTLE_ENDIAN) | 1275 | #elif defined(__LITTLE_ENDIAN) |
1209 | u16 cq_cons; | 1276 | u16 __cq_local_comp_itt_val; |
1210 | u16 xfrq_prod; | 1277 | u8 agg_misc2; |
1278 | u8 cdu_usage; | ||
1211 | #endif | 1279 | #endif |
1280 | u32 agg_misc4; | ||
1212 | #if defined(__BIG_ENDIAN) | 1281 | #if defined(__BIG_ENDIAN) |
1213 | u16 lcq_cons; | 1282 | u8 agg_val3_th; |
1214 | u16 hc_cram_address; | 1283 | u8 agg_val3; |
1284 | u16 agg_misc3; | ||
1215 | #elif defined(__LITTLE_ENDIAN) | 1285 | #elif defined(__LITTLE_ENDIAN) |
1216 | u16 hc_cram_address; | 1286 | u16 agg_misc3; |
1217 | u16 lcq_cons; | 1287 | u8 agg_val3; |
1288 | u8 agg_val3_th; | ||
1218 | #endif | 1289 | #endif |
1290 | u32 agg_val1; | ||
1291 | u32 agg_misc4_th; | ||
1219 | #if defined(__BIG_ENDIAN) | 1292 | #if defined(__BIG_ENDIAN) |
1220 | u16 sq_xfrq_lcq_confq_size; | 1293 | u16 agg_val2_th; |
1221 | u16 confq_prod; | 1294 | u16 agg_val2; |
1222 | #elif defined(__LITTLE_ENDIAN) | 1295 | #elif defined(__LITTLE_ENDIAN) |
1223 | u16 confq_prod; | 1296 | u16 agg_val2; |
1224 | u16 sq_xfrq_lcq_confq_size; | 1297 | u16 agg_val2_th; |
1225 | #endif | 1298 | #endif |
1226 | #if defined(__BIG_ENDIAN) | 1299 | #if defined(__BIG_ENDIAN) |
1227 | u8 hc_csdm_agg_int; | 1300 | u16 __reserved2; |
1228 | u8 flags; | 1301 | u8 decision_rules; |
1229 | #define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG (0x1<<0) | 1302 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0) |
1230 | #define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG_SHIFT 0 | 1303 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 |
1231 | #define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG (0x1<<1) | 1304 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) |
1232 | #define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG_SHIFT 1 | 1305 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 |
1233 | #define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG (0x1<<2) | 1306 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) |
1234 | #define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG_SHIFT 2 | 1307 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 |
1235 | #define USTORM_FCOE_ST_CONTEXT_RSRV1 (0x1F<<3) | 1308 | #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) |
1236 | #define USTORM_FCOE_ST_CONTEXT_RSRV1_SHIFT 3 | 1309 | #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 |
1237 | u8 available_rqes; | 1310 | u8 decision_rule_enable_bits; |
1238 | u8 sp_q_flush_cnt; | 1311 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0) |
1312 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0 | ||
1313 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) | ||
1314 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 | ||
1315 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2) | ||
1316 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2 | ||
1317 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3) | ||
1318 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 | ||
1319 | #define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4) | ||
1320 | #define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4 | ||
1321 | #define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5) | ||
1322 | #define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5 | ||
1323 | #define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6) | ||
1324 | #define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 | ||
1325 | #define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) | ||
1326 | #define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 | ||
1239 | #elif defined(__LITTLE_ENDIAN) | 1327 | #elif defined(__LITTLE_ENDIAN) |
1240 | u8 sp_q_flush_cnt; | 1328 | u8 decision_rule_enable_bits; |
1241 | u8 available_rqes; | 1329 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0) |
1242 | u8 flags; | 1330 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0 |
1243 | #define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG (0x1<<0) | 1331 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) |
1244 | #define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG_SHIFT 0 | 1332 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 |
1245 | #define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG (0x1<<1) | 1333 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2) |
1246 | #define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG_SHIFT 1 | 1334 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2 |
1247 | #define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG (0x1<<2) | 1335 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3) |
1248 | #define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG_SHIFT 2 | 1336 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 |
1249 | #define USTORM_FCOE_ST_CONTEXT_RSRV1 (0x1F<<3) | 1337 | #define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4) |
1250 | #define USTORM_FCOE_ST_CONTEXT_RSRV1_SHIFT 3 | 1338 | #define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4 |
1251 | u8 hc_csdm_agg_int; | 1339 | #define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5) |
1340 | #define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5 | ||
1341 | #define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6) | ||
1342 | #define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 | ||
1343 | #define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) | ||
1344 | #define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 | ||
1345 | u8 decision_rules; | ||
1346 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0) | ||
1347 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 | ||
1348 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) | ||
1349 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 | ||
1350 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) | ||
1351 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 | ||
1352 | #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) | ||
1353 | #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 | ||
1354 | u16 __reserved2; | ||
1252 | #endif | 1355 | #endif |
1253 | struct ustorm_fcoe_data_place data_place; | ||
1254 | struct ustorm_fcoe_task_ctx_entry_rd tce; | ||
1255 | }; | 1356 | }; |
1256 | 1357 | ||
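Note: the aggregative-context structures above expose every packed flag and counter through a paired MASK/SHIFT macro rather than C bitfields. Below is a minimal standalone sketch of how such a pair is typically consumed; the two macros are copied from the ustorm_iscsi_ag_context definition in this diff, while the ctx_get()/ctx_set() helpers and the test program are purely illustrative and not part of the driver.

	#include <stdio.h>
	#include <stdint.h>

	typedef uint8_t u8;

	/* Copied from the ustorm_iscsi_ag_context definition above. */
	#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN	(0x1<<1)
	#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT	1

	/* Illustrative helpers: read/write one field of a packed context byte. */
	static u8 ctx_get(u8 word, u8 mask, u8 shift)
	{
		return (word & mask) >> shift;
	}

	static void ctx_set(u8 *word, u8 mask, u8 shift, u8 val)
	{
		*word = (*word & ~mask) | ((u8)(val << shift) & mask);
	}

	int main(void)
	{
		u8 decision_rule_enable_bits = 0;

		/* Arm the completion CF enable bit for this connection. */
		ctx_set(&decision_rule_enable_bits,
			USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN,
			USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT, 1);

		printf("completion_cf_en = %u\n",
		       (unsigned)ctx_get(decision_rule_enable_bits,
					 USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN,
					 USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT));
		return 0;
	}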
1257 | /* | ||
1258 | * The FCoE non-aggregative context of Tstorm | ||
1259 | */ | ||
1260 | struct tstorm_fcoe_st_context { | ||
1261 | struct regpair reserved0; | ||
1262 | struct regpair reserved1; | ||
1263 | }; | ||
1264 | 1358 | ||
1265 | /* | 1359 | /* |
1266 | * The fcoe aggregative context section of Xstorm | 1360 | * The fcoe aggregative context section of Xstorm |
@@ -1272,8 +1366,8 @@ struct xstorm_fcoe_extra_ag_context_section { | |||
1272 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0 | 1366 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0 |
1273 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) | 1367 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) |
1274 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 | 1368 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 |
1275 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF (0x3<<4) | 1369 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) |
1276 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_SHIFT 4 | 1370 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 |
1277 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6) | 1371 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6) |
1278 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6 | 1372 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6 |
1279 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7) | 1373 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7) |
@@ -1288,20 +1382,20 @@ struct xstorm_fcoe_extra_ag_context_section { | |||
1288 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0 | 1382 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0 |
1289 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) | 1383 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) |
1290 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 | 1384 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 |
1291 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF (0x3<<4) | 1385 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) |
1292 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_SHIFT 4 | 1386 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 |
1293 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6) | 1387 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6) |
1294 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6 | 1388 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6 |
1295 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7) | 1389 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7) |
1296 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7 | 1390 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7 |
1297 | #endif | 1391 | #endif |
1298 | u32 __task_addr_lo; | 1392 | u32 snd_nxt; |
1299 | u32 __task_addr_hi; | 1393 | u32 tx_wnd; |
1300 | u32 __reserved55; | 1394 | u32 __reserved55; |
1301 | u32 __tx_prods; | 1395 | u32 local_adv_wnd; |
1302 | #if defined(__BIG_ENDIAN) | 1396 | #if defined(__BIG_ENDIAN) |
1303 | u8 __agg_val8_th; | 1397 | u8 __agg_val8_th; |
1304 | u8 __agg_val8; | 1398 | u8 __tx_dest; |
1305 | u16 tcp_agg_vars2; | 1399 | u16 tcp_agg_vars2; |
1306 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0) | 1400 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0) |
1307 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0 | 1401 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0 |
@@ -1317,8 +1411,8 @@ struct xstorm_fcoe_extra_ag_context_section { | |||
1317 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5 | 1411 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5 |
1318 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6) | 1412 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6) |
1319 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6 | 1413 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6 |
1320 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN (0x1<<7) | 1414 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) |
1321 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN_SHIFT 7 | 1415 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 |
1322 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8) | 1416 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8) |
1323 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8 | 1417 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8 |
1324 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) | 1418 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) |
@@ -1327,8 +1421,8 @@ struct xstorm_fcoe_extra_ag_context_section { | |||
1327 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 | 1421 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 |
1328 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) | 1422 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) |
1329 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 | 1423 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 |
1330 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14) | 1424 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) |
1331 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14 | 1425 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 |
1332 | #elif defined(__LITTLE_ENDIAN) | 1426 | #elif defined(__LITTLE_ENDIAN) |
1333 | u16 tcp_agg_vars2; | 1427 | u16 tcp_agg_vars2; |
1334 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0) | 1428 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0) |
@@ -1345,8 +1439,8 @@ struct xstorm_fcoe_extra_ag_context_section { | |||
1345 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5 | 1439 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5 |
1346 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6) | 1440 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6) |
1347 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6 | 1441 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6 |
1348 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN (0x1<<7) | 1442 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) |
1349 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN_SHIFT 7 | 1443 | #define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 |
1350 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8) | 1444 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8) |
1351 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8 | 1445 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8 |
1352 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) | 1446 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) |
@@ -1355,9 +1449,9 @@ struct xstorm_fcoe_extra_ag_context_section { | |||
1355 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 | 1449 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 |
1356 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) | 1450 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) |
1357 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 | 1451 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 |
1358 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14) | 1452 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) |
1359 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14 | 1453 | #define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 |
1360 | u8 __agg_val8; | 1454 | u8 __tx_dest; |
1361 | u8 __agg_val8_th; | 1455 | u8 __agg_val8_th; |
1362 | #endif | 1456 | #endif |
1363 | u32 __sq_base_addr_lo; | 1457 | u32 __sq_base_addr_lo; |
@@ -1591,9 +1685,9 @@ struct xstorm_fcoe_ag_context { | |||
1591 | #if defined(__BIG_ENDIAN) | 1685 | #if defined(__BIG_ENDIAN) |
1592 | u8 __reserved1; | 1686 | u8 __reserved1; |
1593 | u8 __agg_val6_th; | 1687 | u8 __agg_val6_th; |
1594 | u16 __confq_tx_prod; | 1688 | u16 __agg_val9; |
1595 | #elif defined(__LITTLE_ENDIAN) | 1689 | #elif defined(__LITTLE_ENDIAN) |
1596 | u16 __confq_tx_prod; | 1690 | u16 __agg_val9; |
1597 | u8 __agg_val6_th; | 1691 | u8 __agg_val6_th; |
1598 | u8 __reserved1; | 1692 | u8 __reserved1; |
1599 | #endif | 1693 | #endif |
@@ -1605,16 +1699,16 @@ struct xstorm_fcoe_ag_context { | |||
1605 | u16 confq_cons; | 1699 | u16 confq_cons; |
1606 | #endif | 1700 | #endif |
1607 | u32 agg_vars8; | 1701 | u32 agg_vars8; |
1608 | #define __XSTORM_FCOE_AG_CONTEXT_CACHE_WQE_IDX (0xFFFFFF<<0) | 1702 | #define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0) |
1609 | #define __XSTORM_FCOE_AG_CONTEXT_CACHE_WQE_IDX_SHIFT 0 | 1703 | #define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2_SHIFT 0 |
1610 | #define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3 (0xFF<<24) | 1704 | #define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3 (0xFF<<24) |
1611 | #define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3_SHIFT 24 | 1705 | #define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3_SHIFT 24 |
1612 | #if defined(__BIG_ENDIAN) | 1706 | #if defined(__BIG_ENDIAN) |
1613 | u16 ox_id; | 1707 | u16 agg_misc0; |
1614 | u16 sq_prod; | 1708 | u16 sq_prod; |
1615 | #elif defined(__LITTLE_ENDIAN) | 1709 | #elif defined(__LITTLE_ENDIAN) |
1616 | u16 sq_prod; | 1710 | u16 sq_prod; |
1617 | u16 ox_id; | 1711 | u16 agg_misc0; |
1618 | #endif | 1712 | #endif |
1619 | #if defined(__BIG_ENDIAN) | 1713 | #if defined(__BIG_ENDIAN) |
1620 | u8 agg_val3; | 1714 | u8 agg_val3; |
@@ -1628,332 +1722,1685 @@ struct xstorm_fcoe_ag_context { | |||
1628 | u8 agg_val3; | 1722 | u8 agg_val3; |
1629 | #endif | 1723 | #endif |
1630 | #if defined(__BIG_ENDIAN) | 1724 | #if defined(__BIG_ENDIAN) |
1631 | u16 __pbf_tx_seq_ack; | 1725 | u16 __agg_misc1; |
1632 | u16 agg_limit1; | 1726 | u16 agg_limit1; |
1633 | #elif defined(__LITTLE_ENDIAN) | 1727 | #elif defined(__LITTLE_ENDIAN) |
1634 | u16 agg_limit1; | 1728 | u16 agg_limit1; |
1635 | u16 __pbf_tx_seq_ack; | 1729 | u16 __agg_misc1; |
1636 | #endif | 1730 | #endif |
1637 | u32 completion_seq; | 1731 | u32 completion_seq; |
1638 | u32 confq_pbl_base_lo; | 1732 | u32 confq_pbl_base_lo; |
1639 | u32 confq_pbl_base_hi; | 1733 | u32 confq_pbl_base_hi; |
1640 | }; | 1734 | }; |
1641 | 1735 | ||
1736 | |||
1737 | |||
1642 | /* | 1738 | /* |
1643 | * The fcoe extra aggregative context section of Tstorm | 1739 | * The tcp aggregative context section of Xstorm |
1644 | */ | 1740 | */ |
1645 | struct tstorm_fcoe_extra_ag_context_section { | 1741 | struct xstorm_tcp_tcp_ag_context_section { |
1646 | u32 __agg_val1; | ||
1647 | #if defined(__BIG_ENDIAN) | 1742 | #if defined(__BIG_ENDIAN) |
1648 | u8 __tcp_agg_vars2; | 1743 | u8 tcp_agg_vars1; |
1649 | u8 __agg_val3; | 1744 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0) |
1650 | u16 __agg_val2; | 1745 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0 |
1746 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) | ||
1747 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 | ||
1748 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) | ||
1749 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 | ||
1750 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6) | ||
1751 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6 | ||
1752 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7) | ||
1753 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7 | ||
1754 | u8 __da_cnt; | ||
1755 | u16 mss; | ||
1651 | #elif defined(__LITTLE_ENDIAN) | 1756 | #elif defined(__LITTLE_ENDIAN) |
1652 | u16 __agg_val2; | 1757 | u16 mss; |
1653 | u8 __agg_val3; | 1758 | u8 __da_cnt; |
1654 | u8 __tcp_agg_vars2; | 1759 | u8 tcp_agg_vars1; |
1760 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0) | ||
1761 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0 | ||
1762 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) | ||
1763 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 | ||
1764 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) | ||
1765 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 | ||
1766 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6) | ||
1767 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6 | ||
1768 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7) | ||
1769 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7 | ||
1655 | #endif | 1770 | #endif |
1771 | u32 snd_nxt; | ||
1772 | u32 tx_wnd; | ||
1773 | u32 snd_una; | ||
1774 | u32 local_adv_wnd; | ||
1656 | #if defined(__BIG_ENDIAN) | 1775 | #if defined(__BIG_ENDIAN) |
1657 | u16 __agg_val5; | 1776 | u8 __agg_val8_th; |
1658 | u8 __agg_val6; | 1777 | u8 __tx_dest; |
1778 | u16 tcp_agg_vars2; | ||
1779 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0) | ||
1780 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0 | ||
1781 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1) | ||
1782 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1 | ||
1783 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2) | ||
1784 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2 | ||
1785 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) | ||
1786 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 | ||
1787 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) | ||
1788 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 | ||
1789 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5) | ||
1790 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5 | ||
1791 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6) | ||
1792 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6 | ||
1793 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) | ||
1794 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 | ||
1795 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8) | ||
1796 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8 | ||
1797 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) | ||
1798 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 | ||
1799 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) | ||
1800 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 | ||
1801 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) | ||
1802 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 | ||
1803 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) | ||
1804 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 | ||
1805 | #elif defined(__LITTLE_ENDIAN) | ||
1806 | u16 tcp_agg_vars2; | ||
1807 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0) | ||
1808 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0 | ||
1809 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1) | ||
1810 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1 | ||
1811 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2) | ||
1812 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2 | ||
1813 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) | ||
1814 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 | ||
1815 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) | ||
1816 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 | ||
1817 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5) | ||
1818 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5 | ||
1819 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6) | ||
1820 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6 | ||
1821 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) | ||
1822 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 | ||
1823 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8) | ||
1824 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8 | ||
1825 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) | ||
1826 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 | ||
1827 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) | ||
1828 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 | ||
1829 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) | ||
1830 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 | ||
1831 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) | ||
1832 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 | ||
1833 | u8 __tx_dest; | ||
1834 | u8 __agg_val8_th; | ||
1835 | #endif | ||
1836 | u32 ack_to_far_end; | ||
1837 | u32 rto_timer; | ||
1838 | u32 ka_timer; | ||
1839 | u32 ts_to_echo; | ||
1840 | #if defined(__BIG_ENDIAN) | ||
1841 | u16 __agg_val7_th; | ||
1842 | u16 __agg_val7; | ||
1843 | #elif defined(__LITTLE_ENDIAN) | ||
1844 | u16 __agg_val7; | ||
1845 | u16 __agg_val7_th; | ||
1846 | #endif | ||
1847 | #if defined(__BIG_ENDIAN) | ||
1848 | u8 __tcp_agg_vars5; | ||
1849 | u8 __tcp_agg_vars4; | ||
1659 | u8 __tcp_agg_vars3; | 1850 | u8 __tcp_agg_vars3; |
1851 | u8 __force_pure_ack_cnt; | ||
1660 | #elif defined(__LITTLE_ENDIAN) | 1852 | #elif defined(__LITTLE_ENDIAN) |
1853 | u8 __force_pure_ack_cnt; | ||
1661 | u8 __tcp_agg_vars3; | 1854 | u8 __tcp_agg_vars3; |
1662 | u8 __agg_val6; | 1855 | u8 __tcp_agg_vars4; |
1663 | u16 __agg_val5; | 1856 | u8 __tcp_agg_vars5; |
1857 | #endif | ||
1858 | u32 tcp_agg_vars6; | ||
1859 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN (0x1<<0) | ||
1860 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN_SHIFT 0 | ||
1861 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN (0x1<<1) | ||
1862 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN_SHIFT 1 | ||
1863 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN (0x1<<2) | ||
1864 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN_SHIFT 2 | ||
1865 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<3) | ||
1866 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 3 | ||
1867 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG (0x1<<4) | ||
1868 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG_SHIFT 4 | ||
1869 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG (0x1<<5) | ||
1870 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG_SHIFT 5 | ||
1871 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF (0x3<<6) | ||
1872 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF_SHIFT 6 | ||
1873 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF (0x3<<8) | ||
1874 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_SHIFT 8 | ||
1875 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF (0x3<<10) | ||
1876 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_SHIFT 10 | ||
1877 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF (0x3<<12) | ||
1878 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_SHIFT 12 | ||
1879 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF (0x3<<14) | ||
1880 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_SHIFT 14 | ||
1881 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF (0x3<<16) | ||
1882 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF_SHIFT 16 | ||
1883 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF (0x3<<18) | ||
1884 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF_SHIFT 18 | ||
1885 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF (0x3<<20) | ||
1886 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF_SHIFT 20 | ||
1887 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF (0x3<<22) | ||
1888 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF_SHIFT 22 | ||
1889 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF (0x3<<24) | ||
1890 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF_SHIFT 24 | ||
1891 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG (0x1<<26) | ||
1892 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG_SHIFT 26 | ||
1893 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71 (0x1<<27) | ||
1894 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71_SHIFT 27 | ||
1895 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY (0x1<<28) | ||
1896 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY_SHIFT 28 | ||
1897 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG (0x1<<29) | ||
1898 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG_SHIFT 29 | ||
1899 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG (0x1<<30) | ||
1900 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG_SHIFT 30 | ||
1901 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG (0x1<<31) | ||
1902 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG_SHIFT 31 | ||
1903 | #if defined(__BIG_ENDIAN) | ||
1904 | u16 __agg_misc6; | ||
1905 | u16 __tcp_agg_vars7; | ||
1906 | #elif defined(__LITTLE_ENDIAN) | ||
1907 | u16 __tcp_agg_vars7; | ||
1908 | u16 __agg_misc6; | ||
1909 | #endif | ||
1910 | u32 __agg_val10; | ||
1911 | u32 __agg_val10_th; | ||
1912 | #if defined(__BIG_ENDIAN) | ||
1913 | u16 __reserved3; | ||
1914 | u8 __reserved2; | ||
1915 | u8 __da_only_cnt; | ||
1916 | #elif defined(__LITTLE_ENDIAN) | ||
1917 | u8 __da_only_cnt; | ||
1918 | u8 __reserved2; | ||
1919 | u16 __reserved3; | ||
1664 | #endif | 1920 | #endif |
1665 | u32 __lcq_prod; | ||
1666 | u32 rtt_seq; | ||
1667 | u32 rtt_time; | ||
1668 | u32 __reserved66; | ||
1669 | u32 wnd_right_edge; | ||
1670 | u32 tcp_agg_vars1; | ||
1671 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0) | ||
1672 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0 | ||
1673 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1) | ||
1674 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1 | ||
1675 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2) | ||
1676 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2 | ||
1677 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4) | ||
1678 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4 | ||
1679 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6) | ||
1680 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6 | ||
1681 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7) | ||
1682 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7 | ||
1683 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8) | ||
1684 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8 | ||
1685 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN (0x1<<9) | ||
1686 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN_SHIFT 9 | ||
1687 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10) | ||
1688 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10 | ||
1689 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11) | ||
1690 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11 | ||
1691 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12) | ||
1692 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12 | ||
1693 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13) | ||
1694 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13 | ||
1695 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14) | ||
1696 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14 | ||
1697 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16) | ||
1698 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16 | ||
1699 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18) | ||
1700 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18 | ||
1701 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19) | ||
1702 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19 | ||
1703 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20) | ||
1704 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20 | ||
1705 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21) | ||
1706 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21 | ||
1707 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22) | ||
1708 | #define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22 | ||
1709 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24) | ||
1710 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24 | ||
1711 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28) | ||
1712 | #define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28 | ||
1713 | u32 snd_max; | ||
1714 | u32 __lcq_cons; | ||
1715 | u32 __reserved2; | ||
1716 | }; | 1921 | }; |
1717 | 1922 | ||
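Note: every 32-bit word in these context sections is declared twice, once per host byte order, with the 8-bit and 16-bit members mirrored. The usual reason for this pattern is that the chip consumes the context as little-endian 32-bit words, so mirroring the member order on big-endian hosts keeps each logical field in the same bit positions of the word value the hardware sees. The sketch below illustrates the idea with the agg_val2/agg_val2_th pair from xstorm_iscsi_ag_context; the kernel headers select the branch via __BIG_ENDIAN/__LITTLE_ENDIAN, and the GCC __BYTE_ORDER__ test here is only a userspace stand-in. The struct is a cut-down illustration, not the real context layout.

	#include <stdio.h>
	#include <stdint.h>

	typedef uint16_t u16;
	typedef uint32_t u32;

	struct agg_val2_word {
	#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__	/* kernel: #if defined(__BIG_ENDIAN) */
		u16 agg_val2_th;
		u16 agg_val2;
	#else						/* kernel: __LITTLE_ENDIAN */
		u16 agg_val2;
		u16 agg_val2_th;
	#endif
	};

	int main(void)
	{
		union {
			struct agg_val2_word w;
			u32 val;
		} u = { .w = { .agg_val2 = 0x1234, .agg_val2_th = 0xabcd } };

		/* On either host endianness the 32-bit value is the same:
		 * bits 0..15 carry agg_val2 and bits 16..31 carry agg_val2_th,
		 * which is what a value-preserving write of the word to the
		 * (little-endian) chip context relies on. */
		printf("word value = 0x%08x\n", (unsigned)u.val);
		return 0;
	}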
1718 | /* | 1923 | /* |
1719 | * The fcoe aggregative context of Tstorm | 1924 | * The iscsi aggregative context of Xstorm |
1720 | */ | 1925 | */ |
1721 | struct tstorm_fcoe_ag_context { | 1926 | struct xstorm_iscsi_ag_context { |
1722 | #if defined(__BIG_ENDIAN) | 1927 | #if defined(__BIG_ENDIAN) |
1723 | u16 ulp_credit; | 1928 | u16 agg_val1; |
1724 | u8 agg_vars1; | 1929 | u8 agg_vars1; |
1725 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | 1930 | #define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) |
1726 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | 1931 | #define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 |
1727 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | 1932 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) |
1728 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | 1933 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 |
1729 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | 1934 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) |
1730 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | 1935 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 |
1731 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | 1936 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) |
1732 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | 1937 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 |
1733 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4) | 1938 | #define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) |
1734 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4 | 1939 | #define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 |
1735 | #define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6) | 1940 | #define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5) |
1736 | #define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6 | 1941 | #define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5 |
1737 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7) | 1942 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) |
1738 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7 | 1943 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 |
1944 | #define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) | ||
1945 | #define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 | ||
1739 | u8 state; | 1946 | u8 state; |
1740 | #elif defined(__LITTLE_ENDIAN) | 1947 | #elif defined(__LITTLE_ENDIAN) |
1741 | u8 state; | 1948 | u8 state; |
1742 | u8 agg_vars1; | 1949 | u8 agg_vars1; |
1743 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | 1950 | #define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) |
1744 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | 1951 | #define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 |
1745 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | 1952 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) |
1746 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | 1953 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 |
1747 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | 1954 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) |
1748 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | 1955 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 |
1749 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | 1956 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) |
1750 | #define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | 1957 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 |
1751 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4) | 1958 | #define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) |
1752 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4 | 1959 | #define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 |
1753 | #define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6) | 1960 | #define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5) |
1754 | #define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6 | 1961 | #define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5 |
1755 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7) | 1962 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) |
1756 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7 | 1963 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 |
1757 | u16 ulp_credit; | 1964 | #define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) |
1965 | #define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 | ||
1966 | u16 agg_val1; | ||
1758 | #endif | 1967 | #endif |
1759 | #if defined(__BIG_ENDIAN) | 1968 | #if defined(__BIG_ENDIAN) |
1760 | u16 __agg_val4; | 1969 | u8 cdu_reserved; |
1761 | u16 agg_vars2; | 1970 | u8 __agg_vars4; |
1762 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0) | 1971 | u8 agg_vars3; |
1763 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0 | 1972 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) |
1764 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1) | 1973 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 |
1765 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1 | 1974 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) |
1766 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2) | 1975 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 |
1767 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2 | 1976 | u8 agg_vars2; |
1768 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4) | 1977 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0) |
1769 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4 | 1978 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0 |
1770 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6) | 1979 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) |
1771 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6 | 1980 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 |
1772 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8) | 1981 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3) |
1773 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8 | 1982 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3 |
1774 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10) | 1983 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4) |
1775 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10 | 1984 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4 |
1776 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11) | 1985 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5) |
1777 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11 | 1986 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5 |
1778 | #define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12) | 1987 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) |
1779 | #define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12 | 1988 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 |
1780 | #define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13) | ||
1781 | #define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13 | ||
1782 | #define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14) | ||
1783 | #define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 | ||
1784 | #define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15) | ||
1785 | #define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 | ||
1786 | #elif defined(__LITTLE_ENDIAN) | 1989 | #elif defined(__LITTLE_ENDIAN) |
1787 | u16 agg_vars2; | 1990 | u8 agg_vars2; |
1788 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0) | 1991 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0) |
1789 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0 | 1992 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0 |
1790 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1) | 1993 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) |
1791 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1 | 1994 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 |
1792 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2) | 1995 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3) |
1793 | #define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2 | 1996 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3 |
1794 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4) | 1997 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4) |
1795 | #define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4 | 1998 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4 |
1796 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6) | 1999 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5) |
1797 | #define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6 | 2000 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5 |
1798 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8) | 2001 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) |
1799 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8 | 2002 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 |
1800 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10) | 2003 | u8 agg_vars3; |
1801 | #define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10 | 2004 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) |
1802 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11) | 2005 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 |
1803 | #define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11 | 2006 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) |
1804 | #define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12) | 2007 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 |
1805 | #define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12 | 2008 | u8 __agg_vars4; |
1806 | #define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13) | 2009 | u8 cdu_reserved; |
1807 | #define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13 | ||
1808 | #define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14) | ||
1809 | #define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 | ||
1810 | #define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15) | ||
1811 | #define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 | ||
1812 | u16 __agg_val4; | ||
1813 | #endif | 2010 | #endif |
1814 | struct tstorm_fcoe_extra_ag_context_section __extra_section; | 2011 | u32 more_to_send; |
2012 | #if defined(__BIG_ENDIAN) | ||
2013 | u16 agg_vars5; | ||
2014 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0) | ||
2015 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0 | ||
2016 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) | ||
2017 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 | ||
2018 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) | ||
2019 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 | ||
2020 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14) | ||
2021 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14 | ||
2022 | u16 sq_cons; | ||
2023 | #elif defined(__LITTLE_ENDIAN) | ||
2024 | u16 sq_cons; | ||
2025 | u16 agg_vars5; | ||
2026 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0) | ||
2027 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0 | ||
2028 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) | ||
2029 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 | ||
2030 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) | ||
2031 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 | ||
2032 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14) | ||
2033 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14 | ||
2034 | #endif | ||
2035 | struct xstorm_tcp_tcp_ag_context_section tcp; | ||
2036 | #if defined(__BIG_ENDIAN) | ||
2037 | u16 agg_vars7; | ||
2038 | #define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) | ||
2039 | #define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 | ||
2040 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) | ||
2041 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 | ||
2042 | #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) | ||
2043 | #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 | ||
2044 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) | ||
2045 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 | ||
2046 | #define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) | ||
2047 | #define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8 | ||
2048 | #define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) | ||
2049 | #define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 | ||
2050 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11) | ||
2051 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 | ||
2052 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12) | ||
2053 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12 | ||
2054 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13) | ||
2055 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 | ||
2056 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) | ||
2057 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 | ||
2058 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) | ||
2059 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 | ||
2060 | u8 agg_val3_th; | ||
2061 | u8 agg_vars6; | ||
2062 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0) | ||
2063 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0 | ||
2064 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3) | ||
2065 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3 | ||
2066 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6) | ||
2067 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6 | ||
2068 | #elif defined(__LITTLE_ENDIAN) | ||
2069 | u8 agg_vars6; | ||
2070 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0) | ||
2071 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0 | ||
2072 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3) | ||
2073 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3 | ||
2074 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6) | ||
2075 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6 | ||
2076 | u8 agg_val3_th; | ||
2077 | u16 agg_vars7; | ||
2078 | #define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) | ||
2079 | #define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 | ||
2080 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) | ||
2081 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 | ||
2082 | #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) | ||
2083 | #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 | ||
2084 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) | ||
2085 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 | ||
2086 | #define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) | ||
2087 | #define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8 | ||
2088 | #define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) | ||
2089 | #define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 | ||
2090 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11) | ||
2091 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 | ||
2092 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12) | ||
2093 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12 | ||
2094 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13) | ||
2095 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 | ||
2096 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) | ||
2097 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 | ||
2098 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) | ||
2099 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 | ||
2100 | #endif | ||
2101 | #if defined(__BIG_ENDIAN) | ||
2102 | u16 __agg_val11_th; | ||
2103 | u16 __gen_data; | ||
2104 | #elif defined(__LITTLE_ENDIAN) | ||
2105 | u16 __gen_data; | ||
2106 | u16 __agg_val11_th; | ||
2107 | #endif | ||
2108 | #if defined(__BIG_ENDIAN) | ||
2109 | u8 __reserved1; | ||
2110 | u8 __agg_val6_th; | ||
2111 | u16 __agg_val9; | ||
2112 | #elif defined(__LITTLE_ENDIAN) | ||
2113 | u16 __agg_val9; | ||
2114 | u8 __agg_val6_th; | ||
2115 | u8 __reserved1; | ||
2116 | #endif | ||
2117 | #if defined(__BIG_ENDIAN) | ||
2118 | u16 hq_prod; | ||
2119 | u16 hq_cons; | ||
2120 | #elif defined(__LITTLE_ENDIAN) | ||
2121 | u16 hq_cons; | ||
2122 | u16 hq_prod; | ||
2123 | #endif | ||
2124 | u32 agg_vars8; | ||
2125 | #define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0) | ||
2126 | #define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2_SHIFT 0 | ||
2127 | #define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3 (0xFF<<24) | ||
2128 | #define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3_SHIFT 24 | ||
2129 | #if defined(__BIG_ENDIAN) | ||
2130 | u16 r2tq_prod; | ||
2131 | u16 sq_prod; | ||
2132 | #elif defined(__LITTLE_ENDIAN) | ||
2133 | u16 sq_prod; | ||
2134 | u16 r2tq_prod; | ||
2135 | #endif | ||
2136 | #if defined(__BIG_ENDIAN) | ||
2137 | u8 agg_val3; | ||
2138 | u8 agg_val6; | ||
2139 | u8 agg_val5_th; | ||
2140 | u8 agg_val5; | ||
2141 | #elif defined(__LITTLE_ENDIAN) | ||
2142 | u8 agg_val5; | ||
2143 | u8 agg_val5_th; | ||
2144 | u8 agg_val6; | ||
2145 | u8 agg_val3; | ||
2146 | #endif | ||
2147 | #if defined(__BIG_ENDIAN) | ||
2148 | u16 __agg_misc1; | ||
2149 | u16 agg_limit1; | ||
2150 | #elif defined(__LITTLE_ENDIAN) | ||
2151 | u16 agg_limit1; | ||
2152 | u16 __agg_misc1; | ||
2153 | #endif | ||
2154 | u32 hq_cons_tcp_seq; | ||
2155 | u32 exp_stat_sn; | ||
2156 | u32 rst_seq_num; | ||
1815 | }; | 2157 | }; |
1816 | 2158 | ||
2159 | |||
1817 | /* | 2160 | /* |
1818 | * The fcoe aggregative context of Ustorm | 2161 | * The L5cm aggregative context of XStorm |
1819 | */ | 2162 | */ |
1820 | struct ustorm_fcoe_ag_context { | 2163 | struct xstorm_l5cm_ag_context { |
1821 | #if defined(__BIG_ENDIAN) | 2164 | #if defined(__BIG_ENDIAN) |
1822 | u8 __aux_counter_flags; | 2165 | u16 agg_val1; |
1823 | u8 agg_vars2; | ||
1824 | #define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0) | ||
1825 | #define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0 | ||
1826 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2) | ||
1827 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2 | ||
1828 | #define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) | ||
1829 | #define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 | ||
1830 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) | ||
1831 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 | ||
1832 | u8 agg_vars1; | 2166 | u8 agg_vars1; |
1833 | #define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | 2167 | #define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) |
1834 | #define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | 2168 | #define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 |
1835 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | 2169 | #define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) |
1836 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | 2170 | #define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 |
1837 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | 2171 | #define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) |
1838 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | 2172 | #define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 |
1839 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | 2173 | #define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) |
1840 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | 2174 | #define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 |
1841 | #define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4) | 2175 | #define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) |
1842 | #define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4 | 2176 | #define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 |
1843 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6) | 2177 | #define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5) |
1844 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6 | 2178 | #define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5 |
2179 | #define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) | ||
2180 | #define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 | ||
2181 | #define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) | ||
2182 | #define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 | ||
1845 | u8 state; | 2183 | u8 state; |
1846 | #elif defined(__LITTLE_ENDIAN) | 2184 | #elif defined(__LITTLE_ENDIAN) |
1847 | u8 state; | 2185 | u8 state; |
1848 | u8 agg_vars1; | 2186 | u8 agg_vars1; |
1849 | #define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | 2187 | #define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) |
1850 | #define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | 2188 | #define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 |
1851 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | 2189 | #define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) |
1852 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | 2190 | #define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 |
1853 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | 2191 | #define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) |
1854 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | 2192 | #define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 |
1855 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | 2193 | #define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) |
1856 | #define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | 2194 | #define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 |
1857 | #define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4) | 2195 | #define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) |
1858 | #define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4 | 2196 | #define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 |
1859 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6) | 2197 | #define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5) |
1860 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6 | 2198 | #define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5 |
1861 | u8 agg_vars2; | 2199 | #define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) |
1862 | #define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0) | 2200 | #define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 |
1863 | #define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0 | 2201 | #define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) |
1864 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2) | 2202 | #define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 |
1865 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2 | 2203 | u16 agg_val1; |
1866 | #define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) | ||
1867 | #define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 | ||
1868 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) | ||
1869 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 | ||
1870 | u8 __aux_counter_flags; | ||
1871 | #endif | 2204 | #endif |
1872 | #if defined(__BIG_ENDIAN) | 2205 | #if defined(__BIG_ENDIAN) |
1873 | u8 cdu_usage; | 2206 | u8 cdu_reserved; |
1874 | u8 agg_misc2; | 2207 | u8 __agg_vars4; |
1875 | u16 pbf_tx_seq_ack; | 2208 | u8 agg_vars3; |
2209 | #define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) | ||
2210 | #define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 | ||
2211 | #define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) | ||
2212 | #define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 | ||
2213 | u8 agg_vars2; | ||
2214 | #define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0) | ||
2215 | #define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0 | ||
2216 | #define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) | ||
2217 | #define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 | ||
2218 | #define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3) | ||
2219 | #define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3 | ||
2220 | #define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4) | ||
2221 | #define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4 | ||
2222 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5) | ||
2223 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5 | ||
2224 | #define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7) | ||
2225 | #define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7 | ||
1876 | #elif defined(__LITTLE_ENDIAN) | 2226 | #elif defined(__LITTLE_ENDIAN) |
1877 | u16 pbf_tx_seq_ack; | 2227 | u8 agg_vars2; |
1878 | u8 agg_misc2; | 2228 | #define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0) |
1879 | u8 cdu_usage; | 2229 | #define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0 |
2230 | #define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) | ||
2231 | #define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 | ||
2232 | #define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3) | ||
2233 | #define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3 | ||
2234 | #define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4) | ||
2235 | #define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4 | ||
2236 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5) | ||
2237 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5 | ||
2238 | #define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7) | ||
2239 | #define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7 | ||
2240 | u8 agg_vars3; | ||
2241 | #define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) | ||
2242 | #define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 | ||
2243 | #define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) | ||
2244 | #define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 | ||
2245 | u8 __agg_vars4; | ||
2246 | u8 cdu_reserved; | ||
1880 | #endif | 2247 | #endif |
1881 | u32 agg_misc4; | 2248 | u32 more_to_send; |
2249 | #if defined(__BIG_ENDIAN) | ||
2250 | u16 agg_vars5; | ||
2251 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0) | ||
2252 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0 | ||
2253 | #define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) | ||
2254 | #define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 | ||
2255 | #define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) | ||
2256 | #define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 | ||
2257 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14) | ||
2258 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14 | ||
2259 | u16 agg_val4_th; | ||
2260 | #elif defined(__LITTLE_ENDIAN) | ||
2261 | u16 agg_val4_th; | ||
2262 | u16 agg_vars5; | ||
2263 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0) | ||
2264 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0 | ||
2265 | #define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) | ||
2266 | #define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 | ||
2267 | #define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) | ||
2268 | #define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 | ||
2269 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14) | ||
2270 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14 | ||
2271 | #endif | ||
2272 | struct xstorm_tcp_tcp_ag_context_section tcp; | ||
1882 | #if defined(__BIG_ENDIAN) | 2273 | #if defined(__BIG_ENDIAN) |
2274 | u16 agg_vars7; | ||
2275 | #define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) | ||
2276 | #define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 | ||
2277 | #define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3) | ||
2278 | #define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3 | ||
2279 | #define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) | ||
2280 | #define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 | ||
2281 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6) | ||
2282 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6 | ||
2283 | #define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8) | ||
2284 | #define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8 | ||
2285 | #define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) | ||
2286 | #define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 | ||
2287 | #define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11) | ||
2288 | #define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 | ||
2289 | #define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12) | ||
2290 | #define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12 | ||
2291 | #define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13) | ||
2292 | #define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13 | ||
2293 | #define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14) | ||
2294 | #define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14 | ||
2295 | #define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) | ||
2296 | #define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 | ||
1883 | u8 agg_val3_th; | 2297 | u8 agg_val3_th; |
2298 | u8 agg_vars6; | ||
2299 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0) | ||
2300 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0 | ||
2301 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3) | ||
2302 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3 | ||
2303 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6) | ||
2304 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6 | ||
2305 | #elif defined(__LITTLE_ENDIAN) | ||
2306 | u8 agg_vars6; | ||
2307 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0) | ||
2308 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0 | ||
2309 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3) | ||
2310 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3 | ||
2311 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6) | ||
2312 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6 | ||
2313 | u8 agg_val3_th; | ||
2314 | u16 agg_vars7; | ||
2315 | #define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) | ||
2316 | #define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 | ||
2317 | #define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3) | ||
2318 | #define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3 | ||
2319 | #define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) | ||
2320 | #define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 | ||
2321 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6) | ||
2322 | #define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6 | ||
2323 | #define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8) | ||
2324 | #define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8 | ||
2325 | #define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) | ||
2326 | #define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 | ||
2327 | #define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11) | ||
2328 | #define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 | ||
2329 | #define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12) | ||
2330 | #define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12 | ||
2331 | #define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13) | ||
2332 | #define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13 | ||
2333 | #define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14) | ||
2334 | #define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14 | ||
2335 | #define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) | ||
2336 | #define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 | ||
2337 | #endif | ||
2338 | #if defined(__BIG_ENDIAN) | ||
2339 | u16 __agg_val11_th; | ||
2340 | u16 __gen_data; | ||
2341 | #elif defined(__LITTLE_ENDIAN) | ||
2342 | u16 __gen_data; | ||
2343 | u16 __agg_val11_th; | ||
2344 | #endif | ||
2345 | #if defined(__BIG_ENDIAN) | ||
2346 | u8 __reserved1; | ||
2347 | u8 __agg_val6_th; | ||
2348 | u16 __agg_val9; | ||
2349 | #elif defined(__LITTLE_ENDIAN) | ||
2350 | u16 __agg_val9; | ||
2351 | u8 __agg_val6_th; | ||
2352 | u8 __reserved1; | ||
2353 | #endif | ||
2354 | #if defined(__BIG_ENDIAN) | ||
2355 | u16 agg_val2_th; | ||
2356 | u16 agg_val2; | ||
2357 | #elif defined(__LITTLE_ENDIAN) | ||
2358 | u16 agg_val2; | ||
2359 | u16 agg_val2_th; | ||
2360 | #endif | ||
2361 | u32 agg_vars8; | ||
2362 | #define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0) | ||
2363 | #define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2_SHIFT 0 | ||
2364 | #define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3 (0xFF<<24) | ||
2365 | #define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3_SHIFT 24 | ||
2366 | #if defined(__BIG_ENDIAN) | ||
2367 | u16 agg_misc0; | ||
2368 | u16 agg_val4; | ||
2369 | #elif defined(__LITTLE_ENDIAN) | ||
2370 | u16 agg_val4; | ||
2371 | u16 agg_misc0; | ||
2372 | #endif | ||
2373 | #if defined(__BIG_ENDIAN) | ||
1884 | u8 agg_val3; | 2374 | u8 agg_val3; |
1885 | u16 agg_misc3; | 2375 | u8 agg_val6; |
2376 | u8 agg_val5_th; | ||
2377 | u8 agg_val5; | ||
1886 | #elif defined(__LITTLE_ENDIAN) | 2378 | #elif defined(__LITTLE_ENDIAN) |
1887 | u16 agg_misc3; | 2379 | u8 agg_val5; |
2380 | u8 agg_val5_th; | ||
2381 | u8 agg_val6; | ||
1888 | u8 agg_val3; | 2382 | u8 agg_val3; |
1889 | u8 agg_val3_th; | ||
1890 | #endif | 2383 | #endif |
1891 | u32 expired_task_id; | ||
1892 | u32 agg_misc4_th; | ||
1893 | #if defined(__BIG_ENDIAN) | 2384 | #if defined(__BIG_ENDIAN) |
1894 | u16 cq_prod; | 2385 | u16 __agg_misc1; |
2386 | u16 agg_limit1; | ||
2387 | #elif defined(__LITTLE_ENDIAN) | ||
2388 | u16 agg_limit1; | ||
2389 | u16 __agg_misc1; | ||
2390 | #endif | ||
2391 | u32 completion_seq; | ||
2392 | u32 agg_misc4; | ||
2393 | u32 rst_seq_num; | ||
2394 | }; | ||
2395 | |||
2396 | /* | ||
2397 | * ABTS info $$KEEP_ENDIANNESS$$ | ||
2398 | */ | ||
2399 | struct fcoe_abts_info { | ||
2400 | __le16 aborted_task_id; | ||
2401 | __le16 reserved0; | ||
2402 | __le32 reserved1; | ||
2403 | }; | ||
2404 | |||
2405 | |||
2406 | /* | ||
2407 | * Fixed size structure in order to plant it in Union structure | ||
2408 | * $$KEEP_ENDIANNESS$$ | ||
2409 | */ | ||
2410 | struct fcoe_abts_rsp_union { | ||
2411 | u8 r_ctl; | ||
2412 | u8 rsrv[3]; | ||
2413 | __le32 abts_rsp_payload[7]; | ||
2414 | }; | ||
2415 | |||
2416 | |||
2417 | /* | ||
2418 | * 4 regs size $$KEEP_ENDIANNESS$$ | ||
2419 | */ | ||
2420 | struct fcoe_bd_ctx { | ||
2421 | __le32 buf_addr_hi; | ||
2422 | __le32 buf_addr_lo; | ||
2423 | __le16 buf_len; | ||
2424 | __le16 rsrv0; | ||
2425 | __le16 flags; | ||
2426 | __le16 rsrv1; | ||
2427 | }; | ||
2428 | |||
2429 | |||
2430 | /* | ||
2431 | * FCoE cached sges context $$KEEP_ENDIANNESS$$ | ||
2432 | */ | ||
2433 | struct fcoe_cached_sge_ctx { | ||
2434 | struct regpair cur_buf_addr; | ||
2435 | __le16 cur_buf_rem; | ||
2436 | __le16 second_buf_rem; | ||
2437 | struct regpair second_buf_addr; | ||
2438 | }; | ||
2439 | |||
2440 | |||
2441 | /* | ||
2442 | * Cleanup info $$KEEP_ENDIANNESS$$ | ||
2443 | */ | ||
2444 | struct fcoe_cleanup_info { | ||
2445 | __le16 cleaned_task_id; | ||
2446 | __le16 rolled_tx_seq_cnt; | ||
2447 | __le32 rolled_tx_data_offset; | ||
2448 | }; | ||
2449 | |||
2450 | |||
2451 | /* | ||
2452 | * Fcp RSP flags $$KEEP_ENDIANNESS$$ | ||
2453 | */ | ||
2454 | struct fcoe_fcp_rsp_flags { | ||
2455 | u8 flags; | ||
2456 | #define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0) | ||
2457 | #define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0 | ||
2458 | #define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1) | ||
2459 | #define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1 | ||
2460 | #define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2) | ||
2461 | #define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2 | ||
2462 | #define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3) | ||
2463 | #define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3 | ||
2464 | #define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4) | ||
2465 | #define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4 | ||
2466 | #define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5) | ||
2467 | #define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5 | ||
2468 | }; | ||
2469 | |||
2470 | /* | ||
2471 | * Fcp RSP payload $$KEEP_ENDIANNESS$$ | ||
2472 | */ | ||
2473 | struct fcoe_fcp_rsp_payload { | ||
2474 | struct regpair reserved0; | ||
2475 | __le32 fcp_resid; | ||
2476 | u8 scsi_status_code; | ||
2477 | struct fcoe_fcp_rsp_flags fcp_flags; | ||
2478 | __le16 retry_delay_timer; | ||
2479 | __le32 fcp_rsp_len; | ||
2480 | __le32 fcp_sns_len; | ||
2481 | }; | ||
2482 | |||
2483 | /* | ||
2484 | * Fixed size structure in order to plant it in Union structure | ||
2485 | * $$KEEP_ENDIANNESS$$ | ||
2486 | */ | ||
2487 | struct fcoe_fcp_rsp_union { | ||
2488 | struct fcoe_fcp_rsp_payload payload; | ||
2489 | struct regpair reserved0; | ||
2490 | }; | ||
2491 | |||
2492 | /* | ||
2493 | * FC header $$KEEP_ENDIANNESS$$ | ||
2494 | */ | ||
2495 | struct fcoe_fc_hdr { | ||
2496 | u8 s_id[3]; | ||
2497 | u8 cs_ctl; | ||
2498 | u8 d_id[3]; | ||
2499 | u8 r_ctl; | ||
2500 | __le16 seq_cnt; | ||
2501 | u8 df_ctl; | ||
2502 | u8 seq_id; | ||
2503 | u8 f_ctl[3]; | ||
2504 | u8 type; | ||
2505 | __le32 parameters; | ||
2506 | __le16 rx_id; | ||
2507 | __le16 ox_id; | ||
2508 | }; | ||
2509 | |||
2510 | /* | ||
2511 | * FC header union $$KEEP_ENDIANNESS$$ | ||
2512 | */ | ||
2513 | struct fcoe_mp_rsp_union { | ||
2514 | struct fcoe_fc_hdr fc_hdr; | ||
2515 | __le32 mp_payload_len; | ||
2516 | __le32 rsrv; | ||
2517 | }; | ||
2518 | |||
2519 | /* | ||
2520 | * Completion information $$KEEP_ENDIANNESS$$ | ||
2521 | */ | ||
2522 | union fcoe_comp_flow_info { | ||
2523 | struct fcoe_fcp_rsp_union fcp_rsp; | ||
2524 | struct fcoe_abts_rsp_union abts_rsp; | ||
2525 | struct fcoe_mp_rsp_union mp_rsp; | ||
2526 | __le32 opaque[8]; | ||
2527 | }; | ||
2528 | |||
2529 | |||
2530 | /* | ||
2531 | * External ABTS info $$KEEP_ENDIANNESS$$ | ||
2532 | */ | ||
2533 | struct fcoe_ext_abts_info { | ||
2534 | __le32 rsrv0[6]; | ||
2535 | struct fcoe_abts_info ctx; | ||
2536 | }; | ||
2537 | |||
2538 | |||
2539 | /* | ||
2540 | * External cleanup info $$KEEP_ENDIANNESS$$ | ||
2541 | */ | ||
2542 | struct fcoe_ext_cleanup_info { | ||
2543 | __le32 rsrv0[6]; | ||
2544 | struct fcoe_cleanup_info ctx; | ||
2545 | }; | ||
2546 | |||
2547 | |||
2548 | /* | ||
2549 | * Fcoe FW Tx sequence context $$KEEP_ENDIANNESS$$ | ||
2550 | */ | ||
2551 | struct fcoe_fw_tx_seq_ctx { | ||
2552 | __le32 data_offset; | ||
2553 | __le16 seq_cnt; | ||
2554 | __le16 rsrv0; | ||
2555 | }; | ||
2556 | |||
2557 | /* | ||
2558 | * Fcoe external FW Tx sequence context $$KEEP_ENDIANNESS$$ | ||
2559 | */ | ||
2560 | struct fcoe_ext_fw_tx_seq_ctx { | ||
2561 | __le32 rsrv0[6]; | ||
2562 | struct fcoe_fw_tx_seq_ctx ctx; | ||
2563 | }; | ||
2564 | |||
2565 | |||
2566 | /* | ||
2567 | * FCoE multiple sges context $$KEEP_ENDIANNESS$$ | ||
2568 | */ | ||
2569 | struct fcoe_mul_sges_ctx { | ||
2570 | struct regpair cur_sge_addr; | ||
2571 | __le16 cur_sge_off; | ||
2572 | u8 cur_sge_idx; | ||
2573 | u8 sgl_size; | ||
2574 | }; | ||
2575 | |||
2576 | /* | ||
2577 | * FCoE external multiple sges context $$KEEP_ENDIANNESS$$ | ||
2578 | */ | ||
2579 | struct fcoe_ext_mul_sges_ctx { | ||
2580 | struct fcoe_mul_sges_ctx mul_sgl; | ||
2581 | struct regpair rsrv0; | ||
2582 | }; | ||
2583 | |||
2584 | |||
2585 | /* | ||
2586 | * FCP CMD payload $$KEEP_ENDIANNESS$$ | ||
2587 | */ | ||
2588 | struct fcoe_fcp_cmd_payload { | ||
2589 | __le32 opaque[8]; | ||
2590 | }; | ||
2591 | |||
2592 | |||
2593 | |||
2594 | |||
2595 | |||
2596 | /* | ||
2597 | * Fcp xfr rdy payload $$KEEP_ENDIANNESS$$ | ||
2598 | */ | ||
2599 | struct fcoe_fcp_xfr_rdy_payload { | ||
2600 | __le32 burst_len; | ||
2601 | __le32 data_ro; | ||
2602 | }; | ||
2603 | |||
2604 | |||
2605 | /* | ||
2606 | * FC frame $$KEEP_ENDIANNESS$$ | ||
2607 | */ | ||
2608 | struct fcoe_fc_frame { | ||
2609 | struct fcoe_fc_hdr fc_hdr; | ||
2610 | __le32 reserved0[2]; | ||
2611 | }; | ||
2612 | |||
2613 | |||
2614 | |||
2615 | |||
2616 | /* | ||
2617 | * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$ | ||
2618 | */ | ||
2619 | union fcoe_kcqe_params { | ||
2620 | __le32 reserved0[4]; | ||
2621 | }; | ||
2622 | |||
2623 | /* | ||
2624 | * FCoE KCQ CQE $$KEEP_ENDIANNESS$$ | ||
2625 | */ | ||
2626 | struct fcoe_kcqe { | ||
2627 | __le32 fcoe_conn_id; | ||
2628 | __le32 completion_status; | ||
2629 | __le32 fcoe_conn_context_id; | ||
2630 | union fcoe_kcqe_params params; | ||
2631 | __le16 qe_self_seq; | ||
2632 | u8 op_code; | ||
2633 | u8 flags; | ||
2634 | #define FCOE_KCQE_RESERVED0 (0x7<<0) | ||
2635 | #define FCOE_KCQE_RESERVED0_SHIFT 0 | ||
2636 | #define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3) | ||
2637 | #define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3 | ||
2638 | #define FCOE_KCQE_LAYER_CODE (0x7<<4) | ||
2639 | #define FCOE_KCQE_LAYER_CODE_SHIFT 4 | ||
2640 | #define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) | ||
2641 | #define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 | ||
2642 | }; | ||
2643 | |||
2644 | |||
2645 | |||
2646 | /* | ||
2647 | * FCoE KWQE header $$KEEP_ENDIANNESS$$ | ||
2648 | */ | ||
2649 | struct fcoe_kwqe_header { | ||
2650 | u8 op_code; | ||
2651 | u8 flags; | ||
2652 | #define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) | ||
2653 | #define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0 | ||
2654 | #define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4) | ||
2655 | #define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 | ||
2656 | #define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) | ||
2657 | #define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 | ||
2658 | }; | ||
2659 | |||
2660 | /* | ||
2661 | * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$ | ||
2662 | */ | ||
2663 | struct fcoe_kwqe_init1 { | ||
2664 | __le16 num_tasks; | ||
2665 | struct fcoe_kwqe_header hdr; | ||
2666 | __le32 task_list_pbl_addr_lo; | ||
2667 | __le32 task_list_pbl_addr_hi; | ||
2668 | __le32 dummy_buffer_addr_lo; | ||
2669 | __le32 dummy_buffer_addr_hi; | ||
2670 | __le16 sq_num_wqes; | ||
2671 | __le16 rq_num_wqes; | ||
2672 | __le16 rq_buffer_log_size; | ||
2673 | __le16 cq_num_wqes; | ||
2674 | __le16 mtu; | ||
2675 | u8 num_sessions_log; | ||
2676 | u8 flags; | ||
2677 | #define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) | ||
2678 | #define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0 | ||
2679 | #define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4) | ||
2680 | #define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 | ||
2681 | #define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) | ||
2682 | #define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 | ||
2683 | }; | ||
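A minimal sketch of how a caller might fill the init1 request above, assuming the HSI header is included and the usual kernel headers (<linux/kernel.h>, <linux/string.h>) are available. The fields are __le16/__le32 because the structure is tagged $$KEEP_ENDIANNESS$$, so values are converted with cpu_to_le*() and 64-bit DMA addresses are split into the _lo/_hi pair; the helper name and the numeric values are illustrative only and do not come from this patch.

	/* Illustrative only: fill fcoe_kwqe_init1 in wire (little-endian) order. */
	static void fcoe_fill_init1_sketch(struct fcoe_kwqe_init1 *init1,
					   dma_addr_t task_list_pbl,
					   u16 num_tasks)
	{
		memset(init1, 0, sizeof(*init1));
		init1->num_tasks = cpu_to_le16(num_tasks);
		/* 64-bit DMA address split into the lo/hi register pair */
		init1->task_list_pbl_addr_lo = cpu_to_le32(lower_32_bits(task_list_pbl));
		init1->task_list_pbl_addr_hi = cpu_to_le32(upper_32_bits(task_list_pbl));
		init1->mtu = cpu_to_le16(2500);	/* illustrative mini-jumbo MTU */
		/* single-byte 'flags' field packed via its mask/shift pair */
		init1->flags |= (PAGE_SHIFT << FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT) &
				FCOE_KWQE_INIT1_LOG_PAGE_SIZE;
	}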
2684 | |||
2685 | /* | ||
2686 | * FCoE firmware init request 2 $$KEEP_ENDIANNESS$$ | ||
2687 | */ | ||
2688 | struct fcoe_kwqe_init2 { | ||
2689 | u8 hsi_major_version; | ||
2690 | u8 hsi_minor_version; | ||
2691 | struct fcoe_kwqe_header hdr; | ||
2692 | __le32 hash_tbl_pbl_addr_lo; | ||
2693 | __le32 hash_tbl_pbl_addr_hi; | ||
2694 | __le32 t2_hash_tbl_addr_lo; | ||
2695 | __le32 t2_hash_tbl_addr_hi; | ||
2696 | __le32 t2_ptr_hash_tbl_addr_lo; | ||
2697 | __le32 t2_ptr_hash_tbl_addr_hi; | ||
2698 | __le32 free_list_count; | ||
2699 | }; | ||
2700 | |||
2701 | /* | ||
2702 | * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$ | ||
2703 | */ | ||
2704 | struct fcoe_kwqe_init3 { | ||
2705 | __le16 reserved0; | ||
2706 | struct fcoe_kwqe_header hdr; | ||
2707 | __le32 error_bit_map_lo; | ||
2708 | __le32 error_bit_map_hi; | ||
2709 | u8 perf_config; | ||
2710 | u8 reserved21[3]; | ||
2711 | __le32 reserved2[4]; | ||
2712 | }; | ||
2713 | |||
2714 | /* | ||
2715 | * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$ | ||
2716 | */ | ||
2717 | struct fcoe_kwqe_conn_offload1 { | ||
2718 | __le16 fcoe_conn_id; | ||
2719 | struct fcoe_kwqe_header hdr; | ||
2720 | __le32 sq_addr_lo; | ||
2721 | __le32 sq_addr_hi; | ||
2722 | __le32 rq_pbl_addr_lo; | ||
2723 | __le32 rq_pbl_addr_hi; | ||
2724 | __le32 rq_first_pbe_addr_lo; | ||
2725 | __le32 rq_first_pbe_addr_hi; | ||
2726 | __le16 rq_prod; | ||
2727 | __le16 reserved0; | ||
2728 | }; | ||
2729 | |||
2730 | /* | ||
2731 | * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$ | ||
2732 | */ | ||
2733 | struct fcoe_kwqe_conn_offload2 { | ||
2734 | __le16 tx_max_fc_pay_len; | ||
2735 | struct fcoe_kwqe_header hdr; | ||
2736 | __le32 cq_addr_lo; | ||
2737 | __le32 cq_addr_hi; | ||
2738 | __le32 xferq_addr_lo; | ||
2739 | __le32 xferq_addr_hi; | ||
2740 | __le32 conn_db_addr_lo; | ||
2741 | __le32 conn_db_addr_hi; | ||
2742 | __le32 reserved1; | ||
2743 | }; | ||
2744 | |||
2745 | /* | ||
2746 | * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$ | ||
2747 | */ | ||
2748 | struct fcoe_kwqe_conn_offload3 { | ||
2749 | __le16 vlan_tag; | ||
2750 | #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) | ||
2751 | #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 | ||
2752 | #define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) | ||
2753 | #define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12 | ||
2754 | #define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) | ||
2755 | #define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 | ||
2756 | struct fcoe_kwqe_header hdr; | ||
2757 | u8 s_id[3]; | ||
2758 | u8 tx_max_conc_seqs_c3; | ||
2759 | u8 d_id[3]; | ||
2760 | u8 flags; | ||
2761 | #define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) | ||
2762 | #define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0 | ||
2763 | #define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1) | ||
2764 | #define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1 | ||
2765 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2) | ||
2766 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2 | ||
2767 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3) | ||
2768 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3 | ||
2769 | #define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4) | ||
2770 | #define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4 | ||
2771 | #define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5) | ||
2772 | #define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5 | ||
2773 | #define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6) | ||
2774 | #define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 | ||
2775 | #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) | ||
2776 | #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 | ||
2777 | __le32 reserved; | ||
2778 | __le32 confq_first_pbe_addr_lo; | ||
2779 | __le32 confq_first_pbe_addr_hi; | ||
2780 | __le16 tx_total_conc_seqs; | ||
2781 | __le16 rx_max_fc_pay_len; | ||
2782 | __le16 rx_total_conc_seqs; | ||
2783 | u8 rx_max_conc_seqs_c3; | ||
2784 | u8 rx_open_seqs_exch_c3; | ||
2785 | }; | ||
2786 | |||
2787 | /* | ||
2788 | * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$ | ||
2789 | */ | ||
2790 | struct fcoe_kwqe_conn_offload4 { | ||
2791 | u8 e_d_tov_timer_val; | ||
2792 | u8 reserved2; | ||
2793 | struct fcoe_kwqe_header hdr; | ||
2794 | u8 src_mac_addr_lo[2]; | ||
2795 | u8 src_mac_addr_mid[2]; | ||
2796 | u8 src_mac_addr_hi[2]; | ||
2797 | u8 dst_mac_addr_hi[2]; | ||
2798 | u8 dst_mac_addr_lo[2]; | ||
2799 | u8 dst_mac_addr_mid[2]; | ||
2800 | __le32 lcq_addr_lo; | ||
2801 | __le32 lcq_addr_hi; | ||
2802 | __le32 confq_pbl_base_addr_lo; | ||
2803 | __le32 confq_pbl_base_addr_hi; | ||
2804 | }; | ||
2805 | |||
2806 | /* | ||
2807 | * FCoE connection enable request $$KEEP_ENDIANNESS$$ | ||
2808 | */ | ||
2809 | struct fcoe_kwqe_conn_enable_disable { | ||
2810 | __le16 reserved0; | ||
2811 | struct fcoe_kwqe_header hdr; | ||
2812 | u8 src_mac_addr_lo[2]; | ||
2813 | u8 src_mac_addr_mid[2]; | ||
2814 | u8 src_mac_addr_hi[2]; | ||
2815 | u16 vlan_tag; | ||
2816 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) | ||
2817 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 | ||
2818 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12) | ||
2819 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 | ||
2820 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) | ||
2821 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 | ||
2822 | u8 dst_mac_addr_lo[2]; | ||
2823 | u8 dst_mac_addr_mid[2]; | ||
2824 | u8 dst_mac_addr_hi[2]; | ||
2825 | __le16 reserved1; | ||
2826 | u8 s_id[3]; | ||
2827 | u8 vlan_flag; | ||
2828 | u8 d_id[3]; | ||
2829 | u8 reserved3; | ||
2830 | __le32 context_id; | ||
2831 | __le32 conn_id; | ||
2832 | __le32 reserved4; | ||
2833 | }; | ||
2834 | |||
2835 | /* | ||
2836 | * FCoE connection destroy request $$KEEP_ENDIANNESS$$ | ||
2837 | */ | ||
2838 | struct fcoe_kwqe_conn_destroy { | ||
2839 | __le16 reserved0; | ||
2840 | struct fcoe_kwqe_header hdr; | ||
2841 | __le32 context_id; | ||
2842 | __le32 conn_id; | ||
2843 | __le32 reserved1[5]; | ||
2844 | }; | ||
2845 | |||
2846 | /* | ||
2847 | * FCoE destroy request $$KEEP_ENDIANNESS$$ | ||
2848 | */ | ||
2849 | struct fcoe_kwqe_destroy { | ||
2850 | __le16 reserved0; | ||
2851 | struct fcoe_kwqe_header hdr; | ||
2852 | __le32 reserved1[7]; | ||
2853 | }; | ||
2854 | |||
2855 | /* | ||
2856 | * FCoE statistics request $$KEEP_ENDIANNESS$$ | ||
2857 | */ | ||
2858 | struct fcoe_kwqe_stat { | ||
2859 | __le16 reserved0; | ||
2860 | struct fcoe_kwqe_header hdr; | ||
2861 | __le32 stat_params_addr_lo; | ||
2862 | __le32 stat_params_addr_hi; | ||
2863 | __le32 reserved1[5]; | ||
2864 | }; | ||
2865 | |||
2866 | /* | ||
2867 | * FCoE KWQ WQE $$KEEP_ENDIANNESS$$ | ||
2868 | */ | ||
2869 | union fcoe_kwqe { | ||
2870 | struct fcoe_kwqe_init1 init1; | ||
2871 | struct fcoe_kwqe_init2 init2; | ||
2872 | struct fcoe_kwqe_init3 init3; | ||
2873 | struct fcoe_kwqe_conn_offload1 conn_offload1; | ||
2874 | struct fcoe_kwqe_conn_offload2 conn_offload2; | ||
2875 | struct fcoe_kwqe_conn_offload3 conn_offload3; | ||
2876 | struct fcoe_kwqe_conn_offload4 conn_offload4; | ||
2877 | struct fcoe_kwqe_conn_enable_disable conn_enable_disable; | ||
2878 | struct fcoe_kwqe_conn_destroy conn_destroy; | ||
2879 | struct fcoe_kwqe_destroy destroy; | ||
2880 | struct fcoe_kwqe_stat statistics; | ||
2881 | }; | ||
2882 | |||
2883 | |||
2884 | |||
2885 | |||
2886 | |||
2887 | |||
2888 | |||
2889 | |||
2890 | |||
2891 | |||
2892 | |||
2893 | |||
2894 | |||
2895 | |||
2896 | |||
2897 | |||
2898 | /* | ||
2899 | * TX SGL context $$KEEP_ENDIANNESS$$ | ||
2900 | */ | ||
2901 | union fcoe_sgl_union_ctx { | ||
2902 | struct fcoe_cached_sge_ctx cached_sge; | ||
2903 | struct fcoe_ext_mul_sges_ctx sgl; | ||
2904 | __le32 opaque[5]; | ||
2905 | }; | ||
2906 | |||
2907 | /* | ||
2908 | * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$ | ||
2909 | */ | ||
2910 | struct fcoe_read_flow_info { | ||
2911 | union fcoe_sgl_union_ctx sgl_ctx; | ||
2912 | __le32 rsrv0[3]; | ||
2913 | }; | ||
2914 | |||
2915 | |||
2916 | /* | ||
2917 | * Fcoe stat context $$KEEP_ENDIANNESS$$ | ||
2918 | */ | ||
2919 | struct fcoe_s_stat_ctx { | ||
2920 | u8 flags; | ||
2921 | #define FCOE_S_STAT_CTX_ACTIVE (0x1<<0) | ||
2922 | #define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0 | ||
2923 | #define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1) | ||
2924 | #define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1 | ||
2925 | #define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2) | ||
2926 | #define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2 | ||
2927 | #define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3) | ||
2928 | #define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3 | ||
2929 | #define FCOE_S_STAT_CTX_P_RJT (0x1<<4) | ||
2930 | #define FCOE_S_STAT_CTX_P_RJT_SHIFT 4 | ||
2931 | #define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5) | ||
2932 | #define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5 | ||
2933 | #define FCOE_S_STAT_CTX_RSRV1 (0x3<<6) | ||
2934 | #define FCOE_S_STAT_CTX_RSRV1_SHIFT 6 | ||
2935 | }; | ||
2936 | |||
2937 | /* | ||
2938 | * Fcoe rx seq context $$KEEP_ENDIANNESS$$ | ||
2939 | */ | ||
2940 | struct fcoe_rx_seq_ctx { | ||
2941 | u8 seq_id; | ||
2942 | struct fcoe_s_stat_ctx s_stat; | ||
2943 | __le16 seq_cnt; | ||
2944 | __le32 low_exp_ro; | ||
2945 | __le32 high_exp_ro; | ||
2946 | }; | ||
2947 | |||
2948 | |||
2949 | /* | ||
2950 | * Fcoe rx_wr union context $$KEEP_ENDIANNESS$$ | ||
2951 | */ | ||
2952 | union fcoe_rx_wr_union_ctx { | ||
2953 | struct fcoe_read_flow_info read_info; | ||
2954 | union fcoe_comp_flow_info comp_info; | ||
2955 | __le32 opaque[8]; | ||
2956 | }; | ||
2957 | |||
2958 | |||
2959 | |||
2960 | /* | ||
2961 | * FCoE SQ element $$KEEP_ENDIANNESS$$ | ||
2962 | */ | ||
2963 | struct fcoe_sqe { | ||
2964 | __le16 wqe; | ||
2965 | #define FCOE_SQE_TASK_ID (0x7FFF<<0) | ||
2966 | #define FCOE_SQE_TASK_ID_SHIFT 0 | ||
2967 | #define FCOE_SQE_TOGGLE_BIT (0x1<<15) | ||
2968 | #define FCOE_SQE_TOGGLE_BIT_SHIFT 15 | ||
2969 | }; | ||
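The SQ element above packs a 15-bit task id and a toggle bit into a single little-endian word through the MASK/SHIFT pairs. A hedged sketch of that packing pattern follows; the helper name is invented for illustration and does not appear in the patch.

	/* Illustrative only: compose an SQ element from the mask/shift pairs above. */
	static void fcoe_sqe_fill_sketch(struct fcoe_sqe *sqe, u16 task_id, bool toggle)
	{
		u16 wqe = 0;

		wqe |= (task_id << FCOE_SQE_TASK_ID_SHIFT) & FCOE_SQE_TASK_ID;
		if (toggle)
			wqe |= FCOE_SQE_TOGGLE_BIT;
		sqe->wqe = cpu_to_le16(wqe);	/* struct field is __le16 */
	}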
2970 | |||
2971 | |||
2972 | |||
2973 | /* | ||
2974 | * 14 regs $$KEEP_ENDIANNESS$$ | ||
2975 | */ | ||
2976 | struct fcoe_tce_tx_only { | ||
2977 | union fcoe_sgl_union_ctx sgl_ctx; | ||
2978 | __le32 rsrv0; | ||
2979 | }; | ||
2980 | |||
2981 | /* | ||
2982 | * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$ | ||
2983 | */ | ||
2984 | union fcoe_tx_wr_rx_rd_union_ctx { | ||
2985 | struct fcoe_fc_frame tx_frame; | ||
2986 | struct fcoe_fcp_cmd_payload fcp_cmd; | ||
2987 | struct fcoe_ext_cleanup_info cleanup; | ||
2988 | struct fcoe_ext_abts_info abts; | ||
2989 | struct fcoe_ext_fw_tx_seq_ctx tx_seq; | ||
2990 | __le32 opaque[8]; | ||
2991 | }; | ||
2992 | |||
2993 | /* | ||
2994 | * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$ | ||
2995 | */ | ||
2996 | struct fcoe_tce_tx_wr_rx_rd_const { | ||
2997 | u8 init_flags; | ||
2998 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0) | ||
2999 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0 | ||
3000 | #define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3) | ||
3001 | #define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3 | ||
3002 | #define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4) | ||
3003 | #define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4 | ||
3004 | #define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5) | ||
3005 | #define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5 | ||
3006 | #define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7) | ||
3007 | #define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7 | ||
3008 | u8 tx_flags; | ||
3009 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0) | ||
3010 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0 | ||
3011 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1) | ||
3012 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1 | ||
3013 | #define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5) | ||
3014 | #define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5 | ||
3015 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6) | ||
3016 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6 | ||
3017 | #define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2 (0x1<<7) | ||
3018 | #define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2_SHIFT 7 | ||
3019 | __le16 rsrv3; | ||
3020 | __le32 verify_tx_seq; | ||
3021 | }; | ||
3022 | |||
3023 | /* | ||
3024 | * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$ | ||
3025 | */ | ||
3026 | struct fcoe_tce_tx_wr_rx_rd { | ||
3027 | union fcoe_tx_wr_rx_rd_union_ctx union_ctx; | ||
3028 | struct fcoe_tce_tx_wr_rx_rd_const const_ctx; | ||
3029 | }; | ||
3030 | |||
3031 | /* | ||
3032 | * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$ | ||
3033 | */ | ||
3034 | struct fcoe_tce_rx_wr_tx_rd_const { | ||
3035 | __le32 data_2_trns; | ||
3036 | __le32 init_flags; | ||
3037 | #define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0) | ||
3038 | #define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0 | ||
3039 | #define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24) | ||
3040 | #define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24 | ||
3041 | }; | ||
3042 | |||
3043 | /* | ||
3044 | * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$ | ||
3045 | */ | ||
3046 | struct fcoe_tce_rx_wr_tx_rd_var { | ||
3047 | __le16 rx_flags; | ||
3048 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0) | ||
3049 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0 | ||
3050 | #define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4) | ||
3051 | #define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4 | ||
3052 | #define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7) | ||
3053 | #define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7 | ||
3054 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8) | ||
3055 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8 | ||
3056 | #define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME (0x1<<12) | ||
3057 | #define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12 | ||
3058 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13) | ||
3059 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13 | ||
3060 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14) | ||
3061 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14 | ||
3062 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15) | ||
3063 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15 | ||
3064 | __le16 rx_id; | ||
3065 | struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy; | ||
3066 | }; | ||
3067 | |||
3068 | /* | ||
3069 | * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$ | ||
3070 | */ | ||
3071 | struct fcoe_tce_rx_wr_tx_rd { | ||
3072 | struct fcoe_tce_rx_wr_tx_rd_const const_ctx; | ||
3073 | struct fcoe_tce_rx_wr_tx_rd_var var_ctx; | ||
3074 | }; | ||
3075 | |||
3076 | /* | ||
3077 | * tce_rx_only $$KEEP_ENDIANNESS$$ | ||
3078 | */ | ||
3079 | struct fcoe_tce_rx_only { | ||
3080 | struct fcoe_rx_seq_ctx rx_seq_ctx; | ||
3081 | union fcoe_rx_wr_union_ctx union_ctx; | ||
3082 | }; | ||
3083 | |||
3084 | /* | ||
3085 | * task_ctx_entry $$KEEP_ENDIANNESS$$ | ||
3086 | */ | ||
3087 | struct fcoe_task_ctx_entry { | ||
3088 | struct fcoe_tce_tx_only txwr_only; | ||
3089 | struct fcoe_tce_tx_wr_rx_rd txwr_rxrd; | ||
3090 | struct fcoe_tce_rx_wr_tx_rd rxwr_txrd; | ||
3091 | struct fcoe_tce_rx_only rxwr_only; | ||
3092 | }; | ||
3093 | |||
3094 | |||
3095 | |||
3096 | |||
3097 | |||
3098 | |||
3099 | |||
3100 | |||
3101 | |||
3102 | |||
3103 | /* | ||
3104 | * FCoE XFRQ element $$KEEP_ENDIANNESS$$ | ||
3105 | */ | ||
3106 | struct fcoe_xfrqe { | ||
3107 | __le16 wqe; | ||
3108 | #define FCOE_XFRQE_TASK_ID (0x7FFF<<0) | ||
3109 | #define FCOE_XFRQE_TASK_ID_SHIFT 0 | ||
3110 | #define FCOE_XFRQE_TOGGLE_BIT (0x1<<15) | ||
3111 | #define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15 | ||
3112 | }; | ||
3113 | |||
3114 | |||
3115 | /* | ||
3116 | * Cached SGEs $$KEEP_ENDIANNESS$$ | ||
3117 | */ | ||
3118 | struct common_fcoe_sgl { | ||
3119 | struct fcoe_bd_ctx sge[3]; | ||
3120 | }; | ||
3121 | |||
3122 | |||
3123 | /* | ||
3124 | * FCoE SQ\XFRQ element | ||
3125 | */ | ||
3126 | struct fcoe_cached_wqe { | ||
3127 | struct fcoe_sqe sqe; | ||
3128 | struct fcoe_xfrqe xfrqe; | ||
3129 | }; | ||
3130 | |||
3131 | |||
3132 | /* | ||
3133 | * FCoE connection enable\disable params passed by driver to FW in FCoE enable | ||
3134 | * ramrod $$KEEP_ENDIANNESS$$ | ||
3135 | */ | ||
3136 | struct fcoe_conn_enable_disable_ramrod_params { | ||
3137 | struct fcoe_kwqe_conn_enable_disable enable_disable_kwqe; | ||
3138 | }; | ||
3139 | |||
3140 | |||
3141 | /* | ||
3142 | * FCoE connection offload params passed by driver to FW in FCoE offload ramrod | ||
3143 | * $$KEEP_ENDIANNESS$$ | ||
3144 | */ | ||
3145 | struct fcoe_conn_offload_ramrod_params { | ||
3146 | struct fcoe_kwqe_conn_offload1 offload_kwqe1; | ||
3147 | struct fcoe_kwqe_conn_offload2 offload_kwqe2; | ||
3148 | struct fcoe_kwqe_conn_offload3 offload_kwqe3; | ||
3149 | struct fcoe_kwqe_conn_offload4 offload_kwqe4; | ||
3150 | }; | ||
3151 | |||
3152 | |||
3153 | struct ustorm_fcoe_mng_ctx { | ||
3154 | #if defined(__BIG_ENDIAN) | ||
3155 | u8 mid_seq_proc_flag; | ||
3156 | u8 tce_in_cam_flag; | ||
3157 | u8 tce_on_ior_flag; | ||
3158 | u8 en_cached_tce_flag; | ||
3159 | #elif defined(__LITTLE_ENDIAN) | ||
3160 | u8 en_cached_tce_flag; | ||
3161 | u8 tce_on_ior_flag; | ||
3162 | u8 tce_in_cam_flag; | ||
3163 | u8 mid_seq_proc_flag; | ||
3164 | #endif | ||
3165 | #if defined(__BIG_ENDIAN) | ||
3166 | u8 tce_cam_addr; | ||
3167 | u8 cached_conn_flag; | ||
3168 | u16 rsrv0; | ||
3169 | #elif defined(__LITTLE_ENDIAN) | ||
3170 | u16 rsrv0; | ||
3171 | u8 cached_conn_flag; | ||
3172 | u8 tce_cam_addr; | ||
3173 | #endif | ||
3174 | #if defined(__BIG_ENDIAN) | ||
3175 | u16 dma_tce_ram_addr; | ||
3176 | u16 tce_ram_addr; | ||
3177 | #elif defined(__LITTLE_ENDIAN) | ||
3178 | u16 tce_ram_addr; | ||
3179 | u16 dma_tce_ram_addr; | ||
3180 | #endif | ||
3181 | #if defined(__BIG_ENDIAN) | ||
3182 | u16 ox_id; | ||
3183 | u16 wr_done_seq; | ||
3184 | #elif defined(__LITTLE_ENDIAN) | ||
3185 | u16 wr_done_seq; | ||
3186 | u16 ox_id; | ||
3187 | #endif | ||
3188 | struct regpair task_addr; | ||
3189 | }; | ||
3190 | |||
3191 | /* | ||
3192 | * Parameters initialized during offload according to FLOGI/PLOGI/PRLI and | ||
3193 | * used in the FCoE context section | ||
3194 | */ | ||
3195 | struct ustorm_fcoe_params { | ||
3196 | #if defined(__BIG_ENDIAN) | ||
3197 | u16 fcoe_conn_id; | ||
3198 | u16 flags; | ||
3199 | #define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0) | ||
3200 | #define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0 | ||
3201 | #define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1) | ||
3202 | #define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1 | ||
3203 | #define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2) | ||
3204 | #define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2 | ||
3205 | #define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3) | ||
3206 | #define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3 | ||
3207 | #define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4) | ||
3208 | #define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4 | ||
3209 | #define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5) | ||
3210 | #define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5 | ||
3211 | #define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6) | ||
3212 | #define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6 | ||
3213 | #define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7) | ||
3214 | #define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7 | ||
3215 | #elif defined(__LITTLE_ENDIAN) | ||
3216 | u16 flags; | ||
3217 | #define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0) | ||
3218 | #define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0 | ||
3219 | #define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1) | ||
3220 | #define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1 | ||
3221 | #define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2) | ||
3222 | #define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2 | ||
3223 | #define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3) | ||
3224 | #define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3 | ||
3225 | #define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4) | ||
3226 | #define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4 | ||
3227 | #define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5) | ||
3228 | #define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5 | ||
3229 | #define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6) | ||
3230 | #define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6 | ||
3231 | #define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7) | ||
3232 | #define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7 | ||
3233 | u16 fcoe_conn_id; | ||
3234 | #endif | ||
3235 | #if defined(__BIG_ENDIAN) | ||
3236 | u8 hc_csdm_byte_en; | ||
3237 | u8 func_id; | ||
3238 | u8 port_id; | ||
3239 | u8 vnic_id; | ||
3240 | #elif defined(__LITTLE_ENDIAN) | ||
3241 | u8 vnic_id; | ||
3242 | u8 port_id; | ||
3243 | u8 func_id; | ||
3244 | u8 hc_csdm_byte_en; | ||
3245 | #endif | ||
3246 | #if defined(__BIG_ENDIAN) | ||
3247 | u16 rx_total_conc_seqs; | ||
3248 | u16 rx_max_fc_pay_len; | ||
3249 | #elif defined(__LITTLE_ENDIAN) | ||
3250 | u16 rx_max_fc_pay_len; | ||
3251 | u16 rx_total_conc_seqs; | ||
3252 | #endif | ||
3253 | #if defined(__BIG_ENDIAN) | ||
3254 | u8 task_pbe_idx_off; | ||
3255 | u8 task_in_page_log_size; | ||
3256 | u16 rx_max_conc_seqs; | ||
3257 | #elif defined(__LITTLE_ENDIAN) | ||
3258 | u16 rx_max_conc_seqs; | ||
3259 | u8 task_in_page_log_size; | ||
3260 | u8 task_pbe_idx_off; | ||
3261 | #endif | ||
3262 | }; | ||
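The B_*/SHIFT macro pairs above carve individual parameters out of the 16-bit flags word. A hedged sketch of typical accessors (the helper names and the params pointer are not from the source; only the macros are):

/* Set or clear the confirmation-request bit in the flags word. */
static void example_set_conf_req(struct ustorm_fcoe_params *params, int on)
{
	params->flags &= ~USTORM_FCOE_PARAMS_B_CONF_REQ;
	if (on)
		params->flags |= 1 << USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT;
}

/* Read back the REC-validity bit. */
static int example_get_rec_valid(const struct ustorm_fcoe_params *params)
{
	return (params->flags & USTORM_FCOE_PARAMS_B_REC_VALID) >>
	       USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT;
}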
3263 | |||
3264 | /* | ||
3265 | * FCoE 16-bits index structure | ||
3266 | */ | ||
3267 | struct fcoe_idx16_fields { | ||
3268 | u16 fields; | ||
3269 | #define FCOE_IDX16_FIELDS_IDX (0x7FFF<<0) | ||
3270 | #define FCOE_IDX16_FIELDS_IDX_SHIFT 0 | ||
3271 | #define FCOE_IDX16_FIELDS_MSB (0x1<<15) | ||
3272 | #define FCOE_IDX16_FIELDS_MSB_SHIFT 15 | ||
3273 | }; | ||
3274 | |||
3275 | /* | ||
3276 | * FCoE 16-bits index union | ||
3277 | */ | ||
3278 | union fcoe_idx16_field_union { | ||
3279 | struct fcoe_idx16_fields fields; | ||
3280 | u16 val; | ||
3281 | }; | ||
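The union lets the driver treat a queue index either as a raw 16-bit value or as a 15-bit index plus a wrap (MSB) bit. A small sketch, assuming callers hold the raw value (the helper names are hypothetical):

static u16 example_idx16_index(u16 raw)
{
	union fcoe_idx16_field_union u;

	u.val = raw;
	return (u.fields.fields & FCOE_IDX16_FIELDS_IDX) >>
	       FCOE_IDX16_FIELDS_IDX_SHIFT;
}

static u16 example_idx16_msb(u16 raw)
{
	union fcoe_idx16_field_union u;

	u.val = raw;
	return (u.fields.fields & FCOE_IDX16_FIELDS_MSB) >>
	       FCOE_IDX16_FIELDS_MSB_SHIFT;
}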
3282 | |||
3283 | /* | ||
3284 | * Parameters required for placement according to SGL | ||
3285 | */ | ||
3286 | struct ustorm_fcoe_data_place_mng { | ||
3287 | #if defined(__BIG_ENDIAN) | ||
3288 | u16 sge_off; | ||
3289 | u8 num_sges; | ||
3290 | u8 sge_idx; | ||
3291 | #elif defined(__LITTLE_ENDIAN) | ||
3292 | u8 sge_idx; | ||
3293 | u8 num_sges; | ||
3294 | u16 sge_off; | ||
3295 | #endif | ||
3296 | }; | ||
3297 | |||
3298 | /* | ||
3299 | * Parameters required for placement according to SGL | ||
3300 | */ | ||
3301 | struct ustorm_fcoe_data_place { | ||
3302 | struct ustorm_fcoe_data_place_mng cached_mng; | ||
3303 | struct fcoe_bd_ctx cached_sge[2]; | ||
3304 | }; | ||
3305 | |||
3306 | /* | ||
3307 | * TX processing shall write and RX processing shall read from this section | ||
3308 | */ | ||
3309 | union fcoe_u_tce_tx_wr_rx_rd_union { | ||
3310 | struct fcoe_abts_info abts; | ||
3311 | struct fcoe_cleanup_info cleanup; | ||
3312 | struct fcoe_fw_tx_seq_ctx tx_seq_ctx; | ||
3313 | u32 opaque[2]; | ||
3314 | }; | ||
3315 | |||
3316 | /* | ||
3317 | * TX processing shall write and RX processing shall read from this section | ||
3318 | */ | ||
3319 | struct fcoe_u_tce_tx_wr_rx_rd { | ||
3320 | union fcoe_u_tce_tx_wr_rx_rd_union union_ctx; | ||
3321 | struct fcoe_tce_tx_wr_rx_rd_const const_ctx; | ||
3322 | }; | ||
3323 | |||
3324 | struct ustorm_fcoe_tce { | ||
3325 | struct fcoe_u_tce_tx_wr_rx_rd txwr_rxrd; | ||
3326 | struct fcoe_tce_rx_wr_tx_rd rxwr_txrd; | ||
3327 | struct fcoe_tce_rx_only rxwr; | ||
3328 | }; | ||
3329 | |||
3330 | struct ustorm_fcoe_cache_ctx { | ||
3331 | u32 rsrv0; | ||
3332 | struct ustorm_fcoe_data_place data_place; | ||
3333 | struct ustorm_fcoe_tce tce; | ||
3334 | }; | ||
3335 | |||
3336 | /* | ||
3337 | * Ustorm FCoE Storm Context | ||
3338 | */ | ||
3339 | struct ustorm_fcoe_st_context { | ||
3340 | struct ustorm_fcoe_mng_ctx mng_ctx; | ||
3341 | struct ustorm_fcoe_params fcoe_params; | ||
3342 | struct regpair cq_base_addr; | ||
3343 | struct regpair rq_pbl_base; | ||
3344 | struct regpair rq_cur_page_addr; | ||
3345 | struct regpair confq_pbl_base_addr; | ||
3346 | struct regpair conn_db_base; | ||
3347 | struct regpair xfrq_base_addr; | ||
3348 | struct regpair lcq_base_addr; | ||
3349 | #if defined(__BIG_ENDIAN) | ||
3350 | union fcoe_idx16_field_union rq_cons; | ||
3351 | union fcoe_idx16_field_union rq_prod; | ||
3352 | #elif defined(__LITTLE_ENDIAN) | ||
3353 | union fcoe_idx16_field_union rq_prod; | ||
3354 | union fcoe_idx16_field_union rq_cons; | ||
3355 | #endif | ||
3356 | #if defined(__BIG_ENDIAN) | ||
3357 | u16 xfrq_prod; | ||
1895 | u16 cq_cons; | 3358 | u16 cq_cons; |
1896 | #elif defined(__LITTLE_ENDIAN) | 3359 | #elif defined(__LITTLE_ENDIAN) |
1897 | u16 cq_cons; | 3360 | u16 cq_cons; |
1898 | u16 cq_prod; | 3361 | u16 xfrq_prod; |
1899 | #endif | 3362 | #endif |
1900 | #if defined(__BIG_ENDIAN) | 3363 | #if defined(__BIG_ENDIAN) |
1901 | u16 __reserved2; | 3364 | u16 lcq_cons; |
1902 | u8 decision_rules; | 3365 | u16 hc_cram_address; |
1903 | #define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0) | ||
1904 | #define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0 | ||
1905 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) | ||
1906 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 | ||
1907 | #define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6) | ||
1908 | #define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6 | ||
1909 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7) | ||
1910 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7 | ||
1911 | u8 decision_rule_enable_bits; | ||
1912 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0) | ||
1913 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0 | ||
1914 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) | ||
1915 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 | ||
1916 | #define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2) | ||
1917 | #define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2 | ||
1918 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3) | ||
1919 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 | ||
1920 | #define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4) | ||
1921 | #define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4 | ||
1922 | #define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5) | ||
1923 | #define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5 | ||
1924 | #define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6) | ||
1925 | #define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 | ||
1926 | #define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) | ||
1927 | #define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 | ||
1928 | #elif defined(__LITTLE_ENDIAN) | 3366 | #elif defined(__LITTLE_ENDIAN) |
1929 | u8 decision_rule_enable_bits; | 3367 | u16 hc_cram_address; |
1930 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0) | 3368 | u16 lcq_cons; |
1931 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0 | 3369 | #endif |
1932 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) | 3370 | #if defined(__BIG_ENDIAN) |
1933 | #define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 | 3371 | u16 sq_xfrq_lcq_confq_size; |
1934 | #define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2) | 3372 | u16 confq_prod; |
1935 | #define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2 | 3373 | #elif defined(__LITTLE_ENDIAN) |
1936 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3) | 3374 | u16 confq_prod; |
1937 | #define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 | 3375 | u16 sq_xfrq_lcq_confq_size; |
1938 | #define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4) | 3376 | #endif |
1939 | #define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4 | 3377 | #if defined(__BIG_ENDIAN) |
1940 | #define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5) | 3378 | u8 hc_csdm_agg_int; |
1941 | #define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5 | 3379 | u8 rsrv2; |
1942 | #define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6) | 3380 | u8 available_rqes; |
1943 | #define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 | 3381 | u8 sp_q_flush_cnt; |
1944 | #define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) | 3382 | #elif defined(__LITTLE_ENDIAN) |
1945 | #define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 | 3383 | u8 sp_q_flush_cnt; |
1946 | u8 decision_rules; | 3384 | u8 available_rqes; |
1947 | #define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0) | 3385 | u8 rsrv2; |
1948 | #define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0 | 3386 | u8 hc_csdm_agg_int; |
1949 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) | ||
1950 | #define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 | ||
1951 | #define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6) | ||
1952 | #define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6 | ||
1953 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7) | ||
1954 | #define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7 | ||
1955 | u16 __reserved2; | ||
1956 | #endif | 3387 | #endif |
3388 | #if defined(__BIG_ENDIAN) | ||
3389 | u16 num_pend_tasks; | ||
3390 | u16 pbf_ack_ram_addr; | ||
3391 | #elif defined(__LITTLE_ENDIAN) | ||
3392 | u16 pbf_ack_ram_addr; | ||
3393 | u16 num_pend_tasks; | ||
3394 | #endif | ||
3395 | struct ustorm_fcoe_cache_ctx cache_ctx; | ||
3396 | }; | ||
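The Ustorm context carries the DMA base addresses of the per-connection queues as regpair values. A hedged sketch of how a driver could fill two of them during connection setup, assuming struct regpair exposes 32-bit hi/lo halves as elsewhere in this HSI (the helper name and its callers are hypothetical):

static void example_fill_ustorm_queue_bases(struct ustorm_fcoe_st_context *ctx,
					    dma_addr_t cq_dma,
					    dma_addr_t rq_pbl_dma)
{
	ctx->cq_base_addr.hi = (u32)((u64)cq_dma >> 32);
	ctx->cq_base_addr.lo = (u32)cq_dma;
	ctx->rq_pbl_base.hi  = (u32)((u64)rq_pbl_dma >> 32);
	ctx->rq_pbl_base.lo  = (u32)rq_pbl_dma;
}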
3397 | |||
3398 | /* | ||
3399 | * The FCoE non-aggregative context of Tstorm | ||
3400 | */ | ||
3401 | struct tstorm_fcoe_st_context { | ||
3402 | struct regpair reserved0; | ||
3403 | struct regpair reserved1; | ||
1957 | }; | 3404 | }; |
1958 | 3405 | ||
1959 | /* | 3406 | /* |
@@ -2023,86 +3470,106 @@ struct xstorm_fcoe_context_flags { | |||
2023 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q_SHIFT 0 | 3470 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q_SHIFT 0 |
2024 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ (0x1<<2) | 3471 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ (0x1<<2) |
2025 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ_SHIFT 2 | 3472 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ_SHIFT 2 |
2026 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_EXCHANGE_CLEANUP_DEFFERED (0x1<<3) | 3473 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ (0x1<<3) |
2027 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_EXCHANGE_CLEANUP_DEFFERED_SHIFT 3 | 3474 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ_SHIFT 3 |
2028 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT (0x1<<4) | 3475 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT (0x1<<4) |
2029 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT_SHIFT 4 | 3476 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT_SHIFT 4 |
2030 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE (0x1<<5) | 3477 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE (0x1<<5) |
2031 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE_SHIFT 5 | 3478 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE_SHIFT 5 |
2032 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE (0x1<<6) | 3479 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE (0x1<<6) |
2033 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE_SHIFT 6 | 3480 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE_SHIFT 6 |
2034 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_ABTS_DEFFERED (0x1<<7) | 3481 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN (0x1<<7) |
2035 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_ABTS_DEFFERED_SHIFT 7 | 3482 | #define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN_SHIFT 7 |
2036 | }; | 3483 | }; |
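The renamed bits (B_BLOCK_SQ, B_VNTAG_VLAN) follow the same single-bit mask/shift convention as before. A sketch of reading one of the toggle bits out of the raw flags byte (the helper name is hypothetical; only the macros are from the HSI):

static int example_sq_toggle_is_set(u8 tx_flags)
{
	return (tx_flags & XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE) >>
	       XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE_SHIFT;
}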
2037 | 3484 | ||
2038 | /* | 3485 | struct xstorm_fcoe_tce { |
2039 | * FCoE SQ element | 3486 | struct fcoe_tce_tx_only txwr; |
2040 | */ | 3487 | struct fcoe_tce_tx_wr_rx_rd txwr_rxrd; |
2041 | struct fcoe_sqe { | ||
2042 | u16 wqe; | ||
2043 | #define FCOE_SQE_TASK_ID (0x7FFF<<0) | ||
2044 | #define FCOE_SQE_TASK_ID_SHIFT 0 | ||
2045 | #define FCOE_SQE_TOGGLE_BIT (0x1<<15) | ||
2046 | #define FCOE_SQE_TOGGLE_BIT_SHIFT 15 | ||
2047 | }; | ||
2048 | |||
2049 | /* | ||
2050 | * FCoE XFRQ element | ||
2051 | */ | ||
2052 | struct fcoe_xfrqe { | ||
2053 | u16 wqe; | ||
2054 | #define FCOE_XFRQE_TASK_ID (0x7FFF<<0) | ||
2055 | #define FCOE_XFRQE_TASK_ID_SHIFT 0 | ||
2056 | #define FCOE_XFRQE_TOGGLE_BIT (0x1<<15) | ||
2057 | #define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15 | ||
2058 | }; | 3488 | }; |
2059 | 3489 | ||
2060 | /* | 3490 | /* |
2061 | * FCoE SQ\XFRQ element | 3491 | * FCP_DATA parameters required for transmission |
2062 | */ | 3492 | */ |
2063 | struct fcoe_cached_wqe { | 3493 | struct xstorm_fcoe_fcp_data { |
3494 | u32 io_rem; | ||
2064 | #if defined(__BIG_ENDIAN) | 3495 | #if defined(__BIG_ENDIAN) |
2065 | struct fcoe_xfrqe xfrqe; | 3496 | u16 cached_sge_off; |
2066 | struct fcoe_sqe sqe; | 3497 | u8 cached_num_sges; |
3498 | u8 cached_sge_idx; | ||
2067 | #elif defined(__LITTLE_ENDIAN) | 3499 | #elif defined(__LITTLE_ENDIAN) |
2068 | struct fcoe_sqe sqe; | 3500 | u8 cached_sge_idx; |
2069 | struct fcoe_xfrqe xfrqe; | 3501 | u8 cached_num_sges; |
3502 | u16 cached_sge_off; | ||
3503 | #endif | ||
3504 | u32 buf_addr_hi_0; | ||
3505 | u32 buf_addr_lo_0; | ||
3506 | #if defined(__BIG_ENDIAN) | ||
3507 | u16 num_of_pending_tasks; | ||
3508 | u16 buf_len_0; | ||
3509 | #elif defined(__LITTLE_ENDIAN) | ||
3510 | u16 buf_len_0; | ||
3511 | u16 num_of_pending_tasks; | ||
3512 | #endif | ||
3513 | u32 buf_addr_hi_1; | ||
3514 | u32 buf_addr_lo_1; | ||
3515 | #if defined(__BIG_ENDIAN) | ||
3516 | u16 task_pbe_idx_off; | ||
3517 | u16 buf_len_1; | ||
3518 | #elif defined(__LITTLE_ENDIAN) | ||
3519 | u16 buf_len_1; | ||
3520 | u16 task_pbe_idx_off; | ||
3521 | #endif | ||
3522 | u32 buf_addr_hi_2; | ||
3523 | u32 buf_addr_lo_2; | ||
3524 | #if defined(__BIG_ENDIAN) | ||
3525 | u16 ox_id; | ||
3526 | u16 buf_len_2; | ||
3527 | #elif defined(__LITTLE_ENDIAN) | ||
3528 | u16 buf_len_2; | ||
3529 | u16 ox_id; | ||
2070 | #endif | 3530 | #endif |
2071 | }; | 3531 | }; |
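The 7.0 layout moves the cached SGEs into explicit hi/lo/len triplets instead of the old common_fcoe_sgl. A hedged sketch of caching the first SGE of an FCP_DATA transfer (the helper name and its callers are hypothetical; the hi/lo split of the DMA address is an assumption):

static void example_cache_first_sge(struct xstorm_fcoe_fcp_data *fcp,
				    dma_addr_t buf_dma, u16 len)
{
	fcp->buf_addr_hi_0 = (u32)((u64)buf_dma >> 32);
	fcp->buf_addr_lo_0 = (u32)buf_dma;
	fcp->buf_len_0 = len;
	fcp->cached_sge_idx = 0;
	fcp->cached_num_sges = 1;
	fcp->cached_sge_off = 0;
}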
2072 | 3532 | ||
2073 | struct fcoe_task_ctx_entry_tx_only { | 3533 | /* |
2074 | union fcoe_sgl_ctx sgl_ctx; | 3534 | * vlan configuration |
3535 | */ | ||
3536 | struct xstorm_fcoe_vlan_conf { | ||
3537 | u8 vlan_conf; | ||
3538 | #define XSTORM_FCOE_VLAN_CONF_PRIORITY (0x7<<0) | ||
3539 | #define XSTORM_FCOE_VLAN_CONF_PRIORITY_SHIFT 0 | ||
3540 | #define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG (0x1<<3) | ||
3541 | #define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG_SHIFT 3 | ||
3542 | #define XSTORM_FCOE_VLAN_CONF_RESERVED (0xF<<4) | ||
3543 | #define XSTORM_FCOE_VLAN_CONF_RESERVED_SHIFT 4 | ||
2075 | }; | 3544 | }; |
2076 | 3545 | ||
2077 | struct xstorm_fcoe_task_ctx_entry_rd { | 3546 | /* |
2078 | struct fcoe_task_ctx_entry_tx_only tx_wr; | 3547 | * FCoE 16-bits vlan structure |
2079 | struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd; | 3548 | */ |
2080 | struct fcoe_task_ctx_entry_tx_rx_cmn cmn; | 3549 | struct fcoe_vlan_fields { |
2081 | struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd; | 3550 | u16 fields; |
3551 | #define FCOE_VLAN_FIELDS_VID (0xFFF<<0) | ||
3552 | #define FCOE_VLAN_FIELDS_VID_SHIFT 0 | ||
3553 | #define FCOE_VLAN_FIELDS_CLI (0x1<<12) | ||
3554 | #define FCOE_VLAN_FIELDS_CLI_SHIFT 12 | ||
3555 | #define FCOE_VLAN_FIELDS_PRI (0x7<<13) | ||
3556 | #define FCOE_VLAN_FIELDS_PRI_SHIFT 13 | ||
2082 | }; | 3557 | }; |
2083 | 3558 | ||
2084 | /* | 3559 | /* |
2085 | * Cached SGEs | 3560 | * FCoE 16-bits vlan union |
2086 | */ | 3561 | */ |
2087 | struct common_fcoe_sgl { | 3562 | union fcoe_vlan_field_union { |
2088 | struct fcoe_bd_ctx sge[2]; | 3563 | struct fcoe_vlan_fields fields; |
3564 | u16 val; | ||
2089 | }; | 3565 | }; |
2090 | 3566 | ||
2091 | /* | 3567 | /* |
2092 | * FCP_DATA parameters required for transmission | 3568 | * FCoE 16-bits vlan, vif union |
2093 | */ | 3569 | */ |
2094 | struct xstorm_fcoe_fcp_data { | 3570 | union fcoe_vlan_vif_field_union { |
2095 | u32 io_rem; | 3571 | union fcoe_vlan_field_union vlan; |
2096 | #if defined(__BIG_ENDIAN) | 3572 | u16 vif; |
2097 | u16 cached_sge_off; | ||
2098 | u8 cached_num_sges; | ||
2099 | u8 cached_sge_idx; | ||
2100 | #elif defined(__LITTLE_ENDIAN) | ||
2101 | u8 cached_sge_idx; | ||
2102 | u8 cached_num_sges; | ||
2103 | u16 cached_sge_off; | ||
2104 | #endif | ||
2105 | struct common_fcoe_sgl cached_sgl; | ||
2106 | }; | 3573 | }; |
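The multi-function value can carry either a VLAN tag or a VIF. A sketch of packing VID, CFI and priority with the macros above (the helper name is hypothetical):

static u16 example_pack_fcoe_vlan(u16 vid, u8 cfi, u8 prio)
{
	union fcoe_vlan_field_union v;

	v.fields.fields = ((vid << FCOE_VLAN_FIELDS_VID_SHIFT) &
			   FCOE_VLAN_FIELDS_VID) |
			  ((cfi << FCOE_VLAN_FIELDS_CLI_SHIFT) &
			   FCOE_VLAN_FIELDS_CLI) |
			  ((prio << FCOE_VLAN_FIELDS_PRI_SHIFT) &
			   FCOE_VLAN_FIELDS_PRI);
	return v.val;
}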
2107 | 3574 | ||
2108 | /* | 3575 | /* |
@@ -2110,18 +3577,18 @@ struct xstorm_fcoe_fcp_data { | |||
2110 | */ | 3577 | */ |
2111 | struct xstorm_fcoe_context_section { | 3578 | struct xstorm_fcoe_context_section { |
2112 | #if defined(__BIG_ENDIAN) | 3579 | #if defined(__BIG_ENDIAN) |
2113 | u8 vlan_flag; | 3580 | u8 cs_ctl; |
2114 | u8 s_id[3]; | 3581 | u8 s_id[3]; |
2115 | #elif defined(__LITTLE_ENDIAN) | 3582 | #elif defined(__LITTLE_ENDIAN) |
2116 | u8 s_id[3]; | 3583 | u8 s_id[3]; |
2117 | u8 vlan_flag; | 3584 | u8 cs_ctl; |
2118 | #endif | 3585 | #endif |
2119 | #if defined(__BIG_ENDIAN) | 3586 | #if defined(__BIG_ENDIAN) |
2120 | u8 func_id; | 3587 | u8 rctl; |
2121 | u8 d_id[3]; | 3588 | u8 d_id[3]; |
2122 | #elif defined(__LITTLE_ENDIAN) | 3589 | #elif defined(__LITTLE_ENDIAN) |
2123 | u8 d_id[3]; | 3590 | u8 d_id[3]; |
2124 | u8 func_id; | 3591 | u8 rctl; |
2125 | #endif | 3592 | #endif |
2126 | #if defined(__BIG_ENDIAN) | 3593 | #if defined(__BIG_ENDIAN) |
2127 | u16 sq_xfrq_lcq_confq_size; | 3594 | u16 sq_xfrq_lcq_confq_size; |
@@ -2133,56 +3600,84 @@ struct xstorm_fcoe_context_section { | |||
2133 | u32 lcq_prod; | 3600 | u32 lcq_prod; |
2134 | #if defined(__BIG_ENDIAN) | 3601 | #if defined(__BIG_ENDIAN) |
2135 | u8 port_id; | 3602 | u8 port_id; |
2136 | u8 tx_max_conc_seqs_c3; | 3603 | u8 func_id; |
2137 | u8 seq_id; | 3604 | u8 seq_id; |
2138 | struct xstorm_fcoe_context_flags tx_flags; | 3605 | struct xstorm_fcoe_context_flags tx_flags; |
2139 | #elif defined(__LITTLE_ENDIAN) | 3606 | #elif defined(__LITTLE_ENDIAN) |
2140 | struct xstorm_fcoe_context_flags tx_flags; | 3607 | struct xstorm_fcoe_context_flags tx_flags; |
2141 | u8 seq_id; | 3608 | u8 seq_id; |
2142 | u8 tx_max_conc_seqs_c3; | 3609 | u8 func_id; |
2143 | u8 port_id; | 3610 | u8 port_id; |
2144 | #endif | 3611 | #endif |
2145 | #if defined(__BIG_ENDIAN) | 3612 | #if defined(__BIG_ENDIAN) |
2146 | u16 verify_tx_seq; | 3613 | u16 mtu; |
2147 | u8 func_mode; | 3614 | u8 func_mode; |
2148 | u8 vnic_id; | 3615 | u8 vnic_id; |
2149 | #elif defined(__LITTLE_ENDIAN) | 3616 | #elif defined(__LITTLE_ENDIAN) |
2150 | u8 vnic_id; | 3617 | u8 vnic_id; |
2151 | u8 func_mode; | 3618 | u8 func_mode; |
2152 | u16 verify_tx_seq; | 3619 | u16 mtu; |
2153 | #endif | 3620 | #endif |
2154 | struct regpair confq_curr_page_addr; | 3621 | struct regpair confq_curr_page_addr; |
2155 | struct fcoe_cached_wqe cached_wqe[8]; | 3622 | struct fcoe_cached_wqe cached_wqe[8]; |
2156 | struct regpair lcq_base_addr; | 3623 | struct regpair lcq_base_addr; |
2157 | struct xstorm_fcoe_task_ctx_entry_rd tce; | 3624 | struct xstorm_fcoe_tce tce; |
2158 | struct xstorm_fcoe_fcp_data fcp_data; | 3625 | struct xstorm_fcoe_fcp_data fcp_data; |
2159 | #if defined(__BIG_ENDIAN) | 3626 | #if defined(__BIG_ENDIAN) |
3627 | u8 tx_max_conc_seqs_c3; | ||
3628 | u8 vlan_flag; | ||
3629 | u8 dcb_val; | ||
3630 | u8 data_pb_cmd_size; | ||
3631 | #elif defined(__LITTLE_ENDIAN) | ||
3632 | u8 data_pb_cmd_size; | ||
3633 | u8 dcb_val; | ||
3634 | u8 vlan_flag; | ||
3635 | u8 tx_max_conc_seqs_c3; | ||
3636 | #endif | ||
3637 | #if defined(__BIG_ENDIAN) | ||
2160 | u16 fcoe_tx_stat_params_ram_addr; | 3638 | u16 fcoe_tx_stat_params_ram_addr; |
2161 | u16 cmng_port_ram_addr; | 3639 | u16 fcoe_tx_fc_seq_ram_addr; |
2162 | #elif defined(__LITTLE_ENDIAN) | 3640 | #elif defined(__LITTLE_ENDIAN) |
2163 | u16 cmng_port_ram_addr; | 3641 | u16 fcoe_tx_fc_seq_ram_addr; |
2164 | u16 fcoe_tx_stat_params_ram_addr; | 3642 | u16 fcoe_tx_stat_params_ram_addr; |
2165 | #endif | 3643 | #endif |
2166 | #if defined(__BIG_ENDIAN) | 3644 | #if defined(__BIG_ENDIAN) |
2167 | u8 fcp_cmd_pb_cmd_size; | 3645 | u8 fcp_cmd_line_credit; |
2168 | u8 eth_hdr_size; | 3646 | u8 eth_hdr_size; |
2169 | u16 pbf_addr; | 3647 | u16 pbf_addr; |
2170 | #elif defined(__LITTLE_ENDIAN) | 3648 | #elif defined(__LITTLE_ENDIAN) |
2171 | u16 pbf_addr; | 3649 | u16 pbf_addr; |
2172 | u8 eth_hdr_size; | 3650 | u8 eth_hdr_size; |
2173 | u8 fcp_cmd_pb_cmd_size; | 3651 | u8 fcp_cmd_line_credit; |
2174 | #endif | 3652 | #endif |
2175 | #if defined(__BIG_ENDIAN) | 3653 | #if defined(__BIG_ENDIAN) |
2176 | u8 reserved2[2]; | 3654 | union fcoe_vlan_vif_field_union multi_func_val; |
3655 | u8 page_log_size; | ||
3656 | struct xstorm_fcoe_vlan_conf orig_vlan_conf; | ||
3657 | #elif defined(__LITTLE_ENDIAN) | ||
3658 | struct xstorm_fcoe_vlan_conf orig_vlan_conf; | ||
3659 | u8 page_log_size; | ||
3660 | union fcoe_vlan_vif_field_union multi_func_val; | ||
3661 | #endif | ||
3662 | #if defined(__BIG_ENDIAN) | ||
3663 | u16 fcp_cmd_frame_size; | ||
3664 | u16 pbf_addr_ff; | ||
3665 | #elif defined(__LITTLE_ENDIAN) | ||
3666 | u16 pbf_addr_ff; | ||
3667 | u16 fcp_cmd_frame_size; | ||
3668 | #endif | ||
3669 | #if defined(__BIG_ENDIAN) | ||
3670 | u8 vlan_num; | ||
2177 | u8 cos; | 3671 | u8 cos; |
2178 | u8 dcb_version; | 3672 | u8 cache_xfrq_cons; |
3673 | u8 cache_sq_cons; | ||
2179 | #elif defined(__LITTLE_ENDIAN) | 3674 | #elif defined(__LITTLE_ENDIAN) |
2180 | u8 dcb_version; | 3675 | u8 cache_sq_cons; |
3676 | u8 cache_xfrq_cons; | ||
2181 | u8 cos; | 3677 | u8 cos; |
2182 | u8 reserved2[2]; | 3678 | u8 vlan_num; |
2183 | #endif | 3679 | #endif |
2184 | u32 reserved3; | 3680 | u32 verify_tx_seq; |
2185 | struct regpair reserved4[2]; | ||
2186 | }; | 3681 | }; |
2187 | 3682 | ||
2188 | /* | 3683 | /* |
@@ -2207,6 +3702,181 @@ struct fcoe_context { | |||
2207 | }; | 3702 | }; |
2208 | 3703 | ||
2209 | /* | 3704 | /* |
3705 | * FCoE init params passed by driver to FW in FCoE init ramrod | ||
3706 | * $$KEEP_ENDIANNESS$$ | ||
3707 | */ | ||
3708 | struct fcoe_init_ramrod_params { | ||
3709 | struct fcoe_kwqe_init1 init_kwqe1; | ||
3710 | struct fcoe_kwqe_init2 init_kwqe2; | ||
3711 | struct fcoe_kwqe_init3 init_kwqe3; | ||
3712 | struct regpair eq_pbl_base; | ||
3713 | __le32 eq_pbl_size; | ||
3714 | __le32 reserved2; | ||
3715 | __le16 eq_prod; | ||
3716 | __le16 sb_num; | ||
3717 | u8 sb_id; | ||
3718 | u8 reserved0; | ||
3719 | __le16 reserved1; | ||
3720 | }; | ||
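Structures tagged $$KEEP_ENDIANNESS$$ use explicit __le types, so the driver converts each field on assignment rather than relying on the mirrored declarations used by the storm contexts. A hedged sketch (the helper name and value sources are hypothetical):

static void example_fill_init_ramrod(struct fcoe_init_ramrod_params *rp,
				     u32 eq_pbl_size, u16 eq_prod,
				     u16 sb_num, u8 sb_id)
{
	rp->eq_pbl_size = cpu_to_le32(eq_pbl_size);
	rp->eq_prod = cpu_to_le16(eq_prod);
	rp->sb_num = cpu_to_le16(sb_num);
	rp->sb_id = sb_id;
}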
3721 | |||
3722 | /* | ||
3723 | * FCoE statistics params buffer passed by driver to FW in FCoE statistics | ||
3724 | * ramrod $$KEEP_ENDIANNESS$$ | ||
3725 | */ | ||
3726 | struct fcoe_stat_ramrod_params { | ||
3727 | struct fcoe_kwqe_stat stat_kwqe; | ||
3728 | }; | ||
3729 | |||
3730 | /* | ||
3731 | * CQ DB CQ producer and pending completion counter | ||
3732 | */ | ||
3733 | struct iscsi_cq_db_prod_pnd_cmpltn_cnt { | ||
3734 | #if defined(__BIG_ENDIAN) | ||
3735 | u16 cntr; | ||
3736 | u16 prod; | ||
3737 | #elif defined(__LITTLE_ENDIAN) | ||
3738 | u16 prod; | ||
3739 | u16 cntr; | ||
3740 | #endif | ||
3741 | }; | ||
3742 | |||
3743 | /* | ||
3744 | * CQ DB pending completion ITT array | ||
3745 | */ | ||
3746 | struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr { | ||
3747 | struct iscsi_cq_db_prod_pnd_cmpltn_cnt prod_pend_comp[8]; | ||
3748 | }; | ||
3749 | |||
3750 | /* | ||
3751 | * Cstorm CQ sequence to notify array, updated by driver | ||
3752 | */ | ||
3753 | struct iscsi_cq_db_sqn_2_notify_arr { | ||
3754 | u16 sqn[8]; | ||
3755 | }; | ||
3756 | |||
3757 | /* | ||
3758 | * Cstorm iSCSI Storm Context | ||
3759 | */ | ||
3760 | struct cstorm_iscsi_st_context { | ||
3761 | struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr cq_c_prod_pend_comp_ctr_arr; | ||
3762 | struct iscsi_cq_db_sqn_2_notify_arr cq_c_prod_sqn_arr; | ||
3763 | struct iscsi_cq_db_sqn_2_notify_arr cq_c_sqn_2_notify_arr; | ||
3764 | struct regpair hq_pbl_base; | ||
3765 | struct regpair hq_curr_pbe; | ||
3766 | struct regpair task_pbl_base; | ||
3767 | struct regpair cq_db_base; | ||
3768 | #if defined(__BIG_ENDIAN) | ||
3769 | u16 hq_bd_itt; | ||
3770 | u16 iscsi_conn_id; | ||
3771 | #elif defined(__LITTLE_ENDIAN) | ||
3772 | u16 iscsi_conn_id; | ||
3773 | u16 hq_bd_itt; | ||
3774 | #endif | ||
3775 | u32 hq_bd_data_segment_len; | ||
3776 | u32 hq_bd_buffer_offset; | ||
3777 | #if defined(__BIG_ENDIAN) | ||
3778 | u8 rsrv; | ||
3779 | u8 cq_proc_en_bit_map; | ||
3780 | u8 cq_pend_comp_itt_valid_bit_map; | ||
3781 | u8 hq_bd_opcode; | ||
3782 | #elif defined(__LITTLE_ENDIAN) | ||
3783 | u8 hq_bd_opcode; | ||
3784 | u8 cq_pend_comp_itt_valid_bit_map; | ||
3785 | u8 cq_proc_en_bit_map; | ||
3786 | u8 rsrv; | ||
3787 | #endif | ||
3788 | u32 hq_tcp_seq; | ||
3789 | #if defined(__BIG_ENDIAN) | ||
3790 | u16 flags; | ||
3791 | #define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0) | ||
3792 | #define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0 | ||
3793 | #define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1) | ||
3794 | #define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1 | ||
3795 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2) | ||
3796 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2 | ||
3797 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3) | ||
3798 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3 | ||
3799 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4) | ||
3800 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4 | ||
3801 | #define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5) | ||
3802 | #define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5 | ||
3803 | u16 hq_cons; | ||
3804 | #elif defined(__LITTLE_ENDIAN) | ||
3805 | u16 hq_cons; | ||
3806 | u16 flags; | ||
3807 | #define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0) | ||
3808 | #define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0 | ||
3809 | #define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1) | ||
3810 | #define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1 | ||
3811 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2) | ||
3812 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2 | ||
3813 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3) | ||
3814 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3 | ||
3815 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4) | ||
3816 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4 | ||
3817 | #define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5) | ||
3818 | #define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5 | ||
3819 | #endif | ||
3820 | struct regpair rsrv1; | ||
3821 | }; | ||
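A sketch of driving the digest-enable bits in the Cstorm flags word, using only the macros defined above (the helper name and its caller are hypothetical):

static void example_enable_digests(struct cstorm_iscsi_st_context *cctx,
				   bool hdr_digest, bool data_digest)
{
	cctx->flags &= ~(CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN |
			 CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN);
	if (hdr_digest)
		cctx->flags |= CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN;
	if (data_digest)
		cctx->flags |= CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN;
}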
3822 | |||
3823 | |||
3824 | /* | ||
3825 | * SCSI read/write SQ WQE | ||
3826 | */ | ||
3827 | struct iscsi_cmd_pdu_hdr_little_endian { | ||
3828 | #if defined(__BIG_ENDIAN) | ||
3829 | u8 opcode; | ||
3830 | u8 op_attr; | ||
3831 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0) | ||
3832 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0 | ||
3833 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3) | ||
3834 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3 | ||
3835 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5) | ||
3836 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5 | ||
3837 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6) | ||
3838 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6 | ||
3839 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) | ||
3840 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 | ||
3841 | u16 rsrv0; | ||
3842 | #elif defined(__LITTLE_ENDIAN) | ||
3843 | u16 rsrv0; | ||
3844 | u8 op_attr; | ||
3845 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0) | ||
3846 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0 | ||
3847 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3) | ||
3848 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3 | ||
3849 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5) | ||
3850 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5 | ||
3851 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6) | ||
3852 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6 | ||
3853 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) | ||
3854 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 | ||
3855 | u8 opcode; | ||
3856 | #endif | ||
3857 | u32 data_fields; | ||
3858 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) | ||
3859 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 | ||
3860 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) | ||
3861 | #define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 | ||
3862 | struct regpair lun; | ||
3863 | u32 itt; | ||
3864 | u32 expected_data_transfer_length; | ||
3865 | u32 cmd_sn; | ||
3866 | u32 exp_stat_sn; | ||
3867 | u32 scsi_command_block[4]; | ||
3868 | }; | ||
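A hedged sketch of building the header for a SCSI READ work request with the fields and flag macros above (the helper name is hypothetical, the iSCSI opcode byte is left to the caller, and digest/AHS handling is omitted):

static void example_fill_scsi_read_hdr(struct iscsi_cmd_pdu_hdr_little_endian *hdr,
				       u32 itt, u32 xfer_len, u32 cmd_sn)
{
	hdr->op_attr = ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG |
		       ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG;
	hdr->data_fields = 0;		/* no immediate data, no AHS */
	hdr->itt = itt;
	hdr->expected_data_transfer_length = xfer_len;
	hdr->cmd_sn = cmd_sn;
}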
3869 | |||
3870 | |||
3871 | /* | ||
3872 | * Buffer per connection, used in Tstorm | ||
3873 | */ | ||
3874 | struct iscsi_conn_buf { | ||
3875 | struct regpair reserved[8]; | ||
3876 | }; | ||
3877 | |||
3878 | |||
3879 | /* | ||
2210 | * iSCSI context region, used only in iSCSI | 3880 | * iSCSI context region, used only in iSCSI |
2211 | */ | 3881 | */ |
2212 | struct ustorm_iscsi_rq_db { | 3882 | struct ustorm_iscsi_rq_db { |
@@ -2271,11 +3941,13 @@ struct ustorm_iscsi_placement_db { | |||
2271 | u32 local_sge_1_address_hi; | 3941 | u32 local_sge_1_address_hi; |
2272 | u32 local_sge_1_address_lo; | 3942 | u32 local_sge_1_address_lo; |
2273 | #if defined(__BIG_ENDIAN) | 3943 | #if defined(__BIG_ENDIAN) |
2274 | u16 reserved6; | 3944 | u8 exp_padding_2b; |
3945 | u8 nal_len_3b; | ||
2275 | u16 local_sge_1_size; | 3946 | u16 local_sge_1_size; |
2276 | #elif defined(__LITTLE_ENDIAN) | 3947 | #elif defined(__LITTLE_ENDIAN) |
2277 | u16 local_sge_1_size; | 3948 | u16 local_sge_1_size; |
2278 | u16 reserved6; | 3949 | u8 nal_len_3b; |
3950 | u8 exp_padding_2b; | ||
2279 | #endif | 3951 | #endif |
2280 | #if defined(__BIG_ENDIAN) | 3952 | #if defined(__BIG_ENDIAN) |
2281 | u8 sgl_size; | 3953 | u8 sgl_size; |
@@ -2300,12 +3972,8 @@ struct ustorm_iscsi_placement_db { | |||
2300 | u32 nal; | 3972 | u32 nal; |
2301 | #define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE (0xFFFFFF<<0) | 3973 | #define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE (0xFFFFFF<<0) |
2302 | #define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT 0 | 3974 | #define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT 0 |
2303 | #define USTORM_ISCSI_PLACEMENT_DB_EXP_PADDING_2B (0x3<<24) | 3975 | #define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B (0xFF<<24) |
2304 | #define USTORM_ISCSI_PLACEMENT_DB_EXP_PADDING_2B_SHIFT 24 | 3976 | #define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 24 |
2305 | #define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B (0x7<<26) | ||
2306 | #define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 26 | ||
2307 | #define USTORM_ISCSI_PLACEMENT_DB_NAL_LEN_3B (0x7<<29) | ||
2308 | #define USTORM_ISCSI_PLACEMENT_DB_NAL_LEN_3B_SHIFT 29 | ||
2309 | }; | 3977 | }; |
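The EXP_DIGEST field widens to a full byte in 7.0, while the old 2-bit padding and 3-bit NAL-length fields move into the exp_padding_2b and nal_len_3b bytes added above. Decoding the remaining bits of nal stays plain mask-and-shift (the helper names are hypothetical):

static u32 example_nal_rem_sge_size(u32 nal)
{
	return (nal & USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE) >>
	       USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT;
}

static u8 example_nal_exp_digest(u32 nal)
{
	return (nal & USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B) >>
	       USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT;
}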
2310 | 3978 | ||
2311 | /* | 3979 | /* |
@@ -2509,7 +4177,13 @@ struct tstorm_tcp_st_context_section { | |||
2509 | u16 vlan_id; | 4177 | u16 vlan_id; |
2510 | u16 lsb_mac_address; | 4178 | u16 lsb_mac_address; |
2511 | #endif | 4179 | #endif |
2512 | u32 msb_mac_address; | 4180 | #if defined(__BIG_ENDIAN) |
4181 | u16 msb_mac_address; | ||
4182 | u16 mid_mac_address; | ||
4183 | #elif defined(__LITTLE_ENDIAN) | ||
4184 | u16 mid_mac_address; | ||
4185 | u16 msb_mac_address; | ||
4186 | #endif | ||
2513 | u32 rightmost_received_seq; | 4187 | u32 rightmost_received_seq; |
2514 | }; | 4188 | }; |
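In the 7.0 layout the MAC address is stored as three 16-bit words (msb/mid/lsb) rather than the old 16-bit + 32-bit split. A low-confidence sketch of how a 6-byte MAC might be packed, assuming the most-significant byte of the address goes into the high byte of msb_mac_address (the helper name is hypothetical):

static void example_set_section_mac(struct tstorm_tcp_st_context_section *sec,
				    const u8 *mac)
{
	sec->msb_mac_address = (mac[0] << 8) | mac[1];
	sec->mid_mac_address = (mac[2] << 8) | mac[3];
	sec->lsb_mac_address = (mac[4] << 8) | mac[5];
}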
2515 | 4189 | ||
@@ -2534,13 +4208,7 @@ struct iscsi_term_vars { | |||
2534 | * iSCSI context region, used only in iSCSI | 4208 | * iSCSI context region, used only in iSCSI |
2535 | */ | 4209 | */ |
2536 | struct tstorm_iscsi_st_context_section { | 4210 | struct tstorm_iscsi_st_context_section { |
2537 | #if defined(__BIG_ENDIAN) | 4211 | u32 nalPayload; |
2538 | u16 rem_tcp_data_len; | ||
2539 | u16 brb_offset; | ||
2540 | #elif defined(__LITTLE_ENDIAN) | ||
2541 | u16 brb_offset; | ||
2542 | u16 rem_tcp_data_len; | ||
2543 | #endif | ||
2544 | u32 b2nh; | 4212 | u32 b2nh; |
2545 | #if defined(__BIG_ENDIAN) | 4213 | #if defined(__BIG_ENDIAN) |
2546 | u16 rq_cons; | 4214 | u16 rq_cons; |
@@ -2555,8 +4223,10 @@ struct tstorm_iscsi_st_context_section { | |||
2555 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3 | 4223 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3 |
2556 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4) | 4224 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4) |
2557 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4 | 4225 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4 |
2558 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV (0x7<<5) | 4226 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5) |
2559 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV_SHIFT 5 | 4227 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5 |
4228 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7) | ||
4229 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7 | ||
2560 | u8 hdr_bytes_2_fetch; | 4230 | u8 hdr_bytes_2_fetch; |
2561 | #elif defined(__LITTLE_ENDIAN) | 4231 | #elif defined(__LITTLE_ENDIAN) |
2562 | u8 hdr_bytes_2_fetch; | 4232 | u8 hdr_bytes_2_fetch; |
@@ -2571,18 +4241,20 @@ struct tstorm_iscsi_st_context_section { | |||
2571 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3 | 4241 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3 |
2572 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4) | 4242 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4) |
2573 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4 | 4243 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4 |
2574 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV (0x7<<5) | 4244 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5) |
2575 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV_SHIFT 5 | 4245 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5 |
4246 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7) | ||
4247 | #define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7 | ||
2576 | u16 rq_cons; | 4248 | u16 rq_cons; |
2577 | #endif | 4249 | #endif |
2578 | struct regpair rq_db_phy_addr; | 4250 | struct regpair rq_db_phy_addr; |
2579 | #if defined(__BIG_ENDIAN) | 4251 | #if defined(__BIG_ENDIAN) |
2580 | struct iscsi_term_vars term_vars; | 4252 | struct iscsi_term_vars term_vars; |
2581 | u8 scratchpad_idx; | 4253 | u8 rsrv1; |
2582 | u16 iscsi_conn_id; | 4254 | u16 iscsi_conn_id; |
2583 | #elif defined(__LITTLE_ENDIAN) | 4255 | #elif defined(__LITTLE_ENDIAN) |
2584 | u16 iscsi_conn_id; | 4256 | u16 iscsi_conn_id; |
2585 | u8 scratchpad_idx; | 4257 | u8 rsrv1; |
2586 | struct iscsi_term_vars term_vars; | 4258 | struct iscsi_term_vars term_vars; |
2587 | #endif | 4259 | #endif |
2588 | u32 process_nxt; | 4260 | u32 process_nxt; |
@@ -2597,724 +4269,6 @@ struct tstorm_iscsi_st_context { | |||
2597 | }; | 4269 | }; |
2598 | 4270 | ||
2599 | /* | 4271 | /* |
2600 | * The tcp aggregative context section of Xstorm | ||
2601 | */ | ||
2602 | struct xstorm_tcp_tcp_ag_context_section { | ||
2603 | #if defined(__BIG_ENDIAN) | ||
2604 | u8 __tcp_agg_vars1; | ||
2605 | u8 __da_cnt; | ||
2606 | u16 mss; | ||
2607 | #elif defined(__LITTLE_ENDIAN) | ||
2608 | u16 mss; | ||
2609 | u8 __da_cnt; | ||
2610 | u8 __tcp_agg_vars1; | ||
2611 | #endif | ||
2612 | u32 snd_nxt; | ||
2613 | u32 tx_wnd; | ||
2614 | u32 snd_una; | ||
2615 | u32 local_adv_wnd; | ||
2616 | #if defined(__BIG_ENDIAN) | ||
2617 | u8 __agg_val8_th; | ||
2618 | u8 __agg_val8; | ||
2619 | u16 tcp_agg_vars2; | ||
2620 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0) | ||
2621 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0 | ||
2622 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1) | ||
2623 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1 | ||
2624 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2) | ||
2625 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2 | ||
2626 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) | ||
2627 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 | ||
2628 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) | ||
2629 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 | ||
2630 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5) | ||
2631 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5 | ||
2632 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6) | ||
2633 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6 | ||
2634 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN (0x1<<7) | ||
2635 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN_SHIFT 7 | ||
2636 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8) | ||
2637 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8 | ||
2638 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) | ||
2639 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 | ||
2640 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) | ||
2641 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 | ||
2642 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) | ||
2643 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 | ||
2644 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14) | ||
2645 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14 | ||
2646 | #elif defined(__LITTLE_ENDIAN) | ||
2647 | u16 tcp_agg_vars2; | ||
2648 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0) | ||
2649 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0 | ||
2650 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1) | ||
2651 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1 | ||
2652 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2) | ||
2653 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2 | ||
2654 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) | ||
2655 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 | ||
2656 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) | ||
2657 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 | ||
2658 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5) | ||
2659 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5 | ||
2660 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6) | ||
2661 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6 | ||
2662 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN (0x1<<7) | ||
2663 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN_SHIFT 7 | ||
2664 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8) | ||
2665 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8 | ||
2666 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) | ||
2667 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 | ||
2668 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) | ||
2669 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 | ||
2670 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) | ||
2671 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 | ||
2672 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14) | ||
2673 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14 | ||
2674 | u8 __agg_val8; | ||
2675 | u8 __agg_val8_th; | ||
2676 | #endif | ||
2677 | u32 ack_to_far_end; | ||
2678 | u32 rto_timer; | ||
2679 | u32 ka_timer; | ||
2680 | u32 ts_to_echo; | ||
2681 | #if defined(__BIG_ENDIAN) | ||
2682 | u16 __agg_val7_th; | ||
2683 | u16 __agg_val7; | ||
2684 | #elif defined(__LITTLE_ENDIAN) | ||
2685 | u16 __agg_val7; | ||
2686 | u16 __agg_val7_th; | ||
2687 | #endif | ||
2688 | #if defined(__BIG_ENDIAN) | ||
2689 | u8 __tcp_agg_vars5; | ||
2690 | u8 __tcp_agg_vars4; | ||
2691 | u8 __tcp_agg_vars3; | ||
2692 | u8 __force_pure_ack_cnt; | ||
2693 | #elif defined(__LITTLE_ENDIAN) | ||
2694 | u8 __force_pure_ack_cnt; | ||
2695 | u8 __tcp_agg_vars3; | ||
2696 | u8 __tcp_agg_vars4; | ||
2697 | u8 __tcp_agg_vars5; | ||
2698 | #endif | ||
2699 | u32 tcp_agg_vars6; | ||
2700 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN (0x1<<0) | ||
2701 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN_SHIFT 0 | ||
2702 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_EN (0x1<<1) | ||
2703 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_EN_SHIFT 1 | ||
2704 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN (0x1<<2) | ||
2705 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN_SHIFT 2 | ||
2706 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<3) | ||
2707 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 3 | ||
2708 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG (0x1<<4) | ||
2709 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG_SHIFT 4 | ||
2710 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG (0x1<<5) | ||
2711 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG_SHIFT 5 | ||
2712 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF (0x3<<6) | ||
2713 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF_SHIFT 6 | ||
2714 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF (0x3<<8) | ||
2715 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_SHIFT 8 | ||
2716 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF (0x3<<10) | ||
2717 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_SHIFT 10 | ||
2718 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF (0x3<<12) | ||
2719 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_SHIFT 12 | ||
2720 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF (0x3<<14) | ||
2721 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_SHIFT 14 | ||
2722 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF (0x3<<16) | ||
2723 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF_SHIFT 16 | ||
2724 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF (0x3<<18) | ||
2725 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF_SHIFT 18 | ||
2726 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF (0x3<<20) | ||
2727 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF_SHIFT 20 | ||
2728 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF (0x3<<22) | ||
2729 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF_SHIFT 22 | ||
2730 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF (0x3<<24) | ||
2731 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF_SHIFT 24 | ||
2732 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG (0x1<<26) | ||
2733 | #define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG_SHIFT 26 | ||
2734 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71 (0x1<<27) | ||
2735 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71_SHIFT 27 | ||
2736 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY (0x1<<28) | ||
2737 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY_SHIFT 28 | ||
2738 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG (0x1<<29) | ||
2739 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG_SHIFT 29 | ||
2740 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG (0x1<<30) | ||
2741 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG_SHIFT 30 | ||
2742 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG (0x1<<31) | ||
2743 | #define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG_SHIFT 31 | ||
2744 | #if defined(__BIG_ENDIAN) | ||
2745 | u16 __agg_misc6; | ||
2746 | u16 __tcp_agg_vars7; | ||
2747 | #elif defined(__LITTLE_ENDIAN) | ||
2748 | u16 __tcp_agg_vars7; | ||
2749 | u16 __agg_misc6; | ||
2750 | #endif | ||
2751 | u32 __agg_val10; | ||
2752 | u32 __agg_val10_th; | ||
2753 | #if defined(__BIG_ENDIAN) | ||
2754 | u16 __reserved3; | ||
2755 | u8 __reserved2; | ||
2756 | u8 __da_only_cnt; | ||
2757 | #elif defined(__LITTLE_ENDIAN) | ||
2758 | u8 __da_only_cnt; | ||
2759 | u8 __reserved2; | ||
2760 | u16 __reserved3; | ||
2761 | #endif | ||
2762 | }; | ||
2763 | |||
2764 | /* | ||
2765 | * The iscsi aggregative context of Xstorm | ||
2766 | */ | ||
2767 | struct xstorm_iscsi_ag_context { | ||
2768 | #if defined(__BIG_ENDIAN) | ||
2769 | u16 agg_val1; | ||
2770 | u8 agg_vars1; | ||
2771 | #define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | ||
2772 | #define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | ||
2773 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | ||
2774 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | ||
2775 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | ||
2776 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | ||
2777 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | ||
2778 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | ||
2779 | #define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) | ||
2780 | #define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 | ||
2781 | #define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5) | ||
2782 | #define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5 | ||
2783 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) | ||
2784 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 | ||
2785 | #define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) | ||
2786 | #define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 | ||
2787 | u8 state; | ||
2788 | #elif defined(__LITTLE_ENDIAN) | ||
2789 | u8 state; | ||
2790 | u8 agg_vars1; | ||
2791 | #define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | ||
2792 | #define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | ||
2793 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | ||
2794 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | ||
2795 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | ||
2796 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | ||
2797 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | ||
2798 | #define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | ||
2799 | #define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) | ||
2800 | #define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 | ||
2801 | #define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5) | ||
2802 | #define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5 | ||
2803 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) | ||
2804 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 | ||
2805 | #define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) | ||
2806 | #define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 | ||
2807 | u16 agg_val1; | ||
2808 | #endif | ||
2809 | #if defined(__BIG_ENDIAN) | ||
2810 | u8 cdu_reserved; | ||
2811 | u8 __agg_vars4; | ||
2812 | u8 agg_vars3; | ||
2813 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) | ||
2814 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 | ||
2815 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) | ||
2816 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 | ||
2817 | u8 agg_vars2; | ||
2818 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0) | ||
2819 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0 | ||
2820 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) | ||
2821 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 | ||
2822 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3) | ||
2823 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3 | ||
2824 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4) | ||
2825 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4 | ||
2826 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5) | ||
2827 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5 | ||
2828 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) | ||
2829 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 | ||
2830 | #elif defined(__LITTLE_ENDIAN) | ||
2831 | u8 agg_vars2; | ||
2832 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0) | ||
2833 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0 | ||
2834 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) | ||
2835 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 | ||
2836 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3) | ||
2837 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3 | ||
2838 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4) | ||
2839 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4 | ||
2840 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5) | ||
2841 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5 | ||
2842 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) | ||
2843 | #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 | ||
2844 | u8 agg_vars3; | ||
2845 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) | ||
2846 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 | ||
2847 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) | ||
2848 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 | ||
2849 | u8 __agg_vars4; | ||
2850 | u8 cdu_reserved; | ||
2851 | #endif | ||
2852 | u32 more_to_send; | ||
2853 | #if defined(__BIG_ENDIAN) | ||
2854 | u16 agg_vars5; | ||
2855 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0) | ||
2856 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0 | ||
2857 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) | ||
2858 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 | ||
2859 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) | ||
2860 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 | ||
2861 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14) | ||
2862 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14 | ||
2863 | u16 sq_cons; | ||
2864 | #elif defined(__LITTLE_ENDIAN) | ||
2865 | u16 sq_cons; | ||
2866 | u16 agg_vars5; | ||
2867 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0) | ||
2868 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0 | ||
2869 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) | ||
2870 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 | ||
2871 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) | ||
2872 | #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 | ||
2873 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14) | ||
2874 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14 | ||
2875 | #endif | ||
2876 | struct xstorm_tcp_tcp_ag_context_section tcp; | ||
2877 | #if defined(__BIG_ENDIAN) | ||
2878 | u16 agg_vars7; | ||
2879 | #define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) | ||
2880 | #define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 | ||
2881 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) | ||
2882 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 | ||
2883 | #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) | ||
2884 | #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 | ||
2885 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) | ||
2886 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 | ||
2887 | #define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) | ||
2888 | #define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8 | ||
2889 | #define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) | ||
2890 | #define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 | ||
2891 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11) | ||
2892 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 | ||
2893 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12) | ||
2894 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12 | ||
2895 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13) | ||
2896 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 | ||
2897 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) | ||
2898 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 | ||
2899 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) | ||
2900 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 | ||
2901 | u8 agg_val3_th; | ||
2902 | u8 agg_vars6; | ||
2903 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0) | ||
2904 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0 | ||
2905 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3) | ||
2906 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3 | ||
2907 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6) | ||
2908 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6 | ||
2909 | #elif defined(__LITTLE_ENDIAN) | ||
2910 | u8 agg_vars6; | ||
2911 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0) | ||
2912 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0 | ||
2913 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3) | ||
2914 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3 | ||
2915 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6) | ||
2916 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6 | ||
2917 | u8 agg_val3_th; | ||
2918 | u16 agg_vars7; | ||
2919 | #define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) | ||
2920 | #define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 | ||
2921 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) | ||
2922 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 | ||
2923 | #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) | ||
2924 | #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 | ||
2925 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) | ||
2926 | #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 | ||
2927 | #define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) | ||
2928 | #define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8 | ||
2929 | #define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) | ||
2930 | #define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 | ||
2931 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11) | ||
2932 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 | ||
2933 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12) | ||
2934 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12 | ||
2935 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13) | ||
2936 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 | ||
2937 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) | ||
2938 | #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 | ||
2939 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) | ||
2940 | #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 | ||
2941 | #endif | ||
2942 | #if defined(__BIG_ENDIAN) | ||
2943 | u16 __agg_val11_th; | ||
2944 | u16 __gen_data; | ||
2945 | #elif defined(__LITTLE_ENDIAN) | ||
2946 | u16 __gen_data; | ||
2947 | u16 __agg_val11_th; | ||
2948 | #endif | ||
2949 | #if defined(__BIG_ENDIAN) | ||
2950 | u8 __reserved1; | ||
2951 | u8 __agg_val6_th; | ||
2952 | u16 __agg_val9; | ||
2953 | #elif defined(__LITTLE_ENDIAN) | ||
2954 | u16 __agg_val9; | ||
2955 | u8 __agg_val6_th; | ||
2956 | u8 __reserved1; | ||
2957 | #endif | ||
2958 | #if defined(__BIG_ENDIAN) | ||
2959 | u16 hq_prod; | ||
2960 | u16 hq_cons; | ||
2961 | #elif defined(__LITTLE_ENDIAN) | ||
2962 | u16 hq_cons; | ||
2963 | u16 hq_prod; | ||
2964 | #endif | ||
2965 | u32 agg_vars8; | ||
2966 | #define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0) | ||
2967 | #define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2_SHIFT 0 | ||
2968 | #define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3 (0xFF<<24) | ||
2969 | #define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3_SHIFT 24 | ||
2970 | #if defined(__BIG_ENDIAN) | ||
2971 | u16 r2tq_prod; | ||
2972 | u16 sq_prod; | ||
2973 | #elif defined(__LITTLE_ENDIAN) | ||
2974 | u16 sq_prod; | ||
2975 | u16 r2tq_prod; | ||
2976 | #endif | ||
2977 | #if defined(__BIG_ENDIAN) | ||
2978 | u8 agg_val3; | ||
2979 | u8 agg_val6; | ||
2980 | u8 agg_val5_th; | ||
2981 | u8 agg_val5; | ||
2982 | #elif defined(__LITTLE_ENDIAN) | ||
2983 | u8 agg_val5; | ||
2984 | u8 agg_val5_th; | ||
2985 | u8 agg_val6; | ||
2986 | u8 agg_val3; | ||
2987 | #endif | ||
2988 | #if defined(__BIG_ENDIAN) | ||
2989 | u16 __agg_misc1; | ||
2990 | u16 agg_limit1; | ||
2991 | #elif defined(__LITTLE_ENDIAN) | ||
2992 | u16 agg_limit1; | ||
2993 | u16 __agg_misc1; | ||
2994 | #endif | ||
2995 | u32 hq_cons_tcp_seq; | ||
2996 | u32 exp_stat_sn; | ||
2997 | u32 rst_seq_num; | ||
2998 | }; | ||
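The paired MASK/_SHIFT defines used throughout these aggregative-context structures are intended to be combined when reading or writing a sub-field of the containing word. A minimal sketch of that pattern follows; the HSI_GET()/HSI_SET() helpers are illustrative stand-ins rather than accessors from the driver, and only the DECISION_RULE7 defines are taken from the header above.

#include <stdint.h>
#include <stdio.h>

#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7		(0x7<<3)
#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT	3

/* hypothetical helpers built on a MASK/_SHIFT pair */
#define HSI_GET(val, name)	(((val) & (name)) >> name##_SHIFT)
#define HSI_SET(val, name, f)	(((val) & ~(name)) | (((f) << name##_SHIFT) & (name)))

int main(void)
{
	uint8_t vars = 0;	/* stands in for the byte holding the decision rules */

	vars = HSI_SET(vars, XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7, 5);
	printf("rule7 = %u (vars = 0x%02x)\n",
	       (unsigned)HSI_GET(vars, XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7), vars);
	return 0;
}

The same two-step mask-and-shift applies to every *_SHIFT pair in the structures below, whether the containing field is a u8, u16 or u32.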
2999 | |||
3000 | /* | ||
3001 | * The tcp aggregative context section of Tstorm | ||
3002 | */ | ||
3003 | struct tstorm_tcp_tcp_ag_context_section { | ||
3004 | u32 __agg_val1; | ||
3005 | #if defined(__BIG_ENDIAN) | ||
3006 | u8 __tcp_agg_vars2; | ||
3007 | u8 __agg_val3; | ||
3008 | u16 __agg_val2; | ||
3009 | #elif defined(__LITTLE_ENDIAN) | ||
3010 | u16 __agg_val2; | ||
3011 | u8 __agg_val3; | ||
3012 | u8 __tcp_agg_vars2; | ||
3013 | #endif | ||
3014 | #if defined(__BIG_ENDIAN) | ||
3015 | u16 __agg_val5; | ||
3016 | u8 __agg_val6; | ||
3017 | u8 __tcp_agg_vars3; | ||
3018 | #elif defined(__LITTLE_ENDIAN) | ||
3019 | u8 __tcp_agg_vars3; | ||
3020 | u8 __agg_val6; | ||
3021 | u16 __agg_val5; | ||
3022 | #endif | ||
3023 | u32 snd_nxt; | ||
3024 | u32 rtt_seq; | ||
3025 | u32 rtt_time; | ||
3026 | u32 __reserved66; | ||
3027 | u32 wnd_right_edge; | ||
3028 | u32 tcp_agg_vars1; | ||
3029 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0) | ||
3030 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0 | ||
3031 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1) | ||
3032 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1 | ||
3033 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2) | ||
3034 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2 | ||
3035 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4) | ||
3036 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4 | ||
3037 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6) | ||
3038 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6 | ||
3039 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7) | ||
3040 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7 | ||
3041 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8) | ||
3042 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8 | ||
3043 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9) | ||
3044 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9 | ||
3045 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10) | ||
3046 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10 | ||
3047 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11) | ||
3048 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11 | ||
3049 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12) | ||
3050 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12 | ||
3051 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13) | ||
3052 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13 | ||
3053 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14) | ||
3054 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14 | ||
3055 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16) | ||
3056 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16 | ||
3057 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18) | ||
3058 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18 | ||
3059 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19) | ||
3060 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19 | ||
3061 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20) | ||
3062 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20 | ||
3063 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21) | ||
3064 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21 | ||
3065 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22) | ||
3066 | #define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22 | ||
3067 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24) | ||
3068 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24 | ||
3069 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28) | ||
3070 | #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28 | ||
3071 | u32 snd_max; | ||
3072 | u32 snd_una; | ||
3073 | u32 __reserved2; | ||
3074 | }; | ||
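tcp_agg_vars1 follows the recurring convention of a multi-bit counter-flag (..._CF) guarded by a single-bit enable (..._CF_EN). The sketch below arms the timeout counter-flag; the defines are copied from the section above and the surrounding program is illustrative only.

#include <stdint.h>
#include <stdio.h>

#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF		(0x3<<4)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT	4
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN		(0x1<<7)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT	7

int main(void)
{
	uint32_t tcp_agg_vars1 = 0;

	/* enable the timeout counter-flag and set its 2-bit value to 1 */
	tcp_agg_vars1 |= TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN;
	tcp_agg_vars1 &= ~TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF;
	tcp_agg_vars1 |= 1 << TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT;

	printf("tcp_agg_vars1 = 0x%08x\n", tcp_agg_vars1);	/* prints 0x00000090 */
	return 0;
}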
3075 | |||
3076 | /* | ||
3077 | * The iscsi aggregative context of Tstorm | ||
3078 | */ | ||
3079 | struct tstorm_iscsi_ag_context { | ||
3080 | #if defined(__BIG_ENDIAN) | ||
3081 | u16 ulp_credit; | ||
3082 | u8 agg_vars1; | ||
3083 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | ||
3084 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | ||
3085 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | ||
3086 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | ||
3087 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | ||
3088 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | ||
3089 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | ||
3090 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | ||
3091 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4) | ||
3092 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4 | ||
3093 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) | ||
3094 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 | ||
3095 | #define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7) | ||
3096 | #define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7 | ||
3097 | u8 state; | ||
3098 | #elif defined(__LITTLE_ENDIAN) | ||
3099 | u8 state; | ||
3100 | u8 agg_vars1; | ||
3101 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | ||
3102 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | ||
3103 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | ||
3104 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | ||
3105 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | ||
3106 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | ||
3107 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | ||
3108 | #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | ||
3109 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4) | ||
3110 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4 | ||
3111 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) | ||
3112 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 | ||
3113 | #define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7) | ||
3114 | #define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7 | ||
3115 | u16 ulp_credit; | ||
3116 | #endif | ||
3117 | #if defined(__BIG_ENDIAN) | ||
3118 | u16 __agg_val4; | ||
3119 | u16 agg_vars2; | ||
3120 | #define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0) | ||
3121 | #define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0 | ||
3122 | #define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1) | ||
3123 | #define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1 | ||
3124 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2) | ||
3125 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2 | ||
3126 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4) | ||
3127 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4 | ||
3128 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) | ||
3129 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 | ||
3130 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) | ||
3131 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 | ||
3132 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) | ||
3133 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 | ||
3134 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11) | ||
3135 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11 | ||
3136 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12) | ||
3137 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12 | ||
3138 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13) | ||
3139 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13 | ||
3140 | #define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) | ||
3141 | #define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 | ||
3142 | #define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) | ||
3143 | #define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 | ||
3144 | #elif defined(__LITTLE_ENDIAN) | ||
3145 | u16 agg_vars2; | ||
3146 | #define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0) | ||
3147 | #define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0 | ||
3148 | #define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1) | ||
3149 | #define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1 | ||
3150 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2) | ||
3151 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2 | ||
3152 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4) | ||
3153 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4 | ||
3154 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) | ||
3155 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 | ||
3156 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) | ||
3157 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 | ||
3158 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) | ||
3159 | #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 | ||
3160 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11) | ||
3161 | #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11 | ||
3162 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12) | ||
3163 | #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12 | ||
3164 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13) | ||
3165 | #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13 | ||
3166 | #define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) | ||
3167 | #define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 | ||
3168 | #define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) | ||
3169 | #define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 | ||
3170 | u16 __agg_val4; | ||
3171 | #endif | ||
3172 | struct tstorm_tcp_tcp_ag_context_section tcp; | ||
3173 | }; | ||
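Each 32-bit row of these contexts is declared twice, with byte and halfword members in reverse order under __BIG_ENDIAN and __LITTLE_ENDIAN. The stand-in types below (not the HSI structs themselves) sketch the apparent reason: with the member order flipped per host, the same field values produce the same native 32-bit word value on either endianness, matching a transfer model in which the context is moved as 32-bit words.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* one 32-bit row of tstorm_iscsi_ag_context, little-endian host order */
struct row_le { uint8_t state; uint8_t agg_vars1; uint16_t ulp_credit; };
/* the same row as it would be declared for a big-endian host */
struct row_be { uint16_t ulp_credit; uint8_t agg_vars1; uint8_t state; };

int main(void)
{
	struct row_le le = { .state = 0x12, .agg_vars1 = 0x34, .ulp_credit = 0x5678 };
	uint32_t word;

	memcpy(&word, &le, sizeof(word));
	/* On a little-endian host this prints 0x56783412; a big-endian host
	 * using struct row_be and reading the row as a native u32 arrives at
	 * the same value, which is why the declaration order flips. */
	printf("context word = 0x%08x\n", word);
	return 0;
}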
3174 | |||
3175 | /* | ||
3176 | * The iscsi aggregative context of Ustorm | ||
3177 | */ | ||
3178 | struct ustorm_iscsi_ag_context { | ||
3179 | #if defined(__BIG_ENDIAN) | ||
3180 | u8 __aux_counter_flags; | ||
3181 | u8 agg_vars2; | ||
3182 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0) | ||
3183 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0 | ||
3184 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2) | ||
3185 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2 | ||
3186 | #define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) | ||
3187 | #define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 | ||
3188 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) | ||
3189 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 | ||
3190 | u8 agg_vars1; | ||
3191 | #define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | ||
3192 | #define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | ||
3193 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | ||
3194 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | ||
3195 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | ||
3196 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | ||
3197 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | ||
3198 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | ||
3199 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4) | ||
3200 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4 | ||
3201 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6) | ||
3202 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6 | ||
3203 | u8 state; | ||
3204 | #elif defined(__LITTLE_ENDIAN) | ||
3205 | u8 state; | ||
3206 | u8 agg_vars1; | ||
3207 | #define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) | ||
3208 | #define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 | ||
3209 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) | ||
3210 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 | ||
3211 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) | ||
3212 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 | ||
3213 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) | ||
3214 | #define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 | ||
3215 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4) | ||
3216 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4 | ||
3217 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6) | ||
3218 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6 | ||
3219 | u8 agg_vars2; | ||
3220 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0) | ||
3221 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0 | ||
3222 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2) | ||
3223 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2 | ||
3224 | #define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) | ||
3225 | #define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 | ||
3226 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) | ||
3227 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 | ||
3228 | u8 __aux_counter_flags; | ||
3229 | #endif | ||
3230 | #if defined(__BIG_ENDIAN) | ||
3231 | u8 cdu_usage; | ||
3232 | u8 agg_misc2; | ||
3233 | u16 __cq_local_comp_itt_val; | ||
3234 | #elif defined(__LITTLE_ENDIAN) | ||
3235 | u16 __cq_local_comp_itt_val; | ||
3236 | u8 agg_misc2; | ||
3237 | u8 cdu_usage; | ||
3238 | #endif | ||
3239 | u32 agg_misc4; | ||
3240 | #if defined(__BIG_ENDIAN) | ||
3241 | u8 agg_val3_th; | ||
3242 | u8 agg_val3; | ||
3243 | u16 agg_misc3; | ||
3244 | #elif defined(__LITTLE_ENDIAN) | ||
3245 | u16 agg_misc3; | ||
3246 | u8 agg_val3; | ||
3247 | u8 agg_val3_th; | ||
3248 | #endif | ||
3249 | u32 agg_val1; | ||
3250 | u32 agg_misc4_th; | ||
3251 | #if defined(__BIG_ENDIAN) | ||
3252 | u16 agg_val2_th; | ||
3253 | u16 agg_val2; | ||
3254 | #elif defined(__LITTLE_ENDIAN) | ||
3255 | u16 agg_val2; | ||
3256 | u16 agg_val2_th; | ||
3257 | #endif | ||
3258 | #if defined(__BIG_ENDIAN) | ||
3259 | u16 __reserved2; | ||
3260 | u8 decision_rules; | ||
3261 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0) | ||
3262 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 | ||
3263 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) | ||
3264 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 | ||
3265 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) | ||
3266 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 | ||
3267 | #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) | ||
3268 | #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 | ||
3269 | u8 decision_rule_enable_bits; | ||
3270 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0) | ||
3271 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0 | ||
3272 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) | ||
3273 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 | ||
3274 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2) | ||
3275 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2 | ||
3276 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3) | ||
3277 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 | ||
3278 | #define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4) | ||
3279 | #define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4 | ||
3280 | #define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5) | ||
3281 | #define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5 | ||
3282 | #define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6) | ||
3283 | #define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 | ||
3284 | #define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) | ||
3285 | #define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 | ||
3286 | #elif defined(__LITTLE_ENDIAN) | ||
3287 | u8 decision_rule_enable_bits; | ||
3288 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0) | ||
3289 | #define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0 | ||
3290 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) | ||
3291 | #define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 | ||
3292 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2) | ||
3293 | #define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2 | ||
3294 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3) | ||
3295 | #define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 | ||
3296 | #define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4) | ||
3297 | #define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4 | ||
3298 | #define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5) | ||
3299 | #define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5 | ||
3300 | #define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6) | ||
3301 | #define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 | ||
3302 | #define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) | ||
3303 | #define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 | ||
3304 | u8 decision_rules; | ||
3305 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0) | ||
3306 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 | ||
3307 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) | ||
3308 | #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 | ||
3309 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) | ||
3310 | #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 | ||
3311 | #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) | ||
3312 | #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 | ||
3313 | u16 __reserved2; | ||
3314 | #endif | ||
3315 | }; | ||
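decision_rule_enable_bits gates which counter-flags the Ustorm aggregation logic acts on. A minimal illustrative sketch, using only the two ..._CF_EN defines copied from the struct above:

#include <stdint.h>
#include <stdio.h>

#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN		(0x1<<0)
#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT		0
#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN	(0x1<<1)
#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT	1

int main(void)
{
	uint8_t decision_rule_enable_bits = 0;

	/* enable the invalidate and completion counter-flags */
	decision_rule_enable_bits |= USTORM_ISCSI_AG_CONTEXT_INV_CF_EN |
				     USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN;

	printf("decision_rule_enable_bits = 0x%02x\n",
	       decision_rule_enable_bits);	/* prints 0x03 */
	return 0;
}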
3316 | |||
3317 | /* | ||
3318 | * Ethernet context section, shared in TOE, RDMA and ISCSI | 4272 | * Ethernet context section, shared in TOE, RDMA and ISCSI |
3319 | */ | 4273 | */ |
3320 | struct xstorm_eth_context_section { | 4274 | struct xstorm_eth_context_section { |
@@ -3509,7 +4463,27 @@ struct xstorm_tcp_context_section { | |||
3509 | u16 window_scaling_factor; | 4463 | u16 window_scaling_factor; |
3510 | u16 pseudo_csum; | 4464 | u16 pseudo_csum; |
3511 | #endif | 4465 | #endif |
3512 | u32 reserved2; | 4466 | #if defined(__BIG_ENDIAN) |
4467 | u16 reserved2; | ||
4468 | u8 statistics_counter_id; | ||
4469 | u8 statistics_params; | ||
4470 | #define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0) | ||
4471 | #define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0 | ||
4472 | #define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1) | ||
4473 | #define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 | ||
4474 | #define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2) | ||
4475 | #define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2 | ||
4476 | #elif defined(__LITTLE_ENDIAN) | ||
4477 | u8 statistics_params; | ||
4478 | #define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0) | ||
4479 | #define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0 | ||
4480 | #define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1) | ||
4481 | #define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 | ||
4482 | #define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2) | ||
4483 | #define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2 | ||
4484 | u8 statistics_counter_id; | ||
4485 | u16 reserved2; | ||
4486 | #endif | ||
3513 | u32 ts_time_diff; | 4487 | u32 ts_time_diff; |
3514 | u32 __next_timer_expir; | 4488 | u32 __next_timer_expir; |
3515 | }; | 4489 | }; |
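The new statistics_params/statistics_counter_id pair replaces half of the former 32-bit reserved2 word, letting the TCP context select L2 and/or L4 statistics updates per connection. A small illustrative sketch follows; the counter id value is hypothetical, and the identifiers keep the header's own "STATSTICS" spelling.

#include <stdint.h>
#include <stdio.h>

/* spelling taken verbatim from the header above */
#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS		(0x1<<0)
#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT	0
#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS		(0x1<<1)
#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT	1

int main(void)
{
	uint8_t statistics_params = 0;
	uint8_t statistics_counter_id = 5;	/* hypothetical statistics client id */

	statistics_params |= XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS |
			     XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS;

	printf("params = 0x%02x, counter id = %u\n",
	       statistics_params, statistics_counter_id);
	return 0;
}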
@@ -3522,29 +4496,31 @@ struct xstorm_common_context_section { | |||
3522 | union xstorm_ip_context_section_types ip_union; | 4496 | union xstorm_ip_context_section_types ip_union; |
3523 | struct xstorm_tcp_context_section tcp; | 4497 | struct xstorm_tcp_context_section tcp; |
3524 | #if defined(__BIG_ENDIAN) | 4498 | #if defined(__BIG_ENDIAN) |
3525 | u16 reserved; | 4499 | u8 __dcb_val; |
3526 | u8 statistics_params; | 4500 | u8 flags; |
3527 | #define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0) | 4501 | #define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0) |
3528 | #define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0 | 4502 | #define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0 |
3529 | #define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1) | 4503 | #define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1) |
3530 | #define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 | 4504 | #define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1 |
3531 | #define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2) | 4505 | #define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4) |
3532 | #define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2 | 4506 | #define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4 |
3533 | #define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS (0x1<<7) | 4507 | #define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5) |
3534 | #define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS_SHIFT 7 | 4508 | #define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5 |
4509 | u8 reserved; | ||
3535 | u8 ip_version_1b; | 4510 | u8 ip_version_1b; |
3536 | #elif defined(__LITTLE_ENDIAN) | 4511 | #elif defined(__LITTLE_ENDIAN) |
3537 | u8 ip_version_1b; | 4512 | u8 ip_version_1b; |
3538 | u8 statistics_params; | 4513 | u8 reserved; |
3539 | #define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0) | 4514 | u8 flags; |
3540 | #define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0 | 4515 | #define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0) |
3541 | #define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1) | 4516 | #define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0 |
3542 | #define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 | 4517 | #define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1) |
3543 | #define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2) | 4518 | #define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1 |
3544 | #define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2 | 4519 | #define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4) |
3545 | #define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS (0x1<<7) | 4520 | #define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4 |
3546 | #define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS_SHIFT 7 | 4521 | #define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5) |
3547 | u16 reserved; | 4522 | #define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5 |
4523 | u8 __dcb_val; | ||
3548 | #endif | 4524 | #endif |
3549 | }; | 4525 | }; |
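The flags byte introduced here packs four sub-fields into eight bits. An illustrative sketch of building it with the defines above; the port and priority values are arbitrary examples.

#include <stdint.h>
#include <stdio.h>

#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED		(0x1<<0)
#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT	0
#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT			(0x7<<1)
#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT		1
#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE			(0x1<<4)
#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT		4
#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY		(0x7<<5)
#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT	5

int main(void)
{
	uint8_t flags = 0;

	flags |= XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED;
	flags |= (2 << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT) &
		 XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT;		/* PBF port 2 */
	flags |= (3 << XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT) &
		 XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY;	/* priority 3 */

	printf("flags = 0x%02x\n", flags);	/* prints 0x65 */
	return 0;
}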
3550 | 4526 | ||
@@ -3682,99 +4658,6 @@ struct xstorm_iscsi_st_context { | |||
3682 | }; | 4658 | }; |
3683 | 4659 | ||
3684 | /* | 4660 | /* |
3685 | * CQ DB CQ producer and pending completion counter | ||
3686 | */ | ||
3687 | struct iscsi_cq_db_prod_pnd_cmpltn_cnt { | ||
3688 | #if defined(__BIG_ENDIAN) | ||
3689 | u16 cntr; | ||
3690 | u16 prod; | ||
3691 | #elif defined(__LITTLE_ENDIAN) | ||
3692 | u16 prod; | ||
3693 | u16 cntr; | ||
3694 | #endif | ||
3695 | }; | ||
3696 | |||
3697 | /* | ||
3698 | * CQ DB pending completion ITT array | ||
3699 | */ | ||
3700 | struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr { | ||
3701 | struct iscsi_cq_db_prod_pnd_cmpltn_cnt prod_pend_comp[8]; | ||
3702 | }; | ||
3703 | |||
3704 | /* | ||
3705 | * Cstorm CQ sequence to notify array, updated by driver | ||
3706 | */ | ||
3707 | struct iscsi_cq_db_sqn_2_notify_arr { | ||
3708 | u16 sqn[8]; | ||
3709 | }; | ||
3710 | |||
3711 | /* | ||
3712 | * Cstorm iSCSI Storm Context | ||
3713 | */ | ||
3714 | struct cstorm_iscsi_st_context { | ||
3715 | struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr cq_c_prod_pend_comp_ctr_arr; | ||
3716 | struct iscsi_cq_db_sqn_2_notify_arr cq_c_prod_sqn_arr; | ||
3717 | struct iscsi_cq_db_sqn_2_notify_arr cq_c_sqn_2_notify_arr; | ||
3718 | struct regpair hq_pbl_base; | ||
3719 | struct regpair hq_curr_pbe; | ||
3720 | struct regpair task_pbl_base; | ||
3721 | struct regpair cq_db_base; | ||
3722 | #if defined(__BIG_ENDIAN) | ||
3723 | u16 hq_bd_itt; | ||
3724 | u16 iscsi_conn_id; | ||
3725 | #elif defined(__LITTLE_ENDIAN) | ||
3726 | u16 iscsi_conn_id; | ||
3727 | u16 hq_bd_itt; | ||
3728 | #endif | ||
3729 | u32 hq_bd_data_segment_len; | ||
3730 | u32 hq_bd_buffer_offset; | ||
3731 | #if defined(__BIG_ENDIAN) | ||
3732 | u8 timer_entry_idx; | ||
3733 | u8 cq_proc_en_bit_map; | ||
3734 | u8 cq_pend_comp_itt_valid_bit_map; | ||
3735 | u8 hq_bd_opcode; | ||
3736 | #elif defined(__LITTLE_ENDIAN) | ||
3737 | u8 hq_bd_opcode; | ||
3738 | u8 cq_pend_comp_itt_valid_bit_map; | ||
3739 | u8 cq_proc_en_bit_map; | ||
3740 | u8 timer_entry_idx; | ||
3741 | #endif | ||
3742 | u32 hq_tcp_seq; | ||
3743 | #if defined(__BIG_ENDIAN) | ||
3744 | u16 flags; | ||
3745 | #define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0) | ||
3746 | #define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0 | ||
3747 | #define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1) | ||
3748 | #define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1 | ||
3749 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2) | ||
3750 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2 | ||
3751 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3) | ||
3752 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3 | ||
3753 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4) | ||
3754 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4 | ||
3755 | #define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5) | ||
3756 | #define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5 | ||
3757 | u16 hq_cons; | ||
3758 | #elif defined(__LITTLE_ENDIAN) | ||
3759 | u16 hq_cons; | ||
3760 | u16 flags; | ||
3761 | #define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0) | ||
3762 | #define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0 | ||
3763 | #define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1) | ||
3764 | #define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1 | ||
3765 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2) | ||
3766 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2 | ||
3767 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3) | ||
3768 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3 | ||
3769 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4) | ||
3770 | #define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4 | ||
3771 | #define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5) | ||
3772 | #define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5 | ||
3773 | #endif | ||
3774 | struct regpair rsrv1; | ||
3775 | }; | ||
3776 | |||
3777 | /* | ||
3778 | * Iscsi connection context | 4661 | * Iscsi connection context |
3779 | */ | 4662 | */ |
3780 | struct iscsi_context { | 4663 | struct iscsi_context { |
@@ -3791,583 +4674,388 @@ struct iscsi_context { | |||
3791 | struct cstorm_iscsi_st_context cstorm_st_context; | 4674 | struct cstorm_iscsi_st_context cstorm_st_context; |
3792 | }; | 4675 | }; |
3793 | 4676 | ||
3794 | /* | ||
3795 | * FCoE KCQ CQE parameters | ||
3796 | */ | ||
3797 | union fcoe_kcqe_params { | ||
3798 | u32 reserved0[4]; | ||
3799 | }; | ||
3800 | 4677 | ||
3801 | /* | 4678 | /* |
3802 | * FCoE KCQ CQE | 4679 | * PDU header of an iSCSI DATA-OUT |
3803 | */ | 4680 | */ |
3804 | struct fcoe_kcqe { | 4681 | struct iscsi_data_pdu_hdr_little_endian { |
3805 | u32 fcoe_conn_id; | ||
3806 | u32 completion_status; | ||
3807 | u32 fcoe_conn_context_id; | ||
3808 | union fcoe_kcqe_params params; | ||
3809 | #if defined(__BIG_ENDIAN) | 4682 | #if defined(__BIG_ENDIAN) |
3810 | u8 flags; | 4683 | u8 opcode; |
3811 | #define FCOE_KCQE_RESERVED0 (0x7<<0) | 4684 | u8 op_attr; |
3812 | #define FCOE_KCQE_RESERVED0_SHIFT 0 | 4685 | #define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0) |
3813 | #define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3) | 4686 | #define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 |
3814 | #define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3 | 4687 | #define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) |
3815 | #define FCOE_KCQE_LAYER_CODE (0x7<<4) | 4688 | #define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 |
3816 | #define FCOE_KCQE_LAYER_CODE_SHIFT 4 | 4689 | u16 rsrv0; |
3817 | #define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) | ||
3818 | #define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 | ||
3819 | u8 op_code; | ||
3820 | u16 qe_self_seq; | ||
3821 | #elif defined(__LITTLE_ENDIAN) | 4690 | #elif defined(__LITTLE_ENDIAN) |
3822 | u16 qe_self_seq; | 4691 | u16 rsrv0; |
3823 | u8 op_code; | 4692 | u8 op_attr; |
3824 | u8 flags; | 4693 | #define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0) |
3825 | #define FCOE_KCQE_RESERVED0 (0x7<<0) | 4694 | #define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 |
3826 | #define FCOE_KCQE_RESERVED0_SHIFT 0 | 4695 | #define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) |
3827 | #define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3) | 4696 | #define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 |
3828 | #define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3 | 4697 | u8 opcode; |
3829 | #define FCOE_KCQE_LAYER_CODE (0x7<<4) | ||
3830 | #define FCOE_KCQE_LAYER_CODE_SHIFT 4 | ||
3831 | #define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) | ||
3832 | #define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 | ||
3833 | #endif | 4698 | #endif |
4699 | u32 data_fields; | ||
4700 | #define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) | ||
4701 | #define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 | ||
4702 | #define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) | ||
4703 | #define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 | ||
4704 | struct regpair lun; | ||
4705 | u32 itt; | ||
4706 | u32 ttt; | ||
4707 | u32 rsrv2; | ||
4708 | u32 exp_stat_sn; | ||
4709 | u32 rsrv3; | ||
4710 | u32 data_sn; | ||
4711 | u32 buffer_offset; | ||
4712 | u32 rsrv4; | ||
3834 | }; | 4713 | }; |
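As in every PDU header that follows, data_fields packs the 24-bit DataSegmentLength and the 8-bit TotalAHSLength into one 32-bit word. A short illustrative sketch; the payload length is a made-up value.

#include <stdint.h>
#include <stdio.h>

#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH		(0xFFFFFF<<0)
#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT	0
#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH		(0xFF<<24)
#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT	24

int main(void)
{
	uint32_t data_fields = 0;
	uint32_t data_segment_len = 8192;	/* hypothetical Data-Out payload size */
	uint32_t total_ahs_len = 0;		/* no additional header segments */

	data_fields |= (data_segment_len <<
			ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT) &
		       ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH;
	data_fields |= (total_ahs_len <<
			ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT) &
		       ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH;

	printf("data_fields = 0x%08x\n", data_fields);	/* prints 0x00002000 */
	return 0;
}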
3835 | 4714 | ||
3836 | /* | ||
3837 | * FCoE KWQE header | ||
3838 | */ | ||
3839 | struct fcoe_kwqe_header { | ||
3840 | #if defined(__BIG_ENDIAN) | ||
3841 | u8 flags; | ||
3842 | #define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) | ||
3843 | #define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0 | ||
3844 | #define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4) | ||
3845 | #define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 | ||
3846 | #define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) | ||
3847 | #define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 | ||
3848 | u8 op_code; | ||
3849 | #elif defined(__LITTLE_ENDIAN) | ||
3850 | u8 op_code; | ||
3851 | u8 flags; | ||
3852 | #define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) | ||
3853 | #define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0 | ||
3854 | #define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4) | ||
3855 | #define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 | ||
3856 | #define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) | ||
3857 | #define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 | ||
3858 | #endif | ||
3859 | }; | ||
3860 | 4715 | ||
3861 | /* | 4716 | /* |
3862 | * FCoE firmware init request 1 | 4717 | * PDU header of an iSCSI login request |
3863 | */ | 4718 | */ |
3864 | struct fcoe_kwqe_init1 { | 4719 | struct iscsi_login_req_hdr_little_endian { |
3865 | #if defined(__BIG_ENDIAN) | 4720 | #if defined(__BIG_ENDIAN) |
3866 | struct fcoe_kwqe_header hdr; | 4721 | u8 opcode; |
3867 | u16 num_tasks; | 4722 | u8 op_attr; |
3868 | #elif defined(__LITTLE_ENDIAN) | 4723 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0) |
3869 | u16 num_tasks; | 4724 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0 |
3870 | struct fcoe_kwqe_header hdr; | 4725 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2) |
3871 | #endif | 4726 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2 |
3872 | u32 task_list_pbl_addr_lo; | 4727 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4) |
3873 | u32 task_list_pbl_addr_hi; | 4728 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4 |
3874 | u32 dummy_buffer_addr_lo; | 4729 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6) |
3875 | u32 dummy_buffer_addr_hi; | 4730 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6 |
3876 | #if defined(__BIG_ENDIAN) | 4731 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7) |
3877 | u16 rq_num_wqes; | 4732 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7 |
3878 | u16 sq_num_wqes; | 4733 | u8 version_max; |
3879 | #elif defined(__LITTLE_ENDIAN) | 4734 | u8 version_min; |
3880 | u16 sq_num_wqes; | 4735 | #elif defined(__LITTLE_ENDIAN) |
3881 | u16 rq_num_wqes; | 4736 | u8 version_min; |
3882 | #endif | 4737 | u8 version_max; |
3883 | #if defined(__BIG_ENDIAN) | 4738 | u8 op_attr; |
3884 | u16 cq_num_wqes; | 4739 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0) |
3885 | u16 rq_buffer_log_size; | 4740 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0 |
3886 | #elif defined(__LITTLE_ENDIAN) | 4741 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2) |
3887 | u16 rq_buffer_log_size; | 4742 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2 |
3888 | u16 cq_num_wqes; | 4743 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4) |
4744 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4 | ||
4745 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6) | ||
4746 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6 | ||
4747 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7) | ||
4748 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7 | ||
4749 | u8 opcode; | ||
3889 | #endif | 4750 | #endif |
4751 | u32 data_fields; | ||
4752 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) | ||
4753 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 | ||
4754 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) | ||
4755 | #define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 | ||
4756 | u32 isid_lo; | ||
3890 | #if defined(__BIG_ENDIAN) | 4757 | #if defined(__BIG_ENDIAN) |
3891 | u8 flags; | 4758 | u16 isid_hi; |
3892 | #define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) | 4759 | u16 tsih; |
3893 | #define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0 | ||
3894 | #define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4) | ||
3895 | #define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 | ||
3896 | #define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) | ||
3897 | #define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 | ||
3898 | u8 num_sessions_log; | ||
3899 | u16 mtu; | ||
3900 | #elif defined(__LITTLE_ENDIAN) | 4760 | #elif defined(__LITTLE_ENDIAN) |
3901 | u16 mtu; | 4761 | u16 tsih; |
3902 | u8 num_sessions_log; | 4762 | u16 isid_hi; |
3903 | u8 flags; | ||
3904 | #define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) | ||
3905 | #define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0 | ||
3906 | #define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4) | ||
3907 | #define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 | ||
3908 | #define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) | ||
3909 | #define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 | ||
3910 | #endif | 4763 | #endif |
3911 | }; | 4764 | u32 itt; |
3912 | |||
3913 | /* | ||
3914 | * FCoE firmware init request 2 | ||
3915 | */ | ||
3916 | struct fcoe_kwqe_init2 { | ||
3917 | #if defined(__BIG_ENDIAN) | 4765 | #if defined(__BIG_ENDIAN) |
3918 | struct fcoe_kwqe_header hdr; | 4766 | u16 cid; |
3919 | u16 reserved0; | 4767 | u16 rsrv1; |
3920 | #elif defined(__LITTLE_ENDIAN) | 4768 | #elif defined(__LITTLE_ENDIAN) |
3921 | u16 reserved0; | 4769 | u16 rsrv1; |
3922 | struct fcoe_kwqe_header hdr; | 4770 | u16 cid; |
3923 | #endif | 4771 | #endif |
3924 | u32 hash_tbl_pbl_addr_lo; | 4772 | u32 cmd_sn; |
3925 | u32 hash_tbl_pbl_addr_hi; | 4773 | u32 exp_stat_sn; |
3926 | u32 t2_hash_tbl_addr_lo; | 4774 | u32 rsrv2[4]; |
3927 | u32 t2_hash_tbl_addr_hi; | ||
3928 | u32 t2_ptr_hash_tbl_addr_lo; | ||
3929 | u32 t2_ptr_hash_tbl_addr_hi; | ||
3930 | u32 free_list_count; | ||
3931 | }; | 4775 | }; |
3932 | 4776 | ||
3933 | /* | 4777 | /* |
3934 | * FCoE firmware init request 3 | 4778 | * PDU header of an iSCSI logout request |
3935 | */ | 4779 | */ |
3936 | struct fcoe_kwqe_init3 { | 4780 | struct iscsi_logout_req_hdr_little_endian { |
3937 | #if defined(__BIG_ENDIAN) | 4781 | #if defined(__BIG_ENDIAN) |
3938 | struct fcoe_kwqe_header hdr; | 4782 | u8 opcode; |
3939 | u16 reserved0; | 4783 | u8 op_attr; |
4784 | #define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0) | ||
4785 | #define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0 | ||
4786 | #define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7) | ||
4787 | #define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7 | ||
4788 | u16 rsrv0; | ||
3940 | #elif defined(__LITTLE_ENDIAN) | 4789 | #elif defined(__LITTLE_ENDIAN) |
3941 | u16 reserved0; | 4790 | u16 rsrv0; |
3942 | struct fcoe_kwqe_header hdr; | 4791 | u8 op_attr; |
4792 | #define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0) | ||
4793 | #define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0 | ||
4794 | #define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7) | ||
4795 | #define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7 | ||
4796 | u8 opcode; | ||
3943 | #endif | 4797 | #endif |
3944 | u32 error_bit_map_lo; | 4798 | u32 data_fields; |
3945 | u32 error_bit_map_hi; | 4799 | #define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) |
4800 | #define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 | ||
4801 | #define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) | ||
4802 | #define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 | ||
4803 | u32 rsrv2[2]; | ||
4804 | u32 itt; | ||
3946 | #if defined(__BIG_ENDIAN) | 4805 | #if defined(__BIG_ENDIAN) |
3947 | u8 reserved21[3]; | 4806 | u16 cid; |
3948 | u8 cached_session_enable; | 4807 | u16 rsrv1; |
3949 | #elif defined(__LITTLE_ENDIAN) | 4808 | #elif defined(__LITTLE_ENDIAN) |
3950 | u8 cached_session_enable; | 4809 | u16 rsrv1; |
3951 | u8 reserved21[3]; | 4810 | u16 cid; |
3952 | #endif | 4811 | #endif |
3953 | u32 reserved2[4]; | 4812 | u32 cmd_sn; |
4813 | u32 exp_stat_sn; | ||
4814 | u32 rsrv3[4]; | ||
3954 | }; | 4815 | }; |
3955 | 4816 | ||
3956 | /* | 4817 | /* |
3957 | * FCoE connection offload request 1 | 4818 | * PDU header of an iSCSI TMF request |
3958 | */ | 4819 | */ |
3959 | struct fcoe_kwqe_conn_offload1 { | 4820 | struct iscsi_tmf_req_hdr_little_endian { |
3960 | #if defined(__BIG_ENDIAN) | ||
3961 | struct fcoe_kwqe_header hdr; | ||
3962 | u16 fcoe_conn_id; | ||
3963 | #elif defined(__LITTLE_ENDIAN) | ||
3964 | u16 fcoe_conn_id; | ||
3965 | struct fcoe_kwqe_header hdr; | ||
3966 | #endif | ||
3967 | u32 sq_addr_lo; | ||
3968 | u32 sq_addr_hi; | ||
3969 | u32 rq_pbl_addr_lo; | ||
3970 | u32 rq_pbl_addr_hi; | ||
3971 | u32 rq_first_pbe_addr_lo; | ||
3972 | u32 rq_first_pbe_addr_hi; | ||
3973 | #if defined(__BIG_ENDIAN) | 4821 | #if defined(__BIG_ENDIAN) |
3974 | u16 reserved0; | 4822 | u8 opcode; |
3975 | u16 rq_prod; | 4823 | u8 op_attr; |
4824 | #define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0) | ||
4825 | #define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0 | ||
4826 | #define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7) | ||
4827 | #define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7 | ||
4828 | u16 rsrv0; | ||
3976 | #elif defined(__LITTLE_ENDIAN) | 4829 | #elif defined(__LITTLE_ENDIAN) |
3977 | u16 rq_prod; | 4830 | u16 rsrv0; |
3978 | u16 reserved0; | 4831 | u8 op_attr; |
4832 | #define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0) | ||
4833 | #define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0 | ||
4834 | #define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7) | ||
4835 | #define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7 | ||
4836 | u8 opcode; | ||
3979 | #endif | 4837 | #endif |
4838 | u32 data_fields; | ||
4839 | #define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) | ||
4840 | #define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 | ||
4841 | #define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) | ||
4842 | #define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 | ||
4843 | struct regpair lun; | ||
4844 | u32 itt; | ||
4845 | u32 referenced_task_tag; | ||
4846 | u32 cmd_sn; | ||
4847 | u32 exp_stat_sn; | ||
4848 | u32 ref_cmd_sn; | ||
4849 | u32 exp_data_sn; | ||
4850 | u32 rsrv2[2]; | ||
3980 | }; | 4851 | }; |
3981 | 4852 | ||
3982 | /* | 4853 | /* |
3983 | * FCoE connection offload request 2 | 4854 | * PDU header of an iSCSI Text request |
3984 | */ | 4855 | */ |
3985 | struct fcoe_kwqe_conn_offload2 { | 4856 | struct iscsi_text_req_hdr_little_endian { |
3986 | #if defined(__BIG_ENDIAN) | 4857 | #if defined(__BIG_ENDIAN) |
3987 | struct fcoe_kwqe_header hdr; | 4858 | u8 opcode; |
3988 | u16 tx_max_fc_pay_len; | 4859 | u8 op_attr; |
4860 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0) | ||
4861 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 | ||
4862 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6) | ||
4863 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6 | ||
4864 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7) | ||
4865 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7 | ||
4866 | u16 rsrv0; | ||
3989 | #elif defined(__LITTLE_ENDIAN) | 4867 | #elif defined(__LITTLE_ENDIAN) |
3990 | u16 tx_max_fc_pay_len; | 4868 | u16 rsrv0; |
3991 | struct fcoe_kwqe_header hdr; | 4869 | u8 op_attr; |
4870 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0) | ||
4871 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 | ||
4872 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6) | ||
4873 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6 | ||
4874 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7) | ||
4875 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7 | ||
4876 | u8 opcode; | ||
3992 | #endif | 4877 | #endif |
3993 | u32 cq_addr_lo; | 4878 | u32 data_fields; |
3994 | u32 cq_addr_hi; | 4879 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) |
3995 | u32 xferq_addr_lo; | 4880 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 |
3996 | u32 xferq_addr_hi; | 4881 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) |
3997 | u32 conn_db_addr_lo; | 4882 | #define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 |
3998 | u32 conn_db_addr_hi; | 4883 | struct regpair lun; |
3999 | u32 reserved1; | 4884 | u32 itt; |
4885 | u32 ttt; | ||
4886 | u32 cmd_sn; | ||
4887 | u32 exp_stat_sn; | ||
4888 | u32 rsrv3[4]; | ||
4000 | }; | 4889 | }; |
4001 | 4890 | ||
4002 | /* | 4891 | /* |
4003 | * FCoE connection offload request 3 | 4892 | * PDU header of an iSCSI Nop-Out |
4004 | */ | 4893 | */ |
4005 | struct fcoe_kwqe_conn_offload3 { | 4894 | struct iscsi_nop_out_hdr_little_endian { |
4006 | #if defined(__BIG_ENDIAN) | 4895 | #if defined(__BIG_ENDIAN) |
4007 | struct fcoe_kwqe_header hdr; | 4896 | u8 opcode; |
4008 | u16 vlan_tag; | 4897 | u8 op_attr; |
4009 | #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) | 4898 | #define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0) |
4010 | #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 | 4899 | #define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 |
4011 | #define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) | 4900 | #define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7) |
4012 | #define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12 | 4901 | #define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7 |
4013 | #define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) | 4902 | u16 rsrv0; |
4014 | #define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 | ||
4015 | #elif defined(__LITTLE_ENDIAN) | ||
4016 | u16 vlan_tag; | ||
4017 | #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) | ||
4018 | #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 | ||
4019 | #define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) | ||
4020 | #define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12 | ||
4021 | #define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) | ||
4022 | #define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 | ||
4023 | struct fcoe_kwqe_header hdr; | ||
4024 | #endif | ||
4025 | #if defined(__BIG_ENDIAN) | ||
4026 | u8 tx_max_conc_seqs_c3; | ||
4027 | u8 s_id[3]; | ||
4028 | #elif defined(__LITTLE_ENDIAN) | ||
4029 | u8 s_id[3]; | ||
4030 | u8 tx_max_conc_seqs_c3; | ||
4031 | #endif | ||
4032 | #if defined(__BIG_ENDIAN) | ||
4033 | u8 flags; | ||
4034 | #define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) | ||
4035 | #define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0 | ||
4036 | #define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1) | ||
4037 | #define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1 | ||
4038 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2) | ||
4039 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2 | ||
4040 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3) | ||
4041 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3 | ||
4042 | #define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4) | ||
4043 | #define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4 | ||
4044 | #define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5) | ||
4045 | #define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5 | ||
4046 | #define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6) | ||
4047 | #define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 | ||
4048 | #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) | ||
4049 | #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 | ||
4050 | u8 d_id[3]; | ||
4051 | #elif defined(__LITTLE_ENDIAN) | ||
4052 | u8 d_id[3]; | ||
4053 | u8 flags; | ||
4054 | #define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) | ||
4055 | #define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0 | ||
4056 | #define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1) | ||
4057 | #define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1 | ||
4058 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2) | ||
4059 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2 | ||
4060 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3) | ||
4061 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3 | ||
4062 | #define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4) | ||
4063 | #define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4 | ||
4064 | #define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5) | ||
4065 | #define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5 | ||
4066 | #define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6) | ||
4067 | #define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 | ||
4068 | #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) | ||
4069 | #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 | ||
4070 | #endif | ||
4071 | u32 reserved; | ||
4072 | u32 confq_first_pbe_addr_lo; | ||
4073 | u32 confq_first_pbe_addr_hi; | ||
4074 | #if defined(__BIG_ENDIAN) | ||
4075 | u16 rx_max_fc_pay_len; | ||
4076 | u16 tx_total_conc_seqs; | ||
4077 | #elif defined(__LITTLE_ENDIAN) | ||
4078 | u16 tx_total_conc_seqs; | ||
4079 | u16 rx_max_fc_pay_len; | ||
4080 | #endif | ||
4081 | #if defined(__BIG_ENDIAN) | ||
4082 | u8 rx_open_seqs_exch_c3; | ||
4083 | u8 rx_max_conc_seqs_c3; | ||
4084 | u16 rx_total_conc_seqs; | ||
4085 | #elif defined(__LITTLE_ENDIAN) | 4903 | #elif defined(__LITTLE_ENDIAN) |
4086 | u16 rx_total_conc_seqs; | 4904 | u16 rsrv0; |
4087 | u8 rx_max_conc_seqs_c3; | 4905 | u8 op_attr; |
4088 | u8 rx_open_seqs_exch_c3; | 4906 | #define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0) |
4907 | #define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 | ||
4908 | #define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7) | ||
4909 | #define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7 | ||
4910 | u8 opcode; | ||
4089 | #endif | 4911 | #endif |
4912 | u32 data_fields; | ||
4913 | #define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) | ||
4914 | #define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 | ||
4915 | #define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) | ||
4916 | #define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 | ||
4917 | struct regpair lun; | ||
4918 | u32 itt; | ||
4919 | u32 ttt; | ||
4920 | u32 cmd_sn; | ||
4921 | u32 exp_stat_sn; | ||
4922 | u32 rsrv3[4]; | ||
4090 | }; | 4923 | }; |
4091 | 4924 | ||
4092 | /* | 4925 | /* |
4093 | * FCoE connection offload request 4 | 4926 | * iscsi pdu headers in little endian form. |
4094 | */ | 4927 | */ |
4095 | struct fcoe_kwqe_conn_offload4 { | 4928 | union iscsi_pdu_headers_little_endian { |
4096 | #if defined(__BIG_ENDIAN) | 4929 | u32 fullHeaderSize[12]; |
4097 | struct fcoe_kwqe_header hdr; | 4930 | struct iscsi_cmd_pdu_hdr_little_endian command_pdu_hdr; |
4098 | u8 reserved2; | 4931 | struct iscsi_data_pdu_hdr_little_endian data_out_pdu_hdr; |
4099 | u8 e_d_tov_timer_val; | 4932 | struct iscsi_login_req_hdr_little_endian login_req_pdu_hdr; |
4100 | #elif defined(__LITTLE_ENDIAN) | 4933 | struct iscsi_logout_req_hdr_little_endian logout_req_pdu_hdr; |
4101 | u8 e_d_tov_timer_val; | 4934 | struct iscsi_tmf_req_hdr_little_endian tmf_req_pdu_hdr; |
4102 | u8 reserved2; | 4935 | struct iscsi_text_req_hdr_little_endian text_req_pdu_hdr; |
4103 | struct fcoe_kwqe_header hdr; | 4936 | struct iscsi_nop_out_hdr_little_endian nop_out_pdu_hdr; |
4104 | #endif | ||
4105 | u8 src_mac_addr_lo32[4]; | ||
4106 | #if defined(__BIG_ENDIAN) | ||
4107 | u8 dst_mac_addr_hi16[2]; | ||
4108 | u8 src_mac_addr_hi16[2]; | ||
4109 | #elif defined(__LITTLE_ENDIAN) | ||
4110 | u8 src_mac_addr_hi16[2]; | ||
4111 | u8 dst_mac_addr_hi16[2]; | ||
4112 | #endif | ||
4113 | u8 dst_mac_addr_lo32[4]; | ||
4114 | u32 lcq_addr_lo; | ||
4115 | u32 lcq_addr_hi; | ||
4116 | u32 confq_pbl_base_addr_lo; | ||
4117 | u32 confq_pbl_base_addr_hi; | ||
4118 | }; | 4937 | }; |
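The fullHeaderSize[12] member appears to pin the union to 12 * 4 = 48 bytes, the size of an iSCSI Basic Header Segment, so each request-header variant overlays the same slot inside the HQ BD. The sketch below uses simplified stand-in types, not the HSI structures, just to show the sizing effect.

#include <stdint.h>
#include <stdio.h>

struct demo_nop_out_hdr { uint32_t words[12]; };	/* stand-in for one header variant */

union demo_pdu_headers {
	uint32_t fullHeaderSize[12];			/* forces the 48-byte footprint */
	struct demo_nop_out_hdr nop_out_pdu_hdr;
};

int main(void)
{
	printf("union size = %zu bytes\n", sizeof(union demo_pdu_headers));	/* 48 */
	return 0;
}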
4119 | 4938 | ||
4120 | /* | 4939 | struct iscsi_hq_bd { |
4121 | * FCoE connection enable request | 4940 | union iscsi_pdu_headers_little_endian pdu_header; |
4122 | */ | ||
4123 | struct fcoe_kwqe_conn_enable_disable { | ||
4124 | #if defined(__BIG_ENDIAN) | ||
4125 | struct fcoe_kwqe_header hdr; | ||
4126 | u16 reserved0; | ||
4127 | #elif defined(__LITTLE_ENDIAN) | ||
4128 | u16 reserved0; | ||
4129 | struct fcoe_kwqe_header hdr; | ||
4130 | #endif | ||
4131 | u8 src_mac_addr_lo32[4]; | ||
4132 | #if defined(__BIG_ENDIAN) | ||
4133 | u16 vlan_tag; | ||
4134 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) | ||
4135 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 | ||
4136 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12) | ||
4137 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 | ||
4138 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) | ||
4139 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 | ||
4140 | u8 src_mac_addr_hi16[2]; | ||
4141 | #elif defined(__LITTLE_ENDIAN) | ||
4142 | u8 src_mac_addr_hi16[2]; | ||
4143 | u16 vlan_tag; | ||
4144 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) | ||
4145 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 | ||
4146 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12) | ||
4147 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 | ||
4148 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) | ||
4149 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 | ||
4150 | #endif | ||
4151 | u8 dst_mac_addr_lo32[4]; | ||
4152 | #if defined(__BIG_ENDIAN) | 4941 | #if defined(__BIG_ENDIAN) |
4153 | u16 reserved1; | 4942 | u16 reserved1; |
4154 | u8 dst_mac_addr_hi16[2]; | 4943 | u16 lcl_cmp_flg; |
4155 | #elif defined(__LITTLE_ENDIAN) | 4944 | #elif defined(__LITTLE_ENDIAN) |
4156 | u8 dst_mac_addr_hi16[2]; | 4945 | u16 lcl_cmp_flg; |
4157 | u16 reserved1; | 4946 | u16 reserved1; |
4158 | #endif | 4947 | #endif |
4948 | u32 sgl_base_lo; | ||
4949 | u32 sgl_base_hi; | ||
4159 | #if defined(__BIG_ENDIAN) | 4950 | #if defined(__BIG_ENDIAN) |
4160 | u8 vlan_flag; | 4951 | u8 sgl_size; |
4161 | u8 s_id[3]; | 4952 | u8 sge_index; |
4162 | #elif defined(__LITTLE_ENDIAN) | 4953 | u16 sge_offset; |
4163 | u8 s_id[3]; | ||
4164 | u8 vlan_flag; | ||
4165 | #endif | ||
4166 | #if defined(__BIG_ENDIAN) | ||
4167 | u8 reserved3; | ||
4168 | u8 d_id[3]; | ||
4169 | #elif defined(__LITTLE_ENDIAN) | 4954 | #elif defined(__LITTLE_ENDIAN) |
4170 | u8 d_id[3]; | 4955 | u16 sge_offset; |
4171 | u8 reserved3; | 4956 | u8 sge_index; |
4957 | u8 sgl_size; | ||
4172 | #endif | 4958 | #endif |
4173 | u32 context_id; | ||
4174 | u32 conn_id; | ||
4175 | u32 reserved4; | ||
4176 | }; | 4959 | }; |
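sgl_base_lo/sgl_base_hi carry a 64-bit SGL DMA address split into two 32-bit halves, the same pattern as every other ..._lo/..._hi pair in these structures. A plain-C illustrative sketch of the split; the address is hypothetical and the kernel's own dma_addr_t handling is not shown here.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sgl_dma_addr = 0x0000001234567000ULL;	/* hypothetical DMA address */
	uint32_t sgl_base_lo = (uint32_t)(sgl_dma_addr & 0xffffffffU);
	uint32_t sgl_base_hi = (uint32_t)(sgl_dma_addr >> 32);

	printf("sgl_base_lo = 0x%08x, sgl_base_hi = 0x%08x\n",
	       sgl_base_lo, sgl_base_hi);	/* 0x34567000, 0x00000012 */
	return 0;
}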
4177 | 4960 | ||
4178 | /* | ||
4179 | * FCoE connection destroy request | ||
4180 | */ | ||
4181 | struct fcoe_kwqe_conn_destroy { | ||
4182 | #if defined(__BIG_ENDIAN) | ||
4183 | struct fcoe_kwqe_header hdr; | ||
4184 | u16 reserved0; | ||
4185 | #elif defined(__LITTLE_ENDIAN) | ||
4186 | u16 reserved0; | ||
4187 | struct fcoe_kwqe_header hdr; | ||
4188 | #endif | ||
4189 | u32 context_id; | ||
4190 | u32 conn_id; | ||
4191 | u32 reserved1[5]; | ||
4192 | }; | ||
4193 | 4961 | ||
4194 | /* | 4962 | /* |
4195 | * FCoe destroy request | 4963 | * CQE data for L2 OOO connection $$KEEP_ENDIANNESS$$ |
4196 | */ | 4964 | */ |
4197 | struct fcoe_kwqe_destroy { | 4965 | struct iscsi_l2_ooo_data { |
4198 | #if defined(__BIG_ENDIAN) | 4966 | __le32 iscsi_cid; |
4199 | struct fcoe_kwqe_header hdr; | 4967 | u8 drop_isle; |
4200 | u16 reserved0; | 4968 | u8 drop_size; |
4201 | #elif defined(__LITTLE_ENDIAN) | 4969 | u8 ooo_opcode; |
4202 | u16 reserved0; | 4970 | u8 ooo_isle; |
4203 | struct fcoe_kwqe_header hdr; | 4971 | u8 reserved[8]; |
4204 | #endif | ||
4205 | u32 reserved1[7]; | ||
4206 | }; | 4972 | }; |
4207 | 4973 | ||
4208 | /* | ||
4209 | * FCoe statistics request | ||
4210 | */ | ||
4211 | struct fcoe_kwqe_stat { | ||
4212 | #if defined(__BIG_ENDIAN) | ||
4213 | struct fcoe_kwqe_header hdr; | ||
4214 | u16 reserved0; | ||
4215 | #elif defined(__LITTLE_ENDIAN) | ||
4216 | u16 reserved0; | ||
4217 | struct fcoe_kwqe_header hdr; | ||
4218 | #endif | ||
4219 | u32 stat_params_addr_lo; | ||
4220 | u32 stat_params_addr_hi; | ||
4221 | u32 reserved1[5]; | ||
4222 | }; | ||
4223 | 4974 | ||
4224 | /* | ||
4225 | * FCoE KWQ WQE | ||
4226 | */ | ||
4227 | union fcoe_kwqe { | ||
4228 | struct fcoe_kwqe_init1 init1; | ||
4229 | struct fcoe_kwqe_init2 init2; | ||
4230 | struct fcoe_kwqe_init3 init3; | ||
4231 | struct fcoe_kwqe_conn_offload1 conn_offload1; | ||
4232 | struct fcoe_kwqe_conn_offload2 conn_offload2; | ||
4233 | struct fcoe_kwqe_conn_offload3 conn_offload3; | ||
4234 | struct fcoe_kwqe_conn_offload4 conn_offload4; | ||
4235 | struct fcoe_kwqe_conn_enable_disable conn_enable_disable; | ||
4236 | struct fcoe_kwqe_conn_destroy conn_destroy; | ||
4237 | struct fcoe_kwqe_destroy destroy; | ||
4238 | struct fcoe_kwqe_stat statistics; | ||
4239 | }; | ||
4240 | 4975 | ||
4241 | struct fcoe_task_ctx_entry { | ||
4242 | struct fcoe_task_ctx_entry_tx_only tx_wr_only; | ||
4243 | struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd; | ||
4244 | struct fcoe_task_ctx_entry_tx_rx_cmn cmn; | ||
4245 | struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd; | ||
4246 | struct fcoe_task_ctx_entry_rx_only rx_wr_only; | ||
4247 | u32 reserved[4]; | ||
4248 | }; | ||
4249 | 4976 | ||
4250 | /* | ||
4251 | * FCoE connection enable\disable params passed by driver to FW in FCoE enable ramrod | ||
4252 | */ | ||
4253 | struct fcoe_conn_enable_disable_ramrod_params { | ||
4254 | struct fcoe_kwqe_conn_enable_disable enable_disable_kwqe; | ||
4255 | }; | ||
4256 | 4977 | ||
4257 | 4978 | ||
4258 | /* | 4979 | struct iscsi_task_context_entry_xuc_c_write_only { |
4259 | * FCoE connection offload params passed by driver to FW in FCoE offload ramrod | 4980 | u32 total_data_acked; |
4260 | */ | ||
4261 | struct fcoe_conn_offload_ramrod_params { | ||
4262 | struct fcoe_kwqe_conn_offload1 offload_kwqe1; | ||
4263 | struct fcoe_kwqe_conn_offload2 offload_kwqe2; | ||
4264 | struct fcoe_kwqe_conn_offload3 offload_kwqe3; | ||
4265 | struct fcoe_kwqe_conn_offload4 offload_kwqe4; | ||
4266 | }; | 4981 | }; |
4267 | 4982 | ||
4268 | /* | 4983 | struct iscsi_task_context_r2t_table_entry { |
4269 | * FCoE init params passed by driver to FW in FCoE init ramrod | 4984 | u32 ttt; |
4270 | */ | 4985 | u32 desired_data_len; |
4271 | struct fcoe_init_ramrod_params { | 4986 | }; |
4272 | struct fcoe_kwqe_init1 init_kwqe1; | 4987 | |
4273 | struct fcoe_kwqe_init2 init_kwqe2; | 4988 | struct iscsi_task_context_entry_xuc_u_write_only { |
4274 | struct fcoe_kwqe_init3 init_kwqe3; | 4989 | u32 exp_r2t_sn; |
4275 | struct regpair eq_addr; | 4990 | struct iscsi_task_context_r2t_table_entry r2t_table[4]; |
4276 | struct regpair eq_next_page_addr; | ||
4277 | #if defined(__BIG_ENDIAN) | 4991 | #if defined(__BIG_ENDIAN) |
4278 | u16 sb_num; | 4992 | u16 data_in_count; |
4279 | u16 eq_prod; | 4993 | u8 cq_id; |
4994 | u8 valid_1b; | ||
4280 | #elif defined(__LITTLE_ENDIAN) | 4995 | #elif defined(__LITTLE_ENDIAN) |
4281 | u16 eq_prod; | 4996 | u8 valid_1b; |
4282 | u16 sb_num; | 4997 | u8 cq_id; |
4998 | u16 data_in_count; | ||
4283 | #endif | 4999 | #endif |
5000 | }; | ||
5001 | |||
5002 | struct iscsi_task_context_entry_xuc { | ||
5003 | struct iscsi_task_context_entry_xuc_c_write_only write_c; | ||
5004 | u32 exp_data_transfer_len; | ||
5005 | struct iscsi_task_context_entry_xuc_x_write_only write_x; | ||
5006 | u32 lun_lo; | ||
5007 | struct iscsi_task_context_entry_xuc_xu_write_both write_xu; | ||
5008 | u32 lun_hi; | ||
5009 | struct iscsi_task_context_entry_xuc_u_write_only write_u; | ||
5010 | }; | ||
5011 | |||
5012 | struct iscsi_task_context_entry_u { | ||
5013 | u32 exp_r2t_buff_offset; | ||
5014 | u32 rem_rcv_len; | ||
5015 | u32 exp_data_sn; | ||
5016 | }; | ||
5017 | |||
5018 | struct iscsi_task_context_entry { | ||
5019 | struct iscsi_task_context_entry_x tce_x; | ||
4284 | #if defined(__BIG_ENDIAN) | 5020 | #if defined(__BIG_ENDIAN) |
4285 | u16 reserved1; | 5021 | u16 data_out_count; |
4286 | u8 reserved0; | 5022 | u16 rsrv0; |
4287 | u8 sb_id; | ||
4288 | #elif defined(__LITTLE_ENDIAN) | 5023 | #elif defined(__LITTLE_ENDIAN) |
4289 | u8 sb_id; | 5024 | u16 rsrv0; |
4290 | u8 reserved0; | 5025 | u16 data_out_count; |
4291 | u16 reserved1; | ||
4292 | #endif | 5026 | #endif |
5027 | struct iscsi_task_context_entry_xuc tce_xuc; | ||
5028 | struct iscsi_task_context_entry_u tce_u; | ||
5029 | u32 rsrv1[7]; | ||
4293 | }; | 5030 | }; |
4294 | 5031 | ||
4295 | 5032 | ||
4296 | /* | ||
4297 | * FCoE statistics params buffer passed by driver to FW in FCoE statistics ramrod | ||
4298 | */ | ||
4299 | struct fcoe_stat_ramrod_params { | ||
4300 | struct fcoe_kwqe_stat stat_kwqe; | ||
4301 | }; | ||
4302 | 5033 | ||
4303 | 5034 | ||
4304 | /* | ||
4305 | * FCoE 16-bits vlan structure | ||
4306 | */ | ||
4307 | struct fcoe_vlan_fields { | ||
4308 | u16 fields; | ||
4309 | #define FCOE_VLAN_FIELDS_VID (0xFFF<<0) | ||
4310 | #define FCOE_VLAN_FIELDS_VID_SHIFT 0 | ||
4311 | #define FCOE_VLAN_FIELDS_CLI (0x1<<12) | ||
4312 | #define FCOE_VLAN_FIELDS_CLI_SHIFT 12 | ||
4313 | #define FCOE_VLAN_FIELDS_PRI (0x7<<13) | ||
4314 | #define FCOE_VLAN_FIELDS_PRI_SHIFT 13 | ||
4315 | }; | ||
4316 | 5035 | ||
4317 | 5036 | ||
4318 | /* | ||
4319 | * FCoE 16-bits vlan union | ||
4320 | */ | ||
4321 | union fcoe_vlan_field_union { | ||
4322 | struct fcoe_vlan_fields fields; | ||
4323 | u16 val; | ||
4324 | }; | ||
4325 | 5037 | ||
4326 | /* | ||
4327 | * Parameters used for Class 2 verifications | ||
4328 | */ | ||
4329 | struct ustorm_fcoe_c2_params { | ||
4330 | #if defined(__BIG_ENDIAN) | ||
4331 | u16 e2e_credit; | ||
4332 | u16 con_seq; | ||
4333 | #elif defined(__LITTLE_ENDIAN) | ||
4334 | u16 con_seq; | ||
4335 | u16 e2e_credit; | ||
4336 | #endif | ||
4337 | #if defined(__BIG_ENDIAN) | ||
4338 | u16 ackq_prod; | ||
4339 | u16 open_seq_per_exch; | ||
4340 | #elif defined(__LITTLE_ENDIAN) | ||
4341 | u16 open_seq_per_exch; | ||
4342 | u16 ackq_prod; | ||
4343 | #endif | ||
4344 | struct regpair ackq_pbl_base; | ||
4345 | struct regpair ackq_cur_seg; | ||
4346 | }; | ||
4347 | 5038 | ||
4348 | /* | 5039 | struct iscsi_task_context_entry_xuc_x_init_only { |
4349 | * Parameters used for Class 2 verifications | 5040 | struct regpair lun; |
4350 | */ | 5041 | u32 exp_data_transfer_len; |
4351 | struct xstorm_fcoe_c2_params { | ||
4352 | #if defined(__BIG_ENDIAN) | ||
4353 | u16 reserved0; | ||
4354 | u8 ackq_x_prod; | ||
4355 | u8 max_conc_seqs_c2; | ||
4356 | #elif defined(__LITTLE_ENDIAN) | ||
4357 | u8 max_conc_seqs_c2; | ||
4358 | u8 ackq_x_prod; | ||
4359 | u16 reserved0; | ||
4360 | #endif | ||
4361 | struct regpair ackq_pbl_base; | ||
4362 | struct regpair ackq_cur_seg; | ||
4363 | }; | 5042 | }; |
4364 | 5043 | ||
4365 | /* | 5044 | |
4366 | * Buffer per connection, used in Tstorm | 5045 | |
4367 | */ | 5046 | |
4368 | struct iscsi_conn_buf { | 5047 | |
4369 | struct regpair reserved[8]; | 5048 | |
4370 | }; | 5049 | |
5050 | |||
5051 | |||
5052 | |||
5053 | |||
5054 | |||
5055 | |||
5056 | |||
5057 | |||
5058 | |||
4371 | 5059 | ||
4372 | /* | 5060 | /* |
4373 | * ipv6 structure | 5061 | * ipv6 structure |
@@ -4379,6 +5067,8 @@ struct ip_v6_addr { | |||
4379 | u32 ip_addr_hi_hi; | 5067 | u32 ip_addr_hi_hi; |
4380 | }; | 5068 | }; |
4381 | 5069 | ||
5070 | |||
5071 | |||
4382 | /* | 5072 | /* |
4383 | * l5cm- connection identification params | 5073 | * l5cm- connection identification params |
4384 | */ | 5074 | */ |
@@ -4460,8 +5150,7 @@ struct l5cm_xstorm_conn_buffer { | |||
4460 | * l5cm-tstorm connection buffer | 5150 | * l5cm-tstorm connection buffer |
4461 | */ | 5151 | */ |
4462 | struct l5cm_tstorm_conn_buffer { | 5152 | struct l5cm_tstorm_conn_buffer { |
4463 | u32 snd_buf; | 5153 | u32 rsrv1[2]; |
4464 | u32 rcv_buf; | ||
4465 | #if defined(__BIG_ENDIAN) | 5154 | #if defined(__BIG_ENDIAN) |
4466 | u16 params; | 5155 | u16 params; |
4467 | #define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0) | 5156 | #define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0) |
@@ -4493,6 +5182,72 @@ struct l5cm_active_conn_buffer { | |||
4493 | struct l5cm_tstorm_conn_buffer tstorm_conn_buffer; | 5182 | struct l5cm_tstorm_conn_buffer tstorm_conn_buffer; |
4494 | }; | 5183 | }; |
4495 | 5184 | ||
5185 | |||
5186 | |||
5187 | /* | ||
5188 | * The l5cm opaque buffer passed in add new connection ramrod passive side | ||
5189 | */ | ||
5190 | struct l5cm_hash_input_string { | ||
5191 | u32 __opaque1; | ||
5192 | #if defined(__BIG_ENDIAN) | ||
5193 | u16 __opaque3; | ||
5194 | u16 __opaque2; | ||
5195 | #elif defined(__LITTLE_ENDIAN) | ||
5196 | u16 __opaque2; | ||
5197 | u16 __opaque3; | ||
5198 | #endif | ||
5199 | struct ip_v6_addr __opaque4; | ||
5200 | struct ip_v6_addr __opaque5; | ||
5201 | u32 __opaque6; | ||
5202 | u32 __opaque7[5]; | ||
5203 | }; | ||
5204 | |||
5205 | |||
5206 | /* | ||
5207 | * syn cookie component | ||
5208 | */ | ||
5209 | struct l5cm_syn_cookie_comp { | ||
5210 | u32 __opaque; | ||
5211 | }; | ||
5212 | |||
5213 | /* | ||
5214 | * data related to listeners of a TCP port | ||
5215 | */ | ||
5216 | struct l5cm_port_listener_data { | ||
5217 | u8 params; | ||
5218 | #define L5CM_PORT_LISTENER_DATA_ENABLE (0x1<<0) | ||
5219 | #define L5CM_PORT_LISTENER_DATA_ENABLE_SHIFT 0 | ||
5220 | #define L5CM_PORT_LISTENER_DATA_IP_INDEX (0xF<<1) | ||
5221 | #define L5CM_PORT_LISTENER_DATA_IP_INDEX_SHIFT 1 | ||
5222 | #define L5CM_PORT_LISTENER_DATA_NET_FILTER (0x1<<5) | ||
5223 | #define L5CM_PORT_LISTENER_DATA_NET_FILTER_SHIFT 5 | ||
5224 | #define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE (0x1<<6) | ||
5225 | #define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE_SHIFT 6 | ||
5226 | #define L5CM_PORT_LISTENER_DATA_MPA_MODE (0x1<<7) | ||
5227 | #define L5CM_PORT_LISTENER_DATA_MPA_MODE_SHIFT 7 | ||
5228 | }; | ||
5229 | |||
5230 | /* | ||
5231 | * Opaque structure passed from U to X when final ack arrives | ||
5232 | */ | ||
5233 | struct l5cm_opaque_buf { | ||
5234 | u32 __opaque1; | ||
5235 | u32 __opaque2; | ||
5236 | u32 __opaque3; | ||
5237 | u32 __opaque4; | ||
5238 | struct l5cm_syn_cookie_comp __opaque5; | ||
5239 | #if defined(__BIG_ENDIAN) | ||
5240 | u16 rsrv2; | ||
5241 | u8 rsrv; | ||
5242 | struct l5cm_port_listener_data __opaque6; | ||
5243 | #elif defined(__LITTLE_ENDIAN) | ||
5244 | struct l5cm_port_listener_data __opaque6; | ||
5245 | u8 rsrv; | ||
5246 | u16 rsrv2; | ||
5247 | #endif | ||
5248 | }; | ||
5249 | |||
5250 | |||
4496 | /* | 5251 | /* |
4497 | * l5cm slow path element | 5252 | * l5cm slow path element |
4498 | */ | 5253 | */ |
@@ -4501,6 +5256,109 @@ struct l5cm_packet_size { | |||
4501 | u32 rsrv; | 5256 | u32 rsrv; |
4502 | }; | 5257 | }; |
4503 | 5258 | ||
5259 | |||
5260 | /* | ||
5261 | * The final-ack union structure in PCS entry after final ack arrived | ||
5262 | */ | ||
5263 | struct l5cm_pcse_ack { | ||
5264 | struct l5cm_xstorm_conn_buffer tx_socket_params; | ||
5265 | struct l5cm_opaque_buf opaque_buf; | ||
5266 | struct l5cm_tstorm_conn_buffer rx_socket_params; | ||
5267 | }; | ||
5268 | |||
5269 | |||
5270 | /* | ||
5271 | * The syn union structure in PCS entry after syn arrived | ||
5272 | */ | ||
5273 | struct l5cm_pcse_syn { | ||
5274 | struct l5cm_opaque_buf opaque_buf; | ||
5275 | u32 rsrv[12]; | ||
5276 | }; | ||
5277 | |||
5278 | |||
5279 | /* | ||
5280 | * pcs entry data for passive connections | ||
5281 | */ | ||
5282 | struct l5cm_pcs_attributes { | ||
5283 | #if defined(__BIG_ENDIAN) | ||
5284 | u16 pcs_id; | ||
5285 | u8 status; | ||
5286 | u8 flags; | ||
5287 | #define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0) | ||
5288 | #define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0 | ||
5289 | #define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1) | ||
5290 | #define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1 | ||
5291 | #define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2) | ||
5292 | #define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2 | ||
5293 | #define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3) | ||
5294 | #define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3 | ||
5295 | #define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4) | ||
5296 | #define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4 | ||
5297 | #define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5) | ||
5298 | #define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5 | ||
5299 | #define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6) | ||
5300 | #define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6 | ||
5301 | #define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7) | ||
5302 | #define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7 | ||
5303 | #elif defined(__LITTLE_ENDIAN) | ||
5304 | u8 flags; | ||
5305 | #define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0) | ||
5306 | #define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0 | ||
5307 | #define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1) | ||
5308 | #define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1 | ||
5309 | #define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2) | ||
5310 | #define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2 | ||
5311 | #define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3) | ||
5312 | #define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3 | ||
5313 | #define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4) | ||
5314 | #define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4 | ||
5315 | #define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5) | ||
5316 | #define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5 | ||
5317 | #define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6) | ||
5318 | #define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6 | ||
5319 | #define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7) | ||
5320 | #define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7 | ||
5321 | u8 status; | ||
5322 | u16 pcs_id; | ||
5323 | #endif | ||
5324 | }; | ||
5325 | |||
5326 | |||
5327 | union l5cm_seg_params { | ||
5328 | struct l5cm_pcse_syn syn_seg_params; | ||
5329 | struct l5cm_pcse_ack ack_seg_params; | ||
5330 | }; | ||
5331 | |||
5332 | /* | ||
5333 | * pcs entry data for passive connections | ||
5334 | */ | ||
5335 | struct l5cm_pcs_hdr { | ||
5336 | struct l5cm_hash_input_string hash_input_string; | ||
5337 | struct l5cm_conn_addr_params conn_addr_buf; | ||
5338 | u32 cid; | ||
5339 | u32 hash_result; | ||
5340 | union l5cm_seg_params seg_params; | ||
5341 | struct l5cm_pcs_attributes att; | ||
5342 | #if defined(__BIG_ENDIAN) | ||
5343 | u16 rsrv; | ||
5344 | u16 rx_seg_size; | ||
5345 | #elif defined(__LITTLE_ENDIAN) | ||
5346 | u16 rx_seg_size; | ||
5347 | u16 rsrv; | ||
5348 | #endif | ||
5349 | }; | ||
5350 | |||
5351 | /* | ||
5352 | * pcs entry for passive connections | ||
5353 | */ | ||
5354 | struct l5cm_pcs_entry { | ||
5355 | struct l5cm_pcs_hdr hdr; | ||
5356 | u8 rx_segment[1516]; | ||
5357 | }; | ||
5358 | |||
5359 | |||
5360 | |||
5361 | |||
4504 | /* | 5362 | /* |
4505 | * l5cm connection parameters | 5363 | * l5cm connection parameters |
4506 | */ | 5364 | */ |
@@ -4535,6 +5393,29 @@ struct l5cm_spe { | |||
4535 | union l5cm_specific_data data; | 5393 | union l5cm_specific_data data; |
4536 | }; | 5394 | }; |
4537 | 5395 | ||
5396 | |||
5397 | |||
5398 | |||
5399 | /* | ||
5400 | * Termination variables | ||
5401 | */ | ||
5402 | struct l5cm_term_vars { | ||
5403 | u8 BitMap; | ||
5404 | #define L5CM_TERM_VARS_TCP_STATE (0xF<<0) | ||
5405 | #define L5CM_TERM_VARS_TCP_STATE_SHIFT 0 | ||
5406 | #define L5CM_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4) | ||
5407 | #define L5CM_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4 | ||
5408 | #define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5) | ||
5409 | #define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5 | ||
5410 | #define L5CM_TERM_VARS_TERM_ON_CHIP (0x1<<6) | ||
5411 | #define L5CM_TERM_VARS_TERM_ON_CHIP_SHIFT 6 | ||
5412 | #define L5CM_TERM_VARS_RSRV (0x1<<7) | ||
5413 | #define L5CM_TERM_VARS_RSRV_SHIFT 7 | ||
5414 | }; | ||
5415 | |||
5416 | |||
5417 | |||
5418 | |||
4538 | /* | 5419 | /* |
4539 | * Tstorm Tcp flags | 5420 | * Tstorm Tcp flags |
4540 | */ | 5421 | */ |
@@ -4550,6 +5431,7 @@ struct tstorm_l5cm_tcp_flags { | |||
4550 | #define TSTORM_L5CM_TCP_FLAGS_RSRV1_SHIFT 14 | 5431 | #define TSTORM_L5CM_TCP_FLAGS_RSRV1_SHIFT 14 |
4551 | }; | 5432 | }; |
4552 | 5433 | ||
5434 | |||
4553 | /* | 5435 | /* |
4554 | * Xstorm Tcp flags | 5436 | * Xstorm Tcp flags |
4555 | */ | 5437 | */ |
@@ -4565,4 +5447,38 @@ struct xstorm_l5cm_tcp_flags { | |||
4565 | #define XSTORM_L5CM_TCP_FLAGS_RSRV_SHIFT 3 | 5447 | #define XSTORM_L5CM_TCP_FLAGS_RSRV_SHIFT 3 |
4566 | }; | 5448 | }; |
4567 | 5449 | ||
4568 | #endif /* CNIC_DEFS_H */ | 5450 | |
5451 | |||
5452 | /* | ||
5453 | * Out-of-order states | ||
5454 | */ | ||
5455 | enum tcp_ooo_event { | ||
5456 | TCP_EVENT_ADD_PEN = 0, | ||
5457 | TCP_EVENT_ADD_NEW_ISLE = 1, | ||
5458 | TCP_EVENT_ADD_ISLE_RIGHT = 2, | ||
5459 | TCP_EVENT_ADD_ISLE_LEFT = 3, | ||
5460 | TCP_EVENT_JOIN = 4, | ||
5461 | TCP_EVENT_NOP = 5, | ||
5462 | MAX_TCP_OOO_EVENT | ||
5463 | }; | ||
5464 | |||
5465 | |||
5466 | /* | ||
5467 | * OOO support modes | ||
5468 | */ | ||
5469 | enum tcp_tstorm_ooo { | ||
5470 | TCP_TSTORM_OOO_DROP_AND_PROC_ACK = 0, | ||
5471 | TCP_TSTORM_OOO_SEND_PURE_ACK = 1, | ||
5472 | TCP_TSTORM_OOO_SUPPORTED = 2, | ||
5473 | MAX_TCP_TSTORM_OOO | ||
5474 | }; | ||
5475 | |||
5476 | |||
5477 | |||
5478 | |||
5479 | |||
5480 | |||
5481 | |||
5482 | |||
5483 | |||
5484 | #endif /* __5710_HSI_CNIC_LE__ */ | ||
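The rewritten cnic_defs.h above follows the HSI convention of describing every sub-byte field with a (MASK, SHIFT) macro pair placed next to the member that holds it, as in the l5cm_term_vars bit map and the new tcp_ooo_event/tcp_tstorm_ooo enums added for 7.0 out-of-order handling. A minimal sketch of how such a pair is normally consumed follows; the struct is abbreviated to the bits actually used and the helper names are invented for illustration, they are not part of the patch.

#include <linux/types.h>

struct l5cm_term_vars {			/* abbreviated from the hunk above */
	u8 BitMap;
#define L5CM_TERM_VARS_TCP_STATE	(0xF<<0)
#define L5CM_TERM_VARS_TCP_STATE_SHIFT	0
};

/* Store a 4-bit TCP state without disturbing the flag bits above it. */
static inline void l5cm_set_tcp_state(struct l5cm_term_vars *tv, u8 state)
{
	tv->BitMap &= ~L5CM_TERM_VARS_TCP_STATE;
	tv->BitMap |= (state << L5CM_TERM_VARS_TCP_STATE_SHIFT) &
		      L5CM_TERM_VARS_TCP_STATE;
}

/* Read the same field back out of the packed byte. */
static inline u8 l5cm_get_tcp_state(const struct l5cm_term_vars *tv)
{
	return (tv->BitMap & L5CM_TERM_VARS_TCP_STATE) >>
	       L5CM_TERM_VARS_TCP_STATE_SHIFT;
}

The same mask-then-shift pattern applies to every FCOE_*/ISCSI_*/L5CM_* flag field generated by this HSI.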
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h index fdd8e46a9050..bc7000a2d4e1 100644 --- a/drivers/net/cnic_if.h +++ b/drivers/net/cnic_if.h | |||
@@ -12,8 +12,8 @@ | |||
12 | #ifndef CNIC_IF_H | 12 | #ifndef CNIC_IF_H |
13 | #define CNIC_IF_H | 13 | #define CNIC_IF_H |
14 | 14 | ||
15 | #define CNIC_MODULE_VERSION "2.2.14" | 15 | #define CNIC_MODULE_VERSION "2.5.3" |
16 | #define CNIC_MODULE_RELDATE "Mar 30, 2011" | 16 | #define CNIC_MODULE_RELDATE "June 6, 2011" |
17 | 17 | ||
18 | #define CNIC_ULP_RDMA 0 | 18 | #define CNIC_ULP_RDMA 0 |
19 | #define CNIC_ULP_ISCSI 1 | 19 | #define CNIC_ULP_ISCSI 1 |
@@ -99,6 +99,8 @@ struct kcqe { | |||
99 | 99 | ||
100 | struct cnic_ctl_completion { | 100 | struct cnic_ctl_completion { |
101 | u32 cid; | 101 | u32 cid; |
102 | u8 opcode; | ||
103 | u8 error; | ||
102 | }; | 104 | }; |
103 | 105 | ||
104 | struct cnic_ctl_info { | 106 | struct cnic_ctl_info { |
@@ -169,7 +171,7 @@ struct cnic_eth_dev { | |||
169 | struct pci_dev *pdev; | 171 | struct pci_dev *pdev; |
170 | void __iomem *io_base; | 172 | void __iomem *io_base; |
171 | void __iomem *io_base2; | 173 | void __iomem *io_base2; |
172 | void *iro_arr; | 174 | const void *iro_arr; |
173 | 175 | ||
174 | u32 ctx_tbl_offset; | 176 | u32 ctx_tbl_offset; |
175 | u32 ctx_tbl_len; | 177 | u32 ctx_tbl_len; |
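Besides the version bump to 2.5.3, cnic_if.h makes two interface changes: struct cnic_ctl_completion now carries the originating opcode and an error status next to the cid, and the iro_arr pointer becomes const. Below is a minimal sketch of a consumer of the extended completion; the handler name is invented for illustration and only the struct layout is taken from the hunk above.

#include <linux/types.h>
#include <linux/printk.h>

struct cnic_ctl_completion {		/* as extended above */
	u32 cid;
	u8 opcode;
	u8 error;
};

/* Act on one ramrod completion using its own status instead of having
 * to infer success or failure from the cid alone. */
static void example_handle_ctl_completion(const struct cnic_ctl_completion *comp)
{
	if (comp->error) {
		pr_warn("ramrod op 0x%x on cid 0x%x failed, error 0x%x\n",
			comp->opcode, comp->cid, comp->error);
		return;
	}
	/* success path: release whatever was waiting on comp->cid */
}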
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h index 97a61b4d81b7..e1f1e3448f98 100644 --- a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h +++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h | |||
@@ -19,6 +19,23 @@ struct b577xx_doorbell_hdr { | |||
19 | /* | 19 | /* |
20 | * doorbell message sent to the chip | 20 | * doorbell message sent to the chip |
21 | */ | 21 | */ |
22 | struct b577xx_doorbell { | ||
23 | #if defined(__BIG_ENDIAN) | ||
24 | u16 zero_fill2; | ||
25 | u8 zero_fill1; | ||
26 | struct b577xx_doorbell_hdr header; | ||
27 | #elif defined(__LITTLE_ENDIAN) | ||
28 | struct b577xx_doorbell_hdr header; | ||
29 | u8 zero_fill1; | ||
30 | u16 zero_fill2; | ||
31 | #endif | ||
32 | }; | ||
33 | |||
34 | |||
35 | |||
36 | /* | ||
37 | * doorbell message sent to the chip | ||
38 | */ | ||
22 | struct b577xx_doorbell_set_prod { | 39 | struct b577xx_doorbell_set_prod { |
23 | #if defined(__BIG_ENDIAN) | 40 | #if defined(__BIG_ENDIAN) |
24 | u16 prod; | 41 | u16 prod; |
@@ -39,106 +56,63 @@ struct regpair { | |||
39 | 56 | ||
40 | 57 | ||
41 | /* | 58 | /* |
42 | * Fixed size structure in order to plant it in Union structure | 59 | * ABTS info $$KEEP_ENDIANNESS$$ |
43 | */ | 60 | */ |
44 | struct fcoe_abts_rsp_union { | 61 | struct fcoe_abts_info { |
45 | u32 r_ctl; | 62 | __le16 aborted_task_id; |
46 | u32 abts_rsp_payload[7]; | 63 | __le16 reserved0; |
64 | __le32 reserved1; | ||
47 | }; | 65 | }; |
48 | 66 | ||
49 | 67 | ||
50 | /* | 68 | /* |
51 | * 4 regs size | 69 | * Fixed size structure in order to plant it in Union structure |
70 | * $$KEEP_ENDIANNESS$$ | ||
52 | */ | 71 | */ |
53 | struct fcoe_bd_ctx { | 72 | struct fcoe_abts_rsp_union { |
54 | u32 buf_addr_hi; | 73 | u8 r_ctl; |
55 | u32 buf_addr_lo; | 74 | u8 rsrv[3]; |
56 | #if defined(__BIG_ENDIAN) | 75 | __le32 abts_rsp_payload[7]; |
57 | u16 rsrv0; | ||
58 | u16 buf_len; | ||
59 | #elif defined(__LITTLE_ENDIAN) | ||
60 | u16 buf_len; | ||
61 | u16 rsrv0; | ||
62 | #endif | ||
63 | #if defined(__BIG_ENDIAN) | ||
64 | u16 rsrv1; | ||
65 | u16 flags; | ||
66 | #elif defined(__LITTLE_ENDIAN) | ||
67 | u16 flags; | ||
68 | u16 rsrv1; | ||
69 | #endif | ||
70 | }; | 76 | }; |
71 | 77 | ||
72 | 78 | ||
73 | struct fcoe_cleanup_flow_info { | 79 | /* |
74 | #if defined(__BIG_ENDIAN) | 80 | * 4 regs size $$KEEP_ENDIANNESS$$ |
75 | u16 reserved1; | 81 | */ |
76 | u16 task_id; | 82 | struct fcoe_bd_ctx { |
77 | #elif defined(__LITTLE_ENDIAN) | 83 | __le32 buf_addr_hi; |
78 | u16 task_id; | 84 | __le32 buf_addr_lo; |
79 | u16 reserved1; | 85 | __le16 buf_len; |
80 | #endif | 86 | __le16 rsrv0; |
81 | u32 reserved2[7]; | 87 | __le16 flags; |
88 | __le16 rsrv1; | ||
82 | }; | 89 | }; |
83 | 90 | ||
84 | 91 | ||
85 | struct fcoe_fcp_cmd_payload { | 92 | /* |
86 | u32 opaque[8]; | 93 | * FCoE cached sges context $$KEEP_ENDIANNESS$$ |
87 | }; | 94 | */ |
88 | 95 | struct fcoe_cached_sge_ctx { | |
89 | struct fcoe_fc_hdr { | 96 | struct regpair cur_buf_addr; |
90 | #if defined(__BIG_ENDIAN) | 97 | __le16 cur_buf_rem; |
91 | u8 cs_ctl; | 98 | __le16 second_buf_rem; |
92 | u8 s_id[3]; | 99 | struct regpair second_buf_addr; |
93 | #elif defined(__LITTLE_ENDIAN) | ||
94 | u8 s_id[3]; | ||
95 | u8 cs_ctl; | ||
96 | #endif | ||
97 | #if defined(__BIG_ENDIAN) | ||
98 | u8 r_ctl; | ||
99 | u8 d_id[3]; | ||
100 | #elif defined(__LITTLE_ENDIAN) | ||
101 | u8 d_id[3]; | ||
102 | u8 r_ctl; | ||
103 | #endif | ||
104 | #if defined(__BIG_ENDIAN) | ||
105 | u8 seq_id; | ||
106 | u8 df_ctl; | ||
107 | u16 seq_cnt; | ||
108 | #elif defined(__LITTLE_ENDIAN) | ||
109 | u16 seq_cnt; | ||
110 | u8 df_ctl; | ||
111 | u8 seq_id; | ||
112 | #endif | ||
113 | #if defined(__BIG_ENDIAN) | ||
114 | u8 type; | ||
115 | u8 f_ctl[3]; | ||
116 | #elif defined(__LITTLE_ENDIAN) | ||
117 | u8 f_ctl[3]; | ||
118 | u8 type; | ||
119 | #endif | ||
120 | u32 parameters; | ||
121 | #if defined(__BIG_ENDIAN) | ||
122 | u16 ox_id; | ||
123 | u16 rx_id; | ||
124 | #elif defined(__LITTLE_ENDIAN) | ||
125 | u16 rx_id; | ||
126 | u16 ox_id; | ||
127 | #endif | ||
128 | }; | 100 | }; |
129 | 101 | ||
130 | struct fcoe_fc_frame { | ||
131 | struct fcoe_fc_hdr fc_hdr; | ||
132 | u32 reserved0[2]; | ||
133 | }; | ||
134 | 102 | ||
135 | union fcoe_cmd_flow_info { | 103 | /* |
136 | struct fcoe_fcp_cmd_payload fcp_cmd_payload; | 104 | * Cleanup info $$KEEP_ENDIANNESS$$ |
137 | struct fcoe_fc_frame mp_fc_frame; | 105 | */ |
106 | struct fcoe_cleanup_info { | ||
107 | __le16 cleaned_task_id; | ||
108 | __le16 rolled_tx_seq_cnt; | ||
109 | __le32 rolled_tx_data_offset; | ||
138 | }; | 110 | }; |
139 | 111 | ||
140 | 112 | ||
141 | 113 | /* | |
114 | * Fcp RSP flags $$KEEP_ENDIANNESS$$ | ||
115 | */ | ||
142 | struct fcoe_fcp_rsp_flags { | 116 | struct fcoe_fcp_rsp_flags { |
143 | u8 flags; | 117 | u8 flags; |
144 | #define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0) | 118 | #define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0) |
@@ -155,95 +129,168 @@ struct fcoe_fcp_rsp_flags { | |||
155 | #define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5 | 129 | #define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5 |
156 | }; | 130 | }; |
157 | 131 | ||
158 | 132 | /* | |
133 | * Fcp RSP payload $$KEEP_ENDIANNESS$$ | ||
134 | */ | ||
159 | struct fcoe_fcp_rsp_payload { | 135 | struct fcoe_fcp_rsp_payload { |
160 | struct regpair reserved0; | 136 | struct regpair reserved0; |
161 | u32 fcp_resid; | 137 | __le32 fcp_resid; |
162 | #if defined(__BIG_ENDIAN) | ||
163 | u16 retry_delay_timer; | ||
164 | struct fcoe_fcp_rsp_flags fcp_flags; | ||
165 | u8 scsi_status_code; | ||
166 | #elif defined(__LITTLE_ENDIAN) | ||
167 | u8 scsi_status_code; | 138 | u8 scsi_status_code; |
168 | struct fcoe_fcp_rsp_flags fcp_flags; | 139 | struct fcoe_fcp_rsp_flags fcp_flags; |
169 | u16 retry_delay_timer; | 140 | __le16 retry_delay_timer; |
170 | #endif | 141 | __le32 fcp_rsp_len; |
171 | u32 fcp_rsp_len; | 142 | __le32 fcp_sns_len; |
172 | u32 fcp_sns_len; | ||
173 | }; | 143 | }; |
174 | 144 | ||
175 | |||
176 | /* | 145 | /* |
177 | * Fixed size structure in order to plant it in Union structure | 146 | * Fixed size structure in order to plant it in Union structure |
147 | * $$KEEP_ENDIANNESS$$ | ||
178 | */ | 148 | */ |
179 | struct fcoe_fcp_rsp_union { | 149 | struct fcoe_fcp_rsp_union { |
180 | struct fcoe_fcp_rsp_payload payload; | 150 | struct fcoe_fcp_rsp_payload payload; |
181 | struct regpair reserved0; | 151 | struct regpair reserved0; |
182 | }; | 152 | }; |
183 | 153 | ||
154 | /* | ||
155 | * FC header $$KEEP_ENDIANNESS$$ | ||
156 | */ | ||
157 | struct fcoe_fc_hdr { | ||
158 | u8 s_id[3]; | ||
159 | u8 cs_ctl; | ||
160 | u8 d_id[3]; | ||
161 | u8 r_ctl; | ||
162 | __le16 seq_cnt; | ||
163 | u8 df_ctl; | ||
164 | u8 seq_id; | ||
165 | u8 f_ctl[3]; | ||
166 | u8 type; | ||
167 | __le32 parameters; | ||
168 | __le16 rx_id; | ||
169 | __le16 ox_id; | ||
170 | }; | ||
184 | 171 | ||
185 | struct fcoe_fcp_xfr_rdy_payload { | 172 | /* |
186 | u32 burst_len; | 173 | * FC header union $$KEEP_ENDIANNESS$$ |
187 | u32 data_ro; | 174 | */ |
175 | struct fcoe_mp_rsp_union { | ||
176 | struct fcoe_fc_hdr fc_hdr; | ||
177 | __le32 mp_payload_len; | ||
178 | __le32 rsrv; | ||
188 | }; | 179 | }; |
189 | 180 | ||
190 | struct fcoe_read_flow_info { | 181 | /* |
191 | struct fcoe_fc_hdr fc_data_in_hdr; | 182 | * Completion information $$KEEP_ENDIANNESS$$ |
192 | u32 reserved[2]; | 183 | */ |
184 | union fcoe_comp_flow_info { | ||
185 | struct fcoe_fcp_rsp_union fcp_rsp; | ||
186 | struct fcoe_abts_rsp_union abts_rsp; | ||
187 | struct fcoe_mp_rsp_union mp_rsp; | ||
188 | __le32 opaque[8]; | ||
193 | }; | 189 | }; |
194 | 190 | ||
195 | struct fcoe_write_flow_info { | 191 | |
196 | struct fcoe_fc_hdr fc_data_out_hdr; | 192 | /* |
197 | struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload; | 193 | * External ABTS info $$KEEP_ENDIANNESS$$ |
194 | */ | ||
195 | struct fcoe_ext_abts_info { | ||
196 | __le32 rsrv0[6]; | ||
197 | struct fcoe_abts_info ctx; | ||
198 | }; | 198 | }; |
199 | 199 | ||
200 | union fcoe_rsp_flow_info { | 200 | |
201 | struct fcoe_fcp_rsp_union fcp_rsp; | 201 | /* |
202 | struct fcoe_abts_rsp_union abts_rsp; | 202 | * External cleanup info $$KEEP_ENDIANNESS$$ |
203 | */ | ||
204 | struct fcoe_ext_cleanup_info { | ||
205 | __le32 rsrv0[6]; | ||
206 | struct fcoe_cleanup_info ctx; | ||
203 | }; | 207 | }; |
204 | 208 | ||
209 | |||
205 | /* | 210 | /* |
206 | * 32 bytes used for general purposes | 211 | * Fcoe FW Tx sequence context $$KEEP_ENDIANNESS$$ |
207 | */ | 212 | */ |
208 | union fcoe_general_task_ctx { | 213 | struct fcoe_fw_tx_seq_ctx { |
209 | union fcoe_cmd_flow_info cmd_info; | 214 | __le32 data_offset; |
210 | struct fcoe_read_flow_info read_info; | 215 | __le16 seq_cnt; |
211 | struct fcoe_write_flow_info write_info; | 216 | __le16 rsrv0; |
212 | union fcoe_rsp_flow_info rsp_info; | 217 | }; |
213 | struct fcoe_cleanup_flow_info cleanup_info; | 218 | |
214 | u32 comp_info[8]; | 219 | /* |
220 | * Fcoe external FW Tx sequence context $$KEEP_ENDIANNESS$$ | ||
221 | */ | ||
222 | struct fcoe_ext_fw_tx_seq_ctx { | ||
223 | __le32 rsrv0[6]; | ||
224 | struct fcoe_fw_tx_seq_ctx ctx; | ||
225 | }; | ||
226 | |||
227 | |||
228 | /* | ||
229 | * FCoE multiple sges context $$KEEP_ENDIANNESS$$ | ||
230 | */ | ||
231 | struct fcoe_mul_sges_ctx { | ||
232 | struct regpair cur_sge_addr; | ||
233 | __le16 cur_sge_off; | ||
234 | u8 cur_sge_idx; | ||
235 | u8 sgl_size; | ||
236 | }; | ||
237 | |||
238 | /* | ||
239 | * FCoE external multiple sges context $$KEEP_ENDIANNESS$$ | ||
240 | */ | ||
241 | struct fcoe_ext_mul_sges_ctx { | ||
242 | struct fcoe_mul_sges_ctx mul_sgl; | ||
243 | struct regpair rsrv0; | ||
215 | }; | 244 | }; |
216 | 245 | ||
217 | 246 | ||
218 | /* | 247 | /* |
219 | * FCoE KCQ CQE parameters | 248 | * FCP CMD payload $$KEEP_ENDIANNESS$$ |
249 | */ | ||
250 | struct fcoe_fcp_cmd_payload { | ||
251 | __le32 opaque[8]; | ||
252 | }; | ||
253 | |||
254 | |||
255 | |||
256 | |||
257 | |||
258 | /* | ||
259 | * Fcp xfr rdy payload $$KEEP_ENDIANNESS$$ | ||
260 | */ | ||
261 | struct fcoe_fcp_xfr_rdy_payload { | ||
262 | __le32 burst_len; | ||
263 | __le32 data_ro; | ||
264 | }; | ||
265 | |||
266 | |||
267 | /* | ||
268 | * FC frame $$KEEP_ENDIANNESS$$ | ||
269 | */ | ||
270 | struct fcoe_fc_frame { | ||
271 | struct fcoe_fc_hdr fc_hdr; | ||
272 | __le32 reserved0[2]; | ||
273 | }; | ||
274 | |||
275 | |||
276 | |||
277 | |||
278 | /* | ||
279 | * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$ | ||
220 | */ | 280 | */ |
221 | union fcoe_kcqe_params { | 281 | union fcoe_kcqe_params { |
222 | u32 reserved0[4]; | 282 | __le32 reserved0[4]; |
223 | }; | 283 | }; |
224 | 284 | ||
225 | /* | 285 | /* |
226 | * FCoE KCQ CQE | 286 | * FCoE KCQ CQE $$KEEP_ENDIANNESS$$ |
227 | */ | 287 | */ |
228 | struct fcoe_kcqe { | 288 | struct fcoe_kcqe { |
229 | u32 fcoe_conn_id; | 289 | __le32 fcoe_conn_id; |
230 | u32 completion_status; | 290 | __le32 completion_status; |
231 | u32 fcoe_conn_context_id; | 291 | __le32 fcoe_conn_context_id; |
232 | union fcoe_kcqe_params params; | 292 | union fcoe_kcqe_params params; |
233 | #if defined(__BIG_ENDIAN) | 293 | __le16 qe_self_seq; |
234 | u8 flags; | ||
235 | #define FCOE_KCQE_RESERVED0 (0x7<<0) | ||
236 | #define FCOE_KCQE_RESERVED0_SHIFT 0 | ||
237 | #define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3) | ||
238 | #define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3 | ||
239 | #define FCOE_KCQE_LAYER_CODE (0x7<<4) | ||
240 | #define FCOE_KCQE_LAYER_CODE_SHIFT 4 | ||
241 | #define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) | ||
242 | #define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 | ||
243 | u8 op_code; | ||
244 | u16 qe_self_seq; | ||
245 | #elif defined(__LITTLE_ENDIAN) | ||
246 | u16 qe_self_seq; | ||
247 | u8 op_code; | 294 | u8 op_code; |
248 | u8 flags; | 295 | u8 flags; |
249 | #define FCOE_KCQE_RESERVED0 (0x7<<0) | 296 | #define FCOE_KCQE_RESERVED0 (0x7<<0) |
@@ -254,23 +301,14 @@ struct fcoe_kcqe { | |||
254 | #define FCOE_KCQE_LAYER_CODE_SHIFT 4 | 301 | #define FCOE_KCQE_LAYER_CODE_SHIFT 4 |
255 | #define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) | 302 | #define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) |
256 | #define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 | 303 | #define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 |
257 | #endif | ||
258 | }; | 304 | }; |
259 | 305 | ||
306 | |||
307 | |||
260 | /* | 308 | /* |
261 | * FCoE KWQE header | 309 | * FCoE KWQE header $$KEEP_ENDIANNESS$$ |
262 | */ | 310 | */ |
263 | struct fcoe_kwqe_header { | 311 | struct fcoe_kwqe_header { |
264 | #if defined(__BIG_ENDIAN) | ||
265 | u8 flags; | ||
266 | #define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) | ||
267 | #define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0 | ||
268 | #define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4) | ||
269 | #define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 | ||
270 | #define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) | ||
271 | #define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 | ||
272 | u8 op_code; | ||
273 | #elif defined(__LITTLE_ENDIAN) | ||
274 | u8 op_code; | 312 | u8 op_code; |
275 | u8 flags; | 313 | u8 flags; |
276 | #define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) | 314 | #define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) |
@@ -279,50 +317,23 @@ struct fcoe_kwqe_header { | |||
279 | #define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 | 317 | #define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 |
280 | #define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) | 318 | #define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) |
281 | #define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 | 319 | #define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 |
282 | #endif | ||
283 | }; | 320 | }; |
284 | 321 | ||
285 | /* | 322 | /* |
286 | * FCoE firmware init request 1 | 323 | * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$ |
287 | */ | 324 | */ |
288 | struct fcoe_kwqe_init1 { | 325 | struct fcoe_kwqe_init1 { |
289 | #if defined(__BIG_ENDIAN) | 326 | __le16 num_tasks; |
290 | struct fcoe_kwqe_header hdr; | 327 | struct fcoe_kwqe_header hdr; |
291 | u16 num_tasks; | 328 | __le32 task_list_pbl_addr_lo; |
292 | #elif defined(__LITTLE_ENDIAN) | 329 | __le32 task_list_pbl_addr_hi; |
293 | u16 num_tasks; | 330 | __le32 dummy_buffer_addr_lo; |
294 | struct fcoe_kwqe_header hdr; | 331 | __le32 dummy_buffer_addr_hi; |
295 | #endif | 332 | __le16 sq_num_wqes; |
296 | u32 task_list_pbl_addr_lo; | 333 | __le16 rq_num_wqes; |
297 | u32 task_list_pbl_addr_hi; | 334 | __le16 rq_buffer_log_size; |
298 | u32 dummy_buffer_addr_lo; | 335 | __le16 cq_num_wqes; |
299 | u32 dummy_buffer_addr_hi; | 336 | __le16 mtu; |
300 | #if defined(__BIG_ENDIAN) | ||
301 | u16 rq_num_wqes; | ||
302 | u16 sq_num_wqes; | ||
303 | #elif defined(__LITTLE_ENDIAN) | ||
304 | u16 sq_num_wqes; | ||
305 | u16 rq_num_wqes; | ||
306 | #endif | ||
307 | #if defined(__BIG_ENDIAN) | ||
308 | u16 cq_num_wqes; | ||
309 | u16 rq_buffer_log_size; | ||
310 | #elif defined(__LITTLE_ENDIAN) | ||
311 | u16 rq_buffer_log_size; | ||
312 | u16 cq_num_wqes; | ||
313 | #endif | ||
314 | #if defined(__BIG_ENDIAN) | ||
315 | u8 flags; | ||
316 | #define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) | ||
317 | #define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0 | ||
318 | #define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4) | ||
319 | #define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 | ||
320 | #define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) | ||
321 | #define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 | ||
322 | u8 num_sessions_log; | ||
323 | u16 mtu; | ||
324 | #elif defined(__LITTLE_ENDIAN) | ||
325 | u16 mtu; | ||
326 | u8 num_sessions_log; | 337 | u8 num_sessions_log; |
327 | u8 flags; | 338 | u8 flags; |
328 | #define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) | 339 | #define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) |
@@ -331,113 +342,73 @@ struct fcoe_kwqe_init1 { | |||
331 | #define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 | 342 | #define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 |
332 | #define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) | 343 | #define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) |
333 | #define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 | 344 | #define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 |
334 | #endif | ||
335 | }; | 345 | }; |
336 | 346 | ||
337 | /* | 347 | /* |
338 | * FCoE firmware init request 2 | 348 | * FCoE firmware init request 2 $$KEEP_ENDIANNESS$$ |
339 | */ | 349 | */ |
340 | struct fcoe_kwqe_init2 { | 350 | struct fcoe_kwqe_init2 { |
341 | #if defined(__BIG_ENDIAN) | 351 | u8 hsi_major_version; |
342 | struct fcoe_kwqe_header hdr; | 352 | u8 hsi_minor_version; |
343 | u16 reserved0; | ||
344 | #elif defined(__LITTLE_ENDIAN) | ||
345 | u16 reserved0; | ||
346 | struct fcoe_kwqe_header hdr; | 353 | struct fcoe_kwqe_header hdr; |
347 | #endif | 354 | __le32 hash_tbl_pbl_addr_lo; |
348 | u32 hash_tbl_pbl_addr_lo; | 355 | __le32 hash_tbl_pbl_addr_hi; |
349 | u32 hash_tbl_pbl_addr_hi; | 356 | __le32 t2_hash_tbl_addr_lo; |
350 | u32 t2_hash_tbl_addr_lo; | 357 | __le32 t2_hash_tbl_addr_hi; |
351 | u32 t2_hash_tbl_addr_hi; | 358 | __le32 t2_ptr_hash_tbl_addr_lo; |
352 | u32 t2_ptr_hash_tbl_addr_lo; | 359 | __le32 t2_ptr_hash_tbl_addr_hi; |
353 | u32 t2_ptr_hash_tbl_addr_hi; | 360 | __le32 free_list_count; |
354 | u32 free_list_count; | ||
355 | }; | 361 | }; |
356 | 362 | ||
357 | /* | 363 | /* |
358 | * FCoE firmware init request 3 | 364 | * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$ |
359 | */ | 365 | */ |
360 | struct fcoe_kwqe_init3 { | 366 | struct fcoe_kwqe_init3 { |
361 | #if defined(__BIG_ENDIAN) | 367 | __le16 reserved0; |
362 | struct fcoe_kwqe_header hdr; | ||
363 | u16 reserved0; | ||
364 | #elif defined(__LITTLE_ENDIAN) | ||
365 | u16 reserved0; | ||
366 | struct fcoe_kwqe_header hdr; | 368 | struct fcoe_kwqe_header hdr; |
367 | #endif | 369 | __le32 error_bit_map_lo; |
368 | u32 error_bit_map_lo; | 370 | __le32 error_bit_map_hi; |
369 | u32 error_bit_map_hi; | 371 | u8 perf_config; |
370 | #if defined(__BIG_ENDIAN) | ||
371 | u8 reserved21[3]; | ||
372 | u8 cached_session_enable; | ||
373 | #elif defined(__LITTLE_ENDIAN) | ||
374 | u8 cached_session_enable; | ||
375 | u8 reserved21[3]; | 372 | u8 reserved21[3]; |
376 | #endif | 373 | __le32 reserved2[4]; |
377 | u32 reserved2[4]; | ||
378 | }; | 374 | }; |
379 | 375 | ||
380 | /* | 376 | /* |
381 | * FCoE connection offload request 1 | 377 | * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$ |
382 | */ | 378 | */ |
383 | struct fcoe_kwqe_conn_offload1 { | 379 | struct fcoe_kwqe_conn_offload1 { |
384 | #if defined(__BIG_ENDIAN) | 380 | __le16 fcoe_conn_id; |
385 | struct fcoe_kwqe_header hdr; | 381 | struct fcoe_kwqe_header hdr; |
386 | u16 fcoe_conn_id; | 382 | __le32 sq_addr_lo; |
387 | #elif defined(__LITTLE_ENDIAN) | 383 | __le32 sq_addr_hi; |
388 | u16 fcoe_conn_id; | 384 | __le32 rq_pbl_addr_lo; |
389 | struct fcoe_kwqe_header hdr; | 385 | __le32 rq_pbl_addr_hi; |
390 | #endif | 386 | __le32 rq_first_pbe_addr_lo; |
391 | u32 sq_addr_lo; | 387 | __le32 rq_first_pbe_addr_hi; |
392 | u32 sq_addr_hi; | 388 | __le16 rq_prod; |
393 | u32 rq_pbl_addr_lo; | 389 | __le16 reserved0; |
394 | u32 rq_pbl_addr_hi; | ||
395 | u32 rq_first_pbe_addr_lo; | ||
396 | u32 rq_first_pbe_addr_hi; | ||
397 | #if defined(__BIG_ENDIAN) | ||
398 | u16 reserved0; | ||
399 | u16 rq_prod; | ||
400 | #elif defined(__LITTLE_ENDIAN) | ||
401 | u16 rq_prod; | ||
402 | u16 reserved0; | ||
403 | #endif | ||
404 | }; | 390 | }; |
405 | 391 | ||
406 | /* | 392 | /* |
407 | * FCoE connection offload request 2 | 393 | * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$ |
408 | */ | 394 | */ |
409 | struct fcoe_kwqe_conn_offload2 { | 395 | struct fcoe_kwqe_conn_offload2 { |
410 | #if defined(__BIG_ENDIAN) | 396 | __le16 tx_max_fc_pay_len; |
411 | struct fcoe_kwqe_header hdr; | ||
412 | u16 tx_max_fc_pay_len; | ||
413 | #elif defined(__LITTLE_ENDIAN) | ||
414 | u16 tx_max_fc_pay_len; | ||
415 | struct fcoe_kwqe_header hdr; | 397 | struct fcoe_kwqe_header hdr; |
416 | #endif | 398 | __le32 cq_addr_lo; |
417 | u32 cq_addr_lo; | 399 | __le32 cq_addr_hi; |
418 | u32 cq_addr_hi; | 400 | __le32 xferq_addr_lo; |
419 | u32 xferq_addr_lo; | 401 | __le32 xferq_addr_hi; |
420 | u32 xferq_addr_hi; | 402 | __le32 conn_db_addr_lo; |
421 | u32 conn_db_addr_lo; | 403 | __le32 conn_db_addr_hi; |
422 | u32 conn_db_addr_hi; | 404 | __le32 reserved1; |
423 | u32 reserved1; | ||
424 | }; | 405 | }; |
425 | 406 | ||
426 | /* | 407 | /* |
427 | * FCoE connection offload request 3 | 408 | * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$ |
428 | */ | 409 | */ |
429 | struct fcoe_kwqe_conn_offload3 { | 410 | struct fcoe_kwqe_conn_offload3 { |
430 | #if defined(__BIG_ENDIAN) | 411 | __le16 vlan_tag; |
431 | struct fcoe_kwqe_header hdr; | ||
432 | u16 vlan_tag; | ||
433 | #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) | ||
434 | #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 | ||
435 | #define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) | ||
436 | #define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12 | ||
437 | #define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) | ||
438 | #define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 | ||
439 | #elif defined(__LITTLE_ENDIAN) | ||
440 | u16 vlan_tag; | ||
441 | #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) | 412 | #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) |
442 | #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 | 413 | #define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 |
443 | #define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) | 414 | #define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) |
@@ -445,34 +416,8 @@ struct fcoe_kwqe_conn_offload3 { | |||
445 | #define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) | 416 | #define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) |
446 | #define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 | 417 | #define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 |
447 | struct fcoe_kwqe_header hdr; | 418 | struct fcoe_kwqe_header hdr; |
448 | #endif | ||
449 | #if defined(__BIG_ENDIAN) | ||
450 | u8 tx_max_conc_seqs_c3; | ||
451 | u8 s_id[3]; | ||
452 | #elif defined(__LITTLE_ENDIAN) | ||
453 | u8 s_id[3]; | 419 | u8 s_id[3]; |
454 | u8 tx_max_conc_seqs_c3; | 420 | u8 tx_max_conc_seqs_c3; |
455 | #endif | ||
456 | #if defined(__BIG_ENDIAN) | ||
457 | u8 flags; | ||
458 | #define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) | ||
459 | #define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0 | ||
460 | #define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1) | ||
461 | #define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1 | ||
462 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2) | ||
463 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2 | ||
464 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3) | ||
465 | #define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3 | ||
466 | #define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4) | ||
467 | #define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4 | ||
468 | #define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5) | ||
469 | #define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5 | ||
470 | #define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6) | ||
471 | #define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 | ||
472 | #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) | ||
473 | #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 | ||
474 | u8 d_id[3]; | ||
475 | #elif defined(__LITTLE_ENDIAN) | ||
476 | u8 d_id[3]; | 421 | u8 d_id[3]; |
477 | u8 flags; | 422 | u8 flags; |
478 | #define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) | 423 | #define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) |
@@ -491,79 +436,44 @@ struct fcoe_kwqe_conn_offload3 { | |||
491 | #define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 | 436 | #define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 |
492 | #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) | 437 | #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) |
493 | #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 | 438 | #define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 |
494 | #endif | 439 | __le32 reserved; |
495 | u32 reserved; | 440 | __le32 confq_first_pbe_addr_lo; |
496 | u32 confq_first_pbe_addr_lo; | 441 | __le32 confq_first_pbe_addr_hi; |
497 | u32 confq_first_pbe_addr_hi; | 442 | __le16 tx_total_conc_seqs; |
498 | #if defined(__BIG_ENDIAN) | 443 | __le16 rx_max_fc_pay_len; |
499 | u16 rx_max_fc_pay_len; | 444 | __le16 rx_total_conc_seqs; |
500 | u16 tx_total_conc_seqs; | ||
501 | #elif defined(__LITTLE_ENDIAN) | ||
502 | u16 tx_total_conc_seqs; | ||
503 | u16 rx_max_fc_pay_len; | ||
504 | #endif | ||
505 | #if defined(__BIG_ENDIAN) | ||
506 | u8 rx_open_seqs_exch_c3; | ||
507 | u8 rx_max_conc_seqs_c3; | ||
508 | u16 rx_total_conc_seqs; | ||
509 | #elif defined(__LITTLE_ENDIAN) | ||
510 | u16 rx_total_conc_seqs; | ||
511 | u8 rx_max_conc_seqs_c3; | 445 | u8 rx_max_conc_seqs_c3; |
512 | u8 rx_open_seqs_exch_c3; | 446 | u8 rx_open_seqs_exch_c3; |
513 | #endif | ||
514 | }; | 447 | }; |
515 | 448 | ||
516 | /* | 449 | /* |
517 | * FCoE connection offload request 4 | 450 | * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$ |
518 | */ | 451 | */ |
519 | struct fcoe_kwqe_conn_offload4 { | 452 | struct fcoe_kwqe_conn_offload4 { |
520 | #if defined(__BIG_ENDIAN) | ||
521 | struct fcoe_kwqe_header hdr; | ||
522 | u8 reserved2; | ||
523 | u8 e_d_tov_timer_val; | ||
524 | #elif defined(__LITTLE_ENDIAN) | ||
525 | u8 e_d_tov_timer_val; | 453 | u8 e_d_tov_timer_val; |
526 | u8 reserved2; | 454 | u8 reserved2; |
527 | struct fcoe_kwqe_header hdr; | 455 | struct fcoe_kwqe_header hdr; |
528 | #endif | 456 | u8 src_mac_addr_lo[2]; |
529 | u8 src_mac_addr_lo32[4]; | 457 | u8 src_mac_addr_mid[2]; |
530 | #if defined(__BIG_ENDIAN) | 458 | u8 src_mac_addr_hi[2]; |
531 | u8 dst_mac_addr_hi16[2]; | 459 | u8 dst_mac_addr_hi[2]; |
532 | u8 src_mac_addr_hi16[2]; | 460 | u8 dst_mac_addr_lo[2]; |
533 | #elif defined(__LITTLE_ENDIAN) | 461 | u8 dst_mac_addr_mid[2]; |
534 | u8 src_mac_addr_hi16[2]; | 462 | __le32 lcq_addr_lo; |
535 | u8 dst_mac_addr_hi16[2]; | 463 | __le32 lcq_addr_hi; |
536 | #endif | 464 | __le32 confq_pbl_base_addr_lo; |
537 | u8 dst_mac_addr_lo32[4]; | 465 | __le32 confq_pbl_base_addr_hi; |
538 | u32 lcq_addr_lo; | ||
539 | u32 lcq_addr_hi; | ||
540 | u32 confq_pbl_base_addr_lo; | ||
541 | u32 confq_pbl_base_addr_hi; | ||
542 | }; | 466 | }; |
543 | 467 | ||
544 | /* | 468 | /* |
545 | * FCoE connection enable request | 469 | * FCoE connection enable request $$KEEP_ENDIANNESS$$ |
546 | */ | 470 | */ |
547 | struct fcoe_kwqe_conn_enable_disable { | 471 | struct fcoe_kwqe_conn_enable_disable { |
548 | #if defined(__BIG_ENDIAN) | 472 | __le16 reserved0; |
549 | struct fcoe_kwqe_header hdr; | ||
550 | u16 reserved0; | ||
551 | #elif defined(__LITTLE_ENDIAN) | ||
552 | u16 reserved0; | ||
553 | struct fcoe_kwqe_header hdr; | 473 | struct fcoe_kwqe_header hdr; |
554 | #endif | 474 | u8 src_mac_addr_lo[2]; |
555 | u8 src_mac_addr_lo32[4]; | 475 | u8 src_mac_addr_mid[2]; |
556 | #if defined(__BIG_ENDIAN) | 476 | u8 src_mac_addr_hi[2]; |
557 | u16 vlan_tag; | ||
558 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) | ||
559 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 | ||
560 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12) | ||
561 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 | ||
562 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) | ||
563 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 | ||
564 | u8 src_mac_addr_hi16[2]; | ||
565 | #elif defined(__LITTLE_ENDIAN) | ||
566 | u8 src_mac_addr_hi16[2]; | ||
567 | u16 vlan_tag; | 477 | u16 vlan_tag; |
568 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) | 478 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) |
569 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 | 479 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 |
@@ -571,82 +481,52 @@ struct fcoe_kwqe_conn_enable_disable { | |||
571 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 | 481 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 |
572 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) | 482 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) |
573 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 | 483 | #define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 |
574 | #endif | 484 | u8 dst_mac_addr_lo[2]; |
575 | u8 dst_mac_addr_lo32[4]; | 485 | u8 dst_mac_addr_mid[2]; |
576 | #if defined(__BIG_ENDIAN) | 486 | u8 dst_mac_addr_hi[2]; |
577 | u16 reserved1; | 487 | __le16 reserved1; |
578 | u8 dst_mac_addr_hi16[2]; | ||
579 | #elif defined(__LITTLE_ENDIAN) | ||
580 | u8 dst_mac_addr_hi16[2]; | ||
581 | u16 reserved1; | ||
582 | #endif | ||
583 | #if defined(__BIG_ENDIAN) | ||
584 | u8 vlan_flag; | ||
585 | u8 s_id[3]; | ||
586 | #elif defined(__LITTLE_ENDIAN) | ||
587 | u8 s_id[3]; | 488 | u8 s_id[3]; |
588 | u8 vlan_flag; | 489 | u8 vlan_flag; |
589 | #endif | ||
590 | #if defined(__BIG_ENDIAN) | ||
591 | u8 reserved3; | ||
592 | u8 d_id[3]; | ||
593 | #elif defined(__LITTLE_ENDIAN) | ||
594 | u8 d_id[3]; | 490 | u8 d_id[3]; |
595 | u8 reserved3; | 491 | u8 reserved3; |
596 | #endif | 492 | __le32 context_id; |
597 | u32 context_id; | 493 | __le32 conn_id; |
598 | u32 conn_id; | 494 | __le32 reserved4; |
599 | u32 reserved4; | ||
600 | }; | 495 | }; |
601 | 496 | ||
602 | /* | 497 | /* |
603 | * FCoE connection destroy request | 498 | * FCoE connection destroy request $$KEEP_ENDIANNESS$$ |
604 | */ | 499 | */ |
605 | struct fcoe_kwqe_conn_destroy { | 500 | struct fcoe_kwqe_conn_destroy { |
606 | #if defined(__BIG_ENDIAN) | 501 | __le16 reserved0; |
607 | struct fcoe_kwqe_header hdr; | ||
608 | u16 reserved0; | ||
609 | #elif defined(__LITTLE_ENDIAN) | ||
610 | u16 reserved0; | ||
611 | struct fcoe_kwqe_header hdr; | 502 | struct fcoe_kwqe_header hdr; |
612 | #endif | 503 | __le32 context_id; |
613 | u32 context_id; | 504 | __le32 conn_id; |
614 | u32 conn_id; | 505 | __le32 reserved1[5]; |
615 | u32 reserved1[5]; | ||
616 | }; | 506 | }; |
617 | 507 | ||
618 | /* | 508 | /* |
619 | * FCoe destroy request | 509 | * FCoe destroy request $$KEEP_ENDIANNESS$$ |
620 | */ | 510 | */ |
621 | struct fcoe_kwqe_destroy { | 511 | struct fcoe_kwqe_destroy { |
622 | #if defined(__BIG_ENDIAN) | 512 | __le16 reserved0; |
623 | struct fcoe_kwqe_header hdr; | ||
624 | u16 reserved0; | ||
625 | #elif defined(__LITTLE_ENDIAN) | ||
626 | u16 reserved0; | ||
627 | struct fcoe_kwqe_header hdr; | 513 | struct fcoe_kwqe_header hdr; |
628 | #endif | 514 | __le32 reserved1[7]; |
629 | u32 reserved1[7]; | ||
630 | }; | 515 | }; |
631 | 516 | ||
632 | /* | 517 | /* |
633 | * FCoe statistics request | 518 | * FCoe statistics request $$KEEP_ENDIANNESS$$ |
634 | */ | 519 | */ |
635 | struct fcoe_kwqe_stat { | 520 | struct fcoe_kwqe_stat { |
636 | #if defined(__BIG_ENDIAN) | 521 | __le16 reserved0; |
637 | struct fcoe_kwqe_header hdr; | 522 | struct fcoe_kwqe_header hdr; |
638 | u16 reserved0; | 523 | __le32 stat_params_addr_lo; |
639 | #elif defined(__LITTLE_ENDIAN) | 524 | __le32 stat_params_addr_hi; |
640 | u16 reserved0; | 525 | __le32 reserved1[5]; |
641 | struct fcoe_kwqe_header hdr; | ||
642 | #endif | ||
643 | u32 stat_params_addr_lo; | ||
644 | u32 stat_params_addr_hi; | ||
645 | u32 reserved1[5]; | ||
646 | }; | 526 | }; |
647 | 527 | ||
648 | /* | 528 | /* |
649 | * FCoE KWQ WQE | 529 | * FCoE KWQ WQE $$KEEP_ENDIANNESS$$ |
650 | */ | 530 | */ |
651 | union fcoe_kwqe { | 531 | union fcoe_kwqe { |
652 | struct fcoe_kwqe_init1 init1; | 532 | struct fcoe_kwqe_init1 init1; |
@@ -662,19 +542,42 @@ union fcoe_kwqe { | |||
662 | struct fcoe_kwqe_stat statistics; | 542 | struct fcoe_kwqe_stat statistics; |
663 | }; | 543 | }; |
664 | 544 | ||
665 | struct fcoe_mul_sges_ctx { | 545 | |
666 | struct regpair cur_sge_addr; | 546 | |
667 | #if defined(__BIG_ENDIAN) | 547 | |
668 | u8 sgl_size; | 548 | |
669 | u8 cur_sge_idx; | 549 | |
670 | u16 cur_sge_off; | 550 | |
671 | #elif defined(__LITTLE_ENDIAN) | 551 | |
672 | u16 cur_sge_off; | 552 | |
673 | u8 cur_sge_idx; | 553 | |
674 | u8 sgl_size; | 554 | |
675 | #endif | 555 | |
556 | |||
557 | |||
558 | |||
559 | |||
560 | /* | ||
561 | * TX SGL context $$KEEP_ENDIANNESS$$ | ||
562 | */ | ||
563 | union fcoe_sgl_union_ctx { | ||
564 | struct fcoe_cached_sge_ctx cached_sge; | ||
565 | struct fcoe_ext_mul_sges_ctx sgl; | ||
566 | __le32 opaque[5]; | ||
676 | }; | 567 | }; |
677 | 568 | ||
569 | /* | ||
570 | * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$ | ||
571 | */ | ||
572 | struct fcoe_read_flow_info { | ||
573 | union fcoe_sgl_union_ctx sgl_ctx; | ||
574 | __le32 rsrv0[3]; | ||
575 | }; | ||
576 | |||
577 | |||
578 | /* | ||
579 | * Fcoe stat context $$KEEP_ENDIANNESS$$ | ||
580 | */ | ||
678 | struct fcoe_s_stat_ctx { | 581 | struct fcoe_s_stat_ctx { |
679 | u8 flags; | 582 | u8 flags; |
680 | #define FCOE_S_STAT_CTX_ACTIVE (0x1<<0) | 583 | #define FCOE_S_STAT_CTX_ACTIVE (0x1<<0) |
@@ -693,51 +596,34 @@ struct fcoe_s_stat_ctx { | |||
693 | #define FCOE_S_STAT_CTX_RSRV1_SHIFT 6 | 596 | #define FCOE_S_STAT_CTX_RSRV1_SHIFT 6 |
694 | }; | 597 | }; |
695 | 598 | ||
696 | struct fcoe_seq_ctx { | 599 | /* |
697 | #if defined(__BIG_ENDIAN) | 600 | * Fcoe rx seq context $$KEEP_ENDIANNESS$$ |
698 | u16 low_seq_cnt; | 601 | */ |
699 | struct fcoe_s_stat_ctx s_stat; | 602 | struct fcoe_rx_seq_ctx { |
700 | u8 seq_id; | ||
701 | #elif defined(__LITTLE_ENDIAN) | ||
702 | u8 seq_id; | 603 | u8 seq_id; |
703 | struct fcoe_s_stat_ctx s_stat; | 604 | struct fcoe_s_stat_ctx s_stat; |
704 | u16 low_seq_cnt; | 605 | __le16 seq_cnt; |
705 | #endif | 606 | __le32 low_exp_ro; |
706 | #if defined(__BIG_ENDIAN) | 607 | __le32 high_exp_ro; |
707 | u16 err_seq_cnt; | ||
708 | u16 high_seq_cnt; | ||
709 | #elif defined(__LITTLE_ENDIAN) | ||
710 | u16 high_seq_cnt; | ||
711 | u16 err_seq_cnt; | ||
712 | #endif | ||
713 | u32 low_exp_ro; | ||
714 | u32 high_exp_ro; | ||
715 | }; | 608 | }; |
716 | 609 | ||
717 | 610 | ||
718 | struct fcoe_single_sge_ctx { | 611 | /* |
719 | struct regpair cur_buf_addr; | 612 | * Fcoe rx_wr union context $$KEEP_ENDIANNESS$$ |
720 | #if defined(__BIG_ENDIAN) | 613 | */ |
721 | u16 reserved0; | 614 | union fcoe_rx_wr_union_ctx { |
722 | u16 cur_buf_rem; | 615 | struct fcoe_read_flow_info read_info; |
723 | #elif defined(__LITTLE_ENDIAN) | 616 | union fcoe_comp_flow_info comp_info; |
724 | u16 cur_buf_rem; | 617 | __le32 opaque[8]; |
725 | u16 reserved0; | ||
726 | #endif | ||
727 | }; | ||
728 | |||
729 | union fcoe_sgl_ctx { | ||
730 | struct fcoe_single_sge_ctx single_sge; | ||
731 | struct fcoe_mul_sges_ctx mul_sges; | ||
732 | }; | 618 | }; |
733 | 619 | ||
734 | 620 | ||
735 | 621 | ||
736 | /* | 622 | /* |
737 | * FCoE SQ element | 623 | * FCoE SQ element $$KEEP_ENDIANNESS$$ |
738 | */ | 624 | */ |
739 | struct fcoe_sqe { | 625 | struct fcoe_sqe { |
740 | u16 wqe; | 626 | __le16 wqe; |
741 | #define FCOE_SQE_TASK_ID (0x7FFF<<0) | 627 | #define FCOE_SQE_TASK_ID (0x7FFF<<0) |
742 | #define FCOE_SQE_TASK_ID_SHIFT 0 | 628 | #define FCOE_SQE_TASK_ID_SHIFT 0 |
743 | #define FCOE_SQE_TOGGLE_BIT (0x1<<15) | 629 | #define FCOE_SQE_TOGGLE_BIT (0x1<<15) |
@@ -746,135 +632,141 @@ struct fcoe_sqe { | |||
746 | 632 | ||
747 | 633 | ||
748 | 634 | ||
749 | struct fcoe_task_ctx_entry_tx_only { | 635 | /* |
750 | union fcoe_sgl_ctx sgl_ctx; | 636 | * 14 regs $$KEEP_ENDIANNESS$$ |
637 | */ | ||
638 | struct fcoe_tce_tx_only { | ||
639 | union fcoe_sgl_union_ctx sgl_ctx; | ||
640 | __le32 rsrv0; | ||
751 | }; | 641 | }; |
752 | 642 | ||
753 | struct fcoe_task_ctx_entry_txwr_rxrd { | 643 | /* |
754 | #if defined(__BIG_ENDIAN) | 644 | * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$ |
755 | u16 verify_tx_seq; | 645 | */ |
646 | union fcoe_tx_wr_rx_rd_union_ctx { | ||
647 | struct fcoe_fc_frame tx_frame; | ||
648 | struct fcoe_fcp_cmd_payload fcp_cmd; | ||
649 | struct fcoe_ext_cleanup_info cleanup; | ||
650 | struct fcoe_ext_abts_info abts; | ||
651 | struct fcoe_ext_fw_tx_seq_ctx tx_seq; | ||
652 | __le32 opaque[8]; | ||
653 | }; | ||
654 | |||
655 | /* | ||
656 | * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$ | ||
657 | */ | ||
658 | struct fcoe_tce_tx_wr_rx_rd_const { | ||
756 | u8 init_flags; | 659 | u8 init_flags; |
757 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0) | 660 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0) |
758 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0 | 661 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0 |
759 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3) | 662 | #define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3) |
760 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3 | 663 | #define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3 |
761 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4) | 664 | #define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4) |
762 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4 | 665 | #define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4 |
763 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5) | 666 | #define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5) |
764 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5 | 667 | #define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5 |
765 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6) | 668 | #define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7) |
766 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6 | 669 | #define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7 |
767 | u8 tx_flags; | ||
768 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0) | ||
769 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0 | ||
770 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4) | ||
771 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4 | ||
772 | #elif defined(__LITTLE_ENDIAN) | ||
773 | u8 tx_flags; | 670 | u8 tx_flags; |
774 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0) | 671 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0) |
775 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0 | 672 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0 |
776 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4) | 673 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1) |
777 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4 | 674 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1 |
778 | u8 init_flags; | 675 | #define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5) |
779 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0) | 676 | #define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5 |
780 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0 | 677 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6) |
781 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3) | 678 | #define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6 |
782 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3 | 679 | #define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2 (0x1<<7) |
783 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4) | 680 | #define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2_SHIFT 7 |
784 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4 | 681 | __le16 rsrv3; |
785 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5) | 682 | __le32 verify_tx_seq; |
786 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5 | ||
787 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6) | ||
788 | #define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6 | ||
789 | u16 verify_tx_seq; | ||
790 | #endif | ||
791 | }; | 683 | }; |
792 | 684 | ||
793 | /* | 685 | /* |
794 | * Common section. Both TX and RX processing might write and read from it in | 686 | * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$ |
795 | * different flows | ||
796 | */ | 687 | */ |
797 | struct fcoe_task_ctx_entry_tx_rx_cmn { | 688 | struct fcoe_tce_tx_wr_rx_rd { |
798 | u32 data_2_trns; | 689 | union fcoe_tx_wr_rx_rd_union_ctx union_ctx; |
799 | union fcoe_general_task_ctx general; | 690 | struct fcoe_tce_tx_wr_rx_rd_const const_ctx; |
800 | #if defined(__BIG_ENDIAN) | ||
801 | u16 tx_low_seq_cnt; | ||
802 | struct fcoe_s_stat_ctx tx_s_stat; | ||
803 | u8 tx_seq_id; | ||
804 | #elif defined(__LITTLE_ENDIAN) | ||
805 | u8 tx_seq_id; | ||
806 | struct fcoe_s_stat_ctx tx_s_stat; | ||
807 | u16 tx_low_seq_cnt; | ||
808 | #endif | ||
809 | u32 common_flags; | ||
810 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0) | ||
811 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0 | ||
812 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24) | ||
813 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24 | ||
814 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25) | ||
815 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25 | ||
816 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26) | ||
817 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26 | ||
818 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27) | ||
819 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27 | ||
820 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28) | ||
821 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28 | ||
822 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29) | ||
823 | #define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29 | ||
824 | }; | ||
825 | |||
826 | struct fcoe_task_ctx_entry_rxwr_txrd { | ||
827 | #if defined(__BIG_ENDIAN) | ||
828 | u16 rx_id; | ||
829 | u16 rx_flags; | ||
830 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0) | ||
831 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0 | ||
832 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4) | ||
833 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4 | ||
834 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7) | ||
835 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7 | ||
836 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8) | ||
837 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8 | ||
838 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9) | ||
839 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9 | ||
840 | #elif defined(__LITTLE_ENDIAN) | ||
841 | u16 rx_flags; | ||
842 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0) | ||
843 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0 | ||
844 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4) | ||
845 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4 | ||
846 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7) | ||
847 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7 | ||
848 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8) | ||
849 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8 | ||
850 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9) | ||
851 | #define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9 | ||
852 | u16 rx_id; | ||
853 | #endif | ||
854 | }; | 691 | }; |
855 | 692 | ||
856 | struct fcoe_task_ctx_entry_rx_only { | 693 | /* |
857 | struct fcoe_seq_ctx seq_ctx; | 694 | * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$ |
858 | struct fcoe_seq_ctx ooo_seq_ctx; | 695 | */ |
859 | u32 rsrv3; | 696 | struct fcoe_tce_rx_wr_tx_rd_const { |
860 | union fcoe_sgl_ctx sgl_ctx; | 697 | __le32 data_2_trns; |
698 | __le32 init_flags; | ||
699 | #define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0) | ||
700 | #define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0 | ||
701 | #define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24) | ||
702 | #define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24 | ||
703 | }; | ||
704 | |||
705 | /* | ||
706 | * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$ | ||
707 | */ | ||
708 | struct fcoe_tce_rx_wr_tx_rd_var { | ||
709 | __le16 rx_flags; | ||
710 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0) | ||
711 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0 | ||
712 | #define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4) | ||
713 | #define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4 | ||
714 | #define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7) | ||
715 | #define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7 | ||
716 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8) | ||
717 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8 | ||
718 | #define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME (0x1<<12) | ||
719 | #define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12 | ||
720 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13) | ||
721 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13 | ||
722 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14) | ||
723 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14 | ||
724 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15) | ||
725 | #define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15 | ||
726 | __le16 rx_id; | ||
727 | struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy; | ||
728 | }; | ||
729 | |||
730 | /* | ||
731 | * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$ | ||
732 | */ | ||
733 | struct fcoe_tce_rx_wr_tx_rd { | ||
734 | struct fcoe_tce_rx_wr_tx_rd_const const_ctx; | ||
735 | struct fcoe_tce_rx_wr_tx_rd_var var_ctx; | ||
736 | }; | ||
737 | |||
738 | /* | ||
739 | * tce_rx_only $$KEEP_ENDIANNESS$$ | ||
740 | */ | ||
741 | struct fcoe_tce_rx_only { | ||
742 | struct fcoe_rx_seq_ctx rx_seq_ctx; | ||
743 | union fcoe_rx_wr_union_ctx union_ctx; | ||
861 | }; | 744 | }; |
862 | 745 | ||
746 | /* | ||
747 | * task_ctx_entry $$KEEP_ENDIANNESS$$ | ||
748 | */ | ||
863 | struct fcoe_task_ctx_entry { | 749 | struct fcoe_task_ctx_entry { |
864 | struct fcoe_task_ctx_entry_tx_only tx_wr_only; | 750 | struct fcoe_tce_tx_only txwr_only; |
865 | struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd; | 751 | struct fcoe_tce_tx_wr_rx_rd txwr_rxrd; |
866 | struct fcoe_task_ctx_entry_tx_rx_cmn cmn; | 752 | struct fcoe_tce_rx_wr_tx_rd rxwr_txrd; |
867 | struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd; | 753 | struct fcoe_tce_rx_only rxwr_only; |
868 | struct fcoe_task_ctx_entry_rx_only rx_wr_only; | ||
869 | u32 reserved[4]; | ||
870 | }; | 754 | }; |
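The reworked fcoe_task_ctx_entry groups fields by access direction (txwr_only, txwr_rxrd, rxwr_txrd, rxwr_only), and individual flags are decoded with the mask/shift macro pairs above. A minimal sketch, assuming a little-endian host as the surrounding driver code does; the helper name is hypothetical and simply mirrors the rx_state decode done later in bnx2fc_process_cq_compl():

    /* Hypothetical helper: recover the RX state nibble from the context. */
    static inline u8 fcoe_tce_rx_state(struct fcoe_task_ctx_entry *task)
    {
            return (task->rxwr_txrd.var_ctx.rx_flags &
                    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
                    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT;
    }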
871 | 755 | ||
872 | 756 | ||
757 | |||
758 | |||
759 | |||
760 | |||
761 | |||
762 | |||
763 | |||
764 | |||
873 | /* | 765 | /* |
874 | * FCoE XFRQ element | 766 | * FCoE XFRQ element $$KEEP_ENDIANNESS$$ |
875 | */ | 767 | */ |
876 | struct fcoe_xfrqe { | 768 | struct fcoe_xfrqe { |
877 | u16 wqe; | 769 | __le16 wqe; |
878 | #define FCOE_XFRQE_TASK_ID (0x7FFF<<0) | 770 | #define FCOE_XFRQE_TASK_ID (0x7FFF<<0) |
879 | #define FCOE_XFRQE_TASK_ID_SHIFT 0 | 771 | #define FCOE_XFRQE_TASK_ID_SHIFT 0 |
880 | #define FCOE_XFRQE_TOGGLE_BIT (0x1<<15) | 772 | #define FCOE_XFRQE_TOGGLE_BIT (0x1<<15) |
@@ -883,22 +775,31 @@ struct fcoe_xfrqe { | |||
883 | 775 | ||
884 | 776 | ||
885 | /* | 777 | /* |
886 | * FCoE CONFQ element | 778 | * FCoE RX doorbell message sent to the chip $$KEEP_ENDIANNESS$$ |
779 | */ | ||
780 | struct b577xx_fcoe_rx_doorbell { | ||
781 | struct b577xx_doorbell_hdr hdr; | ||
782 | u8 params; | ||
783 | #define B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM (0x1F<<0) | ||
784 | #define B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT 0 | ||
785 | #define B577XX_FCOE_RX_DOORBELL_OPCODE (0x7<<5) | ||
786 | #define B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT 5 | ||
787 | __le16 doorbell_cq_cons; | ||
788 | }; | ||
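The params byte of this RX doorbell packs a negative-arm threshold and an opcode side by side. A hedged sketch of how such a byte could be composed with the masks above; the helper name and both values are placeholders, since the values the driver actually programs are set elsewhere and are not part of this hunk:

    /* Hypothetical helper; the arm_threshold and opcode values are assumptions. */
    static void fcoe_fill_rx_db_params(struct b577xx_fcoe_rx_doorbell *rx_db,
                                       u8 arm_threshold, u8 opcode)
    {
            rx_db->params =
                ((arm_threshold << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) &
                 B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM) |
                ((opcode << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT) &
                 B577XX_FCOE_RX_DOORBELL_OPCODE);
    }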
789 | |||
790 | |||
791 | /* | ||
792 | * FCoE CONFQ element $$KEEP_ENDIANNESS$$ | ||
887 | */ | 793 | */ |
888 | struct fcoe_confqe { | 794 | struct fcoe_confqe { |
889 | #if defined(__BIG_ENDIAN) | 795 | __le16 ox_id; |
890 | u16 rx_id; | 796 | __le16 rx_id; |
891 | u16 ox_id; | 797 | __le32 param; |
892 | #elif defined(__LITTLE_ENDIAN) | ||
893 | u16 ox_id; | ||
894 | u16 rx_id; | ||
895 | #endif | ||
896 | u32 param; | ||
897 | }; | 798 | }; |
898 | 799 | ||
899 | 800 | ||
900 | /* | 801 | /* |
901 | * FCoE connection data base | 802 | * FCoE connection data base |
902 | */ | 803 | */ |
903 | struct fcoe_conn_db { | 804 | struct fcoe_conn_db { |
904 | #if defined(__BIG_ENDIAN) | 805 | #if defined(__BIG_ENDIAN) |
@@ -914,10 +815,10 @@ struct fcoe_conn_db { | |||
914 | 815 | ||
915 | 816 | ||
916 | /* | 817 | /* |
917 | * FCoE CQ element | 818 | * FCoE CQ element $$KEEP_ENDIANNESS$$ |
918 | */ | 819 | */ |
919 | struct fcoe_cqe { | 820 | struct fcoe_cqe { |
920 | u16 wqe; | 821 | __le16 wqe; |
921 | #define FCOE_CQE_CQE_INFO (0x3FFF<<0) | 822 | #define FCOE_CQE_CQE_INFO (0x3FFF<<0) |
922 | #define FCOE_CQE_CQE_INFO_SHIFT 0 | 823 | #define FCOE_CQE_CQE_INFO_SHIFT 0 |
923 | #define FCOE_CQE_CQE_TYPE (0x1<<14) | 824 | #define FCOE_CQE_CQE_TYPE (0x1<<14) |
@@ -928,61 +829,46 @@ struct fcoe_cqe { | |||
928 | 829 | ||
929 | 830 | ||
930 | /* | 831 | /* |
931 | * FCoE error/warning resporting entry | 832 | * FCoE error/warning reporting entry $$KEEP_ENDIANNESS$$ |
833 | */ | ||
834 | struct fcoe_partial_err_report_entry { | ||
835 | __le32 err_warn_bitmap_lo; | ||
836 | __le32 err_warn_bitmap_hi; | ||
837 | __le32 tx_buf_off; | ||
838 | __le32 rx_buf_off; | ||
839 | }; | ||
840 | |||
841 | /* | ||
842 | * FCoE error/warning reporting entry $$KEEP_ENDIANNESS$$ | ||
932 | */ | 843 | */ |
933 | struct fcoe_err_report_entry { | 844 | struct fcoe_err_report_entry { |
934 | u32 err_warn_bitmap_lo; | 845 | struct fcoe_partial_err_report_entry data; |
935 | u32 err_warn_bitmap_hi; | ||
936 | u32 tx_buf_off; | ||
937 | u32 rx_buf_off; | ||
938 | struct fcoe_fc_hdr fc_hdr; | 846 | struct fcoe_fc_hdr fc_hdr; |
939 | }; | 847 | }; |
940 | 848 | ||
941 | 849 | ||
942 | /* | 850 | /* |
943 | * FCoE hash table entry (32 bytes) | 851 | * FCoE hash table entry (32 bytes) $$KEEP_ENDIANNESS$$ |
944 | */ | 852 | */ |
945 | struct fcoe_hash_table_entry { | 853 | struct fcoe_hash_table_entry { |
946 | #if defined(__BIG_ENDIAN) | ||
947 | u8 d_id_0; | ||
948 | u8 s_id_2; | ||
949 | u8 s_id_1; | ||
950 | u8 s_id_0; | ||
951 | #elif defined(__LITTLE_ENDIAN) | ||
952 | u8 s_id_0; | 854 | u8 s_id_0; |
953 | u8 s_id_1; | 855 | u8 s_id_1; |
954 | u8 s_id_2; | 856 | u8 s_id_2; |
955 | u8 d_id_0; | 857 | u8 d_id_0; |
956 | #endif | ||
957 | #if defined(__BIG_ENDIAN) | ||
958 | u16 dst_mac_addr_hi; | ||
959 | u8 d_id_2; | ||
960 | u8 d_id_1; | ||
961 | #elif defined(__LITTLE_ENDIAN) | ||
962 | u8 d_id_1; | 858 | u8 d_id_1; |
963 | u8 d_id_2; | 859 | u8 d_id_2; |
964 | u16 dst_mac_addr_hi; | 860 | __le16 dst_mac_addr_hi; |
965 | #endif | 861 | __le16 dst_mac_addr_mid; |
966 | u32 dst_mac_addr_lo; | 862 | __le16 dst_mac_addr_lo; |
967 | #if defined(__BIG_ENDIAN) | 863 | __le16 src_mac_addr_hi; |
968 | u16 vlan_id; | 864 | __le16 vlan_id; |
969 | u16 src_mac_addr_hi; | 865 | __le16 src_mac_addr_lo; |
970 | #elif defined(__LITTLE_ENDIAN) | 866 | __le16 src_mac_addr_mid; |
971 | u16 src_mac_addr_hi; | ||
972 | u16 vlan_id; | ||
973 | #endif | ||
974 | u32 src_mac_addr_lo; | ||
975 | #if defined(__BIG_ENDIAN) | ||
976 | u16 reserved1; | ||
977 | u8 reserved0; | ||
978 | u8 vlan_flag; | ||
979 | #elif defined(__LITTLE_ENDIAN) | ||
980 | u8 vlan_flag; | 867 | u8 vlan_flag; |
981 | u8 reserved0; | 868 | u8 reserved0; |
982 | u16 reserved1; | 869 | __le16 reserved1; |
983 | #endif | 870 | __le32 reserved2; |
984 | u32 reserved2; | 871 | __le32 field_id; |
985 | u32 field_id; | ||
986 | #define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0) | 872 | #define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0) |
987 | #define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0 | 873 | #define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0 |
988 | #define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24) | 874 | #define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24) |
@@ -991,11 +877,27 @@ struct fcoe_hash_table_entry { | |||
991 | #define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31 | 877 | #define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31 |
992 | }; | 878 | }; |
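Now that the hash table entry has a fixed little-endian layout, the final field_id word carries the connection id plus a valid bit. A small sketch of filling it; the helper name is hypothetical:

    /* Hypothetical helper: pack the CID and the valid bit into field_id. */
    static void fcoe_hash_entry_set_cid(struct fcoe_hash_table_entry *ent, u32 cid)
    {
            ent->field_id = cpu_to_le32((cid & FCOE_HASH_TABLE_ENTRY_CID) |
                                        (1U << FCOE_HASH_TABLE_ENTRY_VALID_SHIFT));
    }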
993 | 879 | ||
880 | |||
994 | /* | 881 | /* |
995 | * FCoE pending work request CQE | 882 | * FCoE LCQ element $$KEEP_ENDIANNESS$$ |
883 | */ | ||
884 | struct fcoe_lcqe { | ||
885 | __le32 wqe; | ||
886 | #define FCOE_LCQE_TASK_ID (0xFFFF<<0) | ||
887 | #define FCOE_LCQE_TASK_ID_SHIFT 0 | ||
888 | #define FCOE_LCQE_LCQE_TYPE (0xFF<<16) | ||
889 | #define FCOE_LCQE_LCQE_TYPE_SHIFT 16 | ||
890 | #define FCOE_LCQE_RESERVED (0xFF<<24) | ||
891 | #define FCOE_LCQE_RESERVED_SHIFT 24 | ||
892 | }; | ||
893 | |||
894 | |||
895 | |||
896 | /* | ||
897 | * FCoE pending work request CQE $$KEEP_ENDIANNESS$$ | ||
996 | */ | 898 | */ |
997 | struct fcoe_pend_wq_cqe { | 899 | struct fcoe_pend_wq_cqe { |
998 | u16 wqe; | 900 | __le16 wqe; |
999 | #define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0) | 901 | #define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0) |
1000 | #define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0 | 902 | #define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0 |
1001 | #define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14) | 903 | #define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14) |
@@ -1006,53 +908,61 @@ struct fcoe_pend_wq_cqe { | |||
1006 | 908 | ||
1007 | 909 | ||
1008 | /* | 910 | /* |
1009 | * FCoE RX statistics parameters section#0 | 911 | * FCoE RX statistics parameters section#0 $$KEEP_ENDIANNESS$$ |
1010 | */ | 912 | */ |
1011 | struct fcoe_rx_stat_params_section0 { | 913 | struct fcoe_rx_stat_params_section0 { |
1012 | u32 fcoe_ver_cnt; | 914 | __le32 fcoe_rx_pkt_cnt; |
1013 | u32 fcoe_rx_pkt_cnt; | 915 | __le32 fcoe_rx_byte_cnt; |
1014 | u32 fcoe_rx_byte_cnt; | ||
1015 | u32 fcoe_rx_drop_pkt_cnt; | ||
1016 | }; | 916 | }; |
1017 | 917 | ||
1018 | 918 | ||
1019 | /* | 919 | /* |
1020 | * FCoE RX statistics parameters section#1 | 920 | * FCoE RX statistics parameters section#1 $$KEEP_ENDIANNESS$$ |
1021 | */ | 921 | */ |
1022 | struct fcoe_rx_stat_params_section1 { | 922 | struct fcoe_rx_stat_params_section1 { |
1023 | u32 fc_crc_cnt; | 923 | __le32 fcoe_ver_cnt; |
1024 | u32 eofa_del_cnt; | 924 | __le32 fcoe_rx_drop_pkt_cnt; |
1025 | u32 miss_frame_cnt; | 925 | }; |
1026 | u32 seq_timeout_cnt; | 926 | |
1027 | u32 drop_seq_cnt; | 927 | |
1028 | u32 fcoe_rx_drop_pkt_cnt; | 928 | /* |
1029 | u32 fcp_rx_pkt_cnt; | 929 | * FCoE RX statistics parameters section#2 $$KEEP_ENDIANNESS$$ |
1030 | u32 reserved0; | 930 | */ |
931 | struct fcoe_rx_stat_params_section2 { | ||
932 | __le32 fc_crc_cnt; | ||
933 | __le32 eofa_del_cnt; | ||
934 | __le32 miss_frame_cnt; | ||
935 | __le32 seq_timeout_cnt; | ||
936 | __le32 drop_seq_cnt; | ||
937 | __le32 fcoe_rx_drop_pkt_cnt; | ||
938 | __le32 fcp_rx_pkt_cnt; | ||
939 | __le32 reserved0; | ||
1031 | }; | 940 | }; |
1032 | 941 | ||
1033 | 942 | ||
1034 | /* | 943 | /* |
1035 | * FCoE TX statistics parameters | 944 | * FCoE TX statistics parameters $$KEEP_ENDIANNESS$$ |
1036 | */ | 945 | */ |
1037 | struct fcoe_tx_stat_params { | 946 | struct fcoe_tx_stat_params { |
1038 | u32 fcoe_tx_pkt_cnt; | 947 | __le32 fcoe_tx_pkt_cnt; |
1039 | u32 fcoe_tx_byte_cnt; | 948 | __le32 fcoe_tx_byte_cnt; |
1040 | u32 fcp_tx_pkt_cnt; | 949 | __le32 fcp_tx_pkt_cnt; |
1041 | u32 reserved0; | 950 | __le32 reserved0; |
1042 | }; | 951 | }; |
1043 | 952 | ||
1044 | /* | 953 | /* |
1045 | * FCoE statistics parameters | 954 | * FCoE statistics parameters $$KEEP_ENDIANNESS$$ |
1046 | */ | 955 | */ |
1047 | struct fcoe_statistics_params { | 956 | struct fcoe_statistics_params { |
1048 | struct fcoe_tx_stat_params tx_stat; | 957 | struct fcoe_tx_stat_params tx_stat; |
1049 | struct fcoe_rx_stat_params_section0 rx_stat0; | 958 | struct fcoe_rx_stat_params_section0 rx_stat0; |
1050 | struct fcoe_rx_stat_params_section1 rx_stat1; | 959 | struct fcoe_rx_stat_params_section1 rx_stat1; |
960 | struct fcoe_rx_stat_params_section2 rx_stat2; | ||
1051 | }; | 961 | }; |
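The split of the RX counters into three sections is what the bnx2fc_get_host_stats() change further down keys off (the CRC counter now lives in rx_stat2). A condensed sketch of the aggregation; the helper name is hypothetical and the body is lifted from that hunk:

    /* Hypothetical helper wrapping the counters used by bnx2fc_get_host_stats(). */
    static void fcoe_accumulate_stats(struct fc_host_statistics *stats,
                                      struct fcoe_statistics_params *fw_stats)
    {
            stats->invalid_crc_count += fw_stats->rx_stat2.fc_crc_cnt;
            stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
            stats->tx_words += fw_stats->tx_stat.fcoe_tx_byte_cnt / 4; /* FC words are 4 bytes */
            stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
    }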
1052 | 962 | ||
1053 | 963 | ||
1054 | /* | 964 | /* |
1055 | * FCoE t2 hash table entry (64 bytes) | 965 | * FCoE t2 hash table entry (64 bytes) $$KEEP_ENDIANNESS$$ |
1056 | */ | 966 | */ |
1057 | struct fcoe_t2_hash_table_entry { | 967 | struct fcoe_t2_hash_table_entry { |
1058 | struct fcoe_hash_table_entry data; | 968 | struct fcoe_hash_table_entry data; |
@@ -1060,11 +970,13 @@ struct fcoe_t2_hash_table_entry { | |||
1060 | struct regpair reserved0[3]; | 970 | struct regpair reserved0[3]; |
1061 | }; | 971 | }; |
1062 | 972 | ||
973 | |||
974 | |||
1063 | /* | 975 | /* |
1064 | * FCoE unsolicited CQE | 976 | * FCoE unsolicited CQE $$KEEP_ENDIANNESS$$ |
1065 | */ | 977 | */ |
1066 | struct fcoe_unsolicited_cqe { | 978 | struct fcoe_unsolicited_cqe { |
1067 | u16 wqe; | 979 | __le16 wqe; |
1068 | #define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0) | 980 | #define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0) |
1069 | #define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0 | 981 | #define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0 |
1070 | #define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2) | 982 | #define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2) |
@@ -1075,6 +987,4 @@ struct fcoe_unsolicited_cqe { | |||
1075 | #define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15 | 987 | #define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15 |
1076 | }; | 988 | }; |
1077 | 989 | ||
1078 | |||
1079 | |||
1080 | #endif /* __57XX_FCOE_HSI_LINUX_LE__ */ | 990 | #endif /* __57XX_FCOE_HSI_LINUX_LE__ */ |
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 0a404bfb44fe..907672e86063 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h | |||
@@ -62,7 +62,7 @@ | |||
62 | #include "bnx2fc_constants.h" | 62 | #include "bnx2fc_constants.h" |
63 | 63 | ||
64 | #define BNX2FC_NAME "bnx2fc" | 64 | #define BNX2FC_NAME "bnx2fc" |
65 | #define BNX2FC_VERSION "1.0.1" | 65 | #define BNX2FC_VERSION "1.0.3" |
66 | 66 | ||
67 | #define PFX "bnx2fc: " | 67 | #define PFX "bnx2fc: " |
68 | 68 | ||
@@ -262,9 +262,14 @@ struct bnx2fc_rport { | |||
262 | #define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8 | 262 | #define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8 |
263 | #define BNX2FC_FLAG_EXPL_LOGO 0x9 | 263 | #define BNX2FC_FLAG_EXPL_LOGO 0x9 |
264 | 264 | ||
265 | u8 src_addr[ETH_ALEN]; | ||
265 | u32 max_sqes; | 266 | u32 max_sqes; |
266 | u32 max_rqes; | 267 | u32 max_rqes; |
267 | u32 max_cqes; | 268 | u32 max_cqes; |
269 | atomic_t free_sqes; | ||
270 | |||
271 | struct b577xx_doorbell_set_prod sq_db; | ||
272 | struct b577xx_fcoe_rx_doorbell rx_db; | ||
268 | 273 | ||
269 | struct fcoe_sqe *sq; | 274 | struct fcoe_sqe *sq; |
270 | dma_addr_t sq_dma; | 275 | dma_addr_t sq_dma; |
@@ -274,7 +279,7 @@ struct bnx2fc_rport { | |||
274 | 279 | ||
275 | struct fcoe_cqe *cq; | 280 | struct fcoe_cqe *cq; |
276 | dma_addr_t cq_dma; | 281 | dma_addr_t cq_dma; |
277 | u32 cq_cons_idx; | 282 | u16 cq_cons_idx; |
278 | u8 cq_curr_toggle_bit; | 283 | u8 cq_curr_toggle_bit; |
279 | u32 cq_mem_size; | 284 | u32 cq_mem_size; |
280 | 285 | ||
@@ -505,6 +510,7 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did, | |||
505 | struct fc_frame *, | 510 | struct fc_frame *, |
506 | void *), | 511 | void *), |
507 | void *arg, u32 timeout); | 512 | void *arg, u32 timeout); |
513 | void bnx2fc_arm_cq(struct bnx2fc_rport *tgt); | ||
508 | int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt); | 514 | int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt); |
509 | void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe); | 515 | void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe); |
510 | struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, | 516 | struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h index fe7769173c43..399cda047a77 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_constants.h +++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h | |||
@@ -5,6 +5,12 @@ | |||
5 | * This file defines HSI constants for the FCoE flows | 5 | * This file defines HSI constants for the FCoE flows |
6 | */ | 6 | */ |
7 | 7 | ||
8 | /* Current FCoE HSI version number composed of two fields (16 bit) */ | ||
9 | /* A major version change implies a break of the previous HSI */ | ||
10 | #define FCOE_HSI_MAJOR_VERSION (1) | ||
11 | /* A minor version change implies a change that does not break the previous HSI */ | ||
12 | #define FCOE_HSI_MINOR_VERSION (1) | ||
13 | |||
8 | /* KWQ/KCQ FCoE layer code */ | 14 | /* KWQ/KCQ FCoE layer code */ |
9 | #define FCOE_KWQE_LAYER_CODE (7) | 15 | #define FCOE_KWQE_LAYER_CODE (7) |
10 | 16 | ||
@@ -40,21 +46,62 @@ | |||
40 | #define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3) | 46 | #define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3) |
41 | #define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4) | 47 | #define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4) |
42 | #define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5) | 48 | #define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5) |
49 | #define FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION (0x6) | ||
50 | |||
51 | /* CQE type */ | ||
52 | #define FCOE_PENDING_CQE_TYPE 0 | ||
53 | #define FCOE_UNSOLIC_CQE_TYPE 1 | ||
43 | 54 | ||
44 | /* Unsolicited CQE type */ | 55 | /* Unsolicited CQE type */ |
45 | #define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0 | 56 | #define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0 |
46 | #define FCOE_ERROR_DETECTION_CQE_TYPE 1 | 57 | #define FCOE_ERROR_DETECTION_CQE_TYPE 1 |
47 | #define FCOE_WARNING_DETECTION_CQE_TYPE 2 | 58 | #define FCOE_WARNING_DETECTION_CQE_TYPE 2 |
48 | 59 | ||
60 | /* E_D_TOV timer resolution in ms */ | ||
61 | #define FCOE_E_D_TOV_TIMER_RESOLUTION_MS (20) | ||
62 | |||
63 | /* E_D_TOV timer resolution for SDM (4 micro) */ | ||
64 | #define FCOE_E_D_TOV_SDM_TIMER_RESOLUTION \ | ||
65 | (FCOE_E_D_TOV_TIMER_RESOLUTION_MS * 1000 / 4) | ||
66 | |||
67 | /* REC timer resolution in ms */ | ||
68 | #define FCOE_REC_TIMER_RESOLUTION_MS (20) | ||
69 | |||
70 | /* REC timer resolution for SDM (4 micro) */ | ||
71 | #define FCOE_REC_SDM_TIMER_RESOLUTION (FCOE_REC_TIMER_RESOLUTION_MS * 1000 / 4) | ||
72 | |||
73 | /* E_D_TOV timer default wraparound value (2 sec) in 20 ms resolution */ | ||
74 | #define FCOE_E_D_TOV_DEFAULT_WRAPAROUND_VAL \ | ||
75 | (2000 / FCOE_E_D_TOV_TIMER_RESOLUTION_MS) | ||
76 | |||
77 | /* REC_TOV timer default wraparound value (3 sec) in 20 ms resolution */ | ||
78 | #define FCOE_REC_TOV_DEFAULT_WRAPAROUND_VAL \ | ||
79 | (3000 / FCOE_REC_TIMER_RESOLUTION_MS) | ||
80 | |||
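Working the new timer macros through: with a 20 ms tick, FCOE_E_D_TOV_SDM_TIMER_RESOLUTION and FCOE_REC_SDM_TIMER_RESOLUTION both come out to 20 * 1000 / 4 = 5000 SDM units of 4 us, FCOE_E_D_TOV_DEFAULT_WRAPAROUND_VAL is 2000 / 20 = 100 ticks (2 s), and FCOE_REC_TOV_DEFAULT_WRAPAROUND_VAL is 3000 / 20 = 150 ticks (3 s).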
81 | #define FCOE_NUM_OF_TIMER_TASKS (8 * 1024) | ||
82 | |||
83 | #define FCOE_NUM_OF_CACHED_TASKS_TIMER (8) | ||
84 | |||
49 | /* Task context constants */ | 85 | /* Task context constants */ |
86 | /******** Remove FCP_CMD write tce sleep ***********************/ | ||
87 | /* In case timer services are required then shall be updated by Xstorm after | ||
88 | * start processing the task. In case no timer facilities are required then the | ||
89 | * driver would initialize the state to this value | ||
90 | * | ||
91 | #define FCOE_TASK_TX_STATE_NORMAL 0 | ||
92 | * After driver has initialize the task in case timer services required * | ||
93 | #define FCOE_TASK_TX_STATE_INIT 1 | ||
94 | ******** Remove FCP_CMD write tce sleep ***********************/ | ||
50 | /* After the driver has initialized the task, in case timer services are required */ | 95 | /* After the driver has initialized the task, in case timer services are required */ |
51 | #define FCOE_TASK_TX_STATE_INIT 0 | 96 | #define FCOE_TASK_TX_STATE_INIT 0 |
52 | /* In case timer services are required, this state is updated by Xstorm after it | 97 | /* In case timer services are required, this state is updated by Xstorm after it |
53 | * starts processing the task. In case no timer facilities are required, the | 98 | * starts processing the task. In case no timer facilities are required, the |
54 | * driver would initialize the state to this value */ | 99 | * driver would initialize the state to this value |
100 | */ | ||
55 | #define FCOE_TASK_TX_STATE_NORMAL 1 | 101 | #define FCOE_TASK_TX_STATE_NORMAL 1 |
56 | /* Task is under abort procedure. Updated in order to stop processing of | 102 | /* Task is under abort procedure. Updated in order to stop processing of |
57 | * pending WQEs on this task */ | 103 | * pending WQEs on this task |
104 | */ | ||
58 | #define FCOE_TASK_TX_STATE_ABORT 2 | 105 | #define FCOE_TASK_TX_STATE_ABORT 2 |
59 | /* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */ | 106 | /* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */ |
60 | #define FCOE_TASK_TX_STATE_ERROR 3 | 107 | #define FCOE_TASK_TX_STATE_ERROR 3 |
@@ -66,17 +113,8 @@ | |||
66 | #define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6 | 113 | #define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6 |
67 | /* For sequence cleanup request task */ | 114 | /* For sequence cleanup request task */ |
68 | #define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7 | 115 | #define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7 |
69 | /* Mark task as aborted and indicate that ABTS was not transmitted */ | ||
70 | #define FCOE_TASK_TX_STATE_BEFORE_ABTS_TX 8 | ||
71 | /* Mark task as aborted and indicate that ABTS was transmitted */ | ||
72 | #define FCOE_TASK_TX_STATE_AFTER_ABTS_TX 9 | ||
73 | /* For completion the ABTS task. */ | 116 | /* For completion the ABTS task. */ |
74 | #define FCOE_TASK_TX_STATE_ABTS_TX_COMPLETED 10 | 117 | #define FCOE_TASK_TX_STATE_ABTS_TX 8 |
75 | /* Mark task as aborted and indicate that Exchange cleanup was not transmitted | ||
76 | */ | ||
77 | #define FCOE_TASK_TX_STATE_BEFORE_EXCHANGE_CLEANUP_TX 11 | ||
78 | /* Mark task as aborted and indicate that Exchange cleanup was transmitted */ | ||
79 | #define FCOE_TASK_TX_STATE_AFTER_EXCHANGE_CLEANUP_TX 12 | ||
80 | 118 | ||
81 | #define FCOE_TASK_RX_STATE_NORMAL 0 | 119 | #define FCOE_TASK_RX_STATE_NORMAL 0 |
82 | #define FCOE_TASK_RX_STATE_COMPLETED 1 | 120 | #define FCOE_TASK_RX_STATE_COMPLETED 1 |
@@ -86,25 +124,25 @@ | |||
86 | #define FCOE_TASK_RX_STATE_WARNING 3 | 124 | #define FCOE_TASK_RX_STATE_WARNING 3 |
87 | /* For E_D_T_TOV timer expiration in Ustorm */ | 125 | /* For E_D_T_TOV timer expiration in Ustorm */ |
88 | #define FCOE_TASK_RX_STATE_ERROR 4 | 126 | #define FCOE_TASK_RX_STATE_ERROR 4 |
89 | /* ABTS ACC arrived wait for local completion to finally complete the task. */ | 127 | /* FW only: First visit at rx-path, part of the abts round trip */ |
90 | #define FCOE_TASK_RX_STATE_ABTS_ACC_ARRIVED 5 | 128 | #define FCOE_TASK_RX_STATE_ABTS_IN_PROCESS 5 |
91 | /* local completion arrived wait for ABTS ACC to finally complete the task. */ | 129 | /* FW only: Second visit at rx-path, after ABTS frame transmitted */ |
92 | #define FCOE_TASK_RX_STATE_ABTS_LOCAL_COMP_ARRIVED 6 | 130 | #define FCOE_TASK_RX_STATE_ABTS_TRANSMITTED 6 |
93 | /* Special completion indication in case of task was aborted. */ | 131 | /* Special completion indication in case of task was aborted. */ |
94 | #define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7 | 132 | #define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7 |
95 | /* Special completion indication in case of task was cleaned. */ | 133 | /* FW only: First visit at rx-path, part of the cleanup round trip */ |
96 | #define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 8 | 134 | #define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_IN_PROCESS 8 |
97 | /* Special completion indication (in task requested the exchange cleanup) in | 135 | /* FW only: Special completion indication in case of task was cleaned. */ |
98 | * case cleaned task is in non-valid. */ | 136 | #define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 9 |
99 | #define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 9 | 137 | /* Not in use: Special completion indication (in the task that requested the |
138 | * exchange cleanup) in case the cleaned task is non-valid. | ||
139 | */ | ||
140 | #define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 10 | ||
100 | /* Special completion indication (in task requested the sequence cleanup) in | 141 | /* Special completion indication (in task requested the sequence cleanup) in |
101 | * case cleaned task was already returned to normal. */ | 142 | * case cleaned task was already returned to normal. |
102 | #define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 10 | 143 | */ |
103 | /* Exchange cleanup arrived wait until xfer will be handled to finally | 144 | #define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 11 |
104 | * complete the task. */ | 145 | |
105 | #define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_ARRIVED 11 | ||
106 | /* Xfer handled, wait for exchange cleanup to finally complete the task. */ | ||
107 | #define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_HANDLED_XFER 12 | ||
108 | 146 | ||
109 | #define FCOE_TASK_TYPE_WRITE 0 | 147 | #define FCOE_TASK_TYPE_WRITE 0 |
110 | #define FCOE_TASK_TYPE_READ 1 | 148 | #define FCOE_TASK_TYPE_READ 1 |
@@ -120,11 +158,40 @@ | |||
120 | #define FCOE_TASK_CLASS_TYPE_3 0 | 158 | #define FCOE_TASK_CLASS_TYPE_3 0 |
121 | #define FCOE_TASK_CLASS_TYPE_2 1 | 159 | #define FCOE_TASK_CLASS_TYPE_2 1 |
122 | 160 | ||
161 | /* FCoE/FC packet fields */ | ||
162 | #define FCOE_ETH_TYPE 0x8906 | ||
163 | |||
164 | /* FCoE maximum elements in hash table */ | ||
165 | #define FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW 8 | ||
166 | |||
167 | /* FCoE half of the elements in hash table */ | ||
168 | #define FCOE_HALF_ELEMENTS_IN_HASH_TABLE_ROW \ | ||
169 | (FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW / 2) | ||
170 | |||
171 | /* FCoE number of cached T2 entries */ | ||
172 | #define T_FCOE_NUMBER_OF_CACHED_T2_ENTRIES (4) | ||
173 | |||
174 | /* FCoE hash table chunk size */ | ||
175 | #define FCOE_HASH_TBL_CHUNK_SIZE 16384 | ||
176 | |||
123 | /* Everest FCoE connection type */ | 177 | /* Everest FCoE connection type */ |
124 | #define B577XX_FCOE_CONNECTION_TYPE 4 | 178 | #define B577XX_FCOE_CONNECTION_TYPE 4 |
125 | 179 | ||
126 | /* Error codes for Error Reporting in fast path flows */ | 180 | /* FCoE number of rows (in log). This number derives |
127 | /* XFER error codes */ | 181 | * from the maximum connections supported which is 2048. |
182 | * TBA: Need a different constant for E2 | ||
183 | */ | ||
184 | #define FCOE_MAX_NUM_SESSIONS_LOG 11 | ||
185 | |||
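As a quick check, 1 << FCOE_MAX_NUM_SESSIONS_LOG = 1 << 11 = 2048, matching the maximum number of connections quoted in the comment above.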
186 | #define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 | ||
187 | |||
188 | /* Error codes for Error Reporting in slow path flows */ | ||
189 | #define FCOE_SLOW_PATH_ERROR_CODE_TOO_MANY_FUNCS 0 | ||
190 | #define FCOE_SLOW_PATH_ERROR_CODE_NO_LICENSE 1 | ||
191 | |||
192 | /* Error codes for Error Reporting in fast path flows | ||
193 | * XFER error codes | ||
194 | */ | ||
128 | #define FCOE_ERROR_CODE_XFER_OOO_RO 0 | 195 | #define FCOE_ERROR_CODE_XFER_OOO_RO 0 |
129 | #define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1 | 196 | #define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1 |
130 | #define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2 | 197 | #define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2 |
@@ -155,17 +222,17 @@ | |||
155 | #define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23 | 222 | #define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23 |
156 | #define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24 | 223 | #define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24 |
157 | #define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25 | 224 | #define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25 |
158 | #define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26 | 225 | #define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26 |
159 | #define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27 | 226 | #define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27 |
160 | #define FCOE_ERROR_CODE_DATA_FCTL 28 | 227 | #define FCOE_ERROR_CODE_DATA_FCTL 28 |
161 | 228 | ||
162 | /* Middle path error codes */ | 229 | /* Middle path error codes */ |
163 | #define FCOE_ERROR_CODE_MIDPATH_TYPE_NOT_ELS 29 | 230 | #define FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE 29 |
164 | #define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30 | 231 | #define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30 |
165 | #define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31 | 232 | #define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31 |
166 | #define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32 | 233 | #define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32 |
167 | #define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33 | 234 | #define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33 |
168 | #define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_FCTL 34 | 235 | #define FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL 34 |
169 | #define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35 | 236 | #define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35 |
170 | #define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36 | 237 | #define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36 |
171 | 238 | ||
@@ -173,7 +240,7 @@ | |||
173 | #define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37 | 240 | #define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37 |
174 | #define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38 | 241 | #define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38 |
175 | #define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39 | 242 | #define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39 |
176 | #define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40 | 243 | #define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40 |
177 | #define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41 | 244 | #define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41 |
178 | 245 | ||
179 | /* Common error codes */ | 246 | /* Common error codes */ |
@@ -185,7 +252,7 @@ | |||
185 | #define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47 | 252 | #define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47 |
186 | #define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48 | 253 | #define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48 |
187 | #define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49 | 254 | #define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49 |
188 | #define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50 | 255 | #define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50 |
189 | 256 | ||
190 | /* Unsolicited Rx error codes */ | 257 | /* Unsolicited Rx error codes */ |
191 | #define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51 | 258 | #define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51 |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c index 52c358427ce2..7e89143f15cf 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_els.c +++ b/drivers/scsi/bnx2fc/bnx2fc_els.c | |||
@@ -83,7 +83,7 @@ int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req) | |||
83 | rrq.rrq_cmd = ELS_RRQ; | 83 | rrq.rrq_cmd = ELS_RRQ; |
84 | hton24(rrq.rrq_s_id, sid); | 84 | hton24(rrq.rrq_s_id, sid); |
85 | rrq.rrq_ox_id = htons(aborted_io_req->xid); | 85 | rrq.rrq_ox_id = htons(aborted_io_req->xid); |
86 | rrq.rrq_rx_id = htons(aborted_io_req->task->rx_wr_tx_rd.rx_id); | 86 | rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id); |
87 | 87 | ||
88 | retry_rrq: | 88 | retry_rrq: |
89 | rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq), | 89 | rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq), |
@@ -417,12 +417,13 @@ void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req, | |||
417 | 417 | ||
418 | hdr = (u64 *)fc_hdr; | 418 | hdr = (u64 *)fc_hdr; |
419 | temp_hdr = (u64 *) | 419 | temp_hdr = (u64 *) |
420 | &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr; | 420 | &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr; |
421 | hdr[0] = cpu_to_be64(temp_hdr[0]); | 421 | hdr[0] = cpu_to_be64(temp_hdr[0]); |
422 | hdr[1] = cpu_to_be64(temp_hdr[1]); | 422 | hdr[1] = cpu_to_be64(temp_hdr[1]); |
423 | hdr[2] = cpu_to_be64(temp_hdr[2]); | 423 | hdr[2] = cpu_to_be64(temp_hdr[2]); |
424 | 424 | ||
425 | mp_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off; | 425 | mp_req->resp_len = |
426 | task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len; | ||
426 | 427 | ||
427 | /* Parse ELS response */ | 428 | /* Parse ELS response */ |
428 | if ((els_req->cb_func) && (els_req->cb_arg)) { | 429 | if ((els_req->cb_func) && (els_req->cb_arg)) { |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index ab255fbc7f36..7a16ca1c3ecf 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | |||
@@ -21,7 +21,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); | |||
21 | 21 | ||
22 | #define DRV_MODULE_NAME "bnx2fc" | 22 | #define DRV_MODULE_NAME "bnx2fc" |
23 | #define DRV_MODULE_VERSION BNX2FC_VERSION | 23 | #define DRV_MODULE_VERSION BNX2FC_VERSION |
24 | #define DRV_MODULE_RELDATE "Mar 17, 2011" | 24 | #define DRV_MODULE_RELDATE "Jun 10, 2011" |
25 | 25 | ||
26 | 26 | ||
27 | static char version[] __devinitdata = | 27 | static char version[] __devinitdata = |
@@ -612,7 +612,7 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) | |||
612 | BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); | 612 | BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); |
613 | return bnx2fc_stats; | 613 | return bnx2fc_stats; |
614 | } | 614 | } |
615 | bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat1.fc_crc_cnt; | 615 | bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat2.fc_crc_cnt; |
616 | bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt; | 616 | bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt; |
617 | bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4; | 617 | bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4; |
618 | bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt; | 618 | bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt; |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index f756d5f85c7a..d8e8a825560d 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c | |||
@@ -100,6 +100,9 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) | |||
100 | fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE << | 100 | fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE << |
101 | FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | 101 | FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); |
102 | 102 | ||
103 | fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION; | ||
104 | fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION; | ||
105 | |||
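These two assignments tie the driver build to the FCOE_HSI_MAJOR_VERSION/FCOE_HSI_MINOR_VERSION pair added in bnx2fc_constants.h; a firmware that disagrees answers the init request with FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION, which the bnx2fc_init_failure() change later in this patch reports. A minimal sketch of checking such a completion, assuming the KCQE carries its status in a completion_status field as the other handlers in this file do:

    /* Sketch only: the kcqe field name used here is an assumption. */
    if (init_kcqe->completion_status ==
        FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION)
            printk(KERN_ERR PFX "firmware rejected driver HSI %d.%d\n",
                   FCOE_HSI_MAJOR_VERSION, FCOE_HSI_MINOR_VERSION);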
103 | fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; | 106 | fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; |
104 | fcoe_init2.hash_tbl_pbl_addr_hi = (u32) | 107 | fcoe_init2.hash_tbl_pbl_addr_hi = (u32) |
105 | ((u64) hba->hash_tbl_pbl_dma >> 32); | 108 | ((u64) hba->hash_tbl_pbl_dma >> 32); |
@@ -122,6 +125,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) | |||
122 | fcoe_init3.error_bit_map_lo = 0xffffffff; | 125 | fcoe_init3.error_bit_map_lo = 0xffffffff; |
123 | fcoe_init3.error_bit_map_hi = 0xffffffff; | 126 | fcoe_init3.error_bit_map_hi = 0xffffffff; |
124 | 127 | ||
128 | fcoe_init3.perf_config = 1; | ||
125 | 129 | ||
126 | kwqe_arr[0] = (struct kwqe *) &fcoe_init1; | 130 | kwqe_arr[0] = (struct kwqe *) &fcoe_init1; |
127 | kwqe_arr[1] = (struct kwqe *) &fcoe_init2; | 131 | kwqe_arr[1] = (struct kwqe *) &fcoe_init2; |
@@ -289,19 +293,19 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port, | |||
289 | ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20; | 293 | ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20; |
290 | 294 | ||
291 | 295 | ||
292 | ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5]; | 296 | ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5]; |
293 | /* local mac */ | 297 | /* local mac */ |
294 | ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4]; | 298 | ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4]; |
295 | ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3]; | 299 | ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3]; |
296 | ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2]; | 300 | ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2]; |
297 | ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1]; | 301 | ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1]; |
298 | ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0]; | 302 | ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0]; |
299 | ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ | 303 | ofld_req4.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */ |
300 | ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; | 304 | ofld_req4.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4]; |
301 | ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; | 305 | ofld_req4.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3]; |
302 | ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; | 306 | ofld_req4.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2]; |
303 | ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; | 307 | ofld_req4.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1]; |
304 | ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; | 308 | ofld_req4.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0]; |
305 | 309 | ||
306 | ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; | 310 | ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; |
307 | ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); | 311 | ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); |
@@ -345,20 +349,21 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port, | |||
345 | enbl_req.hdr.flags = | 349 | enbl_req.hdr.flags = |
346 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | 350 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); |
347 | 351 | ||
348 | enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5]; | 352 | enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5]; |
349 | /* local mac */ | 353 | /* local mac */ |
350 | enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4]; | 354 | enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4]; |
351 | enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3]; | 355 | enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3]; |
352 | enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2]; | 356 | enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2]; |
353 | enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1]; | 357 | enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1]; |
354 | enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0]; | 358 | enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0]; |
355 | 359 | memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN); | |
356 | enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ | 360 | |
357 | enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; | 361 | enbl_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */ |
358 | enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; | 362 | enbl_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4]; |
359 | enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; | 363 | enbl_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3]; |
360 | enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; | 364 | enbl_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2]; |
361 | enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; | 365 | enbl_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1]; |
366 | enbl_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0]; | ||
362 | 367 | ||
363 | port_id = fc_host_port_id(lport->host); | 368 | port_id = fc_host_port_id(lport->host); |
364 | if (port_id != tgt->sid) { | 369 | if (port_id != tgt->sid) { |
@@ -411,18 +416,19 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port, | |||
411 | disable_req.hdr.flags = | 416 | disable_req.hdr.flags = |
412 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | 417 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); |
413 | 418 | ||
414 | disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5]; | 419 | disable_req.src_mac_addr_lo[0] = tgt->src_addr[5]; |
415 | disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3]; | 420 | disable_req.src_mac_addr_lo[1] = tgt->src_addr[4]; |
416 | disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2]; | 421 | disable_req.src_mac_addr_mid[0] = tgt->src_addr[3]; |
417 | disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1]; | 422 | disable_req.src_mac_addr_mid[1] = tgt->src_addr[2]; |
418 | disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0]; | 423 | disable_req.src_mac_addr_hi[0] = tgt->src_addr[1]; |
424 | disable_req.src_mac_addr_hi[1] = tgt->src_addr[0]; | ||
419 | 425 | ||
420 | disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ | 426 | disable_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */ |
421 | disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; | 427 | disable_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4]; |
422 | disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; | 428 | disable_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3]; |
423 | disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; | 429 | disable_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2]; |
424 | disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; | 430 | disable_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1]; |
425 | disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; | 431 | disable_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0]; |
426 | 432 | ||
427 | port_id = tgt->sid; | 433 | port_id = tgt->sid; |
428 | disable_req.s_id[0] = (port_id & 0x000000FF); | 434 | disable_req.s_id[0] = (port_id & 0x000000FF); |
@@ -640,10 +646,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
640 | xid = err_entry->fc_hdr.ox_id; | 646 | xid = err_entry->fc_hdr.ox_id; |
641 | BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid); | 647 | BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid); |
642 | BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n", | 648 | BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n", |
643 | err_entry->err_warn_bitmap_hi, | 649 | err_entry->data.err_warn_bitmap_hi, |
644 | err_entry->err_warn_bitmap_lo); | 650 | err_entry->data.err_warn_bitmap_lo); |
645 | BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n", | 651 | BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n", |
646 | err_entry->tx_buf_off, err_entry->rx_buf_off); | 652 | err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); |
647 | 653 | ||
648 | bnx2fc_return_rqe(tgt, 1); | 654 | bnx2fc_return_rqe(tgt, 1); |
649 | 655 | ||
@@ -722,10 +728,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
722 | xid = cpu_to_be16(err_entry->fc_hdr.ox_id); | 728 | xid = cpu_to_be16(err_entry->fc_hdr.ox_id); |
723 | BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid); | 729 | BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid); |
724 | BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x", | 730 | BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x", |
725 | err_entry->err_warn_bitmap_hi, | 731 | err_entry->data.err_warn_bitmap_hi, |
726 | err_entry->err_warn_bitmap_lo); | 732 | err_entry->data.err_warn_bitmap_lo); |
727 | BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", | 733 | BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", |
728 | err_entry->tx_buf_off, err_entry->rx_buf_off); | 734 | err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); |
729 | 735 | ||
730 | bnx2fc_return_rqe(tgt, 1); | 736 | bnx2fc_return_rqe(tgt, 1); |
731 | spin_unlock_bh(&tgt->tgt_lock); | 737 | spin_unlock_bh(&tgt->tgt_lock); |
@@ -762,9 +768,9 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
762 | task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx]; | 768 | task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx]; |
763 | task = &(task_page[index]); | 769 | task = &(task_page[index]); |
764 | 770 | ||
765 | num_rq = ((task->rx_wr_tx_rd.rx_flags & | 771 | num_rq = ((task->rxwr_txrd.var_ctx.rx_flags & |
766 | FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >> | 772 | FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >> |
767 | FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT); | 773 | FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT); |
768 | 774 | ||
769 | io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; | 775 | io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; |
770 | 776 | ||
@@ -777,22 +783,19 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
777 | /* Timestamp IO completion time */ | 783 | /* Timestamp IO completion time */ |
778 | cmd_type = io_req->cmd_type; | 784 | cmd_type = io_req->cmd_type; |
779 | 785 | ||
780 | /* optimized completion path */ | 786 | rx_state = ((task->rxwr_txrd.var_ctx.rx_flags & |
781 | if (cmd_type == BNX2FC_SCSI_CMD) { | 787 | FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >> |
782 | rx_state = ((task->rx_wr_tx_rd.rx_flags & | 788 | FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT); |
783 | FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >> | ||
784 | FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT); | ||
785 | 789 | ||
790 | /* Process other IO completion types */ | ||
791 | switch (cmd_type) { | ||
792 | case BNX2FC_SCSI_CMD: | ||
786 | if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) { | 793 | if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) { |
787 | bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq); | 794 | bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq); |
788 | spin_unlock_bh(&tgt->tgt_lock); | 795 | spin_unlock_bh(&tgt->tgt_lock); |
789 | return; | 796 | return; |
790 | } | 797 | } |
791 | } | ||
792 | 798 | ||
793 | /* Process other IO completion types */ | ||
794 | switch (cmd_type) { | ||
795 | case BNX2FC_SCSI_CMD: | ||
796 | if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) | 799 | if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) |
797 | bnx2fc_process_abts_compl(io_req, task, num_rq); | 800 | bnx2fc_process_abts_compl(io_req, task, num_rq); |
798 | else if (rx_state == | 801 | else if (rx_state == |
@@ -819,8 +822,16 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
819 | break; | 822 | break; |
820 | 823 | ||
821 | case BNX2FC_ELS: | 824 | case BNX2FC_ELS: |
822 | BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n"); | 825 | if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) |
823 | bnx2fc_process_els_compl(io_req, task, num_rq); | 826 | bnx2fc_process_els_compl(io_req, task, num_rq); |
827 | else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) | ||
828 | bnx2fc_process_abts_compl(io_req, task, num_rq); | ||
829 | else if (rx_state == | ||
830 | FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED) | ||
831 | bnx2fc_process_cleanup_compl(io_req, task, num_rq); | ||
832 | else | ||
833 | printk(KERN_ERR PFX "Invalid rx state = %d\n", | ||
834 | rx_state); | ||
824 | break; | 835 | break; |
825 | 836 | ||
826 | case BNX2FC_CLEANUP: | 837 | case BNX2FC_CLEANUP: |
@@ -835,6 +846,20 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) | |||
835 | spin_unlock_bh(&tgt->tgt_lock); | 846 | spin_unlock_bh(&tgt->tgt_lock); |
836 | } | 847 | } |
837 | 848 | ||
849 | void bnx2fc_arm_cq(struct bnx2fc_rport *tgt) | ||
850 | { | ||
851 | struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; | ||
852 | u32 msg; | ||
853 | |||
854 | wmb(); | ||
855 | rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit << | ||
856 | FCOE_CQE_TOGGLE_BIT_SHIFT); | ||
857 | msg = *((u32 *)rx_db); | ||
858 | writel(cpu_to_le32(msg), tgt->ctx_base); | ||
859 | mmiowb(); | ||
860 | |||
861 | } | ||
862 | |||
838 | struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) | 863 | struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) |
839 | { | 864 | { |
840 | struct bnx2fc_work *work; | 865 | struct bnx2fc_work *work; |
@@ -853,8 +878,8 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) | |||
853 | struct fcoe_cqe *cq; | 878 | struct fcoe_cqe *cq; |
854 | u32 cq_cons; | 879 | u32 cq_cons; |
855 | struct fcoe_cqe *cqe; | 880 | struct fcoe_cqe *cqe; |
881 | u32 num_free_sqes = 0; | ||
856 | u16 wqe; | 882 | u16 wqe; |
857 | bool more_cqes_found = false; | ||
858 | 883 | ||
859 | /* | 884 | /* |
860 | * cq_lock is a low contention lock used to protect | 885 | * cq_lock is a low contention lock used to protect |
@@ -872,62 +897,51 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) | |||
872 | cq_cons = tgt->cq_cons_idx; | 897 | cq_cons = tgt->cq_cons_idx; |
873 | cqe = &cq[cq_cons]; | 898 | cqe = &cq[cq_cons]; |
874 | 899 | ||
875 | do { | 900 | while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == |
876 | more_cqes_found ^= true; | 901 | (tgt->cq_curr_toggle_bit << |
877 | 902 | FCOE_CQE_TOGGLE_BIT_SHIFT)) { | |
878 | while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == | ||
879 | (tgt->cq_curr_toggle_bit << | ||
880 | FCOE_CQE_TOGGLE_BIT_SHIFT)) { | ||
881 | 903 | ||
882 | /* new entry on the cq */ | 904 | /* new entry on the cq */ |
883 | if (wqe & FCOE_CQE_CQE_TYPE) { | 905 | if (wqe & FCOE_CQE_CQE_TYPE) { |
884 | /* Unsolicited event notification */ | 906 | /* Unsolicited event notification */ |
885 | bnx2fc_process_unsol_compl(tgt, wqe); | 907 | bnx2fc_process_unsol_compl(tgt, wqe); |
886 | } else { | 908 | } else { |
887 | struct bnx2fc_work *work = NULL; | 909 | /* Pending work request completion */ |
888 | struct bnx2fc_percpu_s *fps = NULL; | 910 | struct bnx2fc_work *work = NULL; |
889 | unsigned int cpu = wqe % num_possible_cpus(); | 911 | struct bnx2fc_percpu_s *fps = NULL; |
890 | 912 | unsigned int cpu = wqe % num_possible_cpus(); | |
891 | fps = &per_cpu(bnx2fc_percpu, cpu); | 913 | |
892 | spin_lock_bh(&fps->fp_work_lock); | 914 | fps = &per_cpu(bnx2fc_percpu, cpu); |
893 | if (unlikely(!fps->iothread)) | 915 | spin_lock_bh(&fps->fp_work_lock); |
894 | goto unlock; | 916 | if (unlikely(!fps->iothread)) |
895 | 917 | goto unlock; | |
896 | work = bnx2fc_alloc_work(tgt, wqe); | 918 | |
897 | if (work) | 919 | work = bnx2fc_alloc_work(tgt, wqe); |
898 | list_add_tail(&work->list, | 920 | if (work) |
899 | &fps->work_list); | 921 | list_add_tail(&work->list, |
922 | &fps->work_list); | ||
900 | unlock: | 923 | unlock: |
901 | spin_unlock_bh(&fps->fp_work_lock); | 924 | spin_unlock_bh(&fps->fp_work_lock); |
902 | 925 | ||
903 | /* Pending work request completion */ | 926 | /* Pending work request completion */ |
904 | if (fps->iothread && work) | 927 | if (fps->iothread && work) |
905 | wake_up_process(fps->iothread); | 928 | wake_up_process(fps->iothread); |
906 | else | 929 | else |
907 | bnx2fc_process_cq_compl(tgt, wqe); | 930 | bnx2fc_process_cq_compl(tgt, wqe); |
908 | } | ||
909 | cqe++; | ||
910 | tgt->cq_cons_idx++; | ||
911 | |||
912 | if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) { | ||
913 | tgt->cq_cons_idx = 0; | ||
914 | cqe = cq; | ||
915 | tgt->cq_curr_toggle_bit = | ||
916 | 1 - tgt->cq_curr_toggle_bit; | ||
917 | } | ||
918 | } | 931 | } |
919 | /* Re-arm CQ */ | 932 | cqe++; |
920 | if (more_cqes_found) { | 933 | tgt->cq_cons_idx++; |
921 | tgt->conn_db->cq_arm.lo = -1; | 934 | num_free_sqes++; |
922 | wmb(); | 935 | |
936 | if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) { | ||
937 | tgt->cq_cons_idx = 0; | ||
938 | cqe = cq; | ||
939 | tgt->cq_curr_toggle_bit = | ||
940 | 1 - tgt->cq_curr_toggle_bit; | ||
923 | } | 941 | } |
924 | } while (more_cqes_found); | 942 | } |
925 | 943 | bnx2fc_arm_cq(tgt); | |
926 | /* | 944 | atomic_add(num_free_sqes, &tgt->free_sqes); |
927 | * Commit tgt->cq_cons_idx change to the memory | ||
928 | * spin_lock implies full memory barrier, no need to smp_wmb | ||
929 | */ | ||
930 | |||
931 | spin_unlock_bh(&tgt->cq_lock); | 945 | spin_unlock_bh(&tgt->cq_lock); |
932 | return 0; | 946 | return 0; |
933 | } | 947 | } |
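The wqe value queued here is the raw 16-bit CQE word; the completion path masks out the task id with the constants from the HSI header before looking up the command (the exact call site is outside this hunk). A hypothetical helper illustrating that decode:

    /* Hypothetical helper: extract the task id from a pending-work CQE word. */
    static inline u16 fcoe_pend_wqe_to_xid(u16 wqe)
    {
            return (wqe & FCOE_PEND_WQ_CQE_TASK_ID) >>
                   FCOE_PEND_WQ_CQE_TASK_ID_SHIFT;
    }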
@@ -1141,7 +1155,11 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code) | |||
1141 | case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR: | 1155 | case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR: |
1142 | printk(KERN_ERR PFX "init_failure due to NIC error\n"); | 1156 | printk(KERN_ERR PFX "init_failure due to NIC error\n"); |
1143 | break; | 1157 | break; |
1144 | 1158 | case FCOE_KCQE_COMPLETION_STATUS_ERROR: | |
1159 | printk(KERN_ERR PFX "init failure due to compl status err\n"); | ||
1160 | break; | ||
1161 | case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION: | ||
1162 | printk(KERN_ERR PFX "init failure due to HSI mismatch\n"); | ||
1145 | default: | 1163 | default: |
1146 | printk(KERN_ERR PFX "Unknown Error code %d\n", err_code); | 1164 | printk(KERN_ERR PFX "Unknown Error code %d\n", err_code); |
1147 | } | 1165 | } |
@@ -1247,21 +1265,14 @@ void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid) | |||
1247 | 1265 | ||
1248 | void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt) | 1266 | void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt) |
1249 | { | 1267 | { |
1250 | struct b577xx_doorbell_set_prod ev_doorbell; | 1268 | struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; |
1251 | u32 msg; | 1269 | u32 msg; |
1252 | 1270 | ||
1253 | wmb(); | 1271 | wmb(); |
1254 | 1272 | sq_db->prod = tgt->sq_prod_idx | | |
1255 | memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod)); | ||
1256 | ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE; | ||
1257 | |||
1258 | ev_doorbell.prod = tgt->sq_prod_idx | | ||
1259 | (tgt->sq_curr_toggle_bit << 15); | 1273 | (tgt->sq_curr_toggle_bit << 15); |
1260 | ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE << | 1274 | msg = *((u32 *)sq_db); |
1261 | B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT; | ||
1262 | msg = *((u32 *)&ev_doorbell); | ||
1263 | writel(cpu_to_le32(msg), tgt->ctx_base); | 1275 | writel(cpu_to_le32(msg), tgt->ctx_base); |
1264 | |||
1265 | mmiowb(); | 1276 | mmiowb(); |
1266 | 1277 | ||
1267 | } | 1278 | } |
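The doorbell header the removed lines rebuilt on every ring is now expected to live pre-built in tgt->sq_db (and, for the CQ side, tgt->rx_db); where that one-time setup happens is outside this hunk, so the following is only a sketch that reuses the values the old inline code composed:

    /* Assumed one-time setup at session bring-up, mirroring the removed code. */
    tgt->sq_db.header.header = B577XX_DOORBELL_HDR_DB_TYPE |
            (B577XX_FCOE_CONNECTION_TYPE << B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
    tgt->rx_db.hdr.header = B577XX_DOORBELL_HDR_DB_TYPE |
            (B577XX_FCOE_CONNECTION_TYPE << B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);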
@@ -1322,18 +1333,26 @@ void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, | |||
1322 | memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); | 1333 | memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); |
1323 | 1334 | ||
1324 | /* Tx Write Rx Read */ | 1335 | /* Tx Write Rx Read */ |
1325 | task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP << | 1336 | /* init flags */ |
1326 | FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; | 1337 | task->txwr_rxrd.const_ctx.init_flags = task_type << |
1327 | task->tx_wr_rx_rd.init_flags = task_type << | 1338 | FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; |
1328 | FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; | 1339 | task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << |
1329 | task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << | 1340 | FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; |
1330 | FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; | 1341 | task->txwr_rxrd.const_ctx.init_flags |= |
1331 | /* Common */ | 1342 | FCOE_TASK_DEV_TYPE_DISK << |
1332 | task->cmn.common_flags = context_id << | 1343 | FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; |
1333 | FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; | 1344 | task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; |
1334 | task->cmn.general.cleanup_info.task_id = orig_xid; | 1345 | |
1335 | 1346 | /* Tx flags */ | |
1336 | 1347 | task->txwr_rxrd.const_ctx.tx_flags = | |
1348 | FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP << | ||
1349 | FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; | ||
1350 | |||
1351 | /* Rx Read Tx Write */ | ||
1352 | task->rxwr_txrd.const_ctx.init_flags = context_id << | ||
1353 | FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; | ||
1354 | task->rxwr_txrd.var_ctx.rx_flags |= 1 << | ||
1355 | FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; | ||
1337 | } | 1356 | } |
1338 | 1357 | ||
1339 | void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, | 1358 | void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, |
@@ -1342,6 +1361,7 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, | |||
1342 | struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); | 1361 | struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); |
1343 | struct bnx2fc_rport *tgt = io_req->tgt; | 1362 | struct bnx2fc_rport *tgt = io_req->tgt; |
1344 | struct fc_frame_header *fc_hdr; | 1363 | struct fc_frame_header *fc_hdr; |
1364 | struct fcoe_ext_mul_sges_ctx *sgl; | ||
1345 | u8 task_type = 0; | 1365 | u8 task_type = 0; |
1346 | u64 *hdr; | 1366 | u64 *hdr; |
1347 | u64 temp_hdr[3]; | 1367 | u64 temp_hdr[3]; |
@@ -1367,47 +1387,49 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, | |||
1367 | /* Tx only */ | 1387 | /* Tx only */ |
1368 | if ((task_type == FCOE_TASK_TYPE_MIDPATH) || | 1388 | if ((task_type == FCOE_TASK_TYPE_MIDPATH) || |
1369 | (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { | 1389 | (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { |
1370 | task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = | 1390 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = |
1371 | (u32)mp_req->mp_req_bd_dma; | 1391 | (u32)mp_req->mp_req_bd_dma; |
1372 | task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = | 1392 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = |
1373 | (u32)((u64)mp_req->mp_req_bd_dma >> 32); | 1393 | (u32)((u64)mp_req->mp_req_bd_dma >> 32); |
1374 | task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1; | 1394 | task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1; |
1375 | BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n", | ||
1376 | (unsigned long long)mp_req->mp_req_bd_dma); | ||
1377 | } | 1395 | } |
1378 | 1396 | ||
1379 | /* Tx Write Rx Read */ | 1397 | /* Tx Write Rx Read */ |
1380 | task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT << | 1398 | /* init flags */ |
1381 | FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; | 1399 | task->txwr_rxrd.const_ctx.init_flags = task_type << |
1382 | task->tx_wr_rx_rd.init_flags = task_type << | 1400 | FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; |
1383 | FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; | 1401 | task->txwr_rxrd.const_ctx.init_flags |= |
1384 | task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK << | 1402 | FCOE_TASK_DEV_TYPE_DISK << |
1385 | FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT; | 1403 | FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; |
1386 | task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << | 1404 | task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << |
1387 | FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; | 1405 | FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; |
1388 | 1406 | ||
1389 | /* Common */ | 1407 | /* tx flags */ |
1390 | task->cmn.data_2_trns = io_req->data_xfer_len; | 1408 | task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT << |
1391 | context_id = tgt->context_id; | 1409 | FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; |
1392 | task->cmn.common_flags = context_id << | ||
1393 | FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; | ||
1394 | task->cmn.common_flags |= 1 << | ||
1395 | FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT; | ||
1396 | task->cmn.common_flags |= 1 << | ||
1397 | FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT; | ||
1398 | 1410 | ||
1399 | /* Rx Write Tx Read */ | 1411 | /* Rx Write Tx Read */ |
1412 | task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; | ||
1413 | |||
1414 | /* rx flags */ | ||
1415 | task->rxwr_txrd.var_ctx.rx_flags |= 1 << | ||
1416 | FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; | ||
1417 | |||
1418 | context_id = tgt->context_id; | ||
1419 | task->rxwr_txrd.const_ctx.init_flags = context_id << | ||
1420 | FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; | ||
1421 | |||
1400 | fc_hdr = &(mp_req->req_fc_hdr); | 1422 | fc_hdr = &(mp_req->req_fc_hdr); |
1401 | if (task_type == FCOE_TASK_TYPE_MIDPATH) { | 1423 | if (task_type == FCOE_TASK_TYPE_MIDPATH) { |
1402 | fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid); | 1424 | fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid); |
1403 | fc_hdr->fh_rx_id = htons(0xffff); | 1425 | fc_hdr->fh_rx_id = htons(0xffff); |
1404 | task->rx_wr_tx_rd.rx_id = 0xffff; | 1426 | task->rxwr_txrd.var_ctx.rx_id = 0xffff; |
1405 | } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) { | 1427 | } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) { |
1406 | fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid); | 1428 | fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid); |
1407 | } | 1429 | } |
1408 | 1430 | ||
1409 | /* Fill FC Header into middle path buffer */ | 1431 | /* Fill FC Header into middle path buffer */ |
1410 | hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr; | 1432 | hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr; |
1411 | memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr)); | 1433 | memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr)); |
1412 | hdr[0] = cpu_to_be64(temp_hdr[0]); | 1434 | hdr[0] = cpu_to_be64(temp_hdr[0]); |
1413 | hdr[1] = cpu_to_be64(temp_hdr[1]); | 1435 | hdr[1] = cpu_to_be64(temp_hdr[1]); |
@@ -1415,12 +1437,12 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, | |||
1415 | 1437 | ||
1416 | /* Rx Only */ | 1438 | /* Rx Only */ |
1417 | if (task_type == FCOE_TASK_TYPE_MIDPATH) { | 1439 | if (task_type == FCOE_TASK_TYPE_MIDPATH) { |
1440 | sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; | ||
1418 | 1441 | ||
1419 | task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = | 1442 | sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma; |
1420 | (u32)mp_req->mp_resp_bd_dma; | 1443 | sgl->mul_sgl.cur_sge_addr.hi = |
1421 | task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = | ||
1422 | (u32)((u64)mp_req->mp_resp_bd_dma >> 32); | 1444 | (u32)((u64)mp_req->mp_resp_bd_dma >> 32); |
1423 | task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1; | 1445 | sgl->mul_sgl.sgl_size = 1; |
1424 | } | 1446 | } |
1425 | } | 1447 | } |
1426 | 1448 | ||
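The mid-path setup above keeps the old trick for placing the FC frame header into the task context: the 24-byte header is staged in a local u64[3] and stored as three big-endian 64-bit words. A small sketch of that pattern follows; the type is a stand-in for struct fc_frame_header and htobe64() stands in for the kernel's cpu_to_be64().

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sketch_fc_hdr {          /* 24 bytes, like struct fc_frame_header */
	uint8_t bytes[24];
};

static void copy_fc_hdr_be(uint64_t dst[3], const struct sketch_fc_hdr *fc_hdr)
{
	uint64_t tmp[3];

	memcpy(tmp, fc_hdr, sizeof(tmp));   /* mirrors the driver's memcpy */
	dst[0] = htobe64(tmp[0]);
	dst[1] = htobe64(tmp[1]);
	dst[2] = htobe64(tmp[2]);
}

int main(void)
{
	struct sketch_fc_hdr hdr;
	uint64_t ctx_copy[3];

	for (int i = 0; i < 24; i++)
		hdr.bytes[i] = (uint8_t)i;

	copy_fc_hdr_be(ctx_copy, &hdr);
	printf("first word in context: 0x%016llx\n",
	       (unsigned long long)ctx_copy[0]);
	return 0;
}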
@@ -1431,6 +1453,8 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req, | |||
1431 | struct scsi_cmnd *sc_cmd = io_req->sc_cmd; | 1453 | struct scsi_cmnd *sc_cmd = io_req->sc_cmd; |
1432 | struct io_bdt *bd_tbl = io_req->bd_tbl; | 1454 | struct io_bdt *bd_tbl = io_req->bd_tbl; |
1433 | struct bnx2fc_rport *tgt = io_req->tgt; | 1455 | struct bnx2fc_rport *tgt = io_req->tgt; |
1456 | struct fcoe_cached_sge_ctx *cached_sge; | ||
1457 | struct fcoe_ext_mul_sges_ctx *sgl; | ||
1434 | u64 *fcp_cmnd; | 1458 | u64 *fcp_cmnd; |
1435 | u64 tmp_fcp_cmnd[4]; | 1459 | u64 tmp_fcp_cmnd[4]; |
1436 | u32 context_id; | 1460 | u32 context_id; |
@@ -1449,47 +1473,33 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req, | |||
1449 | 1473 | ||
1450 | /* Tx only */ | 1474 | /* Tx only */ |
1451 | if (task_type == FCOE_TASK_TYPE_WRITE) { | 1475 | if (task_type == FCOE_TASK_TYPE_WRITE) { |
1452 | task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = | 1476 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = |
1453 | (u32)bd_tbl->bd_tbl_dma; | 1477 | (u32)bd_tbl->bd_tbl_dma; |
1454 | task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = | 1478 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = |
1455 | (u32)((u64)bd_tbl->bd_tbl_dma >> 32); | 1479 | (u32)((u64)bd_tbl->bd_tbl_dma >> 32); |
1456 | task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = | 1480 | task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = |
1457 | bd_tbl->bd_valid; | 1481 | bd_tbl->bd_valid; |
1458 | } | 1482 | } |
1459 | 1483 | ||
1460 | /*Tx Write Rx Read */ | 1484 | /*Tx Write Rx Read */ |
1461 | /* Init state to NORMAL */ | 1485 | /* Init state to NORMAL */ |
1462 | task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL << | 1486 | task->txwr_rxrd.const_ctx.init_flags = task_type << |
1463 | FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; | 1487 | FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; |
1464 | task->tx_wr_rx_rd.init_flags = task_type << | 1488 | task->txwr_rxrd.const_ctx.init_flags |= |
1465 | FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; | 1489 | FCOE_TASK_DEV_TYPE_DISK << |
1466 | task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK << | 1490 | FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; |
1467 | FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT; | 1491 | task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << |
1468 | task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << | 1492 | FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; |
1469 | FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; | 1493 | /* tx flags */ |
1470 | 1494 | task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL << | |
1471 | /* Common */ | 1495 | FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; |
1472 | task->cmn.data_2_trns = io_req->data_xfer_len; | ||
1473 | context_id = tgt->context_id; | ||
1474 | task->cmn.common_flags = context_id << | ||
1475 | FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; | ||
1476 | task->cmn.common_flags |= 1 << | ||
1477 | FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT; | ||
1478 | task->cmn.common_flags |= 1 << | ||
1479 | FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT; | ||
1480 | |||
1481 | /* Set initiative ownership */ | ||
1482 | task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT; | ||
1483 | 1496 | ||
1484 | /* Set initial seq counter */ | 1497 | /* Set initial seq counter */ |
1485 | task->cmn.tx_low_seq_cnt = 1; | 1498 | task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1; |
1486 | |||
1487 | /* Set state to "waiting for the first packet" */ | ||
1488 | task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME; | ||
1489 | 1499 | ||
1490 | /* Fill FCP_CMND IU */ | 1500 | /* Fill FCP_CMND IU */ |
1491 | fcp_cmnd = (u64 *) | 1501 | fcp_cmnd = (u64 *) |
1492 | task->cmn.general.cmd_info.fcp_cmd_payload.opaque; | 1502 | task->txwr_rxrd.union_ctx.fcp_cmd.opaque; |
1493 | bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd); | 1503 | bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd); |
1494 | 1504 | ||
1495 | /* swap fcp_cmnd */ | 1505 | /* swap fcp_cmnd */ |
@@ -1501,32 +1511,54 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req, | |||
1501 | } | 1511 | } |
1502 | 1512 | ||
1503 | /* Rx Write Tx Read */ | 1513 | /* Rx Write Tx Read */ |
1504 | task->rx_wr_tx_rd.rx_id = 0xffff; | 1514 | task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; |
1515 | |||
1516 | context_id = tgt->context_id; | ||
1517 | task->rxwr_txrd.const_ctx.init_flags = context_id << | ||
1518 | FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; | ||
1519 | |||
1520 | /* rx flags */ | ||
1521 | /* Set state to "waiting for the first packet" */ | ||
1522 | task->rxwr_txrd.var_ctx.rx_flags |= 1 << | ||
1523 | FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; | ||
1524 | |||
1525 | task->rxwr_txrd.var_ctx.rx_id = 0xffff; | ||
1505 | 1526 | ||
1506 | /* Rx Only */ | 1527 | /* Rx Only */ |
1528 | cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge; | ||
1529 | sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; | ||
1530 | bd_count = bd_tbl->bd_valid; | ||
1507 | if (task_type == FCOE_TASK_TYPE_READ) { | 1531 | if (task_type == FCOE_TASK_TYPE_READ) { |
1508 | |||
1509 | bd_count = bd_tbl->bd_valid; | ||
1510 | if (bd_count == 1) { | 1532 | if (bd_count == 1) { |
1511 | 1533 | ||
1512 | struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; | 1534 | struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; |
1513 | 1535 | ||
1514 | task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo = | 1536 | cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; |
1515 | fcoe_bd_tbl->buf_addr_lo; | 1537 | cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; |
1516 | task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi = | 1538 | cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; |
1517 | fcoe_bd_tbl->buf_addr_hi; | 1539 | task->txwr_rxrd.const_ctx.init_flags |= 1 << |
1518 | task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem = | 1540 | FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; |
1519 | fcoe_bd_tbl->buf_len; | 1541 | } else if (bd_count == 2) { |
1520 | task->tx_wr_rx_rd.init_flags |= 1 << | 1542 | struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; |
1521 | FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT; | 1543 | |
1544 | cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; | ||
1545 | cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; | ||
1546 | cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; | ||
1547 | |||
1548 | fcoe_bd_tbl++; | ||
1549 | cached_sge->second_buf_addr.lo = | ||
1550 | fcoe_bd_tbl->buf_addr_lo; | ||
1551 | cached_sge->second_buf_addr.hi = | ||
1552 | fcoe_bd_tbl->buf_addr_hi; | ||
1553 | cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len; | ||
1554 | task->txwr_rxrd.const_ctx.init_flags |= 1 << | ||
1555 | FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; | ||
1522 | } else { | 1556 | } else { |
1523 | 1557 | ||
1524 | task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = | 1558 | sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; |
1525 | (u32)bd_tbl->bd_tbl_dma; | 1559 | sgl->mul_sgl.cur_sge_addr.hi = |
1526 | task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = | ||
1527 | (u32)((u64)bd_tbl->bd_tbl_dma >> 32); | 1560 | (u32)((u64)bd_tbl->bd_tbl_dma >> 32); |
1528 | task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = | 1561 | sgl->mul_sgl.sgl_size = bd_count; |
1529 | bd_tbl->bd_valid; | ||
1530 | } | 1562 | } |
1531 | } | 1563 | } |
1532 | } | 1564 | } |
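For read commands the reworked bnx2fc_init_task() can now cache one or two buffer descriptors directly inside the task context (cached_sge) and set the CACHED_SGE bit in init_flags; anything longer still goes through the external multi-SGE table. A sketch of that decision is below, using hypothetical stand-in types rather than the fcoe_* HSI structures.

#include <stdint.h>
#include <stdio.h>

struct sketch_bd { uint32_t addr_lo, addr_hi, len; };

struct sketch_cached_sge {
	struct sketch_bd first, second;   /* up to two inline buffers */
};

struct sketch_sgl {
	uint64_t sgl_dma;                 /* DMA address of the BD table */
	uint16_t sgl_size;                /* number of BDs in the table */
};

/* returns 1 when the cached-SGE flag should be set in init_flags */
static int fill_read_sges(struct sketch_cached_sge *cached,
			  struct sketch_sgl *sgl,
			  const struct sketch_bd *bd_tbl,
			  uint64_t bd_tbl_dma, unsigned int bd_count)
{
	if (bd_count == 1) {
		cached->first = bd_tbl[0];
		return 1;
	}
	if (bd_count == 2) {
		cached->first = bd_tbl[0];
		cached->second = bd_tbl[1];
		return 1;
	}
	/* three or more BDs: point the task at the external SGL */
	sgl->sgl_dma = bd_tbl_dma;
	sgl->sgl_size = (uint16_t)bd_count;
	return 0;
}

int main(void)
{
	struct sketch_bd bds[3] = { { 0x1000, 0, 512 }, { 0x2000, 0, 512 },
				    { 0x3000, 0, 512 } };
	struct sketch_cached_sge cached = { { 0, 0, 0 }, { 0, 0, 0 } };
	struct sketch_sgl sgl = { 0, 0 };

	printf("2 BDs -> cached sge: %d\n",
	       fill_read_sges(&cached, &sgl, bds, 0xdead0000, 2));
	printf("3 BDs -> cached sge: %d\n",
	       fill_read_sges(&cached, &sgl, bds, 0xdead0000, 3));
	return 0;
}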
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index b5b5c346d779..5dc4205ed8af 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c | |||
@@ -425,6 +425,7 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) | |||
425 | struct list_head *listp; | 425 | struct list_head *listp; |
426 | struct io_bdt *bd_tbl; | 426 | struct io_bdt *bd_tbl; |
427 | int index = RESERVE_FREE_LIST_INDEX; | 427 | int index = RESERVE_FREE_LIST_INDEX; |
428 | u32 free_sqes; | ||
428 | u32 max_sqes; | 429 | u32 max_sqes; |
429 | u16 xid; | 430 | u16 xid; |
430 | 431 | ||
@@ -445,8 +446,10 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) | |||
445 | * cmgr lock | 446 | * cmgr lock |
446 | */ | 447 | */ |
447 | spin_lock_bh(&cmd_mgr->free_list_lock[index]); | 448 | spin_lock_bh(&cmd_mgr->free_list_lock[index]); |
449 | free_sqes = atomic_read(&tgt->free_sqes); | ||
448 | if ((list_empty(&(cmd_mgr->free_list[index]))) || | 450 | if ((list_empty(&(cmd_mgr->free_list[index]))) || |
449 | (tgt->num_active_ios.counter >= max_sqes)) { | 451 | (tgt->num_active_ios.counter >= max_sqes) || |
452 | (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) { | ||
450 | BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " | 453 | BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " |
451 | "ios(%d):sqes(%d)\n", | 454 | "ios(%d):sqes(%d)\n", |
452 | tgt->num_active_ios.counter, tgt->max_sqes); | 455 | tgt->num_active_ios.counter, tgt->max_sqes); |
@@ -463,6 +466,7 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) | |||
463 | xid = io_req->xid; | 466 | xid = io_req->xid; |
464 | cmd_mgr->cmds[xid] = io_req; | 467 | cmd_mgr->cmds[xid] = io_req; |
465 | atomic_inc(&tgt->num_active_ios); | 468 | atomic_inc(&tgt->num_active_ios); |
469 | atomic_dec(&tgt->free_sqes); | ||
466 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); | 470 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
467 | 471 | ||
468 | INIT_LIST_HEAD(&io_req->link); | 472 | INIT_LIST_HEAD(&io_req->link); |
@@ -489,6 +493,7 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) | |||
489 | struct bnx2fc_cmd *io_req; | 493 | struct bnx2fc_cmd *io_req; |
490 | struct list_head *listp; | 494 | struct list_head *listp; |
491 | struct io_bdt *bd_tbl; | 495 | struct io_bdt *bd_tbl; |
496 | u32 free_sqes; | ||
492 | u32 max_sqes; | 497 | u32 max_sqes; |
493 | u16 xid; | 498 | u16 xid; |
494 | int index = get_cpu(); | 499 | int index = get_cpu(); |
@@ -499,8 +504,10 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) | |||
499 | * cmgr lock | 504 | * cmgr lock |
500 | */ | 505 | */ |
501 | spin_lock_bh(&cmd_mgr->free_list_lock[index]); | 506 | spin_lock_bh(&cmd_mgr->free_list_lock[index]); |
507 | free_sqes = atomic_read(&tgt->free_sqes); | ||
502 | if ((list_empty(&cmd_mgr->free_list[index])) || | 508 | if ((list_empty(&cmd_mgr->free_list[index])) || |
503 | (tgt->num_active_ios.counter >= max_sqes)) { | 509 | (tgt->num_active_ios.counter >= max_sqes) || |
510 | (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) { | ||
504 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); | 511 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
505 | put_cpu(); | 512 | put_cpu(); |
506 | return NULL; | 513 | return NULL; |
@@ -513,6 +520,7 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) | |||
513 | xid = io_req->xid; | 520 | xid = io_req->xid; |
514 | cmd_mgr->cmds[xid] = io_req; | 521 | cmd_mgr->cmds[xid] = io_req; |
515 | atomic_inc(&tgt->num_active_ios); | 522 | atomic_inc(&tgt->num_active_ios); |
523 | atomic_dec(&tgt->free_sqes); | ||
516 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); | 524 | spin_unlock_bh(&cmd_mgr->free_list_lock[index]); |
517 | put_cpu(); | 525 | put_cpu(); |
518 | 526 | ||
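Both command allocators now also consult a per-target free_sqes counter (initialized to BNX2FC_SQ_WQES_MAX in bnx2fc_init_tgt(), later in this patch): an allocation is refused once free_sqes falls to BNX2FC_SQ_WQES_MAX - max_sqes or below, and every successful allocation consumes one credit, which the completion path gives back. A compact sketch of that guard follows, using C11 atomics in place of the kernel's atomic_t; the names and the single credit pool are illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_SQ_WQES_MAX 256

static atomic_int free_sqes;

/* mirrors the new check: reject when free_sqes + max_sqes <= SQ_WQES_MAX */
static bool sketch_cmd_alloc(int max_sqes)
{
	int free_now = atomic_load(&free_sqes);

	if (free_now + max_sqes <= SKETCH_SQ_WQES_MAX)
		return false;                 /* not enough SQ headroom */

	atomic_fetch_sub(&free_sqes, 1);      /* consume one SQ credit */
	return true;
}

int main(void)
{
	int allocated = 0;

	atomic_init(&free_sqes, SKETCH_SQ_WQES_MAX);

	/* with max_sqes equal to the whole ring the check is simply
	 * "free_sqes > 0", so exactly SQ_WQES_MAX allocations succeed */
	while (sketch_cmd_alloc(SKETCH_SQ_WQES_MAX))
		allocated++;

	printf("allocated %d of %d SQ entries\n", allocated, SKETCH_SQ_WQES_MAX);
	return 0;
}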
@@ -873,7 +881,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req) | |||
873 | 881 | ||
874 | /* Obtain oxid and rxid for the original exchange to be aborted */ | 882 | /* Obtain oxid and rxid for the original exchange to be aborted */ |
875 | fc_hdr->fh_ox_id = htons(io_req->xid); | 883 | fc_hdr->fh_ox_id = htons(io_req->xid); |
876 | fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id); | 884 | fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id); |
877 | 885 | ||
878 | sid = tgt->sid; | 886 | sid = tgt->sid; |
879 | did = rport->port_id; | 887 | did = rport->port_id; |
@@ -1189,7 +1197,7 @@ void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req, | |||
1189 | kref_put(&io_req->refcount, | 1197 | kref_put(&io_req->refcount, |
1190 | bnx2fc_cmd_release); /* drop timer hold */ | 1198 | bnx2fc_cmd_release); /* drop timer hold */ |
1191 | 1199 | ||
1192 | r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl; | 1200 | r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl; |
1193 | 1201 | ||
1194 | switch (r_ctl) { | 1202 | switch (r_ctl) { |
1195 | case FC_RCTL_BA_ACC: | 1203 | case FC_RCTL_BA_ACC: |
@@ -1344,12 +1352,13 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, | |||
1344 | fc_hdr = &(tm_req->resp_fc_hdr); | 1352 | fc_hdr = &(tm_req->resp_fc_hdr); |
1345 | hdr = (u64 *)fc_hdr; | 1353 | hdr = (u64 *)fc_hdr; |
1346 | temp_hdr = (u64 *) | 1354 | temp_hdr = (u64 *) |
1347 | &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr; | 1355 | &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr; |
1348 | hdr[0] = cpu_to_be64(temp_hdr[0]); | 1356 | hdr[0] = cpu_to_be64(temp_hdr[0]); |
1349 | hdr[1] = cpu_to_be64(temp_hdr[1]); | 1357 | hdr[1] = cpu_to_be64(temp_hdr[1]); |
1350 | hdr[2] = cpu_to_be64(temp_hdr[2]); | 1358 | hdr[2] = cpu_to_be64(temp_hdr[2]); |
1351 | 1359 | ||
1352 | tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off; | 1360 | tm_req->resp_len = |
1361 | task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len; | ||
1353 | 1362 | ||
1354 | rsp_buf = tm_req->resp_buf; | 1363 | rsp_buf = tm_req->resp_buf; |
1355 | 1364 | ||
@@ -1724,7 +1733,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, | |||
1724 | 1733 | ||
1725 | /* Fetch fcp_rsp from task context and perform cmd completion */ | 1734 | /* Fetch fcp_rsp from task context and perform cmd completion */ |
1726 | fcp_rsp = (struct fcoe_fcp_rsp_payload *) | 1735 | fcp_rsp = (struct fcoe_fcp_rsp_payload *) |
1727 | &(task->cmn.general.rsp_info.fcp_rsp.payload); | 1736 | &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload); |
1728 | 1737 | ||
1729 | /* parse fcp_rsp and obtain sense data from RQ if available */ | 1738 | /* parse fcp_rsp and obtain sense data from RQ if available */ |
1730 | bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq); | 1739 | bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq); |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index a2e3830bd268..3e892bd66fbe 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c | |||
@@ -133,6 +133,8 @@ retry_ofld: | |||
133 | /* upload will take care of cleaning up sess resc */ | 133 | /* upload will take care of cleaning up sess resc */ |
134 | lport->tt.rport_logoff(rdata); | 134 | lport->tt.rport_logoff(rdata); |
135 | } | 135 | } |
136 | /* Arm CQ */ | ||
137 | bnx2fc_arm_cq(tgt); | ||
136 | return; | 138 | return; |
137 | 139 | ||
138 | ofld_err: | 140 | ofld_err: |
@@ -315,6 +317,8 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, | |||
315 | 317 | ||
316 | struct fc_rport *rport = rdata->rport; | 318 | struct fc_rport *rport = rdata->rport; |
317 | struct bnx2fc_hba *hba = port->priv; | 319 | struct bnx2fc_hba *hba = port->priv; |
320 | struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; | ||
321 | struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; | ||
318 | 322 | ||
319 | tgt->rport = rport; | 323 | tgt->rport = rport; |
320 | tgt->rdata = rdata; | 324 | tgt->rdata = rdata; |
@@ -335,6 +339,7 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, | |||
335 | tgt->max_sqes = BNX2FC_SQ_WQES_MAX; | 339 | tgt->max_sqes = BNX2FC_SQ_WQES_MAX; |
336 | tgt->max_rqes = BNX2FC_RQ_WQES_MAX; | 340 | tgt->max_rqes = BNX2FC_RQ_WQES_MAX; |
337 | tgt->max_cqes = BNX2FC_CQ_WQES_MAX; | 341 | tgt->max_cqes = BNX2FC_CQ_WQES_MAX; |
342 | atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX); | ||
338 | 343 | ||
339 | /* Initialize the toggle bit */ | 344 | /* Initialize the toggle bit */ |
340 | tgt->sq_curr_toggle_bit = 1; | 345 | tgt->sq_curr_toggle_bit = 1; |
@@ -345,7 +350,17 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, | |||
345 | tgt->rq_cons_idx = 0; | 350 | tgt->rq_cons_idx = 0; |
346 | atomic_set(&tgt->num_active_ios, 0); | 351 | atomic_set(&tgt->num_active_ios, 0); |
347 | 352 | ||
348 | tgt->work_time_slice = 2; | 353 | /* initialize sq doorbell */ |
354 | sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE; | ||
355 | sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE << | ||
356 | B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT; | ||
357 | /* initialize rx doorbell */ | ||
358 | rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) | | ||
359 | (0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) | | ||
360 | (B577XX_FCOE_CONNECTION_TYPE << | ||
361 | B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT)); | ||
362 | rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) | | ||
363 | (0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT); | ||
349 | 364 | ||
350 | spin_lock_init(&tgt->tgt_lock); | 365 | spin_lock_init(&tgt->tgt_lock); |
351 | spin_lock_init(&tgt->cq_lock); | 366 | spin_lock_init(&tgt->cq_lock); |
@@ -758,8 +773,6 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
758 | } | 773 | } |
759 | memset(tgt->lcq, 0, tgt->lcq_mem_size); | 774 | memset(tgt->lcq, 0, tgt->lcq_mem_size); |
760 | 775 | ||
761 | /* Arm CQ */ | ||
762 | tgt->conn_db->cq_arm.lo = -1; | ||
763 | tgt->conn_db->rq_prod = 0x8000; | 776 | tgt->conn_db->rq_prod = 0x8000; |
764 | 777 | ||
765 | return 0; | 778 | return 0; |
@@ -787,6 +800,8 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, | |||
787 | iounmap(tgt->ctx_base); | 800 | iounmap(tgt->ctx_base); |
788 | tgt->ctx_base = NULL; | 801 | tgt->ctx_base = NULL; |
789 | } | 802 | } |
803 | |||
804 | spin_lock_bh(&tgt->cq_lock); | ||
790 | /* Free LCQ */ | 805 | /* Free LCQ */ |
791 | if (tgt->lcq) { | 806 | if (tgt->lcq) { |
792 | dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, | 807 | dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, |
@@ -828,17 +843,16 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, | |||
828 | tgt->rq = NULL; | 843 | tgt->rq = NULL; |
829 | } | 844 | } |
830 | /* Free CQ */ | 845 | /* Free CQ */ |
831 | spin_lock_bh(&tgt->cq_lock); | ||
832 | if (tgt->cq) { | 846 | if (tgt->cq) { |
833 | dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, | 847 | dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, |
834 | tgt->cq, tgt->cq_dma); | 848 | tgt->cq, tgt->cq_dma); |
835 | tgt->cq = NULL; | 849 | tgt->cq = NULL; |
836 | } | 850 | } |
837 | spin_unlock_bh(&tgt->cq_lock); | ||
838 | /* Free SQ */ | 851 | /* Free SQ */ |
839 | if (tgt->sq) { | 852 | if (tgt->sq) { |
840 | dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, | 853 | dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, |
841 | tgt->sq, tgt->sq_dma); | 854 | tgt->sq, tgt->sq_dma); |
842 | tgt->sq = NULL; | 855 | tgt->sq = NULL; |
843 | } | 856 | } |
857 | spin_unlock_bh(&tgt->cq_lock); | ||
844 | } | 858 | } |
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h index dad6c8a34317..71890a063cd3 100644 --- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h +++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h | |||
@@ -707,8 +707,10 @@ struct iscsi_kwqe_conn_update { | |||
707 | #define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 | 707 | #define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 |
708 | #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) | 708 | #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) |
709 | #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 | 709 | #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 |
710 | #define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4) | 710 | #define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4) |
711 | #define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4 | 711 | #define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4 |
712 | #define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6) | ||
713 | #define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6 | ||
712 | #elif defined(__LITTLE_ENDIAN) | 714 | #elif defined(__LITTLE_ENDIAN) |
713 | u8 conn_flags; | 715 | u8 conn_flags; |
714 | #define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0) | 716 | #define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0) |
@@ -719,8 +721,10 @@ struct iscsi_kwqe_conn_update { | |||
719 | #define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 | 721 | #define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 |
720 | #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) | 722 | #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) |
721 | #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 | 723 | #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 |
722 | #define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4) | 724 | #define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4) |
723 | #define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4 | 725 | #define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4 |
726 | #define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6) | ||
727 | #define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6 | ||
724 | u8 reserved2; | 728 | u8 reserved2; |
725 | u8 max_outstanding_r2ts; | 729 | u8 max_outstanding_r2ts; |
726 | u8 session_error_recovery_level; | 730 | u8 session_error_recovery_level; |
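In the iSCSI HSI hunk above, bits 4-5 of the connection-update conn_flags byte, previously reserved, become a 2-bit OOO (out-of-order) support mode; bits 6-7 stay reserved. The header keeps its usual (mask, shift) macro pair convention, illustrated by the sketch below. The macro values copy the ones visible in the hunk; the helper function names are made up for the example.

#include <stdint.h>
#include <stdio.h>

#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE        (0x3 << 4)
#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT  4

static uint8_t sketch_set_ooo_mode(uint8_t conn_flags, uint8_t mode)
{
	conn_flags &= (uint8_t)~ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE;
	conn_flags |= (uint8_t)((mode << ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT) &
				ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE);
	return conn_flags;
}

static uint8_t sketch_get_ooo_mode(uint8_t conn_flags)
{
	return (conn_flags & ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE) >>
	       ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT;
}

int main(void)
{
	uint8_t flags = 0;

	flags = sketch_set_ooo_mode(flags, 0x2);
	printf("conn_flags = 0x%02x, ooo mode = %u\n",
	       flags, sketch_get_ooo_mode(flags));
	return 0;
}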