author     Vlad Zolotarov <vladz@broadcom.com>          2011-06-14 07:33:44 -0400
committer  David S. Miller <davem@conan.davemloft.net>  2011-06-15 10:56:37 -0400
commit     619c5cb6885b936c44ae1422ef805b69c6291485 (patch)
tree       4604ae08f1eb12c6ad1f65106879c2e73946ae12 /drivers/net/bnx2x/bnx2x.h
parent     042181f5aa8833a8918e1a91cfaf292146ffc62c (diff)
New 7.0 FW: bnx2x, cnic, bnx2i, bnx2fc
New FW/HSI (7.0):
- Added support for the 578xx chips.
- Improved HSI - the driver now needs far less direct access to the FW
  internal memory.
New implementation of the HSI handling layer in bnx2x (bnx2x_sp.c):
- Introduced chip-dependent objects that expose chip-independent interfaces
  for configuring MACs, multicast addresses, Rx mode, the indirection table,
  fast-path queues and function initialization/cleanup.
- Object functionality is based on private function pointers, which allows
  not only per-chip but also PF/VF differentiation while still preserving
  the same interface towards the rest of the driver (see the sketch after
  this list).
- The object interface is not affected by HSI changes that do not require
  new parameters, so code outside bnx2x_sp.c stays invariant with regard to
  such HSI changes.
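For illustration only, a minimal sketch of the object pattern described above:
a chip-independent interface backed by private, per-chip function pointers.
The names below are invented for this example and are not the actual
bnx2x_sp.c API; u8/bool are assumed from <linux/types.h>.

/* Hypothetical example -- not the driver's real objects. */
struct example_mac_obj {
	/* filled in per chip (and per PF/VF) when the object is set up */
	int (*config_mac)(struct example_mac_obj *o, const u8 *mac, bool add);
	void *chip_private;	/* e.g. E1x vs. E2 ramrod data */
};

/* The rest of the driver only ever calls the chip-independent wrapper. */
static inline int example_set_mac(struct example_mac_obj *o,
				  const u8 *mac, bool add)
{
	return o->config_mac(o, mac, add);
}

Swapping config_mac at initialization time is what allows per-chip and PF/VF
differentiation while the call sites stay unchanged.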
Changes in the CNIC, bnx2fc and bnx2i modules due to the new HSI.
Signed-off-by: Vladislav Zolotarov <vladz@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@conan.davemloft.net>
Diffstat (limited to 'drivers/net/bnx2x/bnx2x.h')
-rw-r--r--   drivers/net/bnx2x/bnx2x.h   870
1 file changed, 489 insertions(+), 381 deletions(-)
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 6d4d6d4e53c6..b4b3abe9e7c4 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -47,11 +47,12 @@ | |||
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | #include <linux/mdio.h> | 49 | #include <linux/mdio.h> |
50 | #include <linux/pci.h> | 50 | |
51 | #include "bnx2x_reg.h" | 51 | #include "bnx2x_reg.h" |
52 | #include "bnx2x_fw_defs.h" | 52 | #include "bnx2x_fw_defs.h" |
53 | #include "bnx2x_hsi.h" | 53 | #include "bnx2x_hsi.h" |
54 | #include "bnx2x_link.h" | 54 | #include "bnx2x_link.h" |
55 | #include "bnx2x_sp.h" | ||
55 | #include "bnx2x_dcb.h" | 56 | #include "bnx2x_dcb.h" |
56 | #include "bnx2x_stats.h" | 57 | #include "bnx2x_stats.h" |
57 | 58 | ||
@@ -80,6 +81,12 @@ do { \ | |||
80 | ##__args); \ | 81 | ##__args); \ |
81 | } while (0) | 82 | } while (0) |
82 | 83 | ||
84 | #define DP_CONT(__mask, __fmt, __args...) \ | ||
85 | do { \ | ||
86 | if (bp->msg_enable & (__mask)) \ | ||
87 | pr_cont(__fmt, ##__args); \ | ||
88 | } while (0) | ||
89 | |||
83 | /* errors debug print */ | 90 | /* errors debug print */ |
84 | #define BNX2X_DBG_ERR(__fmt, __args...) \ | 91 | #define BNX2X_DBG_ERR(__fmt, __args...) \ |
85 | do { \ | 92 | do { \ |
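A brief usage sketch for the DP_CONT() macro added in this hunk: it continues
a message already started with DP() via pr_cont(), so no new prefix or newline
is emitted. The helper name, mask and printed values below are arbitrary
example choices, not code from the patch.

static void example_dump_indices(struct bnx2x *bp, const u16 *idx, int n)
{
	int i;

	DP(NETIF_MSG_INTR, "sb indices:");
	for (i = 0; i < n; i++)
		DP_CONT(NETIF_MSG_INTR, " %u", idx[i]);
	DP_CONT(NETIF_MSG_INTR, "\n");
}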
@@ -111,7 +118,9 @@ do { \ | |||
111 | dev_info(&bp->pdev->dev, __fmt, ##__args); \ | 118 | dev_info(&bp->pdev->dev, __fmt, ##__args); \ |
112 | } while (0) | 119 | } while (0) |
113 | 120 | ||
114 | void bnx2x_panic_dump(struct bnx2x *bp); | 121 | #define BNX2X_MAC_FMT "%pM" |
122 | #define BNX2X_MAC_PRN_LIST(mac) (mac) | ||
123 | |||
115 | 124 | ||
116 | #ifdef BNX2X_STOP_ON_ERROR | 125 | #ifdef BNX2X_STOP_ON_ERROR |
117 | #define bnx2x_panic() do { \ | 126 | #define bnx2x_panic() do { \ |
@@ -233,11 +242,11 @@ void bnx2x_panic_dump(struct bnx2x *bp); | |||
233 | * | 242 | * |
234 | */ | 243 | */ |
235 | /* iSCSI L2 */ | 244 | /* iSCSI L2 */ |
236 | #define BNX2X_ISCSI_ETH_CL_ID 17 | 245 | #define BNX2X_ISCSI_ETH_CL_ID_IDX 1 |
237 | #define BNX2X_ISCSI_ETH_CID 17 | 246 | #define BNX2X_ISCSI_ETH_CID 17 |
238 | 247 | ||
239 | /* FCoE L2 */ | 248 | /* FCoE L2 */ |
240 | #define BNX2X_FCOE_ETH_CL_ID 18 | 249 | #define BNX2X_FCOE_ETH_CL_ID_IDX 2 |
241 | #define BNX2X_FCOE_ETH_CID 18 | 250 | #define BNX2X_FCOE_ETH_CID 18 |
242 | 251 | ||
243 | /** Additional rings budgeting */ | 252 | /** Additional rings budgeting */ |
@@ -283,44 +292,73 @@ union db_prod { | |||
283 | 292 | ||
284 | 293 | ||
285 | /* MC hsi */ | 294 | /* MC hsi */ |
286 | #define BCM_PAGE_SHIFT 12 | 295 | #define BCM_PAGE_SHIFT 12 |
287 | #define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT) | 296 | #define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT) |
288 | #define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1)) | 297 | #define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1)) |
289 | #define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK) | 298 | #define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK) |
290 | 299 | ||
291 | #define PAGES_PER_SGE_SHIFT 0 | 300 | #define PAGES_PER_SGE_SHIFT 0 |
292 | #define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) | 301 | #define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) |
293 | #define SGE_PAGE_SIZE PAGE_SIZE | 302 | #define SGE_PAGE_SIZE PAGE_SIZE |
294 | #define SGE_PAGE_SHIFT PAGE_SHIFT | 303 | #define SGE_PAGE_SHIFT PAGE_SHIFT |
295 | #define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) | 304 | #define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) |
296 | 305 | ||
297 | /* SGE ring related macros */ | 306 | /* SGE ring related macros */ |
298 | #define NUM_RX_SGE_PAGES 2 | 307 | #define NUM_RX_SGE_PAGES 2 |
299 | #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) | 308 | #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) |
300 | #define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) | 309 | #define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) |
301 | /* RX_SGE_CNT is promised to be a power of 2 */ | 310 | /* RX_SGE_CNT is promised to be a power of 2 */ |
302 | #define RX_SGE_MASK (RX_SGE_CNT - 1) | 311 | #define RX_SGE_MASK (RX_SGE_CNT - 1) |
303 | #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) | 312 | #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) |
304 | #define MAX_RX_SGE (NUM_RX_SGE - 1) | 313 | #define MAX_RX_SGE (NUM_RX_SGE - 1) |
305 | #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ | 314 | #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ |
306 | (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) | 315 | (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) |
307 | #define RX_SGE(x) ((x) & MAX_RX_SGE) | 316 | #define RX_SGE(x) ((x) & MAX_RX_SGE) |
317 | |||
318 | /* Manipulate a bit vector defined as an array of u64 */ | ||
308 | 319 | ||
309 | /* SGE producer mask related macros */ | ||
310 | /* Number of bits in one sge_mask array element */ | 320 | /* Number of bits in one sge_mask array element */ |
311 | #define RX_SGE_MASK_ELEM_SZ 64 | 321 | #define BIT_VEC64_ELEM_SZ 64 |
312 | #define RX_SGE_MASK_ELEM_SHIFT 6 | 322 | #define BIT_VEC64_ELEM_SHIFT 6 |
313 | #define RX_SGE_MASK_ELEM_MASK ((u64)RX_SGE_MASK_ELEM_SZ - 1) | 323 | #define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1) |
324 | |||
325 | |||
326 | #define __BIT_VEC64_SET_BIT(el, bit) \ | ||
327 | do { \ | ||
328 | el = ((el) | ((u64)0x1 << (bit))); \ | ||
329 | } while (0) | ||
330 | |||
331 | #define __BIT_VEC64_CLEAR_BIT(el, bit) \ | ||
332 | do { \ | ||
333 | el = ((el) & (~((u64)0x1 << (bit)))); \ | ||
334 | } while (0) | ||
335 | |||
336 | |||
337 | #define BIT_VEC64_SET_BIT(vec64, idx) \ | ||
338 | __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ | ||
339 | (idx) & BIT_VEC64_ELEM_MASK) | ||
340 | |||
341 | #define BIT_VEC64_CLEAR_BIT(vec64, idx) \ | ||
342 | __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ | ||
343 | (idx) & BIT_VEC64_ELEM_MASK) | ||
344 | |||
345 | #define BIT_VEC64_TEST_BIT(vec64, idx) \ | ||
346 | (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \ | ||
347 | ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1) | ||
314 | 348 | ||
315 | /* Creates a bitmask of all ones in less significant bits. | 349 | /* Creates a bitmask of all ones in less significant bits. |
316 | idx - index of the most significant bit in the created mask */ | 350 | idx - index of the most significant bit in the created mask */ |
317 | #define RX_SGE_ONES_MASK(idx) \ | 351 | #define BIT_VEC64_ONES_MASK(idx) \ |
318 | (((u64)0x1 << (((idx) & RX_SGE_MASK_ELEM_MASK) + 1)) - 1) | 352 | (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1) |
319 | #define RX_SGE_MASK_ELEM_ONE_MASK ((u64)(~0)) | 353 | #define BIT_VEC64_ELEM_ONE_MASK ((u64)(~0)) |
354 | |||
355 | /*******************************************************/ | ||
356 | |||
357 | |||
320 | 358 | ||
321 | /* Number of u64 elements in SGE mask array */ | 359 | /* Number of u64 elements in SGE mask array */ |
322 | #define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \ | 360 | #define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \ |
323 | RX_SGE_MASK_ELEM_SZ) | 361 | BIT_VEC64_ELEM_SZ) |
324 | #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) | 362 | #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) |
325 | #define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) | 363 | #define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) |
326 | 364 | ||
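A small usage sketch for the generalized BIT_VEC64_* helpers introduced in
this hunk; the vector length and the bit index are arbitrary example values,
and the expansion comments assume the macro definitions above. The fastpath
sge_mask[] producer mask becomes just one user of these generic helpers.

static void bit_vec64_example(void)
{
	u64 vec[2] = { 0, 0 };			/* a 128-bit vector */

	BIT_VEC64_SET_BIT(vec, 70);		/* vec[70 >> 6] |= (u64)1 << (70 & 63) */
	if (BIT_VEC64_TEST_BIT(vec, 70))	/* reads bit 6 of vec[1] */
		BIT_VEC64_CLEAR_BIT(vec, 70);
}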
@@ -331,7 +369,30 @@ union host_hc_status_block { | |||
331 | struct host_hc_status_block_e2 *e2_sb; | 369 | struct host_hc_status_block_e2 *e2_sb; |
332 | }; | 370 | }; |
333 | 371 | ||
372 | struct bnx2x_agg_info { | ||
373 | /* | ||
374 | * First aggregation buffer is an skb, the following - are pages. | ||
375 | * We will preallocate the skbs for each aggregation when | ||
376 | * we open the interface and will replace the BD at the consumer | ||
377 | * with this one when we receive the TPA_START CQE in order to | ||
378 | * keep the Rx BD ring consistent. | ||
379 | */ | ||
380 | struct sw_rx_bd first_buf; | ||
381 | u8 tpa_state; | ||
382 | #define BNX2X_TPA_START 1 | ||
383 | #define BNX2X_TPA_STOP 2 | ||
384 | #define BNX2X_TPA_ERROR 3 | ||
385 | u8 placement_offset; | ||
386 | u16 parsing_flags; | ||
387 | u16 vlan_tag; | ||
388 | u16 len_on_bd; | ||
389 | }; | ||
390 | |||
391 | #define Q_STATS_OFFSET32(stat_name) \ | ||
392 | (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) | ||
393 | |||
334 | struct bnx2x_fastpath { | 394 | struct bnx2x_fastpath { |
395 | struct bnx2x *bp; /* parent */ | ||
335 | 396 | ||
336 | #define BNX2X_NAPI_WEIGHT 128 | 397 | #define BNX2X_NAPI_WEIGHT 128 |
337 | struct napi_struct napi; | 398 | struct napi_struct napi; |
@@ -366,23 +427,13 @@ struct bnx2x_fastpath { | |||
366 | 427 | ||
367 | u64 sge_mask[RX_SGE_MASK_LEN]; | 428 | u64 sge_mask[RX_SGE_MASK_LEN]; |
368 | 429 | ||
369 | int state; | 430 | u32 cid; |
370 | #define BNX2X_FP_STATE_CLOSED 0 | ||
371 | #define BNX2X_FP_STATE_IRQ 0x80000 | ||
372 | #define BNX2X_FP_STATE_OPENING 0x90000 | ||
373 | #define BNX2X_FP_STATE_OPEN 0xa0000 | ||
374 | #define BNX2X_FP_STATE_HALTING 0xb0000 | ||
375 | #define BNX2X_FP_STATE_HALTED 0xc0000 | ||
376 | #define BNX2X_FP_STATE_TERMINATING 0xd0000 | ||
377 | #define BNX2X_FP_STATE_TERMINATED 0xe0000 | ||
378 | 431 | ||
379 | u8 index; /* number in fp array */ | 432 | u8 index; /* number in fp array */ |
380 | u8 cl_id; /* eth client id */ | 433 | u8 cl_id; /* eth client id */ |
381 | u8 cl_qzone_id; | 434 | u8 cl_qzone_id; |
382 | u8 fw_sb_id; /* status block number in FW */ | 435 | u8 fw_sb_id; /* status block number in FW */ |
383 | u8 igu_sb_id; /* status block number in HW */ | 436 | u8 igu_sb_id; /* status block number in HW */ |
384 | u32 cid; | ||
385 | |||
386 | union db_prod tx_db; | 437 | union db_prod tx_db; |
387 | 438 | ||
388 | u16 tx_pkt_prod; | 439 | u16 tx_pkt_prod; |
@@ -401,24 +452,20 @@ struct bnx2x_fastpath { | |||
401 | /* The last maximal completed SGE */ | 452 | /* The last maximal completed SGE */ |
402 | u16 last_max_sge; | 453 | u16 last_max_sge; |
403 | __le16 *rx_cons_sb; | 454 | __le16 *rx_cons_sb; |
404 | |||
405 | unsigned long tx_pkt, | 455 | unsigned long tx_pkt, |
406 | rx_pkt, | 456 | rx_pkt, |
407 | rx_calls; | 457 | rx_calls; |
408 | 458 | ||
409 | /* TPA related */ | 459 | /* TPA related */ |
410 | struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H]; | 460 | struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; |
411 | u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H]; | ||
412 | #define BNX2X_TPA_START 1 | ||
413 | #define BNX2X_TPA_STOP 2 | ||
414 | u8 disable_tpa; | 461 | u8 disable_tpa; |
415 | #ifdef BNX2X_STOP_ON_ERROR | 462 | #ifdef BNX2X_STOP_ON_ERROR |
416 | u64 tpa_queue_used; | 463 | u64 tpa_queue_used; |
417 | #endif | 464 | #endif |
418 | 465 | ||
419 | struct tstorm_per_client_stats old_tclient; | 466 | struct tstorm_per_queue_stats old_tclient; |
420 | struct ustorm_per_client_stats old_uclient; | 467 | struct ustorm_per_queue_stats old_uclient; |
421 | struct xstorm_per_client_stats old_xclient; | 468 | struct xstorm_per_queue_stats old_xclient; |
422 | struct bnx2x_eth_q_stats eth_q_stats; | 469 | struct bnx2x_eth_q_stats eth_q_stats; |
423 | 470 | ||
424 | /* The size is calculated using the following: | 471 | /* The size is calculated using the following: |
@@ -427,7 +474,13 @@ struct bnx2x_fastpath { | |||
427 | 4 (for the digits and to make it DWORD aligned) */ | 474 | 4 (for the digits and to make it DWORD aligned) */ |
428 | #define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) | 475 | #define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) |
429 | char name[FP_NAME_SIZE]; | 476 | char name[FP_NAME_SIZE]; |
430 | struct bnx2x *bp; /* parent */ | 477 | |
478 | /* MACs object */ | ||
479 | struct bnx2x_vlan_mac_obj mac_obj; | ||
480 | |||
481 | /* Queue State object */ | ||
482 | struct bnx2x_queue_sp_obj q_obj; | ||
483 | |||
431 | }; | 484 | }; |
432 | 485 | ||
433 | #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) | 486 | #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) |
@@ -435,11 +488,13 @@ struct bnx2x_fastpath { | |||
435 | /* Use 2500 as a mini-jumbo MTU for FCoE */ | 488 | /* Use 2500 as a mini-jumbo MTU for FCoE */ |
436 | #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 | 489 | #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 |
437 | 490 | ||
438 | #ifdef BCM_CNIC | 491 | /* FCoE L2 `fastpath' entry is right after the eth entries */ |
439 | /* FCoE L2 `fastpath' is right after the eth entries */ | ||
440 | #define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) | 492 | #define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) |
441 | #define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) | 493 | #define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) |
442 | #define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) | 494 | #define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) |
495 | |||
496 | |||
497 | #ifdef BCM_CNIC | ||
443 | #define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) | 498 | #define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) |
444 | #define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) | 499 | #define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) |
445 | #else | 500 | #else |
@@ -449,77 +504,68 @@ struct bnx2x_fastpath { | |||
449 | 504 | ||
450 | 505 | ||
451 | /* MC hsi */ | 506 | /* MC hsi */ |
452 | #define MAX_FETCH_BD 13 /* HW max BDs per packet */ | 507 | #define MAX_FETCH_BD 13 /* HW max BDs per packet */ |
453 | #define RX_COPY_THRESH 92 | 508 | #define RX_COPY_THRESH 92 |
454 | 509 | ||
455 | #define NUM_TX_RINGS 16 | 510 | #define NUM_TX_RINGS 16 |
456 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) | 511 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) |
457 | #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) | 512 | #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) |
458 | #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) | 513 | #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) |
459 | #define MAX_TX_BD (NUM_TX_BD - 1) | 514 | #define MAX_TX_BD (NUM_TX_BD - 1) |
460 | #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) | 515 | #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) |
461 | #define INIT_JUMBO_TX_RING_SIZE MAX_TX_AVAIL | ||
462 | #define INIT_TX_RING_SIZE MAX_TX_AVAIL | ||
463 | #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ | 516 | #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ |
464 | (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) | 517 | (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) |
465 | #define TX_BD(x) ((x) & MAX_TX_BD) | 518 | #define TX_BD(x) ((x) & MAX_TX_BD) |
466 | #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) | 519 | #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) |
467 | 520 | ||
468 | /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ | 521 | /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ |
469 | #define NUM_RX_RINGS 8 | 522 | #define NUM_RX_RINGS 8 |
470 | #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) | 523 | #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) |
471 | #define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) | 524 | #define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) |
472 | #define RX_DESC_MASK (RX_DESC_CNT - 1) | 525 | #define RX_DESC_MASK (RX_DESC_CNT - 1) |
473 | #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) | 526 | #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) |
474 | #define MAX_RX_BD (NUM_RX_BD - 1) | 527 | #define MAX_RX_BD (NUM_RX_BD - 1) |
475 | #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) | 528 | #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) |
476 | #define MIN_RX_SIZE_TPA 72 | 529 | #define MIN_RX_AVAIL 128 |
477 | #define MIN_RX_SIZE_NONTPA 10 | 530 | |
478 | #define INIT_JUMBO_RX_RING_SIZE MAX_RX_AVAIL | 531 | #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ |
479 | #define INIT_RX_RING_SIZE MAX_RX_AVAIL | 532 | ETH_MIN_RX_CQES_WITH_TPA_E1 : \ |
533 | ETH_MIN_RX_CQES_WITH_TPA_E1H_E2) | ||
534 | #define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA | ||
535 | #define MIN_RX_SIZE_TPA (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL)) | ||
536 | #define MIN_RX_SIZE_NONTPA (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\ | ||
537 | MIN_RX_AVAIL)) | ||
538 | |||
480 | #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ | 539 | #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ |
481 | (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) | 540 | (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) |
482 | #define RX_BD(x) ((x) & MAX_RX_BD) | 541 | #define RX_BD(x) ((x) & MAX_RX_BD) |
483 | 542 | ||
484 | /* As long as CQE is 4 times bigger than BD entry we have to allocate | 543 | /* |
485 | 4 times more pages for CQ ring in order to keep it balanced with | 544 | * As long as CQE is X times bigger than BD entry we have to allocate X times |
486 | BD ring */ | 545 | * more pages for CQ ring in order to keep it balanced with BD ring |
487 | #define NUM_RCQ_RINGS (NUM_RX_RINGS * 4) | 546 | */ |
547 | #define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) | ||
548 | #define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) | ||
488 | #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) | 549 | #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) |
489 | #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) | 550 | #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) |
490 | #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) | 551 | #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) |
491 | #define MAX_RCQ_BD (NUM_RCQ_BD - 1) | 552 | #define MAX_RCQ_BD (NUM_RCQ_BD - 1) |
492 | #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) | 553 | #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) |
493 | #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ | 554 | #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ |
494 | (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) | 555 | (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) |
495 | #define RCQ_BD(x) ((x) & MAX_RCQ_BD) | 556 | #define RCQ_BD(x) ((x) & MAX_RCQ_BD) |
496 | 557 | ||
497 | 558 | ||
498 | /* This is needed for determining of last_max */ | 559 | /* This is needed for determining of last_max */ |
499 | #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) | 560 | #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) |
500 | 561 | #define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b)) | |
501 | #define __SGE_MASK_SET_BIT(el, bit) \ | ||
502 | do { \ | ||
503 | el = ((el) | ((u64)0x1 << (bit))); \ | ||
504 | } while (0) | ||
505 | |||
506 | #define __SGE_MASK_CLEAR_BIT(el, bit) \ | ||
507 | do { \ | ||
508 | el = ((el) & (~((u64)0x1 << (bit)))); \ | ||
509 | } while (0) | ||
510 | |||
511 | #define SGE_MASK_SET_BIT(fp, idx) \ | ||
512 | __SGE_MASK_SET_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \ | ||
513 | ((idx) & RX_SGE_MASK_ELEM_MASK)) | ||
514 | 562 | ||
515 | #define SGE_MASK_CLEAR_BIT(fp, idx) \ | ||
516 | __SGE_MASK_CLEAR_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \ | ||
517 | ((idx) & RX_SGE_MASK_ELEM_MASK)) | ||
518 | 563 | ||
564 | #define BNX2X_SWCID_SHIFT 17 | ||
565 | #define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) | ||
519 | 566 | ||
520 | /* used on a CID received from the HW */ | 567 | /* used on a CID received from the HW */ |
521 | #define SW_CID(x) (le32_to_cpu(x) & \ | 568 | #define SW_CID(x) (le32_to_cpu(x) & BNX2X_SWCID_MASK) |
522 | (COMMON_RAMROD_ETH_RX_CQE_CID >> 7)) | ||
523 | #define CQE_CMD(x) (le32_to_cpu(x) >> \ | 569 | #define CQE_CMD(x) (le32_to_cpu(x) >> \ |
524 | COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT) | 570 | COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT) |
525 | 571 | ||
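Worked check of the new CQE_BD_REL definition above, assuming the sizes
implied by the removed comment (an 8-byte struct eth_rx_bd and a CQE four
times its size, i.e. 32 bytes): CQE_BD_REL = 32 / 8 = 4, so NUM_RCQ_RINGS =
NUM_RX_RINGS * CQE_BD_REL = 8 * 4 = 32 -- the same value the old hard-coded
(NUM_RX_RINGS * 4) produced, now derived from the structure sizes instead.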
@@ -529,6 +575,9 @@ struct bnx2x_fastpath { | |||
529 | 575 | ||
530 | #define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ | 576 | #define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ |
531 | #define BNX2X_DB_SHIFT 7 /* 128 bytes*/ | 577 | #define BNX2X_DB_SHIFT 7 /* 128 bytes*/ |
578 | #if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) | ||
579 | #error "Min DB doorbell stride is 8" | ||
580 | #endif | ||
532 | #define DPM_TRIGER_TYPE 0x40 | 581 | #define DPM_TRIGER_TYPE 0x40 |
533 | #define DOORBELL(bp, cid, val) \ | 582 | #define DOORBELL(bp, cid, val) \ |
534 | do { \ | 583 | do { \ |
@@ -557,13 +606,11 @@ struct bnx2x_fastpath { | |||
557 | 606 | ||
558 | 607 | ||
559 | /* stuff added to make the code fit 80Col */ | 608 | /* stuff added to make the code fit 80Col */ |
560 | 609 | #define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) | |
561 | #define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) | 610 | #define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG) |
562 | 611 | #define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG) | |
563 | #define TPA_TYPE_START ETH_FAST_PATH_RX_CQE_START_FLG | 612 | #define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD) |
564 | #define TPA_TYPE_END ETH_FAST_PATH_RX_CQE_END_FLG | 613 | #define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH) |
565 | #define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \ | ||
566 | (TPA_TYPE_START | TPA_TYPE_END)) | ||
567 | 614 | ||
568 | #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | 615 | #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG |
569 | 616 | ||
@@ -590,12 +637,30 @@ struct bnx2x_fastpath { | |||
590 | #define BNX2X_RX_SUM_FIX(cqe) \ | 637 | #define BNX2X_RX_SUM_FIX(cqe) \ |
591 | BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) | 638 | BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) |
592 | 639 | ||
593 | #define U_SB_ETH_RX_CQ_INDEX 1 | 640 | |
594 | #define U_SB_ETH_RX_BD_INDEX 2 | 641 | #define FP_USB_FUNC_OFF \ |
595 | #define C_SB_ETH_TX_CQ_INDEX 5 | 642 | offsetof(struct cstorm_status_block_u, func) |
643 | #define FP_CSB_FUNC_OFF \ | ||
644 | offsetof(struct cstorm_status_block_c, func) | ||
645 | |||
646 | #define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */ | ||
647 | /* (HC_INDEX_U_TOE_RX_CQ_CONS) */ | ||
648 | #define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */ | ||
649 | /* (HC_INDEX_U_ETH_RX_CQ_CONS) */ | ||
650 | #define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */ | ||
651 | /* (HC_INDEX_U_ETH_RX_BD_CONS) */ | ||
652 | |||
653 | #define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */ | ||
654 | /* (HC_INDEX_C_TOE_TX_CQ_CONS) */ | ||
655 | #define HC_INDEX_ETH_TX_CQ_CONS 5 /* Formerly Cstorm ETH CQ index */ | ||
656 | /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ | ||
657 | |||
658 | #define U_SB_ETH_RX_CQ_INDEX HC_INDEX_ETH_RX_CQ_CONS | ||
659 | #define U_SB_ETH_RX_BD_INDEX HC_INDEX_ETH_RX_BD_CONS | ||
660 | #define C_SB_ETH_TX_CQ_INDEX HC_INDEX_ETH_TX_CQ_CONS | ||
596 | 661 | ||
597 | #define BNX2X_RX_SB_INDEX \ | 662 | #define BNX2X_RX_SB_INDEX \ |
598 | (&fp->sb_index_values[U_SB_ETH_RX_CQ_INDEX]) | 663 | (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) |
599 | 664 | ||
600 | #define BNX2X_TX_SB_INDEX \ | 665 | #define BNX2X_TX_SB_INDEX \ |
601 | (&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX]) | 666 | (&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX]) |
@@ -615,29 +680,53 @@ struct bnx2x_common { | |||
615 | #define CHIP_NUM_57711 0x164f | 680 | #define CHIP_NUM_57711 0x164f |
616 | #define CHIP_NUM_57711E 0x1650 | 681 | #define CHIP_NUM_57711E 0x1650 |
617 | #define CHIP_NUM_57712 0x1662 | 682 | #define CHIP_NUM_57712 0x1662 |
618 | #define CHIP_NUM_57712E 0x1663 | 683 | #define CHIP_NUM_57712_MF 0x1663 |
684 | #define CHIP_NUM_57713 0x1651 | ||
685 | #define CHIP_NUM_57713E 0x1652 | ||
686 | #define CHIP_NUM_57800 0x168a | ||
687 | #define CHIP_NUM_57800_MF 0x16a5 | ||
688 | #define CHIP_NUM_57810 0x168e | ||
689 | #define CHIP_NUM_57810_MF 0x16ae | ||
690 | #define CHIP_NUM_57840 0x168d | ||
691 | #define CHIP_NUM_57840_MF 0x16ab | ||
619 | #define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) | 692 | #define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) |
620 | #define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) | 693 | #define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) |
621 | #define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) | 694 | #define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) |
622 | #define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712) | 695 | #define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712) |
623 | #define CHIP_IS_57712E(bp) (CHIP_NUM(bp) == CHIP_NUM_57712E) | 696 | #define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF) |
697 | #define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800) | ||
698 | #define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) | ||
699 | #define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) | ||
700 | #define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) | ||
701 | #define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840) | ||
702 | #define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF) | ||
624 | #define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ | 703 | #define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ |
625 | CHIP_IS_57711E(bp)) | 704 | CHIP_IS_57711E(bp)) |
626 | #define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ | 705 | #define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ |
627 | CHIP_IS_57712E(bp)) | 706 | CHIP_IS_57712_MF(bp)) |
707 | #define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \ | ||
708 | CHIP_IS_57800_MF(bp) || \ | ||
709 | CHIP_IS_57810(bp) || \ | ||
710 | CHIP_IS_57810_MF(bp) || \ | ||
711 | CHIP_IS_57840(bp) || \ | ||
712 | CHIP_IS_57840_MF(bp)) | ||
628 | #define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) | 713 | #define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) |
629 | #define IS_E1H_OFFSET (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) | 714 | #define USES_WARPCORE(bp) (CHIP_IS_E3(bp)) |
630 | 715 | #define IS_E1H_OFFSET (!CHIP_IS_E1(bp)) | |
631 | #define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000) | 716 | |
632 | #define CHIP_REV_Ax 0x00000000 | 717 | #define CHIP_REV_SHIFT 12 |
718 | #define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT) | ||
719 | #define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK) | ||
720 | #define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT) | ||
721 | #define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT) | ||
633 | /* assume maximum 5 revisions */ | 722 | /* assume maximum 5 revisions */ |
634 | #define CHIP_REV_IS_SLOW(bp) (CHIP_REV(bp) > 0x00005000) | 723 | #define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000) |
635 | /* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */ | 724 | /* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */ |
636 | #define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \ | 725 | #define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \ |
637 | !(CHIP_REV(bp) & 0x00001000)) | 726 | !(CHIP_REV_VAL(bp) & 0x00001000)) |
638 | /* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */ | 727 | /* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */ |
639 | #define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \ | 728 | #define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \ |
640 | (CHIP_REV(bp) & 0x00001000)) | 729 | (CHIP_REV_VAL(bp) & 0x00001000)) |
641 | 730 | ||
642 | #define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \ | 731 | #define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \ |
643 | ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1)) | 732 | ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1)) |
@@ -645,6 +734,16 @@ struct bnx2x_common { | |||
645 | #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) | 734 | #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) |
646 | #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) | 735 | #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) |
647 | #define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) | 736 | #define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) |
737 | #define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\ | ||
738 | (CHIP_REV_SHIFT + 1)) \ | ||
739 | << CHIP_REV_SHIFT) | ||
740 | #define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? \ | ||
741 | CHIP_REV_SIM(bp) :\ | ||
742 | CHIP_REV_VAL(bp)) | ||
743 | #define CHIP_IS_E3B0(bp) (CHIP_IS_E3(bp) && \ | ||
744 | (CHIP_REV(bp) == CHIP_REV_Bx)) | ||
745 | #define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ | ||
746 | (CHIP_REV(bp) == CHIP_REV_Ax)) | ||
648 | 747 | ||
649 | int flash_size; | 748 | int flash_size; |
650 | #define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ | 749 | #define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ |
@@ -666,7 +765,7 @@ struct bnx2x_common { | |||
666 | #define INT_BLOCK_MODE_NORMAL 0 | 765 | #define INT_BLOCK_MODE_NORMAL 0 |
667 | #define INT_BLOCK_MODE_BW_COMP 2 | 766 | #define INT_BLOCK_MODE_BW_COMP 2 |
668 | #define CHIP_INT_MODE_IS_NBC(bp) \ | 767 | #define CHIP_INT_MODE_IS_NBC(bp) \ |
669 | (CHIP_IS_E2(bp) && \ | 768 | (!CHIP_IS_E1x(bp) && \ |
670 | !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP)) | 769 | !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP)) |
671 | #define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp)) | 770 | #define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp)) |
672 | 771 | ||
@@ -712,19 +811,15 @@ struct bnx2x_port { | |||
712 | 811 | ||
713 | /* end of port */ | 812 | /* end of port */ |
714 | 813 | ||
715 | /* e1h Classification CAM line allocations */ | 814 | #define STATS_OFFSET32(stat_name) \ |
716 | enum { | 815 | (offsetof(struct bnx2x_eth_stats, stat_name) / 4) |
717 | CAM_ETH_LINE = 0, | ||
718 | CAM_ISCSI_ETH_LINE, | ||
719 | CAM_FIP_ETH_LINE, | ||
720 | CAM_FIP_MCAST_LINE, | ||
721 | CAM_MAX_PF_LINE = CAM_FIP_MCAST_LINE | ||
722 | }; | ||
723 | /* number of MACs per function in NIG memory - used for SI mode */ | ||
724 | #define NIG_LLH_FUNC_MEM_SIZE 16 | ||
725 | /* number of entries in NIG_REG_LLHX_FUNC_MEM */ | ||
726 | #define NIG_LLH_FUNC_MEM_MAX_OFFSET 8 | ||
727 | 816 | ||
817 | /* slow path */ | ||
818 | |||
819 | /* slow path work-queue */ | ||
820 | extern struct workqueue_struct *bnx2x_wq; | ||
821 | |||
822 | #define BNX2X_MAX_NUM_OF_VFS 64 | ||
728 | #define BNX2X_VF_ID_INVALID 0xFF | 823 | #define BNX2X_VF_ID_INVALID 0xFF |
729 | 824 | ||
730 | /* | 825 | /* |
@@ -749,8 +844,10 @@ enum { | |||
749 | * L2 queue is supported. the cid for the FCoE L2 queue is always X. | 844 | * L2 queue is supported. the cid for the FCoE L2 queue is always X. |
750 | */ | 845 | */ |
751 | 846 | ||
752 | #define FP_SB_MAX_E1x 16 /* fast-path interrupt contexts E1x */ | 847 | /* fast-path interrupt contexts E1x */ |
753 | #define FP_SB_MAX_E2 16 /* fast-path interrupt contexts E2 */ | 848 | #define FP_SB_MAX_E1x 16 |
849 | /* fast-path interrupt contexts E2 */ | ||
850 | #define FP_SB_MAX_E2 HC_SB_MAX_SB_E2 | ||
754 | 851 | ||
755 | /* | 852 | /* |
756 | * cid_cnt paramter below refers to the value returned by | 853 | * cid_cnt paramter below refers to the value returned by |
@@ -761,13 +858,13 @@ enum { | |||
761 | * The number of FP context allocated by the driver == max number of regular | 858 | * The number of FP context allocated by the driver == max number of regular |
762 | * L2 queues + 1 for the FCoE L2 queue | 859 | * L2 queues + 1 for the FCoE L2 queue |
763 | */ | 860 | */ |
764 | #define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE) | 861 | #define L2_FP_COUNT(cid_cnt) ((cid_cnt) - FCOE_CONTEXT_USE) |
765 | 862 | ||
766 | /* | 863 | /* |
767 | * The number of FP-SB allocated by the driver == max number of regular L2 | 864 | * The number of FP-SB allocated by the driver == max number of regular L2 |
768 | * queues + 1 for the CNIC which also consumes an FP-SB | 865 | * queues + 1 for the CNIC which also consumes an FP-SB |
769 | */ | 866 | */ |
770 | #define FP_SB_COUNT(cid_cnt) ((cid_cnt) - FCOE_CONTEXT_USE) | 867 | #define FP_SB_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE) |
771 | #define NUM_IGU_SB_REQUIRED(cid_cnt) \ | 868 | #define NUM_IGU_SB_REQUIRED(cid_cnt) \ |
772 | (FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE) | 869 | (FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE) |
773 | 870 | ||
@@ -788,38 +885,61 @@ union cdu_context { | |||
788 | #define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) | 885 | #define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) |
789 | #endif | 886 | #endif |
790 | 887 | ||
791 | #define QM_ILT_PAGE_SZ_HW 3 | 888 | #define QM_ILT_PAGE_SZ_HW 0 |
792 | #define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 32K */ | 889 | #define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */ |
793 | #define QM_CID_ROUND 1024 | 890 | #define QM_CID_ROUND 1024 |
794 | 891 | ||
795 | #ifdef BCM_CNIC | 892 | #ifdef BCM_CNIC |
796 | /* TM (timers) host DB constants */ | 893 | /* TM (timers) host DB constants */ |
797 | #define TM_ILT_PAGE_SZ_HW 2 | 894 | #define TM_ILT_PAGE_SZ_HW 0 |
798 | #define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 16K */ | 895 | #define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ |
799 | /* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ | 896 | /* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ |
800 | #define TM_CONN_NUM 1024 | 897 | #define TM_CONN_NUM 1024 |
801 | #define TM_ILT_SZ (8 * TM_CONN_NUM) | 898 | #define TM_ILT_SZ (8 * TM_CONN_NUM) |
802 | #define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) | 899 | #define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) |
803 | 900 | ||
804 | /* SRC (Searcher) host DB constants */ | 901 | /* SRC (Searcher) host DB constants */ |
805 | #define SRC_ILT_PAGE_SZ_HW 3 | 902 | #define SRC_ILT_PAGE_SZ_HW 0 |
806 | #define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 32K */ | 903 | #define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */ |
807 | #define SRC_HASH_BITS 10 | 904 | #define SRC_HASH_BITS 10 |
808 | #define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */ | 905 | #define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */ |
809 | #define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM) | 906 | #define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM) |
810 | #define SRC_T2_SZ SRC_ILT_SZ | 907 | #define SRC_T2_SZ SRC_ILT_SZ |
811 | #define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) | 908 | #define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) |
909 | |||
812 | #endif | 910 | #endif |
813 | 911 | ||
814 | #define MAX_DMAE_C 8 | 912 | #define MAX_DMAE_C 8 |
815 | 913 | ||
816 | /* DMA memory not used in fastpath */ | 914 | /* DMA memory not used in fastpath */ |
817 | struct bnx2x_slowpath { | 915 | struct bnx2x_slowpath { |
818 | struct eth_stats_query fw_stats; | 916 | union { |
819 | struct mac_configuration_cmd mac_config; | 917 | struct mac_configuration_cmd e1x; |
820 | struct mac_configuration_cmd mcast_config; | 918 | struct eth_classify_rules_ramrod_data e2; |
821 | struct mac_configuration_cmd uc_mac_config; | 919 | } mac_rdata; |
822 | struct client_init_ramrod_data client_init_data; | 920 | |
921 | |||
922 | union { | ||
923 | struct tstorm_eth_mac_filter_config e1x; | ||
924 | struct eth_filter_rules_ramrod_data e2; | ||
925 | } rx_mode_rdata; | ||
926 | |||
927 | union { | ||
928 | struct mac_configuration_cmd e1; | ||
929 | struct eth_multicast_rules_ramrod_data e2; | ||
930 | } mcast_rdata; | ||
931 | |||
932 | struct eth_rss_update_ramrod_data rss_rdata; | ||
933 | |||
934 | /* Queue State related ramrods are always sent under rtnl_lock */ | ||
935 | union { | ||
936 | struct client_init_ramrod_data init_data; | ||
937 | struct client_update_ramrod_data update_data; | ||
938 | } q_rdata; | ||
939 | |||
940 | union { | ||
941 | struct function_start_data func_start; | ||
942 | } func_rdata; | ||
823 | 943 | ||
824 | /* used by dmae command executer */ | 944 | /* used by dmae command executer */ |
825 | struct dmae_command dmae[MAX_DMAE_C]; | 945 | struct dmae_command dmae[MAX_DMAE_C]; |
@@ -846,7 +966,7 @@ struct bnx2x_slowpath { | |||
846 | #define MAX_DYNAMIC_ATTN_GRPS 8 | 966 | #define MAX_DYNAMIC_ATTN_GRPS 8 |
847 | 967 | ||
848 | struct attn_route { | 968 | struct attn_route { |
849 | u32 sig[5]; | 969 | u32 sig[5]; |
850 | }; | 970 | }; |
851 | 971 | ||
852 | struct iro { | 972 | struct iro { |
@@ -872,7 +992,7 @@ typedef enum { | |||
872 | BNX2X_RECOVERY_WAIT, | 992 | BNX2X_RECOVERY_WAIT, |
873 | } bnx2x_recovery_state_t; | 993 | } bnx2x_recovery_state_t; |
874 | 994 | ||
875 | /** | 995 | /* |
876 | * Event queue (EQ or event ring) MC hsi | 996 | * Event queue (EQ or event ring) MC hsi |
877 | * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2 | 997 | * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2 |
878 | */ | 998 | */ |
@@ -910,6 +1030,24 @@ enum { | |||
910 | BNX2X_LINK_REPORT_TX_FC_ON, | 1030 | BNX2X_LINK_REPORT_TX_FC_ON, |
911 | }; | 1031 | }; |
912 | 1032 | ||
1033 | enum { | ||
1034 | BNX2X_PORT_QUERY_IDX, | ||
1035 | BNX2X_PF_QUERY_IDX, | ||
1036 | BNX2X_FIRST_QUEUE_QUERY_IDX, | ||
1037 | }; | ||
1038 | |||
1039 | struct bnx2x_fw_stats_req { | ||
1040 | struct stats_query_header hdr; | ||
1041 | struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; | ||
1042 | }; | ||
1043 | |||
1044 | struct bnx2x_fw_stats_data { | ||
1045 | struct stats_counter storm_counters; | ||
1046 | struct per_port_stats port; | ||
1047 | struct per_pf_stats pf; | ||
1048 | struct per_queue_stats queue_stats[1]; | ||
1049 | }; | ||
1050 | |||
913 | struct bnx2x { | 1051 | struct bnx2x { |
914 | /* Fields used in the tx and intr/napi performance paths | 1052 | /* Fields used in the tx and intr/napi performance paths |
915 | * are grouped together in the beginning of the structure | 1053 | * are grouped together in the beginning of the structure |
@@ -919,10 +1057,23 @@ struct bnx2x { | |||
919 | void __iomem *doorbells; | 1057 | void __iomem *doorbells; |
920 | u16 db_size; | 1058 | u16 db_size; |
921 | 1059 | ||
1060 | u8 pf_num; /* absolute PF number */ | ||
1061 | u8 pfid; /* per-path PF number */ | ||
1062 | int base_fw_ndsb; /**/ | ||
1063 | #define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1)) | ||
1064 | #define BP_PORT(bp) (bp->pfid & 1) | ||
1065 | #define BP_FUNC(bp) (bp->pfid) | ||
1066 | #define BP_ABS_FUNC(bp) (bp->pf_num) | ||
1067 | #define BP_E1HVN(bp) (bp->pfid >> 1) | ||
1068 | #define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/ | ||
1069 | #define BP_L_ID(bp) (BP_E1HVN(bp) << 2) | ||
1070 | #define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ | ||
1071 | BP_VN(bp) * (CHIP_IS_E1x(bp) ? 2 : 1)) | ||
1072 | |||
922 | struct net_device *dev; | 1073 | struct net_device *dev; |
923 | struct pci_dev *pdev; | 1074 | struct pci_dev *pdev; |
924 | 1075 | ||
925 | struct iro *iro_arr; | 1076 | const struct iro *iro_arr; |
926 | #define IRO (bp->iro_arr) | 1077 | #define IRO (bp->iro_arr) |
927 | 1078 | ||
928 | bnx2x_recovery_state_t recovery_state; | 1079 | bnx2x_recovery_state_t recovery_state; |
@@ -940,7 +1091,8 @@ struct bnx2x { | |||
940 | /* Max supported alignment is 256 (8 shift) */ | 1091 | /* Max supported alignment is 256 (8 shift) */ |
941 | #define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \ | 1092 | #define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \ |
942 | L1_CACHE_SHIFT : 8) | 1093 | L1_CACHE_SHIFT : 8) |
943 | #define BNX2X_RX_ALIGN (1 << BNX2X_RX_ALIGN_SHIFT) | 1094 | /* FW use 2 Cache lines Alignment for start packet and size */ |
1095 | #define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT) | ||
944 | #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) | 1096 | #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) |
945 | 1097 | ||
946 | struct host_sp_status_block *def_status_blk; | 1098 | struct host_sp_status_block *def_status_blk; |
@@ -970,10 +1122,12 @@ struct bnx2x { | |||
970 | __le16 *eq_cons_sb; | 1122 | __le16 *eq_cons_sb; |
971 | atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */ | 1123 | atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */ |
972 | 1124 | ||
973 | /* Flags for marking that there is a STAT_QUERY or | 1125 | |
974 | SET_MAC ramrod pending */ | 1126 | |
975 | int stats_pending; | 1127 | /* Counter for marking that there is a STAT_QUERY ramrod pending */ |
976 | int set_mac_pending; | 1128 | u16 stats_pending; |
1129 | /* Counter for completed statistics ramrods */ | ||
1130 | u16 stats_comp; | ||
977 | 1131 | ||
978 | /* End of fields used in the performance code paths */ | 1132 | /* End of fields used in the performance code paths */ |
979 | 1133 | ||
@@ -981,47 +1135,27 @@ struct bnx2x { | |||
981 | int msg_enable; | 1135 | int msg_enable; |
982 | 1136 | ||
983 | u32 flags; | 1137 | u32 flags; |
984 | #define PCIX_FLAG 1 | 1138 | #define PCIX_FLAG (1 << 0) |
985 | #define PCI_32BIT_FLAG 2 | 1139 | #define PCI_32BIT_FLAG (1 << 1) |
986 | #define ONE_PORT_FLAG 4 | 1140 | #define ONE_PORT_FLAG (1 << 2) |
987 | #define NO_WOL_FLAG 8 | 1141 | #define NO_WOL_FLAG (1 << 3) |
988 | #define USING_DAC_FLAG 0x10 | 1142 | #define USING_DAC_FLAG (1 << 4) |
989 | #define USING_MSIX_FLAG 0x20 | 1143 | #define USING_MSIX_FLAG (1 << 5) |
990 | #define USING_MSI_FLAG 0x40 | 1144 | #define USING_MSI_FLAG (1 << 6) |
991 | 1145 | #define DISABLE_MSI_FLAG (1 << 7) | |
992 | #define TPA_ENABLE_FLAG 0x80 | 1146 | #define TPA_ENABLE_FLAG (1 << 8) |
993 | #define NO_MCP_FLAG 0x100 | 1147 | #define NO_MCP_FLAG (1 << 9) |
994 | #define DISABLE_MSI_FLAG 0x200 | 1148 | |
995 | #define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) | 1149 | #define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) |
996 | #define MF_FUNC_DIS 0x1000 | 1150 | #define MF_FUNC_DIS (1 << 11) |
997 | #define FCOE_MACS_SET 0x2000 | 1151 | #define OWN_CNIC_IRQ (1 << 12) |
998 | #define NO_FCOE_FLAG 0x4000 | 1152 | #define NO_ISCSI_OOO_FLAG (1 << 13) |
999 | #define NO_ISCSI_OOO_FLAG 0x8000 | 1153 | #define NO_ISCSI_FLAG (1 << 14) |
1000 | #define NO_ISCSI_FLAG 0x10000 | 1154 | #define NO_FCOE_FLAG (1 << 15) |
1001 | 1155 | ||
1002 | #define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) | ||
1003 | #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) | 1156 | #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) |
1004 | #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) | 1157 | #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) |
1005 | 1158 | #define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) | |
1006 | int pf_num; /* absolute PF number */ | ||
1007 | int pfid; /* per-path PF number */ | ||
1008 | int base_fw_ndsb; | ||
1009 | #define BP_PATH(bp) (!CHIP_IS_E2(bp) ? \ | ||
1010 | 0 : (bp->pf_num & 1)) | ||
1011 | #define BP_PORT(bp) (bp->pfid & 1) | ||
1012 | #define BP_FUNC(bp) (bp->pfid) | ||
1013 | #define BP_ABS_FUNC(bp) (bp->pf_num) | ||
1014 | #define BP_E1HVN(bp) (bp->pfid >> 1) | ||
1015 | #define BP_VN(bp) (CHIP_MODE_IS_4_PORT(bp) ? \ | ||
1016 | 0 : BP_E1HVN(bp)) | ||
1017 | #define BP_L_ID(bp) (BP_E1HVN(bp) << 2) | ||
1018 | #define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ | ||
1019 | BP_VN(bp) * (CHIP_IS_E1x(bp) ? 2 : 1)) | ||
1020 | |||
1021 | #ifdef BCM_CNIC | ||
1022 | #define BCM_CNIC_CID_START 16 | ||
1023 | #define BCM_ISCSI_ETH_CL_ID 17 | ||
1024 | #endif | ||
1025 | 1159 | ||
1026 | int pm_cap; | 1160 | int pm_cap; |
1027 | int pcie_cap; | 1161 | int pcie_cap; |
@@ -1048,9 +1182,9 @@ struct bnx2x { | |||
1048 | 1182 | ||
1049 | struct cmng_struct_per_port cmng; | 1183 | struct cmng_struct_per_port cmng; |
1050 | u32 vn_weight_sum; | 1184 | u32 vn_weight_sum; |
1051 | |||
1052 | u32 mf_config[E1HVN_MAX]; | 1185 | u32 mf_config[E1HVN_MAX]; |
1053 | u32 mf2_config[E2_FUNC_MAX]; | 1186 | u32 mf2_config[E2_FUNC_MAX]; |
1187 | u32 path_has_ovlan; /* E3 */ | ||
1054 | u16 mf_ov; | 1188 | u16 mf_ov; |
1055 | u8 mf_mode; | 1189 | u8 mf_mode; |
1056 | #define IS_MF(bp) (bp->mf_mode != 0) | 1190 | #define IS_MF(bp) (bp->mf_mode != 0) |
@@ -1075,32 +1209,20 @@ struct bnx2x { | |||
1075 | 1209 | ||
1076 | u32 lin_cnt; | 1210 | u32 lin_cnt; |
1077 | 1211 | ||
1078 | int state; | 1212 | u16 state; |
1079 | #define BNX2X_STATE_CLOSED 0 | 1213 | #define BNX2X_STATE_CLOSED 0 |
1080 | #define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000 | 1214 | #define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000 |
1081 | #define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000 | 1215 | #define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000 |
1082 | #define BNX2X_STATE_OPEN 0x3000 | 1216 | #define BNX2X_STATE_OPEN 0x3000 |
1083 | #define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 | 1217 | #define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 |
1084 | #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 | 1218 | #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 |
1085 | #define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000 | 1219 | |
1086 | #define BNX2X_STATE_FUNC_STARTED 0x7000 | ||
1087 | #define BNX2X_STATE_DIAG 0xe000 | 1220 | #define BNX2X_STATE_DIAG 0xe000 |
1088 | #define BNX2X_STATE_ERROR 0xf000 | 1221 | #define BNX2X_STATE_ERROR 0xf000 |
1089 | 1222 | ||
1090 | int multi_mode; | 1223 | int multi_mode; |
1091 | int num_queues; | 1224 | int num_queues; |
1092 | int disable_tpa; | 1225 | int disable_tpa; |
1093 | u32 *rx_indir_table; | ||
1094 | |||
1095 | struct tstorm_eth_mac_filter_config mac_filters; | ||
1096 | #define BNX2X_ACCEPT_NONE 0x0000 | ||
1097 | #define BNX2X_ACCEPT_UNICAST 0x0001 | ||
1098 | #define BNX2X_ACCEPT_MULTICAST 0x0002 | ||
1099 | #define BNX2X_ACCEPT_ALL_UNICAST 0x0004 | ||
1100 | #define BNX2X_ACCEPT_ALL_MULTICAST 0x0008 | ||
1101 | #define BNX2X_ACCEPT_BROADCAST 0x0010 | ||
1102 | #define BNX2X_ACCEPT_UNMATCHED_UCAST 0x0020 | ||
1103 | #define BNX2X_PROMISCUOUS_MODE 0x10000 | ||
1104 | 1226 | ||
1105 | u32 rx_mode; | 1227 | u32 rx_mode; |
1106 | #define BNX2X_RX_MODE_NONE 0 | 1228 | #define BNX2X_RX_MODE_NONE 0 |
@@ -1108,7 +1230,6 @@ struct bnx2x { | |||
1108 | #define BNX2X_RX_MODE_ALLMULTI 2 | 1230 | #define BNX2X_RX_MODE_ALLMULTI 2 |
1109 | #define BNX2X_RX_MODE_PROMISC 3 | 1231 | #define BNX2X_RX_MODE_PROMISC 3 |
1110 | #define BNX2X_MAX_MULTICAST 64 | 1232 | #define BNX2X_MAX_MULTICAST 64 |
1111 | #define BNX2X_MAX_EMUL_MULTI 16 | ||
1112 | 1233 | ||
1113 | u8 igu_dsb_id; | 1234 | u8 igu_dsb_id; |
1114 | u8 igu_base_sb; | 1235 | u8 igu_base_sb; |
@@ -1117,11 +1238,38 @@ struct bnx2x { | |||
1117 | 1238 | ||
1118 | struct bnx2x_slowpath *slowpath; | 1239 | struct bnx2x_slowpath *slowpath; |
1119 | dma_addr_t slowpath_mapping; | 1240 | dma_addr_t slowpath_mapping; |
1241 | |||
1242 | /* Total number of FW statistics requests */ | ||
1243 | u8 fw_stats_num; | ||
1244 | |||
1245 | /* | ||
1246 | * This is a memory buffer that will contain both statistics | ||
1247 | * ramrod request and data. | ||
1248 | */ | ||
1249 | void *fw_stats; | ||
1250 | dma_addr_t fw_stats_mapping; | ||
1251 | |||
1252 | /* | ||
1253 | * FW statistics request shortcut (points at the | ||
1254 | * beginning of fw_stats buffer). | ||
1255 | */ | ||
1256 | struct bnx2x_fw_stats_req *fw_stats_req; | ||
1257 | dma_addr_t fw_stats_req_mapping; | ||
1258 | int fw_stats_req_sz; | ||
1259 | |||
1260 | /* | ||
1261 | * FW statistics data shortcut (points at the begining of | ||
1262 | * fw_stats buffer + fw_stats_req_sz). | ||
1263 | */ | ||
1264 | struct bnx2x_fw_stats_data *fw_stats_data; | ||
1265 | dma_addr_t fw_stats_data_mapping; | ||
1266 | int fw_stats_data_sz; | ||
1267 | |||
1120 | struct hw_context context; | 1268 | struct hw_context context; |
1121 | 1269 | ||
1122 | struct bnx2x_ilt *ilt; | 1270 | struct bnx2x_ilt *ilt; |
1123 | #define BP_ILT(bp) ((bp)->ilt) | 1271 | #define BP_ILT(bp) ((bp)->ilt) |
1124 | #define ILT_MAX_LINES 128 | 1272 | #define ILT_MAX_LINES 256 |
1125 | 1273 | ||
1126 | int l2_cid_count; | 1274 | int l2_cid_count; |
1127 | #define L2_ILT_LINES(bp) (DIV_ROUND_UP((bp)->l2_cid_count, \ | 1275 | #define L2_ILT_LINES(bp) (DIV_ROUND_UP((bp)->l2_cid_count, \ |
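The comments in this hunk describe a single fw_stats DMA buffer holding the
statistics ramrod request followed by the statistics data. A hedged sketch of
how the two shortcut pointers could be derived from that buffer (the helper
name is invented for illustration; the field names are the ones added here):

static void example_fw_stats_shortcuts(struct bnx2x *bp)
{
	/* the request part sits at the very beginning of the buffer */
	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
	bp->fw_stats_req_mapping = bp->fw_stats_mapping;

	/* the data part follows immediately after the request */
	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
	bp->fw_stats_data_mapping = bp->fw_stats_mapping + bp->fw_stats_req_sz;
}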
@@ -1143,16 +1291,18 @@ struct bnx2x { | |||
1143 | struct cnic_eth_dev cnic_eth_dev; | 1291 | struct cnic_eth_dev cnic_eth_dev; |
1144 | union host_hc_status_block cnic_sb; | 1292 | union host_hc_status_block cnic_sb; |
1145 | dma_addr_t cnic_sb_mapping; | 1293 | dma_addr_t cnic_sb_mapping; |
1146 | #define CNIC_SB_ID(bp) ((bp)->base_fw_ndsb + BP_L_ID(bp)) | ||
1147 | #define CNIC_IGU_SB_ID(bp) ((bp)->igu_base_sb) | ||
1148 | struct eth_spe *cnic_kwq; | 1294 | struct eth_spe *cnic_kwq; |
1149 | struct eth_spe *cnic_kwq_prod; | 1295 | struct eth_spe *cnic_kwq_prod; |
1150 | struct eth_spe *cnic_kwq_cons; | 1296 | struct eth_spe *cnic_kwq_cons; |
1151 | struct eth_spe *cnic_kwq_last; | 1297 | struct eth_spe *cnic_kwq_last; |
1152 | u16 cnic_kwq_pending; | 1298 | u16 cnic_kwq_pending; |
1153 | u16 cnic_spq_pending; | 1299 | u16 cnic_spq_pending; |
1154 | struct mutex cnic_mutex; | ||
1155 | u8 fip_mac[ETH_ALEN]; | 1300 | u8 fip_mac[ETH_ALEN]; |
1301 | struct mutex cnic_mutex; | ||
1302 | struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; | ||
1303 | |||
1304 | /* Start index of the "special" (CNIC related) L2 cleints */ | ||
1305 | u8 cnic_base_cl_id; | ||
1156 | #endif | 1306 | #endif |
1157 | 1307 | ||
1158 | int dmae_ready; | 1308 | int dmae_ready; |
@@ -1189,6 +1339,8 @@ struct bnx2x { | |||
1189 | u16 *init_ops_offsets; | 1339 | u16 *init_ops_offsets; |
1190 | /* Data blob - has 32 bit granularity */ | 1340 | /* Data blob - has 32 bit granularity */ |
1191 | u32 *init_data; | 1341 | u32 *init_data; |
1342 | u32 init_mode_flags; | ||
1343 | #define INIT_MODE_FLAGS(bp) (bp->init_mode_flags) | ||
1192 | /* Zipped PRAM blobs - raw data */ | 1344 | /* Zipped PRAM blobs - raw data */ |
1193 | const u8 *tsem_int_table_data; | 1345 | const u8 *tsem_int_table_data; |
1194 | const u8 *tsem_pram_data; | 1346 | const u8 *tsem_pram_data; |
@@ -1210,8 +1362,10 @@ struct bnx2x { | |||
1210 | #define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data) | 1362 | #define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data) |
1211 | #define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data) | 1363 | #define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data) |
1212 | 1364 | ||
1365 | #define PHY_FW_VER_LEN 20 | ||
1213 | char fw_ver[32]; | 1366 | char fw_ver[32]; |
1214 | const struct firmware *firmware; | 1367 | const struct firmware *firmware; |
1368 | |||
1215 | /* LLDP params */ | 1369 | /* LLDP params */ |
1216 | struct bnx2x_config_lldp_params lldp_config_params; | 1370 | struct bnx2x_config_lldp_params lldp_config_params; |
1217 | 1371 | ||
@@ -1230,13 +1384,30 @@ struct bnx2x { | |||
1230 | bool dcbx_mode_uset; | 1384 | bool dcbx_mode_uset; |
1231 | 1385 | ||
1232 | struct bnx2x_config_dcbx_params dcbx_config_params; | 1386 | struct bnx2x_config_dcbx_params dcbx_config_params; |
1233 | |||
1234 | struct bnx2x_dcbx_port_params dcbx_port_params; | 1387 | struct bnx2x_dcbx_port_params dcbx_port_params; |
1235 | int dcb_version; | 1388 | int dcb_version; |
1236 | 1389 | ||
1237 | /* DCBX Negotiation results */ | 1390 | /* CAM credit pools */ |
1391 | struct bnx2x_credit_pool_obj macs_pool; | ||
1392 | |||
1393 | /* RX_MODE object */ | ||
1394 | struct bnx2x_rx_mode_obj rx_mode_obj; | ||
1395 | |||
1396 | /* MCAST object */ | ||
1397 | struct bnx2x_mcast_obj mcast_obj; | ||
1398 | |||
1399 | /* RSS configuration object */ | ||
1400 | struct bnx2x_rss_config_obj rss_conf_obj; | ||
1401 | |||
1402 | /* Function State controlling object */ | ||
1403 | struct bnx2x_func_sp_obj func_obj; | ||
1404 | |||
1405 | unsigned long sp_state; | ||
1406 | |||
1407 | /* DCBX Negotation results */ | ||
1238 | struct dcbx_features dcbx_local_feat; | 1408 | struct dcbx_features dcbx_local_feat; |
1239 | u32 dcbx_error; | 1409 | u32 dcbx_error; |
1410 | |||
1240 | #ifdef BCM_DCBNL | 1411 | #ifdef BCM_DCBNL |
1241 | struct dcbx_features dcbx_remote_feat; | 1412 | struct dcbx_features dcbx_remote_feat; |
1242 | u32 dcbx_remote_flags; | 1413 | u32 dcbx_remote_flags; |
@@ -1244,42 +1415,11 @@ struct bnx2x { | |||
1244 | u32 pending_max; | 1415 | u32 pending_max; |
1245 | }; | 1416 | }; |
1246 | 1417 | ||
1247 | /** | 1418 | /* Tx queues may be less or equal to Rx queues */ |
1248 | * Init queue/func interface | 1419 | extern int num_queues; |
1249 | */ | ||
1250 | /* queue init flags */ | ||
1251 | #define QUEUE_FLG_TPA 0x0001 | ||
1252 | #define QUEUE_FLG_CACHE_ALIGN 0x0002 | ||
1253 | #define QUEUE_FLG_STATS 0x0004 | ||
1254 | #define QUEUE_FLG_OV 0x0008 | ||
1255 | #define QUEUE_FLG_VLAN 0x0010 | ||
1256 | #define QUEUE_FLG_COS 0x0020 | ||
1257 | #define QUEUE_FLG_HC 0x0040 | ||
1258 | #define QUEUE_FLG_DHC 0x0080 | ||
1259 | #define QUEUE_FLG_OOO 0x0100 | ||
1260 | |||
1261 | #define QUEUE_DROP_IP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR | ||
1262 | #define QUEUE_DROP_TCP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR | ||
1263 | #define QUEUE_DROP_TTL0 TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 | ||
1264 | #define QUEUE_DROP_UDP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR | ||
1265 | |||
1266 | |||
1267 | |||
1268 | /* rss capabilities */ | ||
1269 | #define RSS_IPV4_CAP 0x0001 | ||
1270 | #define RSS_IPV4_TCP_CAP 0x0002 | ||
1271 | #define RSS_IPV6_CAP 0x0004 | ||
1272 | #define RSS_IPV6_TCP_CAP 0x0008 | ||
1273 | |||
1274 | #define BNX2X_NUM_QUEUES(bp) (bp->num_queues) | 1420 | #define BNX2X_NUM_QUEUES(bp) (bp->num_queues) |
1275 | #define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE) | 1421 | #define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE) |
1276 | 1422 | ||
1277 | /* ethtool statistics are displayed for all regular ethernet queues and the | ||
1278 | * fcoe L2 queue if not disabled | ||
1279 | */ | ||
1280 | #define BNX2X_NUM_STAT_QUEUES(bp) (NO_FCOE(bp) ? BNX2X_NUM_ETH_QUEUES(bp) : \ | ||
1281 | (BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE)) | ||
1282 | |||
1283 | #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) | 1423 | #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) |
1284 | 1424 | ||
1285 | #define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE) | 1425 | #define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE) |
@@ -1297,107 +1437,15 @@ struct bnx2x { | |||
1297 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | 1437 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY |
1298 | 1438 | ||
1299 | /* func init flags */ | 1439 | /* func init flags */ |
1300 | #define FUNC_FLG_STATS 0x0001 | 1440 | #define FUNC_FLG_RSS 0x0001 |
1301 | #define FUNC_FLG_TPA 0x0002 | 1441 | #define FUNC_FLG_STATS 0x0002 |
1302 | #define FUNC_FLG_SPQ 0x0004 | 1442 | /* removed FUNC_FLG_UNMATCHED 0x0004 */ |
1303 | #define FUNC_FLG_LEADING 0x0008 /* PF only */ | 1443 | #define FUNC_FLG_TPA 0x0008 |
1304 | 1444 | #define FUNC_FLG_SPQ 0x0010 | |
1305 | struct rxq_pause_params { | 1445 | #define FUNC_FLG_LEADING 0x0020 /* PF only */ |
1306 | u16 bd_th_lo; | ||
1307 | u16 bd_th_hi; | ||
1308 | u16 rcq_th_lo; | ||
1309 | u16 rcq_th_hi; | ||
1310 | u16 sge_th_lo; /* valid iff QUEUE_FLG_TPA */ | ||
1311 | u16 sge_th_hi; /* valid iff QUEUE_FLG_TPA */ | ||
1312 | u16 pri_map; | ||
1313 | }; | ||
1314 | |||
1315 | struct bnx2x_rxq_init_params { | ||
1316 | /* cxt*/ | ||
1317 | struct eth_context *cxt; | ||
1318 | |||
1319 | /* dma */ | ||
1320 | dma_addr_t dscr_map; | ||
1321 | dma_addr_t sge_map; | ||
1322 | dma_addr_t rcq_map; | ||
1323 | dma_addr_t rcq_np_map; | ||
1324 | |||
1325 | u16 flags; | ||
1326 | u16 drop_flags; | ||
1327 | u16 mtu; | ||
1328 | u16 buf_sz; | ||
1329 | u16 fw_sb_id; | ||
1330 | u16 cl_id; | ||
1331 | u16 spcl_id; | ||
1332 | u16 cl_qzone_id; | ||
1333 | |||
1334 | /* valid iff QUEUE_FLG_STATS */ | ||
1335 | u16 stat_id; | ||
1336 | |||
1337 | /* valid iff QUEUE_FLG_TPA */ | ||
1338 | u16 tpa_agg_sz; | ||
1339 | u16 sge_buf_sz; | ||
1340 | u16 max_sges_pkt; | ||
1341 | |||
1342 | /* valid iff QUEUE_FLG_CACHE_ALIGN */ | ||
1343 | u8 cache_line_log; | ||
1344 | |||
1345 | u8 sb_cq_index; | ||
1346 | u32 cid; | ||
1347 | |||
1348 | /* desired interrupts per sec. valid iff QUEUE_FLG_HC */ | ||
1349 | u32 hc_rate; | ||
1350 | }; | ||
1351 | |||
1352 | struct bnx2x_txq_init_params { | ||
1353 | /* cxt*/ | ||
1354 | struct eth_context *cxt; | ||
1355 | 1446 | ||
1356 | /* dma */ | ||
1357 | dma_addr_t dscr_map; | ||
1358 | |||
1359 | u16 flags; | ||
1360 | u16 fw_sb_id; | ||
1361 | u8 sb_cq_index; | ||
1362 | u8 cos; /* valid iff QUEUE_FLG_COS */ | ||
1363 | u16 stat_id; /* valid iff QUEUE_FLG_STATS */ | ||
1364 | u16 traffic_type; | ||
1365 | u32 cid; | ||
1366 | u16 hc_rate; /* desired interrupts per sec.*/ | ||
1367 | /* valid iff QUEUE_FLG_HC */ | ||
1368 | |||
1369 | }; | ||
1370 | |||
1371 | struct bnx2x_client_ramrod_params { | ||
1372 | int *pstate; | ||
1373 | int state; | ||
1374 | u16 index; | ||
1375 | u16 cl_id; | ||
1376 | u32 cid; | ||
1377 | u8 poll; | ||
1378 | #define CLIENT_IS_FCOE 0x01 | ||
1379 | #define CLIENT_IS_LEADING_RSS 0x02 | ||
1380 | u8 flags; | ||
1381 | }; | ||
1382 | |||
1383 | struct bnx2x_client_init_params { | ||
1384 | struct rxq_pause_params pause; | ||
1385 | struct bnx2x_rxq_init_params rxq_params; | ||
1386 | struct bnx2x_txq_init_params txq_params; | ||
1387 | struct bnx2x_client_ramrod_params ramrod_params; | ||
1388 | }; | ||
1389 | |||
1390 | struct bnx2x_rss_params { | ||
1391 | int mode; | ||
1392 | u16 cap; | ||
1393 | u16 result_mask; | ||
1394 | }; | ||
1395 | 1447 | ||
1396 | struct bnx2x_func_init_params { | 1448 | struct bnx2x_func_init_params { |
1397 | |||
1398 | /* rss */ | ||
1399 | struct bnx2x_rss_params *rss; /* valid iff FUNC_FLG_RSS */ | ||
1400 | |||
1401 | /* dma */ | 1449 | /* dma */ |
1402 | dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ | 1450 | dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ |
1403 | dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */ | 1451 | dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */ |
@@ -1409,17 +1457,10 @@ struct bnx2x_func_init_params { | |||
1409 | }; | 1457 | }; |
1410 | 1458 | ||
1411 | #define for_each_eth_queue(bp, var) \ | 1459 | #define for_each_eth_queue(bp, var) \ |
1412 | for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++) | 1460 | for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++) |
1413 | 1461 | ||
1414 | #define for_each_nondefault_eth_queue(bp, var) \ | 1462 | #define for_each_nondefault_eth_queue(bp, var) \ |
1415 | for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++) | 1463 | for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++) |
1416 | |||
1417 | #define for_each_napi_queue(bp, var) \ | ||
1418 | for (var = 0; \ | ||
1419 | var < BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE; var++) \ | ||
1420 | if (skip_queue(bp, var)) \ | ||
1421 | continue; \ | ||
1422 | else | ||
1423 | 1464 | ||
1424 | #define for_each_queue(bp, var) \ | 1465 | #define for_each_queue(bp, var) \ |
1425 | for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \ | 1466 | for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \ |
@@ -1457,11 +1498,66 @@ struct bnx2x_func_init_params { | |||
1457 | 1498 | ||
1458 | #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) | 1499 | #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) |
1459 | 1500 | ||
1460 | #define WAIT_RAMROD_POLL 0x01 | ||
1461 | #define WAIT_RAMROD_COMMON 0x02 | ||
1462 | 1501 | ||
1502 | |||
1503 | |||
1504 | /** | ||
1505 | * bnx2x_set_mac_one - configure a single MAC address | ||
1506 | * | ||
1507 | * @bp: driver handle | ||
1508 | * @mac: MAC to configure | ||
1509 | * @obj: MAC object handle | ||
1510 | * @set: if 'true' add a new MAC, otherwise delete it | ||
1511 | * @mac_type: the type of the MAC to configure (e.g. ETH, UC list) | ||
1512 | * @ramrod_flags: RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT) | ||
1513 | * | ||
1514 | * Configures one MAC according to provided parameters or continues the | ||
1515 | * execution of previously scheduled commands if RAMROD_CONT is set in | ||
1516 | * ramrod_flags. | ||
1517 | * | ||
1518 | * Returns zero if the operation has completed successfully, a positive | ||
1519 | * value if it has been successfully scheduled, and a negative value if the | ||
1520 | * requested operation has failed. | ||
1521 | */ | ||
1522 | int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, | ||
1523 | struct bnx2x_vlan_mac_obj *obj, bool set, | ||
1524 | int mac_type, unsigned long *ramrod_flags); | ||
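Illustration only (not part of the patch): a minimal sketch of how a caller inside the driver might use this to program the primary Ethernet MAC and block until the configuration ramrod completes. The per-queue mac_obj location and the BNX2X_ETH_MAC type belong to the new bnx2x_sp layer and are assumptions of this sketch.

	/* Sketch only: add (or delete) the netdev's primary MAC and wait for
	 * completion.  bp->fp->mac_obj and BNX2X_ETH_MAC are assumed from the
	 * new bnx2x_sp.c object model.
	 */
	static int example_set_eth_mac(struct bnx2x *bp, bool set)
	{
		unsigned long ramrod_flags = 0;

		/* block until the MAC configuration ramrod completes */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);

		return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
					 &bp->fp->mac_obj, set,
					 BNX2X_ETH_MAC, &ramrod_flags);
	}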
1533 | |||
1534 | /** | ||
1535 | * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object | ||
1536 | * | ||
1537 | * @bp: driver handle | ||
1538 | * @mac_obj: MAC object handle | ||
1539 | * @mac_type: type of the MACs to clear (BNX2X_XXX_MAC) | ||
1540 | * @wait_for_comp: if 'true' block until completion | ||
1541 | * | ||
1542 | * Deletes all MACs of the specific type (e.g. ETH, UC list). | ||
1543 | * | ||
1544 | * Returns zero if the operation has completed successfully, a positive | ||
1545 | * value if it has been successfully scheduled, and a negative value if the | ||
1546 | * requested operation has failed. | ||
1547 | */ | ||
1548 | int bnx2x_del_all_macs(struct bnx2x *bp, | ||
1549 | struct bnx2x_vlan_mac_obj *mac_obj, | ||
1550 | int mac_type, bool wait_for_comp); | ||
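Illustration only (not part of the patch): a sketch of a teardown-path caller that first schedules deletion of the UC-list MACs and then deletes the ETH MAC synchronously, respecting the return convention documented above. The mac_obj location and the BNX2X_UC_LIST_MAC/BNX2X_ETH_MAC type names are assumed from bnx2x_sp.h.

	/* Sketch only: flush all MACs of queue 0 on an unload path.  The
	 * mac_obj location and the MAC type names are assumptions here.
	 */
	static void example_flush_macs(struct bnx2x *bp)
	{
		struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp[0].mac_obj;
		int rc;

		/* schedule deletion of the UC-list MACs without waiting;
		 * a positive return only means the command was queued
		 */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
		if (rc < 0)
			BNX2X_ERR("Failed to schedule UC-list MAC deletion\n");

		/* delete the primary ETH MAC and wait for completion */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
		if (rc < 0)
			BNX2X_ERR("Failed to delete ETH MACs\n");
	}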
1551 | |||
1552 | /* Init Function API */ | ||
1553 | void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p); | ||
1554 | int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); | ||
1555 | int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); | ||
1556 | int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode); | ||
1557 | int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); | ||
1463 | void bnx2x_read_mf_cfg(struct bnx2x *bp); | 1558 | void bnx2x_read_mf_cfg(struct bnx2x *bp); |
1464 | 1559 | ||
1560 | |||
1465 | /* dmae */ | 1561 | /* dmae */ |
1466 | void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); | 1562 | void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); |
1467 | void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, | 1563 | void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, |
@@ -1472,21 +1568,10 @@ u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode); | |||
1472 | u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, | 1568 | u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, |
1473 | bool with_comp, u8 comp_type); | 1569 | bool with_comp, u8 comp_type); |
1474 | 1570 | ||
1475 | int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); | ||
1476 | int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); | ||
1477 | int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); | ||
1478 | u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); | ||
1479 | 1571 | ||
1480 | void bnx2x_calc_fc_adv(struct bnx2x *bp); | 1572 | void bnx2x_calc_fc_adv(struct bnx2x *bp); |
1481 | int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | 1573 | int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, |
1482 | u32 data_hi, u32 data_lo, int common); | 1574 | u32 data_hi, u32 data_lo, int cmd_type); |
1483 | |||
1484 | /* Clears multicast and unicast list configuration in the chip. */ | ||
1485 | void bnx2x_invalidate_uc_list(struct bnx2x *bp); | ||
1486 | |||
1487 | int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | ||
1488 | int *state_p, int flags); | ||
1489 | |||
1490 | void bnx2x_update_coalesce(struct bnx2x *bp); | 1575 | void bnx2x_update_coalesce(struct bnx2x *bp); |
1491 | int bnx2x_get_cur_phy_idx(struct bnx2x *bp); | 1576 | int bnx2x_get_cur_phy_idx(struct bnx2x *bp); |
1492 | 1577 | ||
@@ -1644,7 +1729,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1644 | 1729 | ||
1645 | /* must be used on a CID before placing it on a HW ring */ | 1730 | /* must be used on a CID before placing it on a HW ring */ |
1646 | #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ | 1731 | #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ |
1647 | (BP_E1HVN(bp) << 17) | (x)) | 1732 | (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ |
1733 | (x)) | ||
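A worked example may help (illustration only; the concrete value assumes BNX2X_SWCID_SHIFT == 17, matching the open-coded shift this hunk replaces):

	/* Sketch only: composing the on-chip CID by hand.  Assumes the E1H VN
	 * lands at bit 17 and the port at bit 23, with the software CID in
	 * the low bits.
	 */
	static u32 example_hw_cid(void)
	{
		u32 port = 1, vn = 2, sw_cid = 0x11;

		/* (1 << 23) | (2 << 17) | 0x11 == 0x840011 */
		return (port << 23) | (vn << 17) | sw_cid;
	}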
1648 | 1734 | ||
1649 | #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) | 1735 | #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) |
1650 | #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) | 1736 | #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) |
@@ -1771,6 +1857,30 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1771 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) | 1857 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) |
1772 | #define MULTI_MASK 0x7f | 1858 | #define MULTI_MASK 0x7f |
1773 | 1859 | ||
1860 | |||
1861 | #define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func) | ||
1862 | #define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func) | ||
1863 | #define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func) | ||
1864 | #define DEF_TSB_FUNC_OFF offsetof(struct tstorm_def_status_block, func) | ||
1865 | |||
1866 | #define DEF_USB_IGU_INDEX_OFF \ | ||
1867 | offsetof(struct cstorm_def_status_block_u, igu_index) | ||
1868 | #define DEF_CSB_IGU_INDEX_OFF \ | ||
1869 | offsetof(struct cstorm_def_status_block_c, igu_index) | ||
1870 | #define DEF_XSB_IGU_INDEX_OFF \ | ||
1871 | offsetof(struct xstorm_def_status_block, igu_index) | ||
1872 | #define DEF_TSB_IGU_INDEX_OFF \ | ||
1873 | offsetof(struct tstorm_def_status_block, igu_index) | ||
1874 | |||
1875 | #define DEF_USB_SEGMENT_OFF \ | ||
1876 | offsetof(struct cstorm_def_status_block_u, segment) | ||
1877 | #define DEF_CSB_SEGMENT_OFF \ | ||
1878 | offsetof(struct cstorm_def_status_block_c, segment) | ||
1879 | #define DEF_XSB_SEGMENT_OFF \ | ||
1880 | offsetof(struct xstorm_def_status_block, segment) | ||
1881 | #define DEF_TSB_SEGMENT_OFF \ | ||
1882 | offsetof(struct tstorm_def_status_block, segment) | ||
1883 | |||
1774 | #define BNX2X_SP_DSB_INDEX \ | 1884 | #define BNX2X_SP_DSB_INDEX \ |
1775 | (&bp->def_status_blk->sp_sb.\ | 1885 | (&bp->def_status_blk->sp_sb.\ |
1776 | index_values[HC_SP_INDEX_ETH_DEF_CONS]) | 1886 | index_values[HC_SP_INDEX_ETH_DEF_CONS]) |
@@ -1782,7 +1892,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1782 | } while (0) | 1892 | } while (0) |
1783 | 1893 | ||
1784 | #define GET_FLAG(value, mask) \ | 1894 | #define GET_FLAG(value, mask) \ |
1785 | (((value) &= (mask)) >> (mask##_SHIFT)) | 1895 | (((value) & (mask)) >> (mask##_SHIFT)) |
1786 | 1896 | ||
1787 | #define GET_FIELD(value, fname) \ | 1897 | #define GET_FIELD(value, fname) \ |
1788 | (((value) & (fname##_MASK)) >> (fname##_SHIFT)) | 1898 | (((value) & (fname##_MASK)) >> (fname##_SHIFT)) |
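Worth illustrating (not part of the patch; the masks below are hypothetical, not real HSI fields): the previous GET_FLAG used '&=', so it destructively masked its argument and a second extraction from the same variable would read an already-masked value; the corrected form is side-effect free.

	/* Sketch only: hypothetical field masks, not real HSI definitions */
	#define EX_FIELD_A		0x00f0
	#define EX_FIELD_A_SHIFT	4
	#define EX_FIELD_B		0x0f00
	#define EX_FIELD_B_SHIFT	8

	static u16 example_get_flag(void)
	{
		u16 val = 0x0530;
		u16 a = GET_FLAG(val, EX_FIELD_A);	/* 0x3, 'val' unchanged */
		u16 b = GET_FLAG(val, EX_FIELD_B);	/* 0x5; the old '&=' form
							 * would have returned 0
							 * here because the first
							 * call clobbered 'val'
							 */
		return a + b;
	}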
@@ -1817,14 +1927,12 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1817 | #define HC_SEG_ACCESS_ATTN 4 | 1927 | #define HC_SEG_ACCESS_ATTN 4 |
1818 | #define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/ | 1928 | #define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/ |
1819 | 1929 | ||
1820 | #ifdef BNX2X_MAIN | 1930 | static const u32 dmae_reg_go_c[] = { |
1821 | #define BNX2X_EXTERN | 1931 | DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, |
1822 | #else | 1932 | DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, |
1823 | #define BNX2X_EXTERN extern | 1933 | DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, |
1824 | #endif | 1934 | DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 |
1825 | 1935 | }; | |
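For context (illustration only, not part of the patch): each entry above is the "GO" register of one DMAE channel. Roughly, the driver copies a command into the channel's command memory and then writes 1 to that channel's GO register to start the transfer; the helper name and details below are simplified.

	/* Sketch only: copy a dmae_command into the channel's command memory,
	 * then kick that channel's GO register to start the transfer.
	 */
	static void example_post_dmae(struct bnx2x *bp,
				      struct dmae_command *dmae, int idx)
	{
		u32 cmd_offset = DMAE_REG_CMD_MEM +
				 sizeof(struct dmae_command) * idx;
		int i;

		for (i = 0; i < sizeof(struct dmae_command) / 4; i++)
			REG_WR(bp, cmd_offset + i * 4, *(((u32 *)dmae) + i));

		/* start the DMAE engine on this channel */
		REG_WR(bp, dmae_reg_go_c[idx], 1);
	}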
1826 | BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */ | ||
1827 | |||
1828 | extern void bnx2x_set_ethtool_ops(struct net_device *netdev); | ||
1829 | 1936 | ||
1937 | void bnx2x_set_ethtool_ops(struct net_device *netdev); | ||
1830 | #endif /* bnx2x.h */ | 1938 | #endif /* bnx2x.h */ |