author    Dmitry Kravkov <dmitry@broadcom.com>    2010-10-05 23:23:26 -0400
committer David S. Miller <davem@davemloft.net>   2010-10-06 17:10:35 -0400
commit    523224a3b3cd407ce4e6731a087194e13a90db18 (patch)
tree      bb0fda289682e4259c401b8a5763ba4cc4d41659 /drivers/net/bnx2x/bnx2x.h
parent    0c5b77152e736d23a23eb2546eab323e27a37f52 (diff)
bnx2x, cnic, bnx2i: use new FW/HSI
This is the new FW HSI blob and the relevant definitions without logic changes.
It also includes code adaptation for the new HSI. New features are not enabled.

New FW/HSI includes:
- Support for 57712 HW
- Future support for VF (not used)
- Improvements in FW interrupts scheme
- FW FCoE hooks (stubs for future usage)

Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bnx2x/bnx2x.h')
-rw-r--r--  drivers/net/bnx2x/bnx2x.h  532
1 file changed, 460 insertions, 72 deletions
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 64329c5fbdea..8b053e0c00ab 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -33,13 +33,11 @@
 #define BNX2X_NEW_NAPI

 
-
 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
 #define BCM_CNIC 1
 #include "../cnic_if.h"
 #endif

-
 #ifdef BCM_CNIC
 #define BNX2X_MIN_MSIX_VEC_CNT 3
 #define BNX2X_MSIX_VEC_FP_START 2
@@ -129,16 +127,18 @@ void bnx2x_panic_dump(struct bnx2x *bp);
  } while (0)
 #endif

+#define bnx2x_mc_addr(ha) ((ha)->addr)

 #define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
 #define U64_HI(x) (u32)(((u64)(x)) >> 32)
 #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))


-#define REG_ADDR(bp, offset) (bp->regview + offset)
+#define REG_ADDR(bp, offset) ((bp->regview) + (offset))

 #define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
 #define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
+#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset))

 #define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
 #define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
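Illustration (not part of the patch): the U64_HI/U64_LO/HILO_U64 helpers in the context above split a 64-bit DMA address into the two 32-bit words the chip consumes and reassemble them. A minimal usage sketch; the helper name below is made up, while the addr_hi/addr_lo fields follow the driver's existing BD convention.

    /* sketch: program a BD with the halves of a DMA mapping (hypothetical helper) */
    static inline void example_set_bd_addr(struct eth_rx_bd *bd, dma_addr_t mapping)
    {
            bd->addr_hi = cpu_to_le32(U64_HI(mapping));
            bd->addr_lo = cpu_to_le32(U64_LO(mapping));
            /* HILO_U64(U64_HI(mapping), U64_LO(mapping)) == (u64)mapping */
    }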
@@ -160,6 +160,9 @@ void bnx2x_panic_dump(struct bnx2x *bp);
  offset, len32); \
  } while (0)

+#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
+ REG_WR_DMAE(bp, offset, valp, len32)
+
 #define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
  do { \
  memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
@@ -175,16 +178,52 @@ void bnx2x_panic_dump(struct bnx2x *bp);
  offsetof(struct shmem2_region, field))
 #define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
 #define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
+#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \
+ offsetof(struct mf_cfg, field))

-#define MF_CFG_RD(bp, field) SHMEM_RD(bp, mf_cfg.field)
-#define MF_CFG_WR(bp, field, val) SHMEM_WR(bp, mf_cfg.field, val)
+#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field))
+#define MF_CFG_WR(bp, field, val) REG_WR(bp,\
+ MF_CFG_ADDR(bp, field), (val))

 #define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
 #define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)

+/* SP SB indices */
+
+/* General SP events - stats query, cfc delete, etc */
+#define HC_SP_INDEX_ETH_DEF_CONS 3
+
+/* EQ completions */
+#define HC_SP_INDEX_EQ_CONS 7
+
+/* iSCSI L2 */
+#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
+#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
+
+/**
+ * CIDs and CLIDs:
+ * CLIDs below is a CLID for func 0, then the CLID for other
+ * functions will be calculated by the formula:
+ *
+ * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
+ *
+ */
+/* iSCSI L2 */
+#define BNX2X_ISCSI_ETH_CL_ID 17
+#define BNX2X_ISCSI_ETH_CID 17
+
+/** Additional rings budgeting */
+#ifdef BCM_CNIC
+#define CNIC_CONTEXT_USE 1
+#else
+#define CNIC_CONTEXT_USE 0
+#endif /* BCM_CNIC */
+
 #define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
  AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR

+#define SM_RX_ID 0
+#define SM_TX_ID 1

 /* fast path */

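Illustration (not part of the patch): the CLID comment added above boils down to simple per-function arithmetic. NUM_SPECIAL_CLIENTS and the helper below are assumed placeholders, not definitions from this header.

    #define NUM_SPECIAL_CLIENTS 18    /* assumed example value */

    /* FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X */
    static inline u8 example_func_clid(int func, u8 func_0_clid)
    {
            return func * NUM_SPECIAL_CLIENTS + func_0_clid;
    }
    /* e.g. func 1 with FUNC_0_CLID_X == BNX2X_ISCSI_ETH_CL_ID (17) gives
     * 1 * 18 + 17 = 35 under the assumed constant above. */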
@@ -254,11 +293,21 @@ union db_prod {
 #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
 #define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)

+union host_hc_status_block {
+ /* pointer to fp status block e1x */
+ struct host_hc_status_block_e1x *e1x_sb;
+};

 struct bnx2x_fastpath {

  struct napi_struct napi;
- struct host_status_block *status_blk;
+ union host_hc_status_block status_blk;
+ /* chip independed shortcuts into sb structure */
+ __le16 *sb_index_values;
+ __le16 *sb_running_index;
+ /* chip independed shortcut into rx_prods_offset memory */
+ u32 ustorm_rx_prods_offset;
+
  dma_addr_t status_blk_mapping;

  struct sw_tx_bd *tx_buf_ring;
@@ -288,10 +337,15 @@ struct bnx2x_fastpath {
 #define BNX2X_FP_STATE_OPEN 0xa0000
 #define BNX2X_FP_STATE_HALTING 0xb0000
 #define BNX2X_FP_STATE_HALTED 0xc0000
+#define BNX2X_FP_STATE_TERMINATING 0xd0000
+#define BNX2X_FP_STATE_TERMINATED 0xe0000

  u8 index; /* number in fp array */
  u8 cl_id; /* eth client id */
- u8 sb_id; /* status block number in HW */
+ u8 cl_qzone_id;
+ u8 fw_sb_id; /* status block number in FW */
+ u8 igu_sb_id; /* status block number in HW */
+ u32 cid;

  union db_prod tx_db;

@@ -301,8 +355,7 @@ struct bnx2x_fastpath {
  u16 tx_bd_cons;
  __le16 *tx_cons_sb;

- __le16 fp_c_idx;
- __le16 fp_u_idx;
+ __le16 fp_hc_idx;

  u16 rx_bd_prod;
  u16 rx_bd_cons;
@@ -312,7 +365,7 @@ struct bnx2x_fastpath {
  /* The last maximal completed SGE */
  u16 last_max_sge;
  __le16 *rx_cons_sb;
- __le16 *rx_bd_cons_sb;
+


  unsigned long tx_pkt,
@@ -356,6 +409,8 @@ struct bnx2x_fastpath {
 #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
 #define MAX_TX_BD (NUM_TX_BD - 1)
 #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
+#define INIT_JUMBO_TX_RING_SIZE MAX_TX_AVAIL
+#define INIT_TX_RING_SIZE MAX_TX_AVAIL
 #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
  (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
 #define TX_BD(x) ((x) & MAX_TX_BD)
@@ -370,6 +425,8 @@ struct bnx2x_fastpath {
 #define MAX_RX_BD (NUM_RX_BD - 1)
 #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
 #define MIN_RX_AVAIL 128
+#define INIT_JUMBO_RX_RING_SIZE MAX_RX_AVAIL
+#define INIT_RX_RING_SIZE MAX_RX_AVAIL
 #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
  (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
 #define RX_BD(x) ((x) & MAX_RX_BD)
@@ -420,11 +477,12 @@ struct bnx2x_fastpath {
  le32_to_cpu((bd)->addr_lo))
 #define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))

-
+#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
+#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
 #define DPM_TRIGER_TYPE 0x40
 #define DOORBELL(bp, cid, val) \
  do { \
- writel((u32)(val), bp->doorbells + (BCM_PAGE_SIZE * (cid)) + \
+ writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
  DPM_TRIGER_TYPE); \
  } while (0)

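Illustration (not part of the patch): the DOORBELL() change above moves from a BCM_PAGE_SIZE stride per CID to the per-device bp->db_size stride (128 bytes when db_size is 1 << BNX2X_DB_SHIFT). The helper below only restates that arithmetic and is hypothetical.

    static inline u32 example_db_offset(const struct bnx2x *bp, u32 cid)
    {
            /* with db_size == 1 << BNX2X_DB_SHIFT == 128, CID 5 rings at
             * 5 * 128 + DPM_TRIGER_TYPE == 640 + 0x40 */
            return bp->db_size * cid + DPM_TRIGER_TYPE;
    }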
@@ -482,31 +540,15 @@ struct bnx2x_fastpath {
 #define BNX2X_RX_SUM_FIX(cqe) \
  BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)

-
-#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES)
-#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES)
-
-#define U_SB_ETH_RX_CQ_INDEX HC_INDEX_U_ETH_RX_CQ_CONS
-#define U_SB_ETH_RX_BD_INDEX HC_INDEX_U_ETH_RX_BD_CONS
-#define C_SB_ETH_TX_CQ_INDEX HC_INDEX_C_ETH_TX_CQ_CONS
+#define U_SB_ETH_RX_CQ_INDEX 1
+#define U_SB_ETH_RX_BD_INDEX 2
+#define C_SB_ETH_TX_CQ_INDEX 5

 #define BNX2X_RX_SB_INDEX \
- (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_CQ_INDEX])
-
-#define BNX2X_RX_SB_BD_INDEX \
- (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_BD_INDEX])
-
-#define BNX2X_RX_SB_INDEX_NUM \
- (((U_SB_ETH_RX_CQ_INDEX << \
- USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
- USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER) | \
- ((U_SB_ETH_RX_BD_INDEX << \
- USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT) & \
- USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER))
+ (&fp->sb_index_values[U_SB_ETH_RX_CQ_INDEX])

 #define BNX2X_TX_SB_INDEX \
- (&fp->status_blk->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX])
-
+ (&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX])

 /* end of fast path */

@@ -553,10 +595,16 @@ struct bnx2x_common {

  u32 shmem_base;
  u32 shmem2_base;
+ u32 mf_cfg_base;

  u32 hw_config;

  u32 bc_ver;
+
+ u8 int_block;
+#define INT_BLOCK_HC 0
+ u8 chip_port_mode;
+#define CHIP_PORT_MODE_NONE 0x2
 };


@@ -590,27 +638,98 @@ struct bnx2x_port {

 /* end of port */

+/* e1h Classification CAM line allocations */
+enum {
+ CAM_ETH_LINE = 0,
+ CAM_ISCSI_ETH_LINE,
+ CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE
+};

+#define BNX2X_VF_ID_INVALID 0xFF

-#ifdef BCM_CNIC
-#define MAX_CONTEXT 15
-#else
-#define MAX_CONTEXT 16
-#endif
+/*
+ * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
+ * control by the number of fast-path status blocks supported by the
+ * device (HW/FW). Each fast-path status block (FP-SB) aka non-default
+ * status block represents an independent interrupts context that can
+ * serve a regular L2 networking queue. However special L2 queues such
+ * as the FCoE queue do not require a FP-SB and other components like
+ * the CNIC may consume FP-SB reducing the number of possible L2 queues
+ *
+ * If the maximum number of FP-SB available is X then:
+ * a. If CNIC is supported it consumes 1 FP-SB thus the max number of
+ * regular L2 queues is Y=X-1
+ * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
+ * c. If the FCoE L2 queue is supported the actual number of L2 queues
+ * is Y+1
+ * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
+ * slow-path interrupts) or Y+2 if CNIC is supported (one additional
+ * FP interrupt context for the CNIC).
+ * e. The number of HW context (CID count) is always X or X+1 if FCoE
+ * L2 queue is supported. the cid for the FCoE L2 queue is always X.
+ */
+
+#define FP_SB_MAX_E1x 16 /* fast-path interrupt contexts E1x */
+#define MAX_CONTEXT FP_SB_MAX_E1x
+
+/*
+ * cid_cnt paramter below refers to the value returned by
+ * 'bnx2x_get_l2_cid_count()' routine
+ */
+
+/*
+ * The number of FP context allocated by the driver == max number of regular
+ * L2 queues + 1 for the FCoE L2 queue
+ */
+#define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE)

 union cdu_context {
  struct eth_context eth;
  char pad[1024];
 };

+/* CDU host DB constants */
+#define CDU_ILT_PAGE_SZ_HW 3
+#define CDU_ILT_PAGE_SZ (4096 << CDU_ILT_PAGE_SZ_HW) /* 32K */
+#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
+
+#ifdef BCM_CNIC
+#define CNIC_ISCSI_CID_MAX 256
+#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX)
+#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
+#endif
+
+#define QM_ILT_PAGE_SZ_HW 3
+#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 32K */
+#define QM_CID_ROUND 1024
+
+#ifdef BCM_CNIC
+/* TM (timers) host DB constants */
+#define TM_ILT_PAGE_SZ_HW 2
+#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 16K */
+/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
+#define TM_CONN_NUM 1024
+#define TM_ILT_SZ (8 * TM_CONN_NUM)
+#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
+
+/* SRC (Searcher) host DB constants */
+#define SRC_ILT_PAGE_SZ_HW 3
+#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 32K */
+#define SRC_HASH_BITS 10
+#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */
+#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM)
+#define SRC_T2_SZ SRC_ILT_SZ
+#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
+#endif
+
 #define MAX_DMAE_C 8

 /* DMA memory not used in fastpath */
 struct bnx2x_slowpath {
- union cdu_context context[MAX_CONTEXT];
  struct eth_stats_query fw_stats;
  struct mac_configuration_cmd mac_config;
  struct mac_configuration_cmd mcast_config;
+ struct client_init_ramrod_data client_init_data;

  /* used by dmae command executer */
  struct dmae_command dmae[MAX_DMAE_C];
@@ -638,37 +757,71 @@ struct attn_route {
  u32 sig[4];
 };

+struct iro {
+ u32 base;
+ u16 m1;
+ u16 m2;
+ u16 m3;
+ u16 size;
+};
+
+struct hw_context {
+ union cdu_context *vcxt;
+ dma_addr_t cxt_mapping;
+ size_t size;
+};
+
+/* forward */
+struct bnx2x_ilt;
+
 typedef enum {
  BNX2X_RECOVERY_DONE,
  BNX2X_RECOVERY_INIT,
  BNX2X_RECOVERY_WAIT,
 } bnx2x_recovery_state_t;

+/**
+ * Event queue (EQ or event ring) MC hsi
+ * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2
+ */
+#define NUM_EQ_PAGES 1
+#define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem))
+#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1)
+#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
+#define EQ_DESC_MASK (NUM_EQ_DESC - 1)
+#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
+
+/* depends on EQ_DESC_CNT_PAGE being a power of 2 */
+#define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \
+ (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
+
+/* depends on the above and on NUM_EQ_PAGES being a power of 2 */
+#define EQ_DESC(x) ((x) & EQ_DESC_MASK)
+
+#define BNX2X_EQ_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_EQ_CONS])
+
 struct bnx2x {
  /* Fields used in the tx and intr/napi performance paths
  * are grouped together in the beginning of the structure
  */
- struct bnx2x_fastpath fp[MAX_CONTEXT];
+ struct bnx2x_fastpath *fp;
  void __iomem *regview;
  void __iomem *doorbells;
-#ifdef BCM_CNIC
-#define BNX2X_DB_SIZE (18*BCM_PAGE_SIZE)
-#else
-#define BNX2X_DB_SIZE (16*BCM_PAGE_SIZE)
-#endif
+ u16 db_size;

  struct net_device *dev;
  struct pci_dev *pdev;

+ struct iro *iro_arr;
+#define IRO (bp->iro_arr)
+
  atomic_t intr_sem;

  bnx2x_recovery_state_t recovery_state;
  int is_leader;
-#ifdef BCM_CNIC
- struct msix_entry msix_table[MAX_CONTEXT+2];
-#else
- struct msix_entry msix_table[MAX_CONTEXT+1];
-#endif
+ struct msix_entry *msix_table;
 #define INT_MODE_INTx 1
 #define INT_MODE_MSI 2

@@ -680,7 +833,8 @@ struct bnx2x {

  u32 rx_csum;
  u32 rx_buf_size;
-#define ETH_OVREHEAD (ETH_HLEN + 8) /* 8 for CRC + VLAN */
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
 #define ETH_MIN_PACKET_SIZE 60
 #define ETH_MAX_PACKET_SIZE 1500
 #define ETH_MAX_JUMBO_PACKET_SIZE 9600
@@ -689,13 +843,12 @@ struct bnx2x {
 #define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
  L1_CACHE_SHIFT : 8)
 #define BNX2X_RX_ALIGN (1 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)

- struct host_def_status_block *def_status_blk;
-#define DEF_SB_ID 16
- __le16 def_c_idx;
- __le16 def_u_idx;
- __le16 def_x_idx;
- __le16 def_t_idx;
+ struct host_sp_status_block *def_status_blk;
+#define DEF_SB_IGU_ID 16
+#define DEF_SB_ID HC_SP_SB_ID
+ __le16 def_idx;
  __le16 def_att_idx;
  u32 attn_state;
  struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
@@ -711,6 +864,13 @@ struct bnx2x {
  /* used to synchronize spq accesses */
  spinlock_t spq_lock;

+ /* event queue */
+ union event_ring_elem *eq_ring;
+ dma_addr_t eq_mapping;
+ u16 eq_prod;
+ u16 eq_cons;
+ __le16 *eq_cons_sb;
+
  /* Flags for marking that there is a STAT_QUERY or
  SET_MAC ramrod pending */
  int stats_pending;
@@ -737,6 +897,8 @@ struct bnx2x {
 #define MF_FUNC_DIS 0x1000

  int func;
+ int base_fw_ndsb;
+
 #define BP_PORT(bp) (bp->func % PORT_MAX)
 #define BP_FUNC(bp) (bp->func)
 #define BP_E1HVN(bp) (bp->func >> 1)
@@ -801,6 +963,7 @@ struct bnx2x {
 #define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
 #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
 #define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
+#define BNX2X_STATE_FUNC_STARTED 0x7000
 #define BNX2X_STATE_DIAG 0xe000
 #define BNX2X_STATE_ERROR 0xf000

@@ -809,6 +972,15 @@ struct bnx2x {
  int disable_tpa;
  int int_mode;

+ struct tstorm_eth_mac_filter_config mac_filters;
+#define BNX2X_ACCEPT_NONE 0x0000
+#define BNX2X_ACCEPT_UNICAST 0x0001
+#define BNX2X_ACCEPT_MULTICAST 0x0002
+#define BNX2X_ACCEPT_ALL_UNICAST 0x0004
+#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008
+#define BNX2X_ACCEPT_BROADCAST 0x0010
+#define BNX2X_PROMISCUOUS_MODE 0x10000
+
  u32 rx_mode;
 #define BNX2X_RX_MODE_NONE 0
 #define BNX2X_RX_MODE_NORMAL 1
@@ -817,12 +989,25 @@ struct bnx2x {
 #define BNX2X_MAX_MULTICAST 64
 #define BNX2X_MAX_EMUL_MULTI 16

- u32 rx_mode_cl_mask;
-
+ u8 igu_dsb_id;
+ u8 igu_base_sb;
+ u8 igu_sb_cnt;
  dma_addr_t def_status_blk_mapping;

  struct bnx2x_slowpath *slowpath;
  dma_addr_t slowpath_mapping;
+ struct hw_context context;
+
+ struct bnx2x_ilt *ilt;
+#define BP_ILT(bp) ((bp)->ilt)
+#define ILT_MAX_LINES 128
+
+ int l2_cid_count;
+#define L2_ILT_LINES(bp) (DIV_ROUND_UP((bp)->l2_cid_count, \
+ ILT_PAGE_CIDS))
+#define BNX2X_DB_SIZE(bp) ((bp)->l2_cid_count * (1 << BNX2X_DB_SHIFT))
+
+ int qm_cid_count;

  int dropless_fc;

@@ -842,9 +1027,10 @@ struct bnx2x {
  void *cnic_data;
  u32 cnic_tag;
  struct cnic_eth_dev cnic_eth_dev;
- struct host_status_block *cnic_sb;
+ union host_hc_status_block cnic_sb;
  dma_addr_t cnic_sb_mapping;
-#define CNIC_SB_ID(bp) BP_L_ID(bp)
+#define CNIC_SB_ID(bp) ((bp)->base_fw_ndsb + BP_L_ID(bp))
+#define CNIC_IGU_SB_ID(bp) ((bp)->igu_base_sb)
  struct eth_spe *cnic_kwq;
  struct eth_spe *cnic_kwq_prod;
  struct eth_spe *cnic_kwq_cons;
@@ -914,12 +1100,167 @@ struct bnx2x {
  const struct firmware *firmware;
 };

+/**
+ * Init queue/func interface
+ */
+/* queue init flags */
+#define QUEUE_FLG_TPA 0x0001
+#define QUEUE_FLG_CACHE_ALIGN 0x0002
+#define QUEUE_FLG_STATS 0x0004
+#define QUEUE_FLG_OV 0x0008
+#define QUEUE_FLG_VLAN 0x0010
+#define QUEUE_FLG_COS 0x0020
+#define QUEUE_FLG_HC 0x0040
+#define QUEUE_FLG_DHC 0x0080
+#define QUEUE_FLG_OOO 0x0100
+
+#define QUEUE_DROP_IP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR
+#define QUEUE_DROP_TCP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR
+#define QUEUE_DROP_TTL0 TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0
+#define QUEUE_DROP_UDP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR
+
+
+
+/* rss capabilities */
+#define RSS_IPV4_CAP 0x0001
+#define RSS_IPV4_TCP_CAP 0x0002
+#define RSS_IPV6_CAP 0x0004
+#define RSS_IPV6_TCP_CAP 0x0008

 #define BNX2X_MAX_QUEUES(bp) (IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
  : MAX_CONTEXT)
 #define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
 #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)

+
+#define RSS_IPV4_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
+
+#define RSS_IPV4_TCP_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY
+
+#define RSS_IPV6_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY
+
+#define RSS_IPV6_TCP_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
+
+/* func init flags */
+#define FUNC_FLG_RSS 0x0001
+#define FUNC_FLG_STATS 0x0002
+/* removed FUNC_FLG_UNMATCHED 0x0004 */
+#define FUNC_FLG_TPA 0x0008
+#define FUNC_FLG_SPQ 0x0010
+#define FUNC_FLG_LEADING 0x0020 /* PF only */
+
+#define FUNC_CONFIG(flgs) ((flgs) & (FUNC_FLG_RSS | FUNC_FLG_TPA | \
+ FUNC_FLG_LEADING))
+
+struct rxq_pause_params {
+ u16 bd_th_lo;
+ u16 bd_th_hi;
+ u16 rcq_th_lo;
+ u16 rcq_th_hi;
+ u16 sge_th_lo; /* valid iff QUEUE_FLG_TPA */
+ u16 sge_th_hi; /* valid iff QUEUE_FLG_TPA */
+ u16 pri_map;
+};
+
+struct bnx2x_rxq_init_params {
+ /* cxt*/
+ struct eth_context *cxt;
+
+ /* dma */
+ dma_addr_t dscr_map;
+ dma_addr_t sge_map;
+ dma_addr_t rcq_map;
+ dma_addr_t rcq_np_map;
+
+ u16 flags;
+ u16 drop_flags;
+ u16 mtu;
+ u16 buf_sz;
+ u16 fw_sb_id;
+ u16 cl_id;
+ u16 spcl_id;
+ u16 cl_qzone_id;
+
+ /* valid iff QUEUE_FLG_STATS */
+ u16 stat_id;
+
+ /* valid iff QUEUE_FLG_TPA */
+ u16 tpa_agg_sz;
+ u16 sge_buf_sz;
+ u16 max_sges_pkt;
+
+ /* valid iff QUEUE_FLG_CACHE_ALIGN */
+ u8 cache_line_log;
+
+ u8 sb_cq_index;
+ u32 cid;
+
+ /* desired interrupts per sec. valid iff QUEUE_FLG_HC */
+ u32 hc_rate;
+};
+
+struct bnx2x_txq_init_params {
+ /* cxt*/
+ struct eth_context *cxt;
+
+ /* dma */
+ dma_addr_t dscr_map;
+
+ u16 flags;
+ u16 fw_sb_id;
+ u8 sb_cq_index;
+ u8 cos; /* valid iff QUEUE_FLG_COS */
+ u16 stat_id; /* valid iff QUEUE_FLG_STATS */
+ u16 traffic_type;
+ u32 cid;
+ u16 hc_rate; /* desired interrupts per sec.*/
+ /* valid iff QUEUE_FLG_HC */
+
+};
+
+struct bnx2x_client_ramrod_params {
+ int *pstate;
+ int state;
+ u16 index;
+ u16 cl_id;
+ u32 cid;
+ u8 poll;
+#define CLIENT_IS_LEADING_RSS 0x02
+ u8 flags;
+};
+
+struct bnx2x_client_init_params {
+ struct rxq_pause_params pause;
+ struct bnx2x_rxq_init_params rxq_params;
+ struct bnx2x_txq_init_params txq_params;
+ struct bnx2x_client_ramrod_params ramrod_params;
+};
+
+struct bnx2x_rss_params {
+ int mode;
+ u16 cap;
+ u16 result_mask;
+};
+
+struct bnx2x_func_init_params {
+
+ /* rss */
+ struct bnx2x_rss_params *rss; /* valid iff FUNC_FLG_RSS */
+
+ /* dma */
+ dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
+ dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */
+
+ u16 func_flgs;
+ u16 func_id; /* abs fid */
+ u16 pf_id;
+ u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
+};
+
 #define for_each_queue(bp, var) \
  for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
 #define for_each_nondefault_queue(bp, var) \
@@ -957,6 +1298,38 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,

  return val;
 }
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ do { \
+ x = pci_alloc_consistent(bp->pdev, size, y); \
+ if (x) \
+ memset(x, 0, size); \
+ } while (0)
+
+#define BNX2X_ILT_FREE(x, y, size) \
+ do { \
+ if (x) { \
+ pci_free_consistent(bp->pdev, size, x, y); \
+ x = NULL; \
+ y = 0; \
+ } \
+ } while (0)
+
+#define ILOG2(x) (ilog2((x)))
+
+#define ILT_NUM_PAGE_ENTRIES (3072)
+/* In 57710/11 we use whole table since we have 8 func
+ */
+#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8)
+
+#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
+/*
+ * the phys address is shifted right 12 bits and has an added
+ * 1=valid bit added to the 53rd bit
+ * then since this is a wide register(TM)
+ * we split it into two 32 bit writes
+ */
+#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
+#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))


 /* load/unload mode */
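Illustration (not part of the patch): ONCHIP_ADDR1()/ONCHIP_ADDR2() added above split a page-aligned physical address into the two 32-bit halves of a wide ILT register, with the valid bit placed at bit 20 of the upper word (bit 52 of the full value). The helper and register layout below are assumptions used only to show the pattern.

    static void example_ilt_line_wr(struct bnx2x *bp, u32 reg, dma_addr_t page)
    {
            REG_WR(bp, reg, ONCHIP_ADDR1(page));        /* address bits 12..43 */
            REG_WR(bp, reg + 4, ONCHIP_ADDR2(page));    /* valid bit + bits 44..52 */
    }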
@@ -1032,7 +1405,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)


-#define BNX2X_BTR 1
+#define BNX2X_BTR 4
 #define MAX_SPQ_PENDING 8


@@ -1149,20 +1522,22 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
  TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
 #define MULTI_MASK 0x7f

-
-#define DEF_USB_FUNC_OFF (2 + 2*HC_USTORM_DEF_SB_NUM_INDICES)
-#define DEF_CSB_FUNC_OFF (2 + 2*HC_CSTORM_DEF_SB_NUM_INDICES)
-#define DEF_XSB_FUNC_OFF (2 + 2*HC_XSTORM_DEF_SB_NUM_INDICES)
-#define DEF_TSB_FUNC_OFF (2 + 2*HC_TSTORM_DEF_SB_NUM_INDICES)
-
-#define C_DEF_SB_SP_INDEX HC_INDEX_DEF_C_ETH_SLOW_PATH
-
 #define BNX2X_SP_DSB_INDEX \
-(&bp->def_status_blk->c_def_status_block.index_values[C_DEF_SB_SP_INDEX])
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_ETH_DEF_CONS])
+#define SET_FLAG(value, mask, flag) \
+ do {\
+ (value) &= ~(mask);\
+ (value) |= ((flag) << (mask##_SHIFT));\
+ } while (0)

+#define GET_FLAG(value, mask) \
+ (((value) &= (mask)) >> (mask##_SHIFT))

 #define CAM_IS_INVALID(x) \
-(x.target_table_entry.flags == TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
+ (GET_FLAG(x.flags, \
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
+ (T_ETH_MAC_COMMAND_INVALIDATE))

 #define CAM_INVALIDATE(x) \
  (x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
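Illustration (not part of the patch): SET_FLAG()/GET_FLAG() added above expect a mask macro that has a matching *_SHIFT definition, as the HSI headers provide. The EXAMPLE_FIELD names below are hypothetical; note that GET_FLAG() masks its first argument in place as a side effect.

    #define EXAMPLE_FIELD        (0x3 << 4)
    #define EXAMPLE_FIELD_SHIFT  4

    static u8 example_flags(void)
    {
            u8 flags = 0;

            SET_FLAG(flags, EXAMPLE_FIELD, 2);      /* flags == 0x20 */
            return GET_FLAG(flags, EXAMPLE_FIELD);  /* returns 2 */
    }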
@@ -1181,6 +1556,14 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define BNX2X_VPD_LEN 128
 #define VENDOR_ID_LEN 4

+/* Congestion management fairness mode */
+#define CMNG_FNS_NONE 0
+#define CMNG_FNS_MINMAX 1
+
+#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/
+#define HC_SEG_ACCESS_ATTN 4
+#define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/
+
 #ifdef BNX2X_MAIN
 #define BNX2X_EXTERN
 #else
@@ -1195,4 +1578,9 @@ extern void bnx2x_set_ethtool_ops(struct net_device *netdev);

 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);

+#define WAIT_RAMROD_POLL 0x01
+#define WAIT_RAMROD_COMMON 0x02
+
+int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+ int *state_p, int flags);
 #endif /* bnx2x.h */