Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bnx2x/bnx2x.h             |  532
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c         |  488
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h         |  256
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c     |   20
-rw-r--r--  drivers/net/bnx2x/bnx2x_fw_defs.h     |  819
-rw-r--r--  drivers/net/bnx2x/bnx2x_fw_file_hdr.h |    1
-rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h         | 1465
-rw-r--r--  drivers/net/bnx2x/bnx2x_init.h        |   41
-rw-r--r--  drivers/net/bnx2x/bnx2x_init_ops.h    |  338
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c        |    3
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c        | 3208
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c       |   12
-rw-r--r--  drivers/net/cnic.c                    |  285
-rw-r--r--  drivers/net/cnic.h                    |   51
-rw-r--r--  drivers/net/cnic_defs.h               |  456
-rw-r--r--  drivers/net/cnic_if.h                 |    2
16 files changed, 4758 insertions(+), 3219 deletions(-)
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 64329c5fbdea..8b053e0c00ab 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -33,13 +33,11 @@
 #define BNX2X_NEW_NAPI
 
 
-
 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
 #define BCM_CNIC 1
 #include "../cnic_if.h"
 #endif
 
-
 #ifdef BCM_CNIC
 #define BNX2X_MIN_MSIX_VEC_CNT 3
 #define BNX2X_MSIX_VEC_FP_START 2
@@ -129,16 +127,18 @@ void bnx2x_panic_dump(struct bnx2x *bp);
 	} while (0)
 #endif
 
+#define bnx2x_mc_addr(ha)	((ha)->addr)
 
 #define U64_LO(x)		(u32)(((u64)(x)) & 0xffffffff)
 #define U64_HI(x)		(u32)(((u64)(x)) >> 32)
 #define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))
 
 
-#define REG_ADDR(bp, offset)	(bp->regview + offset)
+#define REG_ADDR(bp, offset)	((bp->regview) + (offset))
 
 #define REG_RD(bp, offset)	readl(REG_ADDR(bp, offset))
 #define REG_RD8(bp, offset)	readb(REG_ADDR(bp, offset))
+#define REG_RD16(bp, offset)	readw(REG_ADDR(bp, offset))
 
 #define REG_WR(bp, offset, val)	writel((u32)val, REG_ADDR(bp, offset))
 #define REG_WR8(bp, offset, val)	writeb((u8)val, REG_ADDR(bp, offset))
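
The U64_LO/U64_HI/HILO_U64 trio exists because 64-bit DMA addresses are programmed into pairs of 32-bit registers. A minimal standalone sketch of the round trip (plain C, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Same splitting scheme as the U64_LO/U64_HI/HILO_U64 macros above */
    #define U64_LO(x)        ((uint32_t)(((uint64_t)(x)) & 0xffffffff))
    #define U64_HI(x)        ((uint32_t)(((uint64_t)(x)) >> 32))
    #define HILO_U64(hi, lo) ((((uint64_t)(hi)) << 32) + (lo))

    int main(void)
    {
            uint64_t dma_addr = 0x00000012345678abULL; /* example bus address */

            /* a 64-bit address becomes two 32-bit register writes... */
            uint32_t lo = U64_LO(dma_addr);
            uint32_t hi = U64_HI(dma_addr);

            /* ...and must reassemble to the original value */
            printf("lo=0x%08x hi=0x%08x roundtrip ok=%d\n",
                   lo, hi, HILO_U64(hi, lo) == dma_addr);
            return 0;
    }
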
@@ -160,6 +160,9 @@ void bnx2x_panic_dump(struct bnx2x *bp);
 			 offset, len32); \
 	} while (0)
 
+#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
+	REG_WR_DMAE(bp, offset, valp, len32)
+
 #define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
 	do { \
 		memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
@@ -175,16 +178,52 @@ void bnx2x_panic_dump(struct bnx2x *bp);
 					 offsetof(struct shmem2_region, field))
 #define SHMEM2_RD(bp, field)	REG_RD(bp, SHMEM2_ADDR(bp, field))
 #define SHMEM2_WR(bp, field, val)	REG_WR(bp, SHMEM2_ADDR(bp, field), val)
+#define MF_CFG_ADDR(bp, field)	(bp->common.mf_cfg_base + \
+				 offsetof(struct mf_cfg, field))
 
-#define MF_CFG_RD(bp, field)	SHMEM_RD(bp, mf_cfg.field)
-#define MF_CFG_WR(bp, field, val)	SHMEM_WR(bp, mf_cfg.field, val)
+#define MF_CFG_RD(bp, field)	REG_RD(bp, MF_CFG_ADDR(bp, field))
+#define MF_CFG_WR(bp, field, val)	REG_WR(bp,\
+					MF_CFG_ADDR(bp, field), (val))
 
 #define EMAC_RD(bp, reg)	REG_RD(bp, emac_base + reg)
 #define EMAC_WR(bp, reg, val)	REG_WR(bp, emac_base + reg, val)
 
+/* SP SB indices */
+
+/* General SP events - stats query, cfc delete, etc */
+#define HC_SP_INDEX_ETH_DEF_CONS		3
+
+/* EQ completions */
+#define HC_SP_INDEX_EQ_CONS			7
+
+/* iSCSI L2 */
+#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS		5
+#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS	1
+
+/**
+ * CIDs and CLIDs:
+ * The CLIDs below are for function 0; the CLID for any other
+ * function is calculated by the formula:
+ *
+ * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
+ *
+ */
+/* iSCSI L2 */
+#define BNX2X_ISCSI_ETH_CL_ID		17
+#define BNX2X_ISCSI_ETH_CID		17
+
+/** Additional rings budgeting */
+#ifdef BCM_CNIC
+#define CNIC_CONTEXT_USE		1
+#else
+#define CNIC_CONTEXT_USE		0
+#endif /* BCM_CNIC */
+
 #define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
 	AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
 
+#define SM_RX_ID			0
+#define SM_TX_ID			1
 
 /* fast path */
 
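
The per-function CLID formula in the comment above is easy to sanity-check. A hedged standalone sketch; NUM_SPECIAL_CLIENTS is an illustrative value here, the real constant comes from the firmware HSI headers:

    #include <stdio.h>

    /* illustrative only: the real constant lives in the FW HSI headers */
    #define NUM_SPECIAL_CLIENTS 18

    /* FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X */
    static int func_clid(int func_n, int func_0_clid)
    {
            return func_n * NUM_SPECIAL_CLIENTS + func_0_clid;
    }

    int main(void)
    {
            /* BNX2X_ISCSI_ETH_CL_ID is 17 for function 0 (see above) */
            int n;
            for (n = 0; n < 4; n++)
                    printf("func %d -> iSCSI L2 CLID %d\n", n, func_clid(n, 17));
            return 0;
    }
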
@@ -254,11 +293,21 @@ union db_prod {
 #define RX_SGE_MASK_LEN_MASK	(RX_SGE_MASK_LEN - 1)
 #define NEXT_SGE_MASK_ELEM(el)	(((el) + 1) & RX_SGE_MASK_LEN_MASK)
 
+union host_hc_status_block {
+	/* pointer to fp status block e1x */
+	struct host_hc_status_block_e1x *e1x_sb;
+};
 
 struct bnx2x_fastpath {
 
 	struct napi_struct	napi;
-	struct host_status_block *status_blk;
+	union host_hc_status_block	status_blk;
+	/* chip-independent shortcuts into sb structure */
+	__le16			*sb_index_values;
+	__le16			*sb_running_index;
+	/* chip-independent shortcut into rx_prods_offset memory */
+	u32			ustorm_rx_prods_offset;
+
 	dma_addr_t		status_blk_mapping;
 
 	struct sw_tx_bd		*tx_buf_ring;
@@ -288,10 +337,15 @@ struct bnx2x_fastpath {
 #define BNX2X_FP_STATE_OPEN		0xa0000
 #define BNX2X_FP_STATE_HALTING		0xb0000
 #define BNX2X_FP_STATE_HALTED		0xc0000
+#define BNX2X_FP_STATE_TERMINATING	0xd0000
+#define BNX2X_FP_STATE_TERMINATED	0xe0000
 
 	u8			index;	/* number in fp array */
 	u8			cl_id;	/* eth client id */
-	u8			sb_id;	/* status block number in HW */
+	u8			cl_qzone_id;
+	u8			fw_sb_id;	/* status block number in FW */
+	u8			igu_sb_id;	/* status block number in HW */
+	u32			cid;
 
 	union db_prod		tx_db;
 
@@ -301,8 +355,7 @@ struct bnx2x_fastpath {
 	u16			tx_bd_cons;
 	__le16			*tx_cons_sb;
 
-	__le16			fp_c_idx;
-	__le16			fp_u_idx;
+	__le16			fp_hc_idx;
 
 	u16			rx_bd_prod;
 	u16			rx_bd_cons;
@@ -312,7 +365,7 @@ struct bnx2x_fastpath {
 	/* The last maximal completed SGE */
 	u16			last_max_sge;
 	__le16			*rx_cons_sb;
-	__le16			*rx_bd_cons_sb;
+
 
 
 	unsigned long		tx_pkt,
@@ -356,6 +409,8 @@ struct bnx2x_fastpath {
 #define NUM_TX_BD		(TX_DESC_CNT * NUM_TX_RINGS)
 #define MAX_TX_BD		(NUM_TX_BD - 1)
 #define MAX_TX_AVAIL		(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
+#define INIT_JUMBO_TX_RING_SIZE	MAX_TX_AVAIL
+#define INIT_TX_RING_SIZE	MAX_TX_AVAIL
 #define NEXT_TX_IDX(x)		((((x) & MAX_TX_DESC_CNT) == \
 				  (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
 #define TX_BD(x)		((x) & MAX_TX_BD)
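
The new INIT_*_RING_SIZE defaults sit on top of the same paged-ring geometry, where NEXT_TX_IDX() skips the last BD of each page because that slot holds the next-page pointer. A standalone sketch with assumed (smaller than real) ring sizes:

    #include <stdio.h>

    /* illustrative geometry: 8 BDs per page, 4 pages (real values differ) */
    #define TX_DESC_CNT     8
    #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
    #define NUM_TX_RINGS    4
    #define NUM_TX_BD       (TX_DESC_CNT * NUM_TX_RINGS)
    #define MAX_TX_BD       (NUM_TX_BD - 1)

    /* same shape as the driver macros above */
    #define NEXT_TX_IDX(x)  ((((x) & MAX_TX_DESC_CNT) == \
                              (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
    #define TX_BD(x)        ((x) & MAX_TX_BD)

    int main(void)
    {
            unsigned idx = 0, i;

            for (i = 0; i < 10; i++) {
                    printf("%u ", TX_BD(idx)); /* 0 1 2 3 4 5 6 8 9 10 */
                    idx = NEXT_TX_IDX(idx);    /* slot 7 (next-page BD) skipped */
            }
            printf("\n");
            return 0;
    }
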
@@ -370,6 +425,8 @@ struct bnx2x_fastpath {
 #define MAX_RX_BD		(NUM_RX_BD - 1)
 #define MAX_RX_AVAIL		(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
 #define MIN_RX_AVAIL		128
+#define INIT_JUMBO_RX_RING_SIZE	MAX_RX_AVAIL
+#define INIT_RX_RING_SIZE	MAX_RX_AVAIL
 #define NEXT_RX_IDX(x)		((((x) & RX_DESC_MASK) == \
 				  (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
 #define RX_BD(x)		((x) & MAX_RX_BD)
@@ -420,11 +477,12 @@ struct bnx2x_fastpath {
 				 le32_to_cpu((bd)->addr_lo))
 #define BD_UNMAP_LEN(bd)	(le16_to_cpu((bd)->nbytes))
 
-
+#define BNX2X_DB_MIN_SHIFT	3	/* 8 bytes */
+#define BNX2X_DB_SHIFT		7	/* 128 bytes */
 #define DPM_TRIGER_TYPE		0x40
 #define DOORBELL(bp, cid, val) \
 	do { \
-		writel((u32)(val), bp->doorbells + (BCM_PAGE_SIZE * (cid)) + \
+		writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
 		       DPM_TRIGER_TYPE); \
 	} while (0)
 
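
The doorbell offset now scales with bp->db_size (derived from BNX2X_DB_SHIFT) instead of one whole BCM_PAGE_SIZE per CID. A standalone sketch of the offset arithmetic, assuming the old 4K page stride for comparison:

    #include <stdio.h>

    #define BNX2X_DB_SHIFT 7 /* 128 bytes per doorbell cell, as above */

    int main(void)
    {
            unsigned db_size = 1 << BNX2X_DB_SHIFT;
            unsigned cid;

            /* under the old scheme every CID consumed a 4K page; with a
             * 128-byte stride the same BAR covers 32x more CIDs */
            for (cid = 0; cid < 4; cid++)
                    printf("cid %u -> doorbell offset 0x%x (old: 0x%x)\n",
                           cid, cid * db_size, cid * 4096);
            return 0;
    }
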
@@ -482,31 +540,15 @@ struct bnx2x_fastpath {
 #define BNX2X_RX_SUM_FIX(cqe) \
 	BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
 
-
-#define FP_USB_FUNC_OFF		(2 + 2*HC_USTORM_SB_NUM_INDICES)
-#define FP_CSB_FUNC_OFF		(2 + 2*HC_CSTORM_SB_NUM_INDICES)
-
-#define U_SB_ETH_RX_CQ_INDEX	HC_INDEX_U_ETH_RX_CQ_CONS
-#define U_SB_ETH_RX_BD_INDEX	HC_INDEX_U_ETH_RX_BD_CONS
-#define C_SB_ETH_TX_CQ_INDEX	HC_INDEX_C_ETH_TX_CQ_CONS
+#define U_SB_ETH_RX_CQ_INDEX	1
+#define U_SB_ETH_RX_BD_INDEX	2
+#define C_SB_ETH_TX_CQ_INDEX	5
 
 #define BNX2X_RX_SB_INDEX \
-	(&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_CQ_INDEX])
-
-#define BNX2X_RX_SB_BD_INDEX \
-	(&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_BD_INDEX])
-
-#define BNX2X_RX_SB_INDEX_NUM \
-		(((U_SB_ETH_RX_CQ_INDEX << \
-			USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
-		  USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER) | \
-		 ((U_SB_ETH_RX_BD_INDEX << \
-			USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT) & \
-		  USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER))
+	(&fp->sb_index_values[U_SB_ETH_RX_CQ_INDEX])
 
 #define BNX2X_TX_SB_INDEX \
-	(&fp->status_blk->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX])
-
+	(&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX])
 
 /* end of fast path */
 
@@ -553,10 +595,16 @@ struct bnx2x_common {
 
 	u32			shmem_base;
 	u32			shmem2_base;
+	u32			mf_cfg_base;
 
 	u32			hw_config;
 
 	u32			bc_ver;
+
+	u8			int_block;
+#define INT_BLOCK_HC		0
+	u8			chip_port_mode;
+#define CHIP_PORT_MODE_NONE	0x2
 };
 
 
@@ -590,27 +638,98 @@ struct bnx2x_port {
 
 /* end of port */
 
+/* e1h Classification CAM line allocations */
+enum {
+	CAM_ETH_LINE = 0,
+	CAM_ISCSI_ETH_LINE,
+	CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE
+};
 
+#define BNX2X_VF_ID_INVALID	0xFF
 
-#ifdef BCM_CNIC
-#define MAX_CONTEXT		15
-#else
-#define MAX_CONTEXT		16
-#endif
+/*
+ * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
+ * controlled by the number of fast-path status blocks supported by the
+ * device (HW/FW). Each fast-path status block (FP-SB) aka non-default
+ * status block represents an independent interrupt context that can
+ * serve a regular L2 networking queue. However special L2 queues such
+ * as the FCoE queue do not require a FP-SB, and other components like
+ * the CNIC may consume FP-SBs, reducing the number of possible L2 queues.
+ *
+ * If the maximum number of FP-SB available is X then:
+ * a. If CNIC is supported it consumes 1 FP-SB thus the max number of
+ *    regular L2 queues is Y=X-1
+ * b. in MF mode the actual number of L2 queues is Y = (X-1)/MF_factor
+ * c. If the FCoE L2 queue is supported the actual number of L2 queues
+ *    is Y+1
+ * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
+ *    slow-path interrupts) or Y+2 if CNIC is supported (one additional
+ *    FP interrupt context for the CNIC).
+ * e. The number of HW contexts (CID count) is always X or X+1 if FCoE
+ *    L2 queue is supported. The CID for the FCoE L2 queue is always X.
+ */
+
+#define FP_SB_MAX_E1x		16	/* fast-path interrupt contexts E1x */
+#define MAX_CONTEXT		FP_SB_MAX_E1x
+
+/*
+ * The cid_cnt parameter below refers to the value returned by
+ * the 'bnx2x_get_l2_cid_count()' routine
+ */
+
+/*
+ * The number of FP contexts allocated by the driver == max number of regular
+ * L2 queues + 1 for the FCoE L2 queue
+ */
+#define L2_FP_COUNT(cid_cnt)	((cid_cnt) - CNIC_CONTEXT_USE)
 
 union cdu_context {
 	struct eth_context eth;
 	char pad[1024];
 };
 
+/* CDU host DB constants */
+#define CDU_ILT_PAGE_SZ_HW	3
+#define CDU_ILT_PAGE_SZ		(4096 << CDU_ILT_PAGE_SZ_HW) /* 32K */
+#define ILT_PAGE_CIDS		(CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
+
+#ifdef BCM_CNIC
+#define CNIC_ISCSI_CID_MAX	256
+#define CNIC_CID_MAX		(CNIC_ISCSI_CID_MAX)
+#define CNIC_ILT_LINES		DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
+#endif
+
+#define QM_ILT_PAGE_SZ_HW	3
+#define QM_ILT_PAGE_SZ		(4096 << QM_ILT_PAGE_SZ_HW) /* 32K */
+#define QM_CID_ROUND		1024
+
+#ifdef BCM_CNIC
+/* TM (timers) host DB constants */
+#define TM_ILT_PAGE_SZ_HW	2
+#define TM_ILT_PAGE_SZ		(4096 << TM_ILT_PAGE_SZ_HW) /* 16K */
+/* #define TM_CONN_NUM		(CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
+#define TM_CONN_NUM		1024
+#define TM_ILT_SZ		(8 * TM_CONN_NUM)
+#define TM_ILT_LINES		DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
+
+/* SRC (Searcher) host DB constants */
+#define SRC_ILT_PAGE_SZ_HW	3
+#define SRC_ILT_PAGE_SZ		(4096 << SRC_ILT_PAGE_SZ_HW) /* 32K */
+#define SRC_HASH_BITS		10
+#define SRC_CONN_NUM		(1 << SRC_HASH_BITS) /* 1024 */
+#define SRC_ILT_SZ		(sizeof(struct src_ent) * SRC_CONN_NUM)
+#define SRC_T2_SZ		SRC_ILT_SZ
+#define SRC_ILT_LINES		DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
+#endif
+
 #define MAX_DMAE_C		8
 
 /* DMA memory not used in fastpath */
 struct bnx2x_slowpath {
-	union cdu_context		context[MAX_CONTEXT];
 	struct eth_stats_query		fw_stats;
 	struct mac_configuration_cmd	mac_config;
 	struct mac_configuration_cmd	mcast_config;
+	struct client_init_ramrod_data	client_init_data;
 
 	/* used by dmae command executer */
 	struct dmae_command		dmae[MAX_DMAE_C];
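
A worked example of the FP-SB budgeting rules (a), (d) and (e) from the comment above, assuming an E1x part with CNIC configured:

    #include <stdio.h>

    #define FP_SB_MAX_E1x    16
    #define CNIC_CONTEXT_USE 1 /* assume BCM_CNIC is configured */

    int main(void)
    {
            int x = FP_SB_MAX_E1x;
            int y = x - CNIC_CONTEXT_USE; /* (a) CNIC eats one FP-SB */

            printf("max regular L2 queues: %d\n", y);
            printf("MSIX vectors:          %d (Y + slow-path + CNIC)\n", y + 2); /* (d) */
            printf("HW contexts (CIDs):    %d\n", x);                            /* (e) */
            return 0;
    }
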
@@ -638,37 +757,71 @@ struct attn_route {
 	u32 sig[4];
 };
 
+struct iro {
+	u32 base;
+	u16 m1;
+	u16 m2;
+	u16 m3;
+	u16 size;
+};
+
+struct hw_context {
+	union cdu_context *vcxt;
+	dma_addr_t cxt_mapping;
+	size_t size;
+};
+
+/* forward */
+struct bnx2x_ilt;
+
 typedef enum {
 	BNX2X_RECOVERY_DONE,
 	BNX2X_RECOVERY_INIT,
 	BNX2X_RECOVERY_WAIT,
 } bnx2x_recovery_state_t;
 
+/**
+ * Event queue (EQ or event ring) MC hsi
+ * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2
+ */
+#define NUM_EQ_PAGES		1
+#define EQ_DESC_CNT_PAGE	(BCM_PAGE_SIZE / sizeof(union event_ring_elem))
+#define EQ_DESC_MAX_PAGE	(EQ_DESC_CNT_PAGE - 1)
+#define NUM_EQ_DESC		(EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
+#define EQ_DESC_MASK		(NUM_EQ_DESC - 1)
+#define MAX_EQ_AVAIL		(EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
+
+/* depends on EQ_DESC_CNT_PAGE being a power of 2 */
+#define NEXT_EQ_IDX(x)		((((x) & EQ_DESC_MAX_PAGE) == \
+				  (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
+
+/* depends on the above and on NUM_EQ_PAGES being a power of 2 */
+#define EQ_DESC(x)		((x) & EQ_DESC_MASK)
+
+#define BNX2X_EQ_INDEX \
+	(&bp->def_status_blk->sp_sb.\
+	index_values[HC_SP_INDEX_EQ_CONS])
+
 struct bnx2x {
 	/* Fields used in the tx and intr/napi performance paths
 	 * are grouped together in the beginning of the structure
 	 */
-	struct bnx2x_fastpath	fp[MAX_CONTEXT];
+	struct bnx2x_fastpath	*fp;
 	void __iomem		*regview;
 	void __iomem		*doorbells;
-#ifdef BCM_CNIC
-#define BNX2X_DB_SIZE		(18*BCM_PAGE_SIZE)
-#else
-#define BNX2X_DB_SIZE		(16*BCM_PAGE_SIZE)
-#endif
+	u16			db_size;
 
 	struct net_device	*dev;
 	struct pci_dev		*pdev;
 
+	struct iro		*iro_arr;
+#define IRO			(bp->iro_arr)
+
 	atomic_t		intr_sem;
 
 	bnx2x_recovery_state_t	recovery_state;
 	int			is_leader;
-#ifdef BCM_CNIC
-	struct msix_entry	msix_table[MAX_CONTEXT+2];
-#else
-	struct msix_entry	msix_table[MAX_CONTEXT+1];
-#endif
+	struct msix_entry	*msix_table;
 #define INT_MODE_INTx		1
 #define INT_MODE_MSI		2
 
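
NEXT_EQ_IDX()/EQ_DESC() follow the same skip-the-last-slot pattern as the TX/RX ring macros. A standalone sketch; the 32-byte event_ring_elem size is assumed here for illustration:

    #include <stdio.h>

    /* assumed sizes: 4K pages, 32-byte event ring elements */
    #define BCM_PAGE_SIZE    4096
    #define EQ_ELEM_SIZE     32
    #define NUM_EQ_PAGES     1
    #define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / EQ_ELEM_SIZE) /* 128, power of 2 */
    #define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1)
    #define NUM_EQ_DESC      (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
    #define EQ_DESC_MASK     (NUM_EQ_DESC - 1)

    #define NEXT_EQ_IDX(x)   ((((x) & EQ_DESC_MAX_PAGE) == \
                               (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
    #define EQ_DESC(x)       ((x) & EQ_DESC_MASK)

    int main(void)
    {
            unsigned prod = EQ_DESC_CNT_PAGE - 3, i; /* approach the page end */

            for (i = 0; i < 4; i++) {
                    printf("%u ", EQ_DESC(prod)); /* 125 126 0 1 */
                    prod = NEXT_EQ_IDX(prod);     /* slot 127 (next-page) skipped */
            }
            printf("\n");
            return 0;
    }
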
@@ -680,7 +833,8 @@ struct bnx2x {
 
 	u32			rx_csum;
 	u32			rx_buf_size;
-#define ETH_OVREHEAD		(ETH_HLEN + 8)	/* 8 for CRC + VLAN */
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD		(ETH_HLEN + 8 + 8)
 #define ETH_MIN_PACKET_SIZE		60
 #define ETH_MAX_PACKET_SIZE		1500
 #define ETH_MAX_JUMBO_PACKET_SIZE	9600
@@ -689,13 +843,12 @@ struct bnx2x {
 #define BNX2X_RX_ALIGN_SHIFT	((L1_CACHE_SHIFT < 8) ? \
 				 L1_CACHE_SHIFT : 8)
 #define BNX2X_RX_ALIGN		(1 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_PXP_DRAM_ALIGN	(BNX2X_RX_ALIGN_SHIFT - 5)
 
-	struct host_def_status_block *def_status_blk;
-#define DEF_SB_ID		16
-	__le16			def_c_idx;
-	__le16			def_u_idx;
-	__le16			def_x_idx;
-	__le16			def_t_idx;
+	struct host_sp_status_block *def_status_blk;
+#define DEF_SB_IGU_ID		16
+#define DEF_SB_ID		HC_SP_SB_ID
+	__le16			def_idx;
 	__le16			def_att_idx;
 	u32			attn_state;
 	struct attn_route	attn_group[MAX_DYNAMIC_ATTN_GRPS];
@@ -711,6 +864,13 @@ struct bnx2x {
 	/* used to synchronize spq accesses */
 	spinlock_t		spq_lock;
 
+	/* event queue */
+	union event_ring_elem	*eq_ring;
+	dma_addr_t		eq_mapping;
+	u16			eq_prod;
+	u16			eq_cons;
+	__le16			*eq_cons_sb;
+
 	/* Flags for marking that there is a STAT_QUERY or
 	   SET_MAC ramrod pending */
 	int			stats_pending;
@@ -737,6 +897,8 @@ struct bnx2x {
 #define MF_FUNC_DIS		0x1000
 
 	int			func;
+	int			base_fw_ndsb;
+
 #define BP_PORT(bp)		(bp->func % PORT_MAX)
 #define BP_FUNC(bp)		(bp->func)
 #define BP_E1HVN(bp)		(bp->func >> 1)
@@ -801,6 +963,7 @@ struct bnx2x {
 #define BNX2X_STATE_CLOSING_WAIT4_HALT	0x4000
 #define BNX2X_STATE_CLOSING_WAIT4_DELETE	0x5000
 #define BNX2X_STATE_CLOSING_WAIT4_UNLOAD	0x6000
+#define BNX2X_STATE_FUNC_STARTED	0x7000
 #define BNX2X_STATE_DIAG		0xe000
 #define BNX2X_STATE_ERROR		0xf000
 
@@ -809,6 +972,15 @@ struct bnx2x {
 	int			disable_tpa;
 	int			int_mode;
 
+	struct tstorm_eth_mac_filter_config	mac_filters;
+#define BNX2X_ACCEPT_NONE		0x0000
+#define BNX2X_ACCEPT_UNICAST		0x0001
+#define BNX2X_ACCEPT_MULTICAST		0x0002
+#define BNX2X_ACCEPT_ALL_UNICAST	0x0004
+#define BNX2X_ACCEPT_ALL_MULTICAST	0x0008
+#define BNX2X_ACCEPT_BROADCAST		0x0010
+#define BNX2X_PROMISCUOUS_MODE		0x10000
+
 	u32			rx_mode;
 #define BNX2X_RX_MODE_NONE		0
 #define BNX2X_RX_MODE_NORMAL		1
@@ -817,12 +989,25 @@ struct bnx2x {
 #define BNX2X_MAX_MULTICAST		64
 #define BNX2X_MAX_EMUL_MULTI		16
 
-	u32			rx_mode_cl_mask;
-
+	u8			igu_dsb_id;
+	u8			igu_base_sb;
+	u8			igu_sb_cnt;
 	dma_addr_t		def_status_blk_mapping;
 
 	struct bnx2x_slowpath	*slowpath;
 	dma_addr_t		slowpath_mapping;
+	struct hw_context	context;
+
+	struct bnx2x_ilt	*ilt;
+#define BP_ILT(bp)		((bp)->ilt)
+#define ILT_MAX_LINES		128
+
+	int			l2_cid_count;
+#define L2_ILT_LINES(bp)	(DIV_ROUND_UP((bp)->l2_cid_count, \
+					      ILT_PAGE_CIDS))
+#define BNX2X_DB_SIZE(bp)	((bp)->l2_cid_count * (1 << BNX2X_DB_SHIFT))
+
+	int			qm_cid_count;
 
 	int			dropless_fc;
 
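
L2_ILT_LINES() and the new BNX2X_DB_SIZE(bp) are both simple functions of l2_cid_count. A worked example using the CDU constants defined earlier (union cdu_context is padded to 1024 bytes; the cid count is an assumed value):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* from the CDU constants above: 32K ILT pages, 1K per cdu_context */
    #define CDU_ILT_PAGE_SZ (4096 << 3)               /* 32K */
    #define ILT_PAGE_CIDS   (CDU_ILT_PAGE_SZ / 1024)  /* 32 CIDs per line */
    #define BNX2X_DB_SHIFT  7

    int main(void)
    {
            int l2_cid_count = 17; /* e.g. 16 L2 queues + 1 CNIC context */

            printf("ILT lines:     %d\n",
                   DIV_ROUND_UP(l2_cid_count, ILT_PAGE_CIDS));
            printf("doorbell size: %d bytes\n",
                   l2_cid_count * (1 << BNX2X_DB_SHIFT));
            return 0;
    }
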
@@ -842,9 +1027,10 @@ struct bnx2x {
 	void			*cnic_data;
 	u32			cnic_tag;
 	struct cnic_eth_dev	cnic_eth_dev;
-	struct host_status_block *cnic_sb;
+	union host_hc_status_block	cnic_sb;
 	dma_addr_t		cnic_sb_mapping;
-#define CNIC_SB_ID(bp)		BP_L_ID(bp)
+#define CNIC_SB_ID(bp)		((bp)->base_fw_ndsb + BP_L_ID(bp))
+#define CNIC_IGU_SB_ID(bp)	((bp)->igu_base_sb)
 	struct eth_spe		*cnic_kwq;
 	struct eth_spe		*cnic_kwq_prod;
 	struct eth_spe		*cnic_kwq_cons;
@@ -914,12 +1100,167 @@ struct bnx2x {
 	const struct firmware	*firmware;
 };
 
+/**
+ * Init queue/func interface
+ */
+/* queue init flags */
+#define QUEUE_FLG_TPA		0x0001
+#define QUEUE_FLG_CACHE_ALIGN	0x0002
+#define QUEUE_FLG_STATS		0x0004
+#define QUEUE_FLG_OV		0x0008
+#define QUEUE_FLG_VLAN		0x0010
+#define QUEUE_FLG_COS		0x0020
+#define QUEUE_FLG_HC		0x0040
+#define QUEUE_FLG_DHC		0x0080
+#define QUEUE_FLG_OOO		0x0100
+
+#define QUEUE_DROP_IP_CS_ERR	TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR
+#define QUEUE_DROP_TCP_CS_ERR	TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR
+#define QUEUE_DROP_TTL0		TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0
+#define QUEUE_DROP_UDP_CS_ERR	TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR
+
+
+
+/* rss capabilities */
+#define RSS_IPV4_CAP		0x0001
+#define RSS_IPV4_TCP_CAP	0x0002
+#define RSS_IPV6_CAP		0x0004
+#define RSS_IPV6_TCP_CAP	0x0008
 
 #define BNX2X_MAX_QUEUES(bp)	(IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
 					      : MAX_CONTEXT)
 #define BNX2X_NUM_QUEUES(bp)	(bp->num_queues)
 #define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 1)
 
+
+#define RSS_IPV4_CAP_MASK \
+	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
+
+#define RSS_IPV4_TCP_CAP_MASK \
+	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY
+
+#define RSS_IPV6_CAP_MASK \
+	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY
+
+#define RSS_IPV6_TCP_CAP_MASK \
+	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
+
+/* func init flags */
+#define FUNC_FLG_RSS		0x0001
+#define FUNC_FLG_STATS		0x0002
+/* removed  FUNC_FLG_UNMATCHED	0x0004 */
+#define FUNC_FLG_TPA		0x0008
+#define FUNC_FLG_SPQ		0x0010
+#define FUNC_FLG_LEADING	0x0020	/* PF only */
+
+#define FUNC_CONFIG(flgs)	((flgs) & (FUNC_FLG_RSS | FUNC_FLG_TPA | \
+					   FUNC_FLG_LEADING))
+
+struct rxq_pause_params {
+	u16		bd_th_lo;
+	u16		bd_th_hi;
+	u16		rcq_th_lo;
+	u16		rcq_th_hi;
+	u16		sge_th_lo;	/* valid iff QUEUE_FLG_TPA */
+	u16		sge_th_hi;	/* valid iff QUEUE_FLG_TPA */
+	u16		pri_map;
+};
+
+struct bnx2x_rxq_init_params {
+	/* cxt */
+	struct eth_context *cxt;
+
+	/* dma */
+	dma_addr_t	dscr_map;
+	dma_addr_t	sge_map;
+	dma_addr_t	rcq_map;
+	dma_addr_t	rcq_np_map;
+
+	u16		flags;
+	u16		drop_flags;
+	u16		mtu;
+	u16		buf_sz;
+	u16		fw_sb_id;
+	u16		cl_id;
+	u16		spcl_id;
+	u16		cl_qzone_id;
+
+	/* valid iff QUEUE_FLG_STATS */
+	u16		stat_id;
+
+	/* valid iff QUEUE_FLG_TPA */
+	u16		tpa_agg_sz;
+	u16		sge_buf_sz;
+	u16		max_sges_pkt;
+
+	/* valid iff QUEUE_FLG_CACHE_ALIGN */
+	u8		cache_line_log;
+
+	u8		sb_cq_index;
+	u32		cid;
+
+	/* desired interrupts per sec. valid iff QUEUE_FLG_HC */
+	u32		hc_rate;
+};
+
+struct bnx2x_txq_init_params {
+	/* cxt */
+	struct eth_context *cxt;
+
+	/* dma */
+	dma_addr_t	dscr_map;
+
+	u16		flags;
+	u16		fw_sb_id;
+	u8		sb_cq_index;
+	u8		cos;		/* valid iff QUEUE_FLG_COS */
+	u16		stat_id;	/* valid iff QUEUE_FLG_STATS */
+	u16		traffic_type;
+	u32		cid;
+	u16		hc_rate;	/* desired interrupts per sec. */
+					/* valid iff QUEUE_FLG_HC */
+
+};
+
+struct bnx2x_client_ramrod_params {
+	int *pstate;
+	int state;
+	u16 index;
+	u16 cl_id;
+	u32 cid;
+	u8 poll;
+#define CLIENT_IS_LEADING_RSS	0x02
+	u8 flags;
+};
+
+struct bnx2x_client_init_params {
+	struct rxq_pause_params pause;
+	struct bnx2x_rxq_init_params rxq_params;
+	struct bnx2x_txq_init_params txq_params;
+	struct bnx2x_client_ramrod_params ramrod_params;
+};
+
+struct bnx2x_rss_params {
+	int	mode;
+	u16	cap;
+	u16	result_mask;
+};
+
+struct bnx2x_func_init_params {
+
+	/* rss */
+	struct bnx2x_rss_params *rss;	/* valid iff FUNC_FLG_RSS */
+
+	/* dma */
+	dma_addr_t	fw_stat_map;	/* valid iff FUNC_FLG_STATS */
+	dma_addr_t	spq_map;	/* valid iff FUNC_FLG_SPQ */
+
+	u16		func_flgs;
+	u16		func_id;	/* abs fid */
+	u16		pf_id;
+	u16		spq_prod;	/* valid iff FUNC_FLG_SPQ */
+};
+
 #define for_each_queue(bp, var) \
 			for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
 #define for_each_nondefault_queue(bp, var) \
@@ -957,6 +1298,38 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 
 	return val;
 }
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+	do { \
+		x = pci_alloc_consistent(bp->pdev, size, y); \
+		if (x) \
+			memset(x, 0, size); \
+	} while (0)
+
+#define BNX2X_ILT_FREE(x, y, size) \
+	do { \
+		if (x) { \
+			pci_free_consistent(bp->pdev, size, x, y); \
+			x = NULL; \
+			y = 0; \
+		} \
+	} while (0)
+
+#define ILOG2(x)	(ilog2((x)))
+
+#define ILT_NUM_PAGE_ENTRIES	(3072)
+/* In 57710/11 we use the whole table since we have 8 functions.
+ */
+#define ILT_PER_FUNC	(ILT_NUM_PAGE_ENTRIES/8)
+
+#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
+/*
+ * The phys address is shifted right 12 bits and a 1=valid bit is
+ * added to the 53rd bit;
+ * then since this is a wide register(TM)
+ * we split it into two 32 bit writes
+ */
+#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
+#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
 
 
 /* load/unload mode */
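
ONCHIP_ADDR1/2 can be checked in isolation. A standalone sketch of the split described in the comment above (plain C, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* same construction as ONCHIP_ADDR1/2, with standalone types */
    static uint32_t onchip_addr1(uint64_t x) { return (uint32_t)((x >> 12) & 0xFFFFFFFF); }
    static uint32_t onchip_addr2(uint64_t x) { return (uint32_t)((1 << 20) | (x >> 44)); }

    int main(void)
    {
            uint64_t phys = 0x0000123456789000ULL; /* page-aligned example */

            /* low word: address bits 12..43; high word: bits 44..63 plus
             * the valid bit at position 20 (bit 52 of the 64-bit pair,
             * i.e. the "53rd bit" of the comment) */
            printf("ADDR1=0x%08x ADDR2=0x%08x\n",
                   onchip_addr1(phys), onchip_addr2(phys));
            return 0;
    }
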
@@ -1032,7 +1405,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define MAX_SP_DESC_CNT		(SP_DESC_CNT - 1)
 
 
-#define BNX2X_BTR		1
+#define BNX2X_BTR		4
 #define MAX_SPQ_PENDING		8
 
 
@@ -1149,20 +1522,22 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 				 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
 #define MULTI_MASK		0x7f
 
-
-#define DEF_USB_FUNC_OFF	(2 + 2*HC_USTORM_DEF_SB_NUM_INDICES)
-#define DEF_CSB_FUNC_OFF	(2 + 2*HC_CSTORM_DEF_SB_NUM_INDICES)
-#define DEF_XSB_FUNC_OFF	(2 + 2*HC_XSTORM_DEF_SB_NUM_INDICES)
-#define DEF_TSB_FUNC_OFF	(2 + 2*HC_TSTORM_DEF_SB_NUM_INDICES)
-
-#define C_DEF_SB_SP_INDEX	HC_INDEX_DEF_C_ETH_SLOW_PATH
-
 #define BNX2X_SP_DSB_INDEX \
-(&bp->def_status_blk->c_def_status_block.index_values[C_DEF_SB_SP_INDEX])
+	(&bp->def_status_blk->sp_sb.\
+	index_values[HC_SP_INDEX_ETH_DEF_CONS])
+#define SET_FLAG(value, mask, flag) \
+	do {\
+		(value) &= ~(mask);\
+		(value) |= ((flag) << (mask##_SHIFT));\
+	} while (0)
 
+#define GET_FLAG(value, mask) \
+	(((value) &= (mask)) >> (mask##_SHIFT))
 
 #define CAM_IS_INVALID(x) \
-(x.target_table_entry.flags == TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
+	(GET_FLAG(x.flags, \
+		  MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
+	 (T_ETH_MAC_COMMAND_INVALIDATE))
 
 #define CAM_INVALIDATE(x) \
 	(x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
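
SET_FLAG()/GET_FLAG() lean on the HSI convention that every field mask FOO has a matching FOO_SHIFT. A standalone sketch with a made-up field; note that GET_FLAG(), as written, clobbers its value argument:

    #include <stdio.h>

    /* hypothetical field, mimicking the FW HSI mask/_SHIFT pairing */
    #define MY_FIELD	0x00f0
    #define MY_FIELD_SHIFT	4

    #define SET_FLAG(value, mask, flag) \
            do { \
                    (value) &= ~(mask); \
                    (value) |= ((flag) << (mask##_SHIFT)); \
            } while (0)

    #define GET_FLAG(value, mask) \
            (((value) &= (mask)) >> (mask##_SHIFT))

    int main(void)
    {
            unsigned v = 0xffff;

            SET_FLAG(v, MY_FIELD, 0x9);     /* clear bits 4-7, write 9 there */
            printf("after SET_FLAG: 0x%04x\n", v);  /* 0xff9f */

            /* GET_FLAG uses &=, so v is reduced to the field afterwards */
            printf("GET_FLAG:       0x%x\n", GET_FLAG(v, MY_FIELD)); /* 0x9 */
            return 0;
    }
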
@@ -1181,6 +1556,14 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define BNX2X_VPD_LEN		128
 #define VENDOR_ID_LEN		4
 
+/* Congestion management fairness mode */
+#define CMNG_FNS_NONE		0
+#define CMNG_FNS_MINMAX		1
+
+#define HC_SEG_ACCESS_DEF	0	/* Driver decision 0-3 */
+#define HC_SEG_ACCESS_ATTN	4
+#define HC_SEG_ACCESS_NORM	0	/* Driver decision 0-1 */
+
 #ifdef BNX2X_MAIN
 #define BNX2X_EXTERN
 #else
@@ -1195,4 +1578,9 @@ extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
 
 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
 
+#define WAIT_RAMROD_POLL	0x01
+#define WAIT_RAMROD_COMMON	0x02
+
+int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+		      int *state_p, int flags);
 #endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 8d42067a6989..bcc4a8f4677b 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -27,6 +27,8 @@
 #include <linux/if_vlan.h>
 #endif
 
+#include "bnx2x_init.h"
+
 static int bnx2x_poll(struct napi_struct *napi, int budget);
 
 /* free skb in the packet ring at pos idx
@@ -190,14 +192,16 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
 
 	/* First mark all used pages */
 	for (i = 0; i < sge_len; i++)
-		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
+		SGE_MASK_CLEAR_BIT(fp,
+			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
 
 	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
-	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
+	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 
 	/* Here we assume that the last SGE index is the biggest */
 	prefetch((void *)(fp->sge_mask));
-	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
+	bnx2x_update_last_max_sge(fp,
+		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 
 	last_max = RX_SGE(fp->last_max_sge);
 	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
@@ -298,7 +302,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 	/* Run through the SGL and compose the fragmented skb */
 	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
-		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
+		u16 sge_idx =
+			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
 
 		/* FW gives the indices of the SGE as if the ring is an array
 		   (meaning that "next" element will consume 2 indices) */
@@ -394,8 +399,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		if (!bnx2x_fill_frag_skb(bp, fp, skb,
 					 &cqe->fast_path_cqe, cqe_idx)) {
 #ifdef BCM_VLAN
-			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
-			    (!is_not_hwaccel_vlan_cqe))
+			if ((bp->vlgrp != NULL) &&
+			    (le16_to_cpu(cqe->fast_path_cqe.
+			    pars_flags.flags) & PARSING_FLAGS_VLAN))
 				vlan_gro_receive(&fp->napi, bp->vlgrp,
 						 le16_to_cpu(cqe->fast_path_cqe.
 						 vlan_tag), skb);
@@ -686,9 +692,10 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 		return IRQ_HANDLED;
 	}
 
-	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
-	   fp->index, fp->sb_id);
-	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
+	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
+			 "[fp %d fw_sd %d igusb %d]\n",
+	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
+	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -698,8 +705,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 	/* Handle Rx and Tx according to MSI-X vector */
 	prefetch(fp->rx_cons_sb);
 	prefetch(fp->tx_cons_sb);
-	prefetch(&fp->status_blk->u_status_block.status_block_index);
-	prefetch(&fp->status_blk->c_status_block.status_block_index);
+	prefetch(&fp->sb_running_index[SM_RX_ID]);
 	napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 
 	return IRQ_HANDLED;
@@ -774,27 +780,73 @@ void bnx2x_link_report(struct bnx2x *bp)
 	}
 }
 
+/* Returns the number of actually allocated BDs */
+static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
+				     int rx_ring_size)
+{
+	struct bnx2x *bp = fp->bp;
+	u16 ring_prod, cqe_ring_prod;
+	int i;
+
+	fp->rx_comp_cons = 0;
+	cqe_ring_prod = ring_prod = 0;
+	for (i = 0; i < rx_ring_size; i++) {
+		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+			BNX2X_ERR("was only able to allocate "
+				  "%d rx skbs on queue[%d]\n", i, fp->index);
+			fp->eth_q_stats.rx_skb_alloc_failed++;
+			break;
+		}
+		ring_prod = NEXT_RX_IDX(ring_prod);
+		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
+		WARN_ON(ring_prod <= i);
+	}
+
+	fp->rx_bd_prod = ring_prod;
+	/* Limit the CQE producer by the CQE ring size */
+	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+				 cqe_ring_prod);
+	fp->rx_pkt = fp->rx_calls = 0;
+
+	return i;
+}
+
+static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
+{
+	struct bnx2x *bp = fp->bp;
+	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
+			   MAX_RX_AVAIL/bp->num_queues;
+
+	rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
+
+	bnx2x_alloc_rx_bds(fp, rx_ring_size);
+
+	/* Warning!
+	 * this will generate an interrupt (to the TSTORM)
+	 * must only be done after chip is initialized
+	 */
+	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+			     fp->rx_sge_prod);
+}
+
 void bnx2x_init_rx_rings(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
 	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
 					      ETH_MAX_AGGREGATION_QUEUES_E1H;
-	u16 ring_prod, cqe_ring_prod;
+	u16 ring_prod;
 	int i, j;
-	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
-			   MAX_RX_AVAIL/bp->num_queues;
 
-	rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
+	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
+		BNX2X_FW_IP_HDR_ALIGN_PAD;
 
-	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
 	DP(NETIF_MSG_IFUP,
 	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
 
-	if (bp->flags & TPA_ENABLE_FLAG) {
-
-		for_each_queue(bp, j) {
-			struct bnx2x_fastpath *fp = &bp->fp[j];
+	for_each_queue(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
 
+		if (!fp->disable_tpa) {
 			for (i = 0; i < max_agg_queues; i++) {
 				fp->tpa_pool[i].skb =
 				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
@@ -812,6 +864,35 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 					   mapping, 0);
 				fp->tpa_state[i] = BNX2X_TPA_STOP;
 			}
+
+			/* "next page" elements initialization */
+			bnx2x_set_next_page_sgl(fp);
+
+			/* set SGEs bit mask */
+			bnx2x_init_sge_ring_bit_mask(fp);
+
+			/* Allocate SGEs and initialize the ring elements */
+			for (i = 0, ring_prod = 0;
+			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
+
+				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
+					BNX2X_ERR("was only able to allocate "
+						  "%d rx sges\n", i);
+					BNX2X_ERR("disabling TPA for"
+						  " queue[%d]\n", j);
+					/* Cleanup already allocated elements */
+					bnx2x_free_rx_sge_range(bp,
+								fp, ring_prod);
+					bnx2x_free_tpa_pool(bp,
+							    fp, max_agg_queues);
+					fp->disable_tpa = 1;
+					ring_prod = 0;
+					break;
+				}
+				ring_prod = NEXT_SGE_IDX(ring_prod);
+			}
+
+			fp->rx_sge_prod = ring_prod;
 		}
 	}
 
@@ -819,98 +900,15 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		fp->rx_bd_cons = 0;
-		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
-		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
-
-		/* "next page" elements initialization */
-		/* SGE ring */
-		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
-			struct eth_rx_sge *sge;
-
-			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
-			sge->addr_hi =
-				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
-					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
-			sge->addr_lo =
-				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
-					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
-		}
 
-		bnx2x_init_sge_ring_bit_mask(fp);
-
-		/* RX BD ring */
-		for (i = 1; i <= NUM_RX_RINGS; i++) {
-			struct eth_rx_bd *rx_bd;
-
-			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
-			rx_bd->addr_hi =
-				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
-					BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
-			rx_bd->addr_lo =
-				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
-					BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
-		}
+		bnx2x_set_next_page_rx_bd(fp);
 
 		/* CQ ring */
-		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
-			struct eth_rx_cqe_next_page *nextpg;
-
-			nextpg = (struct eth_rx_cqe_next_page *)
-				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
-			nextpg->addr_hi =
-				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
-					BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
-			nextpg->addr_lo =
-				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
-					BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
-		}
-
-		/* Allocate SGEs and initialize the ring elements */
-		for (i = 0, ring_prod = 0;
-		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
-
-			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
-				BNX2X_ERR("was only able to allocate "
-					  "%d rx sges\n", i);
-				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
-				/* Cleanup already allocated elements */
-				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
-				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
-				fp->disable_tpa = 1;
-				ring_prod = 0;
-				break;
-			}
-			ring_prod = NEXT_SGE_IDX(ring_prod);
-		}
-		fp->rx_sge_prod = ring_prod;
+		bnx2x_set_next_page_rx_cq(fp);
 
 		/* Allocate BDs and initialize BD ring */
-		fp->rx_comp_cons = 0;
-		cqe_ring_prod = ring_prod = 0;
-		for (i = 0; i < rx_ring_size; i++) {
-			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
-				BNX2X_ERR("was only able to allocate "
-					  "%d rx skbs on queue[%d]\n", i, j);
-				fp->eth_q_stats.rx_skb_alloc_failed++;
-				break;
-			}
-			ring_prod = NEXT_RX_IDX(ring_prod);
-			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
-			WARN_ON(ring_prod <= i);
-		}
+		bnx2x_alloc_rx_bd_ring(fp);
 
-		fp->rx_bd_prod = ring_prod;
-		/* must not have more available CQEs than BDs */
-		fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
-				       cqe_ring_prod);
-		fp->rx_pkt = fp->rx_calls = 0;
-
-		/* Warning!
-		 * this will generate an interrupt (to the TSTORM)
-		 * must only be done after chip is initialized
-		 */
-		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
-				     fp->rx_sge_prod);
 		if (j != 0)
 			continue;
 
@@ -921,6 +919,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
 		       U64_HI(fp->rx_comp_mapping));
 	}
+
 }
 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 {
@@ -1252,6 +1251,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	if (rc)
 		return rc;
 
+	/* must be called before memory allocation and HW init */
+	bnx2x_ilt_set_info(bp);
+
 	if (bnx2x_alloc_mem(bp)) {
 		bnx2x_free_irq(bp, true);
 		return -ENOMEM;
@@ -1339,6 +1341,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		goto load_error2;
 	}
 
+	if (rc) {
+		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+		goto load_error2;
+	}
+
 	/* Setup NIC internals and enable interrupts */
 	bnx2x_nic_init(bp, load_code);
 
@@ -1360,7 +1367,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
 
-	rc = bnx2x_setup_leading(bp);
+	rc = bnx2x_func_start(bp);
+	if (rc) {
+		BNX2X_ERR("Function start failed!\n");
+#ifndef BNX2X_STOP_ON_ERROR
+		goto load_error3;
+#else
+		bp->panic = 1;
+		return -EBUSY;
+#endif
+	}
+
+	rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
 	if (rc) {
 		BNX2X_ERR("Setup leading failed!\n");
 #ifndef BNX2X_STOP_ON_ERROR
@@ -1377,37 +1395,37 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		bp->flags |= MF_FUNC_DIS;
 	}
 
-	if (bp->state == BNX2X_STATE_OPEN) {
 #ifdef BCM_CNIC
-		/* Enable Timer scan */
-		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
+	/* Enable Timer scan */
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
 #endif
-		for_each_nondefault_queue(bp, i) {
-			rc = bnx2x_setup_multi(bp, i);
-			if (rc)
+	for_each_nondefault_queue(bp, i) {
+		rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
+		if (rc)
 #ifdef BCM_CNIC
-				goto load_error4;
+			goto load_error4;
 #else
-				goto load_error3;
+			goto load_error3;
 #endif
-		}
+	}
+
+	/* Now when Clients are configured we are ready to work */
+	bp->state = BNX2X_STATE_OPEN;
+
+	bnx2x_set_eth_mac(bp, 1);
 
-	if (CHIP_IS_E1(bp))
-		bnx2x_set_eth_mac_addr_e1(bp, 1);
-	else
-		bnx2x_set_eth_mac_addr_e1h(bp, 1);
 #ifdef BCM_CNIC
 	/* Set iSCSI L2 MAC */
 	mutex_lock(&bp->cnic_mutex);
 	if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
 		bnx2x_set_iscsi_eth_mac_addr(bp, 1);
 		bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
-		bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
-			      CNIC_SB_ID(bp));
-	}
-	mutex_unlock(&bp->cnic_mutex);
-#endif
+		bnx2x_init_sb(bp, bp->cnic_sb_mapping,
+			      BNX2X_VF_ID_INVALID, false,
+			      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
 	}
+	mutex_unlock(&bp->cnic_mutex);
+#endif
 
 	if (bp->port.pmf)
 		bnx2x_initial_phy_init(bp, load_mode);
@@ -1415,18 +1433,15 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	/* Start fast path */
 	switch (load_mode) {
 	case LOAD_NORMAL:
-		if (bp->state == BNX2X_STATE_OPEN) {
-			/* Tx queue should be only reenabled */
-			netif_tx_wake_all_queues(bp->dev);
-		}
+		/* Tx queue should be only reenabled */
+		netif_tx_wake_all_queues(bp->dev);
 		/* Initialize the receive filter. */
 		bnx2x_set_rx_mode(bp->dev);
 		break;
 
 	case LOAD_OPEN:
 		netif_tx_start_all_queues(bp->dev);
-		if (bp->state != BNX2X_STATE_OPEN)
-			netif_tx_disable(bp->dev);
+		smp_mb__after_clear_bit();
 		/* Initialize the receive filter. */
 		bnx2x_set_rx_mode(bp->dev);
 		break;
@@ -1512,21 +1527,22 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 	bnx2x_set_storm_rx_mode(bp);
 
-	/* Disable HW interrupts, NAPI and Tx */
-	bnx2x_netif_stop(bp, 1);
-	netif_carrier_off(bp->dev);
-
 	del_timer_sync(&bp->timer);
 	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
 		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
-	/* Release IRQs */
-	bnx2x_free_irq(bp, false);
 
 	/* Cleanup the chip if needed */
 	if (unload_mode != UNLOAD_RECOVERY)
 		bnx2x_chip_cleanup(bp, unload_mode);
+	else {
+		/* Disable HW interrupts, NAPI and Tx */
+		bnx2x_netif_stop(bp, 1);
+
+		/* Release IRQs */
+		bnx2x_free_irq(bp, false);
+	}
 
 	bp->port.pmf = 0;
 
@@ -1634,27 +1650,28 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 		/* Fall out from the NAPI loop if needed */
 		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
 			bnx2x_update_fpsb_idx(fp);
-			/* bnx2x_has_rx_work() reads the status block, thus we need
-			 * to ensure that status block indices have been actually read
-			 * (bnx2x_update_fpsb_idx) prior to this check
-			 * (bnx2x_has_rx_work) so that we won't write the "newer"
-			 * value of the status block to IGU (if there was a DMA right
-			 * after bnx2x_has_rx_work and if there is no rmb, the memory
-			 * reading (bnx2x_update_fpsb_idx) may be postponed to right
-			 * before bnx2x_ack_sb). In this case there will never be
-			 * another interrupt until there is another update of the
-			 * status block, while there is still unhandled work.
+			/* bnx2x_has_rx_work() reads the status block,
+			 * thus we need to ensure that status block indices
+			 * have been actually read (bnx2x_update_fpsb_idx)
+			 * prior to this check (bnx2x_has_rx_work) so that
+			 * we won't write the "newer" value of the status block
+			 * to IGU (if there was a DMA right after
+			 * bnx2x_has_rx_work and if there is no rmb, the memory
+			 * reading (bnx2x_update_fpsb_idx) may be postponed
+			 * to right before bnx2x_ack_sb). In this case there
+			 * will never be another interrupt until there is
+			 * another update of the status block, while there
+			 * is still unhandled work.
 			 */
 			rmb();
 
 			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
 				napi_complete(napi);
 				/* Re-enable interrupts */
-				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
-					     le16_to_cpu(fp->fp_c_idx),
-					     IGU_INT_NOP, 1);
-				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
-					     le16_to_cpu(fp->fp_u_idx),
+				DP(NETIF_MSG_HW,
+				   "Update index to %d\n", fp->fp_hc_idx);
+				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
+					     le16_to_cpu(fp->fp_hc_idx),
 					     IGU_INT_ENABLE, 1);
 				break;
 			}
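
The rmb() above enforces read-read ordering between the status-block snapshot and the recheck. A schematic sketch of the same pattern in portable C11, with an acquire fence standing in for rmb(); this is an illustration, not driver code:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned short sb_index; /* written by the device via DMA */
    static unsigned short fp_hc_idx;        /* driver's cached copy */

    static int has_work(void)
    {
            return fp_hc_idx != atomic_load_explicit(&sb_index,
                                                     memory_order_relaxed);
    }

    static void napi_poll_tail(void)
    {
            /* snapshot the live index (bnx2x_update_fpsb_idx analogue) */
            fp_hc_idx = atomic_load_explicit(&sb_index, memory_order_relaxed);

            /* acquire fence standing in for rmb(): the snapshot above must
             * complete before the recheck below, or we could ack a stale
             * index and never see another interrupt despite pending work */
            atomic_thread_fence(memory_order_acquire);

            if (!has_work())
                    printf("ack %u and re-enable interrupts\n", fp_hc_idx);
    }

    int main(void)
    {
            atomic_store(&sb_index, 5);
            fp_hc_idx = 5;
            napi_poll_tail();
            return 0;
    }
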
@@ -1850,7 +1867,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct sw_tx_bd *tx_buf;
 	struct eth_tx_start_bd *tx_start_bd;
 	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
-	struct eth_tx_parse_bd *pbd = NULL;
+	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
 	u16 pkt_prod, bd_prod;
 	int nbd, fp_index;
 	dma_addr_t mapping;
@@ -1926,10 +1943,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
 
 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-	tx_start_bd->general_data = (mac_type <<
-				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
+	SET_FLAG(tx_start_bd->general_data,
+		 ETH_TX_START_BD_ETH_ADDR_TYPE,
+		 mac_type);
 	/* header nbd */
-	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
+	SET_FLAG(tx_start_bd->general_data,
+		 ETH_TX_START_BD_HDR_NBDS,
+		 1);
 
 	/* remember the first BD of the packet */
 	tx_buf->first_bd = fp->tx_bd_prod;
@@ -1943,62 +1963,68 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1943#ifdef BCM_VLAN 1963#ifdef BCM_VLAN
1944 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) && 1964 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
1945 (bp->flags & HW_VLAN_TX_FLAG)) { 1965 (bp->flags & HW_VLAN_TX_FLAG)) {
1946 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); 1966 tx_start_bd->vlan_or_ethertype =
1947 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; 1967 cpu_to_le16(vlan_tx_tag_get(skb));
1968 tx_start_bd->bd_flags.as_bitfield |=
1969 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
1948 } else 1970 } else
1949#endif 1971#endif
1950 tx_start_bd->vlan = cpu_to_le16(pkt_prod); 1972 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
1951 1973
1952 /* turn on parsing and get a BD */ 1974 /* turn on parsing and get a BD */
1953 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 1975 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1954 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
1955 1976
1956 memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); 1977 if (xmit_type & XMIT_CSUM) {
1978 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
1979
1980 if (xmit_type & XMIT_CSUM_V4)
1981 tx_start_bd->bd_flags.as_bitfield |=
1982 ETH_TX_BD_FLAGS_IP_CSUM;
1983 else
1984 tx_start_bd->bd_flags.as_bitfield |=
1985 ETH_TX_BD_FLAGS_IPV6;
1957 1986
1987 if (!(xmit_type & XMIT_CSUM_TCP))
1988 tx_start_bd->bd_flags.as_bitfield |=
1989 ETH_TX_BD_FLAGS_IS_UDP;
1990 }
1991 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
1992 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
1993 /* Set PBD in checksum offload case */
1958 if (xmit_type & XMIT_CSUM) { 1994 if (xmit_type & XMIT_CSUM) {
1959 hlen = (skb_network_header(skb) - skb->data) / 2; 1995 hlen = (skb_network_header(skb) - skb->data) / 2;
1960 1996
1961 /* for now NS flag is not used in Linux */ 1997 /* for now NS flag is not used in Linux */
1962 pbd->global_data = 1998 pbd_e1x->global_data =
1963 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << 1999 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1964 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT)); 2000 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1965 2001
1966 pbd->ip_hlen = (skb_transport_header(skb) - 2002 pbd_e1x->ip_hlen_w = (skb_transport_header(skb) -
1967 skb_network_header(skb)) / 2; 2003 skb_network_header(skb)) / 2;
1968 2004
1969 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2; 2005 hlen += pbd_e1x->ip_hlen_w + tcp_hdrlen(skb) / 2;
1970 2006
1971 pbd->total_hlen = cpu_to_le16(hlen); 2007 pbd_e1x->total_hlen_w = cpu_to_le16(hlen);
1972 hlen = hlen*2; 2008 hlen = hlen*2;
1973 2009
1974 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
1975
1976 if (xmit_type & XMIT_CSUM_V4)
1977 tx_start_bd->bd_flags.as_bitfield |=
1978 ETH_TX_BD_FLAGS_IP_CSUM;
1979 else
1980 tx_start_bd->bd_flags.as_bitfield |=
1981 ETH_TX_BD_FLAGS_IPV6;
1982
1983 if (xmit_type & XMIT_CSUM_TCP) { 2010 if (xmit_type & XMIT_CSUM_TCP) {
1984 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); 2011 pbd_e1x->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1985 2012
1986 } else { 2013 } else {
1987 s8 fix = SKB_CS_OFF(skb); /* signed! */ 2014 s8 fix = SKB_CS_OFF(skb); /* signed! */
1988 2015
1989 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
1990
1991 DP(NETIF_MSG_TX_QUEUED, 2016 DP(NETIF_MSG_TX_QUEUED,
1992 "hlen %d fix %d csum before fix %x\n", 2017 "hlen %d fix %d csum before fix %x\n",
1993 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb)); 2018 le16_to_cpu(pbd_e1x->total_hlen_w),
2019 fix, SKB_CS(skb));
1994 2020
1995 /* HW bug: fixup the CSUM */ 2021 /* HW bug: fixup the CSUM */
1996 pbd->tcp_pseudo_csum = 2022 pbd_e1x->tcp_pseudo_csum =
1997 bnx2x_csum_fix(skb_transport_header(skb), 2023 bnx2x_csum_fix(skb_transport_header(skb),
1998 SKB_CS(skb), fix); 2024 SKB_CS(skb), fix);
1999 2025
2000 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", 2026 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2001 pbd->tcp_pseudo_csum); 2027 pbd_e1x->tcp_pseudo_csum);
2002 } 2028 }
2003 } 2029 }
2004 2030
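Editor's note: the parsing BD keeps all header lengths in 16-bit words, which is why every offset above is divided by 2 and why hlen is multiplied back (hlen*2) once a byte count is needed for the LSO header split. A worked example for an untagged Ethernet/IPv4/TCP frame with no IP or TCP options:

#include <stdio.h>

int main(void)
{
	unsigned hlen_w, ip_hlen_w, total_bytes;

	hlen_w = 14 / 2;              /* skb_network_header - skb->data */
	ip_hlen_w = 20 / 2;           /* IPv4 header, in 16-bit words   */
	hlen_w += ip_hlen_w + 20 / 2; /* + TCP header                   */
	total_bytes = hlen_w * 2;     /* hlen*2: back to bytes          */

	printf("total_hlen_w=%u (%u bytes)\n", hlen_w, total_bytes);
	/* prints: total_hlen_w=27 (54 bytes) */
	return 0;
}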
@@ -2016,7 +2042,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2016 " nbytes %d flags %x vlan %x\n", 2042 " nbytes %d flags %x vlan %x\n",
2017 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, 2043 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2018 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), 2044 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2019 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan)); 2045 tx_start_bd->bd_flags.as_bitfield,
2046 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2020 2047
2021 if (xmit_type & XMIT_GSO) { 2048 if (xmit_type & XMIT_GSO) {
2022 2049
@@ -2031,24 +2058,25 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2031 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, 2058 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2032 hlen, bd_prod, ++nbd); 2059 hlen, bd_prod, ++nbd);
2033 2060
2034 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 2061 pbd_e1x->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2035 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); 2062 pbd_e1x->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2036 pbd->tcp_flags = pbd_tcp_flags(skb); 2063 pbd_e1x->tcp_flags = pbd_tcp_flags(skb);
2037 2064
2038 if (xmit_type & XMIT_GSO_V4) { 2065 if (xmit_type & XMIT_GSO_V4) {
2039 pbd->ip_id = swab16(ip_hdr(skb)->id); 2066 pbd_e1x->ip_id = swab16(ip_hdr(skb)->id);
2040 pbd->tcp_pseudo_csum = 2067 pbd_e1x->tcp_pseudo_csum =
2041 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, 2068 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2042 ip_hdr(skb)->daddr, 2069 ip_hdr(skb)->daddr,
2043 0, IPPROTO_TCP, 0)); 2070 0, IPPROTO_TCP, 0));
2044 2071
2045 } else 2072 } else
2046 pbd->tcp_pseudo_csum = 2073 pbd_e1x->tcp_pseudo_csum =
2047 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2074 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2048 &ipv6_hdr(skb)->daddr, 2075 &ipv6_hdr(skb)->daddr,
2049 0, IPPROTO_TCP, 0)); 2076 0, IPPROTO_TCP, 0));
2050 2077
2051 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN; 2078 pbd_e1x->global_data |=
2079 ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2052 } 2080 }
2053 tx_data_bd = (struct eth_tx_bd *)tx_start_bd; 2081 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2054 2082
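Editor's note: for GSO the driver seeds tcp_pseudo_csum with the pseudo-header checksum computed over the addresses and protocol with a zero length, so the firmware can fold each segment's real length in as it cuts segments. A user-space sketch of the IPv4 fold, assuming the usual one's-complement rules; csum_tcpudp_magic() is the kernel-side equivalent, and the addresses below are examples only:

#include <stdint.h>
#include <stdio.h>

/* One's-complement fold of the IPv4 pseudo header with a zero
 * length field; the driver stores this (as ~csum_tcpudp_magic(),
 * byte-swapped) so the FW can add per-segment lengths later. */
static uint16_t pseudo_fold(uint32_t saddr, uint32_t daddr, uint8_t proto)
{
	uint32_t sum = 0;

	sum += saddr >> 16;  sum += saddr & 0xffff;
	sum += daddr >> 16;  sum += daddr & 0xffff;
	sum += proto;        /* the length word contributes 0 here */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* 192.0.2.1 -> 192.0.2.2, TCP (6) */
	printf("seed = 0x%04x\n", pseudo_fold(0xc0000201, 0xc0000202, 6));
	return 0;
}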
@@ -2088,13 +2116,14 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2088 if (total_pkt_bd != NULL) 2116 if (total_pkt_bd != NULL)
2089 total_pkt_bd->total_pkt_bytes = pkt_size; 2117 total_pkt_bd->total_pkt_bytes = pkt_size;
2090 2118
2091 if (pbd) 2119 if (pbd_e1x)
2092 DP(NETIF_MSG_TX_QUEUED, 2120 DP(NETIF_MSG_TX_QUEUED,
2093 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" 2121 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2094 " tcp_flags %x xsum %x seq %u hlen %u\n", 2122 " tcp_flags %x xsum %x seq %u hlen %u\n",
2095 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id, 2123 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2096 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum, 2124 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2097 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen)); 2125 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2126 le16_to_cpu(pbd_e1x->total_hlen_w));
2098 2127
2099 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); 2128 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2100 2129
@@ -2109,7 +2138,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2109 2138
2110 fp->tx_db.data.prod += nbd; 2139 fp->tx_db.data.prod += nbd;
2111 barrier(); 2140 barrier();
2112 DOORBELL(bp, fp->index, fp->tx_db.raw); 2141 DOORBELL(bp, fp->cid, fp->tx_db.raw);
2113 2142
2114 mmiowb(); 2143 mmiowb();
2115 2144
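Editor's note: the publish sequence stays the classic one even though the doorbell is now keyed by fp->cid instead of the queue index: write the BDs, compiler barrier, doorbell write, then mmiowb() to order the MMIO store against a later unlock. A model of that ordering with C11 fences standing in for barrier()/mmiowb(); the names below are illustrative only:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t bd[8];            /* stand-in for the TX BDs       */
	_Atomic uint32_t doorbell; /* stand-in for the MMIO raw db  */
};

static void tx_publish(struct ring *r, uint32_t prod)
{
	r->bd[prod & 7] = 0xdead;  /* BD contents written first     */
	/* barrier(): BD stores may not sink below the doorbell     */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&r->doorbell, prod, memory_order_relaxed);
	/* mmiowb() would order the MMIO write vs. a later unlock   */
}

int main(void)
{
	struct ring r = { {0}, 0 };

	tx_publish(&r, 1);
	printf("db=%u\n", atomic_load(&r.doorbell));
	return 0;
}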
@@ -2141,16 +2170,51 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2141 return -EINVAL; 2170 return -EINVAL;
2142 2171
2143 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 2172 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2144 if (netif_running(dev)) { 2173 if (netif_running(dev))
2145 if (CHIP_IS_E1(bp)) 2174 bnx2x_set_eth_mac(bp, 1);
2146 bnx2x_set_eth_mac_addr_e1(bp, 1);
2147 else
2148 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2149 }
2150 2175
2151 return 0; 2176 return 0;
2152} 2177}
2153 2178
2179void bnx2x_free_mem_bp(struct bnx2x *bp)
2180{
2181 kfree(bp->fp);
2182 kfree(bp->msix_table);
2183 kfree(bp->ilt);
2184}
2185
2186int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2187{
2188 struct bnx2x_fastpath *fp;
2189 struct msix_entry *tbl;
2190 struct bnx2x_ilt *ilt;
2191
2192 /* fp array */
2193 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2194 if (!fp)
2195 goto alloc_err;
2196 bp->fp = fp;
2197
2198 /* msix table */
2199 tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2200 GFP_KERNEL);
2201 if (!tbl)
2202 goto alloc_err;
2203 bp->msix_table = tbl;
2204
2205 /* ilt */
2206 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2207 if (!ilt)
2208 goto alloc_err;
2209 bp->ilt = ilt;
2210
2211 return 0;
2212alloc_err:
2213 bnx2x_free_mem_bp(bp);
2214 return -ENOMEM;
2215
2216}
2217
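Editor's note: bnx2x_alloc_mem_bp() follows the usual kernel allocate-or-unwind shape: each kzalloc() is checked, and any failure jumps to a single label that frees whatever was already attached to bp (kfree(NULL) is a no-op, so the unwind needs no state tracking). The same shape in miniature, with invented struct and field names:

#include <stdlib.h>

struct demo { int *a; int *b; };

static void demo_free(struct demo *d)
{
	free(d->b);   /* free(NULL) is a no-op, like kfree(NULL) */
	free(d->a);
}

static int demo_alloc(struct demo *d)
{
	d->a = calloc(4, sizeof(*d->a));
	if (!d->a)
		goto alloc_err;
	d->b = calloc(4, sizeof(*d->b));
	if (!d->b)
		goto alloc_err;
	return 0;
alloc_err:
	demo_free(d);
	return -1; /* -ENOMEM in the driver */
}

int main(void)
{
	struct demo d = { 0, 0 };
	int rc = demo_alloc(&d);

	if (!rc)
		demo_free(&d);
	return rc;
}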
2154/* called with rtnl_lock */ 2218/* called with rtnl_lock */
2155int bnx2x_change_mtu(struct net_device *dev, int new_mtu) 2219int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2156{ 2220{
@@ -2200,18 +2264,6 @@ void bnx2x_vlan_rx_register(struct net_device *dev,
2200 struct bnx2x *bp = netdev_priv(dev); 2264 struct bnx2x *bp = netdev_priv(dev);
2201 2265
2202 bp->vlgrp = vlgrp; 2266 bp->vlgrp = vlgrp;
2203
2204 /* Set flags according to the required capabilities */
2205 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
2206
2207 if (dev->features & NETIF_F_HW_VLAN_TX)
2208 bp->flags |= HW_VLAN_TX_FLAG;
2209
2210 if (dev->features & NETIF_F_HW_VLAN_RX)
2211 bp->flags |= HW_VLAN_RX_FLAG;
2212
2213 if (netif_running(dev))
2214 bnx2x_set_client_config(bp);
2215} 2267}
2216 2268
2217#endif 2269#endif
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 1ad08e4e88f4..2fb9045833e1 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -107,6 +107,13 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
107void bnx2x_int_enable(struct bnx2x *bp); 107void bnx2x_int_enable(struct bnx2x *bp);
108 108
109/** 109/**
110 * Disable HW interrupts.
111 *
112 * @param bp
113 */
114void bnx2x_int_disable(struct bnx2x *bp);
115
116/**
110 * Disable interrupts. This function ensures that no 117
111 * ISRs or SP DPCs (sp_task) are running after it returns. 118
112 * 119 *
@@ -163,27 +170,30 @@ int bnx2x_alloc_mem(struct bnx2x *bp);
163void bnx2x_free_mem(struct bnx2x *bp); 170void bnx2x_free_mem(struct bnx2x *bp);
164 171
165/** 172/**
166 * Bring up a leading (the first) eth Client. 173 * Setup eth Client.
167 * 174 *
168 * @param bp 175 * @param bp
176 * @param fp
177 * @param is_leading
169 * 178 *
170 * @return int 179 * @return int
171 */ 180 */
172int bnx2x_setup_leading(struct bnx2x *bp); 181int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
182 int is_leading);
173 183
174/** 184/**
175 * Setup non-leading eth Client. 185 * Bring down an eth client.
176 * 186 *
177 * @param bp 187 * @param bp
178 * @param fp 188 * @param p
179 * 189 *
180 * @return int 190 * @return int
181 */ 191 */
182int bnx2x_setup_multi(struct bnx2x *bp, int index); 192int bnx2x_stop_fw_client(struct bnx2x *bp,
193 struct bnx2x_client_ramrod_params *p);
183 194
184/** 195/**
185 * Set number of queues according to mode 196
186 * msi-x vectors
187 * 197 *
188 * @param bp 198 * @param bp
189 * 199 *
@@ -228,16 +238,7 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
228 * @param bp driver handle 238 * @param bp driver handle
229 * @param set 239 * @param set
230 */ 240 */
231void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set); 241void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
232
233/**
234 * Configure eth MAC address in the HW according to the value in
235 * netdev->dev_addr for 57710
236 *
237 * @param bp driver handle
238 * @param set
239 */
240void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set);
241 242
242#ifdef BCM_CNIC 243#ifdef BCM_CNIC
243/** 244/**
@@ -257,12 +258,15 @@ int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
257 * Initialize status block in FW and HW 258 * Initialize status block in FW and HW
258 * 259 *
259 * @param bp driver handle 260 * @param bp driver handle
260 * @param sb host_status_block
261 * @param dma_addr_t mapping 261 * @param dma_addr_t mapping
262 * @param int sb_id 262 * @param int sb_id
263 * @param int vfid
264 * @param u8 vf_valid
265 * @param int fw_sb_id
266 * @param int igu_sb_id
263 */ 267 */
264void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, 268void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
265 dma_addr_t mapping, int sb_id); 269 u8 vf_valid, int fw_sb_id, int igu_sb_id);
266 270
267/** 271/**
268 * Reconfigure FW/HW according to dev->flags rx mode 272 * Reconfigure FW/HW according to dev->flags rx mode
@@ -295,14 +299,6 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp);
295void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); 299void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
296 300
297/** 301/**
298 * Configures FW with client parameters (like HW VLAN removal)
299 * for each active client.
300 *
301 * @param bp
302 */
303void bnx2x_set_client_config(struct bnx2x *bp);
304
305/**
306 * Handle sp events 302 * Handle sp events
307 * 303 *
308 * @param fp fastpath handle for the event 304 * @param fp fastpath handle for the event
@@ -310,14 +306,29 @@ void bnx2x_set_client_config(struct bnx2x *bp);
310 */ 306 */
311void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); 307void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
312 308
309/**
310 * Init/halt function before/after sending
311 * CLIENT_SETUP/CFC_DEL for the first/last client.
312 *
313 * @param bp
314 *
315 * @return int
316 */
317int bnx2x_func_start(struct bnx2x *bp);
318int bnx2x_func_stop(struct bnx2x *bp);
319
320/**
321 * Prepare ILT configurations according to current driver
322 * parameters.
323 *
324 * @param bp
325 */
326void bnx2x_ilt_set_info(struct bnx2x *bp);
313 327
314static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) 328static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
315{ 329{
316 struct host_status_block *fpsb = fp->status_blk;
317
318 barrier(); /* status block is written to by the chip */ 330 barrier(); /* status block is written to by the chip */
319 fp->fp_c_idx = fpsb->c_status_block.status_block_index; 331 fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
320 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
321} 332}
322 333
323static inline void bnx2x_update_rx_prod(struct bnx2x *bp, 334static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
@@ -344,8 +355,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
344 wmb(); 355 wmb();
345 356
346 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++) 357 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
347 REG_WR(bp, BAR_USTRORM_INTMEM + 358 REG_WR(bp,
348 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4, 359 BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
349 ((u32 *)&rx_prods)[i]); 360 ((u32 *)&rx_prods)[i]);
350 361
351 mmiowb(); /* keep prod updates ordered */ 362 mmiowb(); /* keep prod updates ordered */
@@ -434,6 +445,17 @@ static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
434 return hw_cons != fp->tx_pkt_cons; 445 return hw_cons != fp->tx_pkt_cons;
435} 446}
436 447
448static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
449{
450 u16 rx_cons_sb;
451
452 /* Tell compiler that status block fields can change */
453 barrier();
454 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
455 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
456 rx_cons_sb++;
457 return (fp->rx_comp_cons != rx_cons_sb);
458}
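Editor's note: the masking test in bnx2x_has_rx_work() skips the per-page "next page" element: when the status-block consumer lands on the last slot of a page it is bumped past it before being compared with rx_comp_cons. A model of that normalization, assuming 128 CQEs per page so MAX_RCQ_DESC_CNT is 127 (the real constant is derived from the page size):

#include <stdio.h>

#define RCQ_DESC_CNT     128
#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1)

static unsigned short skip_next_elem(unsigned short cons)
{
	if ((cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		cons++; /* last slot of a page is not a real CQE */
	return cons;
}

int main(void)
{
	printf("%u -> %u\n", 127, skip_next_elem(127)); /* 127 -> 128 */
	printf("%u -> %u\n", 130, skip_next_elem(130)); /* unchanged  */
	return 0;
}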
437static inline void bnx2x_free_rx_sge(struct bnx2x *bp, 459static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
438 struct bnx2x_fastpath *fp, u16 index) 460 struct bnx2x_fastpath *fp, u16 index)
439{ 461{
@@ -454,13 +476,35 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
454 sge->addr_lo = 0; 476 sge->addr_lo = 0;
455} 477}
456 478
457static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, 479
458 struct bnx2x_fastpath *fp, int last) 480
481
482
483static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
459{ 484{
460 int i; 485 int i, j;
461 486
462 for (i = 0; i < last; i++) 487 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
463 bnx2x_free_rx_sge(bp, fp, i); 488 int idx = RX_SGE_CNT * i - 1;
489
490 for (j = 0; j < 2; j++) {
491 SGE_MASK_CLEAR_BIT(fp, idx);
492 idx--;
493 }
494 }
495}
496
497static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
498{
499 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
500 memset(fp->sge_mask, 0xff,
501 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
502
503 /* Clear the last two indices in each page: these slots
504 hold the "next page" element, hence are never
505 indicated by the FW and must be excluded from
506 the calculations. */
507 bnx2x_clear_sge_mask_next_elems(fp);
464} 508}
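Editor's note: bnx2x_init_sge_ring_bit_mask() keeps one bit per SGE packed into u64 words: all bits start at 1, then the two trailing slots of every page, which hold the next-page pointer, are cleared so they never look outstanding. A stand-alone model of the same bookkeeping; 64 SGEs per page is an assumed figure:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SGE_PAGES   2
#define SGE_CNT     64                    /* SGEs per page (assumed) */
#define NUM_SGE     (SGE_PAGES * SGE_CNT)
#define MASK_WORDS  (NUM_SGE / 64)

static uint64_t sge_mask[MASK_WORDS];

static void mask_clear_bit(int idx)
{
	sge_mask[idx >> 6] &= ~(1ULL << (idx & 63));
}

int main(void)
{
	int i, j;

	memset(sge_mask, 0xff, sizeof(sge_mask)); /* all outstanding */

	/* clear the two trailing "next page" slots of every page */
	for (i = 1; i <= SGE_PAGES; i++)
		for (j = 0; j < 2; j++)
			mask_clear_bit(SGE_CNT * i - 1 - j);

	for (i = 0; i < MASK_WORDS; i++)
		printf("word %d: %016llx\n", i,
		       (unsigned long long)sge_mask[i]);
	return 0;
}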
465 509
466static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, 510static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@ -540,33 +584,15 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
540 dma_unmap_addr(cons_rx_buf, mapping)); 584 dma_unmap_addr(cons_rx_buf, mapping));
541 *prod_bd = *cons_bd; 585 *prod_bd = *cons_bd;
542} 586}
543 587static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
544static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) 588 struct bnx2x_fastpath *fp, int last)
545{ 589{
546 int i, j; 590 int i;
547
548 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
549 int idx = RX_SGE_CNT * i - 1;
550 591
551 for (j = 0; j < 2; j++) { 592 for (i = 0; i < last; i++)
552 SGE_MASK_CLEAR_BIT(fp, idx); 593 bnx2x_free_rx_sge(bp, fp, i);
553 idx--;
554 }
555 }
556} 594}
557 595
558static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
559{
560 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
561 memset(fp->sge_mask, 0xff,
562 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
563
564 /* Clear the two last indices in the page to 1:
565 these are the indices that correspond to the "next" element,
566 hence will never be indicated and should be removed from
567 the calculations. */
568 bnx2x_clear_sge_mask_next_elems(fp);
569}
570static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, 596static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
571 struct bnx2x_fastpath *fp, int last) 597 struct bnx2x_fastpath *fp, int last)
572{ 598{
@@ -592,7 +618,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
592} 618}
593 619
594 620
595static inline void bnx2x_init_tx_ring(struct bnx2x *bp) 621static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
596{ 622{
597 int i, j; 623 int i, j;
598 624
@@ -611,7 +637,7 @@ static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
611 BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); 637 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
612 } 638 }
613 639
614 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE; 640 SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
615 fp->tx_db.data.zero_fill1 = 0; 641 fp->tx_db.data.zero_fill1 = 0;
616 fp->tx_db.data.prod = 0; 642 fp->tx_db.data.prod = 0;
617 643
@@ -619,22 +645,94 @@ static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
619 fp->tx_pkt_cons = 0; 645 fp->tx_pkt_cons = 0;
620 fp->tx_bd_prod = 0; 646 fp->tx_bd_prod = 0;
621 fp->tx_bd_cons = 0; 647 fp->tx_bd_cons = 0;
622 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
623 fp->tx_pkt = 0; 648 fp->tx_pkt = 0;
624 } 649 }
625} 650}
626static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) 651static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
627{ 652{
628 u16 rx_cons_sb; 653 int i;
629 654
630 /* Tell compiler that status block fields can change */ 655 for (i = 1; i <= NUM_RX_RINGS; i++) {
631 barrier(); 656 struct eth_rx_bd *rx_bd;
632 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); 657
633 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) 658 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
634 rx_cons_sb++; 659 rx_bd->addr_hi =
635 return fp->rx_comp_cons != rx_cons_sb; 660 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
661 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
662 rx_bd->addr_lo =
663 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
664 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
665 }
636} 666}
637 667
668static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
669{
670 int i;
671
672 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
673 struct eth_rx_sge *sge;
674
675 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
676 sge->addr_hi =
677 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
678 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
679
680 sge->addr_lo =
681 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
682 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
683 }
684}
685
686static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
687{
688 int i;
689 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
690 struct eth_rx_cqe_next_page *nextpg;
691
692 nextpg = (struct eth_rx_cqe_next_page *)
693 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
694 nextpg->addr_hi =
695 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
696 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
697 nextpg->addr_lo =
698 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
699 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
700 }
701}
702
703
704
705static inline void __storm_memset_struct(struct bnx2x *bp,
706 u32 addr, size_t size, u32 *data)
707{
708 int i;
709 for (i = 0; i < size/4; i++)
710 REG_WR(bp, addr + (i * 4), data[i]);
711}
712
713static inline void storm_memset_mac_filters(struct bnx2x *bp,
714 struct tstorm_eth_mac_filter_config *mac_filters,
715 u16 abs_fid)
716{
717 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
718
719 u32 addr = BAR_TSTRORM_INTMEM +
720 TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);
721
722 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
723}
724
725static inline void storm_memset_cmng(struct bnx2x *bp,
726 struct cmng_struct_per_port *cmng,
727 u8 port)
728{
729 size_t size = sizeof(struct cmng_struct_per_port);
730
731 u32 addr = BAR_XSTRORM_INTMEM +
732 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
733
734 __storm_memset_struct(bp, addr, size, (u32 *)cmng);
735}
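Editor's note: each storm_memset_*() helper reduces to __storm_memset_struct(): view the host struct as u32 words and REG_WR() them one by one into the storm's internal RAM at the per-function offset, so the struct size must be a multiple of 4. A sketch of the pattern against a fake register file; all names below are invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t fake_bar[64];

static void reg_wr(uint32_t addr, uint32_t val)
{
	fake_bar[addr / 4] = val;
}

static void storm_memset_struct(uint32_t addr, size_t size,
				const uint32_t *data)
{
	size_t i;

	for (i = 0; i < size / 4; i++)
		reg_wr(addr + i * 4, data[i]);
}

struct fake_cfg { uint32_t ucast_accept; uint32_t mcast_accept; };

int main(void)
{
	struct fake_cfg cfg = { 1, 0 };
	uint32_t words[sizeof(cfg) / 4];

	memcpy(words, &cfg, sizeof(cfg)); /* word view of the struct */
	storm_memset_struct(16, sizeof(cfg), words);
	printf("%u %u\n", fake_bar[4], fake_bar[5]);
	return 0;
}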
638/* HW Lock for shared dual port PHYs */ 736/* HW Lock for shared dual port PHYs */
639void bnx2x_acquire_phy_lock(struct bnx2x *bp); 737void bnx2x_acquire_phy_lock(struct bnx2x *bp);
640void bnx2x_release_phy_lock(struct bnx2x *bp); 738void bnx2x_release_phy_lock(struct bnx2x *bp);
@@ -659,4 +757,16 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
659int bnx2x_nic_load(struct bnx2x *bp, int load_mode); 757int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
660int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); 758int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
661 759
760/**
761 * Allocate/release memories outsize main driver structure
762 *
763 * @param bp
764 *
765 * @return int
766 */
767int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
768void bnx2x_free_mem_bp(struct bnx2x *bp);
769
770#define BNX2X_FW_IP_HDR_ALIGN_PAD 2 /* FW places hdr with this padding */
771
662#endif /* BNX2X_CMN_H */ 772#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index d9748e97fad3..56a0cb579c21 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -1343,7 +1343,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1343 u16 pkt_prod, bd_prod; 1343 u16 pkt_prod, bd_prod;
1344 struct sw_tx_bd *tx_buf; 1344 struct sw_tx_bd *tx_buf;
1345 struct eth_tx_start_bd *tx_start_bd; 1345 struct eth_tx_start_bd *tx_start_bd;
1346 struct eth_tx_parse_bd *pbd = NULL; 1346 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1347 dma_addr_t mapping; 1347 dma_addr_t mapping;
1348 union eth_rx_cqe *cqe; 1348 union eth_rx_cqe *cqe;
1349 u8 cqe_fp_flags; 1349 u8 cqe_fp_flags;
@@ -1399,16 +1399,20 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1399 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 1399 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1400 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ 1400 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
1401 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); 1401 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
1402 tx_start_bd->vlan = cpu_to_le16(pkt_prod); 1402 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
1403 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 1403 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1404 tx_start_bd->general_data = ((UNICAST_ADDRESS << 1404 SET_FLAG(tx_start_bd->general_data,
1405 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1); 1405 ETH_TX_START_BD_ETH_ADDR_TYPE,
1406 UNICAST_ADDRESS);
1407 SET_FLAG(tx_start_bd->general_data,
1408 ETH_TX_START_BD_HDR_NBDS,
1409 1);
1406 1410
1407 /* turn on parsing and get a BD */ 1411 /* turn on parsing and get a BD */
1408 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 1412 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1409 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd; 1413 pbd_e1x = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e1x;
1410 1414
1411 memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); 1415 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
1412 1416
1413 wmb(); 1417 wmb();
1414 1418
@@ -1578,9 +1582,9 @@ static int bnx2x_test_intr(struct bnx2x *bp)
1578 1582
1579 bp->set_mac_pending++; 1583 bp->set_mac_pending++;
1580 smp_wmb(); 1584 smp_wmb();
1581 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 1585 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
1582 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 1586 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
1583 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 1587 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
1584 if (rc == 0) { 1588 if (rc == 0) {
1585 for (i = 0; i < 10; i++) { 1589 for (i = 0; i < 10; i++) {
1586 if (!bp->set_mac_pending) 1590 if (!bp->set_mac_pending)
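Editor's note: the MAC test now posts SET_MAC as a common ramrod (reading the trailing argument's 0 -> 1 flip as the "common ramrod" flag is an assumption about the bnx2x_sp_post() signature of this era) and polls set_mac_pending, which the slow-path completion clears. A model of the post-and-poll shape:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int set_mac_pending;

/* stands in for bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC,
 * 0, hi, lo, 1); here the "firmware" completes immediately and
 * the completion path clears the pending flag. */
static void fake_sp_post(void)
{
	atomic_store(&set_mac_pending, 0);
}

int main(void)
{
	int i;

	atomic_store(&set_mac_pending, 1); /* bp->set_mac_pending++ */
	fake_sp_post();                    /* post the ramrod       */

	for (i = 0; i < 10; i++)           /* completion poll loop  */
		if (!atomic_load(&set_mac_pending))
			break;

	printf("completed after %d polls\n", i);
	return 0;
}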
diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h
index 08d71bf438d6..f4e5b1ce8149 100644
--- a/drivers/net/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x/bnx2x_fw_defs.h
@@ -7,369 +7,272 @@
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 */ 8 */
9 9
10 10#ifndef BNX2X_FW_DEFS_H
11#define CSTORM_ASSERT_LIST_INDEX_OFFSET \ 11#define BNX2X_FW_DEFS_H
12 (IS_E1H_OFFSET ? 0x7000 : 0x1000) 12
13#define CSTORM_ASSERT_LIST_OFFSET(idx) \ 13#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[142].base)
14 (IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 14#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
15#define CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(function, index) \ 15 (IRO[141].base + ((assertListEntry) * IRO[141].m1))
16 (IS_E1H_OFFSET ? (0x8622 + ((function>>1) * 0x40) + \ 16#define CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
17 ((function&1) * 0x100) + (index * 0x4)) : (0x3562 + (function * \ 17 (IRO[144].base + ((pfId) * IRO[144].m1))
18 0x40) + (index * 0x4))) 18#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
19#define CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(function, index) \ 19 (IRO[149].base + (((pfId)>>1) * IRO[149].m1) + (((pfId)&1) * \
20 (IS_E1H_OFFSET ? (0x8822 + ((function>>1) * 0x80) + \ 20 IRO[149].m2))
21 ((function&1) * 0x200) + (index * 0x4)) : (0x35e2 + (function * \ 21#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
22 0x80) + (index * 0x4))) 22 (IRO[150].base + (((pfId)>>1) * IRO[150].m1) + (((pfId)&1) * \
23#define CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(function) \ 23 IRO[150].m2))
24 (IS_E1H_OFFSET ? (0x8600 + ((function>>1) * 0x40) + \ 24#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
25 ((function&1) * 0x100)) : (0x3540 + (function * 0x40))) 25 (IRO[156].base + ((funcId) * IRO[156].m1))
26#define CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(function) \ 26#define CSTORM_FUNC_EN_OFFSET(funcId) \
27 (IS_E1H_OFFSET ? (0x8800 + ((function>>1) * 0x80) + \ 27 (IRO[146].base + ((funcId) * IRO[146].m1))
28 ((function&1) * 0x200)) : (0x35c0 + (function * 0x80))) 28#define CSTORM_FUNCTION_MODE_OFFSET (IRO[153].base)
29#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(function) \ 29#define CSTORM_IGU_MODE_OFFSET (IRO[154].base)
30 (IS_E1H_OFFSET ? (0x8608 + ((function>>1) * 0x40) + \ 30#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
31 ((function&1) * 0x100)) : (0x3548 + (function * 0x40))) 31 (IRO[311].base + ((pfId) * IRO[311].m1))
32#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(function) \ 32#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
33 (IS_E1H_OFFSET ? (0x8808 + ((function>>1) * 0x80) + \ 33 (IRO[312].base + ((pfId) * IRO[312].m1))
34 ((function&1) * 0x200)) : (0x35c8 + (function * 0x80))) 34 #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
35#define CSTORM_FUNCTION_MODE_OFFSET \ 35 (IRO[304].base + ((pfId) * IRO[304].m1) + ((iscsiEqId) * \
36 (IS_E1H_OFFSET ? 0x11e8 : 0xffffffff) 36 IRO[304].m2))
37#define CSTORM_HC_BTR_C_OFFSET(port) \ 37 #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
38 (IS_E1H_OFFSET ? (0x8c04 + (port * 0xf0)) : (0x36c4 + (port * 0xc0))) 38 (IRO[306].base + ((pfId) * IRO[306].m1) + ((iscsiEqId) * \
39#define CSTORM_HC_BTR_U_OFFSET(port) \ 39 IRO[306].m2))
40 (IS_E1H_OFFSET ? (0x8de4 + (port * 0xf0)) : (0x3844 + (port * 0xc0))) 40 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
41#define CSTORM_ISCSI_CQ_SIZE_OFFSET(function) \ 41 (IRO[305].base + ((pfId) * IRO[305].m1) + ((iscsiEqId) * \
42 (IS_E1H_OFFSET ? (0x6680 + (function * 0x8)) : (0x25a0 + \ 42 IRO[305].m2))
43 (function * 0x8))) 43 #define \
44#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \ 44 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
45 (IS_E1H_OFFSET ? (0x66c0 + (function * 0x8)) : (0x25b0 + \ 45 (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * \
46 (function * 0x8))) 46 IRO[307].m2))
47#define CSTORM_ISCSI_EQ_CONS_OFFSET(function, eqIdx) \ 47 #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
48 (IS_E1H_OFFSET ? (0x6040 + (function * 0xc0) + (eqIdx * 0x18)) : \ 48 (IRO[303].base + ((pfId) * IRO[303].m1) + ((iscsiEqId) * \
49 (0x2410 + (function * 0xc0) + (eqIdx * 0x18))) 49 IRO[303].m2))
50#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(function, eqIdx) \ 50 #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
51 (IS_E1H_OFFSET ? (0x6044 + (function * 0xc0) + (eqIdx * 0x18)) : \ 51 (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * \
52 (0x2414 + (function * 0xc0) + (eqIdx * 0x18))) 52 IRO[309].m2))
53#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(function, eqIdx) \ 53 #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
54 (IS_E1H_OFFSET ? (0x604c + (function * 0xc0) + (eqIdx * 0x18)) : \ 54 (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * \
55 (0x241c + (function * 0xc0) + (eqIdx * 0x18))) 55 IRO[308].m2))
56#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(function, eqIdx) \ 56#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
57 (IS_E1H_OFFSET ? (0x6057 + (function * 0xc0) + (eqIdx * 0x18)) : \ 57 (IRO[310].base + ((pfId) * IRO[310].m1))
58 (0x2427 + (function * 0xc0) + (eqIdx * 0x18))) 58#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
59#define CSTORM_ISCSI_EQ_PROD_OFFSET(function, eqIdx) \ 59 (IRO[302].base + ((pfId) * IRO[302].m1))
60 (IS_E1H_OFFSET ? (0x6042 + (function * 0xc0) + (eqIdx * 0x18)) : \ 60#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
61 (0x2412 + (function * 0xc0) + (eqIdx * 0x18))) 61 (IRO[301].base + ((pfId) * IRO[301].m1))
62#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(function, eqIdx) \ 62#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
63 (IS_E1H_OFFSET ? (0x6056 + (function * 0xc0) + (eqIdx * 0x18)) : \ 63 (IRO[300].base + ((pfId) * IRO[300].m1))
64 (0x2426 + (function * 0xc0) + (eqIdx * 0x18))) 64#define CSTORM_PATH_ID_OFFSET (IRO[159].base)
65#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(function, eqIdx) \ 65#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
66 (IS_E1H_OFFSET ? (0x6054 + (function * 0xc0) + (eqIdx * 0x18)) : \ 66 (IRO[137].base + ((pfId) * IRO[137].m1))
67 (0x2424 + (function * 0xc0) + (eqIdx * 0x18))) 67#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
68#define CSTORM_ISCSI_HQ_SIZE_OFFSET(function) \ 68 (IRO[136].base + ((pfId) * IRO[136].m1))
69 (IS_E1H_OFFSET ? (0x6640 + (function * 0x8)) : (0x2590 + \ 69#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[136].size)
70 (function * 0x8))) 70#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
71#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \ 71 (IRO[138].base + ((pfId) * IRO[138].m1))
72 (IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x2404 + \ 72#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[138].size)
73 (function * 0x8))) 73#define CSTORM_STATS_FLAGS_OFFSET(pfId) \
74#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \ 74 (IRO[143].base + ((pfId) * IRO[143].m1))
75 (IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x2402 + \ 75#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
76 (function * 0x8))) 76 (IRO[129].base + ((sbId) * IRO[129].m1))
77#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \ 77#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
78 (IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x2400 + \ 78 (IRO[128].base + ((sbId) * IRO[128].m1))
79 (function * 0x8))) 79#define CSTORM_STATUS_BLOCK_SIZE (IRO[128].size)
80#define CSTORM_SB_HC_DISABLE_C_OFFSET(port, cpu_id, index) \ 80#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
81 (IS_E1H_OFFSET ? (0x811a + (port * 0x280) + (cpu_id * 0x28) + \ 81 (IRO[132].base + ((sbId) * IRO[132].m1))
82 (index * 0x4)) : (0x305a + (port * 0x280) + (cpu_id * 0x28) + \ 82#define CSTORM_SYNC_BLOCK_SIZE (IRO[132].size)
83 (index * 0x4))) 83#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
84#define CSTORM_SB_HC_DISABLE_U_OFFSET(port, cpu_id, index) \ 84 (IRO[151].base + ((vfId) * IRO[151].m1))
85 (IS_E1H_OFFSET ? (0xb01a + (port * 0x800) + (cpu_id * 0x80) + \ 85#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
86 (index * 0x4)) : (0x401a + (port * 0x800) + (cpu_id * 0x80) + \ 86 (IRO[152].base + ((vfId) * IRO[152].m1))
87 (index * 0x4))) 87#define CSTORM_VF_TO_PF_OFFSET(funcId) \
88#define CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, cpu_id, index) \ 88 (IRO[147].base + ((funcId) * IRO[147].m1))
89 (IS_E1H_OFFSET ? (0x8118 + (port * 0x280) + (cpu_id * 0x28) + \ 89#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[199].base)
90 (index * 0x4)) : (0x3058 + (port * 0x280) + (cpu_id * 0x28) + \ 90#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
91 (index * 0x4))) 91 (IRO[198].base + ((pfId) * IRO[198].m1))
92#define CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, cpu_id, index) \ 92#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[99].base)
93 (IS_E1H_OFFSET ? (0xb018 + (port * 0x800) + (cpu_id * 0x80) + \ 93#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
94 (index * 0x4)) : (0x4018 + (port * 0x800) + (cpu_id * 0x80) + \ 94 (IRO[98].base + ((assertListEntry) * IRO[98].m1))
95 (index * 0x4))) 95 #define TSTORM_CLIENT_CONFIG_OFFSET(portId, clientId) \
96#define CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, cpu_id) \ 96 (IRO[197].base + ((portId) * IRO[197].m1) + ((clientId) * \
97 (IS_E1H_OFFSET ? (0x8100 + (port * 0x280) + (cpu_id * 0x28)) : \ 97 IRO[197].m2))
98 (0x3040 + (port * 0x280) + (cpu_id * 0x28))) 98#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[104].base)
99#define CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, cpu_id) \
100 (IS_E1H_OFFSET ? (0xb000 + (port * 0x800) + (cpu_id * 0x80)) : \
101 (0x4000 + (port * 0x800) + (cpu_id * 0x80)))
102#define CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, cpu_id) \
103 (IS_E1H_OFFSET ? (0x8108 + (port * 0x280) + (cpu_id * 0x28)) : \
104 (0x3048 + (port * 0x280) + (cpu_id * 0x28)))
105#define CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, cpu_id) \
106 (IS_E1H_OFFSET ? (0xb008 + (port * 0x800) + (cpu_id * 0x80)) : \
107 (0x4008 + (port * 0x800) + (cpu_id * 0x80)))
108#define CSTORM_SB_STATUS_BLOCK_C_SIZE 0x10
109#define CSTORM_SB_STATUS_BLOCK_U_SIZE 0x60
110#define CSTORM_STATS_FLAGS_OFFSET(function) \
111 (IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
112 (function * 0x8)))
113#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \
114 (IS_E1H_OFFSET ? (0x3200 + (function * 0x20)) : 0xffffffff)
115#define TSTORM_ASSERT_LIST_INDEX_OFFSET \
116 (IS_E1H_OFFSET ? 0xa000 : 0x1000)
117#define TSTORM_ASSERT_LIST_OFFSET(idx) \
118 (IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
119#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \
120 (IS_E1H_OFFSET ? (0x33a0 + (port * 0x1a0) + (client_id * 0x10)) \
121 : (0x9c0 + (port * 0x120) + (client_id * 0x10)))
122#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET \
123 (IS_E1H_OFFSET ? 0x1ed8 : 0xffffffff)
124#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \ 99#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
125 (IS_E1H_OFFSET ? 0x1eda : 0xffffffff) 100 (IRO[105].base)
126#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 101#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
127 (IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \ 102 (IRO[96].base + ((pfId) * IRO[96].m1))
128 ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \ 103#define TSTORM_FUNC_EN_OFFSET(funcId) \
129 0x28) + (index * 0x4))) 104 (IRO[101].base + ((funcId) * IRO[101].m1))
130#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 105#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
131 (IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \ 106 (IRO[195].base + ((pfId) * IRO[195].m1))
132 ((function&1) * 0xa0)) : (0x1400 + (function * 0x28))) 107#define TSTORM_FUNCTION_MODE_OFFSET (IRO[103].base)
133#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 108#define TSTORM_INDIRECTION_TABLE_OFFSET(pfId) \
134 (IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \ 109 (IRO[91].base + ((pfId) * IRO[91].m1))
135 ((function&1) * 0xa0)) : (0x1408 + (function * 0x28))) 110#define TSTORM_INDIRECTION_TABLE_SIZE (IRO[91].size)
136#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ 111 #define \
137 (IS_E1H_OFFSET ? (0x2940 + (function * 0x8)) : (0x4928 + \ 112 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfId, iscsiConBufPblEntry) \
138 (function * 0x8))) 113 (IRO[260].base + ((pfId) * IRO[260].m1) + ((iscsiConBufPblEntry) \
139#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \ 114 * IRO[260].m2))
140 (IS_E1H_OFFSET ? (0x3000 + (function * 0x40)) : (0x1500 + \ 115#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
141 (function * 0x40))) 116 (IRO[264].base + ((pfId) * IRO[264].m1))
142#define TSTORM_FUNCTION_MODE_OFFSET \ 117#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
143 (IS_E1H_OFFSET ? 0x1ed0 : 0xffffffff) 118 (IRO[265].base + ((pfId) * IRO[265].m1))
144#define TSTORM_HC_BTR_OFFSET(port) \ 119#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
145 (IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18))) 120 (IRO[266].base + ((pfId) * IRO[266].m1))
146#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \ 121#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
147 (IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \ 122 (IRO[267].base + ((pfId) * IRO[267].m1))
148 (function * 0x80))) 123#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
149#define TSTORM_INDIRECTION_TABLE_SIZE 0x80 124 (IRO[263].base + ((pfId) * IRO[263].m1))
150#define TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(function, pblEntry) \ 125#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
151 (IS_E1H_OFFSET ? (0x60c0 + (function * 0x40) + (pblEntry * 0x8)) \ 126 (IRO[262].base + ((pfId) * IRO[262].m1))
152 : (0x4c30 + (function * 0x40) + (pblEntry * 0x8))) 127#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
153#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \ 128 (IRO[261].base + ((pfId) * IRO[261].m1))
154 (IS_E1H_OFFSET ? (0x6340 + (function * 0x8)) : (0x4cd0 + \ 129#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
155 (function * 0x8))) 130 (IRO[259].base + ((pfId) * IRO[259].m1))
156#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \ 131#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
157 (IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x4c04 + \ 132 (IRO[269].base + ((pfId) * IRO[269].m1))
158 (function * 0x8))) 133#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
159#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \ 134 (IRO[256].base + ((pfId) * IRO[256].m1))
160 (IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x4c02 + \ 135#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
161 (function * 0x8))) 136 (IRO[257].base + ((pfId) * IRO[257].m1))
162#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \ 137#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
163 (IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x4c00 + \ 138 (IRO[258].base + ((pfId) * IRO[258].m1))
164 (function * 0x8))) 139#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
165#define TSTORM_ISCSI_RQ_SIZE_OFFSET(function) \ 140 (IRO[196].base + ((pfId) * IRO[196].m1))
166 (IS_E1H_OFFSET ? (0x6080 + (function * 0x8)) : (0x4c20 + \ 141 #define TSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, tStatCntId) \
167 (function * 0x8))) 142 (IRO[100].base + ((portId) * IRO[100].m1) + ((tStatCntId) * \
168#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \ 143 IRO[100].m2))
169 (IS_E1H_OFFSET ? (0x6040 + (function * 0x8)) : (0x4c10 + \ 144#define TSTORM_STATS_FLAGS_OFFSET(pfId) \
170 (function * 0x8))) 145 (IRO[95].base + ((pfId) * IRO[95].m1))
171#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(function) \ 146#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
172 (IS_E1H_OFFSET ? (0x6042 + (function * 0x8)) : (0x4c12 + \ 147 (IRO[211].base + ((pfId) * IRO[211].m1))
173 (function * 0x8))) 148#define TSTORM_VF_TO_PF_OFFSET(funcId) \
174#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(function) \ 149 (IRO[102].base + ((funcId) * IRO[102].m1))
175 (IS_E1H_OFFSET ? (0x6044 + (function * 0x8)) : (0x4c14 + \ 150#define USTORM_AGG_DATA_OFFSET (IRO[201].base)
176 (function * 0x8))) 151#define USTORM_AGG_DATA_SIZE (IRO[201].size)
177#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \ 152#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[170].base)
178 (IS_E1H_OFFSET ? (0x3008 + (function * 0x40)) : (0x1508 + \ 153#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
179 (function * 0x40))) 154 (IRO[169].base + ((assertListEntry) * IRO[169].m1))
180#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \ 155#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
181 (IS_E1H_OFFSET ? (0x2010 + (port * 0x490) + (stats_counter_id * \ 156 (IRO[178].base + ((portId) * IRO[178].m1))
182 0x40)) : (0x4010 + (port * 0x490) + (stats_counter_id * 0x40))) 157#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
183#define TSTORM_STATS_FLAGS_OFFSET(function) \ 158 (IRO[172].base + ((pfId) * IRO[172].m1))
184 (IS_E1H_OFFSET ? (0x29c0 + (function * 0x8)) : (0x4948 + \ 159#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
185 (function * 0x8))) 160 (IRO[313].base + ((pfId) * IRO[313].m1))
186#define TSTORM_TCP_MAX_CWND_OFFSET(function) \ 161#define USTORM_FUNC_EN_OFFSET(funcId) \
187 (IS_E1H_OFFSET ? (0x4004 + (function * 0x8)) : (0x1fb4 + \ 162 (IRO[174].base + ((funcId) * IRO[174].m1))
188 (function * 0x8))) 163#define USTORM_FUNCTION_MODE_OFFSET (IRO[177].base)
189#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa000 : 0x3000) 164#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
190#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2000 : 0x1000) 165 (IRO[277].base + ((pfId) * IRO[277].m1))
191#define USTORM_ASSERT_LIST_INDEX_OFFSET \ 166#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
192 (IS_E1H_OFFSET ? 0x8000 : 0x1000) 167 (IRO[278].base + ((pfId) * IRO[278].m1))
193#define USTORM_ASSERT_LIST_OFFSET(idx) \ 168#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
194 (IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 169 (IRO[282].base + ((pfId) * IRO[282].m1))
195#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \ 170#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
196 (IS_E1H_OFFSET ? (0x1010 + (port * 0x680) + (clientId * 0x40)) : \ 171 (IRO[279].base + ((pfId) * IRO[279].m1))
197 (0x4010 + (port * 0x360) + (clientId * 0x30))) 172#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
198#define USTORM_CQE_PAGE_NEXT_OFFSET(port, clientId) \ 173 (IRO[275].base + ((pfId) * IRO[275].m1))
199 (IS_E1H_OFFSET ? (0x1028 + (port * 0x680) + (clientId * 0x40)) : \ 174#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
200 (0x4028 + (port * 0x360) + (clientId * 0x30))) 175 (IRO[274].base + ((pfId) * IRO[274].m1))
201#define USTORM_ETH_PAUSE_ENABLED_OFFSET(port) \ 176#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
202 (IS_E1H_OFFSET ? (0x2ad4 + (port * 0x8)) : 0xffffffff) 177 (IRO[273].base + ((pfId) * IRO[273].m1))
203#define USTORM_ETH_RING_PAUSE_DATA_OFFSET(port, clientId) \ 178#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
204 (IS_E1H_OFFSET ? (0x1030 + (port * 0x680) + (clientId * 0x40)) : \ 179 (IRO[276].base + ((pfId) * IRO[276].m1))
205 0xffffffff) 180#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
206#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ 181 (IRO[280].base + ((pfId) * IRO[280].m1))
207 (IS_E1H_OFFSET ? (0x2a50 + (function * 0x8)) : (0x1dd0 + \ 182#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
208 (function * 0x8))) 183 (IRO[281].base + ((pfId) * IRO[281].m1))
209#define USTORM_FUNCTION_MODE_OFFSET \ 184#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
210 (IS_E1H_OFFSET ? 0x2448 : 0xffffffff) 185 (IRO[176].base + ((pfId) * IRO[176].m1))
211#define USTORM_ISCSI_CQ_SIZE_OFFSET(function) \ 186 #define USTORM_PER_COUNTER_ID_STATS_OFFSET(portId, uStatCntId) \
212 (IS_E1H_OFFSET ? (0x7044 + (function * 0x8)) : (0x2414 + \ 187 (IRO[173].base + ((portId) * IRO[173].m1) + ((uStatCntId) * \
213 (function * 0x8))) 188 IRO[173].m2))
214#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \ 189 #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
215 (IS_E1H_OFFSET ? (0x7046 + (function * 0x8)) : (0x2416 + \ 190 (IRO[204].base + ((portId) * IRO[204].m1) + ((clientId) * \
216 (function * 0x8))) 191 IRO[204].m2))
217#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \ 192#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
218 (IS_E1H_OFFSET ? (0x7688 + (function * 0x8)) : (0x29c8 + \ 193 (IRO[205].base + ((qzoneId) * IRO[205].m1))
219 (function * 0x8))) 194#define USTORM_STATS_FLAGS_OFFSET(pfId) \
220#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(function) \ 195 (IRO[171].base + ((pfId) * IRO[171].m1))
221 (IS_E1H_OFFSET ? (0x7648 + (function * 0x8)) : (0x29b8 + \ 196#define USTORM_TPA_BTR_OFFSET (IRO[202].base)
222 (function * 0x8))) 197#define USTORM_TPA_BTR_SIZE (IRO[202].size)
223#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \ 198#define USTORM_VF_TO_PF_OFFSET(funcId) \
224 (IS_E1H_OFFSET ? (0x7004 + (function * 0x8)) : (0x2404 + \ 199 (IRO[175].base + ((funcId) * IRO[175].m1))
225 (function * 0x8))) 200#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[59].base)
226#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \ 201#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[58].base)
227 (IS_E1H_OFFSET ? (0x7002 + (function * 0x8)) : (0x2402 + \ 202#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[54].base)
228 (function * 0x8))) 203#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
229#define USTORM_ISCSI_PAGE_SIZE_OFFSET(function) \ 204 (IRO[53].base + ((assertListEntry) * IRO[53].m1))
230 (IS_E1H_OFFSET ? (0x7000 + (function * 0x8)) : (0x2400 + \ 205#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
231 (function * 0x8))) 206 (IRO[47].base + ((portId) * IRO[47].m1))
232#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \ 207#define XSTORM_E1HOV_OFFSET(pfId) \
233 (IS_E1H_OFFSET ? (0x7040 + (function * 0x8)) : (0x2410 + \ 208 (IRO[55].base + ((pfId) * IRO[55].m1))
234 (function * 0x8))) 209#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
235#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(function) \ 210 (IRO[45].base + ((pfId) * IRO[45].m1))
236 (IS_E1H_OFFSET ? (0x7080 + (function * 0x8)) : (0x2420 + \ 211#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
237 (function * 0x8))) 212 (IRO[49].base + ((pfId) * IRO[49].m1))
238#define USTORM_ISCSI_RQ_SIZE_OFFSET(function) \ 213#define XSTORM_FUNC_EN_OFFSET(funcId) \
239 (IS_E1H_OFFSET ? (0x7084 + (function * 0x8)) : (0x2424 + \ 214 (IRO[51].base + ((funcId) * IRO[51].m1))
240 (function * 0x8))) 215#define XSTORM_FUNCTION_MODE_OFFSET (IRO[56].base)
241#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \ 216#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
242 (IS_E1H_OFFSET ? (0x1018 + (port * 0x680) + (clientId * 0x40)) : \ 217 (IRO[290].base + ((pfId) * IRO[290].m1))
243 (0x4018 + (port * 0x360) + (clientId * 0x30))) 218#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
244#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \ 219 (IRO[293].base + ((pfId) * IRO[293].m1))
245 (IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x1da8 + \ 220#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
246 (function * 0x8))) 221 (IRO[294].base + ((pfId) * IRO[294].m1))
247#define USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \ 222#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
248 (IS_E1H_OFFSET ? (0x2450 + (port * 0x2d0) + (stats_counter_id * \ 223 (IRO[295].base + ((pfId) * IRO[295].m1))
249 0x28)) : (0x1500 + (port * 0x2d0) + (stats_counter_id * 0x28))) 224#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
250#define USTORM_RX_PRODS_OFFSET(port, client_id) \ 225 (IRO[296].base + ((pfId) * IRO[296].m1))
251 (IS_E1H_OFFSET ? (0x1000 + (port * 0x680) + (client_id * 0x40)) \ 226#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
252 : (0x4000 + (port * 0x360) + (client_id * 0x30))) 227 (IRO[297].base + ((pfId) * IRO[297].m1))
253#define USTORM_STATS_FLAGS_OFFSET(function) \ 228#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
254 (IS_E1H_OFFSET ? (0x29f0 + (function * 0x8)) : (0x1db8 + \ 229 (IRO[298].base + ((pfId) * IRO[298].m1))
255 (function * 0x8))) 230#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
256#define USTORM_TPA_BTR_OFFSET (IS_E1H_OFFSET ? 0x3da5 : 0x5095) 231 (IRO[299].base + ((pfId) * IRO[299].m1))
257#define USTORM_TPA_BTR_SIZE 0x1 232#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
258#define XSTORM_ASSERT_LIST_INDEX_OFFSET \ 233 (IRO[289].base + ((pfId) * IRO[289].m1))
259 (IS_E1H_OFFSET ? 0x9000 : 0x1000) 234#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
260#define XSTORM_ASSERT_LIST_OFFSET(idx) \ 235 (IRO[288].base + ((pfId) * IRO[288].m1))
261 (IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 236#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
262#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \ 237 (IRO[287].base + ((pfId) * IRO[287].m1))
263 (IS_E1H_OFFSET ? (0x24a8 + (port * 0x50)) : (0x3a80 + (port * 0x50))) 238#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
264#define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 239 (IRO[292].base + ((pfId) * IRO[292].m1))
265 (IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \ 240#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
266 ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \ 241 (IRO[291].base + ((pfId) * IRO[291].m1))
267 0x28) + (index * 0x4))) 242#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
268#define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 243 (IRO[286].base + ((pfId) * IRO[286].m1))
269 (IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \ 244#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
270 ((function&1) * 0xa0)) : (0x1400 + (function * 0x28))) 245 (IRO[285].base + ((pfId) * IRO[285].m1))
271#define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 246#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
272 (IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \ 247 (IRO[284].base + ((pfId) * IRO[284].m1))
273 ((function&1) * 0xa0)) : (0x1408 + (function * 0x28))) 248#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
274#define XSTORM_E1HOV_OFFSET(function) \ 249 (IRO[283].base + ((pfId) * IRO[283].m1))
275 (IS_E1H_OFFSET ? (0x2c10 + (function * 0x8)) : 0xffffffff) 250#define XSTORM_PATH_ID_OFFSET (IRO[65].base)
276#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ 251 #define XSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, xStatCntId) \
277 (IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3a50 + \ 252 (IRO[50].base + ((portId) * IRO[50].m1) + ((xStatCntId) * \
278 (function * 0x8))) 253 IRO[50].m2))
279#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \ 254#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
280 (IS_E1H_OFFSET ? (0x2588 + (function * 0x90)) : (0x3b60 + \ 255 (IRO[48].base + ((pfId) * IRO[48].m1))
281 (function * 0x90))) 256#define XSTORM_SPQ_DATA_OFFSET(funcId) \
282#define XSTORM_FUNCTION_MODE_OFFSET \ 257 (IRO[32].base + ((funcId) * IRO[32].m1))
283 (IS_E1H_OFFSET ? 0x2c50 : 0xffffffff) 258#define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
284#define XSTORM_HC_BTR_OFFSET(port) \ 259#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \
285 (IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18))) 260 (IRO[30].base + ((funcId) * IRO[30].m1))
286#define XSTORM_ISCSI_HQ_SIZE_OFFSET(function) \ 261#define XSTORM_SPQ_PROD_OFFSET(funcId) \
287 (IS_E1H_OFFSET ? (0x80c0 + (function * 0x8)) : (0x1c30 + \ 262 (IRO[31].base + ((funcId) * IRO[31].m1))
288 (function * 0x8))) 263#define XSTORM_STATS_FLAGS_OFFSET(pfId) \
289#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(function) \ 264 (IRO[43].base + ((pfId) * IRO[43].m1))
290 (IS_E1H_OFFSET ? (0x8080 + (function * 0x8)) : (0x1c20 + \ 265#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
291 (function * 0x8))) 266 (IRO[206].base + ((portId) * IRO[206].m1))
292#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(function) \ 267#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
293 (IS_E1H_OFFSET ? (0x8081 + (function * 0x8)) : (0x1c21 + \ 268 (IRO[207].base + ((portId) * IRO[207].m1))
294 (function * 0x8))) 269#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
295#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(function) \ 270 (IRO[209].base + (((pfId)>>1) * IRO[209].m1) + (((pfId)&1) * \
296 (IS_E1H_OFFSET ? (0x8082 + (function * 0x8)) : (0x1c22 + \ 271 IRO[209].m2))
297 (function * 0x8))) 272#define XSTORM_VF_TO_PF_OFFSET(funcId) \
298#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(function) \ 273 (IRO[52].base + ((funcId) * IRO[52].m1))
299 (IS_E1H_OFFSET ? (0x8083 + (function * 0x8)) : (0x1c23 + \
300 (function * 0x8)))
301#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(function) \
302 (IS_E1H_OFFSET ? (0x8084 + (function * 0x8)) : (0x1c24 + \
303 (function * 0x8)))
304#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(function) \
305 (IS_E1H_OFFSET ? (0x8085 + (function * 0x8)) : (0x1c25 + \
306 (function * 0x8)))
307#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(function) \
308 (IS_E1H_OFFSET ? (0x8086 + (function * 0x8)) : (0x1c26 + \
309 (function * 0x8)))
310#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
311 (IS_E1H_OFFSET ? (0x8004 + (function * 0x8)) : (0x1c04 + \
312 (function * 0x8)))
313#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
314 (IS_E1H_OFFSET ? (0x8002 + (function * 0x8)) : (0x1c02 + \
315 (function * 0x8)))
316#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
317 (IS_E1H_OFFSET ? (0x8000 + (function * 0x8)) : (0x1c00 + \
318 (function * 0x8)))
319#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \
320 (IS_E1H_OFFSET ? (0x80c4 + (function * 0x8)) : (0x1c34 + \
321 (function * 0x8)))
322#define XSTORM_ISCSI_SQ_SIZE_OFFSET(function) \
323 (IS_E1H_OFFSET ? (0x80c2 + (function * 0x8)) : (0x1c32 + \
324 (function * 0x8)))
325#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(function) \
326 (IS_E1H_OFFSET ? (0x8043 + (function * 0x8)) : (0x1c13 + \
327 (function * 0x8)))
328#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \
329 (IS_E1H_OFFSET ? (0x8042 + (function * 0x8)) : (0x1c12 + \
330 (function * 0x8)))
331#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(function) \
332 (IS_E1H_OFFSET ? (0x8041 + (function * 0x8)) : (0x1c11 + \
333 (function * 0x8)))
334#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(function) \
335 (IS_E1H_OFFSET ? (0x8040 + (function * 0x8)) : (0x1c10 + \
336 (function * 0x8)))
337#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
338 (IS_E1H_OFFSET ? (0xc000 + (port * 0x360) + (stats_counter_id * \
339 0x30)) : (0x3378 + (port * 0x360) + (stats_counter_id * 0x30)))
340#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \
341 (IS_E1H_OFFSET ? (0x2548 + (function * 0x90)) : (0x3b20 + \
342 (function * 0x90)))
343#define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \
344 (IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \
345 (function * 0x10)))
346#define XSTORM_SPQ_PROD_OFFSET(function) \
347 (IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \
348 (function * 0x10)))
349#define XSTORM_STATS_FLAGS_OFFSET(function) \
350 (IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3a40 + \
351 (function * 0x8)))
352#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port) \
353 (IS_E1H_OFFSET ? (0x4000 + (port * 0x8)) : (0x1960 + (port * 0x8)))
354#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port) \
355 (IS_E1H_OFFSET ? (0x4001 + (port * 0x8)) : (0x1961 + (port * 0x8)))
356#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(function) \
357 (IS_E1H_OFFSET ? (0x4060 + ((function>>1) * 0x8) + ((function&1) \
358 * 0x4)) : (0x1978 + (function * 0x4)))
359#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 274#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
360 275
361/**
362* This file defines HSI constants for the ETH flow
363*/
364#ifdef _EVEREST_MICROCODE
365#include "microcode_constants.h"
366#include "eth_rx_bd.h"
367#include "eth_tx_bd.h"
368#include "eth_rx_cqe.h"
369#include "eth_rx_sge.h"
370#include "eth_rx_cqe_next_page.h"
371#endif
372
373/* RSS hash types */ 276/* RSS hash types */
374#define DEFAULT_HASH_TYPE 0 277#define DEFAULT_HASH_TYPE 0
375#define IPV4_HASH_TYPE 1 278#define IPV4_HASH_TYPE 1
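Editor's note: the rewritten bnx2x_fw_defs.h is the visible half of the new HSI: instead of IS_E1H_OFFSET ternaries baked to absolute addresses, every storm-RAM location is computed from an IRO ("internal RAM offsets") table shipped with the firmware, as base plus index-times-stride terms. A sketch of the shape such lookups take; the field widths and both table entries below are assumptions, since the real table comes from the firmware headers via bnx2x_init.h:

#include <stdint.h>
#include <stdio.h>

struct iro {
	uint32_t base; /* RAM offset of the first instance */
	uint16_t m1;   /* stride for the first index       */
	uint16_t m2;   /* stride for the second index      */
	uint16_t m3;   /* stride for the third index       */
	uint16_t size; /* size of one instance             */
};

/* two made-up entries standing in for the FW-generated IRO[] */
static const struct iro IRO[] = {
	{ 0x1000, 0x40, 0x0, 0x0, 0x10 },
	{ 0x2000, 0x80, 0x8, 0x0, 0x20 },
};

#define DEMO_STATUS_BLOCK_OFFSET(sbId) \
	(IRO[0].base + ((sbId) * IRO[0].m1))
#define DEMO_EQ_CONS_OFFSET(pfId, eqId) \
	(IRO[1].base + ((pfId) * IRO[1].m1) + ((eqId) * IRO[1].m2))

int main(void)
{
	printf("sb 3      -> 0x%x\n", (unsigned)DEMO_STATUS_BLOCK_OFFSET(3));
	printf("pf 1/eq 2 -> 0x%x\n", (unsigned)DEMO_EQ_CONS_OFFSET(1, 2));
	return 0;
}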
@@ -389,11 +292,17 @@
389#define U_ETH_NUM_OF_SGES_TO_FETCH 8 292#define U_ETH_NUM_OF_SGES_TO_FETCH 8
390#define U_ETH_MAX_SGES_FOR_PACKET 3 293#define U_ETH_MAX_SGES_FOR_PACKET 3
391 294
295/*Tx params*/
296#define X_ETH_NO_VLAN 0
297#define X_ETH_OUTBAND_VLAN 1
298#define X_ETH_INBAND_VLAN 2
392/* Rx ring params */ 299/* Rx ring params */
393#define U_ETH_LOCAL_BD_RING_SIZE 8 300#define U_ETH_LOCAL_BD_RING_SIZE 8
394#define U_ETH_LOCAL_SGE_RING_SIZE 10 301#define U_ETH_LOCAL_SGE_RING_SIZE 10
395#define U_ETH_SGL_SIZE 8 302#define U_ETH_SGL_SIZE 8
396 303 /* The fw will pad the buffer with this value, so the IP header \
304 will be aligned to 4 bytes */
305#define IP_HEADER_ALIGNMENT_PADDING 2
397 306
398#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \ 307#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
399 (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1)) 308 (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
@@ -409,16 +318,15 @@
409#define U_ETH_UNDEFINED_Q 0xFF 318#define U_ETH_UNDEFINED_Q 0xFF
410 319
411/* values of command IDs in the ramrod message */ 320/* values of command IDs in the ramrod message */
412#define RAMROD_CMD_ID_ETH_PORT_SETUP 80 321#define RAMROD_CMD_ID_ETH_UNUSED 0
413#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 85 322#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 1
414#define RAMROD_CMD_ID_ETH_STAT_QUERY 90 323#define RAMROD_CMD_ID_ETH_UPDATE 2
415#define RAMROD_CMD_ID_ETH_UPDATE 100 324#define RAMROD_CMD_ID_ETH_HALT 3
416#define RAMROD_CMD_ID_ETH_HALT 105 325#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 4
417#define RAMROD_CMD_ID_ETH_SET_MAC 110 326#define RAMROD_CMD_ID_ETH_ACTIVATE 5
418#define RAMROD_CMD_ID_ETH_CFC_DEL 115 327#define RAMROD_CMD_ID_ETH_DEACTIVATE 6
419#define RAMROD_CMD_ID_ETH_PORT_DEL 120 328#define RAMROD_CMD_ID_ETH_EMPTY 7
420#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 125 329#define RAMROD_CMD_ID_ETH_TERMINATE 8
421
422 330
423/* command values for set mac command */ 331/* command values for set mac command */
424#define T_ETH_MAC_COMMAND_SET 0 332#define T_ETH_MAC_COMMAND_SET 0
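Note that the ETH ramrod command IDs above collapse from sparse values (80, 85, 90, ...) to a dense 0-8 range; PORT_SETUP, STAT_QUERY, SET_MAC, CFC_DEL and PORT_DEL disappear here, their roles moving to the common ramrods and event ring introduced later in this patch. One practical consequence, sketched with a hypothetical lookup table (not driver code): dense IDs can index an array directly instead of feeding a sparse switch.

	/* Hypothetical table keyed on the new dense command IDs. */
	static const char * const eth_ramrod_names[] = {
		[RAMROD_CMD_ID_ETH_UNUSED]		= "unused",
		[RAMROD_CMD_ID_ETH_CLIENT_SETUP]	= "client setup",
		[RAMROD_CMD_ID_ETH_UPDATE]		= "update",
		[RAMROD_CMD_ID_ETH_HALT]		= "halt",
		[RAMROD_CMD_ID_ETH_FORWARD_SETUP]	= "forward setup",
		[RAMROD_CMD_ID_ETH_ACTIVATE]		= "activate",
		[RAMROD_CMD_ID_ETH_DEACTIVATE]		= "deactivate",
		[RAMROD_CMD_ID_ETH_EMPTY]		= "empty",
		[RAMROD_CMD_ID_ETH_TERMINATE]		= "terminate",
	};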
@@ -431,7 +339,9 @@
431 339
432/* Maximal L2 clients supported */ 340/* Maximal L2 clients supported */
433#define ETH_MAX_RX_CLIENTS_E1 18 341#define ETH_MAX_RX_CLIENTS_E1 18
434#define ETH_MAX_RX_CLIENTS_E1H 26 342#define ETH_MAX_RX_CLIENTS_E1H 28
343
344#define MAX_STAT_COUNTER_ID ETH_MAX_RX_CLIENTS_E1H
435 345
436/* Maximal aggregation queues supported */ 346/* Maximal aggregation queues supported */
437#define ETH_MAX_AGGREGATION_QUEUES_E1 32 347#define ETH_MAX_AGGREGATION_QUEUES_E1 32
@@ -443,6 +353,20 @@
443#define ETH_RSS_MODE_VLAN_PRI 2 353#define ETH_RSS_MODE_VLAN_PRI 2
444#define ETH_RSS_MODE_E1HOV_PRI 3 354#define ETH_RSS_MODE_E1HOV_PRI 3
445#define ETH_RSS_MODE_IP_DSCP 4 355#define ETH_RSS_MODE_IP_DSCP 4
356#define ETH_RSS_MODE_E2_INTEG 5
357
358
359/* ETH vlan filtering modes */
360#define ETH_VLAN_FILTER_ANY_VLAN 0 /* Don't filter by vlan */
361#define ETH_VLAN_FILTER_SPECIFIC_VLAN \
362 1 /* Only the vlan_id is allowed */
363#define ETH_VLAN_FILTER_CLASSIFY \
364 2 /* vlan will be added to CAM for classification */
365
366/* Fast path CQE selection */
367#define ETH_FP_CQE_REGULAR 0
368#define ETH_FP_CQE_SGL 1
369#define ETH_FP_CQE_RAW 2
446 370
447 371
448/** 372/**
@@ -458,6 +382,7 @@
458#define RESERVED_CONNECTION_TYPE_0 5 382#define RESERVED_CONNECTION_TYPE_0 5
459#define RESERVED_CONNECTION_TYPE_1 6 383#define RESERVED_CONNECTION_TYPE_1 6
460#define RESERVED_CONNECTION_TYPE_2 7 384#define RESERVED_CONNECTION_TYPE_2 7
385#define NONE_CONNECTION_TYPE 8
461 386
462 387
463#define PROTOCOL_STATE_BIT_OFFSET 6 388#define PROTOCOL_STATE_BIT_OFFSET 6
@@ -466,6 +391,16 @@
466#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) 391#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
467#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) 392#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
468 393
394/* values of command IDs in the ramrod message */
395#define RAMROD_CMD_ID_COMMON_FUNCTION_START 1
396#define RAMROD_CMD_ID_COMMON_FUNCTION_STOP 2
397#define RAMROD_CMD_ID_COMMON_CFC_DEL 3
398#define RAMROD_CMD_ID_COMMON_CFC_DEL_WB 4
399#define RAMROD_CMD_ID_COMMON_SET_MAC 5
400#define RAMROD_CMD_ID_COMMON_STAT_QUERY 6
401#define RAMROD_CMD_ID_COMMON_STOP_TRAFFIC 7
402#define RAMROD_CMD_ID_COMMON_START_TRAFFIC 8
403
469/* microcode fixed page page size 4K (chains and ring segments) */ 404/* microcode fixed page page size 4K (chains and ring segments) */
470#define MC_PAGE_SIZE 4096 405#define MC_PAGE_SIZE 4096
471 406
@@ -473,46 +408,26 @@
473/* Host coalescing constants */ 408/* Host coalescing constants */
474#define HC_IGU_BC_MODE 0 409#define HC_IGU_BC_MODE 0
475#define HC_IGU_NBC_MODE 1 410#define HC_IGU_NBC_MODE 1
411/* Host coalescing constants. E1 includes E1H as well */
412
413/* Number of indices per slow-path SB */
414#define HC_SP_SB_MAX_INDICES 16
415
416/* Number of indices per SB */
417#define HC_SB_MAX_INDICES_E1X 8
418#define HC_SB_MAX_INDICES_E2 8
419
420#define HC_SB_MAX_SB_E1X 32
421#define HC_SB_MAX_SB_E2 136
422
423#define HC_SP_SB_ID 0xde
476 424
477#define HC_REGULAR_SEGMENT 0 425#define HC_REGULAR_SEGMENT 0
478#define HC_DEFAULT_SEGMENT 1 426#define HC_DEFAULT_SEGMENT 1
427#define HC_SB_MAX_SM 2
479 428
480/* index numbers */ 429#define HC_SB_MAX_DYNAMIC_INDICES 4
481#define HC_USTORM_DEF_SB_NUM_INDICES 8 430#define HC_FUNCTION_DISABLED 0xff
482#define HC_CSTORM_DEF_SB_NUM_INDICES 8
483#define HC_XSTORM_DEF_SB_NUM_INDICES 4
484#define HC_TSTORM_DEF_SB_NUM_INDICES 4
485#define HC_USTORM_SB_NUM_INDICES 4
486#define HC_CSTORM_SB_NUM_INDICES 4
487
488/* index values - which counter to update */
489
490#define HC_INDEX_U_TOE_RX_CQ_CONS 0
491#define HC_INDEX_U_ETH_RX_CQ_CONS 1
492#define HC_INDEX_U_ETH_RX_BD_CONS 2
493#define HC_INDEX_U_FCOE_EQ_CONS 3
494
495#define HC_INDEX_C_TOE_TX_CQ_CONS 0
496#define HC_INDEX_C_ETH_TX_CQ_CONS 1
497#define HC_INDEX_C_ISCSI_EQ_CONS 2
498
499#define HC_INDEX_DEF_X_SPQ_CONS 0
500
501#define HC_INDEX_DEF_C_RDMA_EQ_CONS 0
502#define HC_INDEX_DEF_C_RDMA_NAL_PROD 1
503#define HC_INDEX_DEF_C_ETH_FW_TX_CQ_CONS 2
504#define HC_INDEX_DEF_C_ETH_SLOW_PATH 3
505#define HC_INDEX_DEF_C_ETH_RDMA_CQ_CONS 4
506#define HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS 5
507#define HC_INDEX_DEF_C_ETH_FCOE_CQ_CONS 6
508
509#define HC_INDEX_DEF_U_ETH_RDMA_RX_CQ_CONS 0
510#define HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS 1
511#define HC_INDEX_DEF_U_ETH_RDMA_RX_BD_CONS 2
512#define HC_INDEX_DEF_U_ETH_ISCSI_RX_BD_CONS 3
513#define HC_INDEX_DEF_U_ETH_FCOE_RX_CQ_CONS 4
514#define HC_INDEX_DEF_U_ETH_FCOE_RX_BD_CONS 5
515
516/* used by the driver to get the SB offset */ 431/* used by the driver to get the SB offset */
517#define USTORM_ID 0 432#define USTORM_ID 0
518#define CSTORM_ID 1 433#define CSTORM_ID 1
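The new HC constants above pin down the status block geometry used by the hc_status_block_e1x/_e2 structures added later in this patch. A quick size check, assuming the "3 lines" in those structures' comments means three 8-byte internal-RAM rows (that reading is my inference, not stated in the patch):

	/* 8 indices * 2 bytes + 2 running indices * 2 bytes + 4 reserved
	 * = 24 bytes = 3 eight-byte lines. */
	BUILD_BUG_ON(sizeof(struct hc_status_block_e1x) != 24);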
@@ -529,45 +444,17 @@
529 444
530 445
531/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ 446/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
532#define EMULATION_FREQUENCY_FACTOR 1600
533#define FPGA_FREQUENCY_FACTOR 100
534 447
535#define TIMERS_TICK_SIZE_CHIP (1e-3) 448#define TIMERS_TICK_SIZE_CHIP (1e-3)
536#define TIMERS_TICK_SIZE_EMUL \
537 ((TIMERS_TICK_SIZE_CHIP)/((EMULATION_FREQUENCY_FACTOR)))
538#define TIMERS_TICK_SIZE_FPGA \
539 ((TIMERS_TICK_SIZE_CHIP)/((FPGA_FREQUENCY_FACTOR)))
540 449
541#define TSEMI_CLK1_RESUL_CHIP (1e-3) 450#define TSEMI_CLK1_RESUL_CHIP (1e-3)
542#define TSEMI_CLK1_RESUL_EMUL \
543 ((TSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
544#define TSEMI_CLK1_RESUL_FPGA \
545 ((TSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
546
547#define USEMI_CLK1_RESUL_CHIP (TIMERS_TICK_SIZE_CHIP)
548#define USEMI_CLK1_RESUL_EMUL (TIMERS_TICK_SIZE_EMUL)
549#define USEMI_CLK1_RESUL_FPGA (TIMERS_TICK_SIZE_FPGA)
550 451
551#define XSEMI_CLK1_RESUL_CHIP (1e-3) 452#define XSEMI_CLK1_RESUL_CHIP (1e-3)
552#define XSEMI_CLK1_RESUL_EMUL \
553 ((XSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
554#define XSEMI_CLK1_RESUL_FPGA \
555 ((XSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
556
557#define XSEMI_CLK2_RESUL_CHIP (1e-6)
558#define XSEMI_CLK2_RESUL_EMUL \
559 ((XSEMI_CLK2_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
560#define XSEMI_CLK2_RESUL_FPGA \
561 ((XSEMI_CLK2_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
562 453
563#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6)) 454#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6))
564#define SDM_TIMER_TICK_RESUL_EMUL \
565 ((SDM_TIMER_TICK_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
566#define SDM_TIMER_TICK_RESUL_FPGA \
567 ((SDM_TIMER_TICK_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
568
569 455
570/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ 456/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
457
571#define XSTORM_IP_ID_ROLL_HALF 0x8000 458#define XSTORM_IP_ID_ROLL_HALF 0x8000
572#define XSTORM_IP_ID_ROLL_ALL 0 459#define XSTORM_IP_ID_ROLL_ALL 0
573 460
@@ -576,10 +463,36 @@
576#define NUM_OF_PROTOCOLS 4 463#define NUM_OF_PROTOCOLS 4
577#define NUM_OF_SAFC_BITS 16 464#define NUM_OF_SAFC_BITS 16
578#define MAX_COS_NUMBER 4 465#define MAX_COS_NUMBER 4
579#define MAX_T_STAT_COUNTER_ID 18
580#define MAX_X_STAT_COUNTER_ID 18
581#define MAX_U_STAT_COUNTER_ID 18
582 466
467#define FAIRNESS_COS_WRR_MODE 0
468#define FAIRNESS_COS_ETS_MODE 1
469
470
471/* Priority Flow Control (PFC) */
472#define MAX_PFC_PRIORITIES 8
473#define MAX_PFC_TRAFFIC_TYPES 8
474
475/* Available Traffic Types for Link Layer Flow Control */
476#define LLFC_TRAFFIC_TYPE_NW 0
477#define LLFC_TRAFFIC_TYPE_FCOE 1
478#define LLFC_TRAFFIC_TYPE_ISCSI 2
479 /***************** START OF E2 INTEGRATION \
480 CODE***************************************/
481#define LLFC_TRAFFIC_TYPE_NW_COS1_E2INTEG 3
482 /***************** END OF E2 INTEGRATION \
483 CODE***************************************/
484#define LLFC_TRAFFIC_TYPE_MAX 4
485
 486 /* used by the traffic_type_to_priority[] array to mark a traffic type \
 487 that is not mapped to any priority */
488#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
489
490#define LLFC_MODE_NONE 0
491#define LLFC_MODE_PFC 1
492#define LLFC_MODE_SAFC 2
493
494#define DCB_DISABLED 0
495#define DCB_ENABLED 1
583 496
584#define UNKNOWN_ADDRESS 0 497#define UNKNOWN_ADDRESS 0
585#define UNICAST_ADDRESS 1 498#define UNICAST_ADDRESS 1
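The LLFC additions introduce a traffic-type-to-priority map whose unmapped entries carry the 0xFF marker defined above. A minimal sketch of that convention (the helper and the example priorities are hypothetical):

	static void example_init_llfc_map(u8 map[LLFC_TRAFFIC_TYPE_MAX])
	{
		int i;

		/* Start with every traffic type unmapped, then assign the
		 * types that actually get a PFC priority. */
		for (i = 0; i < LLFC_TRAFFIC_TYPE_MAX; i++)
			map[i] = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
		map[LLFC_TRAFFIC_TYPE_FCOE]  = 3;	/* example priority */
		map[LLFC_TRAFFIC_TYPE_ISCSI] = 4;	/* example priority */
	}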
@@ -587,8 +500,32 @@
587#define BROADCAST_ADDRESS 3 500#define BROADCAST_ADDRESS 3
588 501
589#define SINGLE_FUNCTION 0 502#define SINGLE_FUNCTION 0
590#define MULTI_FUNCTION 1 503#define MULTI_FUNCTION_SD 1
504#define MULTI_FUNCTION_SI 2
591 505
592#define IP_V4 0 506#define IP_V4 0
593#define IP_V6 1 507#define IP_V6 1
594 508
509
510#define C_ERES_PER_PAGE \
511 (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
512#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
513
514#define EVENT_RING_OPCODE_VF_PF_CHANNEL 0
515#define EVENT_RING_OPCODE_FUNCTION_START 1
516#define EVENT_RING_OPCODE_FUNCTION_STOP 2
517#define EVENT_RING_OPCODE_CFC_DEL 3
518#define EVENT_RING_OPCODE_CFC_DEL_WB 4
519#define EVENT_RING_OPCODE_SET_MAC 5
520#define EVENT_RING_OPCODE_STAT_QUERY 6
521#define EVENT_RING_OPCODE_STOP_TRAFFIC 7
522#define EVENT_RING_OPCODE_START_TRAFFIC 8
523#define EVENT_RING_OPCODE_FORWARD_SETUP 9
524
525#define VF_PF_CHANNEL_STATE_READY 0
526#define VF_PF_CHANNEL_STATE_WAITING_FOR_ACK 1
527
528#define VF_PF_CHANNEL_STATE_MAX_NUMBER 2
529
530
531#endif /* BNX2X_FW_DEFS_H */
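The event ring machinery that replaces the per-protocol default status blocks sizes its pages with C_ERES_PER_PAGE/C_ERE_PER_PAGE_MASK above. A worked example under stated assumptions (BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)) yielding an 8-byte element, 4 KB pages): 4096 / 8 = 512 elements per page, so the mask is 0x1FF and the in-page index is a single AND:

	/* Illustrative helper, not from the patch. */
	static inline u16 event_ring_elem_idx(u16 prod)
	{
		return prod & C_ERE_PER_PAGE_MASK;	/* index within the page */
	}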
diff --git a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
index 3f5ee5d7cc2a..f807262911e5 100644
--- a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
@@ -31,6 +31,7 @@ struct bnx2x_fw_file_hdr {
31 struct bnx2x_fw_file_section csem_pram_data; 31 struct bnx2x_fw_file_section csem_pram_data;
32 struct bnx2x_fw_file_section xsem_int_table_data; 32 struct bnx2x_fw_file_section xsem_int_table_data;
33 struct bnx2x_fw_file_section xsem_pram_data; 33 struct bnx2x_fw_file_section xsem_pram_data;
34 struct bnx2x_fw_file_section iro_arr;
34 struct bnx2x_fw_file_section fw_version; 35 struct bnx2x_fw_file_section fw_version;
35}; 36};
36 37
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 60d141cd9950..596041cbd977 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -6,6 +6,10 @@
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 */ 8 */
9#ifndef BNX2X_HSI_H
10#define BNX2X_HSI_H
11
12#include "bnx2x_fw_defs.h"
9 13
10struct license_key { 14struct license_key {
11 u32 reserved[6]; 15 u32 reserved[6];
@@ -326,6 +330,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
326 u32 lane_config; 330 u32 lane_config;
327#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff 331#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff
328#define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0 332#define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0
333
329#define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff 334#define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff
330#define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 335#define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
331#define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00 336#define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00
@@ -1016,11 +1021,12 @@ struct shmem_region { /* SharedMem Offset (size) */
1016 struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */ 1021 struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
1017 1022
1018 struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */ 1023 struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
1019 struct drv_func_mb func_mb[E1H_FUNC_MAX]; 1024 struct drv_func_mb func_mb[]; /* 0x684
1025 (44*2/4/8=0x58/0xb0/0x160) */
1026
1027}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */
1020 1028
1021 struct mf_cfg mf_cfg;
1022 1029
1023}; /* 0x6dc */
1024 1030
1025 1031
1026struct shmem2_region { 1032struct shmem2_region {
@@ -1096,7 +1102,7 @@ struct emac_stats {
1096}; 1102};
1097 1103
1098 1104
1099struct bmac_stats { 1105struct bmac1_stats {
1100 u32 tx_stat_gtpkt_lo; 1106 u32 tx_stat_gtpkt_lo;
1101 u32 tx_stat_gtpkt_hi; 1107 u32 tx_stat_gtpkt_hi;
1102 u32 tx_stat_gtxpf_lo; 1108 u32 tx_stat_gtxpf_lo;
@@ -1202,8 +1208,8 @@ struct bmac_stats {
1202 1208
1203 1209
1204union mac_stats { 1210union mac_stats {
1205 struct emac_stats emac_stats; 1211 struct emac_stats emac_stats;
1206 struct bmac_stats bmac_stats; 1212 struct bmac1_stats bmac1_stats;
1207}; 1213};
1208 1214
1209 1215
@@ -1377,17 +1383,17 @@ struct host_func_stats {
1377}; 1383};
1378 1384
1379 1385
1380#define BCM_5710_FW_MAJOR_VERSION 5 1386#define BCM_5710_FW_MAJOR_VERSION 6
1381#define BCM_5710_FW_MINOR_VERSION 2 1387#define BCM_5710_FW_MINOR_VERSION 0
1382#define BCM_5710_FW_REVISION_VERSION 13 1388#define BCM_5710_FW_REVISION_VERSION 34
1383#define BCM_5710_FW_ENGINEERING_VERSION 0 1389#define BCM_5710_FW_ENGINEERING_VERSION 0
1384#define BCM_5710_FW_COMPILE_FLAGS 1 1390#define BCM_5710_FW_COMPILE_FLAGS 1
1385 1391
1386 1392
1387/* 1393/*
1388 * attention bits 1394 * attention bits
1389 */ 1395 */
1390struct atten_def_status_block { 1396struct atten_sp_status_block {
1391 __le32 attn_bits; 1397 __le32 attn_bits;
1392 __le32 attn_bits_ack; 1398 __le32 attn_bits_ack;
1393 u8 status_block_id; 1399 u8 status_block_id;
@@ -1445,7 +1451,60 @@ struct doorbell_set_prod {
1445 1451
1446 1452
1447/* 1453/*
1448 * IGU driver acknowledgement register 1454 * 3 lines. status block
1455 */
1456struct hc_status_block_e1x {
1457 __le16 index_values[HC_SB_MAX_INDICES_E1X];
1458 __le16 running_index[HC_SB_MAX_SM];
1459 u32 rsrv;
1460};
1461
1462/*
1463 * host status block
1464 */
1465struct host_hc_status_block_e1x {
1466 struct hc_status_block_e1x sb;
1467};
1468
1469
1470/*
1471 * 3 lines. status block
1472 */
1473struct hc_status_block_e2 {
1474 __le16 index_values[HC_SB_MAX_INDICES_E2];
1475 __le16 running_index[HC_SB_MAX_SM];
1476 u32 reserved;
1477};
1478
1479/*
1480 * host status block
1481 */
1482struct host_hc_status_block_e2 {
1483 struct hc_status_block_e2 sb;
1484};
1485
1486
1487/*
1488 * 5 lines. slow-path status block
1489 */
1490struct hc_sp_status_block {
1491 __le16 index_values[HC_SP_SB_MAX_INDICES];
1492 __le16 running_index;
1493 __le16 rsrv;
1494 u32 rsrv1;
1495};
1496
1497/*
1498 * host status block
1499 */
1500struct host_sp_status_block {
1501 struct atten_sp_status_block atten_status_block;
1502 struct hc_sp_status_block sp_sb;
1503};
1504
1505
1506/*
1507 * IGU driver acknowledgment register
1449 */ 1508 */
1450struct igu_ack_register { 1509struct igu_ack_register {
1451#if defined(__BIG_ENDIAN) 1510#if defined(__BIG_ENDIAN)
@@ -1603,8 +1662,14 @@ struct dmae_command {
1603#define DMAE_COMMAND_DST_RESET_SHIFT 14 1662#define DMAE_COMMAND_DST_RESET_SHIFT 14
1604#define DMAE_COMMAND_E1HVN (0x3<<15) 1663#define DMAE_COMMAND_E1HVN (0x3<<15)
1605#define DMAE_COMMAND_E1HVN_SHIFT 15 1664#define DMAE_COMMAND_E1HVN_SHIFT 15
1606#define DMAE_COMMAND_RESERVED0 (0x7FFF<<17) 1665#define DMAE_COMMAND_DST_VN (0x3<<17)
1607#define DMAE_COMMAND_RESERVED0_SHIFT 17 1666#define DMAE_COMMAND_DST_VN_SHIFT 17
1667#define DMAE_COMMAND_C_FUNC (0x1<<19)
1668#define DMAE_COMMAND_C_FUNC_SHIFT 19
1669#define DMAE_COMMAND_ERR_POLICY (0x3<<20)
1670#define DMAE_COMMAND_ERR_POLICY_SHIFT 20
1671#define DMAE_COMMAND_RESERVED0 (0x3FF<<22)
1672#define DMAE_COMMAND_RESERVED0_SHIFT 22
1608 u32 src_addr_lo; 1673 u32 src_addr_lo;
1609 u32 src_addr_hi; 1674 u32 src_addr_hi;
1610 u32 dst_addr_lo; 1675 u32 dst_addr_lo;
@@ -1629,11 +1694,11 @@ struct dmae_command {
1629 u16 crc16_c; 1694 u16 crc16_c;
1630#endif 1695#endif
1631#if defined(__BIG_ENDIAN) 1696#if defined(__BIG_ENDIAN)
1632 u16 reserved2; 1697 u16 reserved3;
1633 u16 crc_t10; 1698 u16 crc_t10;
1634#elif defined(__LITTLE_ENDIAN) 1699#elif defined(__LITTLE_ENDIAN)
1635 u16 crc_t10; 1700 u16 crc_t10;
1636 u16 reserved2; 1701 u16 reserved3;
1637#endif 1702#endif
1638#if defined(__BIG_ENDIAN) 1703#if defined(__BIG_ENDIAN)
1639 u16 xsum8; 1704 u16 xsum8;
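The DMAE command word grows three fields carved out of the old reserved area (DST_VN, C_FUNC and ERR_POLICY), shrinking RESERVED0 from bits 17-31 to bits 22-31. A hedged sketch of composing an opcode with the new fields; the semantics implied by the names are my reading, not stated in the patch:

	static u32 example_dmae_opcode(u32 base, u8 dst_vn, u8 err_policy)
	{
		u32 opcode = base;

		opcode |= (dst_vn << DMAE_COMMAND_DST_VN_SHIFT) &
			  DMAE_COMMAND_DST_VN;
		opcode |= DMAE_COMMAND_C_FUNC;	/* per-function completion? */
		opcode |= (err_policy << DMAE_COMMAND_ERR_POLICY_SHIFT) &
			  DMAE_COMMAND_ERR_POLICY;
		return opcode;
	}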
@@ -1654,96 +1719,20 @@ struct double_regpair {
1654 1719
1655 1720
1656/* 1721/*
1657 * The eth storm context of Ustorm (configuration part) 1722 * SDM operation gen command (generate aggregative interrupt)
1658 */ 1723 */
1659struct ustorm_eth_st_context_config { 1724struct sdm_op_gen {
1660#if defined(__BIG_ENDIAN) 1725 __le32 command;
1661 u8 flags; 1726#define SDM_OP_GEN_COMP_PARAM (0x1F<<0)
1662#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0) 1727#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
1663#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0 1728#define SDM_OP_GEN_COMP_TYPE (0x7<<5)
1664#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1) 1729#define SDM_OP_GEN_COMP_TYPE_SHIFT 5
1665#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1 1730#define SDM_OP_GEN_AGG_VECT_IDX (0xFF<<8)
1666#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2) 1731#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8
1667#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2 1732#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1<<16)
1668#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<3) 1733#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16
1669#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 3 1734#define SDM_OP_GEN_RESERVED (0x7FFF<<17)
1670#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4) 1735#define SDM_OP_GEN_RESERVED_SHIFT 17
1671#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
1672 u8 status_block_id;
1673 u8 clientId;
1674 u8 sb_index_numbers;
1675#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0)
1676#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0
1677#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4)
1678#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4
1679#elif defined(__LITTLE_ENDIAN)
1680 u8 sb_index_numbers;
1681#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0)
1682#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0
1683#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4)
1684#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4
1685 u8 clientId;
1686 u8 status_block_id;
1687 u8 flags;
1688#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0)
1689#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0
1690#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1)
1691#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1
1692#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2)
1693#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2
1694#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<3)
1695#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 3
1696#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4)
1697#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
1698#endif
1699#if defined(__BIG_ENDIAN)
1700 u16 bd_buff_size;
1701 u8 statistics_counter_id;
1702 u8 mc_alignment_log_size;
1703#elif defined(__LITTLE_ENDIAN)
1704 u8 mc_alignment_log_size;
1705 u8 statistics_counter_id;
1706 u16 bd_buff_size;
1707#endif
1708#if defined(__BIG_ENDIAN)
1709 u8 __local_sge_prod;
1710 u8 __local_bd_prod;
1711 u16 sge_buff_size;
1712#elif defined(__LITTLE_ENDIAN)
1713 u16 sge_buff_size;
1714 u8 __local_bd_prod;
1715 u8 __local_sge_prod;
1716#endif
1717#if defined(__BIG_ENDIAN)
1718 u16 __sdm_bd_expected_counter;
1719 u8 cstorm_agg_int;
1720 u8 __expected_bds_on_ram;
1721#elif defined(__LITTLE_ENDIAN)
1722 u8 __expected_bds_on_ram;
1723 u8 cstorm_agg_int;
1724 u16 __sdm_bd_expected_counter;
1725#endif
1726#if defined(__BIG_ENDIAN)
1727 u16 __ring_data_ram_addr;
1728 u16 __hc_cstorm_ram_addr;
1729#elif defined(__LITTLE_ENDIAN)
1730 u16 __hc_cstorm_ram_addr;
1731 u16 __ring_data_ram_addr;
1732#endif
1733#if defined(__BIG_ENDIAN)
1734 u8 reserved1;
1735 u8 max_sges_for_packet;
1736 u16 __bd_ring_ram_addr;
1737#elif defined(__LITTLE_ENDIAN)
1738 u16 __bd_ring_ram_addr;
1739 u8 max_sges_for_packet;
1740 u8 reserved1;
1741#endif
1742 u32 bd_page_base_lo;
1743 u32 bd_page_base_hi;
1744 u32 sge_page_base_lo;
1745 u32 sge_page_base_hi;
1746 struct regpair reserved2;
1747}; 1736};
1748 1737
1749/* 1738/*
@@ -1762,20 +1751,13 @@ struct eth_rx_sge {
1762 __le32 addr_hi; 1751 __le32 addr_hi;
1763}; 1752};
1764 1753
1765/* 1754
1766 * Local BDs and SGEs rings (in ETH)
1767 */
1768struct eth_local_rx_rings {
1769 struct eth_rx_bd __local_bd_ring[8];
1770 struct eth_rx_sge __local_sge_ring[10];
1771};
1772 1755
1773/* 1756/*
1774 * The eth storm context of Ustorm 1757 * The eth storm context of Ustorm
1775 */ 1758 */
1776struct ustorm_eth_st_context { 1759struct ustorm_eth_st_context {
1777 struct ustorm_eth_st_context_config common; 1760 u32 reserved0[48];
1778 struct eth_local_rx_rings __rings;
1779}; 1761};
1780 1762
1781/* 1763/*
@@ -1786,337 +1768,53 @@ struct tstorm_eth_st_context {
1786}; 1768};
1787 1769
1788/* 1770/*
1789 * The eth aggregative context section of Xstorm
1790 */
1791struct xstorm_eth_extra_ag_context_section {
1792#if defined(__BIG_ENDIAN)
1793 u8 __tcp_agg_vars1;
1794 u8 __reserved50;
1795 u16 __mss;
1796#elif defined(__LITTLE_ENDIAN)
1797 u16 __mss;
1798 u8 __reserved50;
1799 u8 __tcp_agg_vars1;
1800#endif
1801 u32 __snd_nxt;
1802 u32 __tx_wnd;
1803 u32 __snd_una;
1804 u32 __reserved53;
1805#if defined(__BIG_ENDIAN)
1806 u8 __agg_val8_th;
1807 u8 __agg_val8;
1808 u16 __tcp_agg_vars2;
1809#elif defined(__LITTLE_ENDIAN)
1810 u16 __tcp_agg_vars2;
1811 u8 __agg_val8;
1812 u8 __agg_val8_th;
1813#endif
1814 u32 __reserved58;
1815 u32 __reserved59;
1816 u32 __reserved60;
1817 u32 __reserved61;
1818#if defined(__BIG_ENDIAN)
1819 u16 __agg_val7_th;
1820 u16 __agg_val7;
1821#elif defined(__LITTLE_ENDIAN)
1822 u16 __agg_val7;
1823 u16 __agg_val7_th;
1824#endif
1825#if defined(__BIG_ENDIAN)
1826 u8 __tcp_agg_vars5;
1827 u8 __tcp_agg_vars4;
1828 u8 __tcp_agg_vars3;
1829 u8 __reserved62;
1830#elif defined(__LITTLE_ENDIAN)
1831 u8 __reserved62;
1832 u8 __tcp_agg_vars3;
1833 u8 __tcp_agg_vars4;
1834 u8 __tcp_agg_vars5;
1835#endif
1836 u32 __tcp_agg_vars6;
1837#if defined(__BIG_ENDIAN)
1838 u16 __agg_misc6;
1839 u16 __tcp_agg_vars7;
1840#elif defined(__LITTLE_ENDIAN)
1841 u16 __tcp_agg_vars7;
1842 u16 __agg_misc6;
1843#endif
1844 u32 __agg_val10;
1845 u32 __agg_val10_th;
1846#if defined(__BIG_ENDIAN)
1847 u16 __reserved3;
1848 u8 __reserved2;
1849 u8 __da_only_cnt;
1850#elif defined(__LITTLE_ENDIAN)
1851 u8 __da_only_cnt;
1852 u8 __reserved2;
1853 u16 __reserved3;
1854#endif
1855};
1856
1857/*
1858 * The eth aggregative context of Xstorm 1771 * The eth aggregative context of Xstorm
1859 */ 1772 */
1860struct xstorm_eth_ag_context { 1773struct xstorm_eth_ag_context {
1861#if defined(__BIG_ENDIAN) 1774 u32 reserved0;
1862 u16 agg_val1;
1863 u8 __agg_vars1;
1864 u8 __state;
1865#elif defined(__LITTLE_ENDIAN)
1866 u8 __state;
1867 u8 __agg_vars1;
1868 u16 agg_val1;
1869#endif
1870#if defined(__BIG_ENDIAN) 1775#if defined(__BIG_ENDIAN)
1871 u8 cdu_reserved; 1776 u8 cdu_reserved;
1872 u8 __agg_vars4; 1777 u8 reserved2;
1873 u8 __agg_vars3; 1778 u16 reserved1;
1874 u8 __agg_vars2;
1875#elif defined(__LITTLE_ENDIAN) 1779#elif defined(__LITTLE_ENDIAN)
1876 u8 __agg_vars2; 1780 u16 reserved1;
1877 u8 __agg_vars3; 1781 u8 reserved2;
1878 u8 __agg_vars4;
1879 u8 cdu_reserved; 1782 u8 cdu_reserved;
1880#endif 1783#endif
1881 u32 __bd_prod; 1784 u32 reserved3[30];
1882#if defined(__BIG_ENDIAN)
1883 u16 __agg_vars5;
1884 u16 __agg_val4_th;
1885#elif defined(__LITTLE_ENDIAN)
1886 u16 __agg_val4_th;
1887 u16 __agg_vars5;
1888#endif
1889 struct xstorm_eth_extra_ag_context_section __extra_section;
1890#if defined(__BIG_ENDIAN)
1891 u16 __agg_vars7;
1892 u8 __agg_val3_th;
1893 u8 __agg_vars6;
1894#elif defined(__LITTLE_ENDIAN)
1895 u8 __agg_vars6;
1896 u8 __agg_val3_th;
1897 u16 __agg_vars7;
1898#endif
1899#if defined(__BIG_ENDIAN)
1900 u16 __agg_val11_th;
1901 u16 __agg_val11;
1902#elif defined(__LITTLE_ENDIAN)
1903 u16 __agg_val11;
1904 u16 __agg_val11_th;
1905#endif
1906#if defined(__BIG_ENDIAN)
1907 u8 __reserved1;
1908 u8 __agg_val6_th;
1909 u16 __agg_val9;
1910#elif defined(__LITTLE_ENDIAN)
1911 u16 __agg_val9;
1912 u8 __agg_val6_th;
1913 u8 __reserved1;
1914#endif
1915#if defined(__BIG_ENDIAN)
1916 u16 __agg_val2_th;
1917 u16 __agg_val2;
1918#elif defined(__LITTLE_ENDIAN)
1919 u16 __agg_val2;
1920 u16 __agg_val2_th;
1921#endif
1922 u32 __agg_vars8;
1923#if defined(__BIG_ENDIAN)
1924 u16 __agg_misc0;
1925 u16 __agg_val4;
1926#elif defined(__LITTLE_ENDIAN)
1927 u16 __agg_val4;
1928 u16 __agg_misc0;
1929#endif
1930#if defined(__BIG_ENDIAN)
1931 u8 __agg_val3;
1932 u8 __agg_val6;
1933 u8 __agg_val5_th;
1934 u8 __agg_val5;
1935#elif defined(__LITTLE_ENDIAN)
1936 u8 __agg_val5;
1937 u8 __agg_val5_th;
1938 u8 __agg_val6;
1939 u8 __agg_val3;
1940#endif
1941#if defined(__BIG_ENDIAN)
1942 u16 __agg_misc1;
1943 u16 __bd_ind_max_val;
1944#elif defined(__LITTLE_ENDIAN)
1945 u16 __bd_ind_max_val;
1946 u16 __agg_misc1;
1947#endif
1948 u32 __reserved57;
1949 u32 __agg_misc4;
1950 u32 __agg_misc5;
1951};
1952
1953/*
1954 * The eth extra aggregative context section of Tstorm
1955 */
1956struct tstorm_eth_extra_ag_context_section {
1957 u32 __agg_val1;
1958#if defined(__BIG_ENDIAN)
1959 u8 __tcp_agg_vars2;
1960 u8 __agg_val3;
1961 u16 __agg_val2;
1962#elif defined(__LITTLE_ENDIAN)
1963 u16 __agg_val2;
1964 u8 __agg_val3;
1965 u8 __tcp_agg_vars2;
1966#endif
1967#if defined(__BIG_ENDIAN)
1968 u16 __agg_val5;
1969 u8 __agg_val6;
1970 u8 __tcp_agg_vars3;
1971#elif defined(__LITTLE_ENDIAN)
1972 u8 __tcp_agg_vars3;
1973 u8 __agg_val6;
1974 u16 __agg_val5;
1975#endif
1976 u32 __reserved63;
1977 u32 __reserved64;
1978 u32 __reserved65;
1979 u32 __reserved66;
1980 u32 __reserved67;
1981 u32 __tcp_agg_vars1;
1982 u32 __reserved61;
1983 u32 __reserved62;
1984 u32 __reserved2;
1985}; 1785};
1986 1786
1987/* 1787/*
1988 * The eth aggregative context of Tstorm 1788 * The eth aggregative context of Tstorm
1989 */ 1789 */
1990struct tstorm_eth_ag_context { 1790struct tstorm_eth_ag_context {
1991#if defined(__BIG_ENDIAN) 1791 u32 __reserved0[14];
1992 u16 __reserved54;
1993 u8 __agg_vars1;
1994 u8 __state;
1995#elif defined(__LITTLE_ENDIAN)
1996 u8 __state;
1997 u8 __agg_vars1;
1998 u16 __reserved54;
1999#endif
2000#if defined(__BIG_ENDIAN)
2001 u16 __agg_val4;
2002 u16 __agg_vars2;
2003#elif defined(__LITTLE_ENDIAN)
2004 u16 __agg_vars2;
2005 u16 __agg_val4;
2006#endif
2007 struct tstorm_eth_extra_ag_context_section __extra_section;
2008}; 1792};
2009 1793
1794
2010/* 1795/*
2011 * The eth aggregative context of Cstorm 1796 * The eth aggregative context of Cstorm
2012 */ 1797 */
2013struct cstorm_eth_ag_context { 1798struct cstorm_eth_ag_context {
2014 u32 __agg_vars1; 1799 u32 __reserved0[10];
2015#if defined(__BIG_ENDIAN)
2016 u8 __aux1_th;
2017 u8 __aux1_val;
2018 u16 __agg_vars2;
2019#elif defined(__LITTLE_ENDIAN)
2020 u16 __agg_vars2;
2021 u8 __aux1_val;
2022 u8 __aux1_th;
2023#endif
2024 u32 __num_of_treated_packet;
2025 u32 __last_packet_treated;
2026#if defined(__BIG_ENDIAN)
2027 u16 __reserved58;
2028 u16 __reserved57;
2029#elif defined(__LITTLE_ENDIAN)
2030 u16 __reserved57;
2031 u16 __reserved58;
2032#endif
2033#if defined(__BIG_ENDIAN)
2034 u8 __reserved62;
2035 u8 __reserved61;
2036 u8 __reserved60;
2037 u8 __reserved59;
2038#elif defined(__LITTLE_ENDIAN)
2039 u8 __reserved59;
2040 u8 __reserved60;
2041 u8 __reserved61;
2042 u8 __reserved62;
2043#endif
2044#if defined(__BIG_ENDIAN)
2045 u16 __reserved64;
2046 u16 __reserved63;
2047#elif defined(__LITTLE_ENDIAN)
2048 u16 __reserved63;
2049 u16 __reserved64;
2050#endif
2051 u32 __reserved65;
2052#if defined(__BIG_ENDIAN)
2053 u16 __agg_vars3;
2054 u16 __rq_inv_cnt;
2055#elif defined(__LITTLE_ENDIAN)
2056 u16 __rq_inv_cnt;
2057 u16 __agg_vars3;
2058#endif
2059#if defined(__BIG_ENDIAN)
2060 u16 __packet_index_th;
2061 u16 __packet_index;
2062#elif defined(__LITTLE_ENDIAN)
2063 u16 __packet_index;
2064 u16 __packet_index_th;
2065#endif
2066}; 1800};
2067 1801
1802
2068/* 1803/*
2069 * The eth aggregative context of Ustorm 1804 * The eth aggregative context of Ustorm
2070 */ 1805 */
2071struct ustorm_eth_ag_context { 1806struct ustorm_eth_ag_context {
2072#if defined(__BIG_ENDIAN) 1807 u32 __reserved0;
2073 u8 __aux_counter_flags;
2074 u8 __agg_vars2;
2075 u8 __agg_vars1;
2076 u8 __state;
2077#elif defined(__LITTLE_ENDIAN)
2078 u8 __state;
2079 u8 __agg_vars1;
2080 u8 __agg_vars2;
2081 u8 __aux_counter_flags;
2082#endif
2083#if defined(__BIG_ENDIAN) 1808#if defined(__BIG_ENDIAN)
2084 u8 cdu_usage; 1809 u8 cdu_usage;
2085 u8 __agg_misc2; 1810 u8 __reserved2;
2086 u16 __agg_misc1; 1811 u16 __reserved1;
2087#elif defined(__LITTLE_ENDIAN) 1812#elif defined(__LITTLE_ENDIAN)
2088 u16 __agg_misc1; 1813 u16 __reserved1;
2089 u8 __agg_misc2; 1814 u8 __reserved2;
2090 u8 cdu_usage; 1815 u8 cdu_usage;
2091#endif 1816#endif
2092 u32 __agg_misc4; 1817 u32 __reserved3[6];
2093#if defined(__BIG_ENDIAN)
2094 u8 __agg_val3_th;
2095 u8 __agg_val3;
2096 u16 __agg_misc3;
2097#elif defined(__LITTLE_ENDIAN)
2098 u16 __agg_misc3;
2099 u8 __agg_val3;
2100 u8 __agg_val3_th;
2101#endif
2102 u32 __agg_val1;
2103 u32 __agg_misc4_th;
2104#if defined(__BIG_ENDIAN)
2105 u16 __agg_val2_th;
2106 u16 __agg_val2;
2107#elif defined(__LITTLE_ENDIAN)
2108 u16 __agg_val2;
2109 u16 __agg_val2_th;
2110#endif
2111#if defined(__BIG_ENDIAN)
2112 u16 __reserved2;
2113 u8 __decision_rules;
2114 u8 __decision_rule_enable_bits;
2115#elif defined(__LITTLE_ENDIAN)
2116 u8 __decision_rule_enable_bits;
2117 u8 __decision_rules;
2118 u16 __reserved2;
2119#endif
2120}; 1818};
2121 1819
2122/* 1820/*
@@ -2140,18 +1838,16 @@ struct timers_block_context {
2140 */ 1838 */
2141struct eth_tx_bd_flags { 1839struct eth_tx_bd_flags {
2142 u8 as_bitfield; 1840 u8 as_bitfield;
2143#define ETH_TX_BD_FLAGS_VLAN_TAG (0x1<<0) 1841#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0)
2144#define ETH_TX_BD_FLAGS_VLAN_TAG_SHIFT 0 1842#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0
2145#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<1) 1843#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1)
2146#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 1 1844#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1
2147#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<2) 1845#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2)
2148#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 2 1846#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2
2149#define ETH_TX_BD_FLAGS_END_BD (0x1<<3)
2150#define ETH_TX_BD_FLAGS_END_BD_SHIFT 3
2151#define ETH_TX_BD_FLAGS_START_BD (0x1<<4) 1847#define ETH_TX_BD_FLAGS_START_BD (0x1<<4)
2152#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4 1848#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4
2153#define ETH_TX_BD_FLAGS_HDR_POOL (0x1<<5) 1849#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5)
2154#define ETH_TX_BD_FLAGS_HDR_POOL_SHIFT 5 1850#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5
2155#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6) 1851#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6)
2156#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6 1852#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6
2157#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7) 1853#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7)
@@ -2166,7 +1862,7 @@ struct eth_tx_start_bd {
2166 __le32 addr_hi; 1862 __le32 addr_hi;
2167 __le16 nbd; 1863 __le16 nbd;
2168 __le16 nbytes; 1864 __le16 nbytes;
2169 __le16 vlan; 1865 __le16 vlan_or_ethertype;
2170 struct eth_tx_bd_flags bd_flags; 1866 struct eth_tx_bd_flags bd_flags;
2171 u8 general_data; 1867 u8 general_data;
2172#define ETH_TX_START_BD_HDR_NBDS (0x3F<<0) 1868#define ETH_TX_START_BD_HDR_NBDS (0x3F<<0)
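In eth_tx_start_bd, `vlan` becomes `vlan_or_ethertype`, and the bd_flags drop the single VLAN_TAG bit (plus END_BD and HDR_POOL) in favor of a 2-bit VLAN_MODE field that takes the X_ETH_*_VLAN values added to bnx2x_fw_defs.h earlier in this patch. A hedged sketch of filling the reworked fields for a VLAN-tagged packet with checksum offload; pairing X_ETH_OUTBAND_VLAN with VLAN_MODE is my inference from the names, not driver code:

	static void example_fill_start_bd(struct eth_tx_start_bd *bd, u16 vlan_tag)
	{
		bd->bd_flags.as_bitfield =
			ETH_TX_BD_FLAGS_START_BD |
			ETH_TX_BD_FLAGS_IP_CSUM |
			ETH_TX_BD_FLAGS_L4_CSUM |
			(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
		bd->vlan_or_ethertype = cpu_to_le16(vlan_tag);
	}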
@@ -2179,48 +1875,48 @@ struct eth_tx_start_bd {
2179 * Tx regular BD structure 1875 * Tx regular BD structure
2180 */ 1876 */
2181struct eth_tx_bd { 1877struct eth_tx_bd {
2182 u32 addr_lo; 1878 __le32 addr_lo;
2183 u32 addr_hi; 1879 __le32 addr_hi;
2184 u16 total_pkt_bytes; 1880 __le16 total_pkt_bytes;
2185 u16 nbytes; 1881 __le16 nbytes;
2186 u8 reserved[4]; 1882 u8 reserved[4];
2187}; 1883};
2188 1884
2189/* 1885/*
2190 * Tx parsing BD structure for ETH,Relevant in START 1886 * Tx parsing BD structure for ETH E1/E1h
2191 */ 1887 */
2192struct eth_tx_parse_bd { 1888struct eth_tx_parse_bd_e1x {
2193 u8 global_data; 1889 u8 global_data;
2194#define ETH_TX_PARSE_BD_IP_HDR_START_OFFSET (0xF<<0) 1890#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
2195#define ETH_TX_PARSE_BD_IP_HDR_START_OFFSET_SHIFT 0 1891#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
2196#define ETH_TX_PARSE_BD_UDP_CS_FLG (0x1<<4) 1892#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4)
2197#define ETH_TX_PARSE_BD_UDP_CS_FLG_SHIFT 4 1893#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4
2198#define ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN (0x1<<5) 1894#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
2199#define ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN_SHIFT 5 1895#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
2200#define ETH_TX_PARSE_BD_LLC_SNAP_EN (0x1<<6) 1896#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6)
2201#define ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT 6 1897#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6
2202#define ETH_TX_PARSE_BD_NS_FLG (0x1<<7) 1898#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7)
2203#define ETH_TX_PARSE_BD_NS_FLG_SHIFT 7 1899#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7
2204 u8 tcp_flags; 1900 u8 tcp_flags;
2205#define ETH_TX_PARSE_BD_FIN_FLG (0x1<<0) 1901#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
2206#define ETH_TX_PARSE_BD_FIN_FLG_SHIFT 0 1902#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
2207#define ETH_TX_PARSE_BD_SYN_FLG (0x1<<1) 1903#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1)
2208#define ETH_TX_PARSE_BD_SYN_FLG_SHIFT 1 1904#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1
2209#define ETH_TX_PARSE_BD_RST_FLG (0x1<<2) 1905#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2)
2210#define ETH_TX_PARSE_BD_RST_FLG_SHIFT 2 1906#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2
2211#define ETH_TX_PARSE_BD_PSH_FLG (0x1<<3) 1907#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3)
2212#define ETH_TX_PARSE_BD_PSH_FLG_SHIFT 3 1908#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3
2213#define ETH_TX_PARSE_BD_ACK_FLG (0x1<<4) 1909#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4)
2214#define ETH_TX_PARSE_BD_ACK_FLG_SHIFT 4 1910#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4
2215#define ETH_TX_PARSE_BD_URG_FLG (0x1<<5) 1911#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5)
2216#define ETH_TX_PARSE_BD_URG_FLG_SHIFT 5 1912#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5
2217#define ETH_TX_PARSE_BD_ECE_FLG (0x1<<6) 1913#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6)
2218#define ETH_TX_PARSE_BD_ECE_FLG_SHIFT 6 1914#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6
2219#define ETH_TX_PARSE_BD_CWR_FLG (0x1<<7) 1915#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
2220#define ETH_TX_PARSE_BD_CWR_FLG_SHIFT 7 1916#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
2221 u8 ip_hlen; 1917 u8 ip_hlen_w;
2222 s8 reserved; 1918 s8 reserved;
2223 __le16 total_hlen; 1919 __le16 total_hlen_w;
2224 __le16 tcp_pseudo_csum; 1920 __le16 tcp_pseudo_csum;
2225 __le16 lso_mss; 1921 __le16 lso_mss;
2226 __le16 ip_id; 1922 __le16 ip_id;
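The parse BD is now explicitly E1/E1H-scoped (eth_tx_parse_bd_e1x), and its length fields gain a _W suffix, which reads as "in 16-bit words" rather than bytes; that interpretation is an inference from the rename, not stated in the patch. A sketch under that assumption:

	static void example_fill_parse_bd(struct eth_tx_parse_bd_e1x *pbd,
					  u16 ip_hlen, u16 total_hlen)
	{
		pbd->ip_hlen_w    = ip_hlen >> 1;	/* bytes -> 16-bit words */
		pbd->total_hlen_w = cpu_to_le16(total_hlen >> 1);
	}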
@@ -2242,79 +1938,23 @@ struct eth_tx_next_bd {
2242union eth_tx_bd_types { 1938union eth_tx_bd_types {
2243 struct eth_tx_start_bd start_bd; 1939 struct eth_tx_start_bd start_bd;
2244 struct eth_tx_bd reg_bd; 1940 struct eth_tx_bd reg_bd;
2245 struct eth_tx_parse_bd parse_bd; 1941 struct eth_tx_parse_bd_e1x parse_bd_e1x;
2246 struct eth_tx_next_bd next_bd; 1942 struct eth_tx_next_bd next_bd;
2247}; 1943};
2248 1944
1945
2249/* 1946/*
2250 * The eth storm context of Xstorm 1947 * The eth storm context of Xstorm
2251 */ 1948 */
2252struct xstorm_eth_st_context { 1949struct xstorm_eth_st_context {
2253 u32 tx_bd_page_base_lo; 1950 u32 reserved0[60];
2254 u32 tx_bd_page_base_hi;
2255#if defined(__BIG_ENDIAN)
2256 u16 tx_bd_cons;
2257 u8 statistics_data;
2258#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0)
2259#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0
2260#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7)
2261#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7
2262 u8 __local_tx_bd_prod;
2263#elif defined(__LITTLE_ENDIAN)
2264 u8 __local_tx_bd_prod;
2265 u8 statistics_data;
2266#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0)
2267#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0
2268#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7)
2269#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7
2270 u16 tx_bd_cons;
2271#endif
2272 u32 __reserved1;
2273 u32 __reserved2;
2274#if defined(__BIG_ENDIAN)
2275 u8 __ram_cache_index;
2276 u8 __double_buffer_client;
2277 u16 __pkt_cons;
2278#elif defined(__LITTLE_ENDIAN)
2279 u16 __pkt_cons;
2280 u8 __double_buffer_client;
2281 u8 __ram_cache_index;
2282#endif
2283#if defined(__BIG_ENDIAN)
2284 u16 __statistics_address;
2285 u16 __gso_next;
2286#elif defined(__LITTLE_ENDIAN)
2287 u16 __gso_next;
2288 u16 __statistics_address;
2289#endif
2290#if defined(__BIG_ENDIAN)
2291 u8 __local_tx_bd_cons;
2292 u8 safc_group_num;
2293 u8 safc_group_en;
2294 u8 __is_eth_conn;
2295#elif defined(__LITTLE_ENDIAN)
2296 u8 __is_eth_conn;
2297 u8 safc_group_en;
2298 u8 safc_group_num;
2299 u8 __local_tx_bd_cons;
2300#endif
2301 union eth_tx_bd_types __bds[13];
2302}; 1951};
2303 1952
2304/* 1953/*
2305 * The eth storm context of Cstorm 1954 * The eth storm context of Cstorm
2306 */ 1955 */
2307struct cstorm_eth_st_context { 1956struct cstorm_eth_st_context {
2308#if defined(__BIG_ENDIAN) 1957 u32 __reserved0[4];
2309 u16 __reserved0;
2310 u8 sb_index_number;
2311 u8 status_block_id;
2312#elif defined(__LITTLE_ENDIAN)
2313 u8 status_block_id;
2314 u8 sb_index_number;
2315 u16 __reserved0;
2316#endif
2317 u32 __reserved1[3];
2318}; 1958};
2319 1959
2320/* 1960/*
@@ -2362,103 +2002,114 @@ struct eth_tx_doorbell {
2362 2002
2363 2003
2364/* 2004/*
2365 * cstorm default status block, generated by ustorm 2005 * client init fc data
2366 */
2367struct cstorm_def_status_block_u {
2368 __le16 index_values[HC_USTORM_DEF_SB_NUM_INDICES];
2369 __le16 status_block_index;
2370 u8 func;
2371 u8 status_block_id;
2372 __le32 __flags;
2373};
2374
2375/*
2376 * cstorm default status block, generated by cstorm
2377 */
2378struct cstorm_def_status_block_c {
2379 __le16 index_values[HC_CSTORM_DEF_SB_NUM_INDICES];
2380 __le16 status_block_index;
2381 u8 func;
2382 u8 status_block_id;
2383 __le32 __flags;
2384};
2385
2386/*
2387 * xstorm status block
2388 */ 2006 */
2389struct xstorm_def_status_block { 2007struct client_init_fc_data {
2390 __le16 index_values[HC_XSTORM_DEF_SB_NUM_INDICES]; 2008 __le16 cqe_pause_thr_low;
2391 __le16 status_block_index; 2009 __le16 cqe_pause_thr_high;
2392 u8 func; 2010 __le16 bd_pause_thr_low;
2393 u8 status_block_id; 2011 __le16 bd_pause_thr_high;
2394 __le32 __flags; 2012 __le16 sge_pause_thr_low;
2013 __le16 sge_pause_thr_high;
2014 __le16 rx_cos_mask;
2015 u8 safc_group_num;
2016 u8 safc_group_en_flg;
2017 u8 traffic_type;
2018 u8 reserved0;
2019 __le16 reserved1;
2020 __le32 reserved2;
2395}; 2021};
2396 2022
2397/*
2398 * tstorm status block
2399 */
2400struct tstorm_def_status_block {
2401 __le16 index_values[HC_TSTORM_DEF_SB_NUM_INDICES];
2402 __le16 status_block_index;
2403 u8 func;
2404 u8 status_block_id;
2405 __le32 __flags;
2406};
2407 2023
2408/* 2024/*
2409 * host status block 2025 * client init ramrod data
2410 */ 2026 */
2411struct host_def_status_block { 2027struct client_init_general_data {
2412 struct atten_def_status_block atten_status_block; 2028 u8 client_id;
2413 struct cstorm_def_status_block_u u_def_status_block; 2029 u8 statistics_counter_id;
2414 struct cstorm_def_status_block_c c_def_status_block; 2030 u8 statistics_en_flg;
2415 struct xstorm_def_status_block x_def_status_block; 2031 u8 is_fcoe_flg;
2416 struct tstorm_def_status_block t_def_status_block; 2032 u8 activate_flg;
2033 u8 sp_client_id;
2034 __le16 reserved0;
2035 __le32 reserved1[2];
2417}; 2036};
2418 2037
2419 2038
2420/* 2039/*
2421 * cstorm status block, generated by ustorm 2040 * client init rx data
2422 */ 2041 */
2423struct cstorm_status_block_u { 2042struct client_init_rx_data {
2424 __le16 index_values[HC_USTORM_SB_NUM_INDICES]; 2043 u8 tpa_en_flg;
2425 __le16 status_block_index; 2044 u8 vmqueue_mode_en_flg;
2426 u8 func; 2045 u8 extra_data_over_sgl_en_flg;
2046 u8 cache_line_alignment_log_size;
2047 u8 enable_dynamic_hc;
2048 u8 max_sges_for_packet;
2049 u8 client_qzone_id;
2050 u8 drop_ip_cs_err_flg;
2051 u8 drop_tcp_cs_err_flg;
2052 u8 drop_ttl0_flg;
2053 u8 drop_udp_cs_err_flg;
2054 u8 inner_vlan_removal_enable_flg;
2055 u8 outer_vlan_removal_enable_flg;
2427 u8 status_block_id; 2056 u8 status_block_id;
2428 __le32 __flags; 2057 u8 rx_sb_index_number;
2058 u8 reserved0[3];
2059 __le16 bd_buff_size;
2060 __le16 sge_buff_size;
2061 __le16 mtu;
2062 struct regpair bd_page_base;
2063 struct regpair sge_page_base;
2064 struct regpair cqe_page_base;
2065 u8 is_leading_rss;
2066 u8 is_approx_mcast;
2067 __le16 max_agg_size;
2068 __le32 reserved2[3];
2069};
2070
2071/*
2072 * client init tx data
2073 */
2074struct client_init_tx_data {
2075 u8 enforce_security_flg;
2076 u8 tx_status_block_id;
2077 u8 tx_sb_index_number;
2078 u8 reserved0;
2079 __le16 mtu;
2080 __le16 reserved1;
2081 struct regpair tx_bd_page_base;
2082 __le32 reserved2[2];
2429}; 2083};
2430 2084
2431/* 2085/*
2432 * cstorm status block, generated by cstorm 2086 * client init ramrod data
2433 */ 2087 */
2434struct cstorm_status_block_c { 2088struct client_init_ramrod_data {
2435 __le16 index_values[HC_CSTORM_SB_NUM_INDICES]; 2089 struct client_init_general_data general;
2436 __le16 status_block_index; 2090 struct client_init_rx_data rx;
2437 u8 func; 2091 struct client_init_tx_data tx;
2438 u8 status_block_id; 2092 struct client_init_fc_data fc;
2439 __le32 __flags;
2440}; 2093};
2441 2094
2095
2442/* 2096/*
2443 * host status block 2097 * The data containing the client ID needed for the ramrod
2444 */ 2098 */
2445struct host_status_block { 2099struct eth_common_ramrod_data {
2446 struct cstorm_status_block_u u_status_block; 2100 u32 client_id;
2447 struct cstorm_status_block_c c_status_block; 2101 u32 reserved1;
2448}; 2102};
2449 2103
2450 2104
2451/* 2105/*
2452 * The data for RSS setup ramrod 2106 * union for sgl and raw data.
2453 */ 2107 */
2454struct eth_client_setup_ramrod_data { 2108union eth_sgl_or_raw_data {
2455 u32 client_id; 2109 __le16 sgl[8];
2456 u8 is_rdma; 2110 u32 raw_data[4];
2457 u8 is_fcoe;
2458 u16 reserved1;
2459}; 2111};
2460 2112
2461
2462/* 2113/*
2463 * regular eth FP CQE parameters struct 2114 * regular eth FP CQE parameters struct
2464 */ 2115 */
@@ -2476,8 +2127,8 @@ struct eth_fast_path_rx_cqe {
2476#define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4 2127#define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4
2477#define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5) 2128#define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5)
2478#define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5 2129#define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5
2479#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) 2130#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x3<<6)
2480#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6 2131#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 6
2481 u8 status_flags; 2132 u8 status_flags;
2482#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) 2133#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
2483#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 2134#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
@@ -2498,7 +2149,7 @@ struct eth_fast_path_rx_cqe {
2498 __le16 pkt_len; 2149 __le16 pkt_len;
2499 __le16 len_on_bd; 2150 __le16 len_on_bd;
2500 struct parsing_flags pars_flags; 2151 struct parsing_flags pars_flags;
2501 __le16 sgl[8]; 2152 union eth_sgl_or_raw_data sgl_or_raw_data;
2502}; 2153};
2503 2154
2504 2155
@@ -2510,11 +2161,10 @@ struct eth_halt_ramrod_data {
2510 u32 reserved0; 2161 u32 reserved0;
2511}; 2162};
2512 2163
2513
2514/* 2164/*
2515 * The data for statistics query ramrod 2165 * The data for statistics query ramrod
2516 */ 2166 */
2517struct eth_query_ramrod_data { 2167struct common_query_ramrod_data {
2518#if defined(__BIG_ENDIAN) 2168#if defined(__BIG_ENDIAN)
2519 u8 reserved0; 2169 u8 reserved0;
2520 u8 collect_port; 2170 u8 collect_port;
@@ -2597,9 +2247,9 @@ struct spe_hdr {
2597 __le16 type; 2247 __le16 type;
2598#define SPE_HDR_CONN_TYPE (0xFF<<0) 2248#define SPE_HDR_CONN_TYPE (0xFF<<0)
2599#define SPE_HDR_CONN_TYPE_SHIFT 0 2249#define SPE_HDR_CONN_TYPE_SHIFT 0
2600#define SPE_HDR_COMMON_RAMROD (0xFF<<8) 2250#define SPE_HDR_FUNCTION_ID (0xFF<<8)
2601#define SPE_HDR_COMMON_RAMROD_SHIFT 8 2251#define SPE_HDR_FUNCTION_ID_SHIFT 8
2602 __le16 reserved; 2252 __le16 reserved1;
2603}; 2253};
2604 2254
2605/* 2255/*
@@ -2607,12 +2257,10 @@ struct spe_hdr {
2607 */ 2257 */
2608union eth_specific_data { 2258union eth_specific_data {
2609 u8 protocol_data[8]; 2259 u8 protocol_data[8];
2610 struct regpair mac_config_addr; 2260 struct regpair client_init_ramrod_init_data;
2611 struct eth_client_setup_ramrod_data client_setup_ramrod_data;
2612 struct eth_halt_ramrod_data halt_ramrod_data; 2261 struct eth_halt_ramrod_data halt_ramrod_data;
2613 struct regpair leading_cqe_addr;
2614 struct regpair update_data_addr; 2262 struct regpair update_data_addr;
2615 struct eth_query_ramrod_data query_ramrod_data; 2263 struct eth_common_ramrod_data common_ramrod_data;
2616}; 2264};
2617 2265
2618/* 2266/*
@@ -2637,7 +2285,7 @@ struct eth_tx_bds_array {
2637 */ 2285 */
2638struct tstorm_eth_function_common_config { 2286struct tstorm_eth_function_common_config {
2639#if defined(__BIG_ENDIAN) 2287#if defined(__BIG_ENDIAN)
2640 u8 leading_client_id; 2288 u8 reserved1;
2641 u8 rss_result_mask; 2289 u8 rss_result_mask;
2642 u16 config_flags; 2290 u16 config_flags;
2643#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) 2291#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
@@ -2650,16 +2298,12 @@ struct tstorm_eth_function_common_config {
2650#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 2298#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
2651#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) 2299#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
2652#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 2300#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
2653#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<7) 2301#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7)
2654#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 7 2302#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7
2655#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<8) 2303#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8)
2656#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8 2304#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8
2657#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9) 2305#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9)
2658#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9 2306#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9
2659#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<10)
2660#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 10
2661#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1F<<11)
2662#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 11
2663#elif defined(__LITTLE_ENDIAN) 2307#elif defined(__LITTLE_ENDIAN)
2664 u16 config_flags; 2308 u16 config_flags;
2665#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) 2309#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
@@ -2672,18 +2316,14 @@ struct tstorm_eth_function_common_config {
2672#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 2316#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
2673#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) 2317#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
2674#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 2318#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
2675#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<7) 2319#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7)
2676#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 7 2320#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7
2677#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<8) 2321#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8)
2678#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8 2322#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8
2679#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9) 2323#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9)
2680#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9 2324#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9
2681#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<10)
2682#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 10
2683#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1F<<11)
2684#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 11
2685 u8 rss_result_mask; 2325 u8 rss_result_mask;
2686 u8 leading_client_id; 2326 u8 reserved1;
2687#endif 2327#endif
2688 u16 vlan_id[2]; 2328 u16 vlan_id[2];
2689}; 2329};
@@ -2731,90 +2371,42 @@ struct mac_configuration_hdr {
2731 u8 length; 2371 u8 length;
2732 u8 offset; 2372 u8 offset;
2733 u16 client_id; 2373 u16 client_id;
2734 u32 reserved1; 2374 u16 echo;
2735}; 2375 u16 reserved1;
2736
2737/*
2738 * MAC address in list for ramrod
2739 */
2740struct tstorm_cam_entry {
2741 __le16 lsb_mac_addr;
2742 __le16 middle_mac_addr;
2743 __le16 msb_mac_addr;
2744 __le16 flags;
2745#define TSTORM_CAM_ENTRY_PORT_ID (0x1<<0)
2746#define TSTORM_CAM_ENTRY_PORT_ID_SHIFT 0
2747#define TSTORM_CAM_ENTRY_RSRVVAL0 (0x7<<1)
2748#define TSTORM_CAM_ENTRY_RSRVVAL0_SHIFT 1
2749#define TSTORM_CAM_ENTRY_RESERVED0 (0xFFF<<4)
2750#define TSTORM_CAM_ENTRY_RESERVED0_SHIFT 4
2751};
2752
2753/*
2754 * MAC filtering: CAM target table entry
2755 */
2756struct tstorm_cam_target_table_entry {
2757 u8 flags;
2758#define TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST (0x1<<0)
2759#define TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST_SHIFT 0
2760#define TSTORM_CAM_TARGET_TABLE_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<1)
2761#define TSTORM_CAM_TARGET_TABLE_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 1
2762#define TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE (0x1<<2)
2763#define TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE_SHIFT 2
2764#define TSTORM_CAM_TARGET_TABLE_ENTRY_RDMA_MAC (0x1<<3)
2765#define TSTORM_CAM_TARGET_TABLE_ENTRY_RDMA_MAC_SHIFT 3
2766#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0 (0xF<<4)
2767#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0_SHIFT 4
2768 u8 reserved1;
2769 u16 vlan_id;
2770 u32 clients_bit_vector;
2771}; 2376};
2772 2377
2773/* 2378/*
2774 * MAC address in list for ramrod 2379 * MAC address in list for ramrod
2775 */ 2380 */
2776struct mac_configuration_entry { 2381struct mac_configuration_entry {
2777 struct tstorm_cam_entry cam_entry;
2778 struct tstorm_cam_target_table_entry target_table_entry;
2779};
2780
2781/*
2782 * MAC filtering configuration command
2783 */
2784struct mac_configuration_cmd {
2785 struct mac_configuration_hdr hdr;
2786 struct mac_configuration_entry config_table[64];
2787};
2788
2789
2790/*
2791 * MAC address in list for ramrod
2792 */
2793struct mac_configuration_entry_e1h {
2794 __le16 lsb_mac_addr; 2382 __le16 lsb_mac_addr;
2795 __le16 middle_mac_addr; 2383 __le16 middle_mac_addr;
2796 __le16 msb_mac_addr; 2384 __le16 msb_mac_addr;
2797 __le16 vlan_id; 2385 __le16 vlan_id;
2798 __le16 e1hov_id; 2386 u8 pf_id;
2799 u8 reserved0;
2800 u8 flags; 2387 u8 flags;
2801#define MAC_CONFIGURATION_ENTRY_E1H_PORT (0x1<<0) 2388#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1<<0)
2802#define MAC_CONFIGURATION_ENTRY_E1H_PORT_SHIFT 0 2389#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0
2803#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE (0x1<<1) 2390#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1<<1)
2804#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE_SHIFT 1 2391#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1
2805#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC (0x1<<2) 2392#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2)
2806#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC_SHIFT 2 2393#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2
2807#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED1 (0x1F<<3) 2394#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4)
2808#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED1_SHIFT 3 2395#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4
2396#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5)
2397#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5
2398#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6)
2399#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6
2400 u16 reserved0;
2809 u32 clients_bit_vector; 2401 u32 clients_bit_vector;
2810}; 2402};
2811 2403
2812/* 2404/*
2813 * MAC filtering configuration command 2405 * MAC filtering configuration command
2814 */ 2406 */
2815struct mac_configuration_cmd_e1h { 2407struct mac_configuration_cmd {
2816 struct mac_configuration_hdr hdr; 2408 struct mac_configuration_hdr hdr;
2817 struct mac_configuration_entry_e1h config_table[32]; 2409 struct mac_configuration_entry config_table[64];
2818}; 2410};
2819 2411
2820 2412
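The E1 and E1H MAC table entries merge into a single mac_configuration_entry: the CAM/target-table split disappears, `e1hov_id` gives way to `pf_id`, and action type, VLAN filtering mode and broadcast now share one flags byte. A sketch of packing a 6-byte MAC into the three little-endian 16-bit fields; the byte order expected by the firmware is an assumption here, and the real driver may swap halves differently:

	static void example_set_mac(struct mac_configuration_entry *e, const u8 *mac)
	{
		e->msb_mac_addr    = cpu_to_le16((mac[0] << 8) | mac[1]);
		e->middle_mac_addr = cpu_to_le16((mac[2] << 8) | mac[3]);
		e->lsb_mac_addr    = cpu_to_le16((mac[4] << 8) | mac[5]);
	}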
@@ -2827,65 +2419,6 @@ struct tstorm_eth_approximate_match_multicast_filtering {
2827 2419
2828 2420
2829/* 2421/*
2830 * Configuration parameters per client in Tstorm
2831 */
2832struct tstorm_eth_client_config {
2833#if defined(__BIG_ENDIAN)
2834 u8 reserved0;
2835 u8 statistics_counter_id;
2836 u16 mtu;
2837#elif defined(__LITTLE_ENDIAN)
2838 u16 mtu;
2839 u8 statistics_counter_id;
2840 u8 reserved0;
2841#endif
2842#if defined(__BIG_ENDIAN)
2843 u16 drop_flags;
2844#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0)
2845#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0
2846#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1)
2847#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1
2848#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2)
2849#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
2850#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
2851#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
2852#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2 (0xFFF<<4)
2853#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2_SHIFT 4
2854 u16 config_flags;
2855#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
2856#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0
2857#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE (0x1<<1)
2858#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
2859#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
2860#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
2861#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x1FFF<<3)
2862#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 3
2863#elif defined(__LITTLE_ENDIAN)
2864 u16 config_flags;
2865#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
2866#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0
2867#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE (0x1<<1)
2868#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
2869#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
2870#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
2871#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x1FFF<<3)
2872#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 3
2873 u16 drop_flags;
2874#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0)
2875#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0
2876#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1)
2877#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1
2878#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2)
2879#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
2880#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
2881#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
2882#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2 (0xFFF<<4)
2883#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2_SHIFT 4
2884#endif
2885};
2886
2887
2888/*
2889 * MAC filtering configuration parameters per port in Tstorm 2422 * MAC filtering configuration parameters per port in Tstorm
2890 */ 2423 */
2891struct tstorm_eth_mac_filter_config { 2424struct tstorm_eth_mac_filter_config {
@@ -2895,8 +2428,8 @@ struct tstorm_eth_mac_filter_config {
2895 u32 mcast_accept_all; 2428 u32 mcast_accept_all;
2896 u32 bcast_drop_all; 2429 u32 bcast_drop_all;
2897 u32 bcast_accept_all; 2430 u32 bcast_accept_all;
2898 u32 strict_vlan;
2899 u32 vlan_filter[2]; 2431 u32 vlan_filter[2];
2432 u32 unmatched_unicast;
2900 u32 reserved; 2433 u32 reserved;
2901}; 2434};
2902 2435
@@ -2919,41 +2452,6 @@ struct tstorm_eth_tpa_exist {
2919 2452
2920 2453
2921/* 2454/*
2922 * rx rings pause data for E1h only
2923 */
2924struct ustorm_eth_rx_pause_data_e1h {
2925#if defined(__BIG_ENDIAN)
2926 u16 bd_thr_low;
2927 u16 cqe_thr_low;
2928#elif defined(__LITTLE_ENDIAN)
2929 u16 cqe_thr_low;
2930 u16 bd_thr_low;
2931#endif
2932#if defined(__BIG_ENDIAN)
2933 u16 cos;
2934 u16 sge_thr_low;
2935#elif defined(__LITTLE_ENDIAN)
2936 u16 sge_thr_low;
2937 u16 cos;
2938#endif
2939#if defined(__BIG_ENDIAN)
2940 u16 bd_thr_high;
2941 u16 cqe_thr_high;
2942#elif defined(__LITTLE_ENDIAN)
2943 u16 cqe_thr_high;
2944 u16 bd_thr_high;
2945#endif
2946#if defined(__BIG_ENDIAN)
2947 u16 reserved0;
2948 u16 sge_thr_high;
2949#elif defined(__LITTLE_ENDIAN)
2950 u16 sge_thr_high;
2951 u16 reserved0;
2952#endif
2953};
2954
2955
2956/*
2957 * Three RX producers for ETH 2455 * Three RX producers for ETH
2958 */ 2456 */
2959struct ustorm_eth_rx_producers { 2457struct ustorm_eth_rx_producers {
@@ -2975,6 +2473,18 @@ struct ustorm_eth_rx_producers {
2975 2473
2976 2474
2977/* 2475/*
2476 * cfc delete event data
2477 */
2478struct cfc_del_event_data {
2479 u32 cid;
2480 u8 error;
2481 u8 reserved0;
2482 u16 reserved1;
2483 u32 reserved2;
2484};
2485
2486
2487/*
2978 * per-port SAFC demo variables 2488 * per-port SAFC demo variables
2979 */ 2489 */
2980struct cmng_flags_per_port { 2490struct cmng_flags_per_port {
@@ -2990,8 +2500,10 @@ struct cmng_flags_per_port {
2990#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL_SHIFT 3 2500#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL_SHIFT 3
2991#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<4) 2501#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<4)
2992#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 4 2502#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 4
2993#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0x7FFFFFF<<5) 2503#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<5)
2994#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 5 2504#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 5
2505#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0x3FFFFFF<<6)
2506#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 6
2995}; 2507};
2996 2508
2997 2509
@@ -3025,30 +2537,92 @@ struct safc_struct_per_port {
3025 u8 __reserved0; 2537 u8 __reserved0;
3026 u16 __reserved1; 2538 u16 __reserved1;
3027#endif 2539#endif
2540 u8 cos_to_traffic_types[MAX_COS_NUMBER];
2541 u32 __reserved2;
3028 u16 cos_to_pause_mask[NUM_OF_SAFC_BITS]; 2542 u16 cos_to_pause_mask[NUM_OF_SAFC_BITS];
3029}; 2543};
3030 2544
3031/* 2545/*
2546 * per-port PFC variables
2547 */
2548struct pfc_struct_per_port {
2549 u8 priority_to_traffic_types[MAX_PFC_PRIORITIES];
2550#if defined(__BIG_ENDIAN)
2551 u16 pfc_pause_quanta_in_nanosec;
2552 u8 __reserved0;
2553 u8 priority_non_pausable_mask;
2554#elif defined(__LITTLE_ENDIAN)
2555 u8 priority_non_pausable_mask;
2556 u8 __reserved0;
2557 u16 pfc_pause_quanta_in_nanosec;
2558#endif
2559};
2560
2561/*
2562 * Priority and cos
2563 */
2564struct priority_cos {
2565#if defined(__BIG_ENDIAN)
2566 u16 reserved1;
2567 u8 cos;
2568 u8 priority;
2569#elif defined(__LITTLE_ENDIAN)
2570 u8 priority;
2571 u8 cos;
2572 u16 reserved1;
2573#endif
2574 u32 reserved2;
2575};
2576
2577/*
3032 * Per-port congestion management variables 2578 * Per-port congestion management variables
3033 */ 2579 */
3034struct cmng_struct_per_port { 2580struct cmng_struct_per_port {
3035 struct rate_shaping_vars_per_port rs_vars; 2581 struct rate_shaping_vars_per_port rs_vars;
3036 struct fairness_vars_per_port fair_vars; 2582 struct fairness_vars_per_port fair_vars;
3037 struct safc_struct_per_port safc_vars; 2583 struct safc_struct_per_port safc_vars;
2584 struct pfc_struct_per_port pfc_vars;
2585#if defined(__BIG_ENDIAN)
2586 u16 __reserved1;
2587 u8 dcb_enabled;
2588 u8 llfc_mode;
2589#elif defined(__LITTLE_ENDIAN)
2590 u8 llfc_mode;
2591 u8 dcb_enabled;
2592 u16 __reserved1;
2593#endif
2594 struct priority_cos
2595 traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES];
3038 struct cmng_flags_per_port flags; 2596 struct cmng_flags_per_port flags;
3039}; 2597};
3040 2598
3041 2599
2600
2601/*
2602 * Dynamic HC counters set by the driver
2603 */
2604struct hc_dynamic_drv_counter {
2605 u32 val[HC_SB_MAX_DYNAMIC_INDICES];
2606};
2607
2608/*
2609 * zone A per-queue data
2610 */
2611struct cstorm_queue_zone_data {
2612 struct hc_dynamic_drv_counter hc_dyn_drv_cnt;
2613 struct regpair reserved[2];
2614};
2615
3042/* 2616/*
3043 * Dynamic host coalescing init parameters 2617 * Dynamic host coalescing init parameters
3044 */ 2618 */
3045struct dynamic_hc_config { 2619struct dynamic_hc_config {
3046 u32 threshold[3]; 2620 u32 threshold[3];
3047 u8 shift_per_protocol[HC_USTORM_SB_NUM_INDICES]; 2621 u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES];
3048 u8 hc_timeout0[HC_USTORM_SB_NUM_INDICES]; 2622 u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES];
3049 u8 hc_timeout1[HC_USTORM_SB_NUM_INDICES]; 2623 u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES];
3050 u8 hc_timeout2[HC_USTORM_SB_NUM_INDICES]; 2624 u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES];
3051 u8 hc_timeout3[HC_USTORM_SB_NUM_INDICES]; 2625 u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES];
3052}; 2626};
3053 2627
3054 2628
@@ -3072,7 +2646,7 @@ struct xstorm_per_client_stats {
3072 * Common statistics collected by the Xstorm (per port) 2646 * Common statistics collected by the Xstorm (per port)
3073 */ 2647 */
3074struct xstorm_common_stats { 2648struct xstorm_common_stats {
3075 struct xstorm_per_client_stats client_statistics[MAX_X_STAT_COUNTER_ID]; 2649 struct xstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
3076}; 2650};
3077 2651
3078/* 2652/*
@@ -3109,7 +2683,7 @@ struct tstorm_per_client_stats {
3109 */ 2683 */
3110struct tstorm_common_stats { 2684struct tstorm_common_stats {
3111 struct tstorm_per_port_stats port_statistics; 2685 struct tstorm_per_port_stats port_statistics;
3112 struct tstorm_per_client_stats client_statistics[MAX_T_STAT_COUNTER_ID]; 2686 struct tstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
3113}; 2687};
3114 2688
3115/* 2689/*
@@ -3130,7 +2704,7 @@ struct ustorm_per_client_stats {
3130 * Protocol-common statistics collected by the Ustorm 2704 * Protocol-common statistics collected by the Ustorm
3131 */ 2705 */
3132struct ustorm_common_stats { 2706struct ustorm_common_stats {
3133 struct ustorm_per_client_stats client_statistics[MAX_U_STAT_COUNTER_ID]; 2707 struct ustorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
3134}; 2708};
3135 2709
3136/* 2710/*
@@ -3144,6 +2718,70 @@ struct eth_stats_query {
3144 2718
3145 2719
3146/* 2720/*
2721 * set mac event data
2722 */
2723struct set_mac_event_data {
2724 u16 echo;
2725 u16 reserved0;
2726 u32 reserved1;
2727 u32 reserved2;
2728};
2729
2730/*
2731 * union for all event ring message types
2732 */
2733union event_data {
2734 struct set_mac_event_data set_mac_event;
2735 struct cfc_del_event_data cfc_del_event;
2736};
2737
2738
2739/*
2740 * per PF event ring data
2741 */
2742struct event_ring_data {
2743 struct regpair base_addr;
2744#if defined(__BIG_ENDIAN)
2745 u8 index_id;
2746 u8 sb_id;
2747 u16 producer;
2748#elif defined(__LITTLE_ENDIAN)
2749 u16 producer;
2750 u8 sb_id;
2751 u8 index_id;
2752#endif
2753 u32 reserved0;
2754};
2755
2756
2757/*
2758 * event ring message element (each element is 128 bits)
2759 */
2760struct event_ring_msg {
2761 u8 opcode;
2762 u8 reserved0;
2763 u16 reserved1;
2764 union event_data data;
2765};
2766
2767/*
2768 * event ring next page element (128 bits)
2769 */
2770struct event_ring_next {
2771 struct regpair addr;
2772 u32 reserved[2];
2773};
2774
2775/*
2776 * union for event ring element types (each element is 128 bits)
2777 */
2778union event_ring_elem {
2779 struct event_ring_msg message;
2780 struct event_ring_next next_page;
2781};
2782
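For orientation, a hedged sketch of how a consumer might walk these elements; how the driver recognizes the last (next-page) slot of a ring page is an assumption here, not spelled out in this hunk:

	/* illustrative only: every element is 128 bits; the last slot of a
	 * page is assumed to be a next_page pointer, all others messages */
	union event_ring_elem *elem = ring_page;	/* assumed page pointer */
	int i;

	for (i = 0; i < elems_per_page - 1; i++, elem++) {
		struct event_ring_msg *msg = &elem->message;
		/* dispatch on msg->opcode, e.g. read
		 * msg->data.cfc_del_event.cid for a CFC delete or
		 * msg->data.set_mac_event.echo for a set-MAC completion */
	}
	/* follow elem->next_page.addr to the next ring page */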
2783
2784/*
3147 * per-vnic fairness variables 2785 * per-vnic fairness variables
3148 */ 2786 */
3149struct fairness_vars_per_vn { 2787struct fairness_vars_per_vn {
@@ -3182,6 +2820,137 @@ struct fw_version {
3182 2820
3183 2821
3184/* 2822/*
2823 * Dynamic Host-Coalescing - Driver(host) counters
2824 */
2825struct hc_dynamic_sb_drv_counters {
2826 u32 dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES];
2827};
2828
2829
2830/*
2831 * 2 bytes. configuration/state parameters for a single protocol index
2832 */
2833struct hc_index_data {
2834#if defined(__BIG_ENDIAN)
2835 u8 flags;
2836#define HC_INDEX_DATA_SM_ID (0x1<<0)
2837#define HC_INDEX_DATA_SM_ID_SHIFT 0
2838#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
2839#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
2840#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
2841#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
2842#define HC_INDEX_DATA_RESERVE (0x1F<<3)
2843#define HC_INDEX_DATA_RESERVE_SHIFT 3
2844 u8 timeout;
2845#elif defined(__LITTLE_ENDIAN)
2846 u8 timeout;
2847 u8 flags;
2848#define HC_INDEX_DATA_SM_ID (0x1<<0)
2849#define HC_INDEX_DATA_SM_ID_SHIFT 0
2850#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
2851#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
2852#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
2853#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
2854#define HC_INDEX_DATA_RESERVE (0x1F<<3)
2855#define HC_INDEX_DATA_RESERVE_SHIFT 3
2856#endif
2857};
2858
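A small sketch of composing the flags byte with the macros above; the timeout value (in HC timer units) is an arbitrary example:

	/* illustrative only: attach the index to state machine sm_id and
	 * enable both static and dynamic host coalescing on it */
	idx->flags = ((sm_id << HC_INDEX_DATA_SM_ID_SHIFT) &
		      HC_INDEX_DATA_SM_ID) |
		     HC_INDEX_DATA_HC_ENABLED |
		     HC_INDEX_DATA_DYNAMIC_HC_ENABLED;
	idx->timeout = 12;	/* arbitrary example value */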
2859
2860/*
2861 * HC state-machine
2862 */
2863struct hc_status_block_sm {
2864#if defined(__BIG_ENDIAN)
2865 u8 igu_seg_id;
2866 u8 igu_sb_id;
2867 u8 timer_value;
2868 u8 __flags;
2869#elif defined(__LITTLE_ENDIAN)
2870 u8 __flags;
2871 u8 timer_value;
2872 u8 igu_sb_id;
2873 u8 igu_seg_id;
2874#endif
2875 u32 time_to_expire;
2876};
2877
2878/*
2879 * hold PCI identification variables- used in various places in firmware
2880 */
2881struct pci_entity {
2882#if defined(__BIG_ENDIAN)
2883 u8 vf_valid;
2884 u8 vf_id;
2885 u8 vnic_id;
2886 u8 pf_id;
2887#elif defined(__LITTLE_ENDIAN)
2888 u8 pf_id;
2889 u8 vnic_id;
2890 u8 vf_id;
2891 u8 vf_valid;
2892#endif
2893};
2894
2895/*
2896 * The fast-path status block meta-data, common to all chips
2897 */
2898struct hc_sb_data {
2899 struct regpair host_sb_addr;
2900 struct hc_status_block_sm state_machine[HC_SB_MAX_SM];
2901 struct pci_entity p_func;
2902#if defined(__BIG_ENDIAN)
2903 u8 rsrv0;
2904 u8 dhc_qzone_id;
2905 u8 __dynamic_hc_level;
2906 u8 same_igu_sb_1b;
2907#elif defined(__LITTLE_ENDIAN)
2908 u8 same_igu_sb_1b;
2909 u8 __dynamic_hc_level;
2910 u8 dhc_qzone_id;
2911 u8 rsrv0;
2912#endif
2913 struct regpair rsrv1[2];
2914};
2915
2916
2917/*
2918 * The fast-path status block meta-data
2919 */
2920struct hc_sp_status_block_data {
2921 struct regpair host_sb_addr;
2922#if defined(__BIG_ENDIAN)
2923 u16 rsrv;
2924 u8 igu_seg_id;
2925 u8 igu_sb_id;
2926#elif defined(__LITTLE_ENDIAN)
2927 u8 igu_sb_id;
2928 u8 igu_seg_id;
2929 u16 rsrv;
2930#endif
2931 struct pci_entity p_func;
2932};
2933
2934
2935/*
2936 * The fast-path status block meta-data
2937 */
2938struct hc_status_block_data_e1x {
2939 struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X];
2940 struct hc_sb_data common;
2941};
2942
2943
2944/*
2945 * The fast-path status block meta-data
2946 */
2947struct hc_status_block_data_e2 {
2948 struct hc_index_data index_data[HC_SB_MAX_INDICES_E2];
2949 struct hc_sb_data common;
2950};
2951
2952
2953/*
3185 * FW version stored in first line of pram 2954 * FW version stored in first line of pram
3186 */ 2955 */
3187struct pram_fw_version { 2956struct pram_fw_version {
@@ -3204,11 +2973,21 @@ struct pram_fw_version {
3204 2973
3205 2974
3206/* 2975/*
2976 * Ethernet slow path element
2977 */
2978union protocol_common_specific_data {
2979 u8 protocol_data[8];
2980 struct regpair phy_address;
2981 struct regpair mac_config_addr;
2982 struct common_query_ramrod_data query_ramrod_data;
2983};
2984
2985/*
3207 * The send queue element 2986 * The send queue element
3208 */ 2987 */
3209struct protocol_common_spe { 2988struct protocol_common_spe {
3210 struct spe_hdr hdr; 2989 struct spe_hdr hdr;
3211 struct regpair phy_address; 2990 union protocol_common_specific_data data;
3212}; 2991};
3213 2992
3214 2993
@@ -3241,7 +3020,7 @@ struct rate_shaping_vars_per_vn {
3241 */ 3020 */
3242struct slow_path_element { 3021struct slow_path_element {
3243 struct spe_hdr hdr; 3022 struct spe_hdr hdr;
3244 u8 protocol_data[8]; 3023 struct regpair protocol_data;
3245}; 3024};
3246 3025
3247 3026
@@ -3254,3 +3033,97 @@ struct stats_indication_flags {
3254}; 3033};
3255 3034
3256 3035
3036/*
3037 * per-port PFC variables
3038 */
3039struct storm_pfc_struct_per_port {
3040#if defined(__BIG_ENDIAN)
3041 u16 mid_mac_addr;
3042 u16 msb_mac_addr;
3043#elif defined(__LITTLE_ENDIAN)
3044 u16 msb_mac_addr;
3045 u16 mid_mac_addr;
3046#endif
3047#if defined(__BIG_ENDIAN)
3048 u16 pfc_pause_quanta_in_nanosec;
3049 u16 lsb_mac_addr;
3050#elif defined(__LITTLE_ENDIAN)
3051 u16 lsb_mac_addr;
3052 u16 pfc_pause_quanta_in_nanosec;
3053#endif
3054};
3055
3056/*
3057 * Per-port congestion management variables
3058 */
3059struct storm_cmng_struct_per_port {
3060 struct storm_pfc_struct_per_port pfc_vars;
3061};
3062
3063
3064/*
3065 * zone A per-queue data
3066 */
3067struct tstorm_queue_zone_data {
3068 struct regpair reserved[4];
3069};
3070
3071
3072/*
3073 * zone B per-VF data
3074 */
3075struct tstorm_vf_zone_data {
3076 struct regpair reserved;
3077};
3078
3079
3080/*
3081 * zone A per-queue data
3082 */
3083struct ustorm_queue_zone_data {
3084 struct ustorm_eth_rx_producers eth_rx_producers;
3085 struct regpair reserved[3];
3086};
3087
3088
3089/*
3090 * zone B per-VF data
3091 */
3092struct ustorm_vf_zone_data {
3093 struct regpair reserved;
3094};
3095
3096
3097/*
3098 * data per VF-PF channel
3099 */
3100struct vf_pf_channel_data {
3101#if defined(__BIG_ENDIAN)
3102 u16 reserved0;
3103 u8 valid;
3104 u8 state;
3105#elif defined(__LITTLE_ENDIAN)
3106 u8 state;
3107 u8 valid;
3108 u16 reserved0;
3109#endif
3110 u32 reserved1;
3111};
3112
3113
3114/*
3115 * zone A per-queue data
3116 */
3117struct xstorm_queue_zone_data {
3118 struct regpair reserved[4];
3119};
3120
3121
3122/*
3123 * zone B per-VF data
3124 */
3125struct xstorm_vf_zone_data {
3126 struct regpair reserved;
3127};
3128
3129#endif /* BNX2X_HSI_H */
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index 65b26cbfe3e7..5ae22e085518 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -148,5 +148,46 @@ union init_op {
148 struct raw_op raw; 148 struct raw_op raw;
149}; 149};
150 150
151#define INITOP_SET 0 /* set the HW directly */
152#define INITOP_CLEAR 1 /* clear the HW directly */
153#define INITOP_INIT 2 /* set the init-value array */
154
155/****************************************************************************
156* ILT management
157****************************************************************************/
158struct ilt_line {
159 dma_addr_t page_mapping;
160 void *page;
161 u32 size;
162};
163
164struct ilt_client_info {
165 u32 page_size;
166 u16 start;
167 u16 end;
168 u16 client_num;
169 u16 flags;
170#define ILT_CLIENT_SKIP_INIT 0x1
171#define ILT_CLIENT_SKIP_MEM 0x2
172};
173
174struct bnx2x_ilt {
175 u32 start_line;
176 struct ilt_line *lines;
177 struct ilt_client_info clients[4];
178#define ILT_CLIENT_CDU 0
179#define ILT_CLIENT_QM 1
180#define ILT_CLIENT_SRC 2
181#define ILT_CLIENT_TM 3
182};
183
184/****************************************************************************
185* SRC configuration
186****************************************************************************/
187struct src_ent {
188 u8 opaque[56];
189 u64 next;
190};
191
151#endif /* BNX2X_INIT_H */ 192#endif /* BNX2X_INIT_H */
152 193
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index 2b1363a6fe78..aae7fea00622 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -151,6 +151,15 @@ static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
151 bnx2x_init_ind_wr(bp, addr, data, len); 151 bnx2x_init_ind_wr(bp, addr, data, len);
152} 152}
153 153
154static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi)
155{
156 u32 wb_write[2];
157
158 wb_write[0] = val_lo;
159 wb_write[1] = val_hi;
160 REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
161}
162
154static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off) 163static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
155{ 164{
156 const u8 *data = NULL; 165 const u8 *data = NULL;
@@ -503,4 +512,333 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
503 } 512 }
504} 513}
505 514
515/****************************************************************************
516* ILT management
517****************************************************************************/
518/*
519 * This code hides the low-level HW interaction for ILT management and
520 * configuration. The API consists of a shadow ILT table which is set by the
521 * driver and a set of routines to use it to configure the HW.
522 *
523 */
524
525/* ILT HW init operations */
526
527/* ILT memory management operations */
528#define ILT_MEMOP_ALLOC 0
529#define ILT_MEMOP_FREE 1
530
531/* the physical address is shifted right 12 bits and a 1=valid bit
532 * is set at the 53rd bit;
533 * then, since this is a wide register(TM),
534 * we split the value into two 32-bit writes
535 */
536#define ILT_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
537#define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
538#define ILT_RANGE(f, l) (((l) << 10) | f)
539
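A worked example of the address split (editor's illustration, not part of the patch), for a page at DMA address 0x1_2345_6000:

	u64 map = 0x123456000ULL;
	u32 lo = ILT_ADDR1(map);	/* 0x00123456: address bits 12..43 */
	u32 hi = ILT_ADDR2(map);	/* 0x00100000: valid bit 20 | address bits 44..63 */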
540static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line,
541 u32 size, u8 memop)
542{
543 if (memop == ILT_MEMOP_FREE) {
544 BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
545 return 0;
546 }
547 BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
548 if (!line->page)
549 return -1;
550 line->size = size;
551 return 0;
552}
553
554
555static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
556{
557 int i, rc;
558 struct bnx2x_ilt *ilt = BP_ILT(bp);
559 struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
560
561 if (!ilt || !ilt->lines)
562 return -1;
563
564 if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
565 return 0;
566
567 for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
568 rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i],
569 ilt_cli->page_size, memop);
570 }
571 return rc;
572}
573
574int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
575{
576 int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
577 if (!rc)
578 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
579 if (!rc)
580 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
581 if (!rc)
582 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
583
584 return rc;
585}
586
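A usage sketch, assuming a load/unload pair as the call sites (those functions are outside this hunk):

	/* illustrative only: allocate backing pages for all four ILT
	 * clients at load time, release them on unload */
	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		return -ENOMEM;	/* assumed error mapping; the helper returns -1 */
	/* ... */
	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);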
587static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
588 dma_addr_t page_mapping)
589{
590 u32 reg;
591
592 if (CHIP_IS_E1(bp))
593 reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8;
594 else
595 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
596
597 bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
598}
599
600static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
601 int idx, u8 initop)
602{
603 dma_addr_t null_mapping;
604 int abs_idx = ilt->start_line + idx;
605
606
607 switch (initop) {
608 case INITOP_INIT:
609 /* set in the init-value array */
610 case INITOP_SET:
611 bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);
612 break;
613 case INITOP_CLEAR:
614 null_mapping = 0;
615 bnx2x_ilt_line_wr(bp, abs_idx, null_mapping);
616 break;
617 }
618}
619
620void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
621 struct ilt_client_info *ilt_cli,
622 u32 ilt_start, u8 initop)
623{
624 u32 start_reg = 0;
625 u32 end_reg = 0;
626
627	/* The boundary is either SET or INIT; CLEAR maps to SET,
628	   and for now SET behaves the same as INIT */
629
630 /* find the appropriate regs */
631 if (CHIP_IS_E1(bp)) {
632 switch (ilt_cli->client_num) {
633 case ILT_CLIENT_CDU:
634 start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
635 break;
636 case ILT_CLIENT_QM:
637 start_reg = PXP2_REG_PSWRQ_QM0_L2P;
638 break;
639 case ILT_CLIENT_SRC:
640 start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
641 break;
642 case ILT_CLIENT_TM:
643 start_reg = PXP2_REG_PSWRQ_TM0_L2P;
644 break;
645 }
646 REG_WR(bp, start_reg + BP_FUNC(bp)*4,
647 ILT_RANGE((ilt_start + ilt_cli->start),
648 (ilt_start + ilt_cli->end)));
649 } else {
650 switch (ilt_cli->client_num) {
651 case ILT_CLIENT_CDU:
652 start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
653 end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
654 break;
655 case ILT_CLIENT_QM:
656 start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
657 end_reg = PXP2_REG_RQ_QM_LAST_ILT;
658 break;
659 case ILT_CLIENT_SRC:
660 start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
661 end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
662 break;
663 case ILT_CLIENT_TM:
664 start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
665 end_reg = PXP2_REG_RQ_TM_LAST_ILT;
666 break;
667 }
668 REG_WR(bp, start_reg, (ilt_start + ilt_cli->start));
669 REG_WR(bp, end_reg, (ilt_start + ilt_cli->end));
670 }
671}
672
673void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
674 struct ilt_client_info *ilt_cli, u8 initop)
675{
676 int i;
677
678 if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
679 return;
680
681 for (i = ilt_cli->start; i <= ilt_cli->end; i++)
682 bnx2x_ilt_line_init_op(bp, ilt, i, initop);
683
684	/* init/clear the ILT boundaries */
685 bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
686}
687
688void bnx2x_ilt_client_init_op(struct bnx2x *bp,
689 struct ilt_client_info *ilt_cli, u8 initop)
690{
691 struct bnx2x_ilt *ilt = BP_ILT(bp);
692
693 bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop);
694}
695
696static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
697 int cli_num, u8 initop)
698{
699 struct bnx2x_ilt *ilt = BP_ILT(bp);
700 struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
701
702 bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
703}
704
705void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
706{
707 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
708 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
709 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
710 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
711}
712
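For context, a hedged sketch of the expected pairing of init ops (the call sites are not in this patch):

	/* illustrative only: program every client's lines and boundaries
	 * during HW init, clear them again on teardown */
	bnx2x_ilt_init_op(bp, INITOP_SET);
	/* ... device runs ... */
	bnx2x_ilt_init_op(bp, INITOP_CLEAR);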
713static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
714 u32 psz_reg, u8 initop)
715{
716 struct bnx2x_ilt *ilt = BP_ILT(bp);
717 struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
718
719 if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
720 return;
721
722 switch (initop) {
723 case INITOP_INIT:
724 /* set in the init-value array */
725 case INITOP_SET:
726 REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12));
727 break;
728 case INITOP_CLEAR:
729 break;
730 }
731}
732
733/*
734 * called during the init common stage; ILT clients should be initialized
735 * prior to calling this function
736 */
737void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
738{
739 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
740 PXP2_REG_RQ_CDU_P_SIZE, initop);
741 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM,
742 PXP2_REG_RQ_QM_P_SIZE, initop);
743 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC,
744 PXP2_REG_RQ_SRC_P_SIZE, initop);
745 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM,
746 PXP2_REG_RQ_TM_P_SIZE, initop);
747}
748
749/****************************************************************************
750* QM initializations
751****************************************************************************/
752#define QM_QUEUES_PER_FUNC 16 /* E1 has 32, but only 16 are used */
753#define QM_INIT_MIN_CID_COUNT 31
754#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
755
756/* called during init port stage */
757void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
758 u8 initop)
759{
760 int port = BP_PORT(bp);
761
762 if (QM_INIT(qm_cid_count)) {
763 switch (initop) {
764 case INITOP_INIT:
765 /* set in the init-value array */
766 case INITOP_SET:
767 REG_WR(bp, QM_REG_CONNNUM_0 + port*4,
768 qm_cid_count/16 - 1);
769 break;
770 case INITOP_CLEAR:
771 break;
772 }
773 }
774}
775
776static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
777{
778 int i;
779 u32 wb_data[2];
780
781 wb_data[0] = wb_data[1] = 0;
782
783 for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
784 REG_WR(bp, QM_REG_BASEADDR + i*4,
785 qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
786 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8,
787 wb_data, 2);
788
789 if (CHIP_IS_E1H(bp)) {
790 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4,
791 qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
792 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
793 wb_data, 2);
794 }
795 }
796}
797
798/* called during init common stage */
799void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
800 u8 initop)
801{
802 if (!QM_INIT(qm_cid_count))
803 return;
804
805 switch (initop) {
806 case INITOP_INIT:
807 /* set in the init-value array */
808 case INITOP_SET:
809 bnx2x_qm_set_ptr_table(bp, qm_cid_count);
810 break;
811 case INITOP_CLEAR:
812 break;
813 }
814}
815
816/****************************************************************************
817* SRC initializations
818****************************************************************************/
819
820/* called during init func stage */
821void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
822 dma_addr_t t2_mapping, int src_cid_count)
823{
824 int i;
825 int port = BP_PORT(bp);
826
827 /* Initialize T2 */
828 for (i = 0; i < src_cid_count-1; i++)
829 t2[i].next = (u64)(t2_mapping + (i+1)*sizeof(struct src_ent));
830
831 /* tell the searcher where the T2 table is */
832 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
833
834 bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16,
835 U64_LO(t2_mapping), U64_HI(t2_mapping));
836
837 bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16,
838 U64_LO((u64)t2_mapping +
839 (src_cid_count-1) * sizeof(struct src_ent)),
840 U64_HI((u64)t2_mapping +
841 (src_cid_count-1) * sizeof(struct src_ent)));
842}
843
506#endif /* BNX2X_INIT_OPS_H */ 844#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index a07a3a6abd40..51d468d430ee 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -28,7 +28,7 @@
28 28
29/********************************************************/ 29/********************************************************/
30#define ETH_HLEN 14 30#define ETH_HLEN 14
31#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/ 31#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */
32#define ETH_MIN_PACKET_SIZE 60 32#define ETH_MIN_PACKET_SIZE 60
33#define ETH_MAX_PACKET_SIZE 1500 33#define ETH_MAX_PACKET_SIZE 1500
34#define ETH_MAX_JUMBO_PACKET_SIZE 9600 34#define ETH_MAX_JUMBO_PACKET_SIZE 9600
@@ -4066,6 +4066,7 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
4066 "verification\n"); 4066 "verification\n");
4067 return -EINVAL; 4067 return -EINVAL;
4068 } 4068 }
4069
4069 fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl); 4070 fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
4070 fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param); 4071 fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
4071 if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) { 4072 if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 3696a4b6547b..119ca871f016 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -149,6 +149,242 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
149* General service functions 149* General service functions
150****************************************************************************/ 150****************************************************************************/
151 151
152static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
153 u32 addr, dma_addr_t mapping)
154{
155 REG_WR(bp, addr, U64_LO(mapping));
156 REG_WR(bp, addr + 4, U64_HI(mapping));
157}
158
159static inline void __storm_memset_fill(struct bnx2x *bp,
160 u32 addr, size_t size, u32 val)
161{
162 int i;
163 for (i = 0; i < size/4; i++)
164 REG_WR(bp, addr + (i * 4), val);
165}
166
167static inline void storm_memset_ustats_zero(struct bnx2x *bp,
168 u8 port, u16 stat_id)
169{
170 size_t size = sizeof(struct ustorm_per_client_stats);
171
172 u32 addr = BAR_USTRORM_INTMEM +
173 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
174
175 __storm_memset_fill(bp, addr, size, 0);
176}
177
178static inline void storm_memset_tstats_zero(struct bnx2x *bp,
179 u8 port, u16 stat_id)
180{
181 size_t size = sizeof(struct tstorm_per_client_stats);
182
183 u32 addr = BAR_TSTRORM_INTMEM +
184 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
185
186 __storm_memset_fill(bp, addr, size, 0);
187}
188
189static inline void storm_memset_xstats_zero(struct bnx2x *bp,
190 u8 port, u16 stat_id)
191{
192 size_t size = sizeof(struct xstorm_per_client_stats);
193
194 u32 addr = BAR_XSTRORM_INTMEM +
195 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
196
197 __storm_memset_fill(bp, addr, size, 0);
198}
199
200
201static inline void storm_memset_spq_addr(struct bnx2x *bp,
202 dma_addr_t mapping, u16 abs_fid)
203{
204 u32 addr = XSEM_REG_FAST_MEMORY +
205 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
206
207 __storm_memset_dma_mapping(bp, addr, mapping);
208}
209
210static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
211{
212 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
213}
214
215static inline void storm_memset_func_cfg(struct bnx2x *bp,
216 struct tstorm_eth_function_common_config *tcfg,
217 u16 abs_fid)
218{
219 size_t size = sizeof(struct tstorm_eth_function_common_config);
220
221 u32 addr = BAR_TSTRORM_INTMEM +
222 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
223
224 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
225}
226
227static inline void storm_memset_xstats_flags(struct bnx2x *bp,
228 struct stats_indication_flags *flags,
229 u16 abs_fid)
230{
231 size_t size = sizeof(struct stats_indication_flags);
232
233 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
234
235 __storm_memset_struct(bp, addr, size, (u32 *)flags);
236}
237
238static inline void storm_memset_tstats_flags(struct bnx2x *bp,
239 struct stats_indication_flags *flags,
240 u16 abs_fid)
241{
242 size_t size = sizeof(struct stats_indication_flags);
243
244 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
245
246 __storm_memset_struct(bp, addr, size, (u32 *)flags);
247}
248
249static inline void storm_memset_ustats_flags(struct bnx2x *bp,
250 struct stats_indication_flags *flags,
251 u16 abs_fid)
252{
253 size_t size = sizeof(struct stats_indication_flags);
254
255 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
256
257 __storm_memset_struct(bp, addr, size, (u32 *)flags);
258}
259
260static inline void storm_memset_cstats_flags(struct bnx2x *bp,
261 struct stats_indication_flags *flags,
262 u16 abs_fid)
263{
264 size_t size = sizeof(struct stats_indication_flags);
265
266 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
267
268 __storm_memset_struct(bp, addr, size, (u32 *)flags);
269}
270
271static inline void storm_memset_xstats_addr(struct bnx2x *bp,
272 dma_addr_t mapping, u16 abs_fid)
273{
274 u32 addr = BAR_XSTRORM_INTMEM +
275 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
276
277 __storm_memset_dma_mapping(bp, addr, mapping);
278}
279
280static inline void storm_memset_tstats_addr(struct bnx2x *bp,
281 dma_addr_t mapping, u16 abs_fid)
282{
283 u32 addr = BAR_TSTRORM_INTMEM +
284 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
285
286 __storm_memset_dma_mapping(bp, addr, mapping);
287}
288
289static inline void storm_memset_ustats_addr(struct bnx2x *bp,
290 dma_addr_t mapping, u16 abs_fid)
291{
292 u32 addr = BAR_USTRORM_INTMEM +
293 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
294
295 __storm_memset_dma_mapping(bp, addr, mapping);
296}
297
298static inline void storm_memset_cstats_addr(struct bnx2x *bp,
299 dma_addr_t mapping, u16 abs_fid)
300{
301 u32 addr = BAR_CSTRORM_INTMEM +
302 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
303
304 __storm_memset_dma_mapping(bp, addr, mapping);
305}
306
307static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
308 u16 pf_id)
309{
310 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
311 pf_id);
312 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
313 pf_id);
314 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
315 pf_id);
316 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
317 pf_id);
318}
319
320static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
321 u8 enable)
322{
323 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
324 enable);
325 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
326 enable);
327 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
328 enable);
329 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
330 enable);
331}
332
333static inline void storm_memset_eq_data(struct bnx2x *bp,
334 struct event_ring_data *eq_data,
335 u16 pfid)
336{
337 size_t size = sizeof(struct event_ring_data);
338
339 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
340
341 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
342}
343
344static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
345 u16 pfid)
346{
347 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
348 REG_WR16(bp, addr, eq_prod);
349}
350
351static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
352 u16 fw_sb_id, u8 sb_index,
353 u8 ticks)
354{
355
356 int index_offset =
357 offsetof(struct hc_status_block_data_e1x, index_data);
358 u32 addr = BAR_CSTRORM_INTMEM +
359 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
360 index_offset +
361 sizeof(struct hc_index_data)*sb_index +
362 offsetof(struct hc_index_data, timeout);
363 REG_WR8(bp, addr, ticks);
364 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
365 port, fw_sb_id, sb_index, ticks);
366}
367static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
368 u16 fw_sb_id, u8 sb_index,
369 u8 disable)
370{
371 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
372 int index_offset =
373 offsetof(struct hc_status_block_data_e1x, index_data);
374 u32 addr = BAR_CSTRORM_INTMEM +
375 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
376 index_offset +
377 sizeof(struct hc_index_data)*sb_index +
378 offsetof(struct hc_index_data, flags);
379 u16 flags = REG_RD16(bp, addr);
380 /* clear and set */
381 flags &= ~HC_INDEX_DATA_HC_ENABLED;
382 flags |= enable_flag;
383 REG_WR16(bp, addr, flags);
384 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
385 port, fw_sb_id, sb_index, disable);
386}
387
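A small usage sketch for the two helpers above; the fw_sb_id variable and the tick value are illustrative, while U_SB_ETH_RX_CQ_INDEX is the Rx CQ index used elsewhere in this patch:

	/* illustrative only: retune, then disable, coalescing on the Rx
	 * CQ index of one status block */
	storm_memset_hc_timeout(bp, BP_PORT(bp), fw_sb_id,
				U_SB_ETH_RX_CQ_INDEX, 12);
	storm_memset_hc_disable(bp, BP_PORT(bp), fw_sb_id,
				U_SB_ETH_RX_CQ_INDEX, 1);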
152/* used only at init 388/* used only at init
153 * locking is done by mcp 389 * locking is done by mcp
154 */ 390 */
@@ -538,7 +774,12 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
538void bnx2x_panic_dump(struct bnx2x *bp) 774void bnx2x_panic_dump(struct bnx2x *bp)
539{ 775{
540 int i; 776 int i;
541 u16 j, start, end; 777 u16 j;
778 struct hc_sp_status_block_data sp_sb_data;
779 int func = BP_FUNC(bp);
780#ifdef BNX2X_STOP_ON_ERROR
781 u16 start = 0, end = 0;
782#endif
542 783
543 bp->stats_state = STATS_STATE_DISABLED; 784 bp->stats_state = STATS_STATE_DISABLED;
544 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); 785 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
@@ -547,44 +788,124 @@ void bnx2x_panic_dump(struct bnx2x *bp)
547 788
548 /* Indices */ 789 /* Indices */
549 /* Common */ 790 /* Common */
550 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)" 791 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
551 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
552 " spq_prod_idx(0x%x)\n", 792 " spq_prod_idx(0x%x)\n",
553 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx, 793 bp->def_idx, bp->def_att_idx,
554 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); 794 bp->attn_state, bp->spq_prod_idx);
795 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
796 bp->def_status_blk->atten_status_block.attn_bits,
797 bp->def_status_blk->atten_status_block.attn_bits_ack,
798 bp->def_status_blk->atten_status_block.status_block_id,
799 bp->def_status_blk->atten_status_block.attn_bits_index);
800 BNX2X_ERR(" def (");
801 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
802 pr_cont("0x%x%s",
803 bp->def_status_blk->sp_sb.index_values[i],
804 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
805
806 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
807 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
808 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
809 i*sizeof(u32));
810
811 pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
812 "pf_id(0x%x) vnic_id(0x%x) "
813 "vf_id(0x%x) vf_valid (0x%x)\n",
814 sp_sb_data.igu_sb_id,
815 sp_sb_data.igu_seg_id,
816 sp_sb_data.p_func.pf_id,
817 sp_sb_data.p_func.vnic_id,
818 sp_sb_data.p_func.vf_id,
819 sp_sb_data.p_func.vf_valid);
820
555 821
556 /* Rx */
557 for_each_queue(bp, i) { 822 for_each_queue(bp, i) {
558 struct bnx2x_fastpath *fp = &bp->fp[i]; 823 struct bnx2x_fastpath *fp = &bp->fp[i];
559 824 int loop;
825 struct hc_status_block_data_e1x sb_data_e1x;
826 struct hc_status_block_sm *hc_sm_p =
827 sb_data_e1x.common.state_machine;
828 struct hc_index_data *hc_index_p =
829 sb_data_e1x.index_data;
830 int data_size;
831 u32 *sb_data_p;
832
833 /* Rx */
560 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)" 834 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
561 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)" 835 " rx_comp_prod(0x%x)"
562 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", 836 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
563 i, fp->rx_bd_prod, fp->rx_bd_cons, 837 i, fp->rx_bd_prod, fp->rx_bd_cons,
564 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod, 838 fp->rx_comp_prod,
565 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); 839 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
566 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)" 840 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
567 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n", 841 " fp_hc_idx(0x%x)\n",
568 fp->rx_sge_prod, fp->last_max_sge, 842 fp->rx_sge_prod, fp->last_max_sge,
569 le16_to_cpu(fp->fp_u_idx), 843 le16_to_cpu(fp->fp_hc_idx));
570 fp->status_blk->u_status_block.status_block_index);
571 }
572
573 /* Tx */
574 for_each_queue(bp, i) {
575 struct bnx2x_fastpath *fp = &bp->fp[i];
576 844
845 /* Tx */
577 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)" 846 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
578 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)" 847 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
579 " *tx_cons_sb(0x%x)\n", 848 " *tx_cons_sb(0x%x)\n",
580 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, 849 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
581 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); 850 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
582 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)" 851
583 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx), 852 loop = HC_SB_MAX_INDICES_E1X;
584 fp->status_blk->c_status_block.status_block_index, 853
585 fp->tx_db.data.prod); 854 /* host sb data */
855
856 BNX2X_ERR(" run indexes (");
857 for (j = 0; j < HC_SB_MAX_SM; j++)
858 pr_cont("0x%x%s",
859 fp->sb_running_index[j],
860 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
861
862 BNX2X_ERR(" indexes (");
863 for (j = 0; j < loop; j++)
864 pr_cont("0x%x%s",
865 fp->sb_index_values[j],
866 (j == loop - 1) ? ")" : " ");
867 /* fw sb data */
868 data_size =
869 sizeof(struct hc_status_block_data_e1x);
870 data_size /= sizeof(u32);
871 sb_data_p = (u32 *)&sb_data_e1x;
872 /* copy sb data in here */
873 for (j = 0; j < data_size; j++)
874 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
875 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
876 j * sizeof(u32));
877
878 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
879 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
880 sb_data_e1x.common.p_func.pf_id,
881 sb_data_e1x.common.p_func.vf_id,
882 sb_data_e1x.common.p_func.vf_valid,
883 sb_data_e1x.common.p_func.vnic_id,
884 sb_data_e1x.common.same_igu_sb_1b);
885
886 /* SB_SMs data */
887 for (j = 0; j < HC_SB_MAX_SM; j++) {
888 pr_cont("SM[%d] __flags (0x%x) "
889 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
890 "time_to_expire (0x%x) "
891 "timer_value(0x%x)\n", j,
892 hc_sm_p[j].__flags,
893 hc_sm_p[j].igu_sb_id,
894 hc_sm_p[j].igu_seg_id,
895 hc_sm_p[j].time_to_expire,
896 hc_sm_p[j].timer_value);
897 }
898
899		/* Indices data */
900 for (j = 0; j < loop; j++) {
901 pr_cont("INDEX[%d] flags (0x%x) "
902 "timeout (0x%x)\n", j,
903 hc_index_p[j].flags,
904 hc_index_p[j].timeout);
905 }
586 } 906 }
587 907
908#ifdef BNX2X_STOP_ON_ERROR
588 /* Rings */ 909 /* Rings */
589 /* Rx */ 910 /* Rx */
590 for_each_queue(bp, i) { 911 for_each_queue(bp, i) {
@@ -642,7 +963,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
642 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]); 963 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
643 } 964 }
644 } 965 }
645 966#endif
646 bnx2x_fw_dump(bp); 967 bnx2x_fw_dump(bp);
647 bnx2x_mc_assert(bp); 968 bnx2x_mc_assert(bp);
648 BNX2X_ERR("end crash dump -----------------\n"); 969 BNX2X_ERR("end crash dump -----------------\n");
@@ -708,7 +1029,7 @@ void bnx2x_int_enable(struct bnx2x *bp)
708 mmiowb(); 1029 mmiowb();
709} 1030}
710 1031
711static void bnx2x_int_disable(struct bnx2x *bp) 1032void bnx2x_int_disable(struct bnx2x *bp)
712{ 1033{
713 int port = BP_PORT(bp); 1034 int port = BP_PORT(bp);
714 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 1035 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -817,76 +1138,35 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
817 fp->index, cid, command, bp->state, 1138 fp->index, cid, command, bp->state,
818 rr_cqe->ramrod_cqe.ramrod_type); 1139 rr_cqe->ramrod_cqe.ramrod_type);
819 1140
820 bp->spq_left++; 1141 switch (command | fp->state) {
821 1142 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
822 if (fp->index) { 1143 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
823 switch (command | fp->state) { 1144 fp->state = BNX2X_FP_STATE_OPEN;
824 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
825 BNX2X_FP_STATE_OPENING):
826 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
827 cid);
828 fp->state = BNX2X_FP_STATE_OPEN;
829 break;
830
831 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
832 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
833 cid);
834 fp->state = BNX2X_FP_STATE_HALTED;
835 break;
836
837 default:
838 BNX2X_ERR("unexpected MC reply (%d) "
839 "fp[%d] state is %x\n",
840 command, fp->index, fp->state);
841 break;
842 }
843 mb(); /* force bnx2x_wait_ramrod() to see the change */
844 return;
845 }
846
847 switch (command | bp->state) {
848 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
849 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
850 bp->state = BNX2X_STATE_OPEN;
851 break; 1145 break;
852 1146
853 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT): 1147 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
854 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n"); 1148 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
855 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
856 fp->state = BNX2X_FP_STATE_HALTED; 1149 fp->state = BNX2X_FP_STATE_HALTED;
857 break; 1150 break;
858 1151
859 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT): 1152 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
860 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid); 1153 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
861 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; 1154 fp->state = BNX2X_FP_STATE_TERMINATED;
862 break; 1155 break;
863 1156
864#ifdef BCM_CNIC 1157 default:
865 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN): 1158 BNX2X_ERR("unexpected MC reply (%d) "
866 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid); 1159 "fp[%d] state is %x\n",
867 bnx2x_cnic_cfc_comp(bp, cid); 1160 command, fp->index, fp->state);
868 break; 1161 break;
869#endif 1162 }
870 1163
871 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): 1164 bp->spq_left++;
872 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
873 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
874 bp->set_mac_pending--;
875 smp_wmb();
876 break;
877 1165
878 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT): 1166 /* push the change in fp->state and towards the memory */
879 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); 1167 smp_wmb();
880 bp->set_mac_pending--;
881 smp_wmb();
882 break;
883 1168
884 default: 1169 return;
885 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
886 command, bp->state);
887 break;
888 }
889 mb(); /* force bnx2x_wait_ramrod() to see the change */
890} 1170}
891 1171
892irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) 1172irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
@@ -917,22 +1197,19 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
917 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) { 1197 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
918 struct bnx2x_fastpath *fp = &bp->fp[i]; 1198 struct bnx2x_fastpath *fp = &bp->fp[i];
919 1199
920 mask = 0x2 << fp->sb_id; 1200 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
921 if (status & mask) { 1201 if (status & mask) {
922 /* Handle Rx and Tx according to SB id */ 1202 /* Handle Rx and Tx according to SB id */
923 prefetch(fp->rx_cons_sb); 1203 prefetch(fp->rx_cons_sb);
924 prefetch(&fp->status_blk->u_status_block.
925 status_block_index);
926 prefetch(fp->tx_cons_sb); 1204 prefetch(fp->tx_cons_sb);
927 prefetch(&fp->status_blk->c_status_block. 1205 prefetch(&fp->sb_running_index[SM_RX_ID]);
928 status_block_index);
929 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 1206 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
930 status &= ~mask; 1207 status &= ~mask;
931 } 1208 }
932 } 1209 }
933 1210
934#ifdef BCM_CNIC 1211#ifdef BCM_CNIC
935 mask = 0x2 << CNIC_SB_ID(bp); 1212 mask = 0x2;
936 if (status & (mask | 0x1)) { 1213 if (status & (mask | 0x1)) {
937 struct cnic_ops *c_ops = NULL; 1214 struct cnic_ops *c_ops = NULL;
938 1215
@@ -1422,7 +1699,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1422 bp->vn_weight_sum = 0; 1699 bp->vn_weight_sum = 0;
1423 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 1700 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1424 int func = 2*vn + port; 1701 int func = 2*vn + port;
1425 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 1702 u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
1426 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1703 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1427 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1704 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1428 1705
@@ -1454,7 +1731,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1454{ 1731{
1455 struct rate_shaping_vars_per_vn m_rs_vn; 1732 struct rate_shaping_vars_per_vn m_rs_vn;
1456 struct fairness_vars_per_vn m_fair_vn; 1733 struct fairness_vars_per_vn m_fair_vn;
1457 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 1734 u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
1458 u16 vn_min_rate, vn_max_rate; 1735 u16 vn_min_rate, vn_max_rate;
1459 int i; 1736 int i;
1460 1737
@@ -1511,7 +1788,83 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1511 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4, 1788 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1512 ((u32 *)(&m_fair_vn))[i]); 1789 ((u32 *)(&m_fair_vn))[i]);
1513} 1790}
1791static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1792{
1793 if (CHIP_REV_IS_SLOW(bp))
1794 return CMNG_FNS_NONE;
1795 if (IS_E1HMF(bp))
1796 return CMNG_FNS_MINMAX;
1797
1798 return CMNG_FNS_NONE;
1799}
1800
1801static void bnx2x_read_mf_cfg(struct bnx2x *bp)
1802{
1803 int vn;
1804
1805 if (BP_NOMCP(bp))
1806 return; /* what should be the default value in this case? */
1807
1808 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1809 int /*abs*/func = 2*vn + BP_PORT(bp);
1810 bp->mf_config =
1811 MF_CFG_RD(bp, func_mf_config[func].config);
1812 }
1813}
1814
1815static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
1816{
1817
1818 if (cmng_type == CMNG_FNS_MINMAX) {
1819 int vn;
1820
1821 /* clear cmng_enables */
1822 bp->cmng.flags.cmng_enables = 0;
1823
1824 /* read mf conf from shmem */
1825 if (read_cfg)
1826 bnx2x_read_mf_cfg(bp);
1827
1828 /* Init rate shaping and fairness contexts */
1829 bnx2x_init_port_minmax(bp);
1830
1831 /* vn_weight_sum and enable fairness if not 0 */
1832 bnx2x_calc_vn_weight_sum(bp);
1833
1834 /* calculate and set min-max rate for each vn */
1835 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1836 bnx2x_init_vn_minmax(bp, vn);
1837
1838 /* always enable rate shaping and fairness */
1839 bp->cmng.flags.cmng_enables |=
1840 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
1841 if (!bp->vn_weight_sum)
1842 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
1843 " fairness will be disabled\n");
1844 return;
1845 }
1846
1847 /* rate shaping and fairness are disabled */
1848 DP(NETIF_MSG_IFUP,
1849 "rate shaping and fairness are disabled\n");
1850}
1851
1852static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
1853{
1854 int port = BP_PORT(bp);
1855 int func;
1856 int vn;
1514 1857
1858 /* Set the attention towards other drivers on the same port */
1859 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1860 if (vn == BP_E1HVN(bp))
1861 continue;
1862
1863 func = ((vn << 1) | port);
1864 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1865 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1866 }
1867}
1515 1868
1516/* This function is called upon link interrupt */ 1869/* This function is called upon link interrupt */
1517static void bnx2x_link_attn(struct bnx2x *bp) 1870static void bnx2x_link_attn(struct bnx2x *bp)
@@ -1669,6 +2022,308 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
1669 return rc; 2022 return rc;
1670} 2023}
1671 2024
2025/* must be called under rtnl_lock */
2026void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2027{
2028 u32 mask = (1 << cl_id);
2029
2030 /* the initial setting is BNX2X_ACCEPT_NONE */
2031 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2032 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2033 u8 unmatched_unicast = 0;
2034
2035 if (filters & BNX2X_PROMISCUOUS_MODE) {
2036 /* promiscuous - accept all, drop none */
2037 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2038 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2039 }
2040 if (filters & BNX2X_ACCEPT_UNICAST) {
2041 /* accept matched ucast */
2042 drop_all_ucast = 0;
2043 }
2044 if (filters & BNX2X_ACCEPT_MULTICAST) {
2045 /* accept matched mcast */
2046 drop_all_mcast = 0;
2047 }
2048 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2049 /* accept all ucast */
2050 drop_all_ucast = 0;
2051 accp_all_ucast = 1;
2052 }
2053 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2054 /* accept all mcast */
2055 drop_all_mcast = 0;
2056 accp_all_mcast = 1;
2057 }
2058 if (filters & BNX2X_ACCEPT_BROADCAST) {
2059 /* accept (all) bcast */
2060 drop_all_bcast = 0;
2061 accp_all_bcast = 1;
2062 }
2063
2064 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2065 bp->mac_filters.ucast_drop_all | mask :
2066 bp->mac_filters.ucast_drop_all & ~mask;
2067
2068 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2069 bp->mac_filters.mcast_drop_all | mask :
2070 bp->mac_filters.mcast_drop_all & ~mask;
2071
2072 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2073 bp->mac_filters.bcast_drop_all | mask :
2074 bp->mac_filters.bcast_drop_all & ~mask;
2075
2076 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2077 bp->mac_filters.ucast_accept_all | mask :
2078 bp->mac_filters.ucast_accept_all & ~mask;
2079
2080 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2081 bp->mac_filters.mcast_accept_all | mask :
2082 bp->mac_filters.mcast_accept_all & ~mask;
2083
2084 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2085 bp->mac_filters.bcast_accept_all | mask :
2086 bp->mac_filters.bcast_accept_all & ~mask;
2087
2088 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2089 bp->mac_filters.unmatched_unicast | mask :
2090 bp->mac_filters.unmatched_unicast & ~mask;
2091}
2092
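A usage sketch of the filter flags (the client id 0 is an arbitrary example; all flag names appear in this patch):

	/* illustrative only: enter promiscuous mode, then fall back to
	 * matched unicast + matched multicast + broadcast */
	bnx2x_rxq_set_mac_filters(bp, 0, BNX2X_PROMISCUOUS_MODE);
	/* ... */
	bnx2x_rxq_set_mac_filters(bp, 0, BNX2X_ACCEPT_UNICAST |
					 BNX2X_ACCEPT_MULTICAST |
					 BNX2X_ACCEPT_BROADCAST);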
2093void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2094{
2095 if (FUNC_CONFIG(p->func_flgs)) {
2096 struct tstorm_eth_function_common_config tcfg = {0};
2097
2098 /* tpa */
2099 if (p->func_flgs & FUNC_FLG_TPA)
2100 tcfg.config_flags |=
2101 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2102
2103 /* set rss flags */
2104 if (p->func_flgs & FUNC_FLG_RSS) {
2105 u16 rss_flgs = (p->rss->mode <<
2106 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2107
2108 if (p->rss->cap & RSS_IPV4_CAP)
2109 rss_flgs |= RSS_IPV4_CAP_MASK;
2110 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2111 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2112 if (p->rss->cap & RSS_IPV6_CAP)
2113 rss_flgs |= RSS_IPV6_CAP_MASK;
2114 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2115 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2116
2117 tcfg.config_flags |= rss_flgs;
2118 tcfg.rss_result_mask = p->rss->result_mask;
2119
2120 }
2121
2122 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2123 }
2124
2125 /* Enable the function in the FW */
2126 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2127 storm_memset_func_en(bp, p->func_id, 1);
2128
2129 /* statistics */
2130 if (p->func_flgs & FUNC_FLG_STATS) {
2131 struct stats_indication_flags stats_flags = {0};
2132 stats_flags.collect_eth = 1;
2133
2134 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2135 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2136
2137 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2138 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2139
2140 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2141 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2142
2143 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2144 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2145 }
2146
2147 /* spq */
2148 if (p->func_flgs & FUNC_FLG_SPQ) {
2149 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2150 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2151 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2152 }
2153}
2154
2155static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2156 struct bnx2x_fastpath *fp)
2157{
2158 u16 flags = 0;
2159
2160 /* calculate queue flags */
2161 flags |= QUEUE_FLG_CACHE_ALIGN;
2162 flags |= QUEUE_FLG_HC;
2163 flags |= IS_E1HMF(bp) ? QUEUE_FLG_OV : 0;
2164
2165#ifdef BCM_VLAN
2166 flags |= QUEUE_FLG_VLAN;
2167 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2168#endif
2169
2170 if (!fp->disable_tpa)
2171 flags |= QUEUE_FLG_TPA;
2172
2173 flags |= QUEUE_FLG_STATS;
2174
2175 return flags;
2176}
2177
2178static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2179 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2180 struct bnx2x_rxq_init_params *rxq_init)
2181{
2182 u16 max_sge = 0;
2183 u16 sge_sz = 0;
2184 u16 tpa_agg_size = 0;
2185
2186 /* calculate queue flags */
2187 u16 flags = bnx2x_get_cl_flags(bp, fp);
2188
2189 if (!fp->disable_tpa) {
2190 pause->sge_th_hi = 250;
2191 pause->sge_th_lo = 150;
2192 tpa_agg_size = min_t(u32,
2193 (min_t(u32, 8, MAX_SKB_FRAGS) *
2194 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2195 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2196 SGE_PAGE_SHIFT;
2197 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2198 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2199 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2200 0xffff);
2201 }
2202
2203 /* pause - not for e1 */
2204 if (!CHIP_IS_E1(bp)) {
2205 pause->bd_th_hi = 350;
2206 pause->bd_th_lo = 250;
2207 pause->rcq_th_hi = 350;
2208 pause->rcq_th_lo = 250;
2209 pause->sge_th_hi = 0;
2210 pause->sge_th_lo = 0;
2211 pause->pri_map = 1;
2212 }
2213
2214 /* rxq setup */
2215 rxq_init->flags = flags;
2216 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2217 rxq_init->dscr_map = fp->rx_desc_mapping;
2218 rxq_init->sge_map = fp->rx_sge_mapping;
2219 rxq_init->rcq_map = fp->rx_comp_mapping;
2220 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2221 rxq_init->mtu = bp->dev->mtu;
2222 rxq_init->buf_sz = bp->rx_buf_size;
2223 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2224 rxq_init->cl_id = fp->cl_id;
2225 rxq_init->spcl_id = fp->cl_id;
2226 rxq_init->stat_id = fp->cl_id;
2227 rxq_init->tpa_agg_sz = tpa_agg_size;
2228 rxq_init->sge_buf_sz = sge_sz;
2229 rxq_init->max_sges_pkt = max_sge;
2230 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2231 rxq_init->fw_sb_id = fp->fw_sb_id;
2232
2233 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2234
2235 rxq_init->cid = HW_CID(bp, fp->cid);
2236
2237 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2238}
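
The TPA block above sizes the scatter-gather machinery from the MTU: pages needed for one MTU-sized buffer, rounded up to whole SGE entries, plus a capped aggregation size. A host-side sketch of that arithmetic with assumed constants (4 KiB SGE pages, one page per SGE entry; the driver's real values come from bnx2x.h):

#include <stdint.h>

#define SGE_PG_SZ    4096u	/* assumed SGE_PAGE_SIZE */
#define SGE_PG_SHIFT 12		/* assumed SGE_PAGE_SHIFT */
#define PG_PER_SGE   1u		/* assumed PAGES_PER_SGE */

/* pages needed for one MTU-sized buffer, rounded up to SGE entries */
static uint16_t max_sges_for_mtu(uint32_t mtu)
{
	uint32_t pages = (mtu + SGE_PG_SZ - 1) >> SGE_PG_SHIFT;

	return (uint16_t)((pages + PG_PER_SGE - 1) / PG_PER_SGE);
}

/* e.g. mtu = 9000: (9000 + 4095) >> 12 = 3 pages -> 3 SGEs; and assuming
 * MAX_SKB_FRAGS >= 8, tpa_agg_size = min(8 * 4096, 0xffff) = 32768 bytes */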
2239
2240static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2241 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2242{
2243 u16 flags = bnx2x_get_cl_flags(bp, fp);
2244
2245 txq_init->flags = flags;
2246 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2247 txq_init->dscr_map = fp->tx_desc_mapping;
2248 txq_init->stat_id = fp->cl_id;
2249 txq_init->cid = HW_CID(bp, fp->cid);
2250 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2251 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2252 txq_init->fw_sb_id = fp->fw_sb_id;
2253 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2254}
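
Note the hc_rate assignments here and in the Rx path above: they convert a coalescing interval in microseconds into a per-second rate (1000000 / interval), e.g. tx_ticks = 50 gives 1000000 / 50 = 20000 status-block updates per second, while a zero interval yields hc_rate = 0.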
2255
2256void bnx2x_pf_init(struct bnx2x *bp)
2257{
2258 struct bnx2x_func_init_params func_init = {0};
2259 struct bnx2x_rss_params rss = {0};
2260 struct event_ring_data eq_data = { {0} };
2261 u16 flags;
2262
2263 /* pf specific setups */
2264 if (!CHIP_IS_E1(bp))
2265 storm_memset_ov(bp, bp->e1hov, BP_FUNC(bp));
2266
2267 /* function setup flags */
2268 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2269
2270 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2271
2272 /*
2273 * Although RSS is meaningless when there is a single HW queue, we
2274 * still need it enabled in order to have the HW Rx hash generated.
2275 *
2276 * if (is_eth_multi(bp))
2277 * flags |= FUNC_FLG_RSS;
2278 */
2279
2280 /* function setup */
2281 if (flags & FUNC_FLG_RSS) {
2282 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2283 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2284 rss.mode = bp->multi_mode;
2285 rss.result_mask = MULTI_MASK;
2286 func_init.rss = &rss;
2287 }
2288
2289 func_init.func_flgs = flags;
2290 func_init.pf_id = BP_FUNC(bp);
2291 func_init.func_id = BP_FUNC(bp);
2292 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2293 func_init.spq_map = bp->spq_mapping;
2294 func_init.spq_prod = bp->spq_prod_idx;
2295
2296 bnx2x_func_init(bp, &func_init);
2297
2298 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2299
2300 /*
2301 * Congestion management values depend on the link rate.
2302 * There is no active link yet, so the initial link rate is set
2303 * to 10 Gbps. When the link comes up, the congestion management
2304 * values are re-calculated according to the actual link rate.
2305 */
2306 bp->link_vars.line_speed = SPEED_10000;
2307 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2308
2309 /* Only the PMF sets the HW */
2310 if (bp->port.pmf)
2311 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2312
2313 /* no rx until link is up */
2314 bp->rx_mode = BNX2X_RX_MODE_NONE;
2315 bnx2x_set_storm_rx_mode(bp);
2316
2317 /* init Event Queue */
2318 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2319 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2320 eq_data.producer = bp->eq_prod;
2321 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2322 eq_data.sb_id = DEF_SB_ID;
2323 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2324}
2325
2326
1672static void bnx2x_e1h_disable(struct bnx2x *bp) 2327static void bnx2x_e1h_disable(struct bnx2x *bp)
1673{ 2328{
1674 int port = BP_PORT(bp); 2329 int port = BP_PORT(bp);
@@ -1695,40 +2350,6 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
1695 */ 2350 */
1696} 2351}
1697 2352
1698static void bnx2x_update_min_max(struct bnx2x *bp)
1699{
1700 int port = BP_PORT(bp);
1701 int vn, i;
1702
1703 /* Init rate shaping and fairness contexts */
1704 bnx2x_init_port_minmax(bp);
1705
1706 bnx2x_calc_vn_weight_sum(bp);
1707
1708 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1709 bnx2x_init_vn_minmax(bp, 2*vn + port);
1710
1711 if (bp->port.pmf) {
1712 int func;
1713
1714 /* Set the attention towards other drivers on the same port */
1715 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1716 if (vn == BP_E1HVN(bp))
1717 continue;
1718
1719 func = ((vn << 1) | port);
1720 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1721 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1722 }
1723
1724 /* Store it to internal memory */
1725 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1726 REG_WR(bp, BAR_XSTRORM_INTMEM +
1727 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1728 ((u32 *)(&bp->cmng))[i]);
1729 }
1730}
1731
1732static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) 2353static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1733{ 2354{
1734 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); 2355 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -1755,7 +2376,9 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1755 } 2376 }
1756 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 2377 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1757 2378
1758 bnx2x_update_min_max(bp); 2379 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2380 bnx2x_link_sync_notify(bp);
2381 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
1759 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 2382 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1760 } 2383 }
1761 2384
@@ -1790,7 +2413,7 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1790 /* Make sure that BD data is updated before writing the producer */ 2413 /* Make sure that BD data is updated before writing the producer */
1791 wmb(); 2414 wmb();
1792 2415
1793 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), 2416 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1794 bp->spq_prod_idx); 2417 bp->spq_prod_idx);
1795 mmiowb(); 2418 mmiowb();
1796} 2419}
@@ -1800,6 +2423,7 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1800 u32 data_hi, u32 data_lo, int common) 2423 u32 data_hi, u32 data_lo, int common)
1801{ 2424{
1802 struct eth_spe *spe; 2425 struct eth_spe *spe;
2426 u16 type;
1803 2427
1804#ifdef BNX2X_STOP_ON_ERROR 2428#ifdef BNX2X_STOP_ON_ERROR
1805 if (unlikely(bp->panic)) 2429 if (unlikely(bp->panic))
@@ -1821,22 +2445,42 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1821 spe->hdr.conn_and_cmd_data = 2445 spe->hdr.conn_and_cmd_data =
1822 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | 2446 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1823 HW_CID(bp, cid)); 2447 HW_CID(bp, cid));
1824 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE); 2448
1825 if (common) 2449 if (common)
1826 spe->hdr.type |= 2450 /* Common ramrods:
1827 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT)); 2451 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2452 * TRAFFIC_STOP, TRAFFIC_START
2453 */
2454 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2455 & SPE_HDR_CONN_TYPE;
2456 else
2457 /* ETH ramrods: SETUP, HALT */
2458 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2459 & SPE_HDR_CONN_TYPE;
2460
2461 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2462 SPE_HDR_FUNCTION_ID);
1828 2463
1829 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi); 2464 spe->hdr.type = cpu_to_le16(type);
1830 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1831 2465
1832 bp->spq_left--; 2466 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2467 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2468
2469 /* the stats ramrod has its own slot on the spq */
2470 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2471 /* It's ok if the actual decrement is issued towards the memory
2472 * somewhere between the spin_lock and spin_unlock, so no
2473 * further explicit memory barrier is needed.
2474 */
2475 bp->spq_left--;
1833 2476
1834 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, 2477 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1835 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n", 2478 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2479 "type(0x%x) left %x\n",
1836 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), 2480 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1837 (u32)(U64_LO(bp->spq_mapping) + 2481 (u32)(U64_LO(bp->spq_mapping) +
1838 (void *)bp->spq_prod_bd - (void *)bp->spq), command, 2482 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1839 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left); 2483 HW_CID(bp, cid), data_hi, data_lo, type, bp->spq_left);
1840 2484
1841 bnx2x_sp_prod_update(bp); 2485 bnx2x_sp_prod_update(bp);
1842 spin_unlock_bh(&bp->spq_lock); 2486 spin_unlock_bh(&bp->spq_lock);
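
The type word assembled above packs two fields into spe->hdr.type: the connection type selected by the common flag, and the sending function's id. A generic shift-and-mask sketch of that packing (field positions below are illustrative; the real ones are the SPE_HDR_* constants in the HSI headers):

#include <stdint.h>

/* Illustrative field layout, not the HSI's actual one */
#define CONN_TYPE_SHIFT 0
#define CONN_TYPE_MASK  0x00ffu
#define FUNC_ID_SHIFT   8
#define FUNC_ID_MASK    0xff00u

static uint16_t spe_type(uint8_t conn_type, uint8_t func_id)
{
	uint16_t type = 0;

	/* shift each field into place, mask off anything that overflows */
	type |= ((uint16_t)conn_type << CONN_TYPE_SHIFT) & CONN_TYPE_MASK;
	type |= ((uint16_t)func_id << FUNC_ID_SHIFT) & FUNC_ID_MASK;
	return type;	/* the caller would cpu_to_le16() this, as above */
}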
@@ -1873,32 +2517,27 @@ static void bnx2x_release_alr(struct bnx2x *bp)
1873 REG_WR(bp, GRCBASE_MCP + 0x9c, 0); 2517 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
1874} 2518}
1875 2519
2520#define BNX2X_DEF_SB_ATT_IDX 0x0001
2521#define BNX2X_DEF_SB_IDX 0x0002
2522
1876static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) 2523static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1877{ 2524{
1878 struct host_def_status_block *def_sb = bp->def_status_blk; 2525 struct host_sp_status_block *def_sb = bp->def_status_blk;
1879 u16 rc = 0; 2526 u16 rc = 0;
1880 2527
1881 barrier(); /* status block is written to by the chip */ 2528 barrier(); /* status block is written to by the chip */
1882 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 2529 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1883 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; 2530 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1884 rc |= 1; 2531 rc |= BNX2X_DEF_SB_ATT_IDX;
1885 }
1886 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1887 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1888 rc |= 2;
1889 } 2532 }
1890 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) { 2533
1891 bp->def_u_idx = def_sb->u_def_status_block.status_block_index; 2534 if (bp->def_idx != def_sb->sp_sb.running_index) {
1892 rc |= 4; 2535 bp->def_idx = def_sb->sp_sb.running_index;
1893 } 2536 rc |= BNX2X_DEF_SB_IDX;
1894 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1895 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1896 rc |= 8;
1897 }
1898 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1899 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1900 rc |= 16;
1901 } 2537 }
2538
2539 /* Do not reorder: the index reads must complete before handling */
2540 barrier();
1902 return rc; 2541 return rc;
1903} 2542}
1904 2543
@@ -2144,8 +2783,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2144 int func = BP_FUNC(bp); 2783 int func = BP_FUNC(bp);
2145 2784
2146 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 2785 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2147 bp->mf_config = SHMEM_RD(bp, 2786 bp->mf_config =
2148 mf_cfg.func_mf_config[func].config); 2787 MF_CFG_RD(bp, func_mf_config[func].config);
2149 val = SHMEM_RD(bp, func_mb[func].drv_status); 2788 val = SHMEM_RD(bp, func_mb[func].drv_status);
2150 if (val & DRV_STATUS_DCC_EVENT_MASK) 2789 if (val & DRV_STATUS_DCC_EVENT_MASK)
2151 bnx2x_dcc_event(bp, 2790 bnx2x_dcc_event(bp,
@@ -2598,6 +3237,140 @@ static void bnx2x_attn_int(struct bnx2x *bp)
2598 bnx2x_attn_int_deasserted(bp, deasserted); 3237 bnx2x_attn_int_deasserted(bp, deasserted);
2599} 3238}
2600 3239
3240static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3241{
3242 /* No memory barriers */
3243 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3244 mmiowb(); /* keep prod updates ordered */
3245}
3246
3247#ifdef BCM_CNIC
3248static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3249 union event_ring_elem *elem)
3250{
3251 if (!bp->cnic_eth_dev.starting_cid ||
3252 cid < bp->cnic_eth_dev.starting_cid)
3253 return 1;
3254
3255 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3256
3257 if (unlikely(elem->message.data.cfc_del_event.error)) {
3258 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3259 cid);
3260 bnx2x_panic_dump(bp);
3261 }
3262 bnx2x_cnic_cfc_comp(bp, cid);
3263 return 0;
3264}
3265#endif
3266
3267static void bnx2x_eq_int(struct bnx2x *bp)
3268{
3269 u16 hw_cons, sw_cons, sw_prod;
3270 union event_ring_elem *elem;
3271 u32 cid;
3272 u8 opcode;
3273 int spqe_cnt = 0;
3274
3275 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3276
3277 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3278 * When we get the next-page we need to adjust so the loop
3279 * condition below will be met. The next element is the size of a
3280 * regular element and hence we increment by 1.
3281 */
3282 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3283 hw_cons++;
3284
3285 /* This function never runs in parallel with itself for a
3286 * specific bp, thus there is no need for a "paired" read memory
3287 * barrier here.
3288 */
3289 sw_cons = bp->eq_cons;
3290 sw_prod = bp->eq_prod;
3291
3292 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
3293 hw_cons, sw_cons, bp->spq_left);
3294
3295 for (; sw_cons != hw_cons;
3296 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3297
3298
3299 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3300
3301 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3302 opcode = elem->message.opcode;
3303
3304
3305 /* handle eq element */
3306 switch (opcode) {
3307 case EVENT_RING_OPCODE_STAT_QUERY:
3308 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3309 /* nothing to do with stats comp */
3310 continue;
3311
3312 case EVENT_RING_OPCODE_CFC_DEL:
3313 /* handle according to cid range */
3314 /*
3315 * we may want to verify here that the bp state is
3316 * HALTING
3317 */
3318 DP(NETIF_MSG_IFDOWN,
3319 "got delete ramrod for MULTI[%d]\n", cid);
3320#ifdef BCM_CNIC
3321 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3322 goto next_spqe;
3323#endif
3324 bnx2x_fp(bp, cid, state) =
3325 BNX2X_FP_STATE_CLOSED;
3326
3327 goto next_spqe;
3328 }
3329
3330 switch (opcode | bp->state) {
3331 case (EVENT_RING_OPCODE_FUNCTION_START |
3332 BNX2X_STATE_OPENING_WAIT4_PORT):
3333 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3334 bp->state = BNX2X_STATE_FUNC_STARTED;
3335 break;
3336
3337 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3338 BNX2X_STATE_CLOSING_WAIT4_HALT):
3339 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3340 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3341 break;
3342
3343 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3344 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3345 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3346 bp->set_mac_pending = 0;
3347 break;
3348
3349 case (EVENT_RING_OPCODE_SET_MAC |
3350 BNX2X_STATE_CLOSING_WAIT4_HALT):
3351 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3352 bp->set_mac_pending = 0;
3353 break;
3354 default:
3355 /* unknown event: log the error and continue */
3356 BNX2X_ERR("Unknown EQ event %d\n",
3357 elem->message.opcode);
3358 }
3359next_spqe:
3360 spqe_cnt++;
3361 } /* for */
3362
3363 bp->spq_left++;
3364
3365 bp->eq_cons = sw_cons;
3366 bp->eq_prod = sw_prod;
3367 /* Make sure the above memory writes are ordered before the producer update */
3368 smp_wmb();
3369
3370 /* update producer */
3371 bnx2x_update_eq_prod(bp, bp->eq_prod);
3372}
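
The producer/consumer walk above has to step past the next-page element that terminates each ring page; that is what the hw_cons adjustment and the NEXT_EQ_IDX steps handle. A sketch of such a next-index helper, assuming N descriptors per page with the last one reserved as the next-page pointer (N is an assumed value; the driver's real macro is NEXT_EQ_IDX):

#include <stdint.h>

#define DESC_PER_PAGE 256u	/* assumed page capacity */

/* Advance a ring index, skipping the next-page element that sits at
 * the end of every page. Wrap-around is ignored for brevity. */
static uint16_t next_eq_idx(uint16_t idx)
{
	return ((idx % DESC_PER_PAGE) == DESC_PER_PAGE - 2) ?
		idx + 2 : idx + 1;
}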
3373
2601static void bnx2x_sp_task(struct work_struct *work) 3374static void bnx2x_sp_task(struct work_struct *work)
2602{ 3375{
2603 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); 3376 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
@@ -2616,31 +3389,29 @@ static void bnx2x_sp_task(struct work_struct *work)
2616 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status); 3389 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2617 3390
2618 /* HW attentions */ 3391 /* HW attentions */
2619 if (status & 0x1) { 3392 if (status & BNX2X_DEF_SB_ATT_IDX) {
2620 bnx2x_attn_int(bp); 3393 bnx2x_attn_int(bp);
2621 status &= ~0x1; 3394 status &= ~BNX2X_DEF_SB_ATT_IDX;
2622 } 3395 }
2623 3396
2624 /* CStorm events: STAT_QUERY */ 3397 /* SP events: STAT_QUERY and others */
2625 if (status & 0x2) { 3398 if (status & BNX2X_DEF_SB_IDX) {
2626 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n"); 3399
2627 status &= ~0x2; 3400 /* Handle EQ completions */
3401 bnx2x_eq_int(bp);
3402
3403 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3404 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3405
3406 status &= ~BNX2X_DEF_SB_IDX;
2628 } 3407 }
2629 3408
2630 if (unlikely(status)) 3409 if (unlikely(status))
2631 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n", 3410 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2632 status); 3411 status);
2633 3412
2634 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx), 3413 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
2635 IGU_INT_NOP, 1); 3414 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
2636 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2637 IGU_INT_NOP, 1);
2638 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2639 IGU_INT_NOP, 1);
2640 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2641 IGU_INT_NOP, 1);
2642 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2643 IGU_INT_ENABLE, 1);
2644} 3415}
2645 3416
2646irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 3417irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -2654,7 +3425,8 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2654 return IRQ_HANDLED; 3425 return IRQ_HANDLED;
2655 } 3426 }
2656 3427
2657 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0); 3428 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3429 IGU_INT_DISABLE, 0);
2658 3430
2659#ifdef BNX2X_STOP_ON_ERROR 3431#ifdef BNX2X_STOP_ON_ERROR
2660 if (unlikely(bp->panic)) 3432 if (unlikely(bp->panic))
@@ -2736,232 +3508,234 @@ timer_restart:
2736 * nic init service functions 3508 * nic init service functions
2737 */ 3509 */
2738 3510
2739static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id) 3511static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
2740{ 3512{
2741 int port = BP_PORT(bp); 3513 u32 i;
3514 if (!(len%4) && !(addr%4))
3515 for (i = 0; i < len; i += 4)
3516 REG_WR(bp, addr + i, fill);
3517 else
3518 for (i = 0; i < len; i++)
3519 REG_WR8(bp, addr + i, fill);
2742 3520
2743 /* "CSTORM" */
2744 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2745 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2746 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2747 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2748 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2749 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
2750} 3521}
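
bnx2x_fill() picks 32-bit writes when both the address and the length are dword-aligned, and falls back to byte writes otherwise. Worth noting: the two paths only produce the same memory image for fill == 0 (the word path stores the full fill value per dword, the byte path only its low byte per byte), which is fine here since the driver only uses it to zero firmware memory. A host-side model using plain memory instead of REG_WR/REG_WR8:

#include <stddef.h>
#include <stdint.h>

/* Host-memory model of the fill loop; addr is a plain buffer here,
 * not a chip offset. */
static void fill_model(uint8_t *addr, uint32_t fill, size_t len)
{
	size_t i;

	if (!(len % 4) && !((uintptr_t)addr % 4))
		for (i = 0; i < len; i += 4)
			*(uint32_t *)(addr + i) = fill;	/* word path */
	else
		for (i = 0; i < len; i++)
			addr[i] = (uint8_t)fill;	/* byte path */
}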
2751 3522
2752void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, 3523/* helper: writes FP SP data to FW - data_size in dwords */
2753 dma_addr_t mapping, int sb_id) 3524static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3525 int fw_sb_id,
3526 u32 *sb_data_p,
3527 u32 data_size)
2754{ 3528{
2755 int port = BP_PORT(bp);
2756 int func = BP_FUNC(bp);
2757 int index; 3529 int index;
2758 u64 section; 3530 for (index = 0; index < data_size; index++)
3531 REG_WR(bp, BAR_CSTRORM_INTMEM +
3532 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3533 sizeof(u32)*index,
3534 *(sb_data_p + index));
3535}
2759 3536
2760 /* USTORM */ 3537static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
2761 section = ((u64)mapping) + offsetof(struct host_status_block, 3538{
2762 u_status_block); 3539 u32 *sb_data_p;
2763 sb->u_status_block.status_block_id = sb_id; 3540 u32 data_size = 0;
2764 3541 struct hc_status_block_data_e1x sb_data_e1x;
2765 REG_WR(bp, BAR_CSTRORM_INTMEM +
2766 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2767 REG_WR(bp, BAR_CSTRORM_INTMEM +
2768 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
2769 U64_HI(section));
2770 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2771 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2772
2773 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2774 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2775 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
2776 3542
2777 /* CSTORM */ 3543 /* disable the function first */
2778 section = ((u64)mapping) + offsetof(struct host_status_block, 3544 memset(&sb_data_e1x, 0,
2779 c_status_block); 3545 sizeof(struct hc_status_block_data_e1x));
2780 sb->c_status_block.status_block_id = sb_id; 3546 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3547 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3548 sb_data_e1x.common.p_func.vf_valid = false;
3549 sb_data_p = (u32 *)&sb_data_e1x;
3550 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
2781 3551
2782 REG_WR(bp, BAR_CSTRORM_INTMEM + 3552 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
2783 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
2784 REG_WR(bp, BAR_CSTRORM_INTMEM +
2785 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
2786 U64_HI(section));
2787 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
2788 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2789 3553
2790 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++) 3554 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
2791 REG_WR16(bp, BAR_CSTRORM_INTMEM + 3555 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
2792 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1); 3556 CSTORM_STATUS_BLOCK_SIZE);
3557 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3558 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3559 CSTORM_SYNC_BLOCK_SIZE);
3560}
2793 3561
2794 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 3562/* helper: writes SP SB data to FW */
3563static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3564 struct hc_sp_status_block_data *sp_sb_data)
3565{
3566 int func = BP_FUNC(bp);
3567 int i;
3568 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3569 REG_WR(bp, BAR_CSTRORM_INTMEM +
3570 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3571 i*sizeof(u32),
3572 *((u32 *)sp_sb_data + i));
2795} 3573}
2796 3574
2797static void bnx2x_zero_def_sb(struct bnx2x *bp) 3575static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
2798{ 3576{
2799 int func = BP_FUNC(bp); 3577 int func = BP_FUNC(bp);
3578 struct hc_sp_status_block_data sp_sb_data;
3579 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3580
3581 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3582 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3583 sp_sb_data.p_func.vf_valid = false;
3584
3585 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3586
3587 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3588 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3589 CSTORM_SP_STATUS_BLOCK_SIZE);
3590 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3591 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3592 CSTORM_SP_SYNC_BLOCK_SIZE);
3593
3594}
3595
3596
3597static inline
3598void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3599 int igu_sb_id, int igu_seg_id)
3600{
3601 hc_sm->igu_sb_id = igu_sb_id;
3602 hc_sm->igu_seg_id = igu_seg_id;
3603 hc_sm->timer_value = 0xFF;
3604 hc_sm->time_to_expire = 0xFFFFFFFF;
3605}
3606
3607void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3608 u8 vf_valid, int fw_sb_id, int igu_sb_id)
3609{
3610 int igu_seg_id;
3611
3612 struct hc_status_block_data_e1x sb_data_e1x;
3613 struct hc_status_block_sm *hc_sm_p;
3614 struct hc_index_data *hc_index_p;
3615 int data_size;
3616 u32 *sb_data_p;
3617
3618 igu_seg_id = HC_SEG_ACCESS_NORM;
3619
3620 bnx2x_zero_fp_sb(bp, fw_sb_id);
3621
3622 memset(&sb_data_e1x, 0,
3623 sizeof(struct hc_status_block_data_e1x));
3624 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3625 sb_data_e1x.common.p_func.vf_id = 0xff;
3626 sb_data_e1x.common.p_func.vf_valid = false;
3627 sb_data_e1x.common.p_func.vnic_id = BP_E1HVN(bp);
3628 sb_data_e1x.common.same_igu_sb_1b = true;
3629 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3630 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3631 hc_sm_p = sb_data_e1x.common.state_machine;
3632 hc_index_p = sb_data_e1x.index_data;
3633 sb_data_p = (u32 *)&sb_data_e1x;
3634 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3635
3636
3637 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3638 igu_sb_id, igu_seg_id);
3639 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3640 igu_sb_id, igu_seg_id);
3641
3642 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3643
3644 /* write indices to HW */
3645 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3646}
3647
3648static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3649 u8 sb_index, u8 disable, u16 usec)
3650{
3651 int port = BP_PORT(bp);
3652 u8 ticks = usec / BNX2X_BTR;
2800 3653
2801 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY + 3654 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
2802 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, 3655
2803 sizeof(struct tstorm_def_status_block)/4); 3656 disable = disable ? 1 : (usec ? 0 : 1);
2804 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + 3657 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
2805 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2806 sizeof(struct cstorm_def_status_block_u)/4);
2807 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2808 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2809 sizeof(struct cstorm_def_status_block_c)/4);
2810 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
2811 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2812 sizeof(struct xstorm_def_status_block)/4);
2813} 3658}
2814 3659
2815static void bnx2x_init_def_sb(struct bnx2x *bp, 3660static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
2816 struct host_def_status_block *def_sb, 3661 u16 tx_usec, u16 rx_usec)
2817 dma_addr_t mapping, int sb_id)
2818{ 3662{
3663 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
3664 false, rx_usec);
3665 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
3666 false, tx_usec);
3667}
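
Two small conversions drive the coalescing setup above: the interval in microseconds becomes hardware ticks, and a zero interval implies disable. A sketch assuming BNX2X_BTR is the base tick length in microseconds (its real value lives in bnx2x.h; the 4 below is an assumed stand-in):

#include <stdint.h>

#define BTR_USEC 4u	/* assumed stand-in for BNX2X_BTR */

static uint8_t hc_ticks(uint16_t usec)
{
	return (uint8_t)(usec / BTR_USEC);
}

static uint8_t hc_disable(uint8_t disable, uint16_t usec)
{
	/* an explicit disable wins; a zero timeout also disables */
	return disable ? 1 : (usec ? 0 : 1);
}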
3668static void bnx2x_init_def_sb(struct bnx2x *bp)
3669{
3670 struct host_sp_status_block *def_sb = bp->def_status_blk;
3671 dma_addr_t mapping = bp->def_status_blk_mapping;
3672 int igu_sp_sb_index;
3673 int igu_seg_id;
2819 int port = BP_PORT(bp); 3674 int port = BP_PORT(bp);
2820 int func = BP_FUNC(bp); 3675 int func = BP_FUNC(bp);
2821 int index, val, reg_offset; 3676 int reg_offset;
2822 u64 section; 3677 u64 section;
3678 int index;
3679 struct hc_sp_status_block_data sp_sb_data;
3680 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3681
3682 igu_sp_sb_index = DEF_SB_IGU_ID;
3683 igu_seg_id = HC_SEG_ACCESS_DEF;
2823 3684
2824 /* ATTN */ 3685 /* ATTN */
2825 section = ((u64)mapping) + offsetof(struct host_def_status_block, 3686 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
2826 atten_status_block); 3687 atten_status_block);
2827 def_sb->atten_status_block.status_block_id = sb_id; 3688 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
2828 3689
2829 bp->attn_state = 0; 3690 bp->attn_state = 0;
2830 3691
2831 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 3692 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2832 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 3693 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2833
2834 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 3694 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2835 bp->attn_group[index].sig[0] = REG_RD(bp, 3695 int sindex;
2836 reg_offset + 0x10*index); 3696 /* take care of sig[0]..sig[3] */
2837 bp->attn_group[index].sig[1] = REG_RD(bp, 3697 for (sindex = 0; sindex < 4; sindex++)
2838 reg_offset + 0x4 + 0x10*index); 3698 bp->attn_group[index].sig[sindex] =
2839 bp->attn_group[index].sig[2] = REG_RD(bp, 3699 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
2840 reg_offset + 0x8 + 0x10*index);
2841 bp->attn_group[index].sig[3] = REG_RD(bp,
2842 reg_offset + 0xc + 0x10*index);
2843 } 3700 }
2844 3701
2845 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : 3702 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2846 HC_REG_ATTN_MSG0_ADDR_L); 3703 HC_REG_ATTN_MSG0_ADDR_L);
2847
2848 REG_WR(bp, reg_offset, U64_LO(section)); 3704 REG_WR(bp, reg_offset, U64_LO(section));
2849 REG_WR(bp, reg_offset + 4, U64_HI(section)); 3705 REG_WR(bp, reg_offset + 4, U64_HI(section));
2850 3706
2851 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0); 3707 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
2852 3708 sp_sb);
2853 val = REG_RD(bp, reg_offset);
2854 val |= sb_id;
2855 REG_WR(bp, reg_offset, val);
2856
2857 /* USTORM */
2858 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2859 u_def_status_block);
2860 def_sb->u_def_status_block.status_block_id = sb_id;
2861
2862 REG_WR(bp, BAR_CSTRORM_INTMEM +
2863 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2864 REG_WR(bp, BAR_CSTRORM_INTMEM +
2865 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
2866 U64_HI(section));
2867 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2868 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2869
2870 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2871 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2872 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
2873 3709
2874 /* CSTORM */ 3710 bnx2x_zero_sp_sb(bp);
2875 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2876 c_def_status_block);
2877 def_sb->c_def_status_block.status_block_id = sb_id;
2878
2879 REG_WR(bp, BAR_CSTRORM_INTMEM +
2880 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
2881 REG_WR(bp, BAR_CSTRORM_INTMEM +
2882 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
2883 U64_HI(section));
2884 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
2885 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2886
2887 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2888 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2889 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
2890 3711
2891 /* TSTORM */ 3712 sp_sb_data.host_sb_addr.lo = U64_LO(section);
2892 section = ((u64)mapping) + offsetof(struct host_def_status_block, 3713 sp_sb_data.host_sb_addr.hi = U64_HI(section);
2893 t_def_status_block); 3714 sp_sb_data.igu_sb_id = igu_sp_sb_index;
2894 def_sb->t_def_status_block.status_block_id = sb_id; 3715 sp_sb_data.igu_seg_id = igu_seg_id;
2895 3716 sp_sb_data.p_func.pf_id = func;
2896 REG_WR(bp, BAR_TSTRORM_INTMEM + 3717 sp_sb_data.p_func.vnic_id = BP_E1HVN(bp);
2897 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 3718 sp_sb_data.p_func.vf_id = 0xff;
2898 REG_WR(bp, BAR_TSTRORM_INTMEM +
2899 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2900 U64_HI(section));
2901 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
2902 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2903
2904 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2905 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2906 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2907 3719
2908 /* XSTORM */ 3720 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
2909 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2910 x_def_status_block);
2911 def_sb->x_def_status_block.status_block_id = sb_id;
2912
2913 REG_WR(bp, BAR_XSTRORM_INTMEM +
2914 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2915 REG_WR(bp, BAR_XSTRORM_INTMEM +
2916 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2917 U64_HI(section));
2918 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
2919 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2920
2921 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2922 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2923 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2924 3721
2925 bp->stats_pending = 0; 3722 bp->stats_pending = 0;
2926 bp->set_mac_pending = 0; 3723 bp->set_mac_pending = 0;
2927 3724
2928 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 3725 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
2929} 3726}
2930 3727
2931void bnx2x_update_coalesce(struct bnx2x *bp) 3728void bnx2x_update_coalesce(struct bnx2x *bp)
2932{ 3729{
2933 int port = BP_PORT(bp);
2934 int i; 3730 int i;
2935 3731
2936 for_each_queue(bp, i) { 3732 for_each_queue(bp, i)
2937 int sb_id = bp->fp[i].sb_id; 3733 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
2938 3734 bp->rx_ticks, bp->tx_ticks);
2939 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2940 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2941 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2942 U_SB_ETH_RX_CQ_INDEX),
2943 bp->rx_ticks/(4 * BNX2X_BTR));
2944 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2945 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2946 U_SB_ETH_RX_CQ_INDEX),
2947 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2948
2949 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2950 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2951 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2952 C_SB_ETH_TX_CQ_INDEX),
2953 bp->tx_ticks/(4 * BNX2X_BTR));
2954 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2955 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2956 C_SB_ETH_TX_CQ_INDEX),
2957 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2958 }
2959} 3735}
2960 3736
2961static void bnx2x_init_sp_ring(struct bnx2x *bp) 3737static void bnx2x_init_sp_ring(struct bnx2x *bp)
2962{ 3738{
2963 int func = BP_FUNC(bp);
2964
2965 spin_lock_init(&bp->spq_lock); 3739 spin_lock_init(&bp->spq_lock);
2966 3740
2967 bp->spq_left = MAX_SPQ_PENDING; 3741 bp->spq_left = MAX_SPQ_PENDING;
@@ -2969,91 +3743,25 @@ static void bnx2x_init_sp_ring(struct bnx2x *bp)
2969 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; 3743 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2970 bp->spq_prod_bd = bp->spq; 3744 bp->spq_prod_bd = bp->spq;
2971 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; 3745 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2972
2973 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
2974 U64_LO(bp->spq_mapping));
2975 REG_WR(bp,
2976 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2977 U64_HI(bp->spq_mapping));
2978
2979 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
2980 bp->spq_prod_idx);
2981} 3746}
2982 3747
2983static void bnx2x_init_context(struct bnx2x *bp) 3748static void bnx2x_init_eq_ring(struct bnx2x *bp)
2984{ 3749{
2985 int i; 3750 int i;
3751 for (i = 1; i <= NUM_EQ_PAGES; i++) {
3752 union event_ring_elem *elem =
3753 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
2986 3754
2987 /* Rx */ 3755 elem->next_page.addr.hi =
2988 for_each_queue(bp, i) { 3756 cpu_to_le32(U64_HI(bp->eq_mapping +
2989 struct eth_context *context = bnx2x_sp(bp, context[i].eth); 3757 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
2990 struct bnx2x_fastpath *fp = &bp->fp[i]; 3758 elem->next_page.addr.lo =
2991 u8 cl_id = fp->cl_id; 3759 cpu_to_le32(U64_LO(bp->eq_mapping +
2992 3760 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
2993 context->ustorm_st_context.common.sb_index_numbers =
2994 BNX2X_RX_SB_INDEX_NUM;
2995 context->ustorm_st_context.common.clientId = cl_id;
2996 context->ustorm_st_context.common.status_block_id = fp->sb_id;
2997 context->ustorm_st_context.common.flags =
2998 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2999 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
3000 context->ustorm_st_context.common.statistics_counter_id =
3001 cl_id;
3002 context->ustorm_st_context.common.mc_alignment_log_size =
3003 BNX2X_RX_ALIGN_SHIFT;
3004 context->ustorm_st_context.common.bd_buff_size =
3005 bp->rx_buf_size;
3006 context->ustorm_st_context.common.bd_page_base_hi =
3007 U64_HI(fp->rx_desc_mapping);
3008 context->ustorm_st_context.common.bd_page_base_lo =
3009 U64_LO(fp->rx_desc_mapping);
3010 if (!fp->disable_tpa) {
3011 context->ustorm_st_context.common.flags |=
3012 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
3013 context->ustorm_st_context.common.sge_buff_size =
3014 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
3015 0xffff);
3016 context->ustorm_st_context.common.sge_page_base_hi =
3017 U64_HI(fp->rx_sge_mapping);
3018 context->ustorm_st_context.common.sge_page_base_lo =
3019 U64_LO(fp->rx_sge_mapping);
3020
3021 context->ustorm_st_context.common.max_sges_for_packet =
3022 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
3023 context->ustorm_st_context.common.max_sges_for_packet =
3024 ((context->ustorm_st_context.common.
3025 max_sges_for_packet + PAGES_PER_SGE - 1) &
3026 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
3027 }
3028
3029 context->ustorm_ag_context.cdu_usage =
3030 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3031 CDU_REGION_NUMBER_UCM_AG,
3032 ETH_CONNECTION_TYPE);
3033
3034 context->xstorm_ag_context.cdu_reserved =
3035 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3036 CDU_REGION_NUMBER_XCM_AG,
3037 ETH_CONNECTION_TYPE);
3038 }
3039
3040 /* Tx */
3041 for_each_queue(bp, i) {
3042 struct bnx2x_fastpath *fp = &bp->fp[i];
3043 struct eth_context *context =
3044 bnx2x_sp(bp, context[i].eth);
3045
3046 context->cstorm_st_context.sb_index_number =
3047 C_SB_ETH_TX_CQ_INDEX;
3048 context->cstorm_st_context.status_block_id = fp->sb_id;
3049
3050 context->xstorm_st_context.tx_bd_page_base_hi =
3051 U64_HI(fp->tx_desc_mapping);
3052 context->xstorm_st_context.tx_bd_page_base_lo =
3053 U64_LO(fp->tx_desc_mapping);
3054 context->xstorm_st_context.statistics_data = (fp->cl_id |
3055 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3056 } 3761 }
3762 bp->eq_cons = 0;
3763 bp->eq_prod = NUM_EQ_DESC;
3764 bp->eq_cons_sb = BNX2X_EQ_INDEX;
3057} 3765}
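
The loop above chains the event ring's pages: the last element of each page holds the DMA address of the following page, and the i % NUM_EQ_PAGES term folds the last page's pointer back to page 0, closing the ring. The shape of that chaining, sketched over a plain array of page addresses (illustrative types, not the driver's):

#include <stdint.h>

/* next_page[p] gets the bus address of page (p + 1) % num_pages, so
 * the consumer of page p is steered to the following page and the
 * last page wraps to page 0. */
static void chain_ring_pages(uint64_t *next_page, uint64_t ring_base,
			     uint32_t page_size, unsigned int num_pages)
{
	unsigned int p;

	for (p = 0; p < num_pages; p++)
		next_page[p] = ring_base + (uint64_t)page_size *
			       ((p + 1) % num_pages);
}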
3058 3766
3059static void bnx2x_init_ind_table(struct bnx2x *bp) 3767static void bnx2x_init_ind_table(struct bnx2x *bp)
@@ -3072,47 +3780,11 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
3072 bp->fp->cl_id + (i % bp->num_queues)); 3780 bp->fp->cl_id + (i % bp->num_queues));
3073} 3781}
3074 3782
3075void bnx2x_set_client_config(struct bnx2x *bp)
3076{
3077 struct tstorm_eth_client_config tstorm_client = {0};
3078 int port = BP_PORT(bp);
3079 int i;
3080
3081 tstorm_client.mtu = bp->dev->mtu;
3082 tstorm_client.config_flags =
3083 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3084 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
3085#ifdef BCM_VLAN
3086 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
3087 tstorm_client.config_flags |=
3088 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3089 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3090 }
3091#endif
3092
3093 for_each_queue(bp, i) {
3094 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3095
3096 REG_WR(bp, BAR_TSTRORM_INTMEM +
3097 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3098 ((u32 *)&tstorm_client)[0]);
3099 REG_WR(bp, BAR_TSTRORM_INTMEM +
3100 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3101 ((u32 *)&tstorm_client)[1]);
3102 }
3103
3104 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3105 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
3106}
3107
3108void bnx2x_set_storm_rx_mode(struct bnx2x *bp) 3783void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3109{ 3784{
3110 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3111 int mode = bp->rx_mode; 3785 int mode = bp->rx_mode;
3112 int mask = bp->rx_mode_cl_mask; 3786 u16 cl_id;
3113 int func = BP_FUNC(bp); 3787
3114 int port = BP_PORT(bp);
3115 int i;
3116 /* All but management unicast packets should pass to the host as well */ 3788 /* All but management unicast packets should pass to the host as well */
3117 u32 llh_mask = 3789 u32 llh_mask =
3118 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST | 3790 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
@@ -3120,28 +3792,32 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3120 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN | 3792 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3121 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN; 3793 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
3122 3794
3123 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
3124
3125 switch (mode) { 3795 switch (mode) {
3126 case BNX2X_RX_MODE_NONE: /* no Rx */ 3796 case BNX2X_RX_MODE_NONE: /* no Rx */
3127 tstorm_mac_filter.ucast_drop_all = mask; 3797 cl_id = BP_L_ID(bp);
3128 tstorm_mac_filter.mcast_drop_all = mask; 3798 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
3129 tstorm_mac_filter.bcast_drop_all = mask;
3130 break; 3799 break;
3131 3800
3132 case BNX2X_RX_MODE_NORMAL: 3801 case BNX2X_RX_MODE_NORMAL:
3133 tstorm_mac_filter.bcast_accept_all = mask; 3802 cl_id = BP_L_ID(bp);
3803 bnx2x_rxq_set_mac_filters(bp, cl_id,
3804 BNX2X_ACCEPT_UNICAST |
3805 BNX2X_ACCEPT_BROADCAST |
3806 BNX2X_ACCEPT_MULTICAST);
3134 break; 3807 break;
3135 3808
3136 case BNX2X_RX_MODE_ALLMULTI: 3809 case BNX2X_RX_MODE_ALLMULTI:
3137 tstorm_mac_filter.mcast_accept_all = mask; 3810 cl_id = BP_L_ID(bp);
3138 tstorm_mac_filter.bcast_accept_all = mask; 3811 bnx2x_rxq_set_mac_filters(bp, cl_id,
3812 BNX2X_ACCEPT_UNICAST |
3813 BNX2X_ACCEPT_BROADCAST |
3814 BNX2X_ACCEPT_ALL_MULTICAST);
3139 break; 3815 break;
3140 3816
3141 case BNX2X_RX_MODE_PROMISC: 3817 case BNX2X_RX_MODE_PROMISC:
3142 tstorm_mac_filter.ucast_accept_all = mask; 3818 cl_id = BP_L_ID(bp);
3143 tstorm_mac_filter.mcast_accept_all = mask; 3819 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
3144 tstorm_mac_filter.bcast_accept_all = mask; 3820
3145 /* pass management unicast packets as well */ 3821 /* pass management unicast packets as well */
3146 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; 3822 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3147 break; 3823 break;
@@ -3152,256 +3828,52 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3152 } 3828 }
3153 3829
3154 REG_WR(bp, 3830 REG_WR(bp,
3155 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK), 3831 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
3832 NIG_REG_LLH0_BRB1_DRV_MASK,
3156 llh_mask); 3833 llh_mask);
3157 3834
3158 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) { 3835 DP(NETIF_MSG_IFUP, "rx mode %d\n"
3159 REG_WR(bp, BAR_TSTRORM_INTMEM + 3836 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
3160 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4, 3837 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
3161 ((u32 *)&tstorm_mac_filter)[i]); 3838 bp->mac_filters.ucast_drop_all,
3839 bp->mac_filters.mcast_drop_all,
3840 bp->mac_filters.bcast_drop_all,
3841 bp->mac_filters.ucast_accept_all,
3842 bp->mac_filters.mcast_accept_all,
3843 bp->mac_filters.bcast_accept_all
3844 );
3162 3845
3163/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i, 3846 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
3164 ((u32 *)&tstorm_mac_filter)[i]); */
3165 }
3166
3167 if (mode != BNX2X_RX_MODE_NONE)
3168 bnx2x_set_client_config(bp);
3169} 3847}
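
Each arm of the switch above resolves to the same bnx2x_rxq_set_mac_filters() call with a different accept mask, so the mode-to-mask mapping could equally live in a lookup table. A sketch of that alternative with assumed stand-ins for the driver's rx modes and accept flags (the table itself is illustrative, not in the source):

/* Assumed stand-ins for the driver's rx modes and accept flags */
enum { RX_NONE, RX_NORMAL, RX_ALLMULTI, RX_PROMISC, RX_MODES };

#define ACCEPT_NONE       0u
#define ACCEPT_UCAST      (1u << 0)
#define ACCEPT_MCAST      (1u << 1)
#define ACCEPT_BCAST      (1u << 2)
#define ACCEPT_ALL_MCAST  (1u << 3)
#define PROMISC_MODE      (ACCEPT_UCAST | ACCEPT_MCAST | ACCEPT_BCAST | \
			   ACCEPT_ALL_MCAST)

static const unsigned int rx_mode_accept[RX_MODES] = {
	[RX_NONE]     = ACCEPT_NONE,
	[RX_NORMAL]   = ACCEPT_UCAST | ACCEPT_BCAST | ACCEPT_MCAST,
	[RX_ALLMULTI] = ACCEPT_UCAST | ACCEPT_BCAST | ACCEPT_ALL_MCAST,
	[RX_PROMISC]  = PROMISC_MODE,
};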
3170 3848
3171static void bnx2x_init_internal_common(struct bnx2x *bp) 3849static void bnx2x_init_internal_common(struct bnx2x *bp)
3172{ 3850{
3173 int i; 3851 int i;
3174 3852
3175 /* Zero this manually as its initialization is 3853 if (!CHIP_IS_E1(bp)) {
3176 currently missing in the initTool */
3177 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3178 REG_WR(bp, BAR_USTRORM_INTMEM +
3179 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3180}
3181
3182static void bnx2x_init_internal_port(struct bnx2x *bp)
3183{
3184 int port = BP_PORT(bp);
3185
3186 REG_WR(bp,
3187 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3188 REG_WR(bp,
3189 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3190 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3191 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3192}
3193
3194static void bnx2x_init_internal_func(struct bnx2x *bp)
3195{
3196 struct tstorm_eth_function_common_config tstorm_config = {0};
3197 struct stats_indication_flags stats_flags = {0};
3198 int port = BP_PORT(bp);
3199 int func = BP_FUNC(bp);
3200 int i, j;
3201 u32 offset;
3202 u16 max_agg_size;
3203
3204 tstorm_config.config_flags = RSS_FLAGS(bp);
3205
3206 if (is_multi(bp))
3207 tstorm_config.rss_result_mask = MULTI_MASK;
3208
3209 /* Enable TPA if needed */
3210 if (bp->flags & TPA_ENABLE_FLAG)
3211 tstorm_config.config_flags |=
3212 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3213
3214 if (IS_E1HMF(bp))
3215 tstorm_config.config_flags |=
3216 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
3217
3218 tstorm_config.leading_client_id = BP_L_ID(bp);
3219
3220 REG_WR(bp, BAR_TSTRORM_INTMEM +
3221 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3222 (*(u32 *)&tstorm_config));
3223
3224 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3225 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3226 bnx2x_set_storm_rx_mode(bp);
3227
3228 for_each_queue(bp, i) {
3229 u8 cl_id = bp->fp[i].cl_id;
3230
3231 /* reset xstorm per client statistics */
3232 offset = BAR_XSTRORM_INTMEM +
3233 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3234 for (j = 0;
3235 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3236 REG_WR(bp, offset + j*4, 0);
3237
3238 /* reset tstorm per client statistics */
3239 offset = BAR_TSTRORM_INTMEM +
3240 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3241 for (j = 0;
3242 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3243 REG_WR(bp, offset + j*4, 0);
3244
3245 /* reset ustorm per client statistics */
3246 offset = BAR_USTRORM_INTMEM +
3247 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3248 for (j = 0;
3249 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3250 REG_WR(bp, offset + j*4, 0);
3251 }
3252
3253 /* Init statistics related context */
3254 stats_flags.collect_eth = 1;
3255
3256 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
3257 ((u32 *)&stats_flags)[0]);
3258 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3259 ((u32 *)&stats_flags)[1]);
3260
3261 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
3262 ((u32 *)&stats_flags)[0]);
3263 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3264 ((u32 *)&stats_flags)[1]);
3265
3266 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3267 ((u32 *)&stats_flags)[0]);
3268 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3269 ((u32 *)&stats_flags)[1]);
3270
3271 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
3272 ((u32 *)&stats_flags)[0]);
3273 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3274 ((u32 *)&stats_flags)[1]);
3275
3276 REG_WR(bp, BAR_XSTRORM_INTMEM +
3277 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3278 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3279 REG_WR(bp, BAR_XSTRORM_INTMEM +
3280 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3281 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3282
3283 REG_WR(bp, BAR_TSTRORM_INTMEM +
3284 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3285 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3286 REG_WR(bp, BAR_TSTRORM_INTMEM +
3287 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3288 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3289
3290 REG_WR(bp, BAR_USTRORM_INTMEM +
3291 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3292 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3293 REG_WR(bp, BAR_USTRORM_INTMEM +
3294 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3295 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3296 3854
3297 if (CHIP_IS_E1H(bp)) { 3855 /* xstorm needs to know whether to add ovlan to packets or not,
3856 * in switch-independent mode we'll write 0 here... */
3298 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, 3857 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3299 IS_E1HMF(bp)); 3858 bp->e1hmf);
3300 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET, 3859 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3301 IS_E1HMF(bp)); 3860 bp->e1hmf);
3302 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET, 3861 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3303 IS_E1HMF(bp)); 3862 bp->e1hmf);
3304 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET, 3863 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3305 IS_E1HMF(bp)); 3864 bp->e1hmf);
3306
3307 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3308 bp->e1hov);
3309 } 3865 }
3310 3866
3311 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */ 3867 /* Zero this manually as its initialization is
3312 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * 3868 currently missing in the initTool */
3313 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); 3869 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3314 for_each_queue(bp, i) {
3315 struct bnx2x_fastpath *fp = &bp->fp[i];
3316
3317 REG_WR(bp, BAR_USTRORM_INTMEM +
3318 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
3319 U64_LO(fp->rx_comp_mapping));
3320 REG_WR(bp, BAR_USTRORM_INTMEM +
3321 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3322 U64_HI(fp->rx_comp_mapping));
3323
3324 /* Next page */
3325 REG_WR(bp, BAR_USTRORM_INTMEM +
3326 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3327 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3328 REG_WR(bp, BAR_USTRORM_INTMEM + 3870 REG_WR(bp, BAR_USTRORM_INTMEM +
3329 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4, 3871 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3330 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE)); 3872}
3331
3332 REG_WR16(bp, BAR_USTRORM_INTMEM +
3333 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
3334 max_agg_size);
3335 }
3336
3337 /* dropless flow control */
3338 if (CHIP_IS_E1H(bp)) {
3339 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3340
3341 rx_pause.bd_thr_low = 250;
3342 rx_pause.cqe_thr_low = 250;
3343 rx_pause.cos = 1;
3344 rx_pause.sge_thr_low = 0;
3345 rx_pause.bd_thr_high = 350;
3346 rx_pause.cqe_thr_high = 350;
3347 rx_pause.sge_thr_high = 0;
3348
3349 for_each_queue(bp, i) {
3350 struct bnx2x_fastpath *fp = &bp->fp[i];
3351
3352 if (!fp->disable_tpa) {
3353 rx_pause.sge_thr_low = 150;
3354 rx_pause.sge_thr_high = 250;
3355 }
3356
3357
3358 offset = BAR_USTRORM_INTMEM +
3359 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3360 fp->cl_id);
3361 for (j = 0;
3362 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3363 j++)
3364 REG_WR(bp, offset + j*4,
3365 ((u32 *)&rx_pause)[j]);
3366 }
3367 }
3368
3369 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3370
3371 /* Init rate shaping and fairness contexts */
3372 if (IS_E1HMF(bp)) {
3373 int vn;
3374
3375 /* During init there is no active link
3376 Until link is up, set link rate to 10Gbps */
3377 bp->link_vars.line_speed = SPEED_10000;
3378 bnx2x_init_port_minmax(bp);
3379
3380 if (!BP_NOMCP(bp))
3381 bp->mf_config =
3382 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
3383 bnx2x_calc_vn_weight_sum(bp);
3384
3385 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3386 bnx2x_init_vn_minmax(bp, 2*vn + port);
3387
3388 /* Enable rate shaping and fairness */
3389 bp->cmng.flags.cmng_enables |=
3390 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
3391
3392 } else {
3393 /* rate shaping and fairness are disabled */
3394 DP(NETIF_MSG_IFUP,
3395 "single function mode minmax will be disabled\n");
3396 }
3397
3398 3873
3399 /* Store cmng structures to internal memory */ 3874static void bnx2x_init_internal_port(struct bnx2x *bp)
3400 if (bp->port.pmf) 3875{
3401 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++) 3876 /* port */
3402 REG_WR(bp, BAR_XSTRORM_INTMEM +
3403 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3404 ((u32 *)(&bp->cmng))[i]);
3405} 3877}
3406 3878
3407static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) 3879static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
@@ -3416,7 +3888,8 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3416 /* no break */ 3888 /* no break */
3417 3889
3418 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 3890 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3419 bnx2x_init_internal_func(bp); 3891 /* internal memory per function is
3892 initialized inside bnx2x_pf_init */
3420 break; 3893 break;
3421 3894
3422 default: 3895 default:
@@ -3425,43 +3898,61 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3425 } 3898 }
3426} 3899}
3427 3900
3901static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
3902{
3903 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
3904
3905 fp->state = BNX2X_FP_STATE_CLOSED;
3906
3907 fp->index = fp->cid = fp_idx;
3908 fp->cl_id = BP_L_ID(bp) + fp_idx;
3909 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
3910 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
3911 /* qZone id equals to FW (per path) client id */
3912 fp->cl_qzone_id = fp->cl_id +
3913 BP_PORT(bp)*(ETH_MAX_RX_CLIENTS_E1H);
3914 /* init shortcut */
3915 fp->ustorm_rx_prods_offset =
3916 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
3917 /* Set up SB indices */
3918 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
3919 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
3920
3921 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
3922 "cl_id %d fw_sb %d igu_sb %d\n",
3923 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
3924 fp->igu_sb_id);
3925 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
3926 fp->fw_sb_id, fp->igu_sb_id);
3927
3928 bnx2x_update_fpsb_idx(fp);
3929}
3930
3428void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) 3931void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
3429{ 3932{
3430 int i; 3933 int i;
3431 3934
3432 for_each_queue(bp, i) { 3935 for_each_queue(bp, i)
3433 struct bnx2x_fastpath *fp = &bp->fp[i]; 3936 bnx2x_init_fp_sb(bp, i);
3434
3435 fp->bp = bp;
3436 fp->state = BNX2X_FP_STATE_CLOSED;
3437 fp->index = i;
3438 fp->cl_id = BP_L_ID(bp) + i;
3439#ifdef BCM_CNIC 3937#ifdef BCM_CNIC
3440 fp->sb_id = fp->cl_id + 1; 3938
3441#else 3939 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
3442 fp->sb_id = fp->cl_id; 3940 BNX2X_VF_ID_INVALID, false,
3941 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
3942
3443#endif 3943#endif
3444 DP(NETIF_MSG_IFUP,
3445 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
3446 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
3447 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
3448 fp->sb_id);
3449 bnx2x_update_fpsb_idx(fp);
3450 }
3451 3944
3452 /* ensure status block indices were read */ 3945 /* ensure status block indices were read */
3453 rmb(); 3946 rmb();
3454 3947
3455 3948 bnx2x_init_def_sb(bp);
3456 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3457 DEF_SB_ID);
3458 bnx2x_update_dsb_idx(bp); 3949 bnx2x_update_dsb_idx(bp);
3459 bnx2x_update_coalesce(bp);
3460 bnx2x_init_rx_rings(bp); 3950 bnx2x_init_rx_rings(bp);
3461 bnx2x_init_tx_ring(bp); 3951 bnx2x_init_tx_rings(bp);
3462 bnx2x_init_sp_ring(bp); 3952 bnx2x_init_sp_ring(bp);
3463 bnx2x_init_context(bp); 3953 bnx2x_init_eq_ring(bp);
3464 bnx2x_init_internal(bp, load_code); 3954 bnx2x_init_internal(bp, load_code);
3955 bnx2x_pf_init(bp);
3465 bnx2x_init_ind_table(bp); 3956 bnx2x_init_ind_table(bp);
3466 bnx2x_stats_init(bp); 3957 bnx2x_stats_init(bp);
3467 3958
@@ -3620,8 +4111,6 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
3620 else 4111 else
3621 factor = 1; 4112 factor = 1;
3622 4113
3623 DP(NETIF_MSG_HW, "start part1\n");
3624
3625 /* Disable inputs of parser neighbor blocks */ 4114 /* Disable inputs of parser neighbor blocks */
3626 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 4115 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3627 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 4116 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
@@ -3917,12 +4406,9 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3917 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); 4406 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3918} 4407}
3919 4408
3920static int bnx2x_init_common(struct bnx2x *bp) 4409static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
3921{ 4410{
3922 u32 val, i; 4411 u32 val, i;
3923#ifdef BCM_CNIC
3924 u32 wb_write[2];
3925#endif
3926 4412
3927 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); 4413 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
3928 4414
@@ -3964,12 +4450,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
3964 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 4450 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3965#endif 4451#endif
3966 4452
3967 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); 4453 bnx2x_ilt_init_page_size(bp, INITOP_SET);
3968#ifdef BCM_CNIC 4454
3969 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3970 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3971 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3972#endif
3973 4455
3974 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) 4456 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3975 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 4457 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
@@ -4009,20 +4491,9 @@ static int bnx2x_init_common(struct bnx2x *bp)
4009 4491
4010 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); 4492 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
4011 4493
4012#ifdef BCM_CNIC 4494 /* QM queues pointers table */
4013 wb_write[0] = 0; 4495 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
4014 wb_write[1] = 0; 4496
4015 for (i = 0; i < 64; i++) {
4016 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
4017 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
4018
4019 if (CHIP_IS_E1H(bp)) {
4020 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4021 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4022 wb_write, 2);
4023 }
4024 }
4025#endif
4026 /* soft reset pulse */ 4497 /* soft reset pulse */
4027 REG_WR(bp, QM_REG_SOFT_RESET, 1); 4498 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4028 REG_WR(bp, QM_REG_SOFT_RESET, 0); 4499 REG_WR(bp, QM_REG_SOFT_RESET, 0);
@@ -4032,7 +4503,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
4032#endif 4503#endif
4033 4504
4034 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE); 4505 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
4035 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT); 4506 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
4507
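
The doorbell queue is now programmed with BNX2X_DB_SHIFT instead of BCM_PAGE_SHIFT; later in this patch bp->db_size becomes (1 << BNX2X_DB_SHIFT), so every connection gets a fixed-size doorbell window. A minimal standalone sketch of the resulting offset arithmetic (the shift value below is an illustrative assumption, not necessarily what BNX2X_DB_SHIFT expands to):

#include <stdint.h>
#include <stdio.h>

#define DB_SHIFT 7                      /* assumed doorbell shift */
#define DB_SIZE  (1 << DB_SHIFT)        /* bytes per doorbell window */

static uint32_t db_offset(uint32_t cid)
{
        return cid << DB_SHIFT;         /* equivalent to cid * DB_SIZE */
}

int main(void)
{
        /* CID 5 -> 5 * 128 = 640 bytes into the doorbell BAR */
        printf("cid 5 -> offset 0x%x (window %d bytes)\n",
               db_offset(5), DB_SIZE);
        return 0;
}
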
4036 if (!CHIP_REV_IS_SLOW(bp)) { 4508 if (!CHIP_REV_IS_SLOW(bp)) {
4037 /* enable hw interrupt from doorbell Q */ 4509 /* enable hw interrupt from doorbell Q */
4038 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 4510 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
@@ -4184,7 +4656,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
4184 return 0; 4656 return 0;
4185} 4657}
4186 4658
4187static int bnx2x_init_port(struct bnx2x *bp) 4659static int bnx2x_init_hw_port(struct bnx2x *bp)
4188{ 4660{
4189 int port = BP_PORT(bp); 4661 int port = BP_PORT(bp);
4190 int init_stage = port ? PORT1_STAGE : PORT0_STAGE; 4662 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
@@ -4203,9 +4675,10 @@ static int bnx2x_init_port(struct bnx2x *bp)
4203 bnx2x_init_block(bp, CCM_BLOCK, init_stage); 4675 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
4204 bnx2x_init_block(bp, XCM_BLOCK, init_stage); 4676 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
4205 4677
4206#ifdef BCM_CNIC 4678 /* QM cid (connection) count */
4207 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1); 4679 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
4208 4680
4681#ifdef BCM_CNIC
4209 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); 4682 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
4210 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); 4683 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4211 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 4684 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
@@ -4327,25 +4800,6 @@ static int bnx2x_init_port(struct bnx2x *bp)
4327 return 0; 4800 return 0;
4328} 4801}
4329 4802
4330#define ILT_PER_FUNC (768/2)
4331#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
4332/* the phys address is shifted right 12 bits and has a valid bit (1)
4333 added at the 53rd bit
4334 then since this is a wide register(TM)
4335 we split it into two 32 bit writes
4336 */
4337#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4338#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
4339#define PXP_ONE_ILT(x) (((x) << 10) | x)
4340#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
4341
4342#ifdef BCM_CNIC
4343#define CNIC_ILT_LINES 127
4344#define CNIC_CTX_PER_ILT 16
4345#else
4346#define CNIC_ILT_LINES 0
4347#endif
4348
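
For reference, the ONCHIP_ADDR1/ONCHIP_ADDR2 encoding removed above packs a page-aligned physical address into the two halves of the wide ILT register, with the valid flag landing at bit 20 of the high word. A runnable sketch of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Shifted page address: bits 0..51 hold phys >> 12; bit 52 (the
 * "53rd bit" of the comment above) is the valid flag. The wide
 * register is then written as two 32-bit halves. */
static uint32_t onchip_addr1(uint64_t phys)
{
        return (uint32_t)((phys >> 12) & 0xffffffff);   /* low half */
}

static uint32_t onchip_addr2(uint64_t phys)
{
        return (uint32_t)((1u << 20) | (phys >> 44));   /* high half + valid */
}

int main(void)
{
        uint64_t phys = 0x123456789000ULL;      /* page-aligned address */

        /* lo=0x23456789 hi=0x00100001; bit 20 of hi is bit 52 overall */
        printf("lo=0x%08x hi=0x%08x\n",
               onchip_addr1(phys), onchip_addr2(phys));
        return 0;
}
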
4349static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) 4803static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4350{ 4804{
4351 int reg; 4805 int reg;
@@ -4358,10 +4812,12 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4358 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); 4812 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4359} 4813}
4360 4814
4361static int bnx2x_init_func(struct bnx2x *bp) 4815static int bnx2x_init_hw_func(struct bnx2x *bp)
4362{ 4816{
4363 int port = BP_PORT(bp); 4817 int port = BP_PORT(bp);
4364 int func = BP_FUNC(bp); 4818 int func = BP_FUNC(bp);
4819 struct bnx2x_ilt *ilt = BP_ILT(bp);
4820 u16 cdu_ilt_start;
4365 u32 addr, val; 4821 u32 addr, val;
4366 int i; 4822 int i;
4367 4823
@@ -4373,72 +4829,67 @@ static int bnx2x_init_func(struct bnx2x *bp)
4373 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 4829 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4374 REG_WR(bp, addr, val); 4830 REG_WR(bp, addr, val);
4375 4831
4376 i = FUNC_ILT_BASE(func); 4832 ilt = BP_ILT(bp);
4377 4833 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
4378 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4379 if (CHIP_IS_E1H(bp)) {
4380 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4381 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4382 } else /* E1 */
4383 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4384 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4385
4386#ifdef BCM_CNIC
4387 i += 1 + CNIC_ILT_LINES;
4388 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4389 if (CHIP_IS_E1(bp))
4390 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4391 else {
4392 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4393 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4394 }
4395
4396 i++;
4397 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4398 if (CHIP_IS_E1(bp))
4399 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4400 else {
4401 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4402 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4403 }
4404 4834
4405 i++; 4835 for (i = 0; i < L2_ILT_LINES(bp); i++) {
4406 bnx2x_ilt_wr(bp, i, bp->t1_mapping); 4836 ilt->lines[cdu_ilt_start + i].page =
4407 if (CHIP_IS_E1(bp)) 4837 bp->context.vcxt + (ILT_PAGE_CIDS * i);
4408 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i)); 4838 ilt->lines[cdu_ilt_start + i].page_mapping =
4409 else { 4839 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
4410 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i); 4840 /* cdu ilt pages are allocated manually so there's no need to
4411 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i); 4841 set the size */
4412 } 4842 }
4843 bnx2x_ilt_init_op(bp, INITOP_SET);
4844#ifdef BCM_CNIC
4845 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
4413 4846
4414 /* tell the searcher where the T2 table is */ 4847 /* T1 hash bits value determines the T1 number of entries */
4415 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64); 4848 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
4416 4849#endif
4417 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4418 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4419 4850
4420 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16, 4851#ifndef BCM_CNIC
4421 U64_LO((u64)bp->t2_mapping + 16*1024 - 64), 4852 /* set NIC mode */
4422 U64_HI((u64)bp->t2_mapping + 16*1024 - 64)); 4853 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4854#endif /* BCM_CNIC */
4423 4855
4424 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10); 4856 bp->dmae_ready = 1;
4425#endif
4426 4857
4427 if (CHIP_IS_E1H(bp)) { 4858 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
4428 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func); 4859
4429 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func); 4860 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4430 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func); 4861 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4431 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func); 4862 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4432 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func); 4863 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4433 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func); 4864 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4434 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func); 4865 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4435 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func); 4866 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4436 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func); 4867 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4868 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4869
4870 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
4871 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
4872 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
4873 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
4874 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
4875 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
4876 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
4877 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
4878 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
4879 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
4880 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
4881 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
4882 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
4883
4884 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
4437 4885
4886 if (IS_E1HMF(bp)) {
4438 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); 4887 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4439 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov); 4888 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4440 } 4889 }
4441 4890
4891 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
4892
4442 /* HC init per function */ 4893 /* HC init per function */
4443 if (CHIP_IS_E1H(bp)) { 4894 if (CHIP_IS_E1H(bp)) {
4444 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 4895 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
@@ -4451,13 +4902,21 @@ static int bnx2x_init_func(struct bnx2x *bp)
4451 /* Reset PCIE errors for debug */ 4902 /* Reset PCIE errors for debug */
4452 REG_WR(bp, 0x2114, 0xffffffff); 4903 REG_WR(bp, 0x2114, 0xffffffff);
4453 REG_WR(bp, 0x2120, 0xffffffff); 4904 REG_WR(bp, 0x2120, 0xffffffff);
4905
4906 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
4907 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
4908 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
4909 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
4910 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
4911 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
4912
4454 bnx2x_phy_probe(&bp->link_params); 4913 bnx2x_phy_probe(&bp->link_params);
4455 return 0; 4914 return 0;
4456} 4915}
4457 4916
4458int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) 4917int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4459{ 4918{
4460 int i, rc = 0; 4919 int rc = 0;
4461 4920
4462 DP(BNX2X_MSG_MCP, "function %d load_code %x\n", 4921 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4463 BP_FUNC(bp), load_code); 4922 BP_FUNC(bp), load_code);
@@ -4470,21 +4929,19 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4470 4929
4471 switch (load_code) { 4930 switch (load_code) {
4472 case FW_MSG_CODE_DRV_LOAD_COMMON: 4931 case FW_MSG_CODE_DRV_LOAD_COMMON:
4473 rc = bnx2x_init_common(bp); 4932 rc = bnx2x_init_hw_common(bp, load_code);
4474 if (rc) 4933 if (rc)
4475 goto init_hw_err; 4934 goto init_hw_err;
4476 /* no break */ 4935 /* no break */
4477 4936
4478 case FW_MSG_CODE_DRV_LOAD_PORT: 4937 case FW_MSG_CODE_DRV_LOAD_PORT:
4479 bp->dmae_ready = 1; 4938 rc = bnx2x_init_hw_port(bp);
4480 rc = bnx2x_init_port(bp);
4481 if (rc) 4939 if (rc)
4482 goto init_hw_err; 4940 goto init_hw_err;
4483 /* no break */ 4941 /* no break */
4484 4942
4485 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 4943 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4486 bp->dmae_ready = 1; 4944 rc = bnx2x_init_hw_func(bp);
4487 rc = bnx2x_init_func(bp);
4488 if (rc) 4945 if (rc)
4489 goto init_hw_err; 4946 goto init_hw_err;
4490 break; 4947 break;
@@ -4503,14 +4960,6 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4503 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); 4960 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4504 } 4961 }
4505 4962
4506 /* this needs to be done before gunzip end */
4507 bnx2x_zero_def_sb(bp);
4508 for_each_queue(bp, i)
4509 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4510#ifdef BCM_CNIC
4511 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4512#endif
4513
4514init_hw_err: 4963init_hw_err:
4515 bnx2x_gunzip_end(bp); 4964 bnx2x_gunzip_end(bp);
4516 4965
@@ -4523,7 +4972,7 @@ void bnx2x_free_mem(struct bnx2x *bp)
4523#define BNX2X_PCI_FREE(x, y, size) \ 4972#define BNX2X_PCI_FREE(x, y, size) \
4524 do { \ 4973 do { \
4525 if (x) { \ 4974 if (x) { \
4526 dma_free_coherent(&bp->pdev->dev, size, x, y); \ 4975 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
4527 x = NULL; \ 4976 x = NULL; \
4528 y = 0; \ 4977 y = 0; \
4529 } \ 4978 } \
@@ -4532,7 +4981,7 @@ void bnx2x_free_mem(struct bnx2x *bp)
4532#define BNX2X_FREE(x) \ 4981#define BNX2X_FREE(x) \
4533 do { \ 4982 do { \
4534 if (x) { \ 4983 if (x) { \
4535 vfree(x); \ 4984 kfree((void *)x); \
4536 x = NULL; \ 4985 x = NULL; \
4537 } \ 4986 } \
4538 } while (0) 4987 } while (0)
@@ -4542,11 +4991,10 @@ void bnx2x_free_mem(struct bnx2x *bp)
4542 /* fastpath */ 4991 /* fastpath */
4543 /* Common */ 4992 /* Common */
4544 for_each_queue(bp, i) { 4993 for_each_queue(bp, i) {
4545
4546 /* status blocks */ 4994 /* status blocks */
4547 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk), 4995 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
4548 bnx2x_fp(bp, i, status_blk_mapping), 4996 bnx2x_fp(bp, i, status_blk_mapping),
4549 sizeof(struct host_status_block)); 4997 sizeof(struct host_hc_status_block_e1x));
4550 } 4998 }
4551 /* Rx */ 4999 /* Rx */
4552 for_each_queue(bp, i) { 5000 for_each_queue(bp, i) {
@@ -4580,21 +5028,28 @@ void bnx2x_free_mem(struct bnx2x *bp)
4580 /* end of fastpath */ 5028 /* end of fastpath */
4581 5029
4582 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, 5030 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4583 sizeof(struct host_def_status_block)); 5031 sizeof(struct host_sp_status_block));
4584 5032
4585 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, 5033 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4586 sizeof(struct bnx2x_slowpath)); 5034 sizeof(struct bnx2x_slowpath));
4587 5035
5036 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5037 bp->context.size);
5038
5039 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5040
5041 BNX2X_FREE(bp->ilt->lines);
4588#ifdef BCM_CNIC 5042#ifdef BCM_CNIC
4589 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024); 5043
4590 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024); 5044 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
4591 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024); 5045 sizeof(struct host_hc_status_block_e1x));
4592 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024); 5046 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
4593 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4594 sizeof(struct host_status_block));
4595#endif 5047#endif
4596 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 5048 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4597 5049
5050 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5051 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5052
4598#undef BNX2X_PCI_FREE 5053#undef BNX2X_PCI_FREE
4599#undef BNX2X_FREE 5054#undef BNX2X_FREE
4600} 5055}
@@ -4612,13 +5067,13 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
4612 5067
4613#define BNX2X_ALLOC(x, size) \ 5068#define BNX2X_ALLOC(x, size) \
4614 do { \ 5069 do { \
4615 x = vmalloc(size); \ 5070 x = kzalloc(size, GFP_KERNEL); \
4616 if (x == NULL) \ 5071 if (x == NULL) \
4617 goto alloc_mem_err; \ 5072 goto alloc_mem_err; \
4618 memset(x, 0, size); \
4619 } while (0) 5073 } while (0)
4620 5074
4621 int i; 5075 int i;
5076 void *p;
4622 5077
4623 /* fastpath */ 5078 /* fastpath */
4624 /* Common */ 5079 /* Common */
@@ -4626,9 +5081,17 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
4626 bnx2x_fp(bp, i, bp) = bp; 5081 bnx2x_fp(bp, i, bp) = bp;
4627 5082
4628 /* status blocks */ 5083 /* status blocks */
4629 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk), 5084 BNX2X_PCI_ALLOC(p,
4630 &bnx2x_fp(bp, i, status_blk_mapping), 5085 &bnx2x_fp(bp, i, status_blk_mapping),
4631 sizeof(struct host_status_block)); 5086 sizeof(struct host_hc_status_block_e1x));
5087
5088 bnx2x_fp(bp, i, status_blk.e1x_sb) =
5089 (struct host_hc_status_block_e1x *)p;
5090
5091 bnx2x_fp(bp, i, sb_index_values) = (__le16 *)
5092 (bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.index_values);
5093 bnx2x_fp(bp, i, sb_running_index) = (__le16 *)
5094 (bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.running_index);
4632 } 5095 }
4633 /* Rx */ 5096 /* Rx */
4634 for_each_queue(bp, i) { 5097 for_each_queue(bp, i) {
@@ -4664,37 +5127,36 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
4664 } 5127 }
4665 /* end of fastpath */ 5128 /* end of fastpath */
4666 5129
4667 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, 5130#ifdef BCM_CNIC
4668 sizeof(struct host_def_status_block)); 5131 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5132 sizeof(struct host_hc_status_block_e1x));
4669 5133
4670 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, 5134 /* allocate searcher T2 table */
4671 sizeof(struct bnx2x_slowpath)); 5135 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5136#endif
4672 5137
4673#ifdef BCM_CNIC
4674 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4675 5138
4676 /* allocate searcher T2 table 5139 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4677 we allocate 1/4 of alloc num for T2 5140 sizeof(struct host_sp_status_block));
4678 (which is not entered into the ILT) */
4679 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4680 5141
4681 /* Initialize T2 (for 1024 connections) */ 5142 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4682 for (i = 0; i < 16*1024; i += 64) 5143 sizeof(struct bnx2x_slowpath));
4683 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
4684 5144
4685 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */ 5145 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
4686 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024); 5146 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
5147 bp->context.size);
4687 5148
4688 /* QM queues (128*MAX_CONN) */ 5149 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
4689 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4690 5150
4691 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping, 5151 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
4692 sizeof(struct host_status_block)); 5152 goto alloc_mem_err;
4693#endif
4694 5153
4695 /* Slow path ring */ 5154 /* Slow path ring */
4696 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); 5155 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4697 5156
5157 /* EQ */
5158 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
5159 BCM_PAGE_SIZE * NUM_EQ_PAGES);
4698 return 0; 5160 return 0;
4699 5161
4700alloc_mem_err: 5162alloc_mem_err:
@@ -4705,97 +5167,52 @@ alloc_mem_err:
4705#undef BNX2X_ALLOC 5167#undef BNX2X_ALLOC
4706} 5168}
4707 5169
4708
4709/* 5170/*
4710 * Init service functions 5171 * Init service functions
4711 */ 5172 */
4712 5173int bnx2x_func_start(struct bnx2x *bp)
4713/**
4714 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4715 *
4716 * @param bp driver descriptor
4717 * @param set set or clear an entry (1 or 0)
4718 * @param mac pointer to a buffer containing a MAC
4719 * @param cl_bit_vec bit vector of clients to register a MAC for
4720 * @param cam_offset offset in a CAM to use
4721 * @param with_bcast set broadcast MAC as well
4722 */
4723static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4724 u32 cl_bit_vec, u8 cam_offset,
4725 u8 with_bcast)
4726{ 5174{
4727 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); 5175 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
4728 int port = BP_PORT(bp);
4729 5176
4730 /* CAM allocation 5177 /* Wait for completion */
4731 * unicasts 0-31:port0 32-63:port1 5178 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
4732 * multicast 64-127:port0 128-191:port1 5179 WAIT_RAMROD_COMMON);
4733 */ 5180}
4734 config->hdr.length = 1 + (with_bcast ? 1 : 0);
4735 config->hdr.offset = cam_offset;
4736 config->hdr.client_id = 0xff;
4737 config->hdr.reserved1 = 0;
4738
4739 /* primary MAC */
4740 config->config_table[0].cam_entry.msb_mac_addr =
4741 swab16(*(u16 *)&mac[0]);
4742 config->config_table[0].cam_entry.middle_mac_addr =
4743 swab16(*(u16 *)&mac[2]);
4744 config->config_table[0].cam_entry.lsb_mac_addr =
4745 swab16(*(u16 *)&mac[4]);
4746 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
4747 if (set)
4748 config->config_table[0].target_table_entry.flags = 0;
4749 else
4750 CAM_INVALIDATE(config->config_table[0]);
4751 config->config_table[0].target_table_entry.clients_bit_vector =
4752 cpu_to_le32(cl_bit_vec);
4753 config->config_table[0].target_table_entry.vlan_id = 0;
4754 5181
4755 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", 5182int bnx2x_func_stop(struct bnx2x *bp)
4756 (set ? "setting" : "clearing"), 5183{
4757 config->config_table[0].cam_entry.msb_mac_addr, 5184 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
4758 config->config_table[0].cam_entry.middle_mac_addr,
4759 config->config_table[0].cam_entry.lsb_mac_addr);
4760
4761 /* broadcast */
4762 if (with_bcast) {
4763 config->config_table[1].cam_entry.msb_mac_addr =
4764 cpu_to_le16(0xffff);
4765 config->config_table[1].cam_entry.middle_mac_addr =
4766 cpu_to_le16(0xffff);
4767 config->config_table[1].cam_entry.lsb_mac_addr =
4768 cpu_to_le16(0xffff);
4769 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4770 if (set)
4771 config->config_table[1].target_table_entry.flags =
4772 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4773 else
4774 CAM_INVALIDATE(config->config_table[1]);
4775 config->config_table[1].target_table_entry.clients_bit_vector =
4776 cpu_to_le32(cl_bit_vec);
4777 config->config_table[1].target_table_entry.vlan_id = 0;
4778 }
4779 5185
4780 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 5186 /* Wait for completion */
4781 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 5187 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
4782 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 5188 0, &(bp->state), WAIT_RAMROD_COMMON);
4783} 5189}
4784 5190
4785/** 5191/**
4786 * Sets a MAC in a CAM for a few L2 Clients for E1H chip 5192 * Sets a MAC in a CAM for a few L2 Clients for E1x chip
4787 * 5193 *
4788 * @param bp driver descriptor 5194 * @param bp driver descriptor
4789 * @param set set or clear an entry (1 or 0) 5195 * @param set set or clear an entry (1 or 0)
4790 * @param mac pointer to a buffer containing a MAC 5196 * @param mac pointer to a buffer containing a MAC
4791 * @param cl_bit_vec bit vector of clients to register a MAC for 5197 * @param cl_bit_vec bit vector of clients to register a MAC for
4792 * @param cam_offset offset in a CAM to use 5198 * @param cam_offset offset in a CAM to use
5199 * @param is_bcast whether the MAC being set is a broadcast address (E1 only)
4793 */ 5200 */
4794static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac, 5201static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
4795 u32 cl_bit_vec, u8 cam_offset) 5202 u32 cl_bit_vec, u8 cam_offset,
5203 u8 is_bcast)
4796{ 5204{
4797 struct mac_configuration_cmd_e1h *config = 5205 struct mac_configuration_cmd *config =
4798 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); 5206 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
5207 int ramrod_flags = WAIT_RAMROD_COMMON;
5208
5209 bp->set_mac_pending = 1;
5210 smp_wmb();
5211
5212 config->hdr.length = 1 + (is_bcast ? 1 : 0);
5213 config->hdr.offset = cam_offset;
5214 config->hdr.client_id = 0xff;
5215 config->hdr.reserved1 = 0;
4799 5216
4800 config->hdr.length = 1; 5217 config->hdr.length = 1;
4801 config->hdr.offset = cam_offset; 5218 config->hdr.offset = cam_offset;
@@ -4812,29 +5229,42 @@ static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4812 config->config_table[0].clients_bit_vector = 5229 config->config_table[0].clients_bit_vector =
4813 cpu_to_le32(cl_bit_vec); 5230 cpu_to_le32(cl_bit_vec);
4814 config->config_table[0].vlan_id = 0; 5231 config->config_table[0].vlan_id = 0;
4815 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); 5232 config->config_table[0].pf_id = BP_FUNC(bp);
4816 if (set) 5233 if (set)
4817 config->config_table[0].flags = BP_PORT(bp); 5234 SET_FLAG(config->config_table[0].flags,
5235 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5236 T_ETH_MAC_COMMAND_SET);
4818 else 5237 else
4819 config->config_table[0].flags = 5238 SET_FLAG(config->config_table[0].flags,
4820 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE; 5239 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5240 T_ETH_MAC_COMMAND_INVALIDATE);
5241
5242 if (is_bcast)
5243 SET_FLAG(config->config_table[0].flags,
5244 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
4821 5245
4822 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n", 5246 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
4823 (set ? "setting" : "clearing"), 5247 (set ? "setting" : "clearing"),
4824 config->config_table[0].msb_mac_addr, 5248 config->config_table[0].msb_mac_addr,
4825 config->config_table[0].middle_mac_addr, 5249 config->config_table[0].middle_mac_addr,
4826 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec); 5250 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
4827 5251
4828 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 5252 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
4829 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 5253 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4830 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 5254 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
5255
5256 /* Wait for a completion */
5257 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
4831} 5258}
4832 5259
4833static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, 5260
4834 int *state_p, int poll) 5261int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
5262 int *state_p, int flags)
4835{ 5263{
4836 /* can take a while if any port is running */ 5264 /* can take a while if any port is running */
4837 int cnt = 5000; 5265 int cnt = 5000;
5266 u8 poll = flags & WAIT_RAMROD_POLL;
5267 u8 common = flags & WAIT_RAMROD_COMMON;
4838 5268
4839 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n", 5269 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4840 poll ? "polling" : "waiting", state, idx); 5270 poll ? "polling" : "waiting", state, idx);
@@ -4842,13 +5272,17 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4842 might_sleep(); 5272 might_sleep();
4843 while (cnt--) { 5273 while (cnt--) {
4844 if (poll) { 5274 if (poll) {
4845 bnx2x_rx_int(bp->fp, 10); 5275 if (common)
4846 /* if index is different from 0 5276 bnx2x_eq_int(bp);
4847 * the reply for some commands will 5277 else {
4848 * be on the non default queue 5278 bnx2x_rx_int(bp->fp, 10);
4849 */ 5279 /* if index is different from 0
4850 if (idx) 5280 * the reply for some commands will
4851 bnx2x_rx_int(&bp->fp[idx], 10); 5281 * be on the non default queue
5282 */
5283 if (idx)
5284 bnx2x_rx_int(&bp->fp[idx], 10);
5285 }
4852 } 5286 }
4853 5287
4854 mb(); /* state is changed by bnx2x_sp_event() */ 5288 mb(); /* state is changed by bnx2x_sp_event() */
@@ -4875,31 +5309,110 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4875 return -EBUSY; 5309 return -EBUSY;
4876} 5310}
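
bnx2x_wait_ramrod() now takes a flags word instead of a bare poll argument: WAIT_RAMROD_POLL selects active polling, and WAIT_RAMROD_COMMON routes that polling to the event queue rather than the Rx fastpath. A minimal standalone sketch of the bounded wait pattern, with hypothetical stand-ins for the driver calls:

#include <stdbool.h>
#include <stdio.h>

#define WAIT_RAMROD_POLL    0x1
#define WAIT_RAMROD_COMMON  0x2

static int state;                       /* stands in for *state_p */

static void poll_hw(bool common)        /* bnx2x_eq_int()/bnx2x_rx_int() stand-in */
{
        (void)common;                   /* selects the EQ path in the driver */
        if (state < 3)
                state++;                /* pretend completion after 3 polls */
}

static int wait_for_state(int target, int flags)
{
        int cnt = 5000;                 /* bounded, as in bnx2x_wait_ramrod() */

        while (cnt--) {
                if (flags & WAIT_RAMROD_POLL)
                        poll_hw(flags & WAIT_RAMROD_COMMON);
                if (state == target)
                        return 0;
                /* the real code sleeps 1ms between iterations */
        }
        return -1;                      /* -EBUSY */
}

int main(void)
{
        printf("rc=%d\n", wait_for_state(3, WAIT_RAMROD_POLL));
        return 0;
}
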
4877 5311
4878void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set) 5312u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
4879{ 5313{
4880 bp->set_mac_pending++; 5314 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
4881 smp_wmb(); 5315}
5316
5317void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
5318{
5319 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
5320 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
4882 5321
4883 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr, 5322 /* networking MAC */
4884 (1 << bp->fp->cl_id), BP_FUNC(bp)); 5323 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
5324 (1 << bp->fp->cl_id), cam_offset, 0);
4885 5325
4886 /* Wait for a completion */ 5326 if (CHIP_IS_E1(bp)) {
4887 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); 5327 /* broadcast MAC */
5328 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5329 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
5330 }
4888} 5331}
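
bnx2x_e1h_cam_offset() stripes the CAM by function: each relative line (CAM_ETH_LINE, CAM_ISCSI_ETH_LINE, ...) holds one slot per PCI function at E1H_FUNC_MAX * rel_offset + func. A worked example, assuming E1H_FUNC_MAX is 8 and the usual line numbering:

#include <stdio.h>

#define E1H_FUNC_MAX        8   /* assumed: eight PCI functions on E1H */
#define CAM_ETH_LINE        0   /* assumed relative line numbers */
#define CAM_ISCSI_ETH_LINE  1

static int e1h_cam_offset(int func, int rel_offset)
{
        return E1H_FUNC_MAX * rel_offset + func;
}

int main(void)
{
        /* function 3: ETH MAC lands in CAM entry 3, iSCSI MAC in 11 */
        printf("eth=%d iscsi=%d\n",
               e1h_cam_offset(3, CAM_ETH_LINE),
               e1h_cam_offset(3, CAM_ISCSI_ETH_LINE));
        return 0;
}
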
5332static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
5333{
5334 int i = 0, old;
5335 struct net_device *dev = bp->dev;
5336 struct netdev_hw_addr *ha;
5337 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
5338 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
5339
5340 netdev_for_each_mc_addr(ha, dev) {
5341 /* copy mac */
5342 config_cmd->config_table[i].msb_mac_addr =
5343 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
5344 config_cmd->config_table[i].middle_mac_addr =
5345 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
5346 config_cmd->config_table[i].lsb_mac_addr =
5347 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
5348
5349 config_cmd->config_table[i].vlan_id = 0;
5350 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
5351 config_cmd->config_table[i].clients_bit_vector =
5352 cpu_to_le32(1 << BP_L_ID(bp));
5353
5354 SET_FLAG(config_cmd->config_table[i].flags,
5355 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5356 T_ETH_MAC_COMMAND_SET);
5357
5358 DP(NETIF_MSG_IFUP,
5359 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
5360 config_cmd->config_table[i].msb_mac_addr,
5361 config_cmd->config_table[i].middle_mac_addr,
5362 config_cmd->config_table[i].lsb_mac_addr);
5363 i++;
5364 }
5365 old = config_cmd->hdr.length;
5366 if (old > i) {
5367 for (; i < old; i++) {
5368 if (CAM_IS_INVALID(config_cmd->
5369 config_table[i])) {
5370 /* already invalidated */
5371 break;
5372 }
5373 /* invalidate */
5374 SET_FLAG(config_cmd->config_table[i].flags,
5375 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5376 T_ETH_MAC_COMMAND_INVALIDATE);
5377 }
5378 }
5379
5380 config_cmd->hdr.length = i;
5381 config_cmd->hdr.offset = offset;
5382 config_cmd->hdr.client_id = 0xff;
5383 config_cmd->hdr.reserved1 = 0;
5384
5385 bp->set_mac_pending = 1;
5386 smp_wmb();
4889 5387
4890void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set) 5388 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
5389 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
5390}
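
The config_table fill above stores each MAC address as three byte-swapped 16-bit words. A small standalone sketch of the packing, which turns 00:11:22:33:44:55 into 0011:2233:4455:

#include <stdint.h>
#include <stdio.h>

static uint16_t swab16(uint16_t v)      /* kernel swab16() equivalent */
{
        return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
        uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint16_t w[3];
        int i;

        /* same packing as the msb/middle/lsb fields above: on a
         * little-endian host *(u16 *)&mac[0] reads 0x1100, and
         * swab16() turns it into 0x0011 */
        for (i = 0; i < 3; i++)
                w[i] = swab16((uint16_t)(mac[2 * i] | (mac[2 * i + 1] << 8)));

        printf("%04x:%04x:%04x\n", w[0], w[1], w[2]);
        return 0;
}
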
5391static void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
4891{ 5392{
4892 bp->set_mac_pending++; 5393 int i;
5394 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
5395 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
5396 int ramrod_flags = WAIT_RAMROD_COMMON;
5397
5398 bp->set_mac_pending = 1;
4893 smp_wmb(); 5399 smp_wmb();
4894 5400
4895 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr, 5401 for (i = 0; i < config_cmd->hdr.length; i++)
4896 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0), 5402 SET_FLAG(config_cmd->config_table[i].flags,
4897 1); 5403 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5404 T_ETH_MAC_COMMAND_INVALIDATE);
5405
5406 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
5407 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
4898 5408
4899 /* Wait for a completion */ 5409 /* Wait for a completion */
4900 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); 5410 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
5411 ramrod_flags);
5412
4901} 5413}
4902 5414
5415
4903#ifdef BCM_CNIC 5416#ifdef BCM_CNIC
4904/** 5417/**
4905 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH 5418 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
@@ -4913,65 +5426,181 @@ void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
4913 */ 5426 */
4914int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) 5427int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
4915{ 5428{
4916 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID); 5429 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
4917 5430 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
4918 bp->set_mac_pending++; 5431 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
4919 smp_wmb(); 5432 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
4920 5433
4921 /* Send a SET_MAC ramrod */ 5434 /* Send a SET_MAC ramrod */
4922 if (CHIP_IS_E1(bp)) 5435 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
4923 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac, 5436 cam_offset, 0);
4924 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4925 1);
4926 else
4927 /* CAM allocation for E1H
4928 * unicasts: by func number
4929 * multicast: 20+FUNC*20, 20 each
4930 */
4931 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4932 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4933
4934 /* Wait for a completion when setting */
4935 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4936
4937 return 0; 5437 return 0;
4938} 5438}
4939#endif 5439#endif
4940 5440
4941int bnx2x_setup_leading(struct bnx2x *bp) 5441static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
4942{ 5442 struct bnx2x_client_init_params *params,
4943 int rc; 5443 u8 activate,
5444 struct client_init_ramrod_data *data)
5445{
5446 /* Clear the buffer */
5447 memset(data, 0, sizeof(*data));
5448
5449 /* general */
5450 data->general.client_id = params->rxq_params.cl_id;
5451 data->general.statistics_counter_id = params->rxq_params.stat_id;
5452 data->general.statistics_en_flg =
5453 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
5454 data->general.activate_flg = activate;
5455 data->general.sp_client_id = params->rxq_params.spcl_id;
5456
5457 /* Rx data */
5458 data->rx.tpa_en_flg =
5459 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
5460 data->rx.vmqueue_mode_en_flg = 0;
5461 data->rx.cache_line_alignment_log_size =
5462 params->rxq_params.cache_line_log;
5463 data->rx.enable_dynamic_hc =
5464 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
5465 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
5466 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
5467 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
5468
5469 /* We don't set drop flags */
5470 data->rx.drop_ip_cs_err_flg = 0;
5471 data->rx.drop_tcp_cs_err_flg = 0;
5472 data->rx.drop_ttl0_flg = 0;
5473 data->rx.drop_udp_cs_err_flg = 0;
5474
5475 data->rx.inner_vlan_removal_enable_flg =
5476 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
5477 data->rx.outer_vlan_removal_enable_flg =
5478 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
5479 data->rx.status_block_id = params->rxq_params.fw_sb_id;
5480 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
5481 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
5482 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
5483 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
5484 data->rx.bd_page_base.lo =
5485 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
5486 data->rx.bd_page_base.hi =
5487 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
5488 data->rx.sge_page_base.lo =
5489 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
5490 data->rx.sge_page_base.hi =
5491 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
5492 data->rx.cqe_page_base.lo =
5493 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
5494 data->rx.cqe_page_base.hi =
5495 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
5496 data->rx.is_leading_rss =
5497 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
5498 data->rx.is_approx_mcast = data->rx.is_leading_rss;
5499
5500 /* Tx data */
5501 data->tx.enforce_security_flg = 0; /* VF specific */
5502 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
5503 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
5504 data->tx.mtu = 0; /* VF specific */
5505 data->tx.tx_bd_page_base.lo =
5506 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
5507 data->tx.tx_bd_page_base.hi =
5508 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
5509
5510 /* flow control data */
5511 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
5512 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
5513 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
5514 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
5515 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
5516 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
5517 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
5518
5519 data->fc.safc_group_num = params->txq_params.cos;
5520 data->fc.safc_group_en_flg =
5521 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
5522 data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
5523}
5524
5525static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
5526{
5527 /* ustorm cxt validation */
5528 cxt->ustorm_ag_context.cdu_usage =
5529 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
5530 ETH_CONNECTION_TYPE);
5531 /* xcontext validation */
5532 cxt->xstorm_ag_context.cdu_reserved =
5533 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
5534 ETH_CONNECTION_TYPE);
5535}
5536
5537int bnx2x_setup_fw_client(struct bnx2x *bp,
5538 struct bnx2x_client_init_params *params,
5539 u8 activate,
5540 struct client_init_ramrod_data *data,
5541 dma_addr_t data_mapping)
5542{
5543 u16 hc_usec;
5544 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
5545 int ramrod_flags = 0, rc;
5546
5547 /* HC and context validation values */
5548 hc_usec = params->txq_params.hc_rate ?
5549 1000000 / params->txq_params.hc_rate : 0;
5550 bnx2x_update_coalesce_sb_index(bp,
5551 params->txq_params.fw_sb_id,
5552 params->txq_params.sb_cq_index,
5553 !(params->txq_params.flags & QUEUE_FLG_HC),
5554 hc_usec);
5555
5556 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
5557
5558 hc_usec = params->rxq_params.hc_rate ?
5559 1000000 / params->rxq_params.hc_rate : 0;
5560 bnx2x_update_coalesce_sb_index(bp,
5561 params->rxq_params.fw_sb_id,
5562 params->rxq_params.sb_cq_index,
5563 !(params->rxq_params.flags & QUEUE_FLG_HC),
5564 hc_usec);
5565
5566 bnx2x_set_ctx_validation(params->rxq_params.cxt,
5567 params->rxq_params.cid);
5568
5569 /* zero stats */
5570 if (params->txq_params.flags & QUEUE_FLG_STATS)
5571 storm_memset_xstats_zero(bp, BP_PORT(bp),
5572 params->txq_params.stat_id);
5573
5574 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
5575 storm_memset_ustats_zero(bp, BP_PORT(bp),
5576 params->rxq_params.stat_id);
5577 storm_memset_tstats_zero(bp, BP_PORT(bp),
5578 params->rxq_params.stat_id);
5579 }
5580
5581 /* Fill the ramrod data */
5582 bnx2x_fill_cl_init_data(bp, params, activate, data);
5583
5584 /* SETUP ramrod.
5585 *
5586 * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
5587 * barrier other than mmiowb() is needed to impose a
5588 * proper ordering of memory operations.
5589 */
5590 mmiowb();
4944 5591
4945 /* reset IGU state */
4946 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4947 5592
4948 /* SETUP ramrod */ 5593 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
4949 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0); 5594 U64_HI(data_mapping), U64_LO(data_mapping), 0);
4950 5595
4951 /* Wait for completion */ 5596 /* Wait for completion */
4952 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0); 5597 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
4953 5598 params->ramrod_params.index,
5599 params->ramrod_params.pstate,
5600 ramrod_flags);
4954 return rc; 5601 return rc;
4955} 5602}
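
hc_rate is an interrupt rate in events per second; bnx2x_setup_fw_client() converts it to the microsecond period the coalescing hardware expects, with 0 meaning coalescing is disabled. A one-line worked example:

#include <stdio.h>

/* 1000000 / rate turns events-per-second into a period in usec */
static unsigned int hc_usec(unsigned int hc_rate)
{
        return hc_rate ? 1000000 / hc_rate : 0;
}

int main(void)
{
        printf("%u\n", hc_usec(50000));   /* 50k ints/s -> 20 usec */
        printf("%u\n", hc_usec(0));       /* 0 -> disabled */
        return 0;
}
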
4956 5603
4957int bnx2x_setup_multi(struct bnx2x *bp, int index)
4958{
4959 struct bnx2x_fastpath *fp = &bp->fp[index];
4960
4961 /* reset IGU state */
4962 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4963
4964 /* SETUP ramrod */
4965 fp->state = BNX2X_FP_STATE_OPENING;
4966 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4967 fp->cl_id, 0);
4968
4969 /* Wait for completion */
4970 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
4971 &(fp->state), 0);
4972}
4973
4974
4975void bnx2x_set_num_queues_msix(struct bnx2x *bp) 5604void bnx2x_set_num_queues_msix(struct bnx2x *bp)
4976{ 5605{
4977 5606
@@ -4996,87 +5625,217 @@ void bnx2x_set_num_queues_msix(struct bnx2x *bp)
4996 } 5625 }
4997} 5626}
4998 5627
5628void bnx2x_ilt_set_info(struct bnx2x *bp)
5629{
5630 struct ilt_client_info *ilt_client;
5631 struct bnx2x_ilt *ilt = BP_ILT(bp);
5632 u16 line = 0;
5633
5634 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
5635 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
5636
5637 /* CDU */
5638 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5639 ilt_client->client_num = ILT_CLIENT_CDU;
5640 ilt_client->page_size = CDU_ILT_PAGE_SZ;
5641 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5642 ilt_client->start = line;
5643 line += L2_ILT_LINES(bp);
5644#ifdef BCM_CNIC
5645 line += CNIC_ILT_LINES;
5646#endif
5647 ilt_client->end = line - 1;
5648
5649 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
5650 "flags 0x%x, hw psz %d\n",
5651 ilt_client->start,
5652 ilt_client->end,
5653 ilt_client->page_size,
5654 ilt_client->flags,
5655 ilog2(ilt_client->page_size >> 12));
5656
5657 /* QM */
5658 if (QM_INIT(bp->qm_cid_count)) {
5659 ilt_client = &ilt->clients[ILT_CLIENT_QM];
5660 ilt_client->client_num = ILT_CLIENT_QM;
5661 ilt_client->page_size = QM_ILT_PAGE_SZ;
5662 ilt_client->flags = 0;
5663 ilt_client->start = line;
5664
5665 /* 4 bytes for each cid */
5666 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5667 QM_ILT_PAGE_SZ);
5668
5669 ilt_client->end = line - 1;
5670
5671 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
5672 "flags 0x%x, hw psz %d\n",
5673 ilt_client->start,
5674 ilt_client->end,
5675 ilt_client->page_size,
5676 ilt_client->flags,
5677 ilog2(ilt_client->page_size >> 12));
5678
5679 }
5680 /* SRC */
5681 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5682#ifdef BCM_CNIC
5683 ilt_client->client_num = ILT_CLIENT_SRC;
5684 ilt_client->page_size = SRC_ILT_PAGE_SZ;
5685 ilt_client->flags = 0;
5686 ilt_client->start = line;
5687 line += SRC_ILT_LINES;
5688 ilt_client->end = line - 1;
5689
5690 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
5691 "flags 0x%x, hw psz %d\n",
5692 ilt_client->start,
5693 ilt_client->end,
5694 ilt_client->page_size,
5695 ilt_client->flags,
5696 ilog2(ilt_client->page_size >> 12));
5697
5698#else
5699 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
5700#endif
4999 5701
5702 /* TM */
5703 ilt_client = &ilt->clients[ILT_CLIENT_TM];
5704#ifdef BCM_CNIC
5705 ilt_client->client_num = ILT_CLIENT_TM;
5706 ilt_client->page_size = TM_ILT_PAGE_SZ;
5707 ilt_client->flags = 0;
5708 ilt_client->start = line;
5709 line += TM_ILT_LINES;
5710 ilt_client->end = line - 1;
5711
5712 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
5713 "flags 0x%x, hw psz %d\n",
5714 ilt_client->start,
5715 ilt_client->end,
5716 ilt_client->page_size,
5717 ilt_client->flags,
5718 ilog2(ilt_client->page_size >> 12));
5000 5719
5001static int bnx2x_stop_multi(struct bnx2x *bp, int index) 5720#else
5721 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
5722#endif
5723}
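
bnx2x_ilt_set_info() carves each client's range out of one linear line space, and the QM sizing above is 4 bytes per CID per queue, rounded up to whole ILT pages. A standalone sketch of that arithmetic (page size and queue count are illustrative assumptions, not the driver's actual constants):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

#define QM_ILT_PAGE_SZ      4096        /* assumed page size */
#define QM_QUEUES_PER_FUNC  16          /* assumed queue count */

int main(void)
{
        unsigned int qm_cid_count = 256;
        /* 4 bytes per cid per queue, rounded up to whole pages */
        unsigned int lines = DIV_ROUND_UP(qm_cid_count *
                                          QM_QUEUES_PER_FUNC * 4,
                                          QM_ILT_PAGE_SZ);

        printf("QM needs %u ILT line(s)\n", lines);     /* 16384/4096 = 4 */
        return 0;
}
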
5724int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
5725 int is_leading)
5002{ 5726{
5003 struct bnx2x_fastpath *fp = &bp->fp[index]; 5727 struct bnx2x_client_init_params params = { {0} };
5004 int rc; 5728 int rc;
5005 5729
5006 /* halt the connection */ 5730 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
5007 fp->state = BNX2X_FP_STATE_HALTING; 5731 IGU_INT_ENABLE, 0);
5008 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
5009 5732
5010 /* Wait for completion */ 5733 params.ramrod_params.pstate = &fp->state;
5011 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, 5734 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
5012 &(fp->state), 1); 5735 params.ramrod_params.index = fp->index;
5013 if (rc) /* timeout */ 5736 params.ramrod_params.cid = fp->cid;
5014 return rc;
5015 5737
5016 /* delete cfc entry */ 5738 if (is_leading)
5017 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1); 5739 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
5018 5740
5019 /* Wait for completion */ 5741 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
5020 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index, 5742
5021 &(fp->state), 1); 5743 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
5744
5745 rc = bnx2x_setup_fw_client(bp, &params, 1,
5746 bnx2x_sp(bp, client_init_data),
5747 bnx2x_sp_mapping(bp, client_init_data));
5022 return rc; 5748 return rc;
5023} 5749}
5024 5750
5025static int bnx2x_stop_leading(struct bnx2x *bp) 5751int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
5026{ 5752{
5027 __le16 dsb_sp_prod_idx;
5028 /* if the other port is handling traffic,
5029 this can take a lot of time */
5030 int cnt = 500;
5031 int rc; 5753 int rc;
5032 5754
5033 might_sleep(); 5755 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
5034 5756
5035 /* Send HALT ramrod */ 5757 /* halt the connection */
5036 bp->fp[0].state = BNX2X_FP_STATE_HALTING; 5758 *p->pstate = BNX2X_FP_STATE_HALTING;
5037 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0); 5759 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
5760 p->cl_id, 0);
5038 5761
5039 /* Wait for completion */ 5762 /* Wait for completion */
5040 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, 5763 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
5041 &(bp->fp[0].state), 1); 5764 p->pstate, poll_flag);
5042 if (rc) /* timeout */ 5765 if (rc) /* timeout */
5043 return rc; 5766 return rc;
5044 5767
5045 dsb_sp_prod_idx = *bp->dsb_sp_prod; 5768 *p->pstate = BNX2X_FP_STATE_TERMINATING;
5769 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
5770 p->cl_id, 0);
5771 /* Wait for completion */
5772 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
5773 p->pstate, poll_flag);
5774 if (rc) /* timeout */
5775 return rc;
5046 5776
5047 /* Send PORT_DELETE ramrod */
5048 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5049 5777
5050 /* Wait for completion to arrive on default status block 5778 /* delete cfc entry */
5051 we are going to reset the chip anyway 5779 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
5052 so there is not much to do if this times out
5053 */
5054 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
5055 if (!cnt) {
5056 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5057 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5058 *bp->dsb_sp_prod, dsb_sp_prod_idx);
5059#ifdef BNX2X_STOP_ON_ERROR
5060 bnx2x_panic();
5061#endif
5062 rc = -EBUSY;
5063 break;
5064 }
5065 cnt--;
5066 msleep(1);
5067 rmb(); /* Refresh the dsb_sp_prod */
5068 }
5069 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5070 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
5071 5780
5781 /* Wait for completion */
5782 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
5783 p->pstate, WAIT_RAMROD_COMMON);
5072 return rc; 5784 return rc;
5073} 5785}
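
The per-client teardown is now a three-step handshake: HALT and wait for HALTED, TERMINATE and wait for TERMINATED, then CFC_DEL and wait for CLOSED. A compact sketch of the sequence; post_ramrod() and wait_state() are hypothetical stand-ins for bnx2x_sp_post() and bnx2x_wait_ramrod():

#include <stdio.h>

enum state { ST_HALTED, ST_TERMINATED, ST_CLOSED };

static enum state chip;                 /* pretend hardware state */

static void post_ramrod(const char *cmd, enum state reached)
{
        printf("post %s\n", cmd);       /* bnx2x_sp_post() stand-in */
        chip = reached;                 /* completion arrives instantly */
}

static int wait_state(enum state target)
{
        return chip == target ? 0 : -1; /* bnx2x_wait_ramrod() stand-in */
}

int main(void)
{
        int rc;

        post_ramrod("HALT", ST_HALTED);          /* stop new traffic */
        if ((rc = wait_state(ST_HALTED)))
                return rc;
        post_ramrod("TERMINATE", ST_TERMINATED); /* flush the connection */
        if ((rc = wait_state(ST_TERMINATED)))
                return rc;
        post_ramrod("CFC_DEL", ST_CLOSED);       /* free the CFC entry */
        return wait_state(ST_CLOSED);
}
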
5074 5786
5787static int bnx2x_stop_client(struct bnx2x *bp, int index)
5788{
5789 struct bnx2x_client_ramrod_params client_stop = {0};
5790 struct bnx2x_fastpath *fp = &bp->fp[index];
5791
5792 client_stop.index = index;
5793 client_stop.cid = fp->cid;
5794 client_stop.cl_id = fp->cl_id;
5795 client_stop.pstate = &(fp->state);
5796 client_stop.poll = 0;
5797
5798 return bnx2x_stop_fw_client(bp, &client_stop);
5799}
5800
5801
5075static void bnx2x_reset_func(struct bnx2x *bp) 5802static void bnx2x_reset_func(struct bnx2x *bp)
5076{ 5803{
5077 int port = BP_PORT(bp); 5804 int port = BP_PORT(bp);
5078 int func = BP_FUNC(bp); 5805 int func = BP_FUNC(bp);
5079 int base, i; 5806 int base, i;
5807 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
5808 offsetof(struct hc_status_block_data_e1x, common);
5809 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
5810 int pfid_offset = offsetof(struct pci_entity, pf_id);
5811
5812 /* Disable the function in the FW */
5813 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
5814 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
5815 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
5816 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
5817
5818 /* FP SBs */
5819 for_each_queue(bp, i) {
5820 struct bnx2x_fastpath *fp = &bp->fp[i];
5821 REG_WR8(bp,
5822 BAR_CSTRORM_INTMEM +
5823 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
5824 + pfunc_offset_fp + pfid_offset,
5825 HC_FUNCTION_DISABLED);
5826 }
5827
5828 /* SP SB */
5829 REG_WR8(bp,
5830 BAR_CSTRORM_INTMEM +
5831 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5832 pfunc_offset_sp + pfid_offset,
5833 HC_FUNCTION_DISABLED);
5834
5835
5836 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
5837 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
5838 0);
5080 5839
5081 /* Configure IGU */ 5840 /* Configure IGU */
5082 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 5841 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
@@ -5099,6 +5858,8 @@ static void bnx2x_reset_func(struct bnx2x *bp)
5099 base = FUNC_ILT_BASE(func); 5858 base = FUNC_ILT_BASE(func);
5100 for (i = base; i < base + ILT_PER_FUNC; i++) 5859 for (i = base; i < base + ILT_PER_FUNC; i++)
5101 bnx2x_ilt_wr(bp, i, 0); 5860 bnx2x_ilt_wr(bp, i, 0);
5861
5862 bp->dmae_ready = 0;
5102} 5863}
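
The FW-disable writes above locate one byte inside the status-block data with nested offsetof() and REG_WR8() HC_FUNCTION_DISABLED into it. A runnable illustration of the idiom with made-up layouts (the real hc_status_block_data_e1x and pci_entity come from the firmware HSI headers):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pci_entity { uint8_t pf_id; uint8_t vf_id; uint8_t vnic_id; };
struct sb_common  { uint32_t host_sb_addr; struct pci_entity p_func; };
struct sb_data    { uint16_t index_data[8]; struct sb_common common; };

int main(void)
{
        size_t off = offsetof(struct sb_data, common) +
                     offsetof(struct sb_common, p_func) +
                     offsetof(struct pci_entity, pf_id);

        /* the driver adds such an offset to
         * CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) and writes a
         * single "disabled" byte there */
        printf("pf_id byte lives %zu bytes into the SB data\n", off);
        return 0;
}
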
5103 5864
5104static void bnx2x_reset_port(struct bnx2x *bp) 5865static void bnx2x_reset_port(struct bnx2x *bp)
@@ -5167,7 +5928,6 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5167 cnt = 1000; 5928 cnt = 1000;
5168 while (bnx2x_has_tx_work_unload(fp)) { 5929 while (bnx2x_has_tx_work_unload(fp)) {
5169 5930
5170 bnx2x_tx_int(fp);
5171 if (!cnt) { 5931 if (!cnt) {
5172 BNX2X_ERR("timeout waiting for queue[%d]\n", 5932 BNX2X_ERR("timeout waiting for queue[%d]\n",
5173 i); 5933 i);
@@ -5186,39 +5946,21 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5186 msleep(1); 5946 msleep(1);
5187 5947
5188 if (CHIP_IS_E1(bp)) { 5948 if (CHIP_IS_E1(bp)) {
5189 struct mac_configuration_cmd *config = 5949 /* invalidate mc list,
5190 bnx2x_sp(bp, mcast_config); 5950 * wait and poll (interrupts are off)
5191 5951 */
5192 bnx2x_set_eth_mac_addr_e1(bp, 0); 5952 bnx2x_invalidate_e1_mc_list(bp);
5193 5953 bnx2x_set_eth_mac(bp, 0);
5194 for (i = 0; i < config->hdr.length; i++)
5195 CAM_INVALIDATE(config->config_table[i]);
5196
5197 config->hdr.length = i;
5198 if (CHIP_REV_IS_SLOW(bp))
5199 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5200 else
5201 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5202 config->hdr.client_id = bp->fp->cl_id;
5203 config->hdr.reserved1 = 0;
5204
5205 bp->set_mac_pending++;
5206 smp_wmb();
5207
5208 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5209 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5210 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5211 5954
5212 } else { /* E1H */ 5955 } else {
5213 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 5956 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5214 5957
5215 bnx2x_set_eth_mac_addr_e1h(bp, 0); 5958 bnx2x_set_eth_mac(bp, 0);
5216 5959
5217 for (i = 0; i < MC_HASH_SIZE; i++) 5960 for (i = 0; i < MC_HASH_SIZE; i++)
5218 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); 5961 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5219
5220 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5221 } 5962 }
5963
5222#ifdef BCM_CNIC 5964#ifdef BCM_CNIC
5223 /* Clear iSCSI L2 MAC */ 5965 /* Clear iSCSI L2 MAC */
5224 mutex_lock(&bp->cnic_mutex); 5966 mutex_lock(&bp->cnic_mutex);
@@ -5257,21 +5999,27 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5257 5999
5258 /* Close multi and leading connections 6000 /* Close multi and leading connections
5259 Completions for ramrods are collected in a synchronous way */ 6001 Completions for ramrods are collected in a synchronous way */
5260 for_each_nondefault_queue(bp, i) 6002 for_each_queue(bp, i)
5261 if (bnx2x_stop_multi(bp, i)) 6003
6004 if (bnx2x_stop_client(bp, i))
6005#ifdef BNX2X_STOP_ON_ERROR
6006 return;
6007#else
5262 goto unload_error; 6008 goto unload_error;
6009#endif
5263 6010
5264 rc = bnx2x_stop_leading(bp); 6011 rc = bnx2x_func_stop(bp);
5265 if (rc) { 6012 if (rc) {
5266 BNX2X_ERR("Stop leading failed!\n"); 6013 BNX2X_ERR("Function stop failed!\n");
5267#ifdef BNX2X_STOP_ON_ERROR 6014#ifdef BNX2X_STOP_ON_ERROR
5268 return -EBUSY; 6015 return;
5269#else 6016#else
5270 goto unload_error; 6017 goto unload_error;
5271#endif 6018#endif
5272 } 6019 }
5273 6020#ifndef BNX2X_STOP_ON_ERROR
5274unload_error: 6021unload_error:
6022#endif
5275 if (!BP_NOMCP(bp)) 6023 if (!BP_NOMCP(bp))
5276 reset_code = bnx2x_fw_command(bp, reset_code, 0); 6024 reset_code = bnx2x_fw_command(bp, reset_code, 0);
5277 else { 6025 else {
@@ -5293,6 +6041,12 @@ unload_error:
5293 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT)) 6041 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5294 bnx2x__link_reset(bp); 6042 bnx2x__link_reset(bp);
5295 6043
6044 /* Disable HW interrupts, NAPI */
6045 bnx2x_netif_stop(bp, 1);
6046
6047 /* Release IRQs */
6048 bnx2x_free_irq(bp, false);
6049
5296 /* Reset the chip */ 6050 /* Reset the chip */
5297 bnx2x_reset_chip(bp, reset_code); 6051 bnx2x_reset_chip(bp, reset_code);
5298 6052
@@ -5953,6 +6707,18 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5953 bp->link_params.chip_id = bp->common.chip_id; 6707 bp->link_params.chip_id = bp->common.chip_id;
5954 BNX2X_DEV_INFO("chip ID is 0x%x\n", id); 6708 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5955 6709
6710 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
6711
6712 /* Set doorbell size */
6713 bp->db_size = (1 << BNX2X_DB_SHIFT);
6714
6715 /*
6716 * set base FW non-default (fast path) status block id; this value is
6717 * used to initialize the fw_sb_id saved on the fp/queue structure,
6718 * which determines the id used by the FW.
6719 */
6720 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
6721
5956 val = (REG_RD(bp, 0x2874) & 0x55); 6722 val = (REG_RD(bp, 0x2874) & 0x55);
5957 if ((bp->common.chip_id & 0x1) || 6723 if ((bp->common.chip_id & 0x1) ||
5958 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { 6724 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
@@ -6417,13 +7183,23 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6417 7183
6418 bnx2x_get_common_hwinfo(bp); 7184 bnx2x_get_common_hwinfo(bp);
6419 7185
7186 bp->common.int_block = INT_BLOCK_HC;
7187
7188 bp->igu_dsb_id = DEF_SB_IGU_ID;
7189 bp->igu_base_sb = 0;
7190 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
7191
6420 bp->e1hov = 0; 7192 bp->e1hov = 0;
6421 bp->e1hmf = 0; 7193 bp->e1hmf = 0;
6422 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) { 7194 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
7195
7196 bp->common.mf_cfg_base = bp->common.shmem_base +
7197 offsetof(struct shmem_region, func_mb) +
7198 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
6423 bp->mf_config = 7199 bp->mf_config =
6424 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 7200 MF_CFG_RD(bp, func_mf_config[func].config);
6425 7201
6426 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) & 7202 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
6427 FUNC_MF_CFG_E1HOV_TAG_MASK); 7203 FUNC_MF_CFG_E1HOV_TAG_MASK);
6428 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) 7204 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
6429 bp->e1hmf = 1; 7205 bp->e1hmf = 1;
@@ -6431,7 +7207,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6431 IS_E1HMF(bp) ? "multi" : "single"); 7207 IS_E1HMF(bp) ? "multi" : "single");
6432 7208
6433 if (IS_E1HMF(bp)) { 7209 if (IS_E1HMF(bp)) {
6434 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func]. 7210 val = (MF_CFG_RD(bp, func_mf_config[func].
6435 e1hov_tag) & 7211 e1hov_tag) &
6436 FUNC_MF_CFG_E1HOV_TAG_MASK); 7212 FUNC_MF_CFG_E1HOV_TAG_MASK);
6437 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 7213 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
@@ -6453,6 +7229,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6453 } 7229 }
6454 } 7230 }
6455 7231
7232 /* adjust igu_sb_cnt to MF */
7233 if (IS_E1HMF(bp))
7234 bp->igu_sb_cnt /= E1HVN_MAX;
7235
6456 if (!BP_NOMCP(bp)) { 7236 if (!BP_NOMCP(bp)) {
6457 bnx2x_get_port_hwinfo(bp); 7237 bnx2x_get_port_hwinfo(bp);
6458 7238
@@ -6462,8 +7242,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6462 } 7242 }
6463 7243
6464 if (IS_E1HMF(bp)) { 7244 if (IS_E1HMF(bp)) {
6465 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper); 7245 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
6466 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower); 7246 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
6467 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && 7247 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6468 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) { 7248 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6469 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff); 7249 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
@@ -6577,6 +7357,9 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6577 7357
6578 rc = bnx2x_get_hwinfo(bp); 7358 rc = bnx2x_get_hwinfo(bp);
6579 7359
7360 if (!rc)
7361 rc = bnx2x_alloc_mem_bp(bp);
7362
6580 bnx2x_read_fwinfo(bp); 7363 bnx2x_read_fwinfo(bp);
6581 /* need to reset chip if undi was active */ 7364 /* need to reset chip if undi was active */
6582 if (!BP_NOMCP(bp)) 7365 if (!BP_NOMCP(bp))
@@ -6623,8 +7406,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6623 bp->rx_csum = 1; 7406 bp->rx_csum = 1;
6624 7407
6625 /* make sure that the numbers are in the right granularity */ 7408 /* make sure that the numbers are in the right granularity */
6626 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); 7409 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
6627 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); 7410 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
6628 7411
6629 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); 7412 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6630 bp->current_interval = (poll ? poll : timer_interval); 7413 bp->current_interval = (poll ? poll : timer_interval);
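
The coalescing defaults are now rounded down to a multiple of BNX2X_BTR itself instead of 4*BNX2X_BTR. The integer arithmetic, sketched with an assumed BNX2X_BTR of 4:

    /* Round a coalescing value down to BTR granularity; with an assumed
     * BNX2X_BTR of 4: (50 / 4) * 4 == 48 and (25 / 4) * 4 == 24. */
    static unsigned int round_to_btr(unsigned int usec, unsigned int btr)
    {
            return (usec / btr) * btr;
    }
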
@@ -6724,73 +7507,16 @@ void bnx2x_set_rx_mode(struct net_device *dev)
6724 7507
6725 else { /* some multicasts */ 7508 else { /* some multicasts */
6726 if (CHIP_IS_E1(bp)) { 7509 if (CHIP_IS_E1(bp)) {
6727 int i, old, offset; 7510 /*
6728 struct netdev_hw_addr *ha; 7511 * set mc list; do not wait, since waiting implies sleeping
6729 struct mac_configuration_cmd *config = 7512 * and set_rx_mode can be invoked from a non-sleepable
6730 bnx2x_sp(bp, mcast_config); 7513 * context
6731 7514 */
6732 i = 0; 7515 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
6733 netdev_for_each_mc_addr(ha, dev) { 7516 BNX2X_MAX_EMUL_MULTI*(1 + port) :
6734 config->config_table[i]. 7517 BNX2X_MAX_MULTICAST*(1 + port));
6735 cam_entry.msb_mac_addr =
6736 swab16(*(u16 *)&ha->addr[0]);
6737 config->config_table[i].
6738 cam_entry.middle_mac_addr =
6739 swab16(*(u16 *)&ha->addr[2]);
6740 config->config_table[i].
6741 cam_entry.lsb_mac_addr =
6742 swab16(*(u16 *)&ha->addr[4]);
6743 config->config_table[i].cam_entry.flags =
6744 cpu_to_le16(port);
6745 config->config_table[i].
6746 target_table_entry.flags = 0;
6747 config->config_table[i].target_table_entry.
6748 clients_bit_vector =
6749 cpu_to_le32(1 << BP_L_ID(bp));
6750 config->config_table[i].
6751 target_table_entry.vlan_id = 0;
6752
6753 DP(NETIF_MSG_IFUP,
6754 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6755 config->config_table[i].
6756 cam_entry.msb_mac_addr,
6757 config->config_table[i].
6758 cam_entry.middle_mac_addr,
6759 config->config_table[i].
6760 cam_entry.lsb_mac_addr);
6761 i++;
6762 }
6763 old = config->hdr.length;
6764 if (old > i) {
6765 for (; i < old; i++) {
6766 if (CAM_IS_INVALID(config->
6767 config_table[i])) {
6768 /* already invalidated */
6769 break;
6770 }
6771 /* invalidate */
6772 CAM_INVALIDATE(config->
6773 config_table[i]);
6774 }
6775 }
6776
6777 if (CHIP_REV_IS_SLOW(bp))
6778 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6779 else
6780 offset = BNX2X_MAX_MULTICAST*(1 + port);
6781
6782 config->hdr.length = i;
6783 config->hdr.offset = offset;
6784 config->hdr.client_id = bp->fp->cl_id;
6785 config->hdr.reserved1 = 0;
6786
6787 bp->set_mac_pending++;
6788 smp_wmb();
6789 7518
6790 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 7519 bnx2x_set_e1_mc_list(bp, offset);
6791 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6792 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6793 0);
6794 } else { /* E1H */ 7520 } else { /* E1H */
6795 /* Accept one or more multicasts */ 7521 /* Accept one or more multicasts */
6796 struct netdev_hw_addr *ha; 7522 struct netdev_hw_addr *ha;
@@ -6802,9 +7528,10 @@ void bnx2x_set_rx_mode(struct net_device *dev)
6802 7528
6803 netdev_for_each_mc_addr(ha, dev) { 7529 netdev_for_each_mc_addr(ha, dev) {
6804 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", 7530 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6805 ha->addr); 7531 bnx2x_mc_addr(ha));
6806 7532
6807 crc = crc32c_le(0, ha->addr, ETH_ALEN); 7533 crc = crc32c_le(0, bnx2x_mc_addr(ha),
7534 ETH_ALEN);
6808 bit = (crc >> 24) & 0xff; 7535 bit = (crc >> 24) & 0xff;
6809 regidx = bit >> 5; 7536 regidx = bit >> 5;
6810 bit &= 0x1f; 7537 bit &= 0x1f;
@@ -6817,6 +7544,7 @@ void bnx2x_set_rx_mode(struct net_device *dev)
6817 } 7544 }
6818 } 7545 }
6819 7546
7547
6820 bp->rx_mode = rx_mode; 7548 bp->rx_mode = rx_mode;
6821 bnx2x_set_storm_rx_mode(bp); 7549 bnx2x_set_storm_rx_mode(bp);
6822} 7550}
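
For the E1H branch above, the multicast filter is a 256-bit hash: crc32c of the MAC picks one of 256 bins, which is then decomposed into a register index and a bit offset. A compilable sketch of the bin computation (the filter is assumed to be eight 32-bit words, as in the surrounding code):

    #include <stdint.h>

    /* Map a crc32c of the MAC address to a bin in an 8-word (256-bit)
     * multicast filter, exactly as the E1H branch above does. */
    static void mc_hash_bin(uint32_t crc, unsigned int *regidx,
                            unsigned int *bitpos)
    {
            unsigned int bit = (crc >> 24) & 0xff;  /* top byte -> bin 0..255 */

            *regidx = bit >> 5;                     /* which 32-bit word */
            *bitpos = bit & 0x1f;                   /* which bit inside it */
    }
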
@@ -7003,7 +7731,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7003 } 7731 }
7004 7732
7005 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), 7733 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7006 min_t(u64, BNX2X_DB_SIZE, 7734 min_t(u64, BNX2X_DB_SIZE(bp),
7007 pci_resource_len(pdev, 2))); 7735 pci_resource_len(pdev, 2)));
7008 if (!bp->doorbells) { 7736 if (!bp->doorbells) {
7009 dev_err(&bp->pdev->dev, 7737 dev_err(&bp->pdev->dev,
@@ -7179,6 +7907,30 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
7179 } 7907 }
7180} 7908}
7181 7909
7910/**
7911 * IRO array is stored in the following format:
7912 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
7913 */
7914static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
7915{
7916 const __be32 *source = (const __be32 *)_source;
7917 struct iro *target = (struct iro *)_target;
7918 u32 i, j, tmp;
7919
7920 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
7921 target[i].base = be32_to_cpu(source[j]);
7922 j++;
7923 tmp = be32_to_cpu(source[j]);
7924 target[i].m1 = (tmp >> 16) & 0xffff;
7925 target[i].m2 = tmp & 0xffff;
7926 j++;
7927 tmp = be32_to_cpu(source[j]);
7928 target[i].m3 = (tmp >> 16) & 0xffff;
7929 target[i].size = tmp & 0xffff;
7930 j++;
7931 }
7932}
7933
7182static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) 7934static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7183{ 7935{
7184 const __be16 *source = (const __be16 *)_source; 7936 const __be16 *source = (const __be16 *)_source;
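
Decoding one IRO record is the inverse of the packing described in the comment above bnx2x_prep_iro(): three 32-bit big-endian words carry base, m1/m2, and m3/size. A worked sketch over already-byteswapped words:

    #include <stdint.h>

    struct iro_example { uint32_t base; uint16_t m1, m2, m3, size; };

    /* words[] holds the three 32-bit values of one record, already
     * byteswapped to CPU order (be32_to_cpu done by the caller). */
    static struct iro_example decode_iro(const uint32_t words[3])
    {
            struct iro_example e;

            e.base = words[0];
            e.m1   = (words[1] >> 16) & 0xffff;
            e.m2   =  words[1]        & 0xffff;
            e.m3   = (words[2] >> 16) & 0xffff;
            e.size =  words[2]        & 0xffff;
            return e;
    }
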
@@ -7260,9 +8012,13 @@ int bnx2x_init_firmware(struct bnx2x *bp)
7260 be32_to_cpu(fw_hdr->csem_int_table_data.offset); 8012 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7261 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + 8013 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
7262 be32_to_cpu(fw_hdr->csem_pram_data.offset); 8014 be32_to_cpu(fw_hdr->csem_pram_data.offset);
8015 /* IRO */
8016 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
7263 8017
7264 return 0; 8018 return 0;
7265 8019
8020iro_alloc_err:
8021 kfree(bp->init_ops_offsets);
7266init_offsets_alloc_err: 8022init_offsets_alloc_err:
7267 kfree(bp->init_ops); 8023 kfree(bp->init_ops);
7268init_ops_alloc_err: 8024init_ops_alloc_err:
@@ -7273,17 +8029,27 @@ request_firmware_exit:
7273 return rc; 8029 return rc;
7274} 8030}
7275 8031
8032static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
8033{
8034 int cid_count = L2_FP_COUNT(l2_cid_count);
7276 8035
8036#ifdef BCM_CNIC
8037 cid_count += CNIC_CID_MAX;
8038#endif
8039 return roundup(cid_count, QM_CID_ROUND);
8040}
7277static int __devinit bnx2x_init_one(struct pci_dev *pdev, 8041static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7278 const struct pci_device_id *ent) 8042 const struct pci_device_id *ent)
7279{ 8043{
7280 struct net_device *dev = NULL; 8044 struct net_device *dev = NULL;
7281 struct bnx2x *bp; 8045 struct bnx2x *bp;
7282 int pcie_width, pcie_speed; 8046 int pcie_width, pcie_speed;
7283 int rc; 8047 int rc, cid_count;
8048
8049 cid_count = FP_SB_MAX_E1x + CNIC_CONTEXT_USE;
7284 8050
7285 /* dev zeroed in init_etherdev */ 8051 /* dev zeroed in init_etherdev */
7286 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT); 8052 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
7287 if (!dev) { 8053 if (!dev) {
7288 dev_err(&pdev->dev, "Cannot allocate net device\n"); 8054 dev_err(&pdev->dev, "Cannot allocate net device\n");
7289 return -ENOMEM; 8055 return -ENOMEM;
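
bnx2x_set_qm_cid_count() above pads the CID count out to the queue manager's allocation granularity. A sketch of the roundup arithmetic, with an assumed QM_CID_ROUND of 1024:

    /* roundup(x, y) for positive integers; with an assumed QM_CID_ROUND
     * of 1024, a raw count of 273 rounds up to 1024. */
    static int example_qm_cid_count(int cid_count, int qm_round)
    {
            return ((cid_count + qm_round - 1) / qm_round) * qm_round;
    }
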
@@ -7294,6 +8060,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7294 8060
7295 pci_set_drvdata(pdev, dev); 8061 pci_set_drvdata(pdev, dev);
7296 8062
8063 bp->l2_cid_count = cid_count;
8064
7297 rc = bnx2x_init_dev(pdev, dev); 8065 rc = bnx2x_init_dev(pdev, dev);
7298 if (rc < 0) { 8066 if (rc < 0) {
7299 free_netdev(dev); 8067 free_netdev(dev);
@@ -7304,6 +8072,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7304 if (rc) 8072 if (rc)
7305 goto init_one_exit; 8073 goto init_one_exit;
7306 8074
8075 /* calc qm_cid_count */
8076 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
8077
7307 rc = register_netdev(dev); 8078 rc = register_netdev(dev);
7308 if (rc) { 8079 if (rc) {
7309 dev_err(&pdev->dev, "Cannot register net device\n"); 8080 dev_err(&pdev->dev, "Cannot register net device\n");
@@ -7360,6 +8131,8 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7360 if (bp->doorbells) 8131 if (bp->doorbells)
7361 iounmap(bp->doorbells); 8132 iounmap(bp->doorbells);
7362 8133
8134 bnx2x_free_mem_bp(bp);
8135
7363 free_netdev(dev); 8136 free_netdev(dev);
7364 8137
7365 if (atomic_read(&pdev->enable_cnt) == 1) 8138 if (atomic_read(&pdev->enable_cnt) == 1)
@@ -7387,16 +8160,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7387 /* Release IRQs */ 8160 /* Release IRQs */
7388 bnx2x_free_irq(bp, false); 8161 bnx2x_free_irq(bp, false);
7389 8162
7390 if (CHIP_IS_E1(bp)) {
7391 struct mac_configuration_cmd *config =
7392 bnx2x_sp(bp, mcast_config);
7393
7394 for (i = 0; i < config->hdr.length; i++)
7395 CAM_INVALIDATE(config->config_table[i]);
7396 }
7397
7398 /* Free SKBs, SGEs, TPA pool and driver internals */ 8163 /* Free SKBs, SGEs, TPA pool and driver internals */
7399 bnx2x_free_skbs(bp); 8164 bnx2x_free_skbs(bp);
8165
7400 for_each_queue(bp, i) 8166 for_each_queue(bp, i)
7401 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 8167 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7402 for_each_queue(bp, i) 8168 for_each_queue(bp, i)
@@ -7641,8 +8407,8 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev,
7641 8407
7642 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n", 8408 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
7643 spe->hdr.conn_and_cmd_data, spe->hdr.type, 8409 spe->hdr.conn_and_cmd_data, spe->hdr.type,
7644 spe->data.mac_config_addr.hi, 8410 spe->data.update_data_addr.hi,
7645 spe->data.mac_config_addr.lo, 8411 spe->data.update_data_addr.lo,
7646 bp->cnic_kwq_pending); 8412 bp->cnic_kwq_pending);
7647 8413
7648 if (bp->cnic_kwq_prod == bp->cnic_kwq_last) 8414 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
@@ -7736,8 +8502,24 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7736 case DRV_CTL_START_L2_CMD: { 8502 case DRV_CTL_START_L2_CMD: {
7737 u32 cli = ctl->data.ring.client_id; 8503 u32 cli = ctl->data.ring.client_id;
7738 8504
7739 bp->rx_mode_cl_mask |= (1 << cli); 8505 /* Set iSCSI MAC address */
7740 bnx2x_set_storm_rx_mode(bp); 8506 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8507
8508 mmiowb();
8509 barrier();
8510
8511 /* Start accepting on iSCSI L2 ring. Accept all multicasts
8512 * because it's the only way for UIO Client to accept
 8513	 * multicasts (in non-promiscuous mode only one Client per
 8514	 * function, the leading one in our case, will receive
 8515	 * multicast packets).
8516 */
8517 bnx2x_rxq_set_mac_filters(bp, cli,
8518 BNX2X_ACCEPT_UNICAST |
8519 BNX2X_ACCEPT_BROADCAST |
8520 BNX2X_ACCEPT_ALL_MULTICAST);
8521 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
8522
7741 break; 8523 break;
7742 } 8524 }
7743 8525
@@ -7745,8 +8527,15 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7745 case DRV_CTL_STOP_L2_CMD: { 8527 case DRV_CTL_STOP_L2_CMD: {
7746 u32 cli = ctl->data.ring.client_id; 8528 u32 cli = ctl->data.ring.client_id;
7747 8529
7748 bp->rx_mode_cl_mask &= ~(1 << cli); 8530 /* Stop accepting on iSCSI L2 ring */
7749 bnx2x_set_storm_rx_mode(bp); 8531 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
8532 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
8533
8534 mmiowb();
8535 barrier();
8536
8537 /* Unset iSCSI L2 MAC */
8538 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7750 break; 8539 break;
7751 } 8540 }
7752 8541
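
The START/STOP handlers above now program the iSCSI client's RX filters directly instead of toggling a bit in rx_mode_cl_mask; the accept mask is a plain OR of flag bits. A sketch with illustrative flag values (the real BNX2X_ACCEPT_* constants differ):

    /* Illustrative flag values only; the real BNX2X_ACCEPT_* differ. */
    enum {
            EX_ACCEPT_NONE          = 0,
            EX_ACCEPT_UNICAST       = 1 << 0,
            EX_ACCEPT_BROADCAST     = 1 << 1,
            EX_ACCEPT_ALL_MULTICAST = 1 << 2,
    };

    static unsigned int iscsi_ring_filters(int start)
    {
            return start ? (EX_ACCEPT_UNICAST | EX_ACCEPT_BROADCAST |
                            EX_ACCEPT_ALL_MULTICAST)
                         : EX_ACCEPT_NONE;
    }
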
@@ -7770,10 +8559,12 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
7770 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; 8559 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
7771 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; 8560 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
7772 } 8561 }
7773 cp->irq_arr[0].status_blk = bp->cnic_sb; 8562 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
7774 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp); 8563 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
8564 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
7775 cp->irq_arr[1].status_blk = bp->def_status_blk; 8565 cp->irq_arr[1].status_blk = bp->def_status_blk;
7776 cp->irq_arr[1].status_blk_num = DEF_SB_ID; 8566 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
8567 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
7777 8568
7778 cp->num_irq = 2; 8569 cp->num_irq = 2;
7779} 8570}
@@ -7805,8 +8596,11 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
7805 8596
7806 cp->num_irq = 0; 8597 cp->num_irq = 0;
7807 cp->drv_state = CNIC_DRV_STATE_REGD; 8598 cp->drv_state = CNIC_DRV_STATE_REGD;
8599 cp->iro_arr = bp->iro_arr;
7808 8600
7809 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp)); 8601 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
8602 BNX2X_VF_ID_INVALID, false,
8603 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
7810 8604
7811 bnx2x_setup_cnic_irq_info(bp); 8605 bnx2x_setup_cnic_irq_info(bp);
7812 bnx2x_set_iscsi_eth_mac_addr(bp, 1); 8606 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
@@ -7847,7 +8641,7 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
7847 cp->io_base = bp->regview; 8641 cp->io_base = bp->regview;
7848 cp->io_base2 = bp->doorbells; 8642 cp->io_base2 = bp->doorbells;
7849 cp->max_kwqe_pending = 8; 8643 cp->max_kwqe_pending = 8;
7850 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context); 8644 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
7851 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1; 8645 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
7852 cp->ctx_tbl_len = CNIC_ILT_LINES; 8646 cp->ctx_tbl_len = CNIC_ILT_LINES;
7853 cp->starting_cid = BCM_CNIC_CID_START; 8647 cp->starting_cid = BCM_CNIC_CID_START;
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index efa1403ebf82..1256f62f7bff 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -153,7 +153,7 @@ static inline long bnx2x_hilo(u32 *hiref)
153static void bnx2x_storm_stats_post(struct bnx2x *bp) 153static void bnx2x_storm_stats_post(struct bnx2x *bp)
154{ 154{
155 if (!bp->stats_pending) { 155 if (!bp->stats_pending) {
156 struct eth_query_ramrod_data ramrod_data = {0}; 156 struct common_query_ramrod_data ramrod_data = {0};
157 int i, rc; 157 int i, rc;
158 158
159 spin_lock_bh(&bp->stats_lock); 159 spin_lock_bh(&bp->stats_lock);
@@ -163,9 +163,9 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
163 for_each_queue(bp, i) 163 for_each_queue(bp, i)
164 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id); 164 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
165 165
166 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 166 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
167 ((u32 *)&ramrod_data)[1], 167 ((u32 *)&ramrod_data)[1],
168 ((u32 *)&ramrod_data)[0], 0); 168 ((u32 *)&ramrod_data)[0], 1);
169 if (rc == 0) { 169 if (rc == 0) {
170 /* stats ramrod has it's own slot on the spq */ 170 /* stats ramrod has it's own slot on the spq */
171 bp->spq_left++; 171 bp->spq_left++;
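
The stats query ramrod above carries a bitmap of client ids, one bit per queue. A sketch of building that vector, assuming every client id fits in a 32-bit mask:

    #include <stdint.h>

    /* One bit per client id, as in the for_each_queue() loop above;
     * assumes all client ids are below 32. */
    static uint32_t build_ctr_id_vector(const uint8_t *cl_id, int nqueues)
    {
            uint32_t vec = 0;
            int i;

            for (i = 0; i < nqueues; i++)
                    vec |= 1u << cl_id[i];
            return vec;
    }
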
@@ -398,9 +398,9 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
398 BIGMAC_REGISTER_RX_STAT_GR64) >> 2; 398 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
399 dmae->src_addr_hi = 0; 399 dmae->src_addr_hi = 0;
400 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + 400 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
401 offsetof(struct bmac_stats, rx_stat_gr64_lo)); 401 offsetof(struct bmac1_stats, rx_stat_gr64_lo));
402 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + 402 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
403 offsetof(struct bmac_stats, rx_stat_gr64_lo)); 403 offsetof(struct bmac1_stats, rx_stat_gr64_lo));
404 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - 404 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
405 BIGMAC_REGISTER_RX_STAT_GR64) >> 2; 405 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
406 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 406 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
@@ -571,7 +571,7 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
571 571
572static void bnx2x_bmac_stats_update(struct bnx2x *bp) 572static void bnx2x_bmac_stats_update(struct bnx2x *bp)
573{ 573{
574 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats); 574 struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
575 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); 575 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
576 struct bnx2x_eth_stats *estats = &bp->eth_stats; 576 struct bnx2x_eth_stats *estats = &bp->eth_stats;
577 struct { 577 struct {
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 4e9d4ae1f303..80259815af06 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -942,7 +942,7 @@ static int cnic_alloc_uio(struct cnic_dev *dev)
942 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 942 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
943 uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & 943 uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
944 PAGE_MASK; 944 PAGE_MASK;
945 uinfo->mem[1].size = sizeof(struct host_def_status_block); 945 uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
946 946
947 uinfo->name = "bnx2x_cnic"; 947 uinfo->name = "bnx2x_cnic";
948 } 948 }
@@ -1063,6 +1063,8 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1063 int i, j, n, ret, pages; 1063 int i, j, n, ret, pages;
1064 struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; 1064 struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1065 1065
1066 cp->iro_arr = ethdev->iro_arr;
1067
1066 cp->max_cid_space = MAX_ISCSI_TBL_SZ; 1068 cp->max_cid_space = MAX_ISCSI_TBL_SZ;
1067 cp->iscsi_start_cid = start_cid; 1069 cp->iscsi_start_cid = start_cid;
1068 if (start_cid < BNX2X_ISCSI_START_CID) { 1070 if (start_cid < BNX2X_ISCSI_START_CID) {
@@ -1127,8 +1129,6 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1127 1129
1128 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; 1130 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1129 1131
1130 memset(cp->status_blk.bnx2x, 0, sizeof(*cp->status_blk.bnx2x));
1131
1132 cp->l2_rx_ring_size = 15; 1132 cp->l2_rx_ring_size = 15;
1133 1133
1134 ret = cnic_alloc_l2_rings(dev, 4); 1134 ret = cnic_alloc_l2_rings(dev, 4);
@@ -1211,7 +1211,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1211 cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | 1211 cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1212 BNX2X_HW_CID(cp, cid))); 1212 BNX2X_HW_CID(cp, cid)));
1213 kwqe.hdr.type = cpu_to_le16(type); 1213 kwqe.hdr.type = cpu_to_le16(type);
1214 kwqe.hdr.reserved = 0; 1214 kwqe.hdr.reserved1 = 0;
1215 kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo); 1215 kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
1216 kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi); 1216 kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
1217 1217
@@ -1527,8 +1527,10 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1527 ictx->tstorm_st_context.tcp.cwnd = 0x5A8; 1527 ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1528 ictx->tstorm_st_context.tcp.flags2 |= 1528 ictx->tstorm_st_context.tcp.flags2 |=
1529 TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN; 1529 TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
1530 ictx->tstorm_st_context.tcp.ooo_support_mode =
1531 TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
1530 1532
1531 ictx->timers_context.flags |= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG; 1533 ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
1532 1534
1533 ictx->ustorm_st_context.ring.rq.pbl_base.lo = 1535 ictx->ustorm_st_context.ring.rq.pbl_base.lo =
1534 req2->rq_page_table_addr_lo; 1536 req2->rq_page_table_addr_lo;
@@ -1717,6 +1719,7 @@ static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1717 int ret = 0; 1719 int ret = 0;
1718 struct iscsi_kcqe kcqe; 1720 struct iscsi_kcqe kcqe;
1719 struct kcqe *cqes[1]; 1721 struct kcqe *cqes[1];
1722 u32 hw_cid, type;
1720 1723
1721 if (!(ctx->ctx_flags & CTX_FL_OFFLD_START)) 1724 if (!(ctx->ctx_flags & CTX_FL_OFFLD_START))
1722 goto skip_cfc_delete; 1725 goto skip_cfc_delete;
@@ -1727,11 +1730,15 @@ static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1727 init_waitqueue_head(&ctx->waitq); 1730 init_waitqueue_head(&ctx->waitq);
1728 ctx->wait_cond = 0; 1731 ctx->wait_cond = 0;
1729 memset(&l5_data, 0, sizeof(l5_data)); 1732 memset(&l5_data, 0, sizeof(l5_data));
1730 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL, 1733 hw_cid = BNX2X_HW_CID(cp, ctx->cid);
1731 req->context_id, 1734 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
1732 ETH_CONNECTION_TYPE | 1735 & SPE_HDR_CONN_TYPE;
1733 (1 << SPE_HDR_COMMON_RAMROD_SHIFT), 1736 type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
1734 &l5_data); 1737 SPE_HDR_FUNCTION_ID);
1738
1739 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
1740 hw_cid, type, &l5_data);
1741
1735 if (ret == 0) 1742 if (ret == 0)
1736 wait_event(ctx->waitq, ctx->wait_cond); 1743 wait_event(ctx->waitq, ctx->wait_cond);
1737 1744
@@ -2322,7 +2329,7 @@ static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
2322{ 2329{
2323 struct cnic_local *cp = dev->cnic_priv; 2330 struct cnic_local *cp = dev->cnic_priv;
2324 2331
2325 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0, 2332 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
2326 IGU_INT_DISABLE, 0); 2333 IGU_INT_DISABLE, 0);
2327} 2334}
2328 2335
@@ -2357,7 +2364,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
2357 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 2364 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
2358 2365
2359 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 2366 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
2360 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 2367 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
2361 status_idx, IGU_INT_ENABLE, 1); 2368 status_idx, IGU_INT_ENABLE, 1);
2362} 2369}
2363 2370
@@ -3285,6 +3292,7 @@ static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
3285{ 3292{
3286 struct cnic_local *cp = dev->cnic_priv; 3293 struct cnic_local *cp = dev->cnic_priv;
3287 u32 pfid = cp->pfid; 3294 u32 pfid = cp->pfid;
3295 u32 port = CNIC_PORT(cp);
3288 3296
3289 cnic_init_bnx2x_mac(dev); 3297 cnic_init_bnx2x_mac(dev);
3290 cnic_bnx2x_set_tcp_timestamp(dev, 1); 3298 cnic_bnx2x_set_tcp_timestamp(dev, 1);
@@ -3293,9 +3301,9 @@ static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
3293 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0); 3301 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
3294 3302
3295 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3303 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3296 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(pfid), 1); 3304 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
3297 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3305 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3298 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(pfid), 3306 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
3299 DEF_MAX_DA_COUNT); 3307 DEF_MAX_DA_COUNT);
3300 3308
3301 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3309 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
@@ -3859,32 +3867,48 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
3859 return err; 3867 return err;
3860} 3868}
3861 3869
3870static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
3871 u16 sb_id, u8 sb_index,
3872 u8 disable)
3873{
3874
3875 u32 addr = BAR_CSTRORM_INTMEM +
3876 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
3877 offsetof(struct hc_status_block_data_e1x, index_data) +
3878 sizeof(struct hc_index_data)*sb_index +
3879 offsetof(struct hc_index_data, flags);
3880 u16 flags = CNIC_RD16(dev, addr);
3881 /* clear and set */
3882 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3883 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
3884 HC_INDEX_DATA_HC_ENABLED);
3885 CNIC_WR16(dev, addr, flags);
3886}
3887
3862static void cnic_enable_bnx2x_int(struct cnic_dev *dev) 3888static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
3863{ 3889{
3864 struct cnic_local *cp = dev->cnic_priv; 3890 struct cnic_local *cp = dev->cnic_priv;
3865 u8 sb_id = cp->status_blk_num; 3891 u8 sb_id = cp->status_blk_num;
3866 int port = CNIC_PORT(cp);
3867 3892
3868 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 3893 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
3869 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, 3894 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
3870 HC_INDEX_C_ISCSI_EQ_CONS), 3895 offsetof(struct hc_status_block_data_e1x, index_data) +
3871 64 / 12); 3896 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
3872 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 3897 offsetof(struct hc_index_data, timeout), 64 / 12);
3873 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, 3898 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
3874 HC_INDEX_C_ISCSI_EQ_CONS), 0);
3875} 3899}
3876 3900
3877static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev) 3901static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
3878{ 3902{
3879} 3903}
3880 3904
3881static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev) 3905static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
3906 struct client_init_ramrod_data *data)
3882{ 3907{
3883 struct cnic_local *cp = dev->cnic_priv; 3908 struct cnic_local *cp = dev->cnic_priv;
3884 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring; 3909 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring;
3885 struct eth_context *context; 3910 dma_addr_t buf_map, ring_map = cp->l2_ring_map;
3886 struct regpair context_addr; 3911 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
3887 dma_addr_t buf_map;
3888 int port = CNIC_PORT(cp); 3912 int port = CNIC_PORT(cp);
3889 int i; 3913 int i;
3890 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 3914 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
@@ -3909,33 +3933,23 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
3909 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 3933 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
3910 3934
3911 } 3935 }
3912 context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr);
3913 3936
3914 val = (u64) cp->l2_ring_map >> 32; 3937 val = (u64) ring_map >> 32;
3915 txbd->next_bd.addr_hi = cpu_to_le32(val); 3938 txbd->next_bd.addr_hi = cpu_to_le32(val);
3916 3939
3917 context->xstorm_st_context.tx_bd_page_base_hi = val; 3940 data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
3918 3941
3919 val = (u64) cp->l2_ring_map & 0xffffffff; 3942 val = (u64) ring_map & 0xffffffff;
3920 txbd->next_bd.addr_lo = cpu_to_le32(val); 3943 txbd->next_bd.addr_lo = cpu_to_le32(val);
3921 3944
3922 context->xstorm_st_context.tx_bd_page_base_lo = val; 3945 data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
3923
3924 context->cstorm_st_context.sb_index_number =
3925 HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS;
3926 context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID;
3927
3928 if (cli < MAX_X_STAT_COUNTER_ID)
3929 context->xstorm_st_context.statistics_data = cli |
3930 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE;
3931 3946
3932 context->xstorm_ag_context.cdu_reserved = 3947 /* Other ramrod params */
3933 CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(cp, BNX2X_ISCSI_L2_CID), 3948 data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
3934 CDU_REGION_NUMBER_XCM_AG, 3949 data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
3935 ETH_CONNECTION_TYPE);
3936 3950
3937 /* reset xstorm per client statistics */ 3951 /* reset xstorm per client statistics */
3938 if (cli < MAX_X_STAT_COUNTER_ID) { 3952 if (cli < MAX_STAT_COUNTER_ID) {
3939 val = BAR_XSTRORM_INTMEM + 3953 val = BAR_XSTRORM_INTMEM +
3940 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); 3954 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
3941 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) 3955 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
@@ -3943,24 +3957,31 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
3943 } 3957 }
3944 3958
3945 cp->tx_cons_ptr = 3959 cp->tx_cons_ptr =
3946 &cp->bnx2x_def_status_blk->c_def_status_block.index_values[ 3960 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
3947 HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS];
3948} 3961}
3949 3962
3950static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev) 3963static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
3964 struct client_init_ramrod_data *data)
3951{ 3965{
3952 struct cnic_local *cp = dev->cnic_priv; 3966 struct cnic_local *cp = dev->cnic_priv;
3953 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring + 3967 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring +
3954 BCM_PAGE_SIZE); 3968 BCM_PAGE_SIZE);
3955 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) 3969 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
3956 (cp->l2_ring + (2 * BCM_PAGE_SIZE)); 3970 (cp->l2_ring + (2 * BCM_PAGE_SIZE));
3957 struct eth_context *context; 3971 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
3958 struct regpair context_addr;
3959 int i; 3972 int i;
3960 int port = CNIC_PORT(cp); 3973 int port = CNIC_PORT(cp);
3961 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 3974 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
3975 int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
3962 u32 val; 3976 u32 val;
3963 struct tstorm_eth_client_config tstorm_client = {0}; 3977 dma_addr_t ring_map = cp->l2_ring_map;
3978
3979 /* General data */
3980 data->general.client_id = cli;
3981 data->general.statistics_en_flg = 1;
3982 data->general.statistics_counter_id = cli;
3983 data->general.activate_flg = 1;
3984 data->general.sp_client_id = cli;
3964 3985
3965 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { 3986 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
3966 dma_addr_t buf_map; 3987 dma_addr_t buf_map;
@@ -3970,83 +3991,42 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
3970 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 3991 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
3971 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 3992 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
3972 } 3993 }
3973 context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr);
3974 3994
3975 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32; 3995 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
3976 rxbd->addr_hi = cpu_to_le32(val); 3996 rxbd->addr_hi = cpu_to_le32(val);
3997 data->rx.bd_page_base.hi = cpu_to_le32(val);
3977 3998
3978 context->ustorm_st_context.common.bd_page_base_hi = val; 3999 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
3979
3980 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
3981 rxbd->addr_lo = cpu_to_le32(val); 4000 rxbd->addr_lo = cpu_to_le32(val);
3982 4001 data->rx.bd_page_base.lo = cpu_to_le32(val);
3983 context->ustorm_st_context.common.bd_page_base_lo = val;
3984
3985 context->ustorm_st_context.common.sb_index_numbers =
3986 BNX2X_ISCSI_RX_SB_INDEX_NUM;
3987 context->ustorm_st_context.common.clientId = cli;
3988 context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID;
3989 if (cli < MAX_U_STAT_COUNTER_ID) {
3990 context->ustorm_st_context.common.flags =
3991 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS;
3992 context->ustorm_st_context.common.statistics_counter_id = cli;
3993 }
3994 context->ustorm_st_context.common.mc_alignment_log_size = 0;
3995 context->ustorm_st_context.common.bd_buff_size =
3996 cp->l2_single_buf_size;
3997
3998 context->ustorm_ag_context.cdu_usage =
3999 CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(cp, BNX2X_ISCSI_L2_CID),
4000 CDU_REGION_NUMBER_UCM_AG,
4001 ETH_CONNECTION_TYPE);
4002 4002
4003 rxcqe += BNX2X_MAX_RCQ_DESC_CNT; 4003 rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
4004 val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32; 4004 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
4005 rxcqe->addr_hi = cpu_to_le32(val); 4005 rxcqe->addr_hi = cpu_to_le32(val);
4006 data->rx.cqe_page_base.hi = cpu_to_le32(val);
4006 4007
4007 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4008 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
4008 USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val);
4009
4010 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4011 USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val);
4012
4013 val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
4014 rxcqe->addr_lo = cpu_to_le32(val); 4009 rxcqe->addr_lo = cpu_to_le32(val);
4010 data->rx.cqe_page_base.lo = cpu_to_le32(val);
4015 4011
4016 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4012 /* Other ramrod params */
4017 USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val); 4013 data->rx.client_qzone_id = cl_qzone_id;
4018 4014 data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
4019 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4015 data->rx.status_block_id = BNX2X_DEF_SB_ID;
4020 USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val);
4021
4022 /* client tstorm info */
4023 tstorm_client.mtu = cp->l2_single_buf_size - 14;
4024 tstorm_client.config_flags = TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE;
4025
4026 if (cli < MAX_T_STAT_COUNTER_ID) {
4027 tstorm_client.config_flags |=
4028 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4029 tstorm_client.statistics_counter_id = cli;
4030 }
4031 4016
4032 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4017 data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
4033 TSTORM_CLIENT_CONFIG_OFFSET(port, cli), 4018 data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);
4034 ((u32 *)&tstorm_client)[0]);
4035 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4036 TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4,
4037 ((u32 *)&tstorm_client)[1]);
4038 4019
4039 /* reset tstorm per client statistics */ 4020 data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
4040 if (cli < MAX_T_STAT_COUNTER_ID) { 4021 data->rx.outer_vlan_removal_enable_flg = 1;
4041 4022
4023 /* reset tstorm and ustorm per client statistics */
4024 if (cli < MAX_STAT_COUNTER_ID) {
4042 val = BAR_TSTRORM_INTMEM + 4025 val = BAR_TSTRORM_INTMEM +
4043 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); 4026 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
4044 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) 4027 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
4045 CNIC_WR(dev, val + i * 4, 0); 4028 CNIC_WR(dev, val + i * 4, 0);
4046 }
4047 4029
4048 /* reset ustorm per client statistics */
4049 if (cli < MAX_U_STAT_COUNTER_ID) {
4050 val = BAR_USTRORM_INTMEM + 4030 val = BAR_USTRORM_INTMEM +
4051 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); 4031 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
4052 for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++) 4032 for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
@@ -4054,8 +4034,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
4054 } 4034 }
4055 4035
4056 cp->rx_cons_ptr = 4036 cp->rx_cons_ptr =
4057 &cp->bnx2x_def_status_blk->u_def_status_block.index_values[ 4037 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
4058 HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS];
4059} 4038}
4060 4039
4061static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev) 4040static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
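
Both ring-init helpers now fill a client_init_ramrod_data block for the firmware instead of writing the eth_context through the BAR, and every 64-bit DMA address is split into 32-bit hi/lo halves along the way. A sketch of that split:

    #include <stdint.h>

    struct regpair_example { uint32_t lo; uint32_t hi; };

    static struct regpair_example dma_to_regpair(uint64_t map)
    {
            struct regpair_example r;

            r.hi = (uint32_t)(map >> 32);
            r.lo = (uint32_t)(map & 0xffffffff);
            return r;
    }
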
@@ -4066,7 +4045,7 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4066 4045
4067 dev->max_iscsi_conn = 0; 4046 dev->max_iscsi_conn = 0;
4068 base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR); 4047 base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
4069 if (base < 0xa0000 || base >= 0xc0000) 4048 if (base == 0)
4070 return; 4049 return;
4071 4050
4072 addr = BNX2X_SHMEM_ADDR(base, 4051 addr = BNX2X_SHMEM_ADDR(base,
@@ -4103,14 +4082,19 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4103 } 4082 }
4104 if (BNX2X_CHIP_IS_E1H(cp->chip_id)) { 4083 if (BNX2X_CHIP_IS_E1H(cp->chip_id)) {
4105 int func = CNIC_FUNC(cp); 4084 int func = CNIC_FUNC(cp);
4085 u32 mf_cfg_addr;
4086
4087 mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
4088
4089 addr = mf_cfg_addr +
4090 offsetof(struct mf_cfg, func_mf_config[func].e1hov_tag);
4106 4091
4107 addr = BNX2X_SHMEM_ADDR(base,
4108 mf_cfg.func_mf_config[func].e1hov_tag);
4109 val = CNIC_RD(dev, addr); 4092 val = CNIC_RD(dev, addr);
4110 val &= FUNC_MF_CFG_E1HOV_TAG_MASK; 4093 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
4111 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 4094 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
4112 addr = BNX2X_SHMEM_ADDR(base, 4095 addr = mf_cfg_addr +
4113 mf_cfg.func_mf_config[func].config); 4096 offsetof(struct mf_cfg,
4097 func_mf_config[func].config);
4114 val = CNIC_RD(dev, addr); 4098 val = CNIC_RD(dev, addr);
4115 val &= FUNC_MF_CFG_PROTOCOL_MASK; 4099 val &= FUNC_MF_CFG_PROTOCOL_MASK;
4116 if (val != FUNC_MF_CFG_PROTOCOL_ISCSI) 4100 if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
@@ -4122,11 +4106,10 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4122static int cnic_start_bnx2x_hw(struct cnic_dev *dev) 4106static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4123{ 4107{
4124 struct cnic_local *cp = dev->cnic_priv; 4108 struct cnic_local *cp = dev->cnic_priv;
4109 struct cnic_eth_dev *ethdev = cp->ethdev;
4125 int func = CNIC_FUNC(cp), ret, i; 4110 int func = CNIC_FUNC(cp), ret, i;
4126 int port = CNIC_PORT(cp);
4127 u32 pfid; 4111 u32 pfid;
4128 u16 eq_idx; 4112 struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
4129 u8 sb_id = cp->status_blk_num;
4130 4113
4131 cp->pfid = func; 4114 cp->pfid = func;
4132 pfid = cp->pfid; 4115 pfid = cp->pfid;
@@ -4137,15 +4120,16 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4137 if (ret) 4120 if (ret)
4138 return -ENOMEM; 4121 return -ENOMEM;
4139 4122
4123 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
4124
4140 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + 4125 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
4141 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); 4126 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
4142 cp->kcq1.sw_prod_idx = 0; 4127 cp->kcq1.sw_prod_idx = 0;
4143 4128
4144 cp->kcq1.hw_prod_idx_ptr = 4129 cp->kcq1.hw_prod_idx_ptr =
4145 &cp->status_blk.bnx2x->c_status_block.index_values[ 4130 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4146 HC_INDEX_C_ISCSI_EQ_CONS];
4147 cp->kcq1.status_idx_ptr = 4131 cp->kcq1.status_idx_ptr =
4148 &cp->status_blk.bnx2x->c_status_block.status_block_index; 4132 &sb->sb.running_index[SM_RX_ID];
4149 4133
4150 cnic_get_bnx2x_iscsi_info(dev); 4134 cnic_get_bnx2x_iscsi_info(dev);
4151 4135
@@ -4171,7 +4155,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4171 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num); 4155 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
4172 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4156 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4173 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0), 4157 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
4174 HC_INDEX_C_ISCSI_EQ_CONS); 4158 HC_INDEX_ISCSI_EQ_CONS);
4175 4159
4176 for (i = 0; i < cp->conn_buf_info.num_pages; i++) { 4160 for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
4177 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4161 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
@@ -4189,16 +4173,11 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4189 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4, 4173 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
4190 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); 4174 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
4191 4175
4176 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4177 TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
4178
4192 cnic_setup_bnx2x_context(dev); 4179 cnic_setup_bnx2x_context(dev);
4193 4180
4194 eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM +
4195 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
4196 offsetof(struct cstorm_status_block_c,
4197 index_values[HC_INDEX_C_ISCSI_EQ_CONS]));
4198 if (eq_idx != 0) {
4199 netdev_err(dev->netdev, "EQ cons index %x != 0\n", eq_idx);
4200 return -EBUSY;
4201 }
4202 ret = cnic_init_bnx2x_irq(dev); 4181 ret = cnic_init_bnx2x_irq(dev);
4203 if (ret) 4182 if (ret)
4204 return ret; 4183 return ret;
@@ -4218,8 +4197,9 @@ static void cnic_init_rings(struct cnic_dev *dev)
4218 cnic_init_bnx2_rx_ring(dev); 4197 cnic_init_bnx2_rx_ring(dev);
4219 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 4198 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4220 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 4199 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4221 struct cnic_local *cp = dev->cnic_priv;
4222 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 4200 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
4201 u32 cl_qzone_id, type;
4202 struct client_init_ramrod_data *data;
4223 union l5cm_specific_data l5_data; 4203 union l5cm_specific_data l5_data;
4224 struct ustorm_eth_rx_producers rx_prods = {0}; 4204 struct ustorm_eth_rx_producers rx_prods = {0};
4225 u32 off, i; 4205 u32 off, i;
@@ -4228,23 +4208,36 @@ static void cnic_init_rings(struct cnic_dev *dev)
4228 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; 4208 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
4229 barrier(); 4209 barrier();
4230 4210
4211 cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4212
4231 off = BAR_USTRORM_INTMEM + 4213 off = BAR_USTRORM_INTMEM +
4232 USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli); 4214 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli);
4233 4215
4234 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) 4216 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
4235 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); 4217 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
4236 4218
4237 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 4219 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4238 4220
4239 cnic_init_bnx2x_tx_ring(dev); 4221 data = cp->l2_buf;
4240 cnic_init_bnx2x_rx_ring(dev); 4222
4223 memset(data, 0, sizeof(*data));
4224
4225 cnic_init_bnx2x_tx_ring(dev, data);
4226 cnic_init_bnx2x_rx_ring(dev, data);
4227
4228 l5_data.phy_address.lo = cp->l2_buf_map & 0xffffffff;
4229 l5_data.phy_address.hi = (u64) cp->l2_buf_map >> 32;
4230
4231 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
4232 & SPE_HDR_CONN_TYPE;
4233 type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
4234 SPE_HDR_FUNCTION_ID);
4241 4235
4242 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 4236 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4243 4237
4244 l5_data.phy_address.lo = cli;
4245 l5_data.phy_address.hi = 0;
4246 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, 4238 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
4247 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); 4239 BNX2X_ISCSI_L2_CID, type, &l5_data);
4240
4248 i = 0; 4241 i = 0;
4249 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 4242 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4250 ++i < 10) 4243 ++i < 10)
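
The SPE header type built above packs the connection type and the owning PF id into one 16-bit field. A sketch of the packing; the shift and mask values here are assumptions, not the HSI's:

    #include <stdint.h>

    /* Shift/mask values are assumptions, not the real SPE_HDR_* ones. */
    #define EX_CONN_TYPE_SHIFT  0
    #define EX_CONN_TYPE        0x00ff
    #define EX_FUNC_ID_SHIFT    8
    #define EX_FUNC_ID          0xff00

    static uint16_t spe_hdr_type(uint8_t conn_type, uint8_t pfid)
    {
            return ((conn_type << EX_CONN_TYPE_SHIFT) & EX_CONN_TYPE) |
                   ((pfid << EX_FUNC_ID_SHIFT) & EX_FUNC_ID);
    }
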
@@ -4272,6 +4265,7 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
4272 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 4265 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
4273 union l5cm_specific_data l5_data; 4266 union l5cm_specific_data l5_data;
4274 int i; 4267 int i;
4268 u32 type;
4275 4269
4276 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0); 4270 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);
4277 4271
@@ -4292,9 +4286,12 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
4292 cnic_kwq_completion(dev, 1); 4286 cnic_kwq_completion(dev, 1);
4293 4287
4294 memset(&l5_data, 0, sizeof(l5_data)); 4288 memset(&l5_data, 0, sizeof(l5_data));
4295 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL, 4289 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
4296 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE | 4290 & SPE_HDR_CONN_TYPE;
4297 (1 << SPE_HDR_COMMON_RAMROD_SHIFT), &l5_data); 4291 type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
4292 SPE_HDR_FUNCTION_ID);
4293 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
4294 BNX2X_ISCSI_L2_CID, type, &l5_data);
4298 msleep(10); 4295 msleep(10);
4299 } 4296 }
4300 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 4297 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
@@ -4392,15 +4389,9 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
4392static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) 4389static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
4393{ 4390{
4394 struct cnic_local *cp = dev->cnic_priv; 4391 struct cnic_local *cp = dev->cnic_priv;
4395 u8 sb_id = cp->status_blk_num;
4396 int port = CNIC_PORT(cp);
4397 4392
4398 cnic_free_irq(dev); 4393 cnic_free_irq(dev);
4399 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 4394 *cp->kcq1.hw_prod_idx_ptr = 0;
4400 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
4401 offsetof(struct cstorm_status_block_c,
4402 index_values[HC_INDEX_C_ISCSI_EQ_CONS]),
4403 0);
4404 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4395 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4405 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0); 4396 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
4406 CNIC_WR16(dev, cp->kcq1.io_addr, 0); 4397 CNIC_WR16(dev, cp->kcq1.io_addr, 0);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 481618399b1f..676d008509c6 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -12,6 +12,13 @@
12#ifndef CNIC_H 12#ifndef CNIC_H
13#define CNIC_H 13#define CNIC_H
14 14
15#define HC_INDEX_ISCSI_EQ_CONS 6
16
17#define HC_INDEX_FCOE_EQ_CONS 3
18
19#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
20#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
21
15#define KWQ_PAGE_CNT 4 22#define KWQ_PAGE_CNT 4
16#define KCQ_PAGE_CNT 16 23#define KCQ_PAGE_CNT 16
17 24
@@ -179,6 +186,14 @@ struct kcq_info {
179 u32 io_addr; 186 u32 io_addr;
180}; 187};
181 188
189struct iro {
190 u32 base;
191 u16 m1;
192 u16 m2;
193 u16 m3;
194 u16 size;
195};
196
182struct cnic_local { 197struct cnic_local {
183 198
184 spinlock_t cnic_ulp_lock; 199 spinlock_t cnic_ulp_lock;
@@ -213,6 +228,9 @@ struct cnic_local {
213 u16 rx_cons; 228 u16 rx_cons;
214 u16 tx_cons; 229 u16 tx_cons;
215 230
231 struct iro *iro_arr;
232#define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr)
233
216 struct cnic_dma kwq_info; 234 struct cnic_dma kwq_info;
217 struct kwqe **kwq; 235 struct kwqe **kwq;
218 236
@@ -231,12 +249,16 @@ struct cnic_local {
231 union { 249 union {
232 void *gen; 250 void *gen;
233 struct status_block_msix *bnx2; 251 struct status_block_msix *bnx2;
234 struct host_status_block *bnx2x; 252 struct host_hc_status_block_e1x *bnx2x_e1x;
253 /* index values - which counter to update */
254 #define SM_RX_ID 0
255 #define SM_TX_ID 1
235 } status_blk; 256 } status_blk;
236 257
237 struct host_def_status_block *bnx2x_def_status_blk; 258 struct host_sp_status_block *bnx2x_def_status_blk;
238 259
239 u32 status_blk_num; 260 u32 status_blk_num;
261 u32 bnx2x_igu_sb_id;
240 u32 int_num; 262 u32 int_num;
241 u32 last_status_idx; 263 u32 last_status_idx;
242 struct tasklet_struct cnic_irq_task; 264 struct tasklet_struct cnic_irq_task;
@@ -358,24 +380,33 @@ struct bnx2x_bd_chain_next {
358 (BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \ 380 (BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \
359 ((x) + 2) : ((x) + 1) 381 ((x) + 2) : ((x) + 1)
360 382
361#define BNX2X_DEF_SB_ID 16 383#define BNX2X_DEF_SB_ID HC_SP_SB_ID
362 384
363#define BNX2X_ISCSI_RX_SB_INDEX_NUM \ 385#define BNX2X_SHMEM_MF_BLK_OFFSET 0x7e4
364 ((HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS << \
365 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
366 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER)
367 386
368#define BNX2X_SHMEM_ADDR(base, field) (base + \ 387#define BNX2X_SHMEM_ADDR(base, field) (base + \
369 offsetof(struct shmem_region, field)) 388 offsetof(struct shmem_region, field))
370 389
371#define CNIC_PORT(cp) ((cp)->func % PORT_MAX) 390#define BNX2X_SHMEM2_ADDR(base, field) (base + \
391 offsetof(struct shmem2_region, field))
392
393#define BNX2X_SHMEM2_HAS(base, field) \
394 ((base) && \
395 (CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, size)) > \
396 offsetof(struct shmem2_region, field)))
397
398#define CNIC_PORT(cp) ((cp)->pfid & 1)
372#define CNIC_FUNC(cp) ((cp)->func) 399#define CNIC_FUNC(cp) ((cp)->func)
373#define CNIC_E1HVN(cp) ((cp)->func >> 1) 400#define CNIC_E1HVN(cp) ((cp)->pfid >> 1)
374 401
375#define BNX2X_HW_CID(cp, x) (((CNIC_FUNC(cp) % PORT_MAX) << 23) | \ 402#define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \
376 (CNIC_E1HVN(cp) << 17) | (x)) 403 (CNIC_E1HVN(cp) << 17) | (x))
377 404
378#define BNX2X_SW_CID(x) (x & 0x1ffff) 405#define BNX2X_SW_CID(x) (x & 0x1ffff)
379 406
407#define BNX2X_CL_QZONE_ID(cp, cli) \
408 (cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
409
410#define TCP_TSTORM_OOO_DROP_AND_PROC_ACK (0<<4)
380#endif 411#endif
381 412
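
With the macro changes above, port and VN now come from the PF id (bit 0 and the remaining bits respectively), and the hardware CID packs port, VN, and the software CID into a single word. A worked example using the field widths from the macros:

    #include <stdint.h>

    static uint32_t example_hw_cid(uint32_t pfid, uint32_t sw_cid)
    {
            uint32_t port = pfid & 1;   /* CNIC_PORT  */
            uint32_t vn   = pfid >> 1;  /* CNIC_E1HVN */

            /* e.g. pfid 3 (port 1, vn 1), sw_cid 0x11 -> 0x00820011 */
            return (port << 23) | (vn << 17) | sw_cid;
    }
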
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
index 7ce694d41b6b..328e8b2765a3 100644
--- a/drivers/net/cnic_defs.h
+++ b/drivers/net/cnic_defs.h
@@ -14,6 +14,7 @@
14 14
15/* KWQ (kernel work queue) request op codes */ 15/* KWQ (kernel work queue) request op codes */
16#define L2_KWQE_OPCODE_VALUE_FLUSH (4) 16#define L2_KWQE_OPCODE_VALUE_FLUSH (4)
17#define L2_KWQE_OPCODE_VALUE_VM_FREE_RX_QUEUE (8)
17 18
18#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50) 19#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
19#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51) 20#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
@@ -48,11 +49,14 @@
48#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14) 49#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)
49 50
50/* KCQ (kernel completion queue) completion status */ 51/* KCQ (kernel completion queue) completion status */
51#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0) 52#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
52#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93) 53#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
53 54
54#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83) 55#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83)
55#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG (0x89) 56#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG (0x89)
57
58#define L4_KCQE_OPCODE_VALUE_OOO_EVENT_NOTIFICATION (0xa0)
59#define L4_KCQE_OPCODE_VALUE_OOO_FLUSH (0xa1)
56 60
57#define L4_LAYER_CODE (4) 61#define L4_LAYER_CODE (4)
58#define L2_LAYER_CODE (2) 62#define L2_LAYER_CODE (2)
@@ -585,6 +589,100 @@ struct l4_kwq_upload {
585 */ 589 */
586 590
587/* 591/*
592 * The iscsi aggregative context of Cstorm
593 */
594struct cstorm_iscsi_ag_context {
595 u32 agg_vars1;
596#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0)
597#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0
598#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8)
599#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8
600#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9)
601#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9
602#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10)
603#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10
604#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11)
605#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11
606#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12)
607#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
608#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
609#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
610#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF (0x3<<14)
611#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_SHIFT 14
612#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
613#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
614#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
615#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
616#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN (0x1<<19)
617#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN_SHIFT 19
618#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN (0x1<<20)
619#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN_SHIFT 20
620#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN (0x1<<21)
621#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN_SHIFT 21
622#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN (0x1<<22)
623#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN_SHIFT 22
624#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
625#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
626#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
627#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26
628#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28)
629#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28
630#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30)
631#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30
632#if defined(__BIG_ENDIAN)
633 u8 __aux1_th;
634 u8 __aux1_val;
635 u16 __agg_vars2;
636#elif defined(__LITTLE_ENDIAN)
637 u16 __agg_vars2;
638 u8 __aux1_val;
639 u8 __aux1_th;
640#endif
641 u32 rel_seq;
642 u32 rel_seq_th;
643#if defined(__BIG_ENDIAN)
644 u16 hq_cons;
645 u16 hq_prod;
646#elif defined(__LITTLE_ENDIAN)
647 u16 hq_prod;
648 u16 hq_cons;
649#endif
650#if defined(__BIG_ENDIAN)
651 u8 __reserved62;
652 u8 __reserved61;
653 u8 __reserved60;
654 u8 __reserved59;
655#elif defined(__LITTLE_ENDIAN)
656 u8 __reserved59;
657 u8 __reserved60;
658 u8 __reserved61;
659 u8 __reserved62;
660#endif
661#if defined(__BIG_ENDIAN)
662 u16 __reserved64;
663 u16 __cq_u_prod0;
664#elif defined(__LITTLE_ENDIAN)
665 u16 __cq_u_prod0;
666 u16 __reserved64;
667#endif
668 u32 __cq_u_prod1;
669#if defined(__BIG_ENDIAN)
670 u16 __agg_vars3;
671 u16 __cq_u_prod2;
672#elif defined(__LITTLE_ENDIAN)
673 u16 __cq_u_prod2;
674 u16 __agg_vars3;
675#endif
676#if defined(__BIG_ENDIAN)
677 u16 __aux2_th;
678 u16 __cq_u_prod3;
679#elif defined(__LITTLE_ENDIAN)
680 u16 __cq_u_prod3;
681 u16 __aux2_th;
682#endif
683};
684
685/*
588 * iSCSI context region, used only in iSCSI 686 * iSCSI context region, used only in iSCSI
589 */ 687 */
590struct ustorm_iscsi_rq_db { 688struct ustorm_iscsi_rq_db {
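
The firmware context structs in this file expose every sub-field as a MASK/SHIFT macro pair over a plain u32, so all accesses are open-coded shifts. A generic sketch of the read and write pattern:

    #include <stdint.h>

    /* Read a field defined by a MASK/SHIFT macro pair. */
    static uint32_t ctx_get(uint32_t word, uint32_t mask, unsigned int shift)
    {
            return (word & mask) >> shift;
    }

    /* Write a field, preserving all other bits in the word. */
    static uint32_t ctx_set(uint32_t word, uint32_t mask, unsigned int shift,
                            uint32_t val)
    {
            return (word & ~mask) | ((val << shift) & mask);
    }
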
@@ -696,7 +794,7 @@ struct ustorm_iscsi_st_context {
696 struct regpair task_pbl_base; 794 struct regpair task_pbl_base;
697 struct regpair tce_phy_addr; 795 struct regpair tce_phy_addr;
698 struct ustorm_iscsi_placement_db place_db; 796 struct ustorm_iscsi_placement_db place_db;
699 u32 data_rcv_seq; 797 u32 reserved8;
700 u32 rem_rcv_len; 798 u32 rem_rcv_len;
701#if defined(__BIG_ENDIAN) 799#if defined(__BIG_ENDIAN)
702 u16 hdr_itt; 800 u16 hdr_itt;
@@ -713,8 +811,10 @@ struct ustorm_iscsi_st_context {
713#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0 811#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
714#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1) 812#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
715#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1 813#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
716#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x3F<<2) 814#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
717#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 2 815#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
816#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
817#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
718 u8 task_pdu_cache_index; 818 u8 task_pdu_cache_index;
719 u8 task_pbe_cache_index; 819 u8 task_pbe_cache_index;
720#elif defined(__LITTLE_ENDIAN) 820#elif defined(__LITTLE_ENDIAN)
@@ -725,8 +825,10 @@ struct ustorm_iscsi_st_context {
725#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0 825#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
726#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1) 826#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
727#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1 827#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
728#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x3F<<2) 828#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
729#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 2 829#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
830#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
831#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
730 u8 hdr_second_byte_union; 832 u8 hdr_second_byte_union;
731#endif 833#endif
732#if defined(__BIG_ENDIAN) 834#if defined(__BIG_ENDIAN)
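The two hunks above split the old 6-bit RESERVED1 field (0x3F at bit 2) into a one-bit BRESETCRC flag at bit 2 plus a 5-bit reserved remainder at bit 3, so the surrounding bits keep their positions. A hedged sketch of driving the new flag; the name of the u8 field holding these bits is not visible in the hunk, so flags below is an assumption, and the exact reset semantics are firmware-defined:

#include <stdint.h>

typedef uint8_t u8;

#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC       (0x1<<2)

/* set or clear the CRC-reset request bit in the ustorm flags byte */
static u8 ustorm_set_reset_crc(u8 flags, int reset)
{
        if (reset)
                flags |= USTORM_ISCSI_ST_CONTEXT_BRESETCRC;
        else
                flags &= (u8)~USTORM_ISCSI_ST_CONTEXT_BRESETCRC;
        return flags;
}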
@@ -777,14 +879,14 @@ struct ustorm_iscsi_st_context {
777 */ 879 */
778struct tstorm_tcp_st_context_section { 880struct tstorm_tcp_st_context_section {
779 u32 flags1; 881 u32 flags1;
780#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_20B (0xFFFFFF<<0) 882#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT (0xFFFFFF<<0)
781#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_20B_SHIFT 0 883#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT 0
782#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24) 884#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24)
783#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24 885#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24
784#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25) 886#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25)
785#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25 887#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25
786#define TSTORM_TCP_ST_CONTEXT_SECTION_ISLE_EXISTS (0x1<<26) 888#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0 (0x1<<26)
787#define TSTORM_TCP_ST_CONTEXT_SECTION_ISLE_EXISTS_SHIFT 26 889#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0_SHIFT 26
788#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27) 890#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27)
789#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27 891#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27
790#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28) 892#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28)
@@ -793,11 +895,11 @@ struct tstorm_tcp_st_context_section {
793#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29 895#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29
794#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30) 896#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30)
795#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30 897#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30
796#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED3 (0x1<<31) 898#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN (0x1<<31)
797#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED3_SHIFT 31 899#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN_SHIFT 31
798 u32 flags2; 900 u32 flags2;
799#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_20B (0xFFFFFF<<0) 901#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION (0xFFFFFF<<0)
800#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_20B_SHIFT 0 902#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT 0
801#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24) 903#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24)
802#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24 904#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24
803#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25) 905#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25)
@@ -810,18 +912,18 @@ struct tstorm_tcp_st_context_section {
810#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28 912#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28
811#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29) 913#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29)
812#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29 914#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29
813#define __TSTORM_TCP_ST_CONTEXT_SECTION_SECOND_ISLE_DROPPED (0x1<<30) 915#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK (0x1<<30)
814#define __TSTORM_TCP_ST_CONTEXT_SECTION_SECOND_ISLE_DROPPED_SHIFT 30 916#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK_SHIFT 30
815#define __TSTORM_TCP_ST_CONTEXT_SECTION_DONT_SUPPORT_OOO (0x1<<31) 917#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK (0x1<<31)
816#define __TSTORM_TCP_ST_CONTEXT_SECTION_DONT_SUPPORT_OOO_SHIFT 31 918#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK_SHIFT 31
817#if defined(__BIG_ENDIAN) 919#if defined(__BIG_ENDIAN)
818 u16 reserved_slowpath; 920 u16 mss;
819 u8 tcp_sm_state_3b; 921 u8 tcp_sm_state;
820 u8 rto_exp_3b; 922 u8 rto_exp;
821#elif defined(__LITTLE_ENDIAN) 923#elif defined(__LITTLE_ENDIAN)
822 u8 rto_exp_3b; 924 u8 rto_exp;
823 u8 tcp_sm_state_3b; 925 u8 tcp_sm_state;
824 u16 reserved_slowpath; 926 u16 mss;
825#endif 927#endif
826 u32 rcv_nxt; 928 u32 rcv_nxt;
827 u32 timestamp_recent; 929 u32 timestamp_recent;
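Each 32-bit word in these structures is declared twice, with sub-fields in opposite order under __BIG_ENDIAN and __LITTLE_ENDIAN; the usual reason for this pattern is that the firmware sees each aligned word in one fixed byte order, so reversing the declaration order on big-endian hosts keeps every sub-field on the same bits of the word. A self-contained sketch using the mss/tcp_sm_state/rto_exp word introduced above (in-tree the endian macros come from <asm/byteorder.h>; a little-endian host is assumed when neither is predefined):

#include <stdint.h>

typedef uint8_t u8;
typedef uint16_t u16;

#if !defined(__BIG_ENDIAN) && !defined(__LITTLE_ENDIAN)
#define __LITTLE_ENDIAN 1       /* assumption: little-endian host */
#endif

struct tcp_sm_word {
#if defined(__BIG_ENDIAN)
        u16 mss;
        u8 tcp_sm_state;
        u8 rto_exp;
#elif defined(__LITTLE_ENDIAN)
        u8 rto_exp;
        u8 tcp_sm_state;
        u16 mss;
#endif
};

/* either variant must pack into exactly one 32-bit word */
_Static_assert(sizeof(struct tcp_sm_word) == 4,
               "sub-fields must fill the word with no padding");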
@@ -846,11 +948,11 @@ struct tstorm_tcp_st_context_section {
846#if defined(__BIG_ENDIAN) 948#if defined(__BIG_ENDIAN)
847 u8 statistics_counter_id; 949 u8 statistics_counter_id;
848 u8 ooo_support_mode; 950 u8 ooo_support_mode;
849 u8 snd_wnd_scale_4b; 951 u8 snd_wnd_scale;
850 u8 dup_ack_count; 952 u8 dup_ack_count;
851#elif defined(__LITTLE_ENDIAN) 953#elif defined(__LITTLE_ENDIAN)
852 u8 dup_ack_count; 954 u8 dup_ack_count;
853 u8 snd_wnd_scale_4b; 955 u8 snd_wnd_scale;
854 u8 ooo_support_mode; 956 u8 ooo_support_mode;
855 u8 statistics_counter_id; 957 u8 statistics_counter_id;
856#endif 958#endif
@@ -860,13 +962,21 @@ struct tstorm_tcp_st_context_section {
860 u32 isle_start_seq; 962 u32 isle_start_seq;
861 u32 isle_end_seq; 963 u32 isle_end_seq;
862#if defined(__BIG_ENDIAN) 964#if defined(__BIG_ENDIAN)
863 u16 mss; 965 u16 second_isle_address;
864 u16 recent_seg_wnd; 966 u16 recent_seg_wnd;
865#elif defined(__LITTLE_ENDIAN) 967#elif defined(__LITTLE_ENDIAN)
866 u16 recent_seg_wnd; 968 u16 recent_seg_wnd;
867 u16 mss; 969 u16 second_isle_address;
970#endif
971#if defined(__BIG_ENDIAN)
972 u8 max_isles_ever_happened;
973 u8 isles_number;
974 u16 last_isle_address;
975#elif defined(__LITTLE_ENDIAN)
976 u16 last_isle_address;
977 u8 isles_number;
978 u8 max_isles_ever_happened;
868#endif 979#endif
869 u32 reserved4;
870 u32 max_rt_time; 980 u32 max_rt_time;
871#if defined(__BIG_ENDIAN) 981#if defined(__BIG_ENDIAN)
872 u16 lsb_mac_address; 982 u16 lsb_mac_address;
@@ -876,7 +986,7 @@ struct tstorm_tcp_st_context_section {
876 u16 lsb_mac_address; 986 u16 lsb_mac_address;
877#endif 987#endif
878 u32 msb_mac_address; 988 u32 msb_mac_address;
879 u32 reserved2; 989 u32 rightmost_received_seq;
880}; 990};
881 991
882/* 992/*
@@ -951,7 +1061,7 @@ struct tstorm_iscsi_st_context_section {
951 u8 scratchpad_idx; 1061 u8 scratchpad_idx;
952 struct iscsi_term_vars term_vars; 1062 struct iscsi_term_vars term_vars;
953#endif 1063#endif
954 u32 reserved2; 1064 u32 process_nxt;
955}; 1065};
956 1066
957/* 1067/*
@@ -1174,24 +1284,12 @@ struct xstorm_iscsi_ag_context {
1174#endif 1284#endif
1175#if defined(__BIG_ENDIAN) 1285#if defined(__BIG_ENDIAN)
1176 u8 cdu_reserved; 1286 u8 cdu_reserved;
1177 u8 agg_vars4; 1287 u8 __agg_vars4;
1178#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF (0x3<<0)
1179#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_SHIFT 0
1180#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF (0x3<<2)
1181#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_SHIFT 2
1182#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN (0x1<<4)
1183#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN_SHIFT 4
1184#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN (0x1<<5)
1185#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN_SHIFT 5
1186#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN (0x1<<6)
1187#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN_SHIFT 6
1188#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN (0x1<<7)
1189#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN_SHIFT 7
1190 u8 agg_vars3; 1288 u8 agg_vars3;
1191#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) 1289#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
1192#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 1290#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
1193#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF (0x3<<6) 1291#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
1194#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_SHIFT 6 1292#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
1195 u8 agg_vars2; 1293 u8 agg_vars2;
1196#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0) 1294#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
1197#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0 1295#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
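Most of the churn in the xstorm aggregative context is renaming placeholder AUX names to functional ones (AUX19_CF becomes RX_TS_EN_CF) and demoting agg_vars4 to firmware-private __agg_vars4 by dropping its driver-visible sub-field macros. Old and new macros carry the same mask and shift, so the binary layout is untouched; a compile-time sketch that pins this down, with the old name retained locally only for the comparison:

#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF       (0x3<<6)        /* old name */
#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF    (0x3<<6)        /* new name */

_Static_assert(__XSTORM_ISCSI_AG_CONTEXT_AUX19_CF ==
               __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF,
               "rename only: same bits, no layout change");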
@@ -1222,21 +1320,9 @@ struct xstorm_iscsi_ag_context {
1222 u8 agg_vars3; 1320 u8 agg_vars3;
1223#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) 1321#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
1224#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 1322#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
1225#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF (0x3<<6) 1323#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
1226#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_SHIFT 6 1324#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
1227 u8 agg_vars4; 1325 u8 __agg_vars4;
1228#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF (0x3<<0)
1229#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_SHIFT 0
1230#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF (0x3<<2)
1231#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_SHIFT 2
1232#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN (0x1<<4)
1233#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN_SHIFT 4
1234#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN (0x1<<5)
1235#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN_SHIFT 5
1236#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN (0x1<<6)
1237#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN_SHIFT 6
1238#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN (0x1<<7)
1239#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN_SHIFT 7
1240 u8 cdu_reserved; 1326 u8 cdu_reserved;
1241#endif 1327#endif
1242 u32 more_to_send; 1328 u32 more_to_send;
@@ -1270,8 +1356,8 @@ struct xstorm_iscsi_ag_context {
1270#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 1356#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
1271#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) 1357#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
1272#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 1358#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
1273#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF (0x3<<4) 1359#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
1274#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_SHIFT 4 1360#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
1275#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) 1361#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
1276#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 1362#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
1277#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) 1363#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
@@ -1286,8 +1372,8 @@ struct xstorm_iscsi_ag_context {
1286#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 1372#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
1287#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) 1373#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
1288#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 1374#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
1289#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG (0x1<<15) 1375#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
1290#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG_SHIFT 15 1376#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
1291 u8 agg_val3_th; 1377 u8 agg_val3_th;
1292 u8 agg_vars6; 1378 u8 agg_vars6;
1293#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0) 1379#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
@@ -1310,8 +1396,8 @@ struct xstorm_iscsi_ag_context {
1310#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 1396#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
1311#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) 1397#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
1312#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 1398#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
1313#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF (0x3<<4) 1399#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
1314#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_SHIFT 4 1400#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
1315#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) 1401#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
1316#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 1402#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
1317#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) 1403#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
@@ -1326,14 +1412,14 @@ struct xstorm_iscsi_ag_context {
1326#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 1412#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
1327#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) 1413#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
1328#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 1414#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
1329#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG (0x1<<15) 1415#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
1330#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG_SHIFT 15 1416#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
1331#endif 1417#endif
1332#if defined(__BIG_ENDIAN) 1418#if defined(__BIG_ENDIAN)
1333 u16 __agg_val11_th; 1419 u16 __agg_val11_th;
1334 u16 __agg_val11; 1420 u16 __gen_data;
1335#elif defined(__LITTLE_ENDIAN) 1421#elif defined(__LITTLE_ENDIAN)
1336 u16 __agg_val11; 1422 u16 __gen_data;
1337 u16 __agg_val11_th; 1423 u16 __agg_val11_th;
1338#endif 1424#endif
1339#if defined(__BIG_ENDIAN) 1425#if defined(__BIG_ENDIAN)
@@ -1384,7 +1470,7 @@ struct xstorm_iscsi_ag_context {
1384#endif 1470#endif
1385 u32 hq_cons_tcp_seq; 1471 u32 hq_cons_tcp_seq;
1386 u32 exp_stat_sn; 1472 u32 exp_stat_sn;
1387 u32 agg_misc5; 1473 u32 rst_seq_num;
1388}; 1474};
1389 1475
1390/* 1476/*
@@ -1478,12 +1564,12 @@ struct tstorm_iscsi_ag_context {
1478#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 1564#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1479#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) 1565#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1480#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 1566#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1481#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF (0x3<<4) 1567#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
1482#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_SHIFT 4 1568#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
1483#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) 1569#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
1484#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 1570#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
1485#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG (0x1<<7) 1571#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
1486#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG_SHIFT 7 1572#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
1487 u8 state; 1573 u8 state;
1488#elif defined(__LITTLE_ENDIAN) 1574#elif defined(__LITTLE_ENDIAN)
1489 u8 state; 1575 u8 state;
@@ -1496,63 +1582,63 @@ struct tstorm_iscsi_ag_context {
1496#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 1582#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1497#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) 1583#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1498#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 1584#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1499#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF (0x3<<4) 1585#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
1500#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_SHIFT 4 1586#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
1501#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) 1587#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
1502#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 1588#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
1503#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG (0x1<<7) 1589#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
1504#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG_SHIFT 7 1590#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
1505 u16 ulp_credit; 1591 u16 ulp_credit;
1506#endif 1592#endif
1507#if defined(__BIG_ENDIAN) 1593#if defined(__BIG_ENDIAN)
1508 u16 __agg_val4; 1594 u16 __agg_val4;
1509 u16 agg_vars2; 1595 u16 agg_vars2;
1510#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG (0x1<<0) 1596#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
1511#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG_SHIFT 0 1597#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
1512#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG (0x1<<1) 1598#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
1513#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG_SHIFT 1 1599#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
1514#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<2) 1600#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
1515#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 2 1601#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
1516#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF (0x3<<4) 1602#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
1517#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_SHIFT 4 1603#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
1518#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) 1604#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
1519#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 1605#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
1520#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) 1606#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
1521#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 1607#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
1522#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) 1608#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
1523#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 1609#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
1524#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<11) 1610#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
1525#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 11 1611#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
1526#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<12) 1612#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
1527#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 12 1613#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
1528#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN (0x1<<13) 1614#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
1529#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN_SHIFT 13 1615#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
1530#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) 1616#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
1531#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 1617#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
1532#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) 1618#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
1533#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 1619#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
1534#elif defined(__LITTLE_ENDIAN) 1620#elif defined(__LITTLE_ENDIAN)
1535 u16 agg_vars2; 1621 u16 agg_vars2;
1536#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG (0x1<<0) 1622#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
1537#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG_SHIFT 0 1623#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
1538#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG (0x1<<1) 1624#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
1539#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG_SHIFT 1 1625#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
1540#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<2) 1626#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
1541#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 2 1627#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
1542#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF (0x3<<4) 1628#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
1543#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_SHIFT 4 1629#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
1544#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) 1630#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
1545#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 1631#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
1546#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) 1632#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
1547#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 1633#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
1548#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) 1634#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
1549#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 1635#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
1550#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<11) 1636#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
1551#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 11 1637#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
1552#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<12) 1638#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
1553#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 12 1639#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
1554#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN (0x1<<13) 1640#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
1555#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN_SHIFT 13 1641#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
1556#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) 1642#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
1557#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 1643#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
1558#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) 1644#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
@@ -1563,100 +1649,6 @@ struct tstorm_iscsi_ag_context {
1563}; 1649};
1564 1650
1565/* 1651/*
1566 * The iscsi aggregative context of Cstorm
1567 */
1568struct cstorm_iscsi_ag_context {
1569 u32 agg_vars1;
1570#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0)
1571#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0
1572#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8)
1573#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8
1574#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9)
1575#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9
1576#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10)
1577#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10
1578#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11)
1579#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11
1580#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12)
1581#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
1582#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
1583#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
1584#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF (0x3<<14)
1585#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_SHIFT 14
1586#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
1587#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
1588#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
1589#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
1590#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN (0x1<<19)
1591#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN_SHIFT 19
1592#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN (0x1<<20)
1593#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN_SHIFT 20
1594#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN (0x1<<21)
1595#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN_SHIFT 21
1596#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN (0x1<<22)
1597#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN_SHIFT 22
1598#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
1599#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
1600#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
1601#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26
1602#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28)
1603#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28
1604#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30)
1605#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30
1606#if defined(__BIG_ENDIAN)
1607 u8 __aux1_th;
1608 u8 __aux1_val;
1609 u16 __agg_vars2;
1610#elif defined(__LITTLE_ENDIAN)
1611 u16 __agg_vars2;
1612 u8 __aux1_val;
1613 u8 __aux1_th;
1614#endif
1615 u32 rel_seq;
1616 u32 rel_seq_th;
1617#if defined(__BIG_ENDIAN)
1618 u16 hq_cons;
1619 u16 hq_prod;
1620#elif defined(__LITTLE_ENDIAN)
1621 u16 hq_prod;
1622 u16 hq_cons;
1623#endif
1624#if defined(__BIG_ENDIAN)
1625 u8 __reserved62;
1626 u8 __reserved61;
1627 u8 __reserved60;
1628 u8 __reserved59;
1629#elif defined(__LITTLE_ENDIAN)
1630 u8 __reserved59;
1631 u8 __reserved60;
1632 u8 __reserved61;
1633 u8 __reserved62;
1634#endif
1635#if defined(__BIG_ENDIAN)
1636 u16 __reserved64;
1637 u16 __cq_u_prod0;
1638#elif defined(__LITTLE_ENDIAN)
1639 u16 __cq_u_prod0;
1640 u16 __reserved64;
1641#endif
1642 u32 __cq_u_prod1;
1643#if defined(__BIG_ENDIAN)
1644 u16 __agg_vars3;
1645 u16 __cq_u_prod2;
1646#elif defined(__LITTLE_ENDIAN)
1647 u16 __cq_u_prod2;
1648 u16 __agg_vars3;
1649#endif
1650#if defined(__BIG_ENDIAN)
1651 u16 __aux2_th;
1652 u16 __cq_u_prod3;
1653#elif defined(__LITTLE_ENDIAN)
1654 u16 __cq_u_prod3;
1655 u16 __aux2_th;
1656#endif
1657};
1658
1659/*
1660 * The iscsi aggregative context of Ustorm 1652 * The iscsi aggregative context of Ustorm
1661 */ 1653 */
1662struct ustorm_iscsi_ag_context { 1654struct ustorm_iscsi_ag_context {
@@ -1746,8 +1738,8 @@ struct ustorm_iscsi_ag_context {
1746#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 1738#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
1747#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) 1739#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
1748#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 1740#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
1749#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) 1741#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
1750#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 1742#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
1751#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) 1743#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
1752#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 1744#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
1753 u8 decision_rule_enable_bits; 1745 u8 decision_rule_enable_bits;
@@ -1790,8 +1782,8 @@ struct ustorm_iscsi_ag_context {
1790#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 1782#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
1791#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) 1783#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
1792#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 1784#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
1793#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) 1785#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
1794#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 1786#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
1795#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) 1787#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
1796#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 1788#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
1797 u16 __reserved2; 1789 u16 __reserved2;
@@ -1799,22 +1791,6 @@ struct ustorm_iscsi_ag_context {
1799}; 1791};
1800 1792
1801/* 1793/*
1802 * Timers connection context
1803 */
1804struct iscsi_timers_block_context {
1805 u32 __reserved_0;
1806 u32 __reserved_1;
1807 u32 __reserved_2;
1808 u32 flags;
1809#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
1810#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
1811#define ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
1812#define ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
1813#define __ISCSI_TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
1814#define __ISCSI_TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
1815};
1816
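The iSCSI-private copy of the timers block is deleted above in favour of the shared struct timers_block_context referenced by iscsi_context below. Its flags word packed a 2-bit active-timer count and a connection-valid bit; assuming the shared structure keeps that layout (the masks are copied from the deleted definition), a decode sketch:

#include <stdint.h>

typedef uint32_t u32;

#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS       (0x3<<0)
#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
#define ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG               (0x1<<2)

/* return the active-timer count, or -1 if the connection is not valid */
static int active_timers(u32 flags)
{
        if (!(flags & ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG))
                return -1;
        return (flags & __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS) >>
               __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT;
}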
1817/*
1818 * Ethernet context section, shared in TOE, RDMA and ISCSI 1794 * Ethernet context section, shared in TOE, RDMA and ISCSI
1819 */ 1795 */
1820struct xstorm_eth_context_section { 1796struct xstorm_eth_context_section {
@@ -1963,7 +1939,7 @@ struct xstorm_tcp_context_section {
1963#endif 1939#endif
1964#if defined(__BIG_ENDIAN) 1940#if defined(__BIG_ENDIAN)
1965 u8 original_nagle_1b; 1941 u8 original_nagle_1b;
1966 u8 ts_enabled_1b; 1942 u8 ts_enabled;
1967 u16 tcp_params; 1943 u16 tcp_params;
1968#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0) 1944#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
1969#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0 1945#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
@@ -1973,8 +1949,8 @@ struct xstorm_tcp_context_section {
1973#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9 1949#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
1974#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10) 1950#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
1975#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10 1951#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
1976#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE (0x1<<11) 1952#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11)
1977#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE_SHIFT 11 1953#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11
1978#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12) 1954#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
1979#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12 1955#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
1980#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13) 1956#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
@@ -1991,15 +1967,15 @@ struct xstorm_tcp_context_section {
1991#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9 1967#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
1992#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10) 1968#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
1993#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10 1969#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
1994#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE (0x1<<11) 1970#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11)
1995#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE_SHIFT 11 1971#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11
1996#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12) 1972#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
1997#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12 1973#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
1998#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13) 1974#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
1999#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13 1975#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
2000#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14) 1976#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
2001#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14 1977#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
2002 u8 ts_enabled_1b; 1978 u8 ts_enabled;
2003 u8 original_nagle_1b; 1979 u8 original_nagle_1b;
2004#endif 1980#endif
2005#if defined(__BIG_ENDIAN) 1981#if defined(__BIG_ENDIAN)
@@ -2030,8 +2006,8 @@ struct xstorm_common_context_section {
2030#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 2006#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
2031#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2) 2007#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
2032#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2 2008#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
2033#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0 (0x1<<7) 2009#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS (0x1<<7)
2034#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0_SHIFT 7 2010#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS_SHIFT 7
2035 u8 ip_version_1b; 2011 u8 ip_version_1b;
2036#elif defined(__LITTLE_ENDIAN) 2012#elif defined(__LITTLE_ENDIAN)
2037 u8 ip_version_1b; 2013 u8 ip_version_1b;
@@ -2042,8 +2018,8 @@ struct xstorm_common_context_section {
2042#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 2018#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
2043#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2) 2019#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
2044#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2 2020#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
2045#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0 (0x1<<7) 2021#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS (0x1<<7)
2046#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0_SHIFT 7 2022#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS_SHIFT 7
2047 u16 reserved; 2023 u16 reserved;
2048#endif 2024#endif
2049}; 2025};
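DCB_EXISTS takes over the formerly reserved bit 7 of the xstorm common flags byte, directly above the 5-bit statistics counter id. A compile-time sanity sketch that the new bit stays clear of its neighbour (macros copied from the hunk above):

#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID     (0x1F<<2)
#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS                (0x1<<7)

_Static_assert((XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID &
                XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS) == 0,
               "DCB_EXISTS occupies bit 7 only; no overlap with the counter id");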
@@ -2284,7 +2260,7 @@ struct iscsi_context {
2284 struct tstorm_iscsi_ag_context tstorm_ag_context; 2260 struct tstorm_iscsi_ag_context tstorm_ag_context;
2285 struct cstorm_iscsi_ag_context cstorm_ag_context; 2261 struct cstorm_iscsi_ag_context cstorm_ag_context;
2286 struct ustorm_iscsi_ag_context ustorm_ag_context; 2262 struct ustorm_iscsi_ag_context ustorm_ag_context;
2287 struct iscsi_timers_block_context timers_context; 2263 struct timers_block_context timers_context;
2288 struct regpair upb_context; 2264 struct regpair upb_context;
2289 struct xstorm_iscsi_st_context xstorm_st_context; 2265 struct xstorm_iscsi_st_context xstorm_st_context;
2290 struct regpair xpb_context; 2266 struct regpair xpb_context;
@@ -2434,16 +2410,16 @@ struct l5cm_packet_size {
2434 * l5cm connection parameters 2410 * l5cm connection parameters
2435 */ 2411 */
2436union l5cm_reduce_param_union { 2412union l5cm_reduce_param_union {
2437 u32 passive_side_scramble_key; 2413 u32 opaque1;
2438 u32 pcs_id; 2414 u32 opaque2;
2439}; 2415};
2440 2416
2441/* 2417/*
2442 * l5cm connection parameters 2418 * l5cm connection parameters
2443 */ 2419 */
2444struct l5cm_reduce_conn { 2420struct l5cm_reduce_conn {
2445 union l5cm_reduce_param_union param; 2421 union l5cm_reduce_param_union opaque1;
2446 u32 isn; 2422 u32 opaque2;
2447}; 2423};
2448 2424
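The reduce-connection parameters stop exposing meaning to the host: passive_side_scramble_key and pcs_id collapse into opaque1/opaque2, and the struct members follow suit. Both union members alias the same 32 bits, so from the host side the object is just two opaque words to be zero-filled or copied verbatim; a self-contained sketch (u32 typedef'd locally):

#include <stdint.h>
#include <string.h>

typedef uint32_t u32;

union l5cm_reduce_param_union {
        u32 opaque1;
        u32 opaque2;    /* aliases the same storage as opaque1 */
};

struct l5cm_reduce_conn {
        union l5cm_reduce_param_union opaque1;
        u32 opaque2;
};

_Static_assert(sizeof(union l5cm_reduce_param_union) == sizeof(u32),
               "both members alias one 32-bit word");

/* the host treats the whole object as an opaque blob */
static void reduce_conn_clear(struct l5cm_reduce_conn *rc)
{
        memset(rc, 0, sizeof(*rc));
}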
2449/* 2425/*
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 344c842d55ab..4018de12f819 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -138,6 +138,7 @@ struct cnic_irq {
138 unsigned int vector; 138 unsigned int vector;
139 void *status_blk; 139 void *status_blk;
140 u32 status_blk_num; 140 u32 status_blk_num;
141 u32 status_blk_num2;
141 u32 irq_flags; 142 u32 irq_flags;
142#define CNIC_IRQ_FL_MSIX 0x00000001 143#define CNIC_IRQ_FL_MSIX 0x00000001
143}; 144};
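status_blk_num2 hands cnic the id of a second status block next to the existing one. A hedged sketch of a provider filling the structure; the struct is copied from the hunk above, but every numeric value is hypothetical:

#include <stddef.h>
#include <stdint.h>

typedef uint32_t u32;

struct cnic_irq {
        unsigned int vector;
        void *status_blk;
        u32 status_blk_num;
        u32 status_blk_num2;    /* new: id of the second status block */
        u32 irq_flags;
#define CNIC_IRQ_FL_MSIX 0x00000001
};

static void cnic_irq_demo(void)
{
        struct cnic_irq irq = {
                .vector          = 0,           /* hypothetical MSI-X vector */
                .status_blk      = NULL,        /* would point at the DMA'd block */
                .status_blk_num  = 16,          /* hypothetical ids */
                .status_blk_num2 = 17,
                .irq_flags       = CNIC_IRQ_FL_MSIX,
        };
        (void)irq;
}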
@@ -152,6 +153,7 @@ struct cnic_eth_dev {
152 struct pci_dev *pdev; 153 struct pci_dev *pdev;
153 void __iomem *io_base; 154 void __iomem *io_base;
154 void __iomem *io_base2; 155 void __iomem *io_base2;
156 void *iro_arr;
155 157
156 u32 ctx_tbl_offset; 158 u32 ctx_tbl_offset;
157 u32 ctx_tbl_len; 159 u32 ctx_tbl_len;
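iro_arr is passed as a bare void * so cnic_if.h does not need the provider's IRO (internal-RAM-offset) table type; cnic casts it back on its own side. A hedged sketch of that pattern; the struct iro layout below is an assumption for illustration, not taken from this diff:

#include <stdint.h>

typedef uint16_t u16;
typedef uint32_t u32;

/* hypothetical IRO entry; the real definition lives in the provider */
struct iro {
        u32 base;
        u16 m1, m2, m3;
        u16 size;
};

static u32 iro_base(void *iro_arr, int idx)
{
        const struct iro *iro = iro_arr;        /* cast back from void * */

        return iro[idx].base;
}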