aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/bnx2x
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2012-03-04 19:47:13 -0500
committerGlenn Elliott <gelliott@cs.unc.edu>2012-03-04 19:47:13 -0500
commitc71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
treeecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/net/bnx2x
parentea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlpwip-k-fmlp
Conflicts: litmus/sched_cedf.c
Diffstat (limited to 'drivers/net/bnx2x')
-rw-r--r--drivers/net/bnx2x/Makefile2
-rw-r--r--drivers/net/bnx2x/bnx2x.h944
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c1762
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h918
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.c2243
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.h195
-rw-r--r--drivers/net/bnx2x/bnx2x_dump.h997
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c958
-rw-r--r--drivers/net/bnx2x/bnx2x_fw_defs.h821
-rw-r--r--drivers/net/bnx2x/bnx2x_fw_file_hdr.h3
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h2228
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h266
-rw-r--r--drivers/net/bnx2x/bnx2x_init_ops.h374
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c10888
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h317
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c7265
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h1111
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c327
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.h12
19 files changed, 21201 insertions, 10430 deletions
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index 084afce89ae9..bb83a2961273 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_BNX2X) += bnx2x.o 5obj-$(CONFIG_BNX2X) += bnx2x.o
6 6
7bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o 7bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 0c2d96ed561c..668a578c49e9 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -1,6 +1,6 @@
1/* bnx2x.h: Broadcom Everest network driver. 1/* bnx2x.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -13,6 +13,8 @@
13 13
14#ifndef BNX2X_H 14#ifndef BNX2X_H
15#define BNX2X_H 15#define BNX2X_H
16#include <linux/netdevice.h>
17#include <linux/types.h>
16 18
17/* compilation time flags */ 19/* compilation time flags */
18 20
@@ -20,26 +22,22 @@
20 * (you will need to reboot afterwards) */ 22 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */ 23/* #define BNX2X_STOP_ON_ERROR */
22 24
23#define DRV_MODULE_VERSION "1.52.53-4" 25#define DRV_MODULE_VERSION "1.62.12-0"
24#define DRV_MODULE_RELDATE "2010/16/08" 26#define DRV_MODULE_RELDATE "2011/03/20"
25#define BNX2X_BC_VER 0x040200 27#define BNX2X_BC_VER 0x040200
26 28
27#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
28#define BCM_VLAN 1
29#endif
30
31#define BNX2X_MULTI_QUEUE 29#define BNX2X_MULTI_QUEUE
32 30
33#define BNX2X_NEW_NAPI 31#define BNX2X_NEW_NAPI
34 32
35 33#if defined(CONFIG_DCB)
36 34#define BCM_DCBNL
35#endif
37#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 36#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
38#define BCM_CNIC 1 37#define BCM_CNIC 1
39#include "../cnic_if.h" 38#include "../cnic_if.h"
40#endif 39#endif
41 40
42
43#ifdef BCM_CNIC 41#ifdef BCM_CNIC
44#define BNX2X_MIN_MSIX_VEC_CNT 3 42#define BNX2X_MIN_MSIX_VEC_CNT 3
45#define BNX2X_MSIX_VEC_FP_START 2 43#define BNX2X_MSIX_VEC_FP_START 2
@@ -54,6 +52,7 @@
54#include "bnx2x_fw_defs.h" 52#include "bnx2x_fw_defs.h"
55#include "bnx2x_hsi.h" 53#include "bnx2x_hsi.h"
56#include "bnx2x_link.h" 54#include "bnx2x_link.h"
55#include "bnx2x_dcb.h"
57#include "bnx2x_stats.h" 56#include "bnx2x_stats.h"
58 57
59/* error/debug prints */ 58/* error/debug prints */
@@ -129,16 +128,19 @@ void bnx2x_panic_dump(struct bnx2x *bp);
129 } while (0) 128 } while (0)
130#endif 129#endif
131 130
131#define bnx2x_mc_addr(ha) ((ha)->addr)
132#define bnx2x_uc_addr(ha) ((ha)->addr)
132 133
133#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff) 134#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
134#define U64_HI(x) (u32)(((u64)(x)) >> 32) 135#define U64_HI(x) (u32)(((u64)(x)) >> 32)
135#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) 136#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
136 137
137 138
138#define REG_ADDR(bp, offset) (bp->regview + offset) 139#define REG_ADDR(bp, offset) ((bp->regview) + (offset))
139 140
140#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) 141#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
141#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset)) 142#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
143#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset))
142 144
143#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset)) 145#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
144#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset)) 146#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
@@ -160,6 +162,9 @@ void bnx2x_panic_dump(struct bnx2x *bp);
160 offset, len32); \ 162 offset, len32); \
161 } while (0) 163 } while (0)
162 164
165#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
166 REG_WR_DMAE(bp, offset, valp, len32)
167
163#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \ 168#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
164 do { \ 169 do { \
165 memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \ 170 memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
@@ -175,16 +180,81 @@ void bnx2x_panic_dump(struct bnx2x *bp);
175 offsetof(struct shmem2_region, field)) 180 offsetof(struct shmem2_region, field))
176#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field)) 181#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
177#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val) 182#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
183#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \
184 offsetof(struct mf_cfg, field))
185#define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \
186 offsetof(struct mf2_cfg, field))
187
188#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field))
189#define MF_CFG_WR(bp, field, val) REG_WR(bp,\
190 MF_CFG_ADDR(bp, field), (val))
191#define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field))
178 192
179#define MF_CFG_RD(bp, field) SHMEM_RD(bp, mf_cfg.field) 193#define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \
180#define MF_CFG_WR(bp, field, val) SHMEM_WR(bp, mf_cfg.field, val) 194 (SHMEM2_RD((bp), size) > \
195 offsetof(struct shmem2_region, field)))
181 196
182#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) 197#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
183#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) 198#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
184 199
200/* SP SB indices */
201
202/* General SP events - stats query, cfc delete, etc */
203#define HC_SP_INDEX_ETH_DEF_CONS 3
204
205/* EQ completions */
206#define HC_SP_INDEX_EQ_CONS 7
207
208/* FCoE L2 connection completions */
209#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6
210#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4
211/* iSCSI L2 */
212#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
213#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
214
215/* Special clients parameters */
216
217/* SB indices */
218/* FCoE L2 */
219#define BNX2X_FCOE_L2_RX_INDEX \
220 (&bp->def_status_blk->sp_sb.\
221 index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS])
222
223#define BNX2X_FCOE_L2_TX_INDEX \
224 (&bp->def_status_blk->sp_sb.\
225 index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS])
226
227/**
228 * CIDs and CLIDs:
229 * CLIDs below is a CLID for func 0, then the CLID for other
230 * functions will be calculated by the formula:
231 *
232 * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
233 *
234 */
235/* iSCSI L2 */
236#define BNX2X_ISCSI_ETH_CL_ID 17
237#define BNX2X_ISCSI_ETH_CID 17
238
239/* FCoE L2 */
240#define BNX2X_FCOE_ETH_CL_ID 18
241#define BNX2X_FCOE_ETH_CID 18
242
243/** Additional rings budgeting */
244#ifdef BCM_CNIC
245#define CNIC_CONTEXT_USE 1
246#define FCOE_CONTEXT_USE 1
247#else
248#define CNIC_CONTEXT_USE 0
249#define FCOE_CONTEXT_USE 0
250#endif /* BCM_CNIC */
251#define NONE_ETH_CONTEXT_USE (FCOE_CONTEXT_USE)
252
185#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ 253#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
186 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR 254 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
187 255
256#define SM_RX_ID 0
257#define SM_TX_ID 1
188 258
189/* fast path */ 259/* fast path */
190 260
@@ -254,11 +324,26 @@ union db_prod {
254#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) 324#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
255#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) 325#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
256 326
327union host_hc_status_block {
328 /* pointer to fp status block e1x */
329 struct host_hc_status_block_e1x *e1x_sb;
330 /* pointer to fp status block e2 */
331 struct host_hc_status_block_e2 *e2_sb;
332};
257 333
258struct bnx2x_fastpath { 334struct bnx2x_fastpath {
259 335
336#define BNX2X_NAPI_WEIGHT 128
260 struct napi_struct napi; 337 struct napi_struct napi;
261 struct host_status_block *status_blk; 338 union host_hc_status_block status_blk;
339 /* chip independed shortcuts into sb structure */
340 __le16 *sb_index_values;
341 __le16 *sb_running_index;
342 /* chip independed shortcut into rx_prods_offset memory */
343 u32 ustorm_rx_prods_offset;
344
345 u32 rx_buf_size;
346
262 dma_addr_t status_blk_mapping; 347 dma_addr_t status_blk_mapping;
263 348
264 struct sw_tx_bd *tx_buf_ring; 349 struct sw_tx_bd *tx_buf_ring;
@@ -288,10 +373,15 @@ struct bnx2x_fastpath {
288#define BNX2X_FP_STATE_OPEN 0xa0000 373#define BNX2X_FP_STATE_OPEN 0xa0000
289#define BNX2X_FP_STATE_HALTING 0xb0000 374#define BNX2X_FP_STATE_HALTING 0xb0000
290#define BNX2X_FP_STATE_HALTED 0xc0000 375#define BNX2X_FP_STATE_HALTED 0xc0000
376#define BNX2X_FP_STATE_TERMINATING 0xd0000
377#define BNX2X_FP_STATE_TERMINATED 0xe0000
291 378
292 u8 index; /* number in fp array */ 379 u8 index; /* number in fp array */
293 u8 cl_id; /* eth client id */ 380 u8 cl_id; /* eth client id */
294 u8 sb_id; /* status block number in HW */ 381 u8 cl_qzone_id;
382 u8 fw_sb_id; /* status block number in FW */
383 u8 igu_sb_id; /* status block number in HW */
384 u32 cid;
295 385
296 union db_prod tx_db; 386 union db_prod tx_db;
297 387
@@ -301,8 +391,7 @@ struct bnx2x_fastpath {
301 u16 tx_bd_cons; 391 u16 tx_bd_cons;
302 __le16 *tx_cons_sb; 392 __le16 *tx_cons_sb;
303 393
304 __le16 fp_c_idx; 394 __le16 fp_hc_idx;
305 __le16 fp_u_idx;
306 395
307 u16 rx_bd_prod; 396 u16 rx_bd_prod;
308 u16 rx_bd_cons; 397 u16 rx_bd_cons;
@@ -312,8 +401,6 @@ struct bnx2x_fastpath {
312 /* The last maximal completed SGE */ 401 /* The last maximal completed SGE */
313 u16 last_max_sge; 402 u16 last_max_sge;
314 __le16 *rx_cons_sb; 403 __le16 *rx_cons_sb;
315 __le16 *rx_bd_cons_sb;
316
317 404
318 unsigned long tx_pkt, 405 unsigned long tx_pkt,
319 rx_pkt, 406 rx_pkt,
@@ -345,6 +432,21 @@ struct bnx2x_fastpath {
345 432
346#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) 433#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
347 434
435/* Use 2500 as a mini-jumbo MTU for FCoE */
436#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
437
438#ifdef BCM_CNIC
439/* FCoE L2 `fastpath' is right after the eth entries */
440#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
441#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX])
442#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
443#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX)
444#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX)
445#else
446#define IS_FCOE_FP(fp) false
447#define IS_FCOE_IDX(idx) false
448#endif
449
348 450
349/* MC hsi */ 451/* MC hsi */
350#define MAX_FETCH_BD 13 /* HW max BDs per packet */ 452#define MAX_FETCH_BD 13 /* HW max BDs per packet */
@@ -356,6 +458,8 @@ struct bnx2x_fastpath {
356#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) 458#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
357#define MAX_TX_BD (NUM_TX_BD - 1) 459#define MAX_TX_BD (NUM_TX_BD - 1)
358#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) 460#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
461#define INIT_JUMBO_TX_RING_SIZE MAX_TX_AVAIL
462#define INIT_TX_RING_SIZE MAX_TX_AVAIL
359#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ 463#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
360 (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) 464 (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
361#define TX_BD(x) ((x) & MAX_TX_BD) 465#define TX_BD(x) ((x) & MAX_TX_BD)
@@ -369,6 +473,10 @@ struct bnx2x_fastpath {
369#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) 473#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
370#define MAX_RX_BD (NUM_RX_BD - 1) 474#define MAX_RX_BD (NUM_RX_BD - 1)
371#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) 475#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
476#define MIN_RX_SIZE_TPA 72
477#define MIN_RX_SIZE_NONTPA 10
478#define INIT_JUMBO_RX_RING_SIZE MAX_RX_AVAIL
479#define INIT_RX_RING_SIZE MAX_RX_AVAIL
372#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ 480#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
373 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) 481 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
374#define RX_BD(x) ((x) & MAX_RX_BD) 482#define RX_BD(x) ((x) & MAX_RX_BD)
@@ -419,11 +527,12 @@ struct bnx2x_fastpath {
419 le32_to_cpu((bd)->addr_lo)) 527 le32_to_cpu((bd)->addr_lo))
420#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) 528#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
421 529
422 530#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
531#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
423#define DPM_TRIGER_TYPE 0x40 532#define DPM_TRIGER_TYPE 0x40
424#define DOORBELL(bp, cid, val) \ 533#define DOORBELL(bp, cid, val) \
425 do { \ 534 do { \
426 writel((u32)(val), bp->doorbells + (BCM_PAGE_SIZE * (cid)) + \ 535 writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
427 DPM_TRIGER_TYPE); \ 536 DPM_TRIGER_TYPE); \
428 } while (0) 537 } while (0)
429 538
@@ -481,31 +590,15 @@ struct bnx2x_fastpath {
481#define BNX2X_RX_SUM_FIX(cqe) \ 590#define BNX2X_RX_SUM_FIX(cqe) \
482 BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) 591 BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
483 592
484 593#define U_SB_ETH_RX_CQ_INDEX 1
485#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES) 594#define U_SB_ETH_RX_BD_INDEX 2
486#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES) 595#define C_SB_ETH_TX_CQ_INDEX 5
487
488#define U_SB_ETH_RX_CQ_INDEX HC_INDEX_U_ETH_RX_CQ_CONS
489#define U_SB_ETH_RX_BD_INDEX HC_INDEX_U_ETH_RX_BD_CONS
490#define C_SB_ETH_TX_CQ_INDEX HC_INDEX_C_ETH_TX_CQ_CONS
491 596
492#define BNX2X_RX_SB_INDEX \ 597#define BNX2X_RX_SB_INDEX \
493 (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_CQ_INDEX]) 598 (&fp->sb_index_values[U_SB_ETH_RX_CQ_INDEX])
494
495#define BNX2X_RX_SB_BD_INDEX \
496 (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_BD_INDEX])
497
498#define BNX2X_RX_SB_INDEX_NUM \
499 (((U_SB_ETH_RX_CQ_INDEX << \
500 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
501 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER) | \
502 ((U_SB_ETH_RX_BD_INDEX << \
503 USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT) & \
504 USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER))
505 599
506#define BNX2X_TX_SB_INDEX \ 600#define BNX2X_TX_SB_INDEX \
507 (&fp->status_blk->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX]) 601 (&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX])
508
509 602
510/* end of fast path */ 603/* end of fast path */
511 604
@@ -521,12 +614,19 @@ struct bnx2x_common {
521#define CHIP_NUM_57710 0x164e 614#define CHIP_NUM_57710 0x164e
522#define CHIP_NUM_57711 0x164f 615#define CHIP_NUM_57711 0x164f
523#define CHIP_NUM_57711E 0x1650 616#define CHIP_NUM_57711E 0x1650
617#define CHIP_NUM_57712 0x1662
618#define CHIP_NUM_57712E 0x1663
524#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) 619#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
525#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) 620#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
526#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) 621#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
622#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712)
623#define CHIP_IS_57712E(bp) (CHIP_NUM(bp) == CHIP_NUM_57712E)
527#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ 624#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
528 CHIP_IS_57711E(bp)) 625 CHIP_IS_57711E(bp))
529#define IS_E1H_OFFSET CHIP_IS_E1H(bp) 626#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
627 CHIP_IS_57712E(bp))
628#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
629#define IS_E1H_OFFSET (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp))
530 630
531#define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000) 631#define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000)
532#define CHIP_REV_Ax 0x00000000 632#define CHIP_REV_Ax 0x00000000
@@ -544,6 +644,7 @@ struct bnx2x_common {
544 644
545#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) 645#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
546#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) 646#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
647#define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
547 648
548 int flash_size; 649 int flash_size;
549#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ 650#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
@@ -552,12 +653,34 @@ struct bnx2x_common {
552 653
553 u32 shmem_base; 654 u32 shmem_base;
554 u32 shmem2_base; 655 u32 shmem2_base;
656 u32 mf_cfg_base;
657 u32 mf2_cfg_base;
555 658
556 u32 hw_config; 659 u32 hw_config;
557 660
558 u32 bc_ver; 661 u32 bc_ver;
662
663 u8 int_block;
664#define INT_BLOCK_HC 0
665#define INT_BLOCK_IGU 1
666#define INT_BLOCK_MODE_NORMAL 0
667#define INT_BLOCK_MODE_BW_COMP 2
668#define CHIP_INT_MODE_IS_NBC(bp) \
669 (CHIP_IS_E2(bp) && \
670 !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
671#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
672
673 u8 chip_port_mode;
674#define CHIP_4_PORT_MODE 0x0
675#define CHIP_2_PORT_MODE 0x1
676#define CHIP_PORT_MODE_NONE 0x2
677#define CHIP_MODE(bp) (bp->common.chip_port_mode)
678#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
559}; 679};
560 680
681/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
682#define BNX2X_IGU_STAS_MSG_VF_CNT 64
683#define BNX2X_IGU_STAS_MSG_PF_CNT 4
561 684
562/* end of common */ 685/* end of common */
563 686
@@ -566,13 +689,13 @@ struct bnx2x_common {
566struct bnx2x_port { 689struct bnx2x_port {
567 u32 pmf; 690 u32 pmf;
568 691
569 u32 link_config; 692 u32 link_config[LINK_CONFIG_SIZE];
570 693
571 u32 supported; 694 u32 supported[LINK_CONFIG_SIZE];
572/* link settings - missing defines */ 695/* link settings - missing defines */
573#define SUPPORTED_2500baseX_Full (1 << 15) 696#define SUPPORTED_2500baseX_Full (1 << 15)
574 697
575 u32 advertising; 698 u32 advertising[LINK_CONFIG_SIZE];
576/* link settings - missing defines */ 699/* link settings - missing defines */
577#define ADVERTISED_2500baseX_Full (1 << 15) 700#define ADVERTISED_2500baseX_Full (1 << 15)
578 701
@@ -589,27 +712,114 @@ struct bnx2x_port {
589 712
590/* end of port */ 713/* end of port */
591 714
715/* e1h Classification CAM line allocations */
716enum {
717 CAM_ETH_LINE = 0,
718 CAM_ISCSI_ETH_LINE,
719 CAM_FIP_ETH_LINE,
720 CAM_FIP_MCAST_LINE,
721 CAM_MAX_PF_LINE = CAM_FIP_MCAST_LINE
722};
723/* number of MACs per function in NIG memory - used for SI mode */
724#define NIG_LLH_FUNC_MEM_SIZE 16
725/* number of entries in NIG_REG_LLHX_FUNC_MEM */
726#define NIG_LLH_FUNC_MEM_MAX_OFFSET 8
727
728#define BNX2X_VF_ID_INVALID 0xFF
729
730/*
731 * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
732 * control by the number of fast-path status blocks supported by the
733 * device (HW/FW). Each fast-path status block (FP-SB) aka non-default
734 * status block represents an independent interrupts context that can
735 * serve a regular L2 networking queue. However special L2 queues such
736 * as the FCoE queue do not require a FP-SB and other components like
737 * the CNIC may consume FP-SB reducing the number of possible L2 queues
738 *
739 * If the maximum number of FP-SB available is X then:
740 * a. If CNIC is supported it consumes 1 FP-SB thus the max number of
741 * regular L2 queues is Y=X-1
742 * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
743 * c. If the FCoE L2 queue is supported the actual number of L2 queues
744 * is Y+1
745 * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
746 * slow-path interrupts) or Y+2 if CNIC is supported (one additional
747 * FP interrupt context for the CNIC).
748 * e. The number of HW context (CID count) is always X or X+1 if FCoE
749 * L2 queue is supported. the cid for the FCoE L2 queue is always X.
750 */
751
752#define FP_SB_MAX_E1x 16 /* fast-path interrupt contexts E1x */
753#define FP_SB_MAX_E2 16 /* fast-path interrupt contexts E2 */
592 754
755/*
756 * cid_cnt paramter below refers to the value returned by
757 * 'bnx2x_get_l2_cid_count()' routine
758 */
593 759
594#ifdef BCM_CNIC 760/*
595#define MAX_CONTEXT 15 761 * The number of FP context allocated by the driver == max number of regular
596#else 762 * L2 queues + 1 for the FCoE L2 queue
597#define MAX_CONTEXT 16 763 */
598#endif 764#define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE)
765
766/*
767 * The number of FP-SB allocated by the driver == max number of regular L2
768 * queues + 1 for the CNIC which also consumes an FP-SB
769 */
770#define FP_SB_COUNT(cid_cnt) ((cid_cnt) - FCOE_CONTEXT_USE)
771#define NUM_IGU_SB_REQUIRED(cid_cnt) \
772 (FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE)
599 773
600union cdu_context { 774union cdu_context {
601 struct eth_context eth; 775 struct eth_context eth;
602 char pad[1024]; 776 char pad[1024];
603}; 777};
604 778
779/* CDU host DB constants */
780#define CDU_ILT_PAGE_SZ_HW 3
781#define CDU_ILT_PAGE_SZ (4096 << CDU_ILT_PAGE_SZ_HW) /* 32K */
782#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
783
784#ifdef BCM_CNIC
785#define CNIC_ISCSI_CID_MAX 256
786#define CNIC_FCOE_CID_MAX 2048
787#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
788#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
789#endif
790
791#define QM_ILT_PAGE_SZ_HW 3
792#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 32K */
793#define QM_CID_ROUND 1024
794
795#ifdef BCM_CNIC
796/* TM (timers) host DB constants */
797#define TM_ILT_PAGE_SZ_HW 2
798#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 16K */
799/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
800#define TM_CONN_NUM 1024
801#define TM_ILT_SZ (8 * TM_CONN_NUM)
802#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
803
804/* SRC (Searcher) host DB constants */
805#define SRC_ILT_PAGE_SZ_HW 3
806#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 32K */
807#define SRC_HASH_BITS 10
808#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */
809#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM)
810#define SRC_T2_SZ SRC_ILT_SZ
811#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
812#endif
813
605#define MAX_DMAE_C 8 814#define MAX_DMAE_C 8
606 815
607/* DMA memory not used in fastpath */ 816/* DMA memory not used in fastpath */
608struct bnx2x_slowpath { 817struct bnx2x_slowpath {
609 union cdu_context context[MAX_CONTEXT];
610 struct eth_stats_query fw_stats; 818 struct eth_stats_query fw_stats;
611 struct mac_configuration_cmd mac_config; 819 struct mac_configuration_cmd mac_config;
612 struct mac_configuration_cmd mcast_config; 820 struct mac_configuration_cmd mcast_config;
821 struct mac_configuration_cmd uc_mac_config;
822 struct client_init_ramrod_data client_init_data;
613 823
614 /* used by dmae command executer */ 824 /* used by dmae command executer */
615 struct dmae_command dmae[MAX_DMAE_C]; 825 struct dmae_command dmae[MAX_DMAE_C];
@@ -623,6 +833,8 @@ struct bnx2x_slowpath {
623 833
624 u32 wb_comp; 834 u32 wb_comp;
625 u32 wb_data[4]; 835 u32 wb_data[4];
836 /* pfc configuration for DCBX ramrod */
837 struct flow_control_configuration pfc_config;
626}; 838};
627 839
628#define bnx2x_sp(bp, var) (&bp->slowpath->var) 840#define bnx2x_sp(bp, var) (&bp->slowpath->var)
@@ -634,52 +846,97 @@ struct bnx2x_slowpath {
634#define MAX_DYNAMIC_ATTN_GRPS 8 846#define MAX_DYNAMIC_ATTN_GRPS 8
635 847
636struct attn_route { 848struct attn_route {
637 u32 sig[4]; 849 u32 sig[5];
850};
851
852struct iro {
853 u32 base;
854 u16 m1;
855 u16 m2;
856 u16 m3;
857 u16 size;
858};
859
860struct hw_context {
861 union cdu_context *vcxt;
862 dma_addr_t cxt_mapping;
863 size_t size;
638}; 864};
639 865
866/* forward */
867struct bnx2x_ilt;
868
640typedef enum { 869typedef enum {
641 BNX2X_RECOVERY_DONE, 870 BNX2X_RECOVERY_DONE,
642 BNX2X_RECOVERY_INIT, 871 BNX2X_RECOVERY_INIT,
643 BNX2X_RECOVERY_WAIT, 872 BNX2X_RECOVERY_WAIT,
644} bnx2x_recovery_state_t; 873} bnx2x_recovery_state_t;
645 874
875/**
876 * Event queue (EQ or event ring) MC hsi
877 * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2
878 */
879#define NUM_EQ_PAGES 1
880#define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem))
881#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1)
882#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
883#define EQ_DESC_MASK (NUM_EQ_DESC - 1)
884#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
885
886/* depends on EQ_DESC_CNT_PAGE being a power of 2 */
887#define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \
888 (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
889
890/* depends on the above and on NUM_EQ_PAGES being a power of 2 */
891#define EQ_DESC(x) ((x) & EQ_DESC_MASK)
892
893#define BNX2X_EQ_INDEX \
894 (&bp->def_status_blk->sp_sb.\
895 index_values[HC_SP_INDEX_EQ_CONS])
896
897/* This is a data that will be used to create a link report message.
898 * We will keep the data used for the last link report in order
899 * to prevent reporting the same link parameters twice.
900 */
901struct bnx2x_link_report_data {
902 u16 line_speed; /* Effective line speed */
903 unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */
904};
905
906enum {
907 BNX2X_LINK_REPORT_FD, /* Full DUPLEX */
908 BNX2X_LINK_REPORT_LINK_DOWN,
909 BNX2X_LINK_REPORT_RX_FC_ON,
910 BNX2X_LINK_REPORT_TX_FC_ON,
911};
912
646struct bnx2x { 913struct bnx2x {
647 /* Fields used in the tx and intr/napi performance paths 914 /* Fields used in the tx and intr/napi performance paths
648 * are grouped together in the beginning of the structure 915 * are grouped together in the beginning of the structure
649 */ 916 */
650 struct bnx2x_fastpath fp[MAX_CONTEXT]; 917 struct bnx2x_fastpath *fp;
651 void __iomem *regview; 918 void __iomem *regview;
652 void __iomem *doorbells; 919 void __iomem *doorbells;
653#ifdef BCM_CNIC 920 u16 db_size;
654#define BNX2X_DB_SIZE (18*BCM_PAGE_SIZE)
655#else
656#define BNX2X_DB_SIZE (16*BCM_PAGE_SIZE)
657#endif
658 921
659 struct net_device *dev; 922 struct net_device *dev;
660 struct pci_dev *pdev; 923 struct pci_dev *pdev;
661 924
925 struct iro *iro_arr;
926#define IRO (bp->iro_arr)
927
662 atomic_t intr_sem; 928 atomic_t intr_sem;
663 929
664 bnx2x_recovery_state_t recovery_state; 930 bnx2x_recovery_state_t recovery_state;
665 int is_leader; 931 int is_leader;
666#ifdef BCM_CNIC 932 struct msix_entry *msix_table;
667 struct msix_entry msix_table[MAX_CONTEXT+2];
668#else
669 struct msix_entry msix_table[MAX_CONTEXT+1];
670#endif
671#define INT_MODE_INTx 1 933#define INT_MODE_INTx 1
672#define INT_MODE_MSI 2 934#define INT_MODE_MSI 2
673 935
674 int tx_ring_size; 936 int tx_ring_size;
675 937
676#ifdef BCM_VLAN 938/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
677 struct vlan_group *vlgrp; 939#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
678#endif
679
680 u32 rx_csum;
681 u32 rx_buf_size;
682#define ETH_OVREHEAD (ETH_HLEN + 8) /* 8 for CRC + VLAN */
683#define ETH_MIN_PACKET_SIZE 60 940#define ETH_MIN_PACKET_SIZE 60
684#define ETH_MAX_PACKET_SIZE 1500 941#define ETH_MAX_PACKET_SIZE 1500
685#define ETH_MAX_JUMBO_PACKET_SIZE 9600 942#define ETH_MAX_JUMBO_PACKET_SIZE 9600
@@ -688,13 +945,12 @@ struct bnx2x {
688#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \ 945#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
689 L1_CACHE_SHIFT : 8) 946 L1_CACHE_SHIFT : 8)
690#define BNX2X_RX_ALIGN (1 << BNX2X_RX_ALIGN_SHIFT) 947#define BNX2X_RX_ALIGN (1 << BNX2X_RX_ALIGN_SHIFT)
948#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
691 949
692 struct host_def_status_block *def_status_blk; 950 struct host_sp_status_block *def_status_blk;
693#define DEF_SB_ID 16 951#define DEF_SB_IGU_ID 16
694 __le16 def_c_idx; 952#define DEF_SB_ID HC_SP_SB_ID
695 __le16 def_u_idx; 953 __le16 def_idx;
696 __le16 def_x_idx;
697 __le16 def_t_idx;
698 __le16 def_att_idx; 954 __le16 def_att_idx;
699 u32 attn_state; 955 u32 attn_state;
700 struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; 956 struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
@@ -706,10 +962,18 @@ struct bnx2x {
706 struct eth_spe *spq_prod_bd; 962 struct eth_spe *spq_prod_bd;
707 struct eth_spe *spq_last_bd; 963 struct eth_spe *spq_last_bd;
708 __le16 *dsb_sp_prod; 964 __le16 *dsb_sp_prod;
709 u16 spq_left; /* serialize spq */ 965 atomic_t cq_spq_left; /* ETH_XXX ramrods credit */
710 /* used to synchronize spq accesses */ 966 /* used to synchronize spq accesses */
711 spinlock_t spq_lock; 967 spinlock_t spq_lock;
712 968
969 /* event queue */
970 union event_ring_elem *eq_ring;
971 dma_addr_t eq_mapping;
972 u16 eq_prod;
973 u16 eq_cons;
974 __le16 *eq_cons_sb;
975 atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
976
713 /* Flags for marking that there is a STAT_QUERY or 977 /* Flags for marking that there is a STAT_QUERY or
714 SET_MAC ramrod pending */ 978 SET_MAC ramrod pending */
715 int stats_pending; 979 int stats_pending;
@@ -728,18 +992,35 @@ struct bnx2x {
728#define USING_DAC_FLAG 0x10 992#define USING_DAC_FLAG 0x10
729#define USING_MSIX_FLAG 0x20 993#define USING_MSIX_FLAG 0x20
730#define USING_MSI_FLAG 0x40 994#define USING_MSI_FLAG 0x40
995
731#define TPA_ENABLE_FLAG 0x80 996#define TPA_ENABLE_FLAG 0x80
732#define NO_MCP_FLAG 0x100 997#define NO_MCP_FLAG 0x100
998#define DISABLE_MSI_FLAG 0x200
733#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) 999#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
734#define HW_VLAN_TX_FLAG 0x400
735#define HW_VLAN_RX_FLAG 0x800
736#define MF_FUNC_DIS 0x1000 1000#define MF_FUNC_DIS 0x1000
737 1001#define FCOE_MACS_SET 0x2000
738 int func; 1002#define NO_FCOE_FLAG 0x4000
739#define BP_PORT(bp) (bp->func % PORT_MAX) 1003#define NO_ISCSI_OOO_FLAG 0x8000
740#define BP_FUNC(bp) (bp->func) 1004#define NO_ISCSI_FLAG 0x10000
741#define BP_E1HVN(bp) (bp->func >> 1) 1005
1006#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
1007#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
1008#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
1009
1010 int pf_num; /* absolute PF number */
1011 int pfid; /* per-path PF number */
1012 int base_fw_ndsb;
1013#define BP_PATH(bp) (!CHIP_IS_E2(bp) ? \
1014 0 : (bp->pf_num & 1))
1015#define BP_PORT(bp) (bp->pfid & 1)
1016#define BP_FUNC(bp) (bp->pfid)
1017#define BP_ABS_FUNC(bp) (bp->pf_num)
1018#define BP_E1HVN(bp) (bp->pfid >> 1)
1019#define BP_VN(bp) (CHIP_MODE_IS_4_PORT(bp) ? \
1020 0 : BP_E1HVN(bp))
742#define BP_L_ID(bp) (BP_E1HVN(bp) << 2) 1021#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
1022#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\
1023 BP_VN(bp) * (CHIP_IS_E1x(bp) ? 2 : 1))
743 1024
744#ifdef BCM_CNIC 1025#ifdef BCM_CNIC
745#define BCM_CNIC_CID_START 16 1026#define BCM_CNIC_CID_START 16
@@ -761,6 +1042,9 @@ struct bnx2x {
761 1042
762 struct link_params link_params; 1043 struct link_params link_params;
763 struct link_vars link_vars; 1044 struct link_vars link_vars;
1045 u32 link_cnt;
1046 struct bnx2x_link_report_data last_reported_link;
1047
764 struct mdio_if_info mdio; 1048 struct mdio_if_info mdio;
765 1049
766 struct bnx2x_common common; 1050 struct bnx2x_common common;
@@ -769,10 +1053,13 @@ struct bnx2x {
769 struct cmng_struct_per_port cmng; 1053 struct cmng_struct_per_port cmng;
770 u32 vn_weight_sum; 1054 u32 vn_weight_sum;
771 1055
772 u32 mf_config; 1056 u32 mf_config[E1HVN_MAX];
773 u16 e1hov; 1057 u32 mf2_config[E2_FUNC_MAX];
774 u8 e1hmf; 1058 u16 mf_ov;
775#define IS_E1HMF(bp) (bp->e1hmf != 0) 1059 u8 mf_mode;
1060#define IS_MF(bp) (bp->mf_mode != 0)
1061#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
1062#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
776 1063
777 u8 wol; 1064 u8 wol;
778 1065
@@ -800,6 +1087,7 @@ struct bnx2x {
800#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 1087#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
801#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 1088#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
802#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000 1089#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
1090#define BNX2X_STATE_FUNC_STARTED 0x7000
803#define BNX2X_STATE_DIAG 0xe000 1091#define BNX2X_STATE_DIAG 0xe000
804#define BNX2X_STATE_ERROR 0xf000 1092#define BNX2X_STATE_ERROR 0xf000
805 1093
@@ -807,6 +1095,17 @@ struct bnx2x {
807 int num_queues; 1095 int num_queues;
808 int disable_tpa; 1096 int disable_tpa;
809 int int_mode; 1097 int int_mode;
1098 u32 *rx_indir_table;
1099
1100 struct tstorm_eth_mac_filter_config mac_filters;
1101#define BNX2X_ACCEPT_NONE 0x0000
1102#define BNX2X_ACCEPT_UNICAST 0x0001
1103#define BNX2X_ACCEPT_MULTICAST 0x0002
1104#define BNX2X_ACCEPT_ALL_UNICAST 0x0004
1105#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008
1106#define BNX2X_ACCEPT_BROADCAST 0x0010
1107#define BNX2X_ACCEPT_UNMATCHED_UCAST 0x0020
1108#define BNX2X_PROMISCUOUS_MODE 0x10000
810 1109
811 u32 rx_mode; 1110 u32 rx_mode;
812#define BNX2X_RX_MODE_NONE 0 1111#define BNX2X_RX_MODE_NONE 0
@@ -816,34 +1115,41 @@ struct bnx2x {
816#define BNX2X_MAX_MULTICAST 64 1115#define BNX2X_MAX_MULTICAST 64
817#define BNX2X_MAX_EMUL_MULTI 16 1116#define BNX2X_MAX_EMUL_MULTI 16
818 1117
819 u32 rx_mode_cl_mask; 1118 u8 igu_dsb_id;
820 1119 u8 igu_base_sb;
1120 u8 igu_sb_cnt;
821 dma_addr_t def_status_blk_mapping; 1121 dma_addr_t def_status_blk_mapping;
822 1122
823 struct bnx2x_slowpath *slowpath; 1123 struct bnx2x_slowpath *slowpath;
824 dma_addr_t slowpath_mapping; 1124 dma_addr_t slowpath_mapping;
1125 struct hw_context context;
1126
1127 struct bnx2x_ilt *ilt;
1128#define BP_ILT(bp) ((bp)->ilt)
1129#define ILT_MAX_LINES 128
1130
1131 int l2_cid_count;
1132#define L2_ILT_LINES(bp) (DIV_ROUND_UP((bp)->l2_cid_count, \
1133 ILT_PAGE_CIDS))
1134#define BNX2X_DB_SIZE(bp) ((bp)->l2_cid_count * (1 << BNX2X_DB_SHIFT))
1135
1136 int qm_cid_count;
825 1137
826 int dropless_fc; 1138 int dropless_fc;
827 1139
828#ifdef BCM_CNIC 1140#ifdef BCM_CNIC
829 u32 cnic_flags; 1141 u32 cnic_flags;
830#define BNX2X_CNIC_FLAG_MAC_SET 1 1142#define BNX2X_CNIC_FLAG_MAC_SET 1
831
832 void *t1;
833 dma_addr_t t1_mapping;
834 void *t2; 1143 void *t2;
835 dma_addr_t t2_mapping; 1144 dma_addr_t t2_mapping;
836 void *timers; 1145 struct cnic_ops __rcu *cnic_ops;
837 dma_addr_t timers_mapping;
838 void *qm;
839 dma_addr_t qm_mapping;
840 struct cnic_ops *cnic_ops;
841 void *cnic_data; 1146 void *cnic_data;
842 u32 cnic_tag; 1147 u32 cnic_tag;
843 struct cnic_eth_dev cnic_eth_dev; 1148 struct cnic_eth_dev cnic_eth_dev;
844 struct host_status_block *cnic_sb; 1149 union host_hc_status_block cnic_sb;
845 dma_addr_t cnic_sb_mapping; 1150 dma_addr_t cnic_sb_mapping;
846#define CNIC_SB_ID(bp) BP_L_ID(bp) 1151#define CNIC_SB_ID(bp) ((bp)->base_fw_ndsb + BP_L_ID(bp))
1152#define CNIC_IGU_SB_ID(bp) ((bp)->igu_base_sb)
847 struct eth_spe *cnic_kwq; 1153 struct eth_spe *cnic_kwq;
848 struct eth_spe *cnic_kwq_prod; 1154 struct eth_spe *cnic_kwq_prod;
849 struct eth_spe *cnic_kwq_cons; 1155 struct eth_spe *cnic_kwq_cons;
@@ -851,12 +1157,12 @@ struct bnx2x {
851 u16 cnic_kwq_pending; 1157 u16 cnic_kwq_pending;
852 u16 cnic_spq_pending; 1158 u16 cnic_spq_pending;
853 struct mutex cnic_mutex; 1159 struct mutex cnic_mutex;
854 u8 iscsi_mac[6]; 1160 u8 fip_mac[ETH_ALEN];
855#endif 1161#endif
856 1162
857 int dmae_ready; 1163 int dmae_ready;
858 /* used to synchronize dmae accesses */ 1164 /* used to synchronize dmae accesses */
859 struct mutex dmae_mutex; 1165 spinlock_t dmae_lock;
860 1166
861 /* used to protect the FW mail box */ 1167 /* used to protect the FW mail box */
862 struct mutex fw_mb_mutex; 1168 struct mutex fw_mb_mutex;
@@ -911,34 +1217,282 @@ struct bnx2x {
911 1217
912 char fw_ver[32]; 1218 char fw_ver[32];
913 const struct firmware *firmware; 1219 const struct firmware *firmware;
1220 /* LLDP params */
1221 struct bnx2x_config_lldp_params lldp_config_params;
1222
1223 /* DCB support on/off */
1224 u16 dcb_state;
1225#define BNX2X_DCB_STATE_OFF 0
1226#define BNX2X_DCB_STATE_ON 1
1227
1228 /* DCBX engine mode */
1229 int dcbx_enabled;
1230#define BNX2X_DCBX_ENABLED_OFF 0
1231#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1
1232#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2
1233#define BNX2X_DCBX_ENABLED_INVALID (-1)
1234
1235 bool dcbx_mode_uset;
1236
1237 struct bnx2x_config_dcbx_params dcbx_config_params;
1238
1239 struct bnx2x_dcbx_port_params dcbx_port_params;
1240 int dcb_version;
1241
1242 /* DCBX Negotiation results */
1243 struct dcbx_features dcbx_local_feat;
1244 u32 dcbx_error;
1245#ifdef BCM_DCBNL
1246 struct dcbx_features dcbx_remote_feat;
1247 u32 dcbx_remote_flags;
1248#endif
1249 u32 pending_max;
914}; 1250};
915 1251
1252/**
1253 * Init queue/func interface
1254 */
1255/* queue init flags */
1256#define QUEUE_FLG_TPA 0x0001
1257#define QUEUE_FLG_CACHE_ALIGN 0x0002
1258#define QUEUE_FLG_STATS 0x0004
1259#define QUEUE_FLG_OV 0x0008
1260#define QUEUE_FLG_VLAN 0x0010
1261#define QUEUE_FLG_COS 0x0020
1262#define QUEUE_FLG_HC 0x0040
1263#define QUEUE_FLG_DHC 0x0080
1264#define QUEUE_FLG_OOO 0x0100
1265
1266#define QUEUE_DROP_IP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR
1267#define QUEUE_DROP_TCP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR
1268#define QUEUE_DROP_TTL0 TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0
1269#define QUEUE_DROP_UDP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR
1270
1271
1272
1273/* rss capabilities */
1274#define RSS_IPV4_CAP 0x0001
1275#define RSS_IPV4_TCP_CAP 0x0002
1276#define RSS_IPV6_CAP 0x0004
1277#define RSS_IPV6_TCP_CAP 0x0008
916 1278
917#define BNX2X_MAX_QUEUES(bp) (IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
918 : MAX_CONTEXT)
919#define BNX2X_NUM_QUEUES(bp) (bp->num_queues) 1279#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
1280#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE)
1281
1282/* ethtool statistics are displayed for all regular ethernet queues and the
1283 * fcoe L2 queue if not disabled
1284 */
1285#define BNX2X_NUM_STAT_QUEUES(bp) (NO_FCOE(bp) ? BNX2X_NUM_ETH_QUEUES(bp) : \
1286 (BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE))
1287
920#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) 1288#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
921 1289
1290#define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE)
1291
1292#define RSS_IPV4_CAP_MASK \
1293 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
1294
1295#define RSS_IPV4_TCP_CAP_MASK \
1296 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY
1297
1298#define RSS_IPV6_CAP_MASK \
1299 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY
1300
1301#define RSS_IPV6_TCP_CAP_MASK \
1302 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
1303
1304/* func init flags */
1305#define FUNC_FLG_STATS 0x0001
1306#define FUNC_FLG_TPA 0x0002
1307#define FUNC_FLG_SPQ 0x0004
1308#define FUNC_FLG_LEADING 0x0008 /* PF only */
1309
1310struct rxq_pause_params {
1311 u16 bd_th_lo;
1312 u16 bd_th_hi;
1313 u16 rcq_th_lo;
1314 u16 rcq_th_hi;
1315 u16 sge_th_lo; /* valid iff QUEUE_FLG_TPA */
1316 u16 sge_th_hi; /* valid iff QUEUE_FLG_TPA */
1317 u16 pri_map;
1318};
1319
1320struct bnx2x_rxq_init_params {
1321 /* cxt*/
1322 struct eth_context *cxt;
1323
1324 /* dma */
1325 dma_addr_t dscr_map;
1326 dma_addr_t sge_map;
1327 dma_addr_t rcq_map;
1328 dma_addr_t rcq_np_map;
1329
1330 u16 flags;
1331 u16 drop_flags;
1332 u16 mtu;
1333 u16 buf_sz;
1334 u16 fw_sb_id;
1335 u16 cl_id;
1336 u16 spcl_id;
1337 u16 cl_qzone_id;
1338
1339 /* valid iff QUEUE_FLG_STATS */
1340 u16 stat_id;
1341
1342 /* valid iff QUEUE_FLG_TPA */
1343 u16 tpa_agg_sz;
1344 u16 sge_buf_sz;
1345 u16 max_sges_pkt;
1346
1347 /* valid iff QUEUE_FLG_CACHE_ALIGN */
1348 u8 cache_line_log;
1349
1350 u8 sb_cq_index;
1351 u32 cid;
1352
1353 /* desired interrupts per sec. valid iff QUEUE_FLG_HC */
1354 u32 hc_rate;
1355};
1356
1357struct bnx2x_txq_init_params {
1358 /* cxt*/
1359 struct eth_context *cxt;
1360
1361 /* dma */
1362 dma_addr_t dscr_map;
1363
1364 u16 flags;
1365 u16 fw_sb_id;
1366 u8 sb_cq_index;
1367 u8 cos; /* valid iff QUEUE_FLG_COS */
1368 u16 stat_id; /* valid iff QUEUE_FLG_STATS */
1369 u16 traffic_type;
1370 u32 cid;
1371 u16 hc_rate; /* desired interrupts per sec.*/
1372 /* valid iff QUEUE_FLG_HC */
1373
1374};
1375
1376struct bnx2x_client_ramrod_params {
1377 int *pstate;
1378 int state;
1379 u16 index;
1380 u16 cl_id;
1381 u32 cid;
1382 u8 poll;
1383#define CLIENT_IS_FCOE 0x01
1384#define CLIENT_IS_LEADING_RSS 0x02
1385 u8 flags;
1386};
1387
1388struct bnx2x_client_init_params {
1389 struct rxq_pause_params pause;
1390 struct bnx2x_rxq_init_params rxq_params;
1391 struct bnx2x_txq_init_params txq_params;
1392 struct bnx2x_client_ramrod_params ramrod_params;
1393};
1394
1395struct bnx2x_rss_params {
1396 int mode;
1397 u16 cap;
1398 u16 result_mask;
1399};
1400
1401struct bnx2x_func_init_params {
1402
1403 /* rss */
1404 struct bnx2x_rss_params *rss; /* valid iff FUNC_FLG_RSS */
1405
1406 /* dma */
1407 dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
1408 dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */
1409
1410 u16 func_flgs;
1411 u16 func_id; /* abs fid */
1412 u16 pf_id;
1413 u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
1414};
1415
1416#define for_each_eth_queue(bp, var) \
1417 for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
1418
1419#define for_each_nondefault_eth_queue(bp, var) \
1420 for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
1421
1422#define for_each_napi_queue(bp, var) \
1423 for (var = 0; \
1424 var < BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE; var++) \
1425 if (skip_queue(bp, var)) \
1426 continue; \
1427 else
1428
922#define for_each_queue(bp, var) \ 1429#define for_each_queue(bp, var) \
923 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) 1430 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
1431 if (skip_queue(bp, var)) \
1432 continue; \
1433 else
1434
1435#define for_each_rx_queue(bp, var) \
1436 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
1437 if (skip_rx_queue(bp, var)) \
1438 continue; \
1439 else
1440
1441#define for_each_tx_queue(bp, var) \
1442 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
1443 if (skip_tx_queue(bp, var)) \
1444 continue; \
1445 else
1446
924#define for_each_nondefault_queue(bp, var) \ 1447#define for_each_nondefault_queue(bp, var) \
925 for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) 1448 for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) \
1449 if (skip_queue(bp, var)) \
1450 continue; \
1451 else
1452
1453/* skip rx queue
1454 * if FCOE l2 support is disabled and this is the fcoe L2 queue
1455 */
1456#define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
926 1457
1458/* skip tx queue
1459 * if FCOE l2 support is disabled and this is the fcoe L2 queue
1460 */
1461#define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
1462
1463#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
1464
1465#define WAIT_RAMROD_POLL 0x01
1466#define WAIT_RAMROD_COMMON 0x02
927 1467
1468void bnx2x_read_mf_cfg(struct bnx2x *bp);
1469
1470/* dmae */
928void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); 1471void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
929void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, 1472void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
930 u32 len32); 1473 u32 len32);
1474void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
1475u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
1476u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
1477u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
1478 bool with_comp, u8 comp_type);
1479
931int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); 1480int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
932int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); 1481int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
933int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); 1482int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
934u32 bnx2x_fw_command(struct bnx2x *bp, u32 command); 1483u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
935void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val); 1484
936void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
937 u32 addr, u32 len);
938void bnx2x_calc_fc_adv(struct bnx2x *bp); 1485void bnx2x_calc_fc_adv(struct bnx2x *bp);
939int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 1486int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
940 u32 data_hi, u32 data_lo, int common); 1487 u32 data_hi, u32 data_lo, int common);
1488
1489/* Clears multicast and unicast list configuration in the chip. */
1490void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
1491void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
1492void bnx2x_invalidate_uc_list(struct bnx2x *bp);
1493
941void bnx2x_update_coalesce(struct bnx2x *bp); 1494void bnx2x_update_coalesce(struct bnx2x *bp);
1495int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
942 1496
943static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, 1497static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
944 int wait) 1498 int wait)
@@ -957,6 +1511,40 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
957 return val; 1511 return val;
958} 1512}
959 1513
1514#define BNX2X_ILT_ZALLOC(x, y, size) \
1515 do { \
1516 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
1517 if (x) \
1518 memset(x, 0, size); \
1519 } while (0)
1520
1521#define BNX2X_ILT_FREE(x, y, size) \
1522 do { \
1523 if (x) { \
1524 dma_free_coherent(&bp->pdev->dev, size, x, y); \
1525 x = NULL; \
1526 y = 0; \
1527 } \
1528 } while (0)
1529
1530#define ILOG2(x) (ilog2((x)))
1531
1532#define ILT_NUM_PAGE_ENTRIES (3072)
1533/* In 57710/11 we use whole table since we have 8 func
1534 * In 57712 we have only 4 func, but use same size per func, then only half of
1535 * the table in use
1536 */
1537#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8)
1538
1539#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
1540/*
1541 * the phys address is shifted right 12 bits and has an added
1542 * 1=valid bit added to the 53rd bit
1543 * then since this is a wide register(TM)
1544 * we split it into two 32 bit writes
1545 */
1546#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
1547#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
960 1548
961/* load/unload mode */ 1549/* load/unload mode */
962#define LOAD_NORMAL 0 1550#define LOAD_NORMAL 0
@@ -964,18 +1552,44 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
964#define LOAD_DIAG 2 1552#define LOAD_DIAG 2
965#define UNLOAD_NORMAL 0 1553#define UNLOAD_NORMAL 0
966#define UNLOAD_CLOSE 1 1554#define UNLOAD_CLOSE 1
967#define UNLOAD_RECOVERY 2 1555#define UNLOAD_RECOVERY 2
968 1556
969 1557
970/* DMAE command defines */ 1558/* DMAE command defines */
971#define DMAE_CMD_SRC_PCI 0 1559#define DMAE_TIMEOUT -1
972#define DMAE_CMD_SRC_GRC DMAE_COMMAND_SRC 1560#define DMAE_PCI_ERROR -2 /* E2 and onward */
1561#define DMAE_NOT_RDY -3
1562#define DMAE_PCI_ERR_FLAG 0x80000000
1563
1564#define DMAE_SRC_PCI 0
1565#define DMAE_SRC_GRC 1
1566
1567#define DMAE_DST_NONE 0
1568#define DMAE_DST_PCI 1
1569#define DMAE_DST_GRC 2
1570
1571#define DMAE_COMP_PCI 0
1572#define DMAE_COMP_GRC 1
973 1573
974#define DMAE_CMD_DST_PCI (1 << DMAE_COMMAND_DST_SHIFT) 1574/* E2 and onward - PCI error handling in the completion */
975#define DMAE_CMD_DST_GRC (2 << DMAE_COMMAND_DST_SHIFT)
976 1575
977#define DMAE_CMD_C_DST_PCI 0 1576#define DMAE_COMP_REGULAR 0
978#define DMAE_CMD_C_DST_GRC (1 << DMAE_COMMAND_C_DST_SHIFT) 1577#define DMAE_COM_SET_ERR 1
1578
1579#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << \
1580 DMAE_COMMAND_SRC_SHIFT)
1581#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << \
1582 DMAE_COMMAND_SRC_SHIFT)
1583
1584#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << \
1585 DMAE_COMMAND_DST_SHIFT)
1586#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << \
1587 DMAE_COMMAND_DST_SHIFT)
1588
1589#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << \
1590 DMAE_COMMAND_C_DST_SHIFT)
1591#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << \
1592 DMAE_COMMAND_C_DST_SHIFT)
979 1593
980#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE 1594#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE
981 1595
@@ -991,10 +1605,20 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
991#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET 1605#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET
992#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT 1606#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
993 1607
1608#define DMAE_SRC_PF 0
1609#define DMAE_SRC_VF 1
1610
1611#define DMAE_DST_PF 0
1612#define DMAE_DST_VF 1
1613
1614#define DMAE_C_SRC 0
1615#define DMAE_C_DST 1
1616
994#define DMAE_LEN32_RD_MAX 0x80 1617#define DMAE_LEN32_RD_MAX 0x80
995#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) 1618#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
996 1619
997#define DMAE_COMP_VAL 0xe0d0d0ae 1620#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit
1621 indicates eror */
998 1622
999#define MAX_DMAE_C_PER_PORT 8 1623#define MAX_DMAE_C_PER_PORT 8
1000#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 1624#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
@@ -1002,7 +1626,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1002#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 1626#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
1003 E1HVN_MAX) 1627 E1HVN_MAX)
1004 1628
1005
1006/* PCIE link and speed */ 1629/* PCIE link and speed */
1007#define PCICFG_LINK_WIDTH 0x1f00000 1630#define PCICFG_LINK_WIDTH 0x1f00000
1008#define PCICFG_LINK_WIDTH_SHIFT 20 1631#define PCICFG_LINK_WIDTH_SHIFT 20
@@ -1031,22 +1654,26 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1031#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) 1654#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
1032 1655
1033 1656
1034#define BNX2X_BTR 1 1657#define BNX2X_BTR 4
1035#define MAX_SPQ_PENDING 8 1658#define MAX_SPQ_PENDING 8
1036 1659
1037 1660/* CMNG constants, as derived from system spec calculations */
1038/* CMNG constants 1661/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
1039 derived from lab experiments, and not from system spec calculations !!! */ 1662#define DEF_MIN_RATE 100
1040#define DEF_MIN_RATE 100 1663/* resolution of the rate shaping timer - 400 usec */
1041/* resolution of the rate shaping timer - 100 usec */ 1664#define RS_PERIODIC_TIMEOUT_USEC 400
1042#define RS_PERIODIC_TIMEOUT_USEC 100
1043/* resolution of fairness algorithm in usecs -
1044 coefficient for calculating the actual t fair */
1045#define T_FAIR_COEF 10000000
1046/* number of bytes in single QM arbitration cycle - 1665/* number of bytes in single QM arbitration cycle -
1047 coefficient for calculating the fairness timer */ 1666 * coefficient for calculating the fairness timer */
1048#define QM_ARB_BYTES 40000 1667#define QM_ARB_BYTES 160000
1049#define FAIR_MEM 2 1668/* resolution of Min algorithm 1:100 */
1669#define MIN_RES 100
1670/* how many bytes above threshold for the minimal credit of Min algorithm*/
1671#define MIN_ABOVE_THRESH 32768
1672/* Fairness algorithm integration time coefficient -
1673 * for calculating the actual Tfair */
1674#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
1675/* Memory of fairness algorithm . 2 cycles */
1676#define FAIR_MEM 2
1050 1677
1051 1678
1052#define ATTN_NIG_FOR_FUNC (1L << 8) 1679#define ATTN_NIG_FOR_FUNC (1L << 8)
@@ -1148,24 +1775,26 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1148 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) 1775 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
1149#define MULTI_MASK 0x7f 1776#define MULTI_MASK 0x7f
1150 1777
1151
1152#define DEF_USB_FUNC_OFF (2 + 2*HC_USTORM_DEF_SB_NUM_INDICES)
1153#define DEF_CSB_FUNC_OFF (2 + 2*HC_CSTORM_DEF_SB_NUM_INDICES)
1154#define DEF_XSB_FUNC_OFF (2 + 2*HC_XSTORM_DEF_SB_NUM_INDICES)
1155#define DEF_TSB_FUNC_OFF (2 + 2*HC_TSTORM_DEF_SB_NUM_INDICES)
1156
1157#define C_DEF_SB_SP_INDEX HC_INDEX_DEF_C_ETH_SLOW_PATH
1158
1159#define BNX2X_SP_DSB_INDEX \ 1778#define BNX2X_SP_DSB_INDEX \
1160(&bp->def_status_blk->c_def_status_block.index_values[C_DEF_SB_SP_INDEX]) 1779 (&bp->def_status_blk->sp_sb.\
1780 index_values[HC_SP_INDEX_ETH_DEF_CONS])
1161 1781
1782#define SET_FLAG(value, mask, flag) \
1783 do {\
1784 (value) &= ~(mask);\
1785 (value) |= ((flag) << (mask##_SHIFT));\
1786 } while (0)
1162 1787
1163#define CAM_IS_INVALID(x) \ 1788#define GET_FLAG(value, mask) \
1164(x.target_table_entry.flags == TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE) 1789 (((value) &= (mask)) >> (mask##_SHIFT))
1165 1790
1166#define CAM_INVALIDATE(x) \ 1791#define GET_FIELD(value, fname) \
1167 (x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE) 1792 (((value) & (fname##_MASK)) >> (fname##_SHIFT))
1168 1793
1794#define CAM_IS_INVALID(x) \
1795 (GET_FLAG(x.flags, \
1796 MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
1797 (T_ETH_MAC_COMMAND_INVALIDATE))
1169 1798
1170/* Number of u32 elements in MC hash array */ 1799/* Number of u32 elements in MC hash array */
1171#define MC_HASH_SIZE 8 1800#define MC_HASH_SIZE 8
@@ -1177,21 +1806,30 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1177#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0 1806#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
1178#endif 1807#endif
1179 1808
1809#ifndef ETH_MAX_RX_CLIENTS_E2
1810#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
1811#endif
1812
1180#define BNX2X_VPD_LEN 128 1813#define BNX2X_VPD_LEN 128
1181#define VENDOR_ID_LEN 4 1814#define VENDOR_ID_LEN 4
1182 1815
1816/* Congestion management fairness mode */
1817#define CMNG_FNS_NONE 0
1818#define CMNG_FNS_MINMAX 1
1819
1820#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/
1821#define HC_SEG_ACCESS_ATTN 4
1822#define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/
1823
1183#ifdef BNX2X_MAIN 1824#ifdef BNX2X_MAIN
1184#define BNX2X_EXTERN 1825#define BNX2X_EXTERN
1185#else 1826#else
1186#define BNX2X_EXTERN extern 1827#define BNX2X_EXTERN extern
1187#endif 1828#endif
1188 1829
1189BNX2X_EXTERN int load_count[3]; /* 0-common, 1-port0, 2-port1 */ 1830BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
1190
1191/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
1192 1831
1193extern void bnx2x_set_ethtool_ops(struct net_device *netdev); 1832extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
1194 1833void bnx2x_push_indir_table(struct bnx2x *bp);
1195void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
1196 1834
1197#endif /* bnx2x.h */ 1835#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 02bf710629a3..289044332ed8 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -1,6 +1,6 @@
1/* bnx2x_cmn.c: Broadcom Everest network driver. 1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -15,18 +15,61 @@
15 * 15 *
16 */ 16 */
17 17
18
19#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/if_vlan.h>
20#include <linux/ip.h> 20#include <linux/ip.h>
21#include <linux/ipv6.h> 21#include <net/ipv6.h>
22#include <net/ip6_checksum.h> 22#include <net/ip6_checksum.h>
23#include <linux/firmware.h>
24#include <linux/prefetch.h>
23#include "bnx2x_cmn.h" 25#include "bnx2x_cmn.h"
24 26
25#ifdef BCM_VLAN 27#include "bnx2x_init.h"
26#include <linux/if_vlan.h> 28
27#endif 29static int bnx2x_setup_irqs(struct bnx2x *bp);
30
31/**
32 * bnx2x_bz_fp - zero content of the fastpath structure.
33 *
34 * @bp: driver handle
35 * @index: fastpath index to be zeroed
36 *
37 * Makes sure the contents of the bp->fp[index].napi is kept
38 * intact.
39 */
40static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
41{
42 struct bnx2x_fastpath *fp = &bp->fp[index];
43 struct napi_struct orig_napi = fp->napi;
44 /* bzero bnx2x_fastpath contents */
45 memset(fp, 0, sizeof(*fp));
28 46
29static int bnx2x_poll(struct napi_struct *napi, int budget); 47 /* Restore the NAPI object as it has been already initialized */
48 fp->napi = orig_napi;
49}
50
51/**
52 * bnx2x_move_fp - move content of the fastpath structure.
53 *
54 * @bp: driver handle
55 * @from: source FP index
56 * @to: destination FP index
57 *
58 * Makes sure the contents of the bp->fp[to].napi is kept
59 * intact.
60 */
61static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
62{
63 struct bnx2x_fastpath *from_fp = &bp->fp[from];
64 struct bnx2x_fastpath *to_fp = &bp->fp[to];
65 struct napi_struct orig_napi = to_fp->napi;
66 /* Move bnx2x_fastpath contents */
67 memcpy(to_fp, from_fp, sizeof(*to_fp));
68 to_fp->index = to;
69
70 /* Restore the NAPI object as it has been already initialized */
71 to_fp->napi = orig_napi;
72}
30 73
31/* free skb in the packet ring at pos idx 74/* free skb in the packet ring at pos idx
32 * return idx of last bd freed 75 * return idx of last bd freed
@@ -51,7 +94,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
51 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); 94 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
52 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd; 95 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
53 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), 96 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
54 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE); 97 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
55 98
56 nbd = le16_to_cpu(tx_start_bd->nbd) - 1; 99 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
57#ifdef BNX2X_STOP_ON_ERROR 100#ifdef BNX2X_STOP_ON_ERROR
@@ -88,7 +131,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
88 131
89 /* release skb */ 132 /* release skb */
90 WARN_ON(!skb); 133 WARN_ON(!skb);
91 dev_kfree_skb(skb); 134 dev_kfree_skb_any(skb);
92 tx_buf->first_bd = 0; 135 tx_buf->first_bd = 0;
93 tx_buf->skb = NULL; 136 tx_buf->skb = NULL;
94 137
@@ -115,16 +158,10 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
115 158
116 pkt_cons = TX_BD(sw_cons); 159 pkt_cons = TX_BD(sw_cons);
117 160
118 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */ 161 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
119 162 " pkt_cons %u\n",
120 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n", 163 fp->index, hw_cons, sw_cons, pkt_cons);
121 hw_cons, sw_cons, pkt_cons);
122 164
123/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
124 rmb();
125 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
126 }
127*/
128 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons); 165 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
129 sw_cons++; 166 sw_cons++;
130 } 167 }
@@ -140,7 +177,6 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
140 */ 177 */
141 smp_mb(); 178 smp_mb();
142 179
143 /* TBD need a thresh? */
144 if (unlikely(netif_tx_queue_stopped(txq))) { 180 if (unlikely(netif_tx_queue_stopped(txq))) {
145 /* Taking tx_lock() is needed to prevent reenabling the queue 181 /* Taking tx_lock() is needed to prevent reenabling the queue
146 * while it's empty. This could have happen if rx_action() gets 182 * while it's empty. This could have happen if rx_action() gets
@@ -189,14 +225,16 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
189 225
190 /* First mark all used pages */ 226 /* First mark all used pages */
191 for (i = 0; i < sge_len; i++) 227 for (i = 0; i < sge_len; i++)
192 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i]))); 228 SGE_MASK_CLEAR_BIT(fp,
229 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
193 230
194 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", 231 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
195 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); 232 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
196 233
197 /* Here we assume that the last SGE index is the biggest */ 234 /* Here we assume that the last SGE index is the biggest */
198 prefetch((void *)(fp->sge_mask)); 235 prefetch((void *)(fp->sge_mask));
199 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); 236 bnx2x_update_last_max_sge(fp,
237 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
200 238
201 last_max = RX_SGE(fp->last_max_sge); 239 last_max = RX_SGE(fp->last_max_sge);
202 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT; 240 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
@@ -238,7 +276,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
238 /* move empty skb from pool to prod and map it */ 276 /* move empty skb from pool to prod and map it */
239 prod_rx_buf->skb = fp->tpa_pool[queue].skb; 277 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
240 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data, 278 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
241 bp->rx_buf_size, DMA_FROM_DEVICE); 279 fp->rx_buf_size, DMA_FROM_DEVICE);
242 dma_unmap_addr_set(prod_rx_buf, mapping, mapping); 280 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
243 281
244 /* move partial skb from cons to pool (don't unmap yet) */ 282 /* move partial skb from cons to pool (don't unmap yet) */
@@ -265,10 +303,46 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
265#endif 303#endif
266} 304}
267 305
306/* Timestamp option length allowed for TPA aggregation:
307 *
308 * nop nop kind length echo val
309 */
310#define TPA_TSTAMP_OPT_LEN 12
311/**
312 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
313 *
314 * @bp: driver handle
315 * @parsing_flags: parsing flags from the START CQE
316 * @len_on_bd: total length of the first packet for the
317 * aggregation.
318 *
319 * Approximate value of the MSS for this aggregation calculated using
320 * the first packet of it.
321 */
322static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
323 u16 len_on_bd)
324{
325 /* TPA arrgregation won't have an IP options and TCP options
326 * other than timestamp.
327 */
328 u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
329
330
331 /* Check if there was a TCP timestamp, if there is it's will
332 * always be 12 bytes length: nop nop kind length echo val.
333 *
334 * Otherwise FW would close the aggregation.
335 */
336 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
337 hdrs_len += TPA_TSTAMP_OPT_LEN;
338
339 return len_on_bd - hdrs_len;
340}
341
268static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, 342static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
269 struct sk_buff *skb, 343 struct sk_buff *skb,
270 struct eth_fast_path_rx_cqe *fp_cqe, 344 struct eth_fast_path_rx_cqe *fp_cqe,
271 u16 cqe_idx) 345 u16 cqe_idx, u16 parsing_flags)
272{ 346{
273 struct sw_rx_page *rx_pg, old_rx_pg; 347 struct sw_rx_page *rx_pg, old_rx_pg;
274 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); 348 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
@@ -281,8 +355,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
281 355
282 /* This is needed in order to enable forwarding support */ 356 /* This is needed in order to enable forwarding support */
283 if (frag_size) 357 if (frag_size)
284 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, 358 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
285 max(frag_size, (u32)len_on_bd)); 359 len_on_bd);
286 360
287#ifdef BNX2X_STOP_ON_ERROR 361#ifdef BNX2X_STOP_ON_ERROR
288 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { 362 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -297,7 +371,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
297 371
298 /* Run through the SGL and compose the fragmented skb */ 372 /* Run through the SGL and compose the fragmented skb */
299 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { 373 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
300 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j])); 374 u16 sge_idx =
375 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
301 376
302 /* FW gives the indices of the SGE as if the ring is an array 377 /* FW gives the indices of the SGE as if the ring is an array
303 (meaning that "next" element will consume 2 indices) */ 378 (meaning that "next" element will consume 2 indices) */
@@ -338,33 +413,28 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
338 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue]; 413 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
339 struct sk_buff *skb = rx_buf->skb; 414 struct sk_buff *skb = rx_buf->skb;
340 /* alloc new skb */ 415 /* alloc new skb */
341 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); 416 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
342 417
343 /* Unmap skb in the pool anyway, as we are going to change 418 /* Unmap skb in the pool anyway, as we are going to change
344 pool entry status to BNX2X_TPA_STOP even if new skb allocation 419 pool entry status to BNX2X_TPA_STOP even if new skb allocation
345 fails. */ 420 fails. */
346 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), 421 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
347 bp->rx_buf_size, DMA_FROM_DEVICE); 422 fp->rx_buf_size, DMA_FROM_DEVICE);
348 423
349 if (likely(new_skb)) { 424 if (likely(new_skb)) {
350 /* fix ip xsum and give it to the stack */ 425 /* fix ip xsum and give it to the stack */
351 /* (no need to map the new skb) */ 426 /* (no need to map the new skb) */
352#ifdef BCM_VLAN 427 u16 parsing_flags =
353 int is_vlan_cqe = 428 le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
354 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
355 PARSING_FLAGS_VLAN);
356 int is_not_hwaccel_vlan_cqe =
357 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
358#endif
359 429
360 prefetch(skb); 430 prefetch(skb);
361 prefetch(((char *)(skb)) + 128); 431 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
362 432
363#ifdef BNX2X_STOP_ON_ERROR 433#ifdef BNX2X_STOP_ON_ERROR
364 if (pad + len > bp->rx_buf_size) { 434 if (pad + len > fp->rx_buf_size) {
365 BNX2X_ERR("skb_put is about to fail... " 435 BNX2X_ERR("skb_put is about to fail... "
366 "pad %d len %d rx_buf_size %d\n", 436 "pad %d len %d rx_buf_size %d\n",
367 pad, len, bp->rx_buf_size); 437 pad, len, fp->rx_buf_size);
368 bnx2x_panic(); 438 bnx2x_panic();
369 return; 439 return;
370 } 440 }
@@ -380,31 +450,22 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
380 struct iphdr *iph; 450 struct iphdr *iph;
381 451
382 iph = (struct iphdr *)skb->data; 452 iph = (struct iphdr *)skb->data;
383#ifdef BCM_VLAN
384 /* If there is no Rx VLAN offloading -
385 take VLAN tag into an account */
386 if (unlikely(is_not_hwaccel_vlan_cqe))
387 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
388#endif
389 iph->check = 0; 453 iph->check = 0;
390 iph->check = ip_fast_csum((u8 *)iph, iph->ihl); 454 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
391 } 455 }
392 456
393 if (!bnx2x_fill_frag_skb(bp, fp, skb, 457 if (!bnx2x_fill_frag_skb(bp, fp, skb,
394 &cqe->fast_path_cqe, cqe_idx)) { 458 &cqe->fast_path_cqe, cqe_idx,
395#ifdef BCM_VLAN 459 parsing_flags)) {
396 if ((bp->vlgrp != NULL) && is_vlan_cqe && 460 if (parsing_flags & PARSING_FLAGS_VLAN)
397 (!is_not_hwaccel_vlan_cqe)) 461 __vlan_hwaccel_put_tag(skb,
398 vlan_gro_receive(&fp->napi, bp->vlgrp,
399 le16_to_cpu(cqe->fast_path_cqe. 462 le16_to_cpu(cqe->fast_path_cqe.
400 vlan_tag), skb); 463 vlan_tag));
401 else 464 napi_gro_receive(&fp->napi, skb);
402#endif
403 napi_gro_receive(&fp->napi, skb);
404 } else { 465 } else {
405 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" 466 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
406 " - dropping packet!\n"); 467 " - dropping packet!\n");
407 dev_kfree_skb(skb); 468 dev_kfree_skb_any(skb);
408 } 469 }
409 470
410 471
@@ -509,8 +570,11 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
509 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); 570 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
510 pad = cqe->fast_path_cqe.placement_offset; 571 pad = cqe->fast_path_cqe.placement_offset;
511 572
512 /* If CQE is marked both TPA_START and TPA_END 573 /* - If CQE is marked both TPA_START and TPA_END it is
513 it is a non-TPA CQE */ 574 * a non-TPA CQE.
575 * - FP CQE will always have either TPA_START or/and
576 * TPA_STOP flags set.
577 */
514 if ((!fp->disable_tpa) && 578 if ((!fp->disable_tpa) &&
515 (TPA_TYPE(cqe_fp_flags) != 579 (TPA_TYPE(cqe_fp_flags) !=
516 (TPA_TYPE_START | TPA_TYPE_END))) { 580 (TPA_TYPE_START | TPA_TYPE_END))) {
@@ -528,9 +592,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
528 bnx2x_set_skb_rxhash(bp, cqe, skb); 592 bnx2x_set_skb_rxhash(bp, cqe, skb);
529 593
530 goto next_rx; 594 goto next_rx;
531 } 595 } else { /* TPA_STOP */
532
533 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
534 DP(NETIF_MSG_RX_STATUS, 596 DP(NETIF_MSG_RX_STATUS,
535 "calling tpa_stop on queue %d\n", 597 "calling tpa_stop on queue %d\n",
536 queue); 598 queue);
@@ -560,7 +622,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
560 dma_unmap_addr(rx_buf, mapping), 622 dma_unmap_addr(rx_buf, mapping),
561 pad + RX_COPY_THRESH, 623 pad + RX_COPY_THRESH,
562 DMA_FROM_DEVICE); 624 DMA_FROM_DEVICE);
563 prefetch(((char *)(skb)) + 128); 625 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
564 626
565 /* is this an error packet? */ 627 /* is this an error packet? */
566 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { 628 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
@@ -594,7 +656,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
594 skb_reserve(new_skb, pad); 656 skb_reserve(new_skb, pad);
595 skb_put(new_skb, len); 657 skb_put(new_skb, len);
596 658
597 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); 659 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
598 660
599 skb = new_skb; 661 skb = new_skb;
600 662
@@ -602,7 +664,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
602 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { 664 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
603 dma_unmap_single(&bp->pdev->dev, 665 dma_unmap_single(&bp->pdev->dev,
604 dma_unmap_addr(rx_buf, mapping), 666 dma_unmap_addr(rx_buf, mapping),
605 bp->rx_buf_size, 667 fp->rx_buf_size,
606 DMA_FROM_DEVICE); 668 DMA_FROM_DEVICE);
607 skb_reserve(skb, pad); 669 skb_reserve(skb, pad);
608 skb_put(skb, len); 670 skb_put(skb, len);
@@ -613,7 +675,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
613 "of alloc failure\n"); 675 "of alloc failure\n");
614 fp->eth_q_stats.rx_skb_alloc_failed++; 676 fp->eth_q_stats.rx_skb_alloc_failed++;
615reuse_rx: 677reuse_rx:
616 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); 678 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
617 goto next_rx; 679 goto next_rx;
618 } 680 }
619 681
@@ -622,8 +684,9 @@ reuse_rx:
622 /* Set Toeplitz hash for a none-LRO skb */ 684 /* Set Toeplitz hash for a none-LRO skb */
623 bnx2x_set_skb_rxhash(bp, cqe, skb); 685 bnx2x_set_skb_rxhash(bp, cqe, skb);
624 686
625 skb->ip_summed = CHECKSUM_NONE; 687 skb_checksum_none_assert(skb);
626 if (bp->rx_csum) { 688
689 if (bp->dev->features & NETIF_F_RXCSUM) {
627 if (likely(BNX2X_RX_CSUM_OK(cqe))) 690 if (likely(BNX2X_RX_CSUM_OK(cqe)))
628 skb->ip_summed = CHECKSUM_UNNECESSARY; 691 skb->ip_summed = CHECKSUM_UNNECESSARY;
629 else 692 else
@@ -633,15 +696,11 @@ reuse_rx:
633 696
634 skb_record_rx_queue(skb, fp->index); 697 skb_record_rx_queue(skb, fp->index);
635 698
636#ifdef BCM_VLAN 699 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
637 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) && 700 PARSING_FLAGS_VLAN)
638 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & 701 __vlan_hwaccel_put_tag(skb,
639 PARSING_FLAGS_VLAN)) 702 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
640 vlan_gro_receive(&fp->napi, bp->vlgrp, 703 napi_gro_receive(&fp->napi, skb);
641 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
642 else
643#endif
644 napi_gro_receive(&fp->napi, skb);
645 704
646 705
647next_rx: 706next_rx:
@@ -685,9 +744,10 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
685 return IRQ_HANDLED; 744 return IRQ_HANDLED;
686 } 745 }
687 746
688 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", 747 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
689 fp->index, fp->sb_id); 748 "[fp %d fw_sd %d igusb %d]\n",
690 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 749 fp->index, fp->fw_sb_id, fp->igu_sb_id);
750 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
691 751
692#ifdef BNX2X_STOP_ON_ERROR 752#ifdef BNX2X_STOP_ON_ERROR
693 if (unlikely(bp->panic)) 753 if (unlikely(bp->panic))
@@ -697,14 +757,12 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
697 /* Handle Rx and Tx according to MSI-X vector */ 757 /* Handle Rx and Tx according to MSI-X vector */
698 prefetch(fp->rx_cons_sb); 758 prefetch(fp->rx_cons_sb);
699 prefetch(fp->tx_cons_sb); 759 prefetch(fp->tx_cons_sb);
700 prefetch(&fp->status_blk->u_status_block.status_block_index); 760 prefetch(&fp->sb_running_index[SM_RX_ID]);
701 prefetch(&fp->status_blk->c_status_block.status_block_index);
702 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 761 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
703 762
704 return IRQ_HANDLED; 763 return IRQ_HANDLED;
705} 764}
706 765
707
708/* HW Lock for shared dual port PHYs */ 766/* HW Lock for shared dual port PHYs */
709void bnx2x_acquire_phy_lock(struct bnx2x *bp) 767void bnx2x_acquire_phy_lock(struct bnx2x *bp)
710{ 768{
@@ -722,43 +780,143 @@ void bnx2x_release_phy_lock(struct bnx2x *bp)
722 mutex_unlock(&bp->port.phy_mutex); 780 mutex_unlock(&bp->port.phy_mutex);
723} 781}
724 782
725void bnx2x_link_report(struct bnx2x *bp) 783/* calculates MF speed according to current linespeed and MF configuration */
784u16 bnx2x_get_mf_speed(struct bnx2x *bp)
726{ 785{
727 if (bp->flags & MF_FUNC_DIS) { 786 u16 line_speed = bp->link_vars.line_speed;
728 netif_carrier_off(bp->dev); 787 if (IS_MF(bp)) {
729 netdev_err(bp->dev, "NIC Link is Down\n"); 788 u16 maxCfg = bnx2x_extract_max_cfg(bp,
730 return; 789 bp->mf_config[BP_VN(bp)]);
731 }
732
733 if (bp->link_vars.link_up) {
734 u16 line_speed;
735 790
736 if (bp->state == BNX2X_STATE_OPEN) 791 /* Calculate the current MAX line speed limit for the MF
737 netif_carrier_on(bp->dev); 792 * devices
738 netdev_info(bp->dev, "NIC Link is Up, "); 793 */
739 794 if (IS_MF_SI(bp))
740 line_speed = bp->link_vars.line_speed; 795 line_speed = (line_speed * maxCfg) / 100;
741 if (IS_E1HMF(bp)) { 796 else { /* SD mode */
742 u16 vn_max_rate; 797 u16 vn_max_rate = maxCfg * 100;
743 798
744 vn_max_rate =
745 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
746 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
747 if (vn_max_rate < line_speed) 799 if (vn_max_rate < line_speed)
748 line_speed = vn_max_rate; 800 line_speed = vn_max_rate;
749 } 801 }
750 pr_cont("%d Mbps ", line_speed); 802 }
803
804 return line_speed;
805}
806
807/**
808 * bnx2x_fill_report_data - fill link report data to report
809 *
810 * @bp: driver handle
811 * @data: link state to update
812 *
813 * It uses a none-atomic bit operations because is called under the mutex.
814 */
815static inline void bnx2x_fill_report_data(struct bnx2x *bp,
816 struct bnx2x_link_report_data *data)
817{
818 u16 line_speed = bnx2x_get_mf_speed(bp);
819
820 memset(data, 0, sizeof(*data));
821
822 /* Fill the report data: efective line speed */
823 data->line_speed = line_speed;
824
825 /* Link is down */
826 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
827 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
828 &data->link_report_flags);
829
830 /* Full DUPLEX */
831 if (bp->link_vars.duplex == DUPLEX_FULL)
832 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
833
834 /* Rx Flow Control is ON */
835 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
836 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
837
838 /* Tx Flow Control is ON */
839 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
840 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
841}
842
843/**
844 * bnx2x_link_report - report link status to OS.
845 *
846 * @bp: driver handle
847 *
848 * Calls the __bnx2x_link_report() under the same locking scheme
849 * as a link/PHY state managing code to ensure a consistent link
850 * reporting.
851 */
852
853void bnx2x_link_report(struct bnx2x *bp)
854{
855 bnx2x_acquire_phy_lock(bp);
856 __bnx2x_link_report(bp);
857 bnx2x_release_phy_lock(bp);
858}
859
860/**
861 * __bnx2x_link_report - report link status to OS.
862 *
863 * @bp: driver handle
864 *
865 * None atomic inmlementation.
866 * Should be called under the phy_lock.
867 */
868void __bnx2x_link_report(struct bnx2x *bp)
869{
870 struct bnx2x_link_report_data cur_data;
871
872 /* reread mf_cfg */
873 if (!CHIP_IS_E1(bp))
874 bnx2x_read_mf_cfg(bp);
875
876 /* Read the current link report info */
877 bnx2x_fill_report_data(bp, &cur_data);
878
879 /* Don't report link down or exactly the same link status twice */
880 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
881 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
882 &bp->last_reported_link.link_report_flags) &&
883 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
884 &cur_data.link_report_flags)))
885 return;
751 886
752 if (bp->link_vars.duplex == DUPLEX_FULL) 887 bp->link_cnt++;
888
889 /* We are going to report a new link parameters now -
890 * remember the current data for the next time.
891 */
892 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
893
894 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
895 &cur_data.link_report_flags)) {
896 netif_carrier_off(bp->dev);
897 netdev_err(bp->dev, "NIC Link is Down\n");
898 return;
899 } else {
900 netif_carrier_on(bp->dev);
901 netdev_info(bp->dev, "NIC Link is Up, ");
902 pr_cont("%d Mbps ", cur_data.line_speed);
903
904 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
905 &cur_data.link_report_flags))
753 pr_cont("full duplex"); 906 pr_cont("full duplex");
754 else 907 else
755 pr_cont("half duplex"); 908 pr_cont("half duplex");
756 909
757 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) { 910 /* Handle the FC at the end so that only these flags would be
758 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) { 911 * possibly set. This way we may easily check if there is no FC
912 * enabled.
913 */
914 if (cur_data.link_report_flags) {
915 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
916 &cur_data.link_report_flags)) {
759 pr_cont(", receive "); 917 pr_cont(", receive ");
760 if (bp->link_vars.flow_ctrl & 918 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
761 BNX2X_FLOW_CTRL_TX) 919 &cur_data.link_report_flags))
762 pr_cont("& transmit "); 920 pr_cont("& transmit ");
763 } else { 921 } else {
764 pr_cont(", transmit "); 922 pr_cont(", transmit ");
@@ -766,10 +924,6 @@ void bnx2x_link_report(struct bnx2x *bp)
766 pr_cont("flow control ON"); 924 pr_cont("flow control ON");
767 } 925 }
768 pr_cont("\n"); 926 pr_cont("\n");
769
770 } else { /* link_down */
771 netif_carrier_off(bp->dev);
772 netdev_err(bp->dev, "NIC Link is Down\n");
773 } 927 }
774} 928}
775 929
@@ -778,21 +932,21 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
778 int func = BP_FUNC(bp); 932 int func = BP_FUNC(bp);
779 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : 933 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
780 ETH_MAX_AGGREGATION_QUEUES_E1H; 934 ETH_MAX_AGGREGATION_QUEUES_E1H;
781 u16 ring_prod, cqe_ring_prod; 935 u16 ring_prod;
782 int i, j; 936 int i, j;
783 937
784 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN; 938 /* Allocate TPA resources */
785 DP(NETIF_MSG_IFUP, 939 for_each_rx_queue(bp, j) {
786 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size); 940 struct bnx2x_fastpath *fp = &bp->fp[j];
787
788 if (bp->flags & TPA_ENABLE_FLAG) {
789 941
790 for_each_queue(bp, j) { 942 DP(NETIF_MSG_IFUP,
791 struct bnx2x_fastpath *fp = &bp->fp[j]; 943 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
792 944
945 if (!fp->disable_tpa) {
946 /* Fill the per-aggregation pool */
793 for (i = 0; i < max_agg_queues; i++) { 947 for (i = 0; i < max_agg_queues; i++) {
794 fp->tpa_pool[i].skb = 948 fp->tpa_pool[i].skb =
795 netdev_alloc_skb(bp->dev, bp->rx_buf_size); 949 netdev_alloc_skb(bp->dev, fp->rx_buf_size);
796 if (!fp->tpa_pool[i].skb) { 950 if (!fp->tpa_pool[i].skb) {
797 BNX2X_ERR("Failed to allocate TPA " 951 BNX2X_ERR("Failed to allocate TPA "
798 "skb pool for queue[%d] - " 952 "skb pool for queue[%d] - "
@@ -807,121 +961,70 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
807 mapping, 0); 961 mapping, 0);
808 fp->tpa_state[i] = BNX2X_TPA_STOP; 962 fp->tpa_state[i] = BNX2X_TPA_STOP;
809 } 963 }
964
965 /* "next page" elements initialization */
966 bnx2x_set_next_page_sgl(fp);
967
968 /* set SGEs bit mask */
969 bnx2x_init_sge_ring_bit_mask(fp);
970
971 /* Allocate SGEs and initialize the ring elements */
972 for (i = 0, ring_prod = 0;
973 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
974
975 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
976 BNX2X_ERR("was only able to allocate "
977 "%d rx sges\n", i);
978 BNX2X_ERR("disabling TPA for"
979 " queue[%d]\n", j);
980 /* Cleanup already allocated elements */
981 bnx2x_free_rx_sge_range(bp,
982 fp, ring_prod);
983 bnx2x_free_tpa_pool(bp,
984 fp, max_agg_queues);
985 fp->disable_tpa = 1;
986 ring_prod = 0;
987 break;
988 }
989 ring_prod = NEXT_SGE_IDX(ring_prod);
990 }
991
992 fp->rx_sge_prod = ring_prod;
810 } 993 }
811 } 994 }
812 995
813 for_each_queue(bp, j) { 996 for_each_rx_queue(bp, j) {
814 struct bnx2x_fastpath *fp = &bp->fp[j]; 997 struct bnx2x_fastpath *fp = &bp->fp[j];
815 998
816 fp->rx_bd_cons = 0; 999 fp->rx_bd_cons = 0;
817 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
818 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
819
820 /* "next page" elements initialization */
821 /* SGE ring */
822 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
823 struct eth_rx_sge *sge;
824
825 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
826 sge->addr_hi =
827 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
828 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
829 sge->addr_lo =
830 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
831 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
832 }
833
834 bnx2x_init_sge_ring_bit_mask(fp);
835
836 /* RX BD ring */
837 for (i = 1; i <= NUM_RX_RINGS; i++) {
838 struct eth_rx_bd *rx_bd;
839
840 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
841 rx_bd->addr_hi =
842 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
843 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
844 rx_bd->addr_lo =
845 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
846 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
847 }
848
849 /* CQ ring */
850 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
851 struct eth_rx_cqe_next_page *nextpg;
852
853 nextpg = (struct eth_rx_cqe_next_page *)
854 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
855 nextpg->addr_hi =
856 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
857 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
858 nextpg->addr_lo =
859 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
860 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
861 }
862
863 /* Allocate SGEs and initialize the ring elements */
864 for (i = 0, ring_prod = 0;
865 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
866
867 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
868 BNX2X_ERR("was only able to allocate "
869 "%d rx sges\n", i);
870 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
871 /* Cleanup already allocated elements */
872 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
873 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
874 fp->disable_tpa = 1;
875 ring_prod = 0;
876 break;
877 }
878 ring_prod = NEXT_SGE_IDX(ring_prod);
879 }
880 fp->rx_sge_prod = ring_prod;
881
882 /* Allocate BDs and initialize BD ring */
883 fp->rx_comp_cons = 0;
884 cqe_ring_prod = ring_prod = 0;
885 for (i = 0; i < bp->rx_ring_size; i++) {
886 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
887 BNX2X_ERR("was only able to allocate "
888 "%d rx skbs on queue[%d]\n", i, j);
889 fp->eth_q_stats.rx_skb_alloc_failed++;
890 break;
891 }
892 ring_prod = NEXT_RX_IDX(ring_prod);
893 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
894 WARN_ON(ring_prod <= i);
895 }
896
897 fp->rx_bd_prod = ring_prod;
898 /* must not have more available CQEs than BDs */
899 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
900 cqe_ring_prod);
901 fp->rx_pkt = fp->rx_calls = 0;
902 1000
1001 /* Activate BD ring */
903 /* Warning! 1002 /* Warning!
904 * this will generate an interrupt (to the TSTORM) 1003 * this will generate an interrupt (to the TSTORM)
905 * must only be done after chip is initialized 1004 * must only be done after chip is initialized
906 */ 1005 */
907 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod, 1006 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
908 fp->rx_sge_prod); 1007 fp->rx_sge_prod);
1008
909 if (j != 0) 1009 if (j != 0)
910 continue; 1010 continue;
911 1011
912 REG_WR(bp, BAR_USTRORM_INTMEM + 1012 if (!CHIP_IS_E2(bp)) {
913 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), 1013 REG_WR(bp, BAR_USTRORM_INTMEM +
914 U64_LO(fp->rx_comp_mapping)); 1014 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
915 REG_WR(bp, BAR_USTRORM_INTMEM + 1015 U64_LO(fp->rx_comp_mapping));
916 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, 1016 REG_WR(bp, BAR_USTRORM_INTMEM +
917 U64_HI(fp->rx_comp_mapping)); 1017 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1018 U64_HI(fp->rx_comp_mapping));
1019 }
918 } 1020 }
919} 1021}
1022
920static void bnx2x_free_tx_skbs(struct bnx2x *bp) 1023static void bnx2x_free_tx_skbs(struct bnx2x *bp)
921{ 1024{
922 int i; 1025 int i;
923 1026
924 for_each_queue(bp, i) { 1027 for_each_tx_queue(bp, i) {
925 struct bnx2x_fastpath *fp = &bp->fp[i]; 1028 struct bnx2x_fastpath *fp = &bp->fp[i];
926 1029
927 u16 bd_cons = fp->tx_bd_cons; 1030 u16 bd_cons = fp->tx_bd_cons;
@@ -935,27 +1038,40 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
935 } 1038 }
936} 1039}
937 1040
938static void bnx2x_free_rx_skbs(struct bnx2x *bp) 1041static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
939{ 1042{
940 int i, j; 1043 struct bnx2x *bp = fp->bp;
1044 int i;
941 1045
942 for_each_queue(bp, j) { 1046 /* ring wasn't allocated */
943 struct bnx2x_fastpath *fp = &bp->fp[j]; 1047 if (fp->rx_buf_ring == NULL)
1048 return;
944 1049
945 for (i = 0; i < NUM_RX_BD; i++) { 1050 for (i = 0; i < NUM_RX_BD; i++) {
946 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; 1051 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
947 struct sk_buff *skb = rx_buf->skb; 1052 struct sk_buff *skb = rx_buf->skb;
948 1053
949 if (skb == NULL) 1054 if (skb == NULL)
950 continue; 1055 continue;
951 1056
952 dma_unmap_single(&bp->pdev->dev, 1057 dma_unmap_single(&bp->pdev->dev,
953 dma_unmap_addr(rx_buf, mapping), 1058 dma_unmap_addr(rx_buf, mapping),
954 bp->rx_buf_size, DMA_FROM_DEVICE); 1059 fp->rx_buf_size, DMA_FROM_DEVICE);
1060
1061 rx_buf->skb = NULL;
1062 dev_kfree_skb(skb);
1063 }
1064}
1065
1066static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1067{
1068 int j;
1069
1070 for_each_rx_queue(bp, j) {
1071 struct bnx2x_fastpath *fp = &bp->fp[j];
1072
1073 bnx2x_free_rx_bds(fp);
955 1074
956 rx_buf->skb = NULL;
957 dev_kfree_skb(skb);
958 }
959 if (!fp->disable_tpa) 1075 if (!fp->disable_tpa)
960 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? 1076 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
961 ETH_MAX_AGGREGATION_QUEUES_E1 : 1077 ETH_MAX_AGGREGATION_QUEUES_E1 :
@@ -969,6 +1085,23 @@ void bnx2x_free_skbs(struct bnx2x *bp)
969 bnx2x_free_rx_skbs(bp); 1085 bnx2x_free_rx_skbs(bp);
970} 1086}
971 1087
1088void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1089{
1090 /* load old values */
1091 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1092
1093 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1094 /* leave all but MAX value */
1095 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1096
1097 /* set new MAX value */
1098 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1099 & FUNC_MF_CFG_MAX_BW_MASK;
1100
1101 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1102 }
1103}
1104
972static void bnx2x_free_msix_irqs(struct bnx2x *bp) 1105static void bnx2x_free_msix_irqs(struct bnx2x *bp)
973{ 1106{
974 int i, offset = 1; 1107 int i, offset = 1;
@@ -980,7 +1113,7 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
980#ifdef BCM_CNIC 1113#ifdef BCM_CNIC
981 offset++; 1114 offset++;
982#endif 1115#endif
983 for_each_queue(bp, i) { 1116 for_each_eth_queue(bp, i) {
984 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " 1117 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
985 "state %x\n", i, bp->msix_table[i + offset].vector, 1118 "state %x\n", i, bp->msix_table[i + offset].vector,
986 bnx2x_fp(bp, i, state)); 1119 bnx2x_fp(bp, i, state));
@@ -989,55 +1122,49 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
989 } 1122 }
990} 1123}
991 1124
992void bnx2x_free_irq(struct bnx2x *bp, bool disable_only) 1125void bnx2x_free_irq(struct bnx2x *bp)
993{ 1126{
994 if (bp->flags & USING_MSIX_FLAG) { 1127 if (bp->flags & USING_MSIX_FLAG)
995 if (!disable_only) 1128 bnx2x_free_msix_irqs(bp);
996 bnx2x_free_msix_irqs(bp); 1129 else if (bp->flags & USING_MSI_FLAG)
997 pci_disable_msix(bp->pdev); 1130 free_irq(bp->pdev->irq, bp->dev);
998 bp->flags &= ~USING_MSIX_FLAG; 1131 else
999
1000 } else if (bp->flags & USING_MSI_FLAG) {
1001 if (!disable_only)
1002 free_irq(bp->pdev->irq, bp->dev);
1003 pci_disable_msi(bp->pdev);
1004 bp->flags &= ~USING_MSI_FLAG;
1005
1006 } else if (!disable_only)
1007 free_irq(bp->pdev->irq, bp->dev); 1132 free_irq(bp->pdev->irq, bp->dev);
1008} 1133}
1009 1134
1010static int bnx2x_enable_msix(struct bnx2x *bp) 1135int bnx2x_enable_msix(struct bnx2x *bp)
1011{ 1136{
1012 int i, rc, offset = 1; 1137 int msix_vec = 0, i, rc, req_cnt;
1013 int igu_vec = 0;
1014 1138
1015 bp->msix_table[0].entry = igu_vec; 1139 bp->msix_table[msix_vec].entry = msix_vec;
1016 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec); 1140 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1141 bp->msix_table[0].entry);
1142 msix_vec++;
1017 1143
1018#ifdef BCM_CNIC 1144#ifdef BCM_CNIC
1019 igu_vec = BP_L_ID(bp) + offset; 1145 bp->msix_table[msix_vec].entry = msix_vec;
1020 bp->msix_table[1].entry = igu_vec; 1146 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1021 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec); 1147 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1022 offset++; 1148 msix_vec++;
1023#endif 1149#endif
1024 for_each_queue(bp, i) { 1150 for_each_eth_queue(bp, i) {
1025 igu_vec = BP_L_ID(bp) + offset + i; 1151 bp->msix_table[msix_vec].entry = msix_vec;
1026 bp->msix_table[i + offset].entry = igu_vec;
1027 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " 1152 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1028 "(fastpath #%u)\n", i + offset, igu_vec, i); 1153 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1154 msix_vec++;
1029 } 1155 }
1030 1156
1031 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1157 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1032 BNX2X_NUM_QUEUES(bp) + offset); 1158
1159 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1033 1160
1034 /* 1161 /*
1035 * reconfigure number of tx/rx queues according to available 1162 * reconfigure number of tx/rx queues according to available
1036 * MSI-X vectors 1163 * MSI-X vectors
1037 */ 1164 */
1038 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) { 1165 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1039 /* vectors available for FP */ 1166 /* how less vectors we will have? */
1040 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START; 1167 int diff = req_cnt - rc;
1041 1168
1042 DP(NETIF_MSG_IFUP, 1169 DP(NETIF_MSG_IFUP,
1043 "Trying to use less MSI-X vectors: %d\n", rc); 1170 "Trying to use less MSI-X vectors: %d\n", rc);
@@ -1049,12 +1176,17 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
1049 "MSI-X is not attainable rc %d\n", rc); 1176 "MSI-X is not attainable rc %d\n", rc);
1050 return rc; 1177 return rc;
1051 } 1178 }
1052 1179 /*
1053 bp->num_queues = min(bp->num_queues, fp_vec); 1180 * decrease number of queues by number of unallocated entries
1181 */
1182 bp->num_queues -= diff;
1054 1183
1055 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n", 1184 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1056 bp->num_queues); 1185 bp->num_queues);
1057 } else if (rc) { 1186 } else if (rc) {
1187 /* fall to INTx if not enough memory */
1188 if (rc == -ENOMEM)
1189 bp->flags |= DISABLE_MSI_FLAG;
1058 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); 1190 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1059 return rc; 1191 return rc;
1060 } 1192 }
@@ -1078,12 +1210,12 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1078#ifdef BCM_CNIC 1210#ifdef BCM_CNIC
1079 offset++; 1211 offset++;
1080#endif 1212#endif
1081 for_each_queue(bp, i) { 1213 for_each_eth_queue(bp, i) {
1082 struct bnx2x_fastpath *fp = &bp->fp[i]; 1214 struct bnx2x_fastpath *fp = &bp->fp[i];
1083 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", 1215 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1084 bp->dev->name, i); 1216 bp->dev->name, i);
1085 1217
1086 rc = request_irq(bp->msix_table[i + offset].vector, 1218 rc = request_irq(bp->msix_table[offset].vector,
1087 bnx2x_msix_fp_int, 0, fp->name, fp); 1219 bnx2x_msix_fp_int, 0, fp->name, fp);
1088 if (rc) { 1220 if (rc) {
1089 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc); 1221 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
@@ -1091,10 +1223,12 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1091 return -EBUSY; 1223 return -EBUSY;
1092 } 1224 }
1093 1225
1226 offset++;
1094 fp->state = BNX2X_FP_STATE_IRQ; 1227 fp->state = BNX2X_FP_STATE_IRQ;
1095 } 1228 }
1096 1229
1097 i = BNX2X_NUM_QUEUES(bp); 1230 i = BNX2X_NUM_ETH_QUEUES(bp);
1231 offset = 1 + CNIC_CONTEXT_USE;
1098 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" 1232 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1099 " ... fp[%d] %d\n", 1233 " ... fp[%d] %d\n",
1100 bp->msix_table[0].vector, 1234 bp->msix_table[0].vector,
@@ -1104,7 +1238,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1104 return 0; 1238 return 0;
1105} 1239}
1106 1240
1107static int bnx2x_enable_msi(struct bnx2x *bp) 1241int bnx2x_enable_msi(struct bnx2x *bp)
1108{ 1242{
1109 int rc; 1243 int rc;
1110 1244
@@ -1140,7 +1274,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
1140{ 1274{
1141 int i; 1275 int i;
1142 1276
1143 for_each_queue(bp, i) 1277 for_each_napi_queue(bp, i)
1144 napi_enable(&bnx2x_fp(bp, i, napi)); 1278 napi_enable(&bnx2x_fp(bp, i, napi));
1145} 1279}
1146 1280
@@ -1148,7 +1282,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp)
1148{ 1282{
1149 int i; 1283 int i;
1150 1284
1151 for_each_queue(bp, i) 1285 for_each_napi_queue(bp, i)
1152 napi_disable(&bnx2x_fp(bp, i, napi)); 1286 napi_disable(&bnx2x_fp(bp, i, napi));
1153} 1287}
1154 1288
@@ -1175,43 +1309,127 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1175 bnx2x_napi_disable(bp); 1309 bnx2x_napi_disable(bp);
1176 netif_tx_disable(bp->dev); 1310 netif_tx_disable(bp->dev);
1177} 1311}
1178static int bnx2x_set_num_queues(struct bnx2x *bp) 1312
1313u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1179{ 1314{
1180 int rc = 0; 1315#ifdef BCM_CNIC
1316 struct bnx2x *bp = netdev_priv(dev);
1317 if (NO_FCOE(bp))
1318 return skb_tx_hash(dev, skb);
1319 else {
1320 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1321 u16 ether_type = ntohs(hdr->h_proto);
1322
1323 /* Skip VLAN tag if present */
1324 if (ether_type == ETH_P_8021Q) {
1325 struct vlan_ethhdr *vhdr =
1326 (struct vlan_ethhdr *)skb->data;
1327
1328 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1329 }
1181 1330
1182 switch (bp->int_mode) { 1331 /* If ethertype is FCoE or FIP - use FCoE ring */
1183 case INT_MODE_INTx: 1332 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1184 case INT_MODE_MSI: 1333 return bnx2x_fcoe(bp, index);
1334 }
1335#endif
1336 /* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring
1337 */
1338 return __skb_tx_hash(dev, skb,
1339 dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1340}
1341
1342void bnx2x_set_num_queues(struct bnx2x *bp)
1343{
1344 switch (bp->multi_mode) {
1345 case ETH_RSS_MODE_DISABLED:
1185 bp->num_queues = 1; 1346 bp->num_queues = 1;
1186 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
1187 break; 1347 break;
1348 case ETH_RSS_MODE_REGULAR:
1349 bp->num_queues = bnx2x_calc_num_queues(bp);
1350 break;
1351
1188 default: 1352 default:
1189 /* Set number of queues according to bp->multi_mode value */ 1353 bp->num_queues = 1;
1190 bnx2x_set_num_queues_msix(bp); 1354 break;
1355 }
1191 1356
1192 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", 1357 /* Add special queues */
1193 bp->num_queues); 1358 bp->num_queues += NONE_ETH_CONTEXT_USE;
1359}
1194 1360
1195 /* if we can't use MSI-X we only need one fp, 1361#ifdef BCM_CNIC
1196 * so try to enable MSI-X with the requested number of fp's 1362static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
1197 * and fallback to MSI or legacy INTx with one fp 1363{
1198 */ 1364 if (!NO_FCOE(bp)) {
1199 rc = bnx2x_enable_msix(bp); 1365 if (!IS_MF_SD(bp))
1200 if (rc) 1366 bnx2x_set_fip_eth_mac_addr(bp, 1);
1201 /* failed to enable MSI-X */ 1367 bnx2x_set_all_enode_macs(bp, 1);
1202 bp->num_queues = 1; 1368 bp->flags |= FCOE_MACS_SET;
1203 break;
1204 } 1369 }
1205 bp->dev->real_num_tx_queues = bp->num_queues; 1370}
1371#endif
1372
1373static void bnx2x_release_firmware(struct bnx2x *bp)
1374{
1375 kfree(bp->init_ops_offsets);
1376 kfree(bp->init_ops);
1377 kfree(bp->init_data);
1378 release_firmware(bp->firmware);
1379}
1380
1381static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1382{
1383 int rc, num = bp->num_queues;
1384
1385#ifdef BCM_CNIC
1386 if (NO_FCOE(bp))
1387 num -= FCOE_CONTEXT_USE;
1388
1389#endif
1390 netif_set_real_num_tx_queues(bp->dev, num);
1391 rc = netif_set_real_num_rx_queues(bp->dev, num);
1206 return rc; 1392 return rc;
1207} 1393}
1208 1394
1395static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1396{
1397 int i;
1398
1399 for_each_queue(bp, i) {
1400 struct bnx2x_fastpath *fp = &bp->fp[i];
1401
1402 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1403 if (IS_FCOE_IDX(i))
1404 /*
1405 * Although there are no IP frames expected to arrive to
1406 * this ring we still want to add an
1407 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1408 * overrun attack.
1409 */
1410 fp->rx_buf_size =
1411 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1412 BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1413 else
1414 fp->rx_buf_size =
1415 bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
1416 IP_HEADER_ALIGNMENT_PADDING;
1417 }
1418}
1419
1209/* must be called with rtnl_lock */ 1420/* must be called with rtnl_lock */
1210int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 1421int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1211{ 1422{
1212 u32 load_code; 1423 u32 load_code;
1213 int i, rc; 1424 int i, rc;
1214 1425
1426 /* Set init arrays */
1427 rc = bnx2x_init_firmware(bp);
1428 if (rc) {
1429 BNX2X_ERR("Error loading firmware\n");
1430 return rc;
1431 }
1432
1215#ifdef BNX2X_STOP_ON_ERROR 1433#ifdef BNX2X_STOP_ON_ERROR
1216 if (unlikely(bp->panic)) 1434 if (unlikely(bp->panic))
1217 return -EPERM; 1435 return -EPERM;
@@ -1219,83 +1437,88 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1219 1437
1220 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 1438 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1221 1439
1222 rc = bnx2x_set_num_queues(bp); 1440 /* Set the initial link reported state to link down */
1441 bnx2x_acquire_phy_lock(bp);
1442 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1443 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1444 &bp->last_reported_link.link_report_flags);
1445 bnx2x_release_phy_lock(bp);
1223 1446
1224 if (bnx2x_alloc_mem(bp)) { 1447 /* must be called before memory allocation and HW init */
1225 bnx2x_free_irq(bp, true); 1448 bnx2x_ilt_set_info(bp);
1226 return -ENOMEM; 1449
1227 } 1450 /* zero fastpath structures preserving invariants like napi which are
1451 * allocated only once
1452 */
1453 for_each_queue(bp, i)
1454 bnx2x_bz_fp(bp, i);
1455
1456 /* Set the receive queues buffer size */
1457 bnx2x_set_rx_buf_size(bp);
1228 1458
1229 for_each_queue(bp, i) 1459 for_each_queue(bp, i)
1230 bnx2x_fp(bp, i, disable_tpa) = 1460 bnx2x_fp(bp, i, disable_tpa) =
1231 ((bp->flags & TPA_ENABLE_FLAG) == 0); 1461 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1232 1462
1233 for_each_queue(bp, i) 1463#ifdef BCM_CNIC
1234 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 1464 /* We don't want TPA on FCoE L2 ring */
1235 bnx2x_poll, 128); 1465 bnx2x_fcoe(bp, disable_tpa) = 1;
1466#endif
1236 1467
1237 bnx2x_napi_enable(bp); 1468 if (bnx2x_alloc_mem(bp))
1469 return -ENOMEM;
1238 1470
1239 if (bp->flags & USING_MSIX_FLAG) { 1471 /* As long as bnx2x_alloc_mem() may possibly update
1240 rc = bnx2x_req_msix_irqs(bp); 1472 * bp->num_queues, bnx2x_set_real_num_queues() should always
1241 if (rc) { 1473 * come after it.
1242 bnx2x_free_irq(bp, true); 1474 */
1243 goto load_error1; 1475 rc = bnx2x_set_real_num_queues(bp);
1244 } 1476 if (rc) {
1245 } else { 1477 BNX2X_ERR("Unable to set real_num_queues\n");
1246 /* Fall to INTx if failed to enable MSI-X due to lack of 1478 goto load_error0;
1247 memory (in bnx2x_set_num_queues()) */
1248 if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
1249 bnx2x_enable_msi(bp);
1250 bnx2x_ack_int(bp);
1251 rc = bnx2x_req_irq(bp);
1252 if (rc) {
1253 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1254 bnx2x_free_irq(bp, true);
1255 goto load_error1;
1256 }
1257 if (bp->flags & USING_MSI_FLAG) {
1258 bp->dev->irq = bp->pdev->irq;
1259 netdev_info(bp->dev, "using MSI IRQ %d\n",
1260 bp->pdev->irq);
1261 }
1262 } 1479 }
1263 1480
1481 bnx2x_napi_enable(bp);
1482
1264 /* Send LOAD_REQUEST command to MCP 1483 /* Send LOAD_REQUEST command to MCP
1265 Returns the type of LOAD command: 1484 Returns the type of LOAD command:
1266 if it is the first port to be initialized 1485 if it is the first port to be initialized
1267 common blocks should be initialized, otherwise - not 1486 common blocks should be initialized, otherwise - not
1268 */ 1487 */
1269 if (!BP_NOMCP(bp)) { 1488 if (!BP_NOMCP(bp)) {
1270 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); 1489 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1271 if (!load_code) { 1490 if (!load_code) {
1272 BNX2X_ERR("MCP response failure, aborting\n"); 1491 BNX2X_ERR("MCP response failure, aborting\n");
1273 rc = -EBUSY; 1492 rc = -EBUSY;
1274 goto load_error2; 1493 goto load_error1;
1275 } 1494 }
1276 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { 1495 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1277 rc = -EBUSY; /* other port in diagnostic mode */ 1496 rc = -EBUSY; /* other port in diagnostic mode */
1278 goto load_error2; 1497 goto load_error1;
1279 } 1498 }
1280 1499
1281 } else { 1500 } else {
1501 int path = BP_PATH(bp);
1282 int port = BP_PORT(bp); 1502 int port = BP_PORT(bp);
1283 1503
1284 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n", 1504 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1285 load_count[0], load_count[1], load_count[2]); 1505 path, load_count[path][0], load_count[path][1],
1286 load_count[0]++; 1506 load_count[path][2]);
1287 load_count[1 + port]++; 1507 load_count[path][0]++;
1288 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n", 1508 load_count[path][1 + port]++;
1289 load_count[0], load_count[1], load_count[2]); 1509 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1290 if (load_count[0] == 1) 1510 path, load_count[path][0], load_count[path][1],
1511 load_count[path][2]);
1512 if (load_count[path][0] == 1)
1291 load_code = FW_MSG_CODE_DRV_LOAD_COMMON; 1513 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1292 else if (load_count[1 + port] == 1) 1514 else if (load_count[path][1 + port] == 1)
1293 load_code = FW_MSG_CODE_DRV_LOAD_PORT; 1515 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1294 else 1516 else
1295 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; 1517 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1296 } 1518 }
1297 1519
1298 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 1520 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1521 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1299 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) 1522 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1300 bp->port.pmf = 1; 1523 bp->port.pmf = 1;
1301 else 1524 else
@@ -1306,16 +1529,22 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1306 rc = bnx2x_init_hw(bp, load_code); 1529 rc = bnx2x_init_hw(bp, load_code);
1307 if (rc) { 1530 if (rc) {
1308 BNX2X_ERR("HW init failed, aborting\n"); 1531 BNX2X_ERR("HW init failed, aborting\n");
1309 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); 1532 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1310 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP); 1533 goto load_error2;
1311 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 1534 }
1535
1536 /* Connect to IRQs */
1537 rc = bnx2x_setup_irqs(bp);
1538 if (rc) {
1539 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1312 goto load_error2; 1540 goto load_error2;
1313 } 1541 }
1314 1542
1315 /* Setup NIC internals and enable interrupts */ 1543 /* Setup NIC internals and enable interrupts */
1316 bnx2x_nic_init(bp, load_code); 1544 bnx2x_nic_init(bp, load_code);
1317 1545
1318 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) && 1546 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1547 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1319 (bp->common.shmem2_base)) 1548 (bp->common.shmem2_base))
1320 SHMEM2_WR(bp, dcc_support, 1549 SHMEM2_WR(bp, dcc_support,
1321 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | 1550 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
@@ -1323,7 +1552,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1323 1552
1324 /* Send LOAD_DONE command to MCP */ 1553 /* Send LOAD_DONE command to MCP */
1325 if (!BP_NOMCP(bp)) { 1554 if (!BP_NOMCP(bp)) {
1326 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); 1555 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1327 if (!load_code) { 1556 if (!load_code) {
1328 BNX2X_ERR("MCP response failure, aborting\n"); 1557 BNX2X_ERR("MCP response failure, aborting\n");
1329 rc = -EBUSY; 1558 rc = -EBUSY;
@@ -1331,9 +1560,22 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1331 } 1560 }
1332 } 1561 }
1333 1562
1563 bnx2x_dcbx_init(bp);
1564
1334 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; 1565 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1335 1566
1336 rc = bnx2x_setup_leading(bp); 1567 rc = bnx2x_func_start(bp);
1568 if (rc) {
1569 BNX2X_ERR("Function start failed!\n");
1570#ifndef BNX2X_STOP_ON_ERROR
1571 goto load_error3;
1572#else
1573 bp->panic = 1;
1574 return -EBUSY;
1575#endif
1576 }
1577
1578 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1337 if (rc) { 1579 if (rc) {
1338 BNX2X_ERR("Setup leading failed!\n"); 1580 BNX2X_ERR("Setup leading failed!\n");
1339#ifndef BNX2X_STOP_ON_ERROR 1581#ifndef BNX2X_STOP_ON_ERROR
@@ -1344,69 +1586,70 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1344#endif 1586#endif
1345 } 1587 }
1346 1588
1347 if (CHIP_IS_E1H(bp)) 1589 if (!CHIP_IS_E1(bp) &&
1348 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { 1590 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1349 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); 1591 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1350 bp->flags |= MF_FUNC_DIS; 1592 bp->flags |= MF_FUNC_DIS;
1351 } 1593 }
1352 1594
1353 if (bp->state == BNX2X_STATE_OPEN) {
1354#ifdef BCM_CNIC 1595#ifdef BCM_CNIC
1355 /* Enable Timer scan */ 1596 /* Enable Timer scan */
1356 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1); 1597 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1357#endif 1598#endif
1358 for_each_nondefault_queue(bp, i) { 1599
1359 rc = bnx2x_setup_multi(bp, i); 1600 for_each_nondefault_queue(bp, i) {
1360 if (rc) 1601 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1602 if (rc)
1361#ifdef BCM_CNIC 1603#ifdef BCM_CNIC
1362 goto load_error4; 1604 goto load_error4;
1363#else 1605#else
1364 goto load_error3; 1606 goto load_error3;
1365#endif 1607#endif
1366 } 1608 }
1609
1610 /* Now when Clients are configured we are ready to work */
1611 bp->state = BNX2X_STATE_OPEN;
1367 1612
1368 if (CHIP_IS_E1(bp))
1369 bnx2x_set_eth_mac_addr_e1(bp, 1);
1370 else
1371 bnx2x_set_eth_mac_addr_e1h(bp, 1);
1372#ifdef BCM_CNIC 1613#ifdef BCM_CNIC
1373 /* Set iSCSI L2 MAC */ 1614 bnx2x_set_fcoe_eth_macs(bp);
1374 mutex_lock(&bp->cnic_mutex);
1375 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
1376 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
1377 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
1378 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
1379 CNIC_SB_ID(bp));
1380 }
1381 mutex_unlock(&bp->cnic_mutex);
1382#endif 1615#endif
1616
1617 bnx2x_set_eth_mac(bp, 1);
1618
1619 /* Clear MC configuration */
1620 if (CHIP_IS_E1(bp))
1621 bnx2x_invalidate_e1_mc_list(bp);
1622 else
1623 bnx2x_invalidate_e1h_mc_list(bp);
1624
1625 /* Clear UC lists configuration */
1626 bnx2x_invalidate_uc_list(bp);
1627
1628 if (bp->pending_max) {
1629 bnx2x_update_max_mf_config(bp, bp->pending_max);
1630 bp->pending_max = 0;
1383 } 1631 }
1384 1632
1385 if (bp->port.pmf) 1633 if (bp->port.pmf)
1386 bnx2x_initial_phy_init(bp, load_mode); 1634 bnx2x_initial_phy_init(bp, load_mode);
1387 1635
1636 /* Initialize Rx filtering */
1637 bnx2x_set_rx_mode(bp->dev);
1638
1388 /* Start fast path */ 1639 /* Start fast path */
1389 switch (load_mode) { 1640 switch (load_mode) {
1390 case LOAD_NORMAL: 1641 case LOAD_NORMAL:
1391 if (bp->state == BNX2X_STATE_OPEN) { 1642 /* Tx queue should be only reenabled */
1392 /* Tx queue should be only reenabled */ 1643 netif_tx_wake_all_queues(bp->dev);
1393 netif_tx_wake_all_queues(bp->dev);
1394 }
1395 /* Initialize the receive filter. */ 1644 /* Initialize the receive filter. */
1396 bnx2x_set_rx_mode(bp->dev);
1397 break; 1645 break;
1398 1646
1399 case LOAD_OPEN: 1647 case LOAD_OPEN:
1400 netif_tx_start_all_queues(bp->dev); 1648 netif_tx_start_all_queues(bp->dev);
1401 if (bp->state != BNX2X_STATE_OPEN) 1649 smp_mb__after_clear_bit();
1402 netif_tx_disable(bp->dev);
1403 /* Initialize the receive filter. */
1404 bnx2x_set_rx_mode(bp->dev);
1405 break; 1650 break;
1406 1651
1407 case LOAD_DIAG: 1652 case LOAD_DIAG:
1408 /* Initialize the receive filter. */
1409 bnx2x_set_rx_mode(bp->dev);
1410 bp->state = BNX2X_STATE_DIAG; 1653 bp->state = BNX2X_STATE_DIAG;
1411 break; 1654 break;
1412 1655
@@ -1427,6 +1670,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1427#endif 1670#endif
1428 bnx2x_inc_load_cnt(bp); 1671 bnx2x_inc_load_cnt(bp);
1429 1672
1673 bnx2x_release_firmware(bp);
1674
1430 return 0; 1675 return 0;
1431 1676
1432#ifdef BCM_CNIC 1677#ifdef BCM_CNIC
@@ -1436,24 +1681,28 @@ load_error4:
1436#endif 1681#endif
1437load_error3: 1682load_error3:
1438 bnx2x_int_disable_sync(bp, 1); 1683 bnx2x_int_disable_sync(bp, 1);
1439 if (!BP_NOMCP(bp)) { 1684
1440 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
1441 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
1442 }
1443 bp->port.pmf = 0;
1444 /* Free SKBs, SGEs, TPA pool and driver internals */ 1685 /* Free SKBs, SGEs, TPA pool and driver internals */
1445 bnx2x_free_skbs(bp); 1686 bnx2x_free_skbs(bp);
1446 for_each_queue(bp, i) 1687 for_each_rx_queue(bp, i)
1447 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 1688 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1448load_error2: 1689
1449 /* Release IRQs */ 1690 /* Release IRQs */
1450 bnx2x_free_irq(bp, false); 1691 bnx2x_free_irq(bp);
1692load_error2:
1693 if (!BP_NOMCP(bp)) {
1694 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1695 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1696 }
1697
1698 bp->port.pmf = 0;
1451load_error1: 1699load_error1:
1452 bnx2x_napi_disable(bp); 1700 bnx2x_napi_disable(bp);
1453 for_each_queue(bp, i) 1701load_error0:
1454 netif_napi_del(&bnx2x_fp(bp, i, napi));
1455 bnx2x_free_mem(bp); 1702 bnx2x_free_mem(bp);
1456 1703
1704 bnx2x_release_firmware(bp);
1705
1457 return rc; 1706 return rc;
1458} 1707}
1459 1708
@@ -1481,30 +1730,34 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1481 bp->rx_mode = BNX2X_RX_MODE_NONE; 1730 bp->rx_mode = BNX2X_RX_MODE_NONE;
1482 bnx2x_set_storm_rx_mode(bp); 1731 bnx2x_set_storm_rx_mode(bp);
1483 1732
1484 /* Disable HW interrupts, NAPI and Tx */ 1733 /* Stop Tx */
1485 bnx2x_netif_stop(bp, 1); 1734 bnx2x_tx_disable(bp);
1486 netif_carrier_off(bp->dev);
1487 1735
1488 del_timer_sync(&bp->timer); 1736 del_timer_sync(&bp->timer);
1489 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, 1737
1738 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1490 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); 1739 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1491 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1492 1740
1493 /* Release IRQs */ 1741 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1494 bnx2x_free_irq(bp, false);
1495 1742
1496 /* Cleanup the chip if needed */ 1743 /* Cleanup the chip if needed */
1497 if (unload_mode != UNLOAD_RECOVERY) 1744 if (unload_mode != UNLOAD_RECOVERY)
1498 bnx2x_chip_cleanup(bp, unload_mode); 1745 bnx2x_chip_cleanup(bp, unload_mode);
1746 else {
1747 /* Disable HW interrupts, NAPI and Tx */
1748 bnx2x_netif_stop(bp, 1);
1749
1750 /* Release IRQs */
1751 bnx2x_free_irq(bp);
1752 }
1499 1753
1500 bp->port.pmf = 0; 1754 bp->port.pmf = 0;
1501 1755
1502 /* Free SKBs, SGEs, TPA pool and driver internals */ 1756 /* Free SKBs, SGEs, TPA pool and driver internals */
1503 bnx2x_free_skbs(bp); 1757 bnx2x_free_skbs(bp);
1504 for_each_queue(bp, i) 1758 for_each_rx_queue(bp, i)
1505 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 1759 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1506 for_each_queue(bp, i) 1760
1507 netif_napi_del(&bnx2x_fp(bp, i, napi));
1508 bnx2x_free_mem(bp); 1761 bnx2x_free_mem(bp);
1509 1762
1510 bp->state = BNX2X_STATE_CLOSED; 1763 bp->state = BNX2X_STATE_CLOSED;
@@ -1522,10 +1775,17 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1522 1775
1523 return 0; 1776 return 0;
1524} 1777}
1778
1525int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) 1779int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1526{ 1780{
1527 u16 pmcsr; 1781 u16 pmcsr;
1528 1782
1783 /* If there is no power capability, silently succeed */
1784 if (!bp->pm_cap) {
1785 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1786 return 0;
1787 }
1788
1529 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); 1789 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1530 1790
1531 switch (state) { 1791 switch (state) {
@@ -1568,13 +1828,10 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1568 return 0; 1828 return 0;
1569} 1829}
1570 1830
1571
1572
1573/* 1831/*
1574 * net_device service functions 1832 * net_device service functions
1575 */ 1833 */
1576 1834int bnx2x_poll(struct napi_struct *napi, int budget)
1577static int bnx2x_poll(struct napi_struct *napi, int budget)
1578{ 1835{
1579 int work_done = 0; 1836 int work_done = 0;
1580 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, 1837 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
@@ -1602,28 +1859,40 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
1602 1859
1603 /* Fall out from the NAPI loop if needed */ 1860 /* Fall out from the NAPI loop if needed */
1604 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 1861 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1862#ifdef BCM_CNIC
1863 /* No need to update SB for FCoE L2 ring as long as
1864 * it's connected to the default SB and the SB
1865 * has been updated when NAPI was scheduled.
1866 */
1867 if (IS_FCOE_FP(fp)) {
1868 napi_complete(napi);
1869 break;
1870 }
1871#endif
1872
1605 bnx2x_update_fpsb_idx(fp); 1873 bnx2x_update_fpsb_idx(fp);
1606 /* bnx2x_has_rx_work() reads the status block, thus we need 1874 /* bnx2x_has_rx_work() reads the status block,
1607 * to ensure that status block indices have been actually read 1875 * thus we need to ensure that status block indices
1608 * (bnx2x_update_fpsb_idx) prior to this check 1876 * have been actually read (bnx2x_update_fpsb_idx)
1609 * (bnx2x_has_rx_work) so that we won't write the "newer" 1877 * prior to this check (bnx2x_has_rx_work) so that
1610 * value of the status block to IGU (if there was a DMA right 1878 * we won't write the "newer" value of the status block
1611 * after bnx2x_has_rx_work and if there is no rmb, the memory 1879 * to IGU (if there was a DMA right after
1612 * reading (bnx2x_update_fpsb_idx) may be postponed to right 1880 * bnx2x_has_rx_work and if there is no rmb, the memory
1613 * before bnx2x_ack_sb). In this case there will never be 1881 * reading (bnx2x_update_fpsb_idx) may be postponed
1614 * another interrupt until there is another update of the 1882 * to right before bnx2x_ack_sb). In this case there
1615 * status block, while there is still unhandled work. 1883 * will never be another interrupt until there is
1616 */ 1884 * another update of the status block, while there
1885 * is still unhandled work.
1886 */
1617 rmb(); 1887 rmb();
1618 1888
1619 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 1889 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1620 napi_complete(napi); 1890 napi_complete(napi);
1621 /* Re-enable interrupts */ 1891 /* Re-enable interrupts */
1622 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 1892 DP(NETIF_MSG_HW,
1623 le16_to_cpu(fp->fp_c_idx), 1893 "Update index to %d\n", fp->fp_hc_idx);
1624 IGU_INT_NOP, 1); 1894 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1625 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 1895 le16_to_cpu(fp->fp_hc_idx),
1626 le16_to_cpu(fp->fp_u_idx),
1627 IGU_INT_ENABLE, 1); 1896 IGU_INT_ENABLE, 1);
1628 break; 1897 break;
1629 } 1898 }
@@ -1633,7 +1902,6 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
1633 return work_done; 1902 return work_done;
1634} 1903}
1635 1904
1636
1637/* we split the first BD into headers and data BDs 1905/* we split the first BD into headers and data BDs
1638 * to ease the pain of our fellow microcode engineers 1906 * to ease the pain of our fellow microcode engineers
1639 * we use one mapping for both BDs 1907 * we use one mapping for both BDs
@@ -1705,7 +1973,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1705 rc = XMIT_PLAIN; 1973 rc = XMIT_PLAIN;
1706 1974
1707 else { 1975 else {
1708 if (skb->protocol == htons(ETH_P_IPV6)) { 1976 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
1709 rc = XMIT_CSUM_V6; 1977 rc = XMIT_CSUM_V6;
1710 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 1978 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1711 rc |= XMIT_CSUM_TCP; 1979 rc |= XMIT_CSUM_TCP;
@@ -1717,11 +1985,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1717 } 1985 }
1718 } 1986 }
1719 1987
1720 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) 1988 if (skb_is_gso_v6(skb))
1721 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP); 1989 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1722 1990 else if (skb_is_gso(skb))
1723 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 1991 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
1724 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1725 1992
1726 return rc; 1993 return rc;
1727} 1994}
@@ -1807,6 +2074,135 @@ exit_lbl:
1807} 2074}
1808#endif 2075#endif
1809 2076
2077static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2078 u32 xmit_type)
2079{
2080 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2081 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2082 ETH_TX_PARSE_BD_E2_LSO_MSS;
2083 if ((xmit_type & XMIT_GSO_V6) &&
2084 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2085 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2086}
2087
2088/**
2089 * bnx2x_set_pbd_gso - update PBD in GSO case.
2090 *
2091 * @skb: packet skb
2092 * @pbd: parse BD
2093 * @xmit_type: xmit flags
2094 */
2095static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2096 struct eth_tx_parse_bd_e1x *pbd,
2097 u32 xmit_type)
2098{
2099 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2100 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2101 pbd->tcp_flags = pbd_tcp_flags(skb);
2102
2103 if (xmit_type & XMIT_GSO_V4) {
2104 pbd->ip_id = swab16(ip_hdr(skb)->id);
2105 pbd->tcp_pseudo_csum =
2106 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2107 ip_hdr(skb)->daddr,
2108 0, IPPROTO_TCP, 0));
2109
2110 } else
2111 pbd->tcp_pseudo_csum =
2112 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2113 &ipv6_hdr(skb)->daddr,
2114 0, IPPROTO_TCP, 0));
2115
2116 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2117}
2118
2119/**
2120 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2121 *
2122 * @bp: driver handle
2123 * @skb: packet skb
2124 * @parsing_data: data to be updated
2125 * @xmit_type: xmit flags
2126 *
2127 * 57712 related
2128 */
2129static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2130 u32 *parsing_data, u32 xmit_type)
2131{
2132 *parsing_data |=
2133 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2134 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2135 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2136
2137 if (xmit_type & XMIT_CSUM_TCP) {
2138 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2139 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2140 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2141
2142 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2143 } else
2144 /* We support checksum offload for TCP and UDP only.
2145 * No need to pass the UDP header length - it's a constant.
2146 */
2147 return skb_transport_header(skb) +
2148 sizeof(struct udphdr) - skb->data;
2149}
2150
2151/**
2152 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2153 *
2154 * @bp: driver handle
2155 * @skb: packet skb
2156 * @pbd: parse BD to be updated
2157 * @xmit_type: xmit flags
2158 */
2159static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2160 struct eth_tx_parse_bd_e1x *pbd,
2161 u32 xmit_type)
2162{
2163 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2164
2165 /* for now NS flag is not used in Linux */
2166 pbd->global_data =
2167 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2168 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2169
2170 pbd->ip_hlen_w = (skb_transport_header(skb) -
2171 skb_network_header(skb)) >> 1;
2172
2173 hlen += pbd->ip_hlen_w;
2174
2175 /* We support checksum offload for TCP and UDP only */
2176 if (xmit_type & XMIT_CSUM_TCP)
2177 hlen += tcp_hdrlen(skb) / 2;
2178 else
2179 hlen += sizeof(struct udphdr) / 2;
2180
2181 pbd->total_hlen_w = cpu_to_le16(hlen);
2182 hlen = hlen*2;
2183
2184 if (xmit_type & XMIT_CSUM_TCP) {
2185 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2186
2187 } else {
2188 s8 fix = SKB_CS_OFF(skb); /* signed! */
2189
2190 DP(NETIF_MSG_TX_QUEUED,
2191 "hlen %d fix %d csum before fix %x\n",
2192 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2193
2194 /* HW bug: fixup the CSUM */
2195 pbd->tcp_pseudo_csum =
2196 bnx2x_csum_fix(skb_transport_header(skb),
2197 SKB_CS(skb), fix);
2198
2199 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2200 pbd->tcp_pseudo_csum);
2201 }
2202
2203 return hlen;
2204}
2205
1810/* called with netif_tx_lock 2206/* called with netif_tx_lock
1811 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call 2207 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1812 * netif_wake_queue() 2208 * netif_wake_queue()
@@ -1819,7 +2215,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1819 struct sw_tx_bd *tx_buf; 2215 struct sw_tx_bd *tx_buf;
1820 struct eth_tx_start_bd *tx_start_bd; 2216 struct eth_tx_start_bd *tx_start_bd;
1821 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; 2217 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1822 struct eth_tx_parse_bd *pbd = NULL; 2218 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2219 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2220 u32 pbd_e2_parsing_data = 0;
1823 u16 pkt_prod, bd_prod; 2221 u16 pkt_prod, bd_prod;
1824 int nbd, fp_index; 2222 int nbd, fp_index;
1825 dma_addr_t mapping; 2223 dma_addr_t mapping;
@@ -1847,9 +2245,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1847 return NETDEV_TX_BUSY; 2245 return NETDEV_TX_BUSY;
1848 } 2246 }
1849 2247
1850 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)" 2248 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
1851 " gso type %x xmit_type %x\n", 2249 "protocol(%x,%x) gso type %x xmit_type %x\n",
1852 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 2250 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1853 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 2251 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1854 2252
1855 eth = (struct ethhdr *)skb->data; 2253 eth = (struct ethhdr *)skb->data;
@@ -1895,10 +2293,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1895 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; 2293 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
1896 2294
1897 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 2295 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1898 tx_start_bd->general_data = (mac_type << 2296 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
1899 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); 2297 mac_type);
2298
1900 /* header nbd */ 2299 /* header nbd */
1901 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 2300 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
1902 2301
1903 /* remember the first BD of the packet */ 2302 /* remember the first BD of the packet */
1904 tx_buf->first_bd = fp->tx_bd_prod; 2303 tx_buf->first_bd = fp->tx_bd_prod;
@@ -1909,37 +2308,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1909 "sending pkt %u @%p next_idx %u bd %u @%p\n", 2308 "sending pkt %u @%p next_idx %u bd %u @%p\n",
1910 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); 2309 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
1911 2310
1912#ifdef BCM_VLAN 2311 if (vlan_tx_tag_present(skb)) {
1913 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) && 2312 tx_start_bd->vlan_or_ethertype =
1914 (bp->flags & HW_VLAN_TX_FLAG)) { 2313 cpu_to_le16(vlan_tx_tag_get(skb));
1915 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); 2314 tx_start_bd->bd_flags.as_bitfield |=
1916 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; 2315 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
1917 } else 2316 } else
1918#endif 2317 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
1919 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
1920 2318
1921 /* turn on parsing and get a BD */ 2319 /* turn on parsing and get a BD */
1922 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 2320 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1923 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
1924
1925 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
1926 2321
1927 if (xmit_type & XMIT_CSUM) { 2322 if (xmit_type & XMIT_CSUM) {
1928 hlen = (skb_network_header(skb) - skb->data) / 2;
1929
1930 /* for now NS flag is not used in Linux */
1931 pbd->global_data =
1932 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1933 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
1934
1935 pbd->ip_hlen = (skb_transport_header(skb) -
1936 skb_network_header(skb)) / 2;
1937
1938 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
1939
1940 pbd->total_hlen = cpu_to_le16(hlen);
1941 hlen = hlen*2;
1942
1943 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; 2323 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
1944 2324
1945 if (xmit_type & XMIT_CSUM_V4) 2325 if (xmit_type & XMIT_CSUM_V4)
@@ -1949,31 +2329,33 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1949 tx_start_bd->bd_flags.as_bitfield |= 2329 tx_start_bd->bd_flags.as_bitfield |=
1950 ETH_TX_BD_FLAGS_IPV6; 2330 ETH_TX_BD_FLAGS_IPV6;
1951 2331
1952 if (xmit_type & XMIT_CSUM_TCP) { 2332 if (!(xmit_type & XMIT_CSUM_TCP))
1953 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); 2333 tx_start_bd->bd_flags.as_bitfield |=
1954 2334 ETH_TX_BD_FLAGS_IS_UDP;
1955 } else { 2335 }
1956 s8 fix = SKB_CS_OFF(skb); /* signed! */
1957
1958 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
1959
1960 DP(NETIF_MSG_TX_QUEUED,
1961 "hlen %d fix %d csum before fix %x\n",
1962 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
1963 2336
1964 /* HW bug: fixup the CSUM */ 2337 if (CHIP_IS_E2(bp)) {
1965 pbd->tcp_pseudo_csum = 2338 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
1966 bnx2x_csum_fix(skb_transport_header(skb), 2339 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
1967 SKB_CS(skb), fix); 2340 /* Set PBD in checksum offload case */
2341 if (xmit_type & XMIT_CSUM)
2342 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2343 &pbd_e2_parsing_data,
2344 xmit_type);
2345 } else {
2346 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2347 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2348 /* Set PBD in checksum offload case */
2349 if (xmit_type & XMIT_CSUM)
2350 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
1968 2351
1969 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1970 pbd->tcp_pseudo_csum);
1971 }
1972 } 2352 }
1973 2353
2354 /* Map skb linear data for DMA */
1974 mapping = dma_map_single(&bp->pdev->dev, skb->data, 2355 mapping = dma_map_single(&bp->pdev->dev, skb->data,
1975 skb_headlen(skb), DMA_TO_DEVICE); 2356 skb_headlen(skb), DMA_TO_DEVICE);
1976 2357
2358 /* Setup the data pointer of the first BD of the packet */
1977 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 2359 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1978 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 2360 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1979 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */ 2361 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
@@ -1985,7 +2367,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1985 " nbytes %d flags %x vlan %x\n", 2367 " nbytes %d flags %x vlan %x\n",
1986 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, 2368 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
1987 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), 2369 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
1988 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan)); 2370 tx_start_bd->bd_flags.as_bitfield,
2371 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
1989 2372
1990 if (xmit_type & XMIT_GSO) { 2373 if (xmit_type & XMIT_GSO) {
1991 2374
@@ -1999,28 +2382,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1999 if (unlikely(skb_headlen(skb) > hlen)) 2382 if (unlikely(skb_headlen(skb) > hlen))
2000 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, 2383 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2001 hlen, bd_prod, ++nbd); 2384 hlen, bd_prod, ++nbd);
2385 if (CHIP_IS_E2(bp))
2386 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2387 xmit_type);
2388 else
2389 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2390 }
2002 2391
2003 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 2392 /* Set the PBD's parsing_data field if not zero
2004 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); 2393 * (for the chips newer than 57711).
2005 pbd->tcp_flags = pbd_tcp_flags(skb); 2394 */
2006 2395 if (pbd_e2_parsing_data)
2007 if (xmit_type & XMIT_GSO_V4) { 2396 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2008 pbd->ip_id = swab16(ip_hdr(skb)->id);
2009 pbd->tcp_pseudo_csum =
2010 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2011 ip_hdr(skb)->daddr,
2012 0, IPPROTO_TCP, 0));
2013
2014 } else
2015 pbd->tcp_pseudo_csum =
2016 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2017 &ipv6_hdr(skb)->daddr,
2018 0, IPPROTO_TCP, 0));
2019 2397
2020 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
2021 }
2022 tx_data_bd = (struct eth_tx_bd *)tx_start_bd; 2398 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2023 2399
2400 /* Handle fragmented skb */
2024 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2401 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2025 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2402 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2026 2403
@@ -2057,14 +2434,21 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2057 if (total_pkt_bd != NULL) 2434 if (total_pkt_bd != NULL)
2058 total_pkt_bd->total_pkt_bytes = pkt_size; 2435 total_pkt_bd->total_pkt_bytes = pkt_size;
2059 2436
2060 if (pbd) 2437 if (pbd_e1x)
2061 DP(NETIF_MSG_TX_QUEUED, 2438 DP(NETIF_MSG_TX_QUEUED,
2062 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" 2439 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2063 " tcp_flags %x xsum %x seq %u hlen %u\n", 2440 " tcp_flags %x xsum %x seq %u hlen %u\n",
2064 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id, 2441 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2065 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum, 2442 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2066 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen)); 2443 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2067 2444 le16_to_cpu(pbd_e1x->total_hlen_w));
2445 if (pbd_e2)
2446 DP(NETIF_MSG_TX_QUEUED,
2447 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2448 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2449 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2450 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2451 pbd_e2->parsing_data);
2068 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); 2452 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2069 2453
2070 /* 2454 /*
@@ -2078,7 +2462,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2078 2462
2079 fp->tx_db.data.prod += nbd; 2463 fp->tx_db.data.prod += nbd;
2080 barrier(); 2464 barrier();
2081 DOORBELL(bp, fp->index, fp->tx_db.raw); 2465
2466 DOORBELL(bp, fp->cid, fp->tx_db.raw);
2082 2467
2083 mmiowb(); 2468 mmiowb();
2084 2469
@@ -2100,6 +2485,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2100 2485
2101 return NETDEV_TX_OK; 2486 return NETDEV_TX_OK;
2102} 2487}
2488
2103/* called with rtnl_lock */ 2489/* called with rtnl_lock */
2104int bnx2x_change_mac_addr(struct net_device *dev, void *p) 2490int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2105{ 2491{
@@ -2110,21 +2496,317 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2110 return -EINVAL; 2496 return -EINVAL;
2111 2497
2112 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 2498 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2113 if (netif_running(dev)) { 2499 if (netif_running(dev))
2114 if (CHIP_IS_E1(bp)) 2500 bnx2x_set_eth_mac(bp, 1);
2115 bnx2x_set_eth_mac_addr_e1(bp, 1); 2501
2502 return 0;
2503}
2504
2505static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2506{
2507 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2508 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
2509
2510 /* Common */
2511#ifdef BCM_CNIC
2512 if (IS_FCOE_IDX(fp_index)) {
2513 memset(sb, 0, sizeof(union host_hc_status_block));
2514 fp->status_blk_mapping = 0;
2515
2516 } else {
2517#endif
2518 /* status blocks */
2519 if (CHIP_IS_E2(bp))
2520 BNX2X_PCI_FREE(sb->e2_sb,
2521 bnx2x_fp(bp, fp_index,
2522 status_blk_mapping),
2523 sizeof(struct host_hc_status_block_e2));
2524 else
2525 BNX2X_PCI_FREE(sb->e1x_sb,
2526 bnx2x_fp(bp, fp_index,
2527 status_blk_mapping),
2528 sizeof(struct host_hc_status_block_e1x));
2529#ifdef BCM_CNIC
2530 }
2531#endif
2532 /* Rx */
2533 if (!skip_rx_queue(bp, fp_index)) {
2534 bnx2x_free_rx_bds(fp);
2535
2536 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2537 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
2538 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
2539 bnx2x_fp(bp, fp_index, rx_desc_mapping),
2540 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2541
2542 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
2543 bnx2x_fp(bp, fp_index, rx_comp_mapping),
2544 sizeof(struct eth_fast_path_rx_cqe) *
2545 NUM_RCQ_BD);
2546
2547 /* SGE ring */
2548 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
2549 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
2550 bnx2x_fp(bp, fp_index, rx_sge_mapping),
2551 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2552 }
2553
2554 /* Tx */
2555 if (!skip_tx_queue(bp, fp_index)) {
2556 /* fastpath tx rings: tx_buf tx_desc */
2557 BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
2558 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
2559 bnx2x_fp(bp, fp_index, tx_desc_mapping),
2560 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2561 }
2562 /* end of fastpath */
2563}
2564
2565void bnx2x_free_fp_mem(struct bnx2x *bp)
2566{
2567 int i;
2568 for_each_queue(bp, i)
2569 bnx2x_free_fp_mem_at(bp, i);
2570}
2571
2572static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
2573{
2574 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
2575 if (CHIP_IS_E2(bp)) {
2576 bnx2x_fp(bp, index, sb_index_values) =
2577 (__le16 *)status_blk.e2_sb->sb.index_values;
2578 bnx2x_fp(bp, index, sb_running_index) =
2579 (__le16 *)status_blk.e2_sb->sb.running_index;
2580 } else {
2581 bnx2x_fp(bp, index, sb_index_values) =
2582 (__le16 *)status_blk.e1x_sb->sb.index_values;
2583 bnx2x_fp(bp, index, sb_running_index) =
2584 (__le16 *)status_blk.e1x_sb->sb.running_index;
2585 }
2586}
2587
2588static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
2589{
2590 union host_hc_status_block *sb;
2591 struct bnx2x_fastpath *fp = &bp->fp[index];
2592 int ring_size = 0;
2593
2594 /* if rx_ring_size specified - use it */
2595 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
2596 MAX_RX_AVAIL/bp->num_queues;
2597
2598 /* allocate at least number of buffers required by FW */
2599 rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
2600 MIN_RX_SIZE_TPA,
2601 rx_ring_size);
2602
2603 bnx2x_fp(bp, index, bp) = bp;
2604 bnx2x_fp(bp, index, index) = index;
2605
2606 /* Common */
2607 sb = &bnx2x_fp(bp, index, status_blk);
2608#ifdef BCM_CNIC
2609 if (!IS_FCOE_IDX(index)) {
2610#endif
2611 /* status blocks */
2612 if (CHIP_IS_E2(bp))
2613 BNX2X_PCI_ALLOC(sb->e2_sb,
2614 &bnx2x_fp(bp, index, status_blk_mapping),
2615 sizeof(struct host_hc_status_block_e2));
2116 else 2616 else
2117 bnx2x_set_eth_mac_addr_e1h(bp, 1); 2617 BNX2X_PCI_ALLOC(sb->e1x_sb,
2618 &bnx2x_fp(bp, index, status_blk_mapping),
2619 sizeof(struct host_hc_status_block_e1x));
2620#ifdef BCM_CNIC
2621 }
2622#endif
2623 set_sb_shortcuts(bp, index);
2624
2625 /* Tx */
2626 if (!skip_tx_queue(bp, index)) {
2627 /* fastpath tx rings: tx_buf tx_desc */
2628 BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
2629 sizeof(struct sw_tx_bd) * NUM_TX_BD);
2630 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
2631 &bnx2x_fp(bp, index, tx_desc_mapping),
2632 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2633 }
2634
2635 /* Rx */
2636 if (!skip_rx_queue(bp, index)) {
2637 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2638 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
2639 sizeof(struct sw_rx_bd) * NUM_RX_BD);
2640 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
2641 &bnx2x_fp(bp, index, rx_desc_mapping),
2642 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2643
2644 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
2645 &bnx2x_fp(bp, index, rx_comp_mapping),
2646 sizeof(struct eth_fast_path_rx_cqe) *
2647 NUM_RCQ_BD);
2648
2649 /* SGE ring */
2650 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
2651 sizeof(struct sw_rx_page) * NUM_RX_SGE);
2652 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
2653 &bnx2x_fp(bp, index, rx_sge_mapping),
2654 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2655 /* RX BD ring */
2656 bnx2x_set_next_page_rx_bd(fp);
2657
2658 /* CQ ring */
2659 bnx2x_set_next_page_rx_cq(fp);
2660
2661 /* BDs */
2662 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
2663 if (ring_size < rx_ring_size)
2664 goto alloc_mem_err;
2665 }
2666
2667 return 0;
2668
2669/* handles low memory cases */
2670alloc_mem_err:
2671 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
2672 index, ring_size);
2673 /* FW will drop all packets if queue is not big enough,
2674 * In these cases we disable the queue
2675 * Min size diferent for TPA and non-TPA queues
2676 */
2677 if (ring_size < (fp->disable_tpa ?
2678 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
2679 /* release memory allocated for this queue */
2680 bnx2x_free_fp_mem_at(bp, index);
2681 return -ENOMEM;
2682 }
2683 return 0;
2684}
2685
2686int bnx2x_alloc_fp_mem(struct bnx2x *bp)
2687{
2688 int i;
2689
2690 /**
2691 * 1. Allocate FP for leading - fatal if error
2692 * 2. {CNIC} Allocate FCoE FP - fatal if error
2693 * 3. Allocate RSS - fix number of queues if error
2694 */
2695
2696 /* leading */
2697 if (bnx2x_alloc_fp_mem_at(bp, 0))
2698 return -ENOMEM;
2699#ifdef BCM_CNIC
2700 /* FCoE */
2701 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
2702 return -ENOMEM;
2703#endif
2704 /* RSS */
2705 for_each_nondefault_eth_queue(bp, i)
2706 if (bnx2x_alloc_fp_mem_at(bp, i))
2707 break;
2708
2709 /* handle memory failures */
2710 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
2711 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
2712
2713 WARN_ON(delta < 0);
2714#ifdef BCM_CNIC
2715 /**
2716 * move non eth FPs next to last eth FP
2717 * must be done in that order
2718 * FCOE_IDX < FWD_IDX < OOO_IDX
2719 */
2720
2721 /* move FCoE fp */
2722 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
2723#endif
2724 bp->num_queues -= delta;
2725 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
2726 bp->num_queues + delta, bp->num_queues);
2727 }
2728
2729 return 0;
2730}
2731
2732static int bnx2x_setup_irqs(struct bnx2x *bp)
2733{
2734 int rc = 0;
2735 if (bp->flags & USING_MSIX_FLAG) {
2736 rc = bnx2x_req_msix_irqs(bp);
2737 if (rc)
2738 return rc;
2739 } else {
2740 bnx2x_ack_int(bp);
2741 rc = bnx2x_req_irq(bp);
2742 if (rc) {
2743 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2744 return rc;
2745 }
2746 if (bp->flags & USING_MSI_FLAG) {
2747 bp->dev->irq = bp->pdev->irq;
2748 netdev_info(bp->dev, "using MSI IRQ %d\n",
2749 bp->pdev->irq);
2750 }
2118 } 2751 }
2119 2752
2120 return 0; 2753 return 0;
2121} 2754}
2122 2755
2756void bnx2x_free_mem_bp(struct bnx2x *bp)
2757{
2758 kfree(bp->fp);
2759 kfree(bp->msix_table);
2760 kfree(bp->ilt);
2761}
2762
2763int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2764{
2765 struct bnx2x_fastpath *fp;
2766 struct msix_entry *tbl;
2767 struct bnx2x_ilt *ilt;
2768
2769 /* fp array */
2770 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2771 if (!fp)
2772 goto alloc_err;
2773 bp->fp = fp;
2774
2775 /* msix table */
2776 tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
2777 GFP_KERNEL);
2778 if (!tbl)
2779 goto alloc_err;
2780 bp->msix_table = tbl;
2781
2782 /* ilt */
2783 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2784 if (!ilt)
2785 goto alloc_err;
2786 bp->ilt = ilt;
2787
2788 return 0;
2789alloc_err:
2790 bnx2x_free_mem_bp(bp);
2791 return -ENOMEM;
2792
2793}
2794
2795static int bnx2x_reload_if_running(struct net_device *dev)
2796{
2797 struct bnx2x *bp = netdev_priv(dev);
2798
2799 if (unlikely(!netif_running(dev)))
2800 return 0;
2801
2802 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2803 return bnx2x_nic_load(bp, LOAD_NORMAL);
2804}
2805
2123/* called with rtnl_lock */ 2806/* called with rtnl_lock */
2124int bnx2x_change_mtu(struct net_device *dev, int new_mtu) 2807int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2125{ 2808{
2126 struct bnx2x *bp = netdev_priv(dev); 2809 struct bnx2x *bp = netdev_priv(dev);
2127 int rc = 0;
2128 2810
2129 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 2811 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2130 printk(KERN_ERR "Handling parity error recovery. Try again later\n"); 2812 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
@@ -2141,49 +2823,69 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2141 */ 2823 */
2142 dev->mtu = new_mtu; 2824 dev->mtu = new_mtu;
2143 2825
2144 if (netif_running(dev)) { 2826 return bnx2x_reload_if_running(dev);
2145 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2146 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2147 }
2148
2149 return rc;
2150} 2827}
2151 2828
2152void bnx2x_tx_timeout(struct net_device *dev) 2829u32 bnx2x_fix_features(struct net_device *dev, u32 features)
2153{ 2830{
2154 struct bnx2x *bp = netdev_priv(dev); 2831 struct bnx2x *bp = netdev_priv(dev);
2155 2832
2156#ifdef BNX2X_STOP_ON_ERROR 2833 /* TPA requires Rx CSUM offloading */
2157 if (!bp->panic) 2834 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
2158 bnx2x_panic(); 2835 features &= ~NETIF_F_LRO;
2159#endif 2836
2160 /* This allows the netif to be shutdown gracefully before resetting */ 2837 return features;
2161 schedule_delayed_work(&bp->reset_task, 0);
2162} 2838}
2163 2839
2164#ifdef BCM_VLAN 2840int bnx2x_set_features(struct net_device *dev, u32 features)
2165/* called with rtnl_lock */
2166void bnx2x_vlan_rx_register(struct net_device *dev,
2167 struct vlan_group *vlgrp)
2168{ 2841{
2169 struct bnx2x *bp = netdev_priv(dev); 2842 struct bnx2x *bp = netdev_priv(dev);
2843 u32 flags = bp->flags;
2844 bool bnx2x_reload = false;
2170 2845
2171 bp->vlgrp = vlgrp; 2846 if (features & NETIF_F_LRO)
2847 flags |= TPA_ENABLE_FLAG;
2848 else
2849 flags &= ~TPA_ENABLE_FLAG;
2172 2850
2173 /* Set flags according to the required capabilities */ 2851 if (features & NETIF_F_LOOPBACK) {
2174 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG); 2852 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
2853 bp->link_params.loopback_mode = LOOPBACK_BMAC;
2854 bnx2x_reload = true;
2855 }
2856 } else {
2857 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
2858 bp->link_params.loopback_mode = LOOPBACK_NONE;
2859 bnx2x_reload = true;
2860 }
2861 }
2175 2862
2176 if (dev->features & NETIF_F_HW_VLAN_TX) 2863 if (flags ^ bp->flags) {
2177 bp->flags |= HW_VLAN_TX_FLAG; 2864 bp->flags = flags;
2865 bnx2x_reload = true;
2866 }
2178 2867
2179 if (dev->features & NETIF_F_HW_VLAN_RX) 2868 if (bnx2x_reload) {
2180 bp->flags |= HW_VLAN_RX_FLAG; 2869 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
2870 return bnx2x_reload_if_running(dev);
2871 /* else: bnx2x_nic_load() will be called at end of recovery */
2872 }
2181 2873
2182 if (netif_running(dev)) 2874 return 0;
2183 bnx2x_set_client_config(bp);
2184} 2875}
2185 2876
2877void bnx2x_tx_timeout(struct net_device *dev)
2878{
2879 struct bnx2x *bp = netdev_priv(dev);
2880
2881#ifdef BNX2X_STOP_ON_ERROR
2882 if (!bp->panic)
2883 bnx2x_panic();
2186#endif 2884#endif
2885 /* This allows the netif to be shutdown gracefully before resetting */
2886 schedule_delayed_work(&bp->reset_task, 0);
2887}
2888
2187int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) 2889int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2188{ 2890{
2189 struct net_device *dev = pci_get_drvdata(pdev); 2891 struct net_device *dev = pci_get_drvdata(pdev);
@@ -2244,6 +2946,8 @@ int bnx2x_resume(struct pci_dev *pdev)
2244 bnx2x_set_power_state(bp, PCI_D0); 2946 bnx2x_set_power_state(bp, PCI_D0);
2245 netif_device_attach(dev); 2947 netif_device_attach(dev);
2246 2948
2949 /* Since the chip was reset, clear the FW sequence number */
2950 bp->fw_seq = 0;
2247 rc = bnx2x_nic_load(bp, LOAD_OPEN); 2951 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2248 2952
2249 rtnl_unlock(); 2953 rtnl_unlock();
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index d1979b1a7ed2..1a3545bd8a92 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -1,6 +1,6 @@
1/* bnx2x_cmn.h: Broadcom Everest network driver. 1/* bnx2x_cmn.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -23,249 +23,279 @@
23 23
24#include "bnx2x.h" 24#include "bnx2x.h"
25 25
26extern int num_queues;
27
28/************************ Macros ********************************/
29#define BNX2X_PCI_FREE(x, y, size) \
30 do { \
31 if (x) { \
32 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
33 x = NULL; \
34 y = 0; \
35 } \
36 } while (0)
37
38#define BNX2X_FREE(x) \
39 do { \
40 if (x) { \
41 kfree((void *)x); \
42 x = NULL; \
43 } \
44 } while (0)
45
46#define BNX2X_PCI_ALLOC(x, y, size) \
47 do { \
48 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
49 if (x == NULL) \
50 goto alloc_mem_err; \
51 memset((void *)x, 0, size); \
52 } while (0)
53
54#define BNX2X_ALLOC(x, size) \
55 do { \
56 x = kzalloc(size, GFP_KERNEL); \
57 if (x == NULL) \
58 goto alloc_mem_err; \
59 } while (0)
26 60
27/*********************** Interfaces **************************** 61/*********************** Interfaces ****************************
28 * Functions that need to be implemented by each driver version 62 * Functions that need to be implemented by each driver version
29 */ 63 */
30 64
31/** 65/**
32 * Initialize link parameters structure variables. 66 * bnx2x_initial_phy_init - initialize link parameters structure variables.
33 * 67 *
34 * @param bp 68 * @bp: driver handle
35 * @param load_mode 69 * @load_mode: current mode
36 *
37 * @return u8
38 */ 70 */
39u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); 71u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
40 72
41/** 73/**
42 * Configure hw according to link parameters structure. 74 * bnx2x_link_set - configure hw according to link parameters structure.
43 * 75 *
44 * @param bp 76 * @bp: driver handle
45 */ 77 */
46void bnx2x_link_set(struct bnx2x *bp); 78void bnx2x_link_set(struct bnx2x *bp);
47 79
48/** 80/**
49 * Query link status 81 * bnx2x_link_test - query link status.
50 * 82 *
51 * @param bp 83 * @bp: driver handle
84 * @is_serdes: bool
52 * 85 *
53 * @return 0 - link is UP 86 * Returns 0 if link is UP.
54 */ 87 */
55u8 bnx2x_link_test(struct bnx2x *bp); 88u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
56 89
57/** 90/**
58 * Handles link status change 91 * bnx2x__link_status_update - handles link status change.
59 * 92 *
60 * @param bp 93 * @bp: driver handle
61 */ 94 */
62void bnx2x__link_status_update(struct bnx2x *bp); 95void bnx2x__link_status_update(struct bnx2x *bp);
63 96
64/** 97/**
65 * MSI-X slowpath interrupt handler 98 * bnx2x_link_report - report link status to upper layer.
66 * 99 *
67 * @param irq 100 * @bp: driver handle
68 * @param dev_instance 101 */
102void bnx2x_link_report(struct bnx2x *bp);
103
104/* None-atomic version of bnx2x_link_report() */
105void __bnx2x_link_report(struct bnx2x *bp);
106
107/**
108 * bnx2x_get_mf_speed - calculate MF speed.
69 * 109 *
70 * @return irqreturn_t 110 * @bp: driver handle
111 *
112 * Takes into account current linespeed and MF configuration.
71 */ 113 */
72irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance); 114u16 bnx2x_get_mf_speed(struct bnx2x *bp);
73 115
74/** 116/**
75 * non MSI-X interrupt handler 117 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
76 * 118 *
77 * @param irq 119 * @irq: irq number
78 * @param dev_instance 120 * @dev_instance: private instance
121 */
122irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
123
124/**
125 * bnx2x_interrupt - non MSI-X interrupt handler
79 * 126 *
80 * @return irqreturn_t 127 * @irq: irq number
128 * @dev_instance: private instance
81 */ 129 */
82irqreturn_t bnx2x_interrupt(int irq, void *dev_instance); 130irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
83#ifdef BCM_CNIC 131#ifdef BCM_CNIC
84 132
85/** 133/**
86 * Send command to cnic driver 134 * bnx2x_cnic_notify - send command to cnic driver
87 * 135 *
88 * @param bp 136 * @bp: driver handle
89 * @param cmd 137 * @cmd: command
90 */ 138 */
91int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); 139int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
92 140
93/** 141/**
94 * Provides cnic information for proper interrupt handling 142 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
95 * 143 *
96 * @param bp 144 * @bp: driver handle
97 */ 145 */
98void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); 146void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
99#endif 147#endif
100 148
101/** 149/**
102 * Enable HW interrupts. 150 * bnx2x_int_enable - enable HW interrupts.
103 * 151 *
104 * @param bp 152 * @bp: driver handle
105 */ 153 */
106void bnx2x_int_enable(struct bnx2x *bp); 154void bnx2x_int_enable(struct bnx2x *bp);
107 155
108/** 156/**
109 * Disable interrupts. This function ensures that there are no 157 * bnx2x_int_disable_sync - disable interrupts.
110 * ISRs or SP DPCs (sp_task) are running after it returns. 158 *
159 * @bp: driver handle
160 * @disable_hw: true, disable HW interrupts.
111 * 161 *
112 * @param bp 162 * This function ensures that there are no
113 * @param disable_hw if true, disable HW interrupts. 163 * ISRs or SP DPCs (sp_task) are running after it returns.
114 */ 164 */
115void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); 165void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
116 166
117/** 167/**
118 * Init HW blocks according to current initialization stage: 168 * bnx2x_init_firmware - loads device firmware
119 * COMMON, PORT or FUNCTION.
120 * 169 *
121 * @param bp 170 * @bp: driver handle
122 * @param load_code: COMMON, PORT or FUNCTION 171 */
172int bnx2x_init_firmware(struct bnx2x *bp);
173
174/**
175 * bnx2x_init_hw - init HW blocks according to current initialization stage.
123 * 176 *
124 * @return int 177 * @bp: driver handle
178 * @load_code: COMMON, PORT or FUNCTION
125 */ 179 */
126int bnx2x_init_hw(struct bnx2x *bp, u32 load_code); 180int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
127 181
128/** 182/**
129 * Init driver internals: 183 * bnx2x_nic_init - init driver internals.
184 *
185 * @bp: driver handle
186 * @load_code: COMMON, PORT or FUNCTION
187 *
188 * Initializes:
130 * - rings 189 * - rings
131 * - status blocks 190 * - status blocks
132 * - etc. 191 * - etc.
133 *
134 * @param bp
135 * @param load_code COMMON, PORT or FUNCTION
136 */ 192 */
137void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); 193void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
138 194
139/** 195/**
140 * Allocate driver's memory. 196 * bnx2x_alloc_mem - allocate driver's memory.
141 *
142 * @param bp
143 * 197 *
144 * @return int 198 * @bp: driver handle
145 */ 199 */
146int bnx2x_alloc_mem(struct bnx2x *bp); 200int bnx2x_alloc_mem(struct bnx2x *bp);
147 201
148/** 202/**
149 * Release driver's memory. 203 * bnx2x_free_mem - release driver's memory.
150 * 204 *
151 * @param bp 205 * @bp: driver handle
152 */ 206 */
153void bnx2x_free_mem(struct bnx2x *bp); 207void bnx2x_free_mem(struct bnx2x *bp);
154 208
155/** 209/**
156 * Bring up a leading (the first) eth Client. 210 * bnx2x_setup_client - setup eth client.
157 * 211 *
158 * @param bp 212 * @bp: driver handle
159 * 213 * @fp: pointer to fastpath structure
160 * @return int 214 * @is_leading: boolean
161 */ 215 */
162int bnx2x_setup_leading(struct bnx2x *bp); 216int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
217 int is_leading);
163 218
164/** 219/**
165 * Setup non-leading eth Client. 220 * bnx2x_set_num_queues - set number of queues according to mode.
166 *
167 * @param bp
168 * @param fp
169 * 221 *
170 * @return int 222 * @bp: driver handle
171 */ 223 */
172int bnx2x_setup_multi(struct bnx2x *bp, int index); 224void bnx2x_set_num_queues(struct bnx2x *bp);
173 225
174/** 226/**
175 * Set number of quueus according to mode and number of available 227 * bnx2x_chip_cleanup - cleanup chip internals.
176 * msi-x vectors
177 * 228 *
178 * @param bp 229 * @bp: driver handle
230 * @unload_mode: COMMON, PORT, FUNCTION
179 * 231 *
180 */
181void bnx2x_set_num_queues_msix(struct bnx2x *bp);
182
183/**
184 * Cleanup chip internals:
185 * - Cleanup MAC configuration. 232 * - Cleanup MAC configuration.
186 * - Close clients. 233 * - Closes clients.
187 * - etc. 234 * - etc.
188 *
189 * @param bp
190 * @param unload_mode
191 */ 235 */
192void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode); 236void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
193 237
194/** 238/**
195 * Acquire HW lock. 239 * bnx2x_acquire_hw_lock - acquire HW lock.
196 * 240 *
197 * @param bp 241 * @bp: driver handle
198 * @param resource Resource bit which was locked 242 * @resource: resource bit which was locked
199 *
200 * @return int
201 */ 243 */
202int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource); 244int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
203 245
204/** 246/**
205 * Release HW lock. 247 * bnx2x_release_hw_lock - release HW lock.
206 *
207 * @param bp driver handle
208 * @param resource Resource bit which was locked
209 * 248 *
210 * @return int 249 * @bp: driver handle
250 * @resource: resource bit which was locked
211 */ 251 */
212int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); 252int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
213 253
214/** 254/**
215 * Configure eth MAC address in the HW according to the value in 255 * bnx2x_set_eth_mac - configure eth MAC address in the HW
216 * netdev->dev_addr for 57711
217 * 256 *
218 * @param bp driver handle 257 * @bp: driver handle
219 * @param set 258 * @set: set or clear
220 */
221void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
222
223/**
224 * Configure eth MAC address in the HW according to the value in
225 * netdev->dev_addr for 57710
226 * 259 *
227 * @param bp driver handle 260 * Configures according to the value in netdev->dev_addr.
228 * @param set
229 */ 261 */
230void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set); 262void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
231 263
232#ifdef BCM_CNIC 264#ifdef BCM_CNIC
233/** 265/**
234 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH 266 * bnx2x_set_fip_eth_mac_addr - Set/Clear FIP MAC(s)
235 * MAC(s). The function will wait until the ramrod completion
236 * returns.
237 * 267 *
238 * @param bp driver handle 268 * @bp: driver handle
239 * @param set set or clear the CAM entry 269 * @set: set or clear the CAM entry
240 * 270 *
241 * @return 0 if cussess, -ENODEV if ramrod doesn't return. 271 * Used next enties in the CAM after the ETH MAC(s).
272 * This function will wait until the ramdord completion returns.
273 * Return 0 if cussess, -ENODEV if ramrod doesn't return.
242 */ 274 */
243int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set); 275int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set);
244#endif
245 276
246/** 277/**
247 * Initialize status block in FW and HW 278 * bnx2x_set_all_enode_macs - Set/Clear ALL_ENODE mcast MAC.
248 * 279 *
249 * @param bp driver handle 280 * @bp: driver handle
250 * @param sb host_status_block 281 * @set: set or clear
251 * @param dma_addr_t mapping
252 * @param int sb_id
253 */ 282 */
254void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, 283int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
255 dma_addr_t mapping, int sb_id); 284#endif
256 285
257/** 286/**
258 * Reconfigure FW/HW according to dev->flags rx mode 287 * bnx2x_set_rx_mode - set MAC filtering configurations.
259 * 288 *
260 * @param dev net_device 289 * @dev: netdevice
261 * 290 *
291 * called with netif_tx_lock from dev_mcast.c
262 */ 292 */
263void bnx2x_set_rx_mode(struct net_device *dev); 293void bnx2x_set_rx_mode(struct net_device *dev);
264 294
265/** 295/**
266 * Configure MAC filtering rules in a FW. 296 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
267 * 297 *
268 * @param bp driver handle 298 * @bp: driver handle
269 */ 299 */
270void bnx2x_set_storm_rx_mode(struct bnx2x *bp); 300void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
271 301
@@ -277,37 +307,160 @@ bool bnx2x_reset_is_done(struct bnx2x *bp);
277void bnx2x_disable_close_the_gate(struct bnx2x *bp); 307void bnx2x_disable_close_the_gate(struct bnx2x *bp);
278 308
279/** 309/**
280 * Perform statistics handling according to event 310 * bnx2x_stats_handle - perform statistics handling according to event.
281 * 311 *
282 * @param bp driver handle 312 * @bp: driver handle
283 * @param even tbnx2x_stats_event 313 * @event: bnx2x_stats_event
284 */ 314 */
285void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); 315void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
286 316
287/** 317/**
288 * Configures FW with client paramteres (like HW VLAN removal) 318 * bnx2x_sp_event - handle ramrods completion.
289 * for each active client. 319 *
320 * @fp: fastpath handle for the event
321 * @rr_cqe: eth_rx_cqe
322 */
323void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
324
325/**
326 * bnx2x_func_start - init function
327 *
328 * @bp: driver handle
290 * 329 *
291 * @param bp 330 * Must be called before sending CLIENT_SETUP for the first client.
292 */ 331 */
293void bnx2x_set_client_config(struct bnx2x *bp); 332int bnx2x_func_start(struct bnx2x *bp);
294 333
295/** 334/**
296 * Handle sp events 335 * bnx2x_ilt_set_info - prepare ILT configurations.
297 * 336 *
298 * @param fp fastpath handle for the event 337 * @bp: driver handle
299 * @param rr_cqe eth_rx_cqe
300 */ 338 */
301void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); 339void bnx2x_ilt_set_info(struct bnx2x *bp);
302 340
341/**
342 * bnx2x_dcbx_init - initialize dcbx protocol.
343 *
344 * @bp: driver handle
345 */
346void bnx2x_dcbx_init(struct bnx2x *bp);
347
348/**
349 * bnx2x_set_power_state - set power state to the requested value.
350 *
351 * @bp: driver handle
352 * @state: required state D0 or D3hot
353 *
354 * Currently only D0 and D3hot are supported.
355 */
356int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
357
358/**
359 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
360 *
361 * @bp: driver handle
362 * @value: new value
363 */
364void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
365
366/* dev_close main block */
367int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
368
369/* dev_open main block */
370int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
371
372/* hard_xmit callback */
373netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
374
375/* select_queue callback */
376u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
377
378int bnx2x_change_mac_addr(struct net_device *dev, void *p);
379
380/* NAPI poll Rx part */
381int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
382
383/* NAPI poll Tx part */
384int bnx2x_tx_int(struct bnx2x_fastpath *fp);
385
386/* suspend/resume callbacks */
387int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
388int bnx2x_resume(struct pci_dev *pdev);
389
390/* Release IRQ vectors */
391void bnx2x_free_irq(struct bnx2x *bp);
392
393void bnx2x_free_fp_mem(struct bnx2x *bp);
394int bnx2x_alloc_fp_mem(struct bnx2x *bp);
395
396void bnx2x_init_rx_rings(struct bnx2x *bp);
397void bnx2x_free_skbs(struct bnx2x *bp);
398void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
399void bnx2x_netif_start(struct bnx2x *bp);
400
401/**
402 * bnx2x_enable_msix - set msix configuration.
403 *
404 * @bp: driver handle
405 *
406 * fills msix_table, requests vectors, updates num_queues
407 * according to number of available vectors.
408 */
409int bnx2x_enable_msix(struct bnx2x *bp);
410
411/**
412 * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
413 *
414 * @bp: driver handle
415 */
416int bnx2x_enable_msi(struct bnx2x *bp);
417
418/**
419 * bnx2x_poll - NAPI callback
420 *
421 * @napi: napi structure
422 * @budget:
423 *
424 */
425int bnx2x_poll(struct napi_struct *napi, int budget);
426
427/**
428 * bnx2x_alloc_mem_bp - allocate memories outsize main driver structure
429 *
430 * @bp: driver handle
431 */
432int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
433
434/**
435 * bnx2x_free_mem_bp - release memories outsize main driver structure
436 *
437 * @bp: driver handle
438 */
439void bnx2x_free_mem_bp(struct bnx2x *bp);
440
441/**
442 * bnx2x_change_mtu - change mtu netdev callback
443 *
444 * @dev: net device
445 * @new_mtu: requested mtu
446 *
447 */
448int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
449
450u32 bnx2x_fix_features(struct net_device *dev, u32 features);
451int bnx2x_set_features(struct net_device *dev, u32 features);
452
453/**
454 * bnx2x_tx_timeout - tx timeout netdev callback
455 *
456 * @dev: net device
457 */
458void bnx2x_tx_timeout(struct net_device *dev);
303 459
304static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) 460static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
305{ 461{
306 struct host_status_block *fpsb = fp->status_blk;
307
308 barrier(); /* status block is written to by the chip */ 462 barrier(); /* status block is written to by the chip */
309 fp->fp_c_idx = fpsb->c_status_block.status_block_index; 463 fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
310 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
311} 464}
312 465
313static inline void bnx2x_update_rx_prod(struct bnx2x *bp, 466static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
@@ -334,8 +487,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
334 wmb(); 487 wmb();
335 488
336 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++) 489 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
337 REG_WR(bp, BAR_USTRORM_INTMEM + 490 REG_WR(bp,
338 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4, 491 BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
339 ((u32 *)&rx_prods)[i]); 492 ((u32 *)&rx_prods)[i]);
340 493
341 mmiowb(); /* keep prod updates ordered */ 494 mmiowb(); /* keep prod updates ordered */
@@ -345,10 +498,77 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
345 fp->index, bd_prod, rx_comp_prod, rx_sge_prod); 498 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
346} 499}
347 500
501static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
502 u8 segment, u16 index, u8 op,
503 u8 update, u32 igu_addr)
504{
505 struct igu_regular cmd_data = {0};
506
507 cmd_data.sb_id_and_flags =
508 ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
509 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
510 (update << IGU_REGULAR_BUPDATE_SHIFT) |
511 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
512
513 DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
514 cmd_data.sb_id_and_flags, igu_addr);
515 REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
516
517 /* Make sure that ACK is written */
518 mmiowb();
519 barrier();
520}
521
522static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
523 u8 idu_sb_id, bool is_Pf)
524{
525 u32 data, ctl, cnt = 100;
526 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
527 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
528 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
529 u32 sb_bit = 1 << (idu_sb_id%32);
530 u32 func_encode = BP_FUNC(bp) |
531 ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
532 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
533
534 /* Not supported in BC mode */
535 if (CHIP_INT_MODE_IS_BC(bp))
536 return;
537
538 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
539 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
540 IGU_REGULAR_CLEANUP_SET |
541 IGU_REGULAR_BCLEANUP;
542
543 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
544 func_encode << IGU_CTRL_REG_FID_SHIFT |
545 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
546
547 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
548 data, igu_addr_data);
549 REG_WR(bp, igu_addr_data, data);
550 mmiowb();
551 barrier();
552 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
553 ctl, igu_addr_ctl);
554 REG_WR(bp, igu_addr_ctl, ctl);
555 mmiowb();
556 barrier();
557
558 /* wait for clean up to finish */
559 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
560 msleep(20);
348 561
349 562
350static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, 563 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
351 u8 storm, u16 index, u8 op, u8 update) 564 DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
565 "idu_sb_id %d offset %d bit %d (cnt %d)\n",
566 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
567 }
568}
569
570static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
571 u8 storm, u16 index, u8 op, u8 update)
352{ 572{
353 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + 573 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
354 COMMAND_REG_INT_ACK); 574 COMMAND_REG_INT_ACK);
@@ -369,7 +589,37 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
369 mmiowb(); 589 mmiowb();
370 barrier(); 590 barrier();
371} 591}
372static inline u16 bnx2x_ack_int(struct bnx2x *bp) 592
593static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
594 u16 index, u8 op, u8 update)
595{
596 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
597
598 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
599 igu_addr);
600}
601
602static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
603 u16 index, u8 op, u8 update)
604{
605 if (bp->common.int_block == INT_BLOCK_HC)
606 bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
607 else {
608 u8 segment;
609
610 if (CHIP_INT_MODE_IS_BC(bp))
611 segment = storm;
612 else if (igu_sb_id != bp->igu_dsb_id)
613 segment = IGU_SEG_ACCESS_DEF;
614 else if (storm == ATTENTION_ID)
615 segment = IGU_SEG_ACCESS_ATTN;
616 else
617 segment = IGU_SEG_ACCESS_DEF;
618 bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
619 }
620}
621
622static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
373{ 623{
374 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + 624 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
375 COMMAND_REG_SIMD_MASK); 625 COMMAND_REG_SIMD_MASK);
@@ -378,18 +628,36 @@ static inline u16 bnx2x_ack_int(struct bnx2x *bp)
378 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", 628 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
379 result, hc_addr); 629 result, hc_addr);
380 630
631 barrier();
381 return result; 632 return result;
382} 633}
383 634
384/* 635static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
385 * fast path service functions 636{
386 */ 637 u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
638 u32 result = REG_RD(bp, igu_addr);
639
640 DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
641 result, igu_addr);
642
643 barrier();
644 return result;
645}
646
647static inline u16 bnx2x_ack_int(struct bnx2x *bp)
648{
649 barrier();
650 if (bp->common.int_block == INT_BLOCK_HC)
651 return bnx2x_hc_ack_int(bp);
652 else
653 return bnx2x_igu_ack_int(bp);
654}
387 655
388static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp) 656static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
389{ 657{
390 /* Tell compiler that consumer and producer can change */ 658 /* Tell compiler that consumer and producer can change */
391 barrier(); 659 barrier();
392 return (fp->tx_pkt_prod != fp->tx_pkt_cons); 660 return fp->tx_pkt_prod != fp->tx_pkt_cons;
393} 661}
394 662
395static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) 663static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
@@ -424,6 +692,29 @@ static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
424 return hw_cons != fp->tx_pkt_cons; 692 return hw_cons != fp->tx_pkt_cons;
425} 693}
426 694
695static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
696{
697 u16 rx_cons_sb;
698
699 /* Tell compiler that status block fields can change */
700 barrier();
701 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
702 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
703 rx_cons_sb++;
704 return (fp->rx_comp_cons != rx_cons_sb);
705}
706
707/**
708 * disables tx from stack point of view
709 *
710 * @bp: driver handle
711 */
712static inline void bnx2x_tx_disable(struct bnx2x *bp)
713{
714 netif_tx_disable(bp->dev);
715 netif_carrier_off(bp->dev);
716}
717
427static inline void bnx2x_free_rx_sge(struct bnx2x *bp, 718static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
428 struct bnx2x_fastpath *fp, u16 index) 719 struct bnx2x_fastpath *fp, u16 index)
429{ 720{
@@ -436,7 +727,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
436 return; 727 return;
437 728
438 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), 729 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
439 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); 730 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
440 __free_pages(page, PAGES_PER_SGE_SHIFT); 731 __free_pages(page, PAGES_PER_SGE_SHIFT);
441 732
442 sw_buf->page = NULL; 733 sw_buf->page = NULL;
@@ -444,13 +735,67 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
444 sge->addr_lo = 0; 735 sge->addr_lo = 0;
445} 736}
446 737
447static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, 738static inline void bnx2x_add_all_napi(struct bnx2x *bp)
448 struct bnx2x_fastpath *fp, int last)
449{ 739{
450 int i; 740 int i;
451 741
452 for (i = 0; i < last; i++) 742 /* Add NAPI objects */
453 bnx2x_free_rx_sge(bp, fp, i); 743 for_each_napi_queue(bp, i)
744 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
745 bnx2x_poll, BNX2X_NAPI_WEIGHT);
746}
747
748static inline void bnx2x_del_all_napi(struct bnx2x *bp)
749{
750 int i;
751
752 for_each_napi_queue(bp, i)
753 netif_napi_del(&bnx2x_fp(bp, i, napi));
754}
755
756static inline void bnx2x_disable_msi(struct bnx2x *bp)
757{
758 if (bp->flags & USING_MSIX_FLAG) {
759 pci_disable_msix(bp->pdev);
760 bp->flags &= ~USING_MSIX_FLAG;
761 } else if (bp->flags & USING_MSI_FLAG) {
762 pci_disable_msi(bp->pdev);
763 bp->flags &= ~USING_MSI_FLAG;
764 }
765}
766
767static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
768{
769 return num_queues ?
770 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
771 min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
772}
773
774static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
775{
776 int i, j;
777
778 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
779 int idx = RX_SGE_CNT * i - 1;
780
781 for (j = 0; j < 2; j++) {
782 SGE_MASK_CLEAR_BIT(fp, idx);
783 idx--;
784 }
785 }
786}
787
788static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
789{
790 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
791 memset(fp->sge_mask, 0xff,
792 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
793
794 /* Clear the two last indices in the page to 1:
795 these are the indices that correspond to the "next" element,
796 hence will never be indicated and should be removed from
797 the calculations. */
798 bnx2x_clear_sge_mask_next_elems(fp);
454} 799}
455 800
456static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, 801static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@ -479,6 +824,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
479 824
480 return 0; 825 return 0;
481} 826}
827
482static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, 828static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
483 struct bnx2x_fastpath *fp, u16 index) 829 struct bnx2x_fastpath *fp, u16 index)
484{ 830{
@@ -487,14 +833,14 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
487 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; 833 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
488 dma_addr_t mapping; 834 dma_addr_t mapping;
489 835
490 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); 836 skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
491 if (unlikely(skb == NULL)) 837 if (unlikely(skb == NULL))
492 return -ENOMEM; 838 return -ENOMEM;
493 839
494 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size, 840 mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
495 DMA_FROM_DEVICE); 841 DMA_FROM_DEVICE);
496 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 842 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
497 dev_kfree_skb(skb); 843 dev_kfree_skb_any(skb);
498 return -ENOMEM; 844 return -ENOMEM;
499 } 845 }
500 846
@@ -513,7 +859,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
513 * so there is no need to check for dma_mapping_error(). 859 * so there is no need to check for dma_mapping_error().
514 */ 860 */
515static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, 861static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
516 struct sk_buff *skb, u16 cons, u16 prod) 862 u16 cons, u16 prod)
517{ 863{
518 struct bnx2x *bp = fp->bp; 864 struct bnx2x *bp = fp->bp;
519 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; 865 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
@@ -531,32 +877,18 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
531 *prod_bd = *cons_bd; 877 *prod_bd = *cons_bd;
532} 878}
533 879
534static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) 880static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
881 struct bnx2x_fastpath *fp, int last)
535{ 882{
536 int i, j; 883 int i;
537 884
538 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { 885 if (fp->disable_tpa)
539 int idx = RX_SGE_CNT * i - 1; 886 return;
540 887
541 for (j = 0; j < 2; j++) { 888 for (i = 0; i < last; i++)
542 SGE_MASK_CLEAR_BIT(fp, idx); 889 bnx2x_free_rx_sge(bp, fp, i);
543 idx--;
544 }
545 }
546} 890}
547 891
548static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
549{
550 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
551 memset(fp->sge_mask, 0xff,
552 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
553
554 /* Clear the two last indices in the page to 1:
555 these are the indices that correspond to the "next" element,
556 hence will never be indicated and should be removed from
557 the calculations. */
558 bnx2x_clear_sge_mask_next_elems(fp);
559}
560static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, 892static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
561 struct bnx2x_fastpath *fp, int last) 893 struct bnx2x_fastpath *fp, int last)
562{ 894{
@@ -574,79 +906,227 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
574 if (fp->tpa_state[i] == BNX2X_TPA_START) 906 if (fp->tpa_state[i] == BNX2X_TPA_START)
575 dma_unmap_single(&bp->pdev->dev, 907 dma_unmap_single(&bp->pdev->dev,
576 dma_unmap_addr(rx_buf, mapping), 908 dma_unmap_addr(rx_buf, mapping),
577 bp->rx_buf_size, DMA_FROM_DEVICE); 909 fp->rx_buf_size, DMA_FROM_DEVICE);
578 910
579 dev_kfree_skb(skb); 911 dev_kfree_skb(skb);
580 rx_buf->skb = NULL; 912 rx_buf->skb = NULL;
581 } 913 }
582} 914}
583 915
916static inline void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp)
917{
918 int i;
919
920 for (i = 1; i <= NUM_TX_RINGS; i++) {
921 struct eth_tx_next_bd *tx_next_bd =
922 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
923
924 tx_next_bd->addr_hi =
925 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
926 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
927 tx_next_bd->addr_lo =
928 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
929 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
930 }
931
932 SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
933 fp->tx_db.data.zero_fill1 = 0;
934 fp->tx_db.data.prod = 0;
935
936 fp->tx_pkt_prod = 0;
937 fp->tx_pkt_cons = 0;
938 fp->tx_bd_prod = 0;
939 fp->tx_bd_cons = 0;
940 fp->tx_pkt = 0;
941}
584 942
585static inline void bnx2x_init_tx_ring(struct bnx2x *bp) 943static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
586{ 944{
587 int i, j; 945 int i;
588 946
589 for_each_queue(bp, j) { 947 for_each_tx_queue(bp, i)
590 struct bnx2x_fastpath *fp = &bp->fp[j]; 948 bnx2x_init_tx_ring_one(&bp->fp[i]);
949}
591 950
592 for (i = 1; i <= NUM_TX_RINGS; i++) { 951static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
593 struct eth_tx_next_bd *tx_next_bd = 952{
594 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; 953 int i;
595 954
596 tx_next_bd->addr_hi = 955 for (i = 1; i <= NUM_RX_RINGS; i++) {
597 cpu_to_le32(U64_HI(fp->tx_desc_mapping + 956 struct eth_rx_bd *rx_bd;
598 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
599 tx_next_bd->addr_lo =
600 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
601 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
602 }
603 957
604 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE; 958 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
605 fp->tx_db.data.zero_fill1 = 0; 959 rx_bd->addr_hi =
606 fp->tx_db.data.prod = 0; 960 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
961 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
962 rx_bd->addr_lo =
963 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
964 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
965 }
966}
967
968static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
969{
970 int i;
607 971
608 fp->tx_pkt_prod = 0; 972 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
609 fp->tx_pkt_cons = 0; 973 struct eth_rx_sge *sge;
610 fp->tx_bd_prod = 0; 974
611 fp->tx_bd_cons = 0; 975 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
612 fp->tx_cons_sb = BNX2X_TX_SB_INDEX; 976 sge->addr_hi =
613 fp->tx_pkt = 0; 977 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
978 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
979
980 sge->addr_lo =
981 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
982 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
614 } 983 }
615} 984}
616static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) 985
986static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
617{ 987{
618 u16 rx_cons_sb; 988 int i;
989 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
990 struct eth_rx_cqe_next_page *nextpg;
991
992 nextpg = (struct eth_rx_cqe_next_page *)
993 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
994 nextpg->addr_hi =
995 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
996 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
997 nextpg->addr_lo =
998 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
999 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
1000 }
1001}
619 1002
620 /* Tell compiler that status block fields can change */ 1003/* Returns the number of actually allocated BDs */
621 barrier(); 1004static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
622 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); 1005 int rx_ring_size)
623 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) 1006{
624 rx_cons_sb++; 1007 struct bnx2x *bp = fp->bp;
625 return (fp->rx_comp_cons != rx_cons_sb); 1008 u16 ring_prod, cqe_ring_prod;
1009 int i;
1010
1011 fp->rx_comp_cons = 0;
1012 cqe_ring_prod = ring_prod = 0;
1013
1014 /* This routine is called only during fo init so
1015 * fp->eth_q_stats.rx_skb_alloc_failed = 0
1016 */
1017 for (i = 0; i < rx_ring_size; i++) {
1018 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
1019 fp->eth_q_stats.rx_skb_alloc_failed++;
1020 continue;
1021 }
1022 ring_prod = NEXT_RX_IDX(ring_prod);
1023 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
1024 WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
1025 }
1026
1027 if (fp->eth_q_stats.rx_skb_alloc_failed)
1028 BNX2X_ERR("was only able to allocate "
1029 "%d rx skbs on queue[%d]\n",
1030 (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);
1031
1032 fp->rx_bd_prod = ring_prod;
1033 /* Limit the CQE producer by the CQE ring size */
1034 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
1035 cqe_ring_prod);
1036 fp->rx_pkt = fp->rx_calls = 0;
1037
1038 return i - fp->eth_q_stats.rx_skb_alloc_failed;
1039}
1040
1041#ifdef BCM_CNIC
1042static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
1043{
1044 bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID +
1045 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
1046 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
1047 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
1048 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
1049 bnx2x_fcoe(bp, bp) = bp;
1050 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
1051 bnx2x_fcoe(bp, index) = FCOE_IDX;
1052 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
1053 bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX;
1054 /* qZone id equals to FW (per path) client id */
1055 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) +
1056 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
1057 ETH_MAX_RX_CLIENTS_E1H);
1058 /* init shortcut */
1059 bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ?
1060 USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) :
1061 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), bnx2x_fcoe_fp(bp)->cl_id);
1062
1063}
1064#endif
1065
1066static inline void __storm_memset_struct(struct bnx2x *bp,
1067 u32 addr, size_t size, u32 *data)
1068{
1069 int i;
1070 for (i = 0; i < size/4; i++)
1071 REG_WR(bp, addr + (i * 4), data[i]);
1072}
1073
1074static inline void storm_memset_mac_filters(struct bnx2x *bp,
1075 struct tstorm_eth_mac_filter_config *mac_filters,
1076 u16 abs_fid)
1077{
1078 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
1079
1080 u32 addr = BAR_TSTRORM_INTMEM +
1081 TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);
1082
1083 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
1084}
1085
1086static inline void storm_memset_cmng(struct bnx2x *bp,
1087 struct cmng_struct_per_port *cmng,
1088 u8 port)
1089{
1090 size_t size =
1091 sizeof(struct rate_shaping_vars_per_port) +
1092 sizeof(struct fairness_vars_per_port) +
1093 sizeof(struct safc_struct_per_port) +
1094 sizeof(struct pfc_struct_per_port);
1095
1096 u32 addr = BAR_XSTRORM_INTMEM +
1097 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
1098
1099 __storm_memset_struct(bp, addr, size, (u32 *)cmng);
1100
1101 addr += size + 4 /* SKIP DCB+LLFC */;
1102 size = sizeof(struct cmng_struct_per_port) -
1103 size /* written */ - 4 /*skipped*/;
1104
1105 __storm_memset_struct(bp, addr, size,
1106 (u32 *)(cmng->traffic_type_to_priority_cos));
626} 1107}
627 1108
628/* HW Lock for shared dual port PHYs */ 1109/* HW Lock for shared dual port PHYs */
629void bnx2x_acquire_phy_lock(struct bnx2x *bp); 1110void bnx2x_acquire_phy_lock(struct bnx2x *bp);
630void bnx2x_release_phy_lock(struct bnx2x *bp); 1111void bnx2x_release_phy_lock(struct bnx2x *bp);
631 1112
632void bnx2x_link_report(struct bnx2x *bp); 1113/**
633int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); 1114 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
634int bnx2x_tx_int(struct bnx2x_fastpath *fp); 1115 *
635void bnx2x_init_rx_rings(struct bnx2x *bp); 1116 * @bp: driver handle
636netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); 1117 * @mf_cfg: MF configuration
637 1118 *
638int bnx2x_change_mac_addr(struct net_device *dev, void *p); 1119 */
639void bnx2x_tx_timeout(struct net_device *dev); 1120static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
640void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp); 1121{
641void bnx2x_netif_start(struct bnx2x *bp); 1122 u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
642void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); 1123 FUNC_MF_CFG_MAX_BW_SHIFT;
643void bnx2x_free_irq(struct bnx2x *bp, bool disable_only); 1124 if (!max_cfg) {
644int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state); 1125 BNX2X_ERR("Illegal configuration detected for Max BW - "
645int bnx2x_resume(struct pci_dev *pdev); 1126 "using 100 instead\n");
646void bnx2x_free_skbs(struct bnx2x *bp); 1127 max_cfg = 100;
647int bnx2x_change_mtu(struct net_device *dev, int new_mtu); 1128 }
648int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); 1129 return max_cfg;
649int bnx2x_nic_load(struct bnx2x *bp, int load_mode); 1130}
650int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
651 1131
652#endif /* BNX2X_CMN_H */ 1132#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
new file mode 100644
index 000000000000..410a49e571ac
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -0,0 +1,2243 @@
1/* bnx2x_dcb.c: Broadcom Everest network driver.
2 *
3 * Copyright 2009-2011 Broadcom Corporation
4 *
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Dmitry Kravkov
17 *
18 */
19#include <linux/netdevice.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#ifdef BCM_DCBNL
23#include <linux/dcbnl.h>
24#endif
25
26#include "bnx2x.h"
27#include "bnx2x_cmn.h"
28#include "bnx2x_dcb.h"
29
30
31/* forward declarations of dcbx related functions */
32static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
33static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
34static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
35static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
36static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
37 u32 *set_configuration_ets_pg,
38 u32 *pri_pg_tbl);
39static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
40 u32 *pg_pri_orginal_spread,
41 struct pg_help_data *help_data);
42static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
43 struct pg_help_data *help_data,
44 struct dcbx_ets_feature *ets,
45 u32 *pg_pri_orginal_spread);
46static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
47 struct cos_help_data *cos_data,
48 u32 *pg_pri_orginal_spread,
49 struct dcbx_ets_feature *ets);
50static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp);
51
52
53static void bnx2x_pfc_set(struct bnx2x *bp)
54{
55 struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
56 u32 pri_bit, val = 0;
57 u8 pri;
58
59 /* Tx COS configuration */
60 if (bp->dcbx_port_params.ets.cos_params[0].pauseable)
61 pfc_params.rx_cos0_priority_mask =
62 bp->dcbx_port_params.ets.cos_params[0].pri_bitmask;
63 if (bp->dcbx_port_params.ets.cos_params[1].pauseable)
64 pfc_params.rx_cos1_priority_mask =
65 bp->dcbx_port_params.ets.cos_params[1].pri_bitmask;
66
67
68 /**
69 * Rx COS configuration
70 * Changing PFC RX configuration .
71 * In RX COS0 will always be configured to lossy and COS1 to lossless
72 */
73 for (pri = 0 ; pri < MAX_PFC_PRIORITIES ; pri++) {
74 pri_bit = 1 << pri;
75
76 if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
77 val |= 1 << (pri * 4);
78 }
79
80 pfc_params.pkt_priority_to_cos = val;
81
82 /* RX COS0 */
83 pfc_params.llfc_low_priority_classes = 0;
84 /* RX COS1 */
85 pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
86
87 /* BRB configuration */
88 pfc_params.cos0_pauseable = false;
89 pfc_params.cos1_pauseable = true;
90
91 bnx2x_acquire_phy_lock(bp);
92 bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
93 bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params);
94 bnx2x_release_phy_lock(bp);
95}
96
97static void bnx2x_pfc_clear(struct bnx2x *bp)
98{
99 struct bnx2x_nig_brb_pfc_port_params nig_params = {0};
100 nig_params.pause_enable = 1;
101#ifdef BNX2X_SAFC
102 if (bp->flags & SAFC_TX_FLAG) {
103 u32 high = 0, low = 0;
104 int i;
105
106 for (i = 0; i < BNX2X_MAX_PRIORITY; i++) {
107 if (bp->pri_map[i] == 1)
108 high |= (1 << i);
109 if (bp->pri_map[i] == 0)
110 low |= (1 << i);
111 }
112
113 nig_params.llfc_low_priority_classes = high;
114 nig_params.llfc_low_priority_classes = low;
115
116 nig_params.pause_enable = 0;
117 nig_params.llfc_enable = 1;
118 nig_params.llfc_out_en = 1;
119 }
120#endif /* BNX2X_SAFC */
121 bnx2x_acquire_phy_lock(bp);
122 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED;
123 bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params);
124 bnx2x_release_phy_lock(bp);
125}
126
127static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp,
128 struct dcbx_features *features,
129 u32 error)
130{
131 u8 i = 0;
132 DP(NETIF_MSG_LINK, "local_mib.error %x\n", error);
133
134 /* PG */
135 DP(NETIF_MSG_LINK,
136 "local_mib.features.ets.enabled %x\n", features->ets.enabled);
137 for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++)
138 DP(NETIF_MSG_LINK,
139 "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i,
140 DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i));
141 for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++)
142 DP(NETIF_MSG_LINK,
143 "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i,
144 DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i));
145
146 /* pfc */
147 DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n",
148 features->pfc.pri_en_bitmap);
149 DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n",
150 features->pfc.pfc_caps);
151 DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n",
152 features->pfc.enabled);
153
154 DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n",
155 features->app.default_pri);
156 DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n",
157 features->app.tc_supported);
158 DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n",
159 features->app.enabled);
160 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
161 DP(NETIF_MSG_LINK,
162 "dcbx_features.app.app_pri_tbl[%x].app_id %x\n",
163 i, features->app.app_pri_tbl[i].app_id);
164 DP(NETIF_MSG_LINK,
165 "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n",
166 i, features->app.app_pri_tbl[i].pri_bitmap);
167 DP(NETIF_MSG_LINK,
168 "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n",
169 i, features->app.app_pri_tbl[i].appBitfield);
170 }
171}
172
173static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp,
174 u8 pri_bitmap,
175 u8 llfc_traf_type)
176{
177 u32 pri = MAX_PFC_PRIORITIES;
178 u32 index = MAX_PFC_PRIORITIES - 1;
179 u32 pri_mask;
180 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
181
182 /* Choose the highest priority */
183 while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) {
184 pri_mask = 1 << index;
185 if (GET_FLAGS(pri_bitmap, pri_mask))
186 pri = index ;
187 index--;
188 }
189
190 if (pri < MAX_PFC_PRIORITIES)
191 ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri);
192}
193
194static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
195 struct dcbx_app_priority_feature *app,
196 u32 error) {
197 u8 index;
198 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
199
200 if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
201 DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n");
202
203 if (app->enabled && !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR)) {
204
205 bp->dcbx_port_params.app.enabled = true;
206
207 for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
208 ttp[index] = 0;
209
210 if (app->default_pri < MAX_PFC_PRIORITIES)
211 ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri;
212
213 for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) {
214 struct dcbx_app_priority_entry *entry =
215 app->app_pri_tbl;
216
217 if (GET_FLAGS(entry[index].appBitfield,
218 DCBX_APP_SF_ETH_TYPE) &&
219 ETH_TYPE_FCOE == entry[index].app_id)
220 bnx2x_dcbx_get_ap_priority(bp,
221 entry[index].pri_bitmap,
222 LLFC_TRAFFIC_TYPE_FCOE);
223
224 if (GET_FLAGS(entry[index].appBitfield,
225 DCBX_APP_SF_PORT) &&
226 TCP_PORT_ISCSI == entry[index].app_id)
227 bnx2x_dcbx_get_ap_priority(bp,
228 entry[index].pri_bitmap,
229 LLFC_TRAFFIC_TYPE_ISCSI);
230 }
231 } else {
232 DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n");
233 bp->dcbx_port_params.app.enabled = false;
234 for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
235 ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY;
236 }
237}
238
239static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
240 struct dcbx_ets_feature *ets,
241 u32 error) {
242 int i = 0;
243 u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0};
244 struct pg_help_data pg_help_data;
245 struct bnx2x_dcbx_cos_params *cos_params =
246 bp->dcbx_port_params.ets.cos_params;
247
248 memset(&pg_help_data, 0, sizeof(struct pg_help_data));
249
250
251 if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
252 DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n");
253
254
255 /* Clean up old settings of ets on COS */
256 for (i = 0; i < E2_NUM_OF_COS ; i++) {
257
258 cos_params[i].pauseable = false;
259 cos_params[i].strict = BNX2X_DCBX_COS_NOT_STRICT;
260 cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
261 cos_params[i].pri_bitmask = DCBX_PFC_PRI_GET_NON_PAUSE(bp, 0);
262 }
263
264 if (bp->dcbx_port_params.app.enabled &&
265 !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) &&
266 ets->enabled) {
267 DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n");
268 bp->dcbx_port_params.ets.enabled = true;
269
270 bnx2x_dcbx_get_ets_pri_pg_tbl(bp,
271 pg_pri_orginal_spread,
272 ets->pri_pg_tbl);
273
274 bnx2x_dcbx_get_num_pg_traf_type(bp,
275 pg_pri_orginal_spread,
276 &pg_help_data);
277
278 bnx2x_dcbx_fill_cos_params(bp, &pg_help_data,
279 ets, pg_pri_orginal_spread);
280
281 } else {
282 DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n");
283 bp->dcbx_port_params.ets.enabled = false;
284 ets->pri_pg_tbl[0] = 0;
285
286 for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++)
287 DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1);
288 }
289}
290
291static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
292 struct dcbx_pfc_feature *pfc, u32 error)
293{
294
295 if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
296 DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n");
297
298 if (bp->dcbx_port_params.app.enabled &&
299 !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR) &&
300 pfc->enabled) {
301 bp->dcbx_port_params.pfc.enabled = true;
302 bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
303 ~(pfc->pri_en_bitmap);
304 } else {
305 DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n");
306 bp->dcbx_port_params.pfc.enabled = false;
307 bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0;
308 }
309}
310
311static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
312 struct dcbx_features *features,
313 u32 error)
314{
315 bnx2x_dcbx_get_ap_feature(bp, &features->app, error);
316
317 bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);
318
319 bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);
320}
321
322#define DCBX_LOCAL_MIB_MAX_TRY_READ (100)
323static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
324 u32 *base_mib_addr,
325 u32 offset,
326 int read_mib_type)
327{
328 int max_try_read = 0, i;
329 u32 *buff, mib_size, prefix_seq_num, suffix_seq_num;
330 struct lldp_remote_mib *remote_mib ;
331 struct lldp_local_mib *local_mib;
332
333
334 switch (read_mib_type) {
335 case DCBX_READ_LOCAL_MIB:
336 mib_size = sizeof(struct lldp_local_mib);
337 break;
338 case DCBX_READ_REMOTE_MIB:
339 mib_size = sizeof(struct lldp_remote_mib);
340 break;
341 default:
342 return 1; /*error*/
343 }
344
345 offset += BP_PORT(bp) * mib_size;
346
347 do {
348 buff = base_mib_addr;
349 for (i = 0; i < mib_size; i += 4, buff++)
350 *buff = REG_RD(bp, offset + i);
351
352 max_try_read++;
353
354 switch (read_mib_type) {
355 case DCBX_READ_LOCAL_MIB:
356 local_mib = (struct lldp_local_mib *) base_mib_addr;
357 prefix_seq_num = local_mib->prefix_seq_num;
358 suffix_seq_num = local_mib->suffix_seq_num;
359 break;
360 case DCBX_READ_REMOTE_MIB:
361 remote_mib = (struct lldp_remote_mib *) base_mib_addr;
362 prefix_seq_num = remote_mib->prefix_seq_num;
363 suffix_seq_num = remote_mib->suffix_seq_num;
364 break;
365 default:
366 return 1; /*error*/
367 }
368 } while ((prefix_seq_num != suffix_seq_num) &&
369 (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ));
370
371 if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) {
372 BNX2X_ERR("MIB could not be read\n");
373 return 1;
374 }
375
376 return 0;
377}
378
379static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
380{
381 if (CHIP_IS_E2(bp)) {
382 if (BP_PORT(bp)) {
383 BNX2X_ERR("4 port mode is not supported");
384 return;
385 }
386
387 if (bp->dcbx_port_params.pfc.enabled)
388
389 /* 1. Fills up common PFC structures if required.*/
390 /* 2. Configure NIG, MAC and BRB via the elink:
391 * elink must first check if BMAC is not in reset
392 * and only then configures the BMAC
393 * Or, configure EMAC.
394 */
395 bnx2x_pfc_set(bp);
396
397 else
398 bnx2x_pfc_clear(bp);
399 }
400}
401
402static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
403{
404 DP(NETIF_MSG_LINK, "sending STOP TRAFFIC\n");
405 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
406 0 /* connectionless */,
407 0 /* dataHi is zero */,
408 0 /* dataLo is zero */,
409 1 /* common */);
410}
411
412static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
413{
414 bnx2x_pfc_fw_struct_e2(bp);
415 DP(NETIF_MSG_LINK, "sending START TRAFFIC\n");
416 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC,
417 0, /* connectionless */
418 U64_HI(bnx2x_sp_mapping(bp, pfc_config)),
419 U64_LO(bnx2x_sp_mapping(bp, pfc_config)),
420 1 /* commmon */);
421}
422
423static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
424{
425 struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
426 u8 status = 0;
427
428 bnx2x_ets_disabled(&bp->link_params);
429
430 if (!ets->enabled)
431 return;
432
433 if ((ets->num_of_cos == 0) || (ets->num_of_cos > E2_NUM_OF_COS)) {
434 BNX2X_ERR("illegal num of cos= %x", ets->num_of_cos);
435 return;
436 }
437
438 /* valid COS entries */
439 if (ets->num_of_cos == 1) /* no ETS */
440 return;
441
442 /* sanity */
443 if (((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[0].strict) &&
444 (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
445 ((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[1].strict) &&
446 (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
447 BNX2X_ERR("all COS should have at least bw_limit or strict"
448 "ets->cos_params[0].strict= %x"
449 "ets->cos_params[0].bw_tbl= %x"
450 "ets->cos_params[1].strict= %x"
451 "ets->cos_params[1].bw_tbl= %x",
452 ets->cos_params[0].strict,
453 ets->cos_params[0].bw_tbl,
454 ets->cos_params[1].strict,
455 ets->cos_params[1].bw_tbl);
456 return;
457 }
458 /* If we join a group and there is bw_tbl and strict then bw rules */
459 if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) &&
460 (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) {
461 u32 bw_tbl_0 = ets->cos_params[0].bw_tbl;
462 u32 bw_tbl_1 = ets->cos_params[1].bw_tbl;
463 /* Do not allow 0-100 configuration
464 * since PBF does not support it
465 * force 1-99 instead
466 */
467 if (bw_tbl_0 == 0) {
468 bw_tbl_0 = 1;
469 bw_tbl_1 = 99;
470 } else if (bw_tbl_1 == 0) {
471 bw_tbl_1 = 1;
472 bw_tbl_0 = 99;
473 }
474
475 bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
476 } else {
477 if (ets->cos_params[0].strict == BNX2X_DCBX_COS_HIGH_STRICT)
478 status = bnx2x_ets_strict(&bp->link_params, 0);
479 else if (ets->cos_params[1].strict
480 == BNX2X_DCBX_COS_HIGH_STRICT)
481 status = bnx2x_ets_strict(&bp->link_params, 1);
482
483 if (status)
484 BNX2X_ERR("update_ets_params failed\n");
485 }
486}
487
488#ifdef BCM_DCBNL
489static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
490{
491 struct lldp_remote_mib remote_mib = {0};
492 u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset);
493 int rc;
494
495 DP(NETIF_MSG_LINK, "dcbx_remote_mib_offset 0x%x\n",
496 dcbx_remote_mib_offset);
497
498 if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) {
499 BNX2X_ERR("FW doesn't support dcbx_remote_mib_offset\n");
500 return -EINVAL;
501 }
502
503 rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset,
504 DCBX_READ_REMOTE_MIB);
505
506 if (rc) {
507 BNX2X_ERR("Faild to read remote mib from FW\n");
508 return rc;
509 }
510
511 /* save features and flags */
512 bp->dcbx_remote_feat = remote_mib.features;
513 bp->dcbx_remote_flags = remote_mib.flags;
514 return 0;
515}
516#endif
517
518static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
519{
520 struct lldp_local_mib local_mib = {0};
521 u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
522 int rc;
523
524 DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
525
526 if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
527 BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
528 return -EINVAL;
529 }
530 rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
531 DCBX_READ_LOCAL_MIB);
532
533 if (rc) {
534 BNX2X_ERR("Faild to read local mib from FW\n");
535 return rc;
536 }
537
538 /* save features and error */
539 bp->dcbx_local_feat = local_mib.features;
540 bp->dcbx_error = local_mib.error;
541 return 0;
542}
543
544
545#ifdef BCM_DCBNL
546static inline
547u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
548{
549 u8 pri;
550
551 /* Choose the highest priority */
552 for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
553 if (ent->pri_bitmap & (1 << pri))
554 break;
555 return pri;
556}
557
558static inline
559u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
560{
561 return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
562 DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
563 DCB_APP_IDTYPE_ETHTYPE;
564}
565
566static inline
567void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp)
568{
569 int i;
570 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
571 bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &=
572 ~DCBX_APP_ENTRY_VALID;
573}
574
575int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
576{
577 int i, err = 0;
578
579 for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
580 struct dcbx_app_priority_entry *ent =
581 &bp->dcbx_local_feat.app.app_pri_tbl[i];
582
583 if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
584 u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
585
586 /* avoid invalid user-priority */
587 if (up) {
588 struct dcb_app app;
589 app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
590 app.protocol = ent->app_id;
591 app.priority = delall ? 0 : up;
592 err = dcb_setapp(bp->dev, &app);
593 }
594 }
595 }
596 return err;
597}
598#endif
599
600void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
601{
602 switch (state) {
603 case BNX2X_DCBX_STATE_NEG_RECEIVED:
604#ifdef BCM_CNIC
605 if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
606 struct cnic_ops *c_ops;
607 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
608 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
609 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
610 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
611
612 rcu_read_lock();
613 c_ops = rcu_dereference(bp->cnic_ops);
614 if (c_ops) {
615 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_ISCSI_CMD);
616 rcu_read_unlock();
617 return;
618 }
619 rcu_read_unlock();
620 }
621
622 /* fall through if no CNIC initialized */
623 case BNX2X_DCBX_STATE_ISCSI_STOPPED:
624#endif
625
626 {
627 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
628#ifdef BCM_DCBNL
629 /**
630 * Delete app tlvs from dcbnl before reading new
631 * negotiation results
632 */
633 bnx2x_dcbnl_update_applist(bp, true);
634
635 /* Read rmeote mib if dcbx is in the FW */
636 if (bnx2x_dcbx_read_shmem_remote_mib(bp))
637 return;
638#endif
639 /* Read neg results if dcbx is in the FW */
640 if (bnx2x_dcbx_read_shmem_neg_results(bp))
641 return;
642
643 bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
644 bp->dcbx_error);
645
646 bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
647 bp->dcbx_error);
648
649 if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
650#ifdef BCM_DCBNL
651 /**
652 * Add new app tlvs to dcbnl
653 */
654 bnx2x_dcbnl_update_applist(bp, false);
655#endif
656 bnx2x_dcbx_stop_hw_tx(bp);
657 return;
658 }
659 /* fall through */
660#ifdef BCM_DCBNL
661 /**
662 * Invalidate the local app tlvs if they are not added
663 * to the dcbnl app list to avoid deleting them from
664 * the list later on
665 */
666 bnx2x_dcbx_invalidate_local_apps(bp);
667#endif
668 }
669 case BNX2X_DCBX_STATE_TX_PAUSED:
670 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
671 bnx2x_pfc_set_pfc(bp);
672
673 bnx2x_dcbx_update_ets_params(bp);
674 if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
675 bnx2x_dcbx_resume_hw_tx(bp);
676 return;
677 }
678 /* fall through */
679 case BNX2X_DCBX_STATE_TX_RELEASED:
680 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
681 if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD)
682 bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
683
684 return;
685 default:
686 BNX2X_ERR("Unknown DCBX_STATE\n");
687 }
688}
689
690
691#define LLDP_STATS_OFFSET(bp) (BP_PORT(bp)*\
692 sizeof(struct lldp_dcbx_stat))
693
694/* calculate struct offset in array according to chip information */
695#define LLDP_PARAMS_OFFSET(bp) (BP_PORT(bp)*sizeof(struct lldp_params))
696
697#define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \
698 BP_PORT(bp)*sizeof(struct lldp_admin_mib))
699
700static void bnx2x_dcbx_lldp_updated_params(struct bnx2x *bp,
701 u32 dcbx_lldp_params_offset)
702{
703 struct lldp_params lldp_params = {0};
704 u32 i = 0, *buff = NULL;
705 u32 offset = dcbx_lldp_params_offset + LLDP_PARAMS_OFFSET(bp);
706
707 DP(NETIF_MSG_LINK, "lldp_offset 0x%x\n", offset);
708
709 if ((bp->lldp_config_params.overwrite_settings ==
710 BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE)) {
711 /* Read the data first */
712 buff = (u32 *)&lldp_params;
713 for (i = 0; i < sizeof(struct lldp_params); i += 4, buff++)
714 *buff = REG_RD(bp, (offset + i));
715
716 lldp_params.msg_tx_hold =
717 (u8)bp->lldp_config_params.msg_tx_hold;
718 lldp_params.msg_fast_tx_interval =
719 (u8)bp->lldp_config_params.msg_fast_tx;
720 lldp_params.tx_crd_max =
721 (u8)bp->lldp_config_params.tx_credit_max;
722 lldp_params.msg_tx_interval =
723 (u8)bp->lldp_config_params.msg_tx_interval;
724 lldp_params.tx_fast =
725 (u8)bp->lldp_config_params.tx_fast;
726
727 /* Write the data.*/
728 buff = (u32 *)&lldp_params;
729 for (i = 0; i < sizeof(struct lldp_params); i += 4, buff++)
730 REG_WR(bp, (offset + i) , *buff);
731
732
733 } else if (BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE ==
734 bp->lldp_config_params.overwrite_settings)
735 bp->lldp_config_params.overwrite_settings =
736 BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID;
737}
738
739static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
740 u32 dcbx_lldp_params_offset)
741{
742 struct lldp_admin_mib admin_mib;
743 u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0;
744 u32 *buff;
745 u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp);
746
747 /*shortcuts*/
748 struct dcbx_features *af = &admin_mib.features;
749 struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params;
750
751 memset(&admin_mib, 0, sizeof(struct lldp_admin_mib));
752 buff = (u32 *)&admin_mib;
753 /* Read the data first */
754 for (i = 0; i < sizeof(struct lldp_admin_mib); i += 4, buff++)
755 *buff = REG_RD(bp, (offset + i));
756
757 if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON)
758 SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
759 else
760 RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
761
762 if ((BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE ==
763 dp->overwrite_settings)) {
764 RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK);
765 admin_mib.ver_cfg_flags |=
766 (dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) &
767 DCBX_CEE_VERSION_MASK;
768
769 af->ets.enabled = (u8)dp->admin_ets_enable;
770
771 af->pfc.enabled = (u8)dp->admin_pfc_enable;
772
773 /* FOR IEEE dp->admin_tc_supported_tx_enable */
774 if (dp->admin_ets_configuration_tx_enable)
775 SET_FLAGS(admin_mib.ver_cfg_flags,
776 DCBX_ETS_CONFIG_TX_ENABLED);
777 else
778 RESET_FLAGS(admin_mib.ver_cfg_flags,
779 DCBX_ETS_CONFIG_TX_ENABLED);
780 /* For IEEE admin_ets_recommendation_tx_enable */
781 if (dp->admin_pfc_tx_enable)
782 SET_FLAGS(admin_mib.ver_cfg_flags,
783 DCBX_PFC_CONFIG_TX_ENABLED);
784 else
785 RESET_FLAGS(admin_mib.ver_cfg_flags,
786 DCBX_PFC_CONFIG_TX_ENABLED);
787
788 if (dp->admin_application_priority_tx_enable)
789 SET_FLAGS(admin_mib.ver_cfg_flags,
790 DCBX_APP_CONFIG_TX_ENABLED);
791 else
792 RESET_FLAGS(admin_mib.ver_cfg_flags,
793 DCBX_APP_CONFIG_TX_ENABLED);
794
795 if (dp->admin_ets_willing)
796 SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
797 else
798 RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
799 /* For IEEE admin_ets_reco_valid */
800 if (dp->admin_pfc_willing)
801 SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
802 else
803 RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
804
805 if (dp->admin_app_priority_willing)
806 SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
807 else
808 RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
809
810 for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) {
811 DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i,
812 (u8)dp->admin_configuration_bw_precentage[i]);
813
814 DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n",
815 i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i));
816 }
817
818 for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
819 DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i,
820 (u8)dp->admin_configuration_ets_pg[i]);
821
822 DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n",
823 i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
824 }
825
826 /*For IEEE admin_recommendation_bw_precentage
827 *For IEEE admin_recommendation_ets_pg */
828 af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
829 for (i = 0; i < 4; i++) {
830 if (dp->admin_priority_app_table[i].valid) {
831 struct bnx2x_admin_priority_app_table *table =
832 dp->admin_priority_app_table;
833 if ((ETH_TYPE_FCOE == table[i].app_id) &&
834 (TRAFFIC_TYPE_ETH == table[i].traffic_type))
835 traf_type = FCOE_APP_IDX;
836 else if ((TCP_PORT_ISCSI == table[i].app_id) &&
837 (TRAFFIC_TYPE_PORT == table[i].traffic_type))
838 traf_type = ISCSI_APP_IDX;
839 else
840 traf_type = other_traf_type++;
841
842 af->app.app_pri_tbl[traf_type].app_id =
843 table[i].app_id;
844
845 af->app.app_pri_tbl[traf_type].pri_bitmap =
846 (u8)(1 << table[i].priority);
847
848 af->app.app_pri_tbl[traf_type].appBitfield =
849 (DCBX_APP_ENTRY_VALID);
850
851 af->app.app_pri_tbl[traf_type].appBitfield |=
852 (TRAFFIC_TYPE_ETH == table[i].traffic_type) ?
853 DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT;
854 }
855 }
856
857 af->app.default_pri = (u8)dp->admin_default_priority;
858
859 } else if (BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE ==
860 dp->overwrite_settings)
861 dp->overwrite_settings = BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID;
862
863 /* Write the data. */
864 buff = (u32 *)&admin_mib;
865 for (i = 0; i < sizeof(struct lldp_admin_mib); i += 4, buff++)
866 REG_WR(bp, (offset + i), *buff);
867}
868
869void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
870{
871 if (CHIP_IS_E2(bp) && !CHIP_MODE_IS_4_PORT(bp)) {
872 bp->dcb_state = dcb_on;
873 bp->dcbx_enabled = dcbx_enabled;
874 } else {
875 bp->dcb_state = false;
876 bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
877 }
878 DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n",
879 dcb_on ? "ON" : "OFF",
880 dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" :
881 dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" :
882 dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ?
883 "on-chip with negotiation" : "invalid");
884}
885
886void bnx2x_dcbx_init_params(struct bnx2x *bp)
887{
888 bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */
889 bp->dcbx_config_params.admin_ets_willing = 1;
890 bp->dcbx_config_params.admin_pfc_willing = 1;
891 bp->dcbx_config_params.overwrite_settings = 1;
892 bp->dcbx_config_params.admin_ets_enable = 1;
893 bp->dcbx_config_params.admin_pfc_enable = 1;
894 bp->dcbx_config_params.admin_tc_supported_tx_enable = 1;
895 bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
896 bp->dcbx_config_params.admin_pfc_tx_enable = 1;
897 bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
898 bp->dcbx_config_params.admin_ets_reco_valid = 1;
899 bp->dcbx_config_params.admin_app_priority_willing = 1;
900 bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 00;
901 bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50;
902 bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50;
903 bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0;
904 bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0;
905 bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0;
906 bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0;
907 bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0;
908 bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1;
909 bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0;
910 bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0;
911 bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2;
912 bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0;
913 bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0;
914 bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0;
915 bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0;
916 bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0;
917 bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1;
918 bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2;
919 bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0;
920 bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7;
921 bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5;
922 bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6;
923 bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7;
924 bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0;
925 bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1;
926 bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2;
927 bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3;
928 bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4;
929 bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5;
930 bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6;
931 bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7;
932 bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */
933 bp->dcbx_config_params.admin_priority_app_table[0].valid = 1;
934 bp->dcbx_config_params.admin_priority_app_table[1].valid = 1;
935 bp->dcbx_config_params.admin_priority_app_table[2].valid = 0;
936 bp->dcbx_config_params.admin_priority_app_table[3].valid = 0;
937 bp->dcbx_config_params.admin_priority_app_table[0].priority = 3;
938 bp->dcbx_config_params.admin_priority_app_table[1].priority = 0;
939 bp->dcbx_config_params.admin_priority_app_table[2].priority = 0;
940 bp->dcbx_config_params.admin_priority_app_table[3].priority = 0;
941 bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0;
942 bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1;
943 bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0;
944 bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0;
945 bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906;
946 bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260;
947 bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0;
948 bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0;
949 bp->dcbx_config_params.admin_default_priority =
950 bp->dcbx_config_params.admin_priority_app_table[1].priority;
951}
952
953void bnx2x_dcbx_init(struct bnx2x *bp)
954{
955 u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
956
957 if (bp->dcbx_enabled <= 0)
958 return;
959
960 /* validate:
961 * chip of good for dcbx version,
962 * dcb is wanted
963 * the function is pmf
964 * shmem2 contains DCBX support fields
965 */
966 DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n",
967 bp->dcb_state, bp->port.pmf);
968
969 if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
970 SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
971 dcbx_lldp_params_offset =
972 SHMEM2_RD(bp, dcbx_lldp_params_offset);
973
974 DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
975 dcbx_lldp_params_offset);
976
977 if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
978 bnx2x_dcbx_lldp_updated_params(bp,
979 dcbx_lldp_params_offset);
980
981 bnx2x_dcbx_admin_mib_updated_params(bp,
982 dcbx_lldp_params_offset);
983
984 /* set default configuration BC has */
985 bnx2x_dcbx_set_params(bp,
986 BNX2X_DCBX_STATE_NEG_RECEIVED);
987
988 bnx2x_fw_command(bp,
989 DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
990 }
991 }
992}
993
994void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp)
995{
996 struct priority_cos pricos[MAX_PFC_TRAFFIC_TYPES];
997 u32 i = 0, addr;
998 memset(pricos, 0, sizeof(pricos));
999 /* Default initialization */
1000 for (i = 0; i < MAX_PFC_TRAFFIC_TYPES; i++)
1001 pricos[i].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
1002
1003 /* Store per port struct to internal memory */
1004 addr = BAR_XSTRORM_INTMEM +
1005 XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
1006 offsetof(struct cmng_struct_per_port,
1007 traffic_type_to_priority_cos);
1008 __storm_memset_struct(bp, addr, sizeof(pricos), (u32 *)pricos);
1009
1010
1011 /* LLFC disabled.*/
1012 REG_WR8(bp , BAR_XSTRORM_INTMEM +
1013 XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
1014 offsetof(struct cmng_struct_per_port, llfc_mode),
1015 LLFC_MODE_NONE);
1016
1017 /* DCBX disabled.*/
1018 REG_WR8(bp , BAR_XSTRORM_INTMEM +
1019 XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
1020 offsetof(struct cmng_struct_per_port, dcb_enabled),
1021 DCB_DISABLED);
1022}
1023
1024static void
1025bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
1026 struct flow_control_configuration *pfc_fw_cfg)
1027{
1028 u8 pri = 0;
1029 u8 cos = 0;
1030
1031 DP(NETIF_MSG_LINK,
1032 "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version);
1033 DP(NETIF_MSG_LINK,
1034 "pdev->params.dcbx_port_params.pfc."
1035 "priority_non_pauseable_mask %x\n",
1036 bp->dcbx_port_params.pfc.priority_non_pauseable_mask);
1037
1038 for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) {
1039 DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
1040 "cos_params[%d].pri_bitmask %x\n", cos,
1041 bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);
1042
1043 DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
1044 "cos_params[%d].bw_tbl %x\n", cos,
1045 bp->dcbx_port_params.ets.cos_params[cos].bw_tbl);
1046
1047 DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
1048 "cos_params[%d].strict %x\n", cos,
1049 bp->dcbx_port_params.ets.cos_params[cos].strict);
1050
1051 DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
1052 "cos_params[%d].pauseable %x\n", cos,
1053 bp->dcbx_port_params.ets.cos_params[cos].pauseable);
1054 }
1055
1056 for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
1057 DP(NETIF_MSG_LINK,
1058 "pfc_fw_cfg->traffic_type_to_priority_cos[%d]."
1059 "priority %x\n", pri,
1060 pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority);
1061
1062 DP(NETIF_MSG_LINK,
1063 "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n",
1064 pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos);
1065 }
1066}
1067
/* fills help_data according to pg_info
 *
 * Groups the driver traffic types by the priority group (PG) their
 * priority was assigned to in @pg_pri_orginal_spread.  On return,
 * help_data->data[] holds one entry per distinct PG (pg number, the
 * bitmask of priorities mapped to it, and how many distinct priorities
 * it carries) and help_data->num_of_pg is the number of such entries.
 * Traffic types whose priority is out of range are skipped.
 */
static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
					    u32 *pg_pri_orginal_spread,
					    struct pg_help_data *help_data)
{
	bool pg_found = false;
	u32 i, traf_type, add_traf_type, add_pg;
	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
	struct pg_entry_help_data *data = help_data->data; /*shotcut*/

	/* Set to invalid */
	for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
		data[i].pg = DCBX_ILLEGAL_PG;

	for (add_traf_type = 0;
	     add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) {
		pg_found = false;
		if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) {
			add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]];
			/* look for an existing entry with the same PG */
			for (traf_type = 0;
			     traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX;
			     traf_type++) {
				if (data[traf_type].pg == add_pg) {
					/* count each distinct priority only
					 * once per PG */
					if (!(data[traf_type].pg_priority &
					     (1 << ttp[add_traf_type])))
						data[traf_type].
							num_of_dif_pri++;
					data[traf_type].pg_priority |=
						(1 << ttp[add_traf_type]);
					pg_found = true;
					break;
				}
			}
			/* first time this PG is seen - open a new entry */
			if (false == pg_found) {
				data[help_data->num_of_pg].pg = add_pg;
				data[help_data->num_of_pg].pg_priority =
						(1 << ttp[add_traf_type]);
				data[help_data->num_of_pg].num_of_dif_pri = 1;
				help_data->num_of_pg++;
			}
		}
		DP(NETIF_MSG_LINK,
		   "add_traf_type %d pg_found %s num_of_pg %d\n",
		   add_traf_type, (false == pg_found) ? "NO" : "YES",
		   help_data->num_of_pg);
	}
}
1115
1116static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
1117 struct cos_help_data *cos_data,
1118 u32 pri_join_mask)
1119{
1120 /* Only one priority than only one COS */
1121 cos_data->data[0].pausable =
1122 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
1123 cos_data->data[0].pri_join_mask = pri_join_mask;
1124 cos_data->data[0].cos_bw = 100;
1125 cos_data->num_of_cos = 1;
1126}
1127
1128static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
1129 struct cos_entry_help_data *data,
1130 u8 pg_bw)
1131{
1132 if (data->cos_bw == DCBX_INVALID_COS_BW)
1133 data->cos_bw = pg_bw;
1134 else
1135 data->cos_bw += pg_bw;
1136}
1137
/* Split the driver traffic types into two COS entries: entry 0 gets all
 * pauseable priorities, entry 1 all non-pauseable ones.  Each entry
 * accumulates the bandwidth of the PGs its priorities came from; a
 * priority mapped to the strict PG marks its entry as high-strict
 * instead of adding bandwidth.
 */
static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
			struct cos_help_data *cos_data,
			u32 *pg_pri_orginal_spread,
			struct dcbx_ets_feature *ets)
{
	u32 pri_tested = 0;
	u8 i = 0;
	u8 entry = 0;
	u8 pg_entry = 0;
	u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;

	cos_data->data[0].pausable = true;
	cos_data->data[1].pausable = false;
	cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;

	for (i = 0 ; i < num_of_pri ; i++) {
		pri_tested = 1 << bp->dcbx_port_params.
					app.traffic_type_priority[i];

		/* route the priority bit to the matching COS entry */
		if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) {
			cos_data->data[1].pri_join_mask |= pri_tested;
			entry = 1;
		} else {
			cos_data->data[0].pri_join_mask |= pri_tested;
			entry = 0;
		}
		pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params.
						app.traffic_type_priority[i]];
		/* There can be only one strict pg */
		if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES)
			bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry],
				DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
		else
			/* If we join a group and one is strict
			 * than the bw rulls */
			cos_data->data[entry].strict =
						BNX2X_DCBX_COS_HIGH_STRICT;
	}
	/* at least one of the two entries must have received a priority */
	if ((0 == cos_data->data[0].pri_join_mask) &&
	    (0 == cos_data->data[1].pri_join_mask))
		BNX2X_ERR("dcbx error: Both groups must have priorities\n");
}
1180
1181
#ifndef POWER_OF_2
/* True when x is a non-zero power of two.  Arguments are fully
 * parenthesized so the macro stays correct for compound expressions
 * such as POWER_OF_2(a | b) - the original expansion left x bare,
 * which mis-parses for any argument containing an operator of lower
 * precedence than '&' or '!='.
 */
#define POWER_OF_2(x)	(((x) != 0) && (((x) & ((x) - 1)) == 0))
#endif
1185
/* Map a single priority group onto (up to) two COS entries.
 *
 * NOTE(review): the "bxn2x_" prefix is a typo for "bnx2x_", but the name
 * is referenced as-is by the caller, so it must stay.
 *
 * One distinct priority -> single COS (ETS effectively disabled).
 * A BW-limited PG (pg < DCBX_MAX_NUM_PG_BW_ENTRIES) is split between
 * pauseable/non-pauseable traffic with a heuristic bandwidth split;
 * the non-BW-limited PG (#15) yields two strict-priority entries.
 */
static void bxn2x_dcbx_single_pg_to_cos_params(struct bnx2x *bp,
					       struct pg_help_data *pg_help_data,
					       struct cos_help_data *cos_data,
					       u32 pri_join_mask,
					       u8 num_of_dif_pri)
{
	u8 i = 0;
	u32 pri_tested = 0;
	u32 pri_mask_without_pri = 0;
	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
	/*debug*/
	if (num_of_dif_pri == 1) {
		bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask);
		return;
	}
	/* single priority group */
	if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
		/* If there are both pauseable and non-pauseable priorities,
		 * the pauseable priorities go to the first queue and
		 * the non-pauseable priorities go to the second queue.
		 */
		if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
			/* Pauseable */
			cos_data->data[0].pausable = true;
			/* Non pauseable.*/
			cos_data->data[1].pausable = false;

			if (2 == num_of_dif_pri) {
				cos_data->data[0].cos_bw = 50;
				cos_data->data[1].cos_bw = 50;
			}

			if (3 == num_of_dif_pri) {
				/* the side holding two of the three
				 * priorities gets the larger share */
				if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp,
							pri_join_mask))) {
					cos_data->data[0].cos_bw = 33;
					cos_data->data[1].cos_bw = 67;
				} else {
					cos_data->data[0].cos_bw = 67;
					cos_data->data[1].cos_bw = 33;
				}
			}

		} else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) {
			/* If there are only pauseable priorities,
			 * then one/two priorities go to the first queue
			 * and one priority goes to the second queue.
			 */
			if (2 == num_of_dif_pri) {
				cos_data->data[0].cos_bw = 50;
				cos_data->data[1].cos_bw = 50;
			} else {
				cos_data->data[0].cos_bw = 67;
				cos_data->data[1].cos_bw = 33;
			}
			cos_data->data[1].pausable = true;
			cos_data->data[0].pausable = true;
			/* All priorities except FCOE */
			cos_data->data[0].pri_join_mask = (pri_join_mask &
				((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE])));
			/* Only FCOE priority.*/
			cos_data->data[1].pri_join_mask =
				(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]);
		} else
			/* If there are only non-pauseable priorities,
			 * they will all go to the same queue.
			 */
			bnx2x_dcbx_ets_disabled_entry_data(bp,
						cos_data, pri_join_mask);
	} else {
		/* priority group which is not BW limited (PG#15):*/
		if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
			/* If there are both pauseable and non-pauseable
			 * priorities, the pauseable priorities go to the first
			 * queue and the non-pauseable priorities
			 * go to the second queue.
			 */
			if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) >
			    DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) {
				cos_data->data[0].strict =
					BNX2X_DCBX_COS_HIGH_STRICT;
				cos_data->data[1].strict =
					BNX2X_DCBX_COS_LOW_STRICT;
			} else {
				cos_data->data[0].strict =
					BNX2X_DCBX_COS_LOW_STRICT;
				cos_data->data[1].strict =
					BNX2X_DCBX_COS_HIGH_STRICT;
			}
			/* Pauseable */
			cos_data->data[0].pausable = true;
			/* Non pause-able.*/
			cos_data->data[1].pausable = false;
		} else {
			/* If there are only pauseable priorities or
			 * only non-pauseable,* the lower priorities go
			 * to the first queue and the higherpriorities go
			 * to the second queue.
			 */
			cos_data->data[0].pausable =
				cos_data->data[1].pausable =
				IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);

			/* find the traffic type whose priority bit exceeds
			 * the combined mask of all the others */
			for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) {
				pri_tested = 1 << bp->dcbx_port_params.
					app.traffic_type_priority[i];
				/* Remove priority tested */
				pri_mask_without_pri =
					(pri_join_mask & ((u8)(~pri_tested)));
				if (pri_mask_without_pri < pri_tested)
					break;
			}

			if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX)
				BNX2X_ERR("Invalid value for pri_join_mask -"
					  " could not find a priority\n");

			cos_data->data[0].pri_join_mask = pri_mask_without_pri;
			cos_data->data[1].pri_join_mask = pri_tested;
			/* Both queues are strict priority,
			 * and that with the highest priority
			 * gets the highest strict priority in the arbiter.
			 */
			cos_data->data[0].strict = BNX2X_DCBX_COS_LOW_STRICT;
			cos_data->data[1].strict = BNX2X_DCBX_COS_HIGH_STRICT;
		}
	}
}
1314
/* Map two priority groups onto the two COS entries.
 *
 * When priorities are mixed pauseable/non-pauseable, entry 0 takes the
 * pauseable side and entry 1 the non-pauseable side; if a single PG
 * mixes both kinds, the split falls back to
 * bnx2x_dcbx_separate_pauseable_from_non() and ETS is disabled.
 * Otherwise each PG simply maps to one COS entry.  A PG that is not
 * BW-limited (the strict PG) makes its entry high-strict instead of
 * receiving a bandwidth share.
 */
static void bnx2x_dcbx_two_pg_to_cos_params(
			    struct bnx2x *bp,
			    struct pg_help_data *pg_help_data,
			    struct dcbx_ets_feature *ets,
			    struct cos_help_data *cos_data,
			    u32 *pg_pri_orginal_spread,
			    u32 pri_join_mask,
			    u8 num_of_dif_pri)
{
	u8 i = 0;
	u8 pg[E2_NUM_OF_COS] = {0};

	/* If there are both pauseable and non-pauseable priorities,
	 * the pauseable priorities go to the first queue and
	 * the non-pauseable priorities go to the second queue.
	 */
	if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
		if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
					 pg_help_data->data[0].pg_priority) ||
		    IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
					 pg_help_data->data[1].pg_priority)) {
			/* If one PG contains both pauseable and
			 * non-pauseable priorities then ETS is disabled.
			 */
			bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data,
					pg_pri_orginal_spread, ets);
			bp->dcbx_port_params.ets.enabled = false;
			return;
		}

		/* Pauseable */
		cos_data->data[0].pausable = true;
		/* Non pauseable. */
		cos_data->data[1].pausable = false;
		if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp,
				pg_help_data->data[0].pg_priority)) {
			/* 0 is pauseable */
			cos_data->data[0].pri_join_mask =
				pg_help_data->data[0].pg_priority;
			pg[0] = pg_help_data->data[0].pg;
			cos_data->data[1].pri_join_mask =
				pg_help_data->data[1].pg_priority;
			pg[1] = pg_help_data->data[1].pg;
		} else {/* 1 is pauseable */
			cos_data->data[0].pri_join_mask =
				pg_help_data->data[1].pg_priority;
			pg[0] = pg_help_data->data[1].pg;
			cos_data->data[1].pri_join_mask =
				pg_help_data->data[0].pg_priority;
			pg[1] = pg_help_data->data[0].pg;
		}
	} else {
		/* If there are only pauseable priorities or
		 * only non-pauseable, each PG goes to a queue.
		 */
		cos_data->data[0].pausable = cos_data->data[1].pausable =
			IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
		cos_data->data[0].pri_join_mask =
			pg_help_data->data[0].pg_priority;
		pg[0] = pg_help_data->data[0].pg;
		cos_data->data[1].pri_join_mask =
			pg_help_data->data[1].pg_priority;
		pg[1] = pg_help_data->data[1].pg;
	}

	/* There can be only one strict pg */
	for (i = 0 ; i < E2_NUM_OF_COS; i++) {
		if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES)
			cos_data->data[i].cos_bw =
				DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]);
		else
			cos_data->data[i].strict = BNX2X_DCBX_COS_HIGH_STRICT;
	}
}
1389
/* Map three priority groups onto the two COS entries.
 *
 * Mixed pauseable/non-pauseable input is delegated to
 * bnx2x_dcbx_separate_pauseable_from_non().  Otherwise BW-limited PGs
 * accumulate in entry 0, the strict PG (or, if none is strict, the
 * last traffic type) lands in entry 1.
 */
static void bnx2x_dcbx_three_pg_to_cos_params(
			      struct bnx2x *bp,
			      struct pg_help_data *pg_help_data,
			      struct dcbx_ets_feature *ets,
			      struct cos_help_data *cos_data,
			      u32 *pg_pri_orginal_spread,
			      u32 pri_join_mask,
			      u8 num_of_dif_pri)
{
	u8 i = 0;
	u32 pri_tested = 0;
	u8 entry = 0;
	u8 pg_entry = 0;
	bool b_found_strict = false;
	u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;

	cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
	/* If there are both pauseable and non-pauseable priorities,
	 * the pauseable priorities go to the first queue and the
	 * non-pauseable priorities go to the second queue.
	 */
	if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask))
		bnx2x_dcbx_separate_pauseable_from_non(bp,
				cos_data, pg_pri_orginal_spread, ets);
	else {
		/* If two BW-limited PG-s were combined to one queue,
		 * the BW is their sum.
		 *
		 * If there are only pauseable priorities or only non-pauseable,
		 * and there are both BW-limited and non-BW-limited PG-s,
		 * the BW-limited PG/s go to one queue and the non-BW-limited
		 * PG/s go to the second queue.
		 *
		 * If there are only pauseable priorities or only non-pauseable
		 * and all are BW limited, then two priorities go to the first
		 * queue and one priority goes to the second queue.
		 *
		 * We will join this two cases:
		 * if one is BW limited it will go to the secoend queue
		 * otherwise the last priority will get it
		 */

		cos_data->data[0].pausable = cos_data->data[1].pausable =
			IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);

		for (i = 0 ; i < num_of_pri; i++) {
			pri_tested = 1 << bp->dcbx_port_params.
				app.traffic_type_priority[i];
			pg_entry = (u8)pg_pri_orginal_spread[bp->
				dcbx_port_params.app.traffic_type_priority[i]];

			if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
				entry = 0;

				if (i == (num_of_pri-1) &&
				    false == b_found_strict)
					/* last entry will be handled separately
					 * If no priority is strict than last
					 * enty goes to last queue.*/
					entry = 1;
				cos_data->data[entry].pri_join_mask |=
								pri_tested;
				bnx2x_dcbx_add_to_cos_bw(bp,
					&cos_data->data[entry],
					DCBX_PG_BW_GET(ets->pg_bw_tbl,
						       pg_entry));
			} else {
				b_found_strict = true;
				cos_data->data[1].pri_join_mask |= pri_tested;
				/* If we join a group and one is strict
				 * than the bw rulls */
				cos_data->data[1].strict =
					BNX2X_DCBX_COS_HIGH_STRICT;
			}
		}
	}
}
1467
1468
/* Translate the negotiated PG layout into the driver's per-port COS
 * parameters (bp->dcbx_port_params.ets.cos_params[]).
 *
 * Dispatches on how many distinct PGs were found (1/2/3); anything
 * else collapses to a single ETS-disabled COS entry.  Each resulting
 * COS entry then gets its priority bitmask filtered to the pauseable
 * or non-pauseable subset according to its pausable flag.
 */
static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
				       struct pg_help_data *help_data,
				       struct dcbx_ets_feature *ets,
				       u32 *pg_pri_orginal_spread)
{
	struct cos_help_data cos_data;
	u8 i = 0;
	u32 pri_join_mask = 0;
	u8 num_of_dif_pri = 0;

	memset(&cos_data, 0, sizeof(cos_data));
	/* Validate the pg value */
	for (i = 0; i < help_data->num_of_pg ; i++) {
		/* a PG must be either BW-limited or the strict PG */
		if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
		    DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
			BNX2X_ERR("Invalid pg[%d] data %x\n", i,
				  help_data->data[i].pg);
		pri_join_mask |= help_data->data[i].pg_priority;
		num_of_dif_pri += help_data->data[i].num_of_dif_pri;
	}

	/* default settings */
	cos_data.num_of_cos = 2;
	for (i = 0; i < E2_NUM_OF_COS ; i++) {
		cos_data.data[i].pri_join_mask = pri_join_mask;
		cos_data.data[i].pausable = false;
		cos_data.data[i].strict = BNX2X_DCBX_COS_NOT_STRICT;
		cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
	}

	switch (help_data->num_of_pg) {
	case 1:

		bxn2x_dcbx_single_pg_to_cos_params(
					       bp,
					       help_data,
					       &cos_data,
					       pri_join_mask,
					       num_of_dif_pri);
		break;
	case 2:
		bnx2x_dcbx_two_pg_to_cos_params(
					    bp,
					    help_data,
					    ets,
					    &cos_data,
					    pg_pri_orginal_spread,
					    pri_join_mask,
					    num_of_dif_pri);
		break;

	case 3:
		bnx2x_dcbx_three_pg_to_cos_params(
					      bp,
					      help_data,
					      ets,
					      &cos_data,
					      pg_pri_orginal_spread,
					      pri_join_mask,
					      num_of_dif_pri);

		break;
	default:
		BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
		bnx2x_dcbx_ets_disabled_entry_data(bp,
						   &cos_data, pri_join_mask);
	}

	/* copy the computed entries into the port params, keeping only
	 * the pauseable (or non-pauseable) bits of each join mask */
	for (i = 0; i < cos_data.num_of_cos ; i++) {
		struct bnx2x_dcbx_cos_params *params =
			&bp->dcbx_port_params.ets.cos_params[i];

		params->pauseable = cos_data.data[i].pausable;
		params->strict = cos_data.data[i].strict;
		params->bw_tbl = cos_data.data[i].cos_bw;
		if (params->pauseable) {
			params->pri_bitmask =
				DCBX_PFC_PRI_GET_PAUSE(bp,
						cos_data.data[i].pri_join_mask);
			DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n",
			   i, cos_data.data[i].pri_join_mask);
		} else {
			params->pri_bitmask =
				DCBX_PFC_PRI_GET_NON_PAUSE(bp,
						cos_data.data[i].pri_join_mask);
			DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask "
					   "0x%x\n",
			   i, cos_data.data[i].pri_join_mask);
		}
	}

	bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos;
}
1562
1563static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
1564 u32 *set_configuration_ets_pg,
1565 u32 *pri_pg_tbl)
1566{
1567 int i;
1568
1569 for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
1570 set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i);
1571
1572 DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n",
1573 i, set_configuration_ets_pg[i]);
1574 }
1575}
1576
/* Build the E2 flow_control_configuration structure in the slow-path
 * buffer for the firmware: bump the dcb version, mark DCB enabled, and
 * fill the traffic-type -> (priority, cos) table from the negotiated
 * port parameters.  Results are dumped via
 * bnx2x_dcbx_print_cos_params().
 */
static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
{
	struct flow_control_configuration *pfc_fw_cfg = NULL;
	u16 pri_bit = 0;
	u8 cos = 0, pri = 0;
	struct priority_cos *tt2cos;
	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;

	pfc_fw_cfg = (struct flow_control_configuration *)
					bnx2x_sp(bp, pfc_config);
	memset(pfc_fw_cfg, 0, sizeof(struct flow_control_configuration));

	/*shortcut*/
	tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos;

	/* Fw version should be incremented each update */
	pfc_fw_cfg->dcb_version = ++bp->dcb_version;
	pfc_fw_cfg->dcb_enabled = DCB_ENABLED;

	/* Default initialization: every traffic type unmapped, cos 0 */
	for (pri = 0; pri < MAX_PFC_TRAFFIC_TYPES ; pri++) {
		tt2cos[pri].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
		tt2cos[pri].cos = 0;
	}

	/* Fill priority parameters */
	for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
		tt2cos[pri].priority = ttp[pri];
		pri_bit = 1 << tt2cos[pri].priority;

		/* Fill COS parameters based on COS calculated to
		 * make it more generally for future use */
		for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
			if (bp->dcbx_port_params.ets.cos_params[cos].
						pri_bitmask & pri_bit)
				tt2cos[pri].cos = cos;
	}
	bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
}
1616/* DCB netlink */
1617#ifdef BCM_DCBNL
1618
1619#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \
1620 DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
1621
1622static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp)
1623{
1624 /* validate dcbnl call that may change HW state:
1625 * DCB is on and DCBX mode was SUCCESSFULLY set by the user.
1626 */
1627 return bp->dcb_state && bp->dcbx_mode_uset;
1628}
1629
1630static u8 bnx2x_dcbnl_get_state(struct net_device *netdev)
1631{
1632 struct bnx2x *bp = netdev_priv(netdev);
1633 DP(NETIF_MSG_LINK, "state = %d\n", bp->dcb_state);
1634 return bp->dcb_state;
1635}
1636
1637static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
1638{
1639 struct bnx2x *bp = netdev_priv(netdev);
1640 DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
1641
1642 bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
1643 return 0;
1644}
1645
1646static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
1647 u8 *perm_addr)
1648{
1649 struct bnx2x *bp = netdev_priv(netdev);
1650 DP(NETIF_MSG_LINK, "GET-PERM-ADDR\n");
1651
1652 /* first the HW mac address */
1653 memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
1654
1655#ifdef BCM_CNIC
1656 /* second SAN address */
1657 memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len);
1658#endif
1659}
1660
1661static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
1662 u8 prio_type, u8 pgid, u8 bw_pct,
1663 u8 up_map)
1664{
1665 struct bnx2x *bp = netdev_priv(netdev);
1666
1667 DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, pgid);
1668 if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
1669 return;
1670
1671 /**
1672 * bw_pct ingnored - band-width percentage devision between user
1673 * priorities within the same group is not
1674 * standard and hence not supported
1675 *
1676 * prio_type igonred - priority levels within the same group are not
1677 * standard and hence are not supported. According
1678 * to the standard pgid 15 is dedicated to strict
1679 * prioirty traffic (on the port level).
1680 *
1681 * up_map ignored
1682 */
1683
1684 bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid;
1685 bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
1686}
1687
1688static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev,
1689 int pgid, u8 bw_pct)
1690{
1691 struct bnx2x *bp = netdev_priv(netdev);
1692 DP(NETIF_MSG_LINK, "pgid[%d] = %d\n", pgid, bw_pct);
1693
1694 if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
1695 return;
1696
1697 bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct;
1698 bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
1699}
1700
1701static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio,
1702 u8 prio_type, u8 pgid, u8 bw_pct,
1703 u8 up_map)
1704{
1705 struct bnx2x *bp = netdev_priv(netdev);
1706 DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
1707}
1708
1709static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev,
1710 int pgid, u8 bw_pct)
1711{
1712 struct bnx2x *bp = netdev_priv(netdev);
1713 DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
1714}
1715
1716static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
1717 u8 *prio_type, u8 *pgid, u8 *bw_pct,
1718 u8 *up_map)
1719{
1720 struct bnx2x *bp = netdev_priv(netdev);
1721 DP(NETIF_MSG_LINK, "prio = %d\n", prio);
1722
1723 /**
1724 * bw_pct ingnored - band-width percentage devision between user
1725 * priorities within the same group is not
1726 * standard and hence not supported
1727 *
1728 * prio_type igonred - priority levels within the same group are not
1729 * standard and hence are not supported. According
1730 * to the standard pgid 15 is dedicated to strict
1731 * prioirty traffic (on the port level).
1732 *
1733 * up_map ignored
1734 */
1735 *up_map = *bw_pct = *prio_type = *pgid = 0;
1736
1737 if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
1738 return;
1739
1740 *pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio);
1741}
1742
1743static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev,
1744 int pgid, u8 *bw_pct)
1745{
1746 struct bnx2x *bp = netdev_priv(netdev);
1747 DP(NETIF_MSG_LINK, "pgid = %d\n", pgid);
1748
1749 *bw_pct = 0;
1750
1751 if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
1752 return;
1753
1754 *bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid);
1755}
1756
1757static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio,
1758 u8 *prio_type, u8 *pgid, u8 *bw_pct,
1759 u8 *up_map)
1760{
1761 struct bnx2x *bp = netdev_priv(netdev);
1762 DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
1763
1764 *prio_type = *pgid = *bw_pct = *up_map = 0;
1765}
1766
1767static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev,
1768 int pgid, u8 *bw_pct)
1769{
1770 struct bnx2x *bp = netdev_priv(netdev);
1771 DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
1772
1773 *bw_pct = 0;
1774}
1775
1776static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
1777 u8 setting)
1778{
1779 struct bnx2x *bp = netdev_priv(netdev);
1780 DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, setting);
1781
1782 if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
1783 return;
1784
1785 bp->dcbx_config_params.admin_pfc_bitmap |= ((setting ? 1 : 0) << prio);
1786
1787 if (setting)
1788 bp->dcbx_config_params.admin_pfc_tx_enable = 1;
1789}
1790
1791static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
1792 u8 *setting)
1793{
1794 struct bnx2x *bp = netdev_priv(netdev);
1795 DP(NETIF_MSG_LINK, "prio = %d\n", prio);
1796
1797 *setting = 0;
1798
1799 if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES)
1800 return;
1801
1802 *setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1;
1803}
1804
1805static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
1806{
1807 struct bnx2x *bp = netdev_priv(netdev);
1808 int rc = 0;
1809
1810 DP(NETIF_MSG_LINK, "SET-ALL\n");
1811
1812 if (!bnx2x_dcbnl_set_valid(bp))
1813 return 1;
1814
1815 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1816 netdev_err(bp->dev, "Handling parity error recovery. "
1817 "Try again later\n");
1818 return 1;
1819 }
1820 if (netif_running(bp->dev)) {
1821 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
1822 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
1823 }
1824 DP(NETIF_MSG_LINK, "set_dcbx_params done (%d)\n", rc);
1825 if (rc)
1826 return 1;
1827
1828 return 0;
1829}
1830
1831static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
1832{
1833 struct bnx2x *bp = netdev_priv(netdev);
1834 u8 rval = 0;
1835
1836 if (bp->dcb_state) {
1837 switch (capid) {
1838 case DCB_CAP_ATTR_PG:
1839 *cap = true;
1840 break;
1841 case DCB_CAP_ATTR_PFC:
1842 *cap = true;
1843 break;
1844 case DCB_CAP_ATTR_UP2TC:
1845 *cap = false;
1846 break;
1847 case DCB_CAP_ATTR_PG_TCS:
1848 *cap = 0x80; /* 8 priorities for PGs */
1849 break;
1850 case DCB_CAP_ATTR_PFC_TCS:
1851 *cap = 0x80; /* 8 priorities for PFC */
1852 break;
1853 case DCB_CAP_ATTR_GSP:
1854 *cap = true;
1855 break;
1856 case DCB_CAP_ATTR_BCN:
1857 *cap = false;
1858 break;
1859 case DCB_CAP_ATTR_DCBX:
1860 *cap = BNX2X_DCBX_CAPS;
1861 default:
1862 rval = -EINVAL;
1863 break;
1864 }
1865 } else
1866 rval = -EINVAL;
1867
1868 DP(NETIF_MSG_LINK, "capid %d:%x\n", capid, *cap);
1869 return rval;
1870}
1871
1872static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
1873{
1874 struct bnx2x *bp = netdev_priv(netdev);
1875 u8 rval = 0;
1876
1877 DP(NETIF_MSG_LINK, "tcid %d\n", tcid);
1878
1879 if (bp->dcb_state) {
1880 switch (tcid) {
1881 case DCB_NUMTCS_ATTR_PG:
1882 *num = E2_NUM_OF_COS;
1883 break;
1884 case DCB_NUMTCS_ATTR_PFC:
1885 *num = E2_NUM_OF_COS;
1886 break;
1887 default:
1888 rval = -EINVAL;
1889 break;
1890 }
1891 } else
1892 rval = -EINVAL;
1893
1894 return rval;
1895}
1896
1897static u8 bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
1898{
1899 struct bnx2x *bp = netdev_priv(netdev);
1900 DP(NETIF_MSG_LINK, "num tcs = %d; Not supported\n", num);
1901 return -EINVAL;
1902}
1903
1904static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
1905{
1906 struct bnx2x *bp = netdev_priv(netdev);
1907 DP(NETIF_MSG_LINK, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
1908
1909 if (!bp->dcb_state)
1910 return 0;
1911
1912 return bp->dcbx_local_feat.pfc.enabled;
1913}
1914
1915static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
1916{
1917 struct bnx2x *bp = netdev_priv(netdev);
1918 DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
1919
1920 if (!bnx2x_dcbnl_set_valid(bp))
1921 return;
1922
1923 bp->dcbx_config_params.admin_pfc_tx_enable =
1924 bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
1925}
1926
1927static void bnx2x_admin_app_set_ent(
1928 struct bnx2x_admin_priority_app_table *app_ent,
1929 u8 idtype, u16 idval, u8 up)
1930{
1931 app_ent->valid = 1;
1932
1933 switch (idtype) {
1934 case DCB_APP_IDTYPE_ETHTYPE:
1935 app_ent->traffic_type = TRAFFIC_TYPE_ETH;
1936 break;
1937 case DCB_APP_IDTYPE_PORTNUM:
1938 app_ent->traffic_type = TRAFFIC_TYPE_PORT;
1939 break;
1940 default:
1941 break; /* never gets here */
1942 }
1943 app_ent->app_id = idval;
1944 app_ent->priority = up;
1945}
1946
1947static bool bnx2x_admin_app_is_equal(
1948 struct bnx2x_admin_priority_app_table *app_ent,
1949 u8 idtype, u16 idval)
1950{
1951 if (!app_ent->valid)
1952 return false;
1953
1954 switch (idtype) {
1955 case DCB_APP_IDTYPE_ETHTYPE:
1956 if (app_ent->traffic_type != TRAFFIC_TYPE_ETH)
1957 return false;
1958 break;
1959 case DCB_APP_IDTYPE_PORTNUM:
1960 if (app_ent->traffic_type != TRAFFIC_TYPE_PORT)
1961 return false;
1962 break;
1963 default:
1964 return false;
1965 }
1966 if (app_ent->app_id != idval)
1967 return false;
1968
1969 return true;
1970}
1971
1972static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
1973{
1974 int i, ff;
1975
1976 /* iterate over the app entries looking for idtype and idval */
1977 for (i = 0, ff = -1; i < 4; i++) {
1978 struct bnx2x_admin_priority_app_table *app_ent =
1979 &bp->dcbx_config_params.admin_priority_app_table[i];
1980 if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
1981 break;
1982
1983 if (ff < 0 && !app_ent->valid)
1984 ff = i;
1985 }
1986 if (i < 4)
1987 /* if found overwrite up */
1988 bp->dcbx_config_params.
1989 admin_priority_app_table[i].priority = up;
1990 else if (ff >= 0)
1991 /* not found use first-free */
1992 bnx2x_admin_app_set_ent(
1993 &bp->dcbx_config_params.admin_priority_app_table[ff],
1994 idtype, idval, up);
1995 else
1996 /* app table is full */
1997 return -EBUSY;
1998
1999 /* up configured, if not 0 make sure feature is enabled */
2000 if (up)
2001 bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
2002
2003 return 0;
2004}
2005
2006static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
2007 u16 idval, u8 up)
2008{
2009 struct bnx2x *bp = netdev_priv(netdev);
2010
2011 DP(NETIF_MSG_LINK, "app_type %d, app_id %x, prio bitmap %d\n",
2012 idtype, idval, up);
2013
2014 if (!bnx2x_dcbnl_set_valid(bp))
2015 return -EINVAL;
2016
2017 /* verify idtype */
2018 switch (idtype) {
2019 case DCB_APP_IDTYPE_ETHTYPE:
2020 case DCB_APP_IDTYPE_PORTNUM:
2021 break;
2022 default:
2023 return -EINVAL;
2024 }
2025 return bnx2x_set_admin_app_up(bp, idtype, idval, up);
2026}
2027
2028static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
2029{
2030 struct bnx2x *bp = netdev_priv(netdev);
2031 u8 state;
2032
2033 state = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE;
2034
2035 if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF)
2036 state |= DCB_CAP_DCBX_STATIC;
2037
2038 return state;
2039}
2040
2041static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state)
2042{
2043 struct bnx2x *bp = netdev_priv(netdev);
2044 DP(NETIF_MSG_LINK, "state = %02x\n", state);
2045
2046 /* set dcbx mode */
2047
2048 if ((state & BNX2X_DCBX_CAPS) != state) {
2049 BNX2X_ERR("Requested DCBX mode %x is beyond advertised "
2050 "capabilities\n", state);
2051 return 1;
2052 }
2053
2054 if (bp->dcb_state != BNX2X_DCB_STATE_ON) {
2055 BNX2X_ERR("DCB turned off, DCBX configuration is invalid\n");
2056 return 1;
2057 }
2058
2059 if (state & DCB_CAP_DCBX_STATIC)
2060 bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF;
2061 else
2062 bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON;
2063
2064 bp->dcbx_mode_uset = true;
2065 return 0;
2066}
2067
2068static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
2069 u8 *flags)
2070{
2071 struct bnx2x *bp = netdev_priv(netdev);
2072 u8 rval = 0;
2073
2074 DP(NETIF_MSG_LINK, "featid %d\n", featid);
2075
2076 if (bp->dcb_state) {
2077 *flags = 0;
2078 switch (featid) {
2079 case DCB_FEATCFG_ATTR_PG:
2080 if (bp->dcbx_local_feat.ets.enabled)
2081 *flags |= DCB_FEATCFG_ENABLE;
2082 if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
2083 *flags |= DCB_FEATCFG_ERROR;
2084 break;
2085 case DCB_FEATCFG_ATTR_PFC:
2086 if (bp->dcbx_local_feat.pfc.enabled)
2087 *flags |= DCB_FEATCFG_ENABLE;
2088 if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
2089 DCBX_LOCAL_PFC_MISMATCH))
2090 *flags |= DCB_FEATCFG_ERROR;
2091 break;
2092 case DCB_FEATCFG_ATTR_APP:
2093 if (bp->dcbx_local_feat.app.enabled)
2094 *flags |= DCB_FEATCFG_ENABLE;
2095 if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
2096 DCBX_LOCAL_APP_MISMATCH))
2097 *flags |= DCB_FEATCFG_ERROR;
2098 break;
2099 default:
2100 rval = -EINVAL;
2101 break;
2102 }
2103 } else
2104 rval = -EINVAL;
2105
2106 return rval;
2107}
2108
2109static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
2110 u8 flags)
2111{
2112 struct bnx2x *bp = netdev_priv(netdev);
2113 u8 rval = 0;
2114
2115 DP(NETIF_MSG_LINK, "featid = %d flags = %02x\n", featid, flags);
2116
2117 /* ignore the 'advertise' flag */
2118 if (bnx2x_dcbnl_set_valid(bp)) {
2119 switch (featid) {
2120 case DCB_FEATCFG_ATTR_PG:
2121 bp->dcbx_config_params.admin_ets_enable =
2122 flags & DCB_FEATCFG_ENABLE ? 1 : 0;
2123 bp->dcbx_config_params.admin_ets_willing =
2124 flags & DCB_FEATCFG_WILLING ? 1 : 0;
2125 break;
2126 case DCB_FEATCFG_ATTR_PFC:
2127 bp->dcbx_config_params.admin_pfc_enable =
2128 flags & DCB_FEATCFG_ENABLE ? 1 : 0;
2129 bp->dcbx_config_params.admin_pfc_willing =
2130 flags & DCB_FEATCFG_WILLING ? 1 : 0;
2131 break;
2132 case DCB_FEATCFG_ATTR_APP:
2133 /* ignore enable, always enabled */
2134 bp->dcbx_config_params.admin_app_priority_willing =
2135 flags & DCB_FEATCFG_WILLING ? 1 : 0;
2136 break;
2137 default:
2138 rval = -EINVAL;
2139 break;
2140 }
2141 } else
2142 rval = -EINVAL;
2143
2144 return rval;
2145}
2146
2147static int bnx2x_peer_appinfo(struct net_device *netdev,
2148 struct dcb_peer_app_info *info, u16* app_count)
2149{
2150 int i;
2151 struct bnx2x *bp = netdev_priv(netdev);
2152
2153 DP(NETIF_MSG_LINK, "APP-INFO\n");
2154
2155 info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0;
2156 info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0;
2157 *app_count = 0;
2158
2159 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
2160 if (bp->dcbx_remote_feat.app.app_pri_tbl[i].appBitfield &
2161 DCBX_APP_ENTRY_VALID)
2162 (*app_count)++;
2163 return 0;
2164}
2165
2166static int bnx2x_peer_apptable(struct net_device *netdev,
2167 struct dcb_app *table)
2168{
2169 int i, j;
2170 struct bnx2x *bp = netdev_priv(netdev);
2171
2172 DP(NETIF_MSG_LINK, "APP-TABLE\n");
2173
2174 for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
2175 struct dcbx_app_priority_entry *ent =
2176 &bp->dcbx_remote_feat.app.app_pri_tbl[i];
2177
2178 if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
2179 table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
2180 table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent);
2181 table[j++].protocol = ent->app_id;
2182 }
2183 }
2184 return 0;
2185}
2186
2187static int bnx2x_cee_peer_getpg(struct net_device *netdev, struct cee_pg *pg)
2188{
2189 int i;
2190 struct bnx2x *bp = netdev_priv(netdev);
2191
2192 pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0;
2193
2194 for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
2195 pg->pg_bw[i] =
2196 DCBX_PG_BW_GET(bp->dcbx_remote_feat.ets.pg_bw_tbl, i);
2197 pg->prio_pg[i] =
2198 DCBX_PRI_PG_GET(bp->dcbx_remote_feat.ets.pri_pg_tbl, i);
2199 }
2200 return 0;
2201}
2202
2203static int bnx2x_cee_peer_getpfc(struct net_device *netdev,
2204 struct cee_pfc *pfc)
2205{
2206 struct bnx2x *bp = netdev_priv(netdev);
2207 pfc->tcs_supported = bp->dcbx_remote_feat.pfc.pfc_caps;
2208 pfc->pfc_en = bp->dcbx_remote_feat.pfc.pri_en_bitmap;
2209 return 0;
2210}
2211
/* DCB netlink entry points exposed to the kernel dcbnl layer.
 * Get/set pairs cover state, PG (ETS), PFC, capabilities, TC counts,
 * application priorities and the DCBX mode itself; the peer_* and
 * cee_peer_* callbacks report the remote (peer) CEE configuration.
 */
const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
	.getstate	= bnx2x_dcbnl_get_state,
	.setstate	= bnx2x_dcbnl_set_state,
	.getpermhwaddr	= bnx2x_dcbnl_get_perm_hw_addr,
	.setpgtccfgtx	= bnx2x_dcbnl_set_pg_tccfg_tx,
	.setpgbwgcfgtx	= bnx2x_dcbnl_set_pg_bwgcfg_tx,
	.setpgtccfgrx	= bnx2x_dcbnl_set_pg_tccfg_rx,
	.setpgbwgcfgrx	= bnx2x_dcbnl_set_pg_bwgcfg_rx,
	.getpgtccfgtx	= bnx2x_dcbnl_get_pg_tccfg_tx,
	.getpgbwgcfgtx	= bnx2x_dcbnl_get_pg_bwgcfg_tx,
	.getpgtccfgrx	= bnx2x_dcbnl_get_pg_tccfg_rx,
	.getpgbwgcfgrx	= bnx2x_dcbnl_get_pg_bwgcfg_rx,
	.setpfccfg	= bnx2x_dcbnl_set_pfc_cfg,
	.getpfccfg	= bnx2x_dcbnl_get_pfc_cfg,
	.setall		= bnx2x_dcbnl_set_all,
	.getcap		= bnx2x_dcbnl_get_cap,
	.getnumtcs	= bnx2x_dcbnl_get_numtcs,
	.setnumtcs	= bnx2x_dcbnl_set_numtcs,
	.getpfcstate	= bnx2x_dcbnl_get_pfc_state,
	.setpfcstate	= bnx2x_dcbnl_set_pfc_state,
	.setapp		= bnx2x_dcbnl_set_app_up,
	.getdcbx	= bnx2x_dcbnl_get_dcbx,
	.setdcbx	= bnx2x_dcbnl_set_dcbx,
	.getfeatcfg	= bnx2x_dcbnl_get_featcfg,
	.setfeatcfg	= bnx2x_dcbnl_set_featcfg,
	.peer_getappinfo  = bnx2x_peer_appinfo,
	.peer_getapptable = bnx2x_peer_apptable,
	.cee_peer_getpg   = bnx2x_cee_peer_getpg,
	.cee_peer_getpfc  = bnx2x_cee_peer_getpfc,
};
2242
2243#endif /* BCM_DCBNL */
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
new file mode 100644
index 000000000000..bed369d67e02
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -0,0 +1,195 @@
1/* bnx2x_dcb.h: Broadcom Everest network driver.
2 *
3 * Copyright 2009-2011 Broadcom Corporation
4 *
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Dmitry Kravkov
17 *
18 */
#ifndef BNX2X_DCB_H
#define BNX2X_DCB_H

#include "bnx2x_hsi.h"

#define LLFC_DRIVER_TRAFFIC_TYPE_MAX 3 /* NW, iSCSI, FCoE */

/* Per-traffic-type application priorities negotiated via DCBX. */
struct bnx2x_dcbx_app_params {
	u32 enabled;
	u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
};

#define E2_NUM_OF_COS 2
/* strict-priority level of a CoS queue */
#define BNX2X_DCBX_COS_NOT_STRICT 0
#define BNX2X_DCBX_COS_LOW_STRICT 1
#define BNX2X_DCBX_COS_HIGH_STRICT 2

/* Per-CoS ETS parameters: bandwidth share or strict priority. */
struct bnx2x_dcbx_cos_params {
	u32 bw_tbl;		/* bandwidth allocation for this CoS */
	u32 pri_bitmask;	/* priorities mapped onto this CoS */
	u8 strict;		/* one of BNX2X_DCBX_COS_*_STRICT */
	u8 pauseable;		/* whether PFC pause applies */
};

/* Priority-group (ETS) settings for the port. */
struct bnx2x_dcbx_pg_params {
	u32 enabled;
	u8 num_of_cos; /* valid COS entries */
	struct bnx2x_dcbx_cos_params cos_params[E2_NUM_OF_COS];
};

/* Priority flow control settings for the port. */
struct bnx2x_dcbx_pfc_params {
	u32 enabled;
	u32 priority_non_pauseable_mask;
};

/* Aggregated per-port DCBX runtime configuration. */
struct bnx2x_dcbx_port_params {
	struct bnx2x_dcbx_pfc_params pfc;
	struct bnx2x_dcbx_pg_params ets;
	struct bnx2x_dcbx_app_params app;
};
58
/* sentinel for "setting not configured" */
#define BNX2X_DCBX_CONFIG_INV_VALUE (0xFFFFFFFF)
#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE 0
#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1
#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE)

/* LLDP transmit-timer tuning passed down to firmware. */
struct bnx2x_config_lldp_params {
	u32 overwrite_settings;
	u32 msg_tx_hold;
	u32 msg_fast_tx;
	u32 tx_credit_max;
	u32 msg_tx_interval;
	u32 tx_fast;
};

/* One locally-administered application priority entry. */
struct bnx2x_admin_priority_app_table {
	u32 valid;
	u32 priority;
#define INVALID_TRAFFIC_TYPE_PRIORITY	(0xFFFFFFFF)
	u32 traffic_type;
#define TRAFFIC_TYPE_ETH		0
#define TRAFFIC_TYPE_PORT		1
	u32 app_id;
};

/* Locally-administered DCBX parameters pushed to firmware when the
 * host overrides the negotiated configuration.
 */
struct bnx2x_config_dcbx_params {
	u32 overwrite_settings;
	u32 admin_dcbx_version;
	u32 admin_ets_enable;
	u32 admin_pfc_enable;
	u32 admin_tc_supported_tx_enable;
	u32 admin_ets_configuration_tx_enable;
	u32 admin_ets_recommendation_tx_enable;
	u32 admin_pfc_tx_enable;
	u32 admin_application_priority_tx_enable;
	u32 admin_ets_willing;
	u32 admin_ets_reco_valid;
	u32 admin_pfc_willing;
	u32 admin_app_priority_willing;
	u32 admin_configuration_bw_precentage[8];
	u32 admin_configuration_ets_pg[8];
	u32 admin_recommendation_bw_precentage[8];
	u32 admin_recommendation_ets_pg[8];
	u32 admin_pfc_bitmap;
	struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
	u32 admin_default_priority;
};
105
/* simple bit-flag helpers */
#define GET_FLAGS(flags, bits)		((flags) & (bits))
#define SET_FLAGS(flags, bits)		((flags) |= (bits))
#define RESET_FLAGS(flags, bits)	((flags) &= ~(bits))

/* which MIB to read from shared memory */
enum {
	DCBX_READ_LOCAL_MIB,
	DCBX_READ_REMOTE_MIB
};

#define ETH_TYPE_FCOE  (0x8906)	/* FCoE ethertype */
#define TCP_PORT_ISCSI (0xCBC)	/* iSCSI well-known TCP port (3260) */

#define PFC_VALUE_FRAME_SIZE (512)
/* PFC pause quanta derived from link speed in Mb/s */
#define PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(mega_speed)  \
	((1000 * (PFC_VALUE_FRAME_SIZE))/(mega_speed))

#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130
#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170

/* Scratch data for building one CoS entry during DCBX resolution. */
struct cos_entry_help_data {
	u32 pri_join_mask;	/* priorities joined into this CoS */
	u32 cos_bw;
	u8 strict;
	bool pausable;
};

/* Scratch data for all CoS entries. */
struct cos_help_data {
	struct cos_entry_help_data data[E2_NUM_OF_COS];
	u8 num_of_cos;
};
138
#define DCBX_ILLEGAL_PG				(0xFF)
#define DCBX_PFC_PRI_MASK			(0xFF)
#define DCBX_STRICT_PRIORITY			(15)
#define DCBX_INVALID_COS_BW			(0xFFFFFFFF)
/* masks and predicates splitting priorities into PFC-pauseable and
 * non-pauseable sets, based on the negotiated port parameters
 */
#define DCBX_PFC_PRI_NON_PAUSE_MASK(bp)		\
			((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask)
#define DCBX_PFC_PRI_PAUSE_MASK(bp)		\
					((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp))
#define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri)	\
				((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri)	\
			(DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri)	\
			(pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
			((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri)))
#define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri)	\
			(!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
			 IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))

/* Scratch data describing one priority group during resolution. */
struct pg_entry_help_data {
	u8	num_of_dif_pri;	/* distinct priorities in this PG */
	u8	pg;
	u32	pg_priority;	/* bitmask of member priorities */
};

/* Scratch data for all priority groups. */
struct pg_help_data {
	struct pg_entry_help_data	data[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
	u8				num_of_pg;
};
170
/* forward DCB/PFC related declarations */
struct bnx2x;
void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp);
void bnx2x_dcbx_update(struct work_struct *work);
void bnx2x_dcbx_init_params(struct bnx2x *bp);
void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);

/* DCBX state-machine event bits handled by bnx2x_dcbx_set_params() */
enum {
	BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
#ifdef BCM_CNIC
	BNX2X_DCBX_STATE_ISCSI_STOPPED,
#endif
	BNX2X_DCBX_STATE_TX_PAUSED,
	BNX2X_DCBX_STATE_TX_RELEASED
};

void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);

/* DCB netlink */
#ifdef BCM_DCBNL
extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
#endif /* BCM_DCBNL */

#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index 3bb9a91bb3f7..fb3ff7c4d7ca 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -1,10 +1,16 @@
1/* bnx2x_dump.h: Broadcom Everest network driver. 1/* bnx2x_dump.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2009 Broadcom Corporation 3 * Copyright (c) 2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * Unless you and Broadcom execute a separate written software license
6 * it under the terms of the GNU General Public License as published by 6 * agreement governing use of this software, this software is licensed to you
7 * the Free Software Foundation. 7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
8 */ 14 */
9 15
10 16
@@ -17,43 +23,53 @@
17#define BNX2X_DUMP_H 23#define BNX2X_DUMP_H
18 24
19 25
26
27/*definitions */
28#define XSTORM_WAITP_ADDR 0x2b8a80
29#define TSTORM_WAITP_ADDR 0x1b8a80
30#define USTORM_WAITP_ADDR 0x338a80
31#define CSTORM_WAITP_ADDR 0x238a80
32#define TSTORM_CAM_MODE 0x1B1440
33
34#define MAX_TIMER_PENDING 200
35#define TIMER_SCAN_DONT_CARE 0xFF
36#define RI_E1 0x1
37#define RI_E1H 0x2
38#define RI_E2 0x4
39#define RI_ONLINE 0x100
40#define RI_PATH0_DUMP 0x200
41#define RI_PATH1_DUMP 0x400
42#define RI_E1_OFFLINE (RI_E1)
43#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
44#define RI_E1H_OFFLINE (RI_E1H)
45#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
46#define RI_E2_OFFLINE (RI_E2)
47#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
48#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H)
49#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
50#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H)
51#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE)
52#define RI_E1E2_OFFLINE (RI_E2 | RI_E1)
53#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE)
54#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2)
55#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
56
20struct dump_sign { 57struct dump_sign {
21 u32 time_stamp; 58 u32 time_stamp;
22 u32 diag_ver; 59 u32 diag_ver;
23 u32 grc_dump_ver; 60 u32 grc_dump_ver;
24}; 61};
25 62
26#define TSTORM_WAITP_ADDR 0x1b8a80
27#define CSTORM_WAITP_ADDR 0x238a80
28#define XSTORM_WAITP_ADDR 0x2b8a80
29#define USTORM_WAITP_ADDR 0x338a80
30#define TSTORM_CAM_MODE 0x1b1440
31
32#define RI_E1 0x1
33#define RI_E1H 0x2
34#define RI_ONLINE 0x100
35
36#define RI_E1_OFFLINE (RI_E1)
37#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
38#define RI_E1H_OFFLINE (RI_E1H)
39#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
40#define RI_ALL_OFFLINE (RI_E1 | RI_E1H)
41#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
42
43#define MAX_TIMER_PENDING 200
44#define TIMER_SCAN_DONT_CARE 0xFF
45
46
47struct dump_hdr { 63struct dump_hdr {
48 u32 hdr_size; /* in dwords, excluding this field */ 64 u32 hdr_size; /* in dwords, excluding this field */
49 struct dump_sign dump_sign; 65 struct dump_sign dump_sign;
50 u32 xstorm_waitp; 66 u32 xstorm_waitp;
51 u32 tstorm_waitp; 67 u32 tstorm_waitp;
52 u32 ustorm_waitp; 68 u32 ustorm_waitp;
53 u32 cstorm_waitp; 69 u32 cstorm_waitp;
54 u16 info; 70 u16 info;
55 u8 idle_chk; 71 u8 idle_chk;
56 u8 reserved; 72 u8 reserved;
57}; 73};
58 74
59struct reg_addr { 75struct reg_addr {
@@ -70,202 +86,185 @@ struct wreg_addr {
70 u16 info; 86 u16 info;
71}; 87};
72 88
73 89#define REGS_COUNT 834
74#define REGS_COUNT 558
75static const struct reg_addr reg_addrs[REGS_COUNT] = { 90static const struct reg_addr reg_addrs[REGS_COUNT] = {
76 { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE }, 91 { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE },
77 { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE }, 92 { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE },
78 { 0x8800, 6, RI_E1_ONLINE }, { 0xa000, 223, RI_ALL_ONLINE }, 93 { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE },
79 { 0xa388, 1, RI_ALL_ONLINE }, { 0xa398, 1, RI_ALL_ONLINE }, 94 { 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE },
80 { 0xa39c, 7, RI_E1H_ONLINE }, { 0xa3c0, 3, RI_E1H_ONLINE }, 95 { 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE },
81 { 0xa3d0, 1, RI_E1H_ONLINE }, { 0xa3d8, 1, RI_E1H_ONLINE }, 96 { 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE },
82 { 0xa3e0, 1, RI_E1H_ONLINE }, { 0xa3e8, 1, RI_E1H_ONLINE }, 97 { 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE },
83 { 0xa3f0, 1, RI_E1H_ONLINE }, { 0xa3f8, 1, RI_E1H_ONLINE }, 98 { 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE },
84 { 0xa400, 69, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE }, 99 { 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE },
85 { 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE }, 100 { 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE },
86 { 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE }, 101 { 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE },
87 { 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_ALL_ONLINE }, 102 { 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE },
88 { 0xa550, 1, RI_ALL_ONLINE }, { 0xa558, 1, RI_ALL_ONLINE }, 103 { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE },
89 { 0xa560, 1, RI_ALL_ONLINE }, { 0xa568, 1, RI_ALL_ONLINE }, 104 { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE },
90 { 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE }, 105 { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE },
91 { 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_ALL_ONLINE }, 106 { 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE },
92 { 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1H_ONLINE }, 107 { 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE },
93 { 0xa5e8, 1, RI_E1H_ONLINE }, { 0xa5f0, 1, RI_E1H_ONLINE }, 108 { 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE },
94 { 0xa5f8, 10, RI_E1H_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE }, 109 { 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE },
95 { 0x103bc, 1, RI_ALL_ONLINE }, { 0x103cc, 1, RI_ALL_ONLINE }, 110 { 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE },
96 { 0x103dc, 1, RI_ALL_ONLINE }, { 0x10400, 57, RI_ALL_ONLINE }, 111 { 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE },
97 { 0x104e8, 2, RI_ALL_ONLINE }, { 0x104f4, 2, RI_ALL_ONLINE }, 112 { 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE },
98 { 0x10500, 146, RI_ALL_ONLINE }, { 0x10750, 2, RI_ALL_ONLINE }, 113 { 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE },
99 { 0x10760, 2, RI_ALL_ONLINE }, { 0x10770, 2, RI_ALL_ONLINE }, 114 { 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE },
100 { 0x10780, 2, RI_ALL_ONLINE }, { 0x10790, 2, RI_ALL_ONLINE }, 115 { 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE },
101 { 0x107a0, 2, RI_ALL_ONLINE }, { 0x107b0, 2, RI_ALL_ONLINE }, 116 { 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE },
102 { 0x107c0, 2, RI_ALL_ONLINE }, { 0x107d0, 2, RI_ALL_ONLINE }, 117 { 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE },
103 { 0x107e0, 2, RI_ALL_ONLINE }, { 0x10880, 2, RI_ALL_ONLINE }, 118 { 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE },
104 { 0x10900, 2, RI_ALL_ONLINE }, { 0x12000, 1, RI_ALL_ONLINE }, 119 { 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
105 { 0x14000, 1, RI_ALL_ONLINE }, { 0x16000, 26, RI_E1H_ONLINE }, 120 { 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE },
106 { 0x16070, 18, RI_E1H_ONLINE }, { 0x160c0, 27, RI_E1H_ONLINE }, 121 { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE },
107 { 0x16140, 1, RI_E1H_ONLINE }, { 0x16160, 1, RI_E1H_ONLINE }, 122 { 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE },
108 { 0x16180, 2, RI_E1H_ONLINE }, { 0x161c0, 2, RI_E1H_ONLINE }, 123 { 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE },
109 { 0x16204, 5, RI_E1H_ONLINE }, { 0x18000, 1, RI_E1H_ONLINE }, 124 { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE },
110 { 0x18008, 1, RI_E1H_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE }, 125 { 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE },
111 { 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 138, RI_ALL_ONLINE }, 126 { 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE },
112 { 0x202b4, 1, RI_ALL_ONLINE }, { 0x202c4, 1, RI_ALL_ONLINE }, 127 { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE },
113 { 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE }, 128 { 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE },
114 { 0x2042c, 18, RI_E1H_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE }, 129 { 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE },
115 { 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE }, 130 { 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE },
116 { 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE }, 131 { 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE },
117 { 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE }, 132 { 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE },
118 { 0x40000, 98, RI_ALL_ONLINE }, { 0x40194, 1, RI_ALL_ONLINE }, 133 { 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE },
119 { 0x401a4, 1, RI_ALL_ONLINE }, { 0x401a8, 11, RI_E1H_ONLINE }, 134 { 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE },
120 { 0x40200, 4, RI_ALL_ONLINE }, { 0x40400, 43, RI_ALL_ONLINE }, 135 { 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE },
121 { 0x404b8, 1, RI_ALL_ONLINE }, { 0x404c8, 1, RI_ALL_ONLINE }, 136 { 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE },
122 { 0x404cc, 3, RI_E1H_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE }, 137 { 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE },
138 { 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE },
139 { 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE },
140 { 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE },
141 { 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE },
142 { 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE },
143 { 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE },
144 { 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE },
145 { 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE },
146 { 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE },
147 { 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE },
148 { 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE },
149 { 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE },
150 { 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE },
151 { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE },
152 { 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
123 { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE }, 153 { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE },
124 { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE }, 154 { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE },
125 { 0x42000, 164, RI_ALL_ONLINE }, { 0x4229c, 1, RI_ALL_ONLINE }, 155 { 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE },
126 { 0x422ac, 1, RI_ALL_ONLINE }, { 0x422bc, 1, RI_ALL_ONLINE }, 156 { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE },
127 { 0x422d4, 5, RI_E1H_ONLINE }, { 0x42400, 49, RI_ALL_ONLINE }, 157 { 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE },
128 { 0x424c8, 38, RI_ALL_ONLINE }, { 0x42568, 2, RI_ALL_ONLINE }, 158 { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE },
129 { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 20, RI_ALL_ONLINE }, 159 { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE },
130 { 0x50050, 8, RI_ALL_ONLINE }, { 0x50070, 88, RI_ALL_ONLINE }, 160 { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE },
131 { 0x501dc, 1, RI_ALL_ONLINE }, { 0x501ec, 1, RI_ALL_ONLINE }, 161 { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE },
132 { 0x501f0, 4, RI_E1H_ONLINE }, { 0x50200, 2, RI_ALL_ONLINE }, 162 { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE },
133 { 0x5020c, 7, RI_ALL_ONLINE }, { 0x50228, 6, RI_E1H_ONLINE }, 163 { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE },
134 { 0x50240, 1, RI_ALL_ONLINE }, { 0x50280, 1, RI_ALL_ONLINE }, 164 { 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE },
165 { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE },
166 { 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE },
167 { 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE },
135 { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE }, 168 { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE },
136 { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE }, 169 { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE },
137 { 0x58004, 8191, RI_ALL_OFFLINE }, { 0x60000, 71, RI_ALL_ONLINE }, 170 { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE },
138 { 0x60128, 1, RI_ALL_ONLINE }, { 0x60138, 1, RI_ALL_ONLINE }, 171 { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE },
139 { 0x6013c, 24, RI_E1H_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE }, 172 { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE },
173 { 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE },
174 { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE },
175 { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE },
176 { 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
177 { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE },
140 { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE }, 178 { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE },
141 { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 21496, RI_ALL_OFFLINE }, 179 { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE },
142 { 0x85000, 3, RI_ALL_ONLINE }, { 0x8500c, 4, RI_ALL_OFFLINE }, 180 { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE },
143 { 0x8501c, 7, RI_ALL_ONLINE }, { 0x85038, 4, RI_ALL_OFFLINE }, 181 { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE },
144 { 0x85048, 1, RI_ALL_ONLINE }, { 0x8504c, 109, RI_ALL_OFFLINE }, 182 { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE },
145 { 0x85200, 32, RI_ALL_ONLINE }, { 0x85280, 11104, RI_ALL_OFFLINE }, 183 { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE },
146 { 0xa0000, 16384, RI_ALL_ONLINE }, { 0xb0000, 16384, RI_E1H_ONLINE }, 184 { 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE },
147 { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc1028, 1, RI_ALL_ONLINE },
148 { 0xc1038, 1, RI_ALL_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE },
149 { 0xc2000, 164, RI_ALL_ONLINE }, { 0xc229c, 1, RI_ALL_ONLINE },
150 { 0xc22ac, 1, RI_ALL_ONLINE }, { 0xc22bc, 1, RI_ALL_ONLINE },
151 { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE }, 185 { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE },
152 { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE }, 186 { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE },
153 { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42a0, 1, RI_ALL_ONLINE }, 187 { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE },
154 { 0xc42b0, 1, RI_ALL_ONLINE }, { 0xc42c0, 1, RI_ALL_ONLINE }, 188 { 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE },
155 { 0xc42e0, 7, RI_E1H_ONLINE }, { 0xc4400, 51, RI_ALL_ONLINE }, 189 { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE },
156 { 0xc44d0, 38, RI_ALL_ONLINE }, { 0xc4570, 2, RI_ALL_ONLINE }, 190 { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE },
157 { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE }, 191 { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE },
158 { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE }, 192 { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE },
159 { 0xd01e4, 1, RI_ALL_ONLINE }, { 0xd01f4, 1, RI_ALL_ONLINE }, 193 { 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE },
160 { 0xd0200, 2, RI_ALL_ONLINE }, { 0xd020c, 7, RI_ALL_ONLINE }, 194 { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE },
161 { 0xd0228, 18, RI_E1H_ONLINE }, { 0xd0280, 1, RI_ALL_ONLINE }, 195 { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE },
162 { 0xd0300, 1, RI_ALL_ONLINE }, { 0xd0400, 1, RI_ALL_ONLINE }, 196 { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE },
163 { 0xd4000, 1, RI_ALL_ONLINE }, { 0xd4004, 2559, RI_ALL_OFFLINE }, 197 { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE },
164 { 0xd8000, 1, RI_ALL_ONLINE }, { 0xd8004, 8191, RI_ALL_OFFLINE }, 198 { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE },
165 { 0xe0000, 21, RI_ALL_ONLINE }, { 0xe0054, 8, RI_ALL_ONLINE }, 199 { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE },
166 { 0xe0074, 85, RI_ALL_ONLINE }, { 0xe01d4, 1, RI_ALL_ONLINE }, 200 { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE },
167 { 0xe01e4, 1, RI_ALL_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE }, 201 { 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
168 { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1H_ONLINE }, 202 { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE },
169 { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE }, 203 { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE },
170 { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE }, 204 { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE },
171 { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE }, 205 { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE },
172 { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE }, 206 { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE },
173 { 0x10103c, 1, RI_ALL_ONLINE }, { 0x10104c, 1, RI_ALL_ONLINE }, 207 { 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE },
174 { 0x101050, 1, RI_E1H_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE }, 208 { 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE },
175 { 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE }, 209 { 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE },
176 { 0x102054, 1, RI_ALL_ONLINE }, { 0x102064, 1, RI_ALL_ONLINE },
177 { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE }, 210 { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE },
178 { 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE }, 211 { 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE },
179 { 0x103074, 1, RI_ALL_ONLINE }, { 0x103084, 1, RI_ALL_ONLINE }, 212 { 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE },
180 { 0x103094, 1, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1H_ONLINE }, 213 { 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE },
214 { 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE },
181 { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE }, 215 { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE },
182 { 0x104108, 1, RI_ALL_ONLINE }, { 0x104118, 1, RI_ALL_ONLINE }, 216 { 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE },
183 { 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE }, 217 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE },
184 { 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE }, 218 { 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE },
185 { 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 7, RI_ALL_ONLINE }, 219 { 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE },
186 { 0x10501c, 1, RI_ALL_OFFLINE }, { 0x105020, 3, RI_ALL_ONLINE }, 220 { 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE },
187 { 0x10502c, 1, RI_ALL_OFFLINE }, { 0x105030, 3, RI_ALL_ONLINE }, 221 { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE },
188 { 0x10503c, 1, RI_ALL_OFFLINE }, { 0x105040, 3, RI_ALL_ONLINE }, 222 { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE },
189 { 0x10504c, 1, RI_ALL_OFFLINE }, { 0x105050, 3, RI_ALL_ONLINE }, 223 { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE },
190 { 0x10505c, 1, RI_ALL_OFFLINE }, { 0x105060, 3, RI_ALL_ONLINE }, 224 { 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE },
191 { 0x10506c, 1, RI_ALL_OFFLINE }, { 0x105070, 3, RI_ALL_ONLINE }, 225 { 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE },
192 { 0x10507c, 1, RI_ALL_OFFLINE }, { 0x105080, 3, RI_ALL_ONLINE }, 226 { 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE },
193 { 0x10508c, 1, RI_ALL_OFFLINE }, { 0x105090, 3, RI_ALL_ONLINE }, 227 { 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE },
194 { 0x10509c, 1, RI_ALL_OFFLINE }, { 0x1050a0, 3, RI_ALL_ONLINE }, 228 { 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE },
195 { 0x1050ac, 1, RI_ALL_OFFLINE }, { 0x1050b0, 3, RI_ALL_ONLINE }, 229 { 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE },
196 { 0x1050bc, 1, RI_ALL_OFFLINE }, { 0x1050c0, 3, RI_ALL_ONLINE }, 230 { 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE },
197 { 0x1050cc, 1, RI_ALL_OFFLINE }, { 0x1050d0, 3, RI_ALL_ONLINE }, 231 { 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE },
198 { 0x1050dc, 1, RI_ALL_OFFLINE }, { 0x1050e0, 3, RI_ALL_ONLINE }, 232 { 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE },
199 { 0x1050ec, 1, RI_ALL_OFFLINE }, { 0x1050f0, 3, RI_ALL_ONLINE }, 233 { 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE },
200 { 0x1050fc, 1, RI_ALL_OFFLINE }, { 0x105100, 3, RI_ALL_ONLINE }, 234 { 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE },
201 { 0x10510c, 1, RI_ALL_OFFLINE }, { 0x105110, 3, RI_ALL_ONLINE }, 235 { 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE },
202 { 0x10511c, 1, RI_ALL_OFFLINE }, { 0x105120, 3, RI_ALL_ONLINE }, 236 { 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE },
203 { 0x10512c, 1, RI_ALL_OFFLINE }, { 0x105130, 3, RI_ALL_ONLINE }, 237 { 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE },
204 { 0x10513c, 1, RI_ALL_OFFLINE }, { 0x105140, 3, RI_ALL_ONLINE }, 238 { 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE },
205 { 0x10514c, 1, RI_ALL_OFFLINE }, { 0x105150, 3, RI_ALL_ONLINE }, 239 { 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE },
206 { 0x10515c, 1, RI_ALL_OFFLINE }, { 0x105160, 3, RI_ALL_ONLINE }, 240 { 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE },
207 { 0x10516c, 1, RI_ALL_OFFLINE }, { 0x105170, 3, RI_ALL_ONLINE }, 241 { 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE },
208 { 0x10517c, 1, RI_ALL_OFFLINE }, { 0x105180, 3, RI_ALL_ONLINE }, 242 { 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE },
209 { 0x10518c, 1, RI_ALL_OFFLINE }, { 0x105190, 3, RI_ALL_ONLINE }, 243 { 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE },
210 { 0x10519c, 1, RI_ALL_OFFLINE }, { 0x1051a0, 3, RI_ALL_ONLINE }, 244 { 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
211 { 0x1051ac, 1, RI_ALL_OFFLINE }, { 0x1051b0, 3, RI_ALL_ONLINE }, 245 { 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE },
212 { 0x1051bc, 1, RI_ALL_OFFLINE }, { 0x1051c0, 3, RI_ALL_ONLINE }, 246 { 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE },
213 { 0x1051cc, 1, RI_ALL_OFFLINE }, { 0x1051d0, 3, RI_ALL_ONLINE }, 247 { 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE },
214 { 0x1051dc, 1, RI_ALL_OFFLINE }, { 0x1051e0, 3, RI_ALL_ONLINE }, 248 { 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE },
215 { 0x1051ec, 1, RI_ALL_OFFLINE }, { 0x1051f0, 3, RI_ALL_ONLINE }, 249 { 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE },
216 { 0x1051fc, 1, RI_ALL_OFFLINE }, { 0x105200, 3, RI_ALL_ONLINE }, 250 { 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE },
217 { 0x10520c, 1, RI_ALL_OFFLINE }, { 0x105210, 3, RI_ALL_ONLINE }, 251 { 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE },
218 { 0x10521c, 1, RI_ALL_OFFLINE }, { 0x105220, 3, RI_ALL_ONLINE }, 252 { 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE },
219 { 0x10522c, 1, RI_ALL_OFFLINE }, { 0x105230, 3, RI_ALL_ONLINE }, 253 { 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE },
220 { 0x10523c, 1, RI_ALL_OFFLINE }, { 0x105240, 3, RI_ALL_ONLINE }, 254 { 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE },
221 { 0x10524c, 1, RI_ALL_OFFLINE }, { 0x105250, 3, RI_ALL_ONLINE }, 255 { 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE },
222 { 0x10525c, 1, RI_ALL_OFFLINE }, { 0x105260, 3, RI_ALL_ONLINE }, 256 { 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE },
223 { 0x10526c, 1, RI_ALL_OFFLINE }, { 0x105270, 3, RI_ALL_ONLINE }, 257 { 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE },
224 { 0x10527c, 1, RI_ALL_OFFLINE }, { 0x105280, 3, RI_ALL_ONLINE }, 258 { 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE },
225 { 0x10528c, 1, RI_ALL_OFFLINE }, { 0x105290, 3, RI_ALL_ONLINE }, 259 { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE },
226 { 0x10529c, 1, RI_ALL_OFFLINE }, { 0x1052a0, 3, RI_ALL_ONLINE }, 260 { 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE },
227 { 0x1052ac, 1, RI_ALL_OFFLINE }, { 0x1052b0, 3, RI_ALL_ONLINE }, 261 { 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE },
228 { 0x1052bc, 1, RI_ALL_OFFLINE }, { 0x1052c0, 3, RI_ALL_ONLINE }, 262 { 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE },
229 { 0x1052cc, 1, RI_ALL_OFFLINE }, { 0x1052d0, 3, RI_ALL_ONLINE }, 263 { 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE },
230 { 0x1052dc, 1, RI_ALL_OFFLINE }, { 0x1052e0, 3, RI_ALL_ONLINE }, 264 { 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
231 { 0x1052ec, 1, RI_ALL_OFFLINE }, { 0x1052f0, 3, RI_ALL_ONLINE }, 265 { 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE },
232 { 0x1052fc, 1, RI_ALL_OFFLINE }, { 0x105300, 3, RI_ALL_ONLINE }, 266 { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE },
233 { 0x10530c, 1, RI_ALL_OFFLINE }, { 0x105310, 3, RI_ALL_ONLINE }, 267 { 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
234 { 0x10531c, 1, RI_ALL_OFFLINE }, { 0x105320, 3, RI_ALL_ONLINE },
235 { 0x10532c, 1, RI_ALL_OFFLINE }, { 0x105330, 3, RI_ALL_ONLINE },
236 { 0x10533c, 1, RI_ALL_OFFLINE }, { 0x105340, 3, RI_ALL_ONLINE },
237 { 0x10534c, 1, RI_ALL_OFFLINE }, { 0x105350, 3, RI_ALL_ONLINE },
238 { 0x10535c, 1, RI_ALL_OFFLINE }, { 0x105360, 3, RI_ALL_ONLINE },
239 { 0x10536c, 1, RI_ALL_OFFLINE }, { 0x105370, 3, RI_ALL_ONLINE },
240 { 0x10537c, 1, RI_ALL_OFFLINE }, { 0x105380, 3, RI_ALL_ONLINE },
241 { 0x10538c, 1, RI_ALL_OFFLINE }, { 0x105390, 3, RI_ALL_ONLINE },
242 { 0x10539c, 1, RI_ALL_OFFLINE }, { 0x1053a0, 3, RI_ALL_ONLINE },
243 { 0x1053ac, 1, RI_ALL_OFFLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
244 { 0x1053bc, 1, RI_ALL_OFFLINE }, { 0x1053c0, 3, RI_ALL_ONLINE },
245 { 0x1053cc, 1, RI_ALL_OFFLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
246 { 0x1053dc, 1, RI_ALL_OFFLINE }, { 0x1053e0, 3, RI_ALL_ONLINE },
247 { 0x1053ec, 1, RI_ALL_OFFLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
248 { 0x1053fc, 769, RI_ALL_OFFLINE }, { 0x108000, 33, RI_ALL_ONLINE },
249 { 0x108090, 1, RI_ALL_ONLINE }, { 0x1080a0, 1, RI_ALL_ONLINE },
250 { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_ALL_ONLINE },
251 { 0x108120, 5, RI_ALL_ONLINE }, { 0x108200, 74, RI_ALL_ONLINE },
252 { 0x108400, 74, RI_ALL_ONLINE }, { 0x108800, 152, RI_ALL_ONLINE },
253 { 0x109000, 1, RI_ALL_ONLINE }, { 0x120000, 347, RI_ALL_ONLINE },
254 { 0x120578, 1, RI_ALL_ONLINE }, { 0x120588, 1, RI_ALL_ONLINE },
255 { 0x120598, 1, RI_ALL_ONLINE }, { 0x12059c, 23, RI_E1H_ONLINE },
256 { 0x120614, 1, RI_E1H_ONLINE }, { 0x12061c, 30, RI_E1H_ONLINE },
257 { 0x12080c, 65, RI_ALL_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
258 { 0x122000, 2, RI_ALL_ONLINE }, { 0x128000, 2, RI_E1H_ONLINE },
259 { 0x140000, 114, RI_ALL_ONLINE }, { 0x1401d4, 1, RI_ALL_ONLINE },
260 { 0x1401e4, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
261 { 0x144000, 4, RI_ALL_ONLINE }, { 0x148000, 4, RI_ALL_ONLINE },
262 { 0x14c000, 4, RI_ALL_ONLINE }, { 0x150000, 4, RI_ALL_ONLINE },
263 { 0x154000, 4, RI_ALL_ONLINE }, { 0x158000, 4, RI_ALL_ONLINE },
264 { 0x15c000, 7, RI_E1H_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
265 { 0x161028, 1, RI_ALL_ONLINE }, { 0x161038, 1, RI_ALL_ONLINE },
266 { 0x161800, 2, RI_ALL_ONLINE }, { 0x164000, 60, RI_ALL_ONLINE },
267 { 0x1640fc, 1, RI_ALL_ONLINE }, { 0x16410c, 1, RI_ALL_ONLINE },
268 { 0x164110, 2, RI_E1H_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
269 { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE }, 268 { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE },
270 { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE }, 269 { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE },
271 { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE }, 270 { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE },
@@ -274,169 +273,298 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = {
274 { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE }, 273 { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE },
275 { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE }, 274 { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE },
276 { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE }, 275 { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE },
277 { 0x166000, 164, RI_ALL_ONLINE }, { 0x16629c, 1, RI_ALL_ONLINE }, 276 { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE },
278 { 0x1662ac, 1, RI_ALL_ONLINE }, { 0x1662bc, 1, RI_ALL_ONLINE },
279 { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE }, 277 { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE },
280 { 0x166568, 2, RI_ALL_ONLINE }, { 0x166800, 1, RI_ALL_ONLINE }, 278 { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE },
281 { 0x168000, 270, RI_ALL_ONLINE }, { 0x168444, 1, RI_ALL_ONLINE }, 279 { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE },
282 { 0x168454, 1, RI_ALL_ONLINE }, { 0x168800, 19, RI_ALL_ONLINE }, 280 { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE },
283 { 0x168900, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE }, 281 { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE },
284 { 0x16a000, 1, RI_ALL_ONLINE }, { 0x16a004, 1535, RI_ALL_OFFLINE }, 282 { 0x168300, 2, RI_E1E1H_ONLINE }, { 0x168308, 68, RI_ALL_ONLINE },
285 { 0x16c000, 1, RI_ALL_ONLINE }, { 0x16c004, 1535, RI_ALL_OFFLINE }, 283 { 0x168418, 2, RI_E1E1H_ONLINE }, { 0x168420, 6, RI_ALL_ONLINE },
286 { 0x16e000, 16, RI_E1H_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE }, 284 { 0x168800, 19, RI_ALL_ONLINE }, { 0x168900, 1, RI_ALL_ONLINE },
287 { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 183, RI_E1H_ONLINE }, 285 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE },
288 { 0x170000, 93, RI_ALL_ONLINE }, { 0x170180, 1, RI_ALL_ONLINE }, 286 { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE },
289 { 0x170190, 1, RI_ALL_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE }, 287 { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE },
290 { 0x170214, 1, RI_ALL_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE }, 288 { 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
291 { 0x180000, 61, RI_ALL_ONLINE }, { 0x180100, 1, RI_ALL_ONLINE }, 289 { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE },
292 { 0x180110, 1, RI_ALL_ONLINE }, { 0x180120, 1, RI_ALL_ONLINE }, 290 { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE },
293 { 0x180130, 1, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1H_ONLINE }, 291 { 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE },
294 { 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE }, 292 { 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE },
295 { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_ALL_OFFLINE }, 293 { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE },
294 { 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE },
295 { 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE },
296 { 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE },
297 { 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE },
298 { 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE },
299 { 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE },
300 { 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE },
301 { 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE },
302 { 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE },
303 { 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE },
304 { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE },
296 { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, 305 { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE },
297 { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 1023, RI_ALL_OFFLINE }, 306 { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE },
298 { 0x1a1000, 1, RI_ALL_ONLINE }, { 0x1a1004, 4607, RI_ALL_OFFLINE }, 307 { 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE },
299 { 0x1a5800, 2560, RI_E1H_OFFLINE }, { 0x1a8000, 64, RI_ALL_OFFLINE }, 308 { 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE },
300 { 0x1a8100, 1984, RI_E1H_OFFLINE }, { 0x1aa000, 1, RI_E1H_ONLINE }, 309 { 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE },
301 { 0x1aa004, 6655, RI_E1H_OFFLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE }, 310 { 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE },
302 { 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_OFFLINE }, 311 { 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE },
303 { 0x1b2400, 64, RI_E1H_OFFLINE }, { 0x1b8200, 1, RI_ALL_ONLINE }, 312 { 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE },
313 { 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE },
314 { 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE },
315 { 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE },
316 { 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE },
317 { 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE },
318 { 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
319 { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
320 { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE },
321 { 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE },
322 { 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
304 { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE }, 323 { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE },
305 { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE }, 324 { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE },
306 { 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE }, 325 { 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE },
307 { 0x200000, 65, RI_ALL_ONLINE }, { 0x200110, 1, RI_ALL_ONLINE }, 326 { 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE },
308 { 0x200120, 1, RI_ALL_ONLINE }, { 0x200130, 1, RI_ALL_ONLINE }, 327 { 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE },
309 { 0x200140, 1, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1H_ONLINE }, 328 { 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE },
310 { 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE }, 329 { 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE },
311 { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_ALL_OFFLINE }, 330 { 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE },
312 { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE }, 331 { 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE },
313 { 0x220000, 1, RI_ALL_ONLINE }, { 0x220004, 1023, RI_ALL_OFFLINE }, 332 { 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE },
314 { 0x221000, 1, RI_ALL_ONLINE }, { 0x221004, 4607, RI_ALL_OFFLINE }, 333 { 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE },
315 { 0x225800, 1536, RI_E1H_OFFLINE }, { 0x227000, 1, RI_E1H_ONLINE }, 334 { 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
316 { 0x227004, 1023, RI_E1H_OFFLINE }, { 0x228000, 64, RI_ALL_OFFLINE }, 335 { 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE },
317 { 0x228100, 8640, RI_E1H_OFFLINE }, { 0x231800, 128, RI_ALL_OFFLINE }, 336 { 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE },
318 { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_OFFLINE }, 337 { 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE },
319 { 0x232400, 64, RI_E1H_OFFLINE }, { 0x238200, 1, RI_ALL_ONLINE }, 338 { 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE },
320 { 0x238240, 1, RI_ALL_ONLINE }, { 0x238280, 1, RI_ALL_ONLINE }, 339 { 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE },
321 { 0x2382c0, 1, RI_ALL_ONLINE }, { 0x238a00, 1, RI_ALL_ONLINE }, 340 { 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE },
322 { 0x238a80, 1, RI_ALL_ONLINE }, { 0x240000, 2, RI_ALL_ONLINE }, 341 { 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE },
323 { 0x280000, 65, RI_ALL_ONLINE }, { 0x280110, 1, RI_ALL_ONLINE }, 342 { 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE },
324 { 0x280120, 1, RI_ALL_ONLINE }, { 0x280130, 1, RI_ALL_ONLINE }, 343 { 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE },
325 { 0x280140, 1, RI_ALL_ONLINE }, { 0x28014c, 2, RI_E1H_ONLINE }, 344 { 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE },
326 { 0x280200, 58, RI_ALL_ONLINE }, { 0x280340, 4, RI_ALL_ONLINE }, 345 { 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE },
327 { 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_ALL_OFFLINE }, 346 { 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE },
328 { 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE }, 347 { 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE },
329 { 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 1023, RI_ALL_OFFLINE }, 348 { 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE },
330 { 0x2a1000, 1, RI_ALL_ONLINE }, { 0x2a1004, 4607, RI_ALL_OFFLINE }, 349 { 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE },
331 { 0x2a5800, 2560, RI_E1H_OFFLINE }, { 0x2a8000, 64, RI_ALL_OFFLINE }, 350 { 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE },
332 { 0x2a8100, 960, RI_E1H_OFFLINE }, { 0x2a9000, 1, RI_E1H_ONLINE }, 351 { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE },
333 { 0x2a9004, 7679, RI_E1H_OFFLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE }, 352 { 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE },
334 { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_OFFLINE }, 353 { 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE },
335 { 0x2b2400, 64, RI_E1H_OFFLINE }, { 0x2b8200, 1, RI_ALL_ONLINE }, 354 { 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE },
336 { 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE }, 355 { 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE },
337 { 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE }, 356 { 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE },
338 { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE }, 357 { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE },
339 { 0x300000, 65, RI_ALL_ONLINE }, { 0x300110, 1, RI_ALL_ONLINE }, 358 { 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE },
340 { 0x300120, 1, RI_ALL_ONLINE }, { 0x300130, 1, RI_ALL_ONLINE }, 359 { 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE},
341 { 0x300140, 1, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1H_ONLINE }, 360 { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE },
361 { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE },
362 { 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE },
363 { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE },
364 { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE },
365 { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE },
366 { 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE },
367 { 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE },
368 { 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
369 { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE },
370 { 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE },
371 { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
372 { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
373 { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE },
374 { 0x238180, 1, RI_ALL_ONLINE }, { 0x2381c0, 1, RI_ALL_ONLINE },
375 { 0x238200, 1, RI_ALL_ONLINE }, { 0x238240, 1, RI_ALL_ONLINE },
376 { 0x238280, 1, RI_ALL_ONLINE }, { 0x2382c0, 1, RI_ALL_ONLINE },
377 { 0x238300, 1, RI_ALL_ONLINE }, { 0x238340, 1, RI_ALL_ONLINE },
378 { 0x238380, 1, RI_ALL_ONLINE }, { 0x2383c0, 1, RI_ALL_ONLINE },
379 { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE },
380 { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE },
381 { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE },
382 { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE },
383 { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE },
384 { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE },
385 { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE },
386 { 0x238980, 1, RI_ALL_ONLINE }, { 0x2389c0, 1, RI_ALL_ONLINE },
387 { 0x238a00, 1, RI_ALL_ONLINE }, { 0x238a40, 1, RI_ALL_ONLINE },
388 { 0x238a80, 1, RI_ALL_ONLINE }, { 0x238ac0, 1, RI_ALL_ONLINE },
389 { 0x238b00, 1, RI_ALL_ONLINE }, { 0x238b40, 1, RI_ALL_ONLINE },
390 { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE },
391 { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE },
392 { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE },
393 { 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE },
394 { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE },
395 { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE },
396 { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE },
397 { 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE },
398 { 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE },
399 { 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE },
400 { 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE },
401 { 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE },
402 { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE },
403 { 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE },
404 { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE },
405 { 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE },
406 { 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE },
407 { 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE },
408 { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE },
409 { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE },
410 { 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE},
411 { 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE },
412 { 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE },
413 { 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE },
414 { 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE },
415 { 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE },
416 { 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE },
417 { 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE },
418 { 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE },
419 { 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
420 { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE },
421 { 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE },
422 { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
423 { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE },
424 { 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE },
425 { 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE },
426 { 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE },
427 { 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE },
428 { 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE },
429 { 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE },
430 { 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE },
431 { 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE },
432 { 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE },
433 { 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE },
434 { 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE },
435 { 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE },
436 { 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE },
437 { 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE },
438 { 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE },
439 { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE },
440 { 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE },
441 { 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE },
442 { 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE },
443 { 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE },
444 { 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE },
445 { 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE },
446 { 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE },
447 { 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE },
448 { 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE },
449 { 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE },
450 { 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE },
451 { 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE },
452 { 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE },
453 { 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE },
454 { 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
455 { 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE },
342 { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE }, 456 { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE },
343 { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_ALL_OFFLINE }, 457 { 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE },
458 { 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE },
459 { 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE },
460 { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE },
344 { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE }, 461 { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE },
345 { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 1023, RI_ALL_OFFLINE }, 462 { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE },
346 { 0x321000, 1, RI_ALL_ONLINE }, { 0x321004, 4607, RI_ALL_OFFLINE }, 463 { 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE },
347 { 0x325800, 2560, RI_E1H_OFFLINE }, { 0x328000, 64, RI_ALL_OFFLINE }, 464 { 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE },
348 { 0x328100, 536, RI_E1H_OFFLINE }, { 0x328960, 1, RI_E1H_ONLINE }, 465 { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE },
349 { 0x328964, 8103, RI_E1H_OFFLINE }, { 0x331800, 128, RI_ALL_OFFLINE }, 466 { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE },
350 { 0x331c00, 128, RI_ALL_OFFLINE }, { 0x332000, 1, RI_ALL_OFFLINE }, 467 { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE },
351 { 0x332400, 64, RI_E1H_OFFLINE }, { 0x338200, 1, RI_ALL_ONLINE }, 468 { 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE },
469 { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE },
470 { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE },
471 { 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE },
472 { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE },
473 { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE },
474 { 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE },
475 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
476 { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE },
477 { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE },
478 { 0x3381c0, 1, RI_ALL_ONLINE }, { 0x338200, 1, RI_ALL_ONLINE },
352 { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE }, 479 { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE },
353 { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE }, 480 { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338300, 1, RI_ALL_ONLINE },
354 { 0x338a80, 1, RI_ALL_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE } 481 { 0x338340, 1, RI_ALL_ONLINE }, { 0x338380, 1, RI_ALL_ONLINE },
482 { 0x3383c0, 1, RI_ALL_ONLINE }, { 0x338400, 1, RI_ALL_ONLINE },
483 { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE },
484 { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE },
485 { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE },
486 { 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE },
487 { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE },
488 { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE },
489 { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE },
490 { 0x3389c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
491 { 0x338a40, 1, RI_ALL_ONLINE }, { 0x338a80, 1, RI_ALL_ONLINE },
492 { 0x338ac0, 1, RI_ALL_ONLINE }, { 0x338b00, 1, RI_ALL_ONLINE },
493 { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE },
494 { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE },
495 { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE },
496 { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE },
497 { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE },
498 { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE },
499 { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE },
500 { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE },
501 { 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE },
502 { 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE },
503 { 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE },
504 { 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE },
505 { 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE },
355}; 506};
356 507
357 508#define IDLE_REGS_COUNT 237
358#define IDLE_REGS_COUNT 277
359static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { 509static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
360 { 0x2114, 1, RI_ALL_ONLINE }, { 0x2120, 1, RI_ALL_ONLINE }, 510 { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE },
361 { 0x212c, 4, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE }, 511 { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
362 { 0x281c, 2, RI_ALL_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE }, 512 { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE },
513 { 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE },
514 { 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE },
515 { 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE },
516 { 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE },
517 { 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE },
363 { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE }, 518 { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE },
364 { 0xa600, 5, RI_E1H_ONLINE }, { 0xa618, 1, RI_E1H_ONLINE }, 519 { 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE },
365 { 0xc09c, 1, RI_ALL_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE }, 520 { 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE },
366 { 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE }, 521 { 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE },
367 { 0x2021c, 11, RI_ALL_ONLINE }, { 0x202a8, 1, RI_ALL_ONLINE }, 522 { 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE },
368 { 0x202b8, 1, RI_ALL_ONLINE }, { 0x20404, 1, RI_ALL_ONLINE }, 523 { 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE },
369 { 0x2040c, 2, RI_ALL_ONLINE }, { 0x2041c, 2, RI_ALL_ONLINE }, 524 { 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE },
370 { 0x40154, 14, RI_ALL_ONLINE }, { 0x40198, 1, RI_ALL_ONLINE }, 525 { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE },
371 { 0x404ac, 1, RI_ALL_ONLINE }, { 0x404bc, 1, RI_ALL_ONLINE }, 526 { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE },
372 { 0x42290, 1, RI_ALL_ONLINE }, { 0x422a0, 1, RI_ALL_ONLINE }, 527 { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE },
373 { 0x422b0, 1, RI_ALL_ONLINE }, { 0x42548, 1, RI_ALL_ONLINE }, 528 { 0x40198, 1, RI_ALL_ONLINE }, { 0x404ac, 1, RI_ALL_ONLINE },
374 { 0x42550, 1, RI_ALL_ONLINE }, { 0x42558, 1, RI_ALL_ONLINE }, 529 { 0x404bc, 1, RI_ALL_ONLINE }, { 0x42290, 1, RI_ALL_ONLINE },
375 { 0x50160, 8, RI_ALL_ONLINE }, { 0x501d0, 1, RI_ALL_ONLINE }, 530 { 0x422a0, 1, RI_ALL_ONLINE }, { 0x422b0, 1, RI_ALL_ONLINE },
376 { 0x501e0, 1, RI_ALL_ONLINE }, { 0x50204, 1, RI_ALL_ONLINE }, 531 { 0x42548, 1, RI_ALL_ONLINE }, { 0x42550, 1, RI_ALL_ONLINE },
377 { 0x5020c, 2, RI_ALL_ONLINE }, { 0x5021c, 1, RI_ALL_ONLINE }, 532 { 0x42558, 1, RI_ALL_ONLINE }, { 0x50160, 8, RI_ALL_ONLINE },
378 { 0x60090, 1, RI_ALL_ONLINE }, { 0x6011c, 1, RI_ALL_ONLINE }, 533 { 0x501d0, 1, RI_ALL_ONLINE }, { 0x501e0, 1, RI_ALL_ONLINE },
379 { 0x6012c, 1, RI_ALL_ONLINE }, { 0xc101c, 1, RI_ALL_ONLINE }, 534 { 0x50204, 1, RI_ALL_ONLINE }, { 0x5020c, 2, RI_ALL_ONLINE },
380 { 0xc102c, 1, RI_ALL_ONLINE }, { 0xc2290, 1, RI_ALL_ONLINE }, 535 { 0x5021c, 1, RI_ALL_ONLINE }, { 0x60090, 1, RI_ALL_ONLINE },
381 { 0xc22a0, 1, RI_ALL_ONLINE }, { 0xc22b0, 1, RI_ALL_ONLINE }, 536 { 0x6011c, 1, RI_ALL_ONLINE }, { 0x6012c, 1, RI_ALL_ONLINE },
382 { 0xc2548, 1, RI_ALL_ONLINE }, { 0xc2550, 1, RI_ALL_ONLINE }, 537 { 0xc101c, 1, RI_ALL_ONLINE }, { 0xc102c, 1, RI_ALL_ONLINE },
383 { 0xc2558, 1, RI_ALL_ONLINE }, { 0xc4294, 1, RI_ALL_ONLINE }, 538 { 0xc2290, 1, RI_ALL_ONLINE }, { 0xc22a0, 1, RI_ALL_ONLINE },
384 { 0xc42a4, 1, RI_ALL_ONLINE }, { 0xc42b4, 1, RI_ALL_ONLINE }, 539 { 0xc22b0, 1, RI_ALL_ONLINE }, { 0xc2548, 1, RI_ALL_ONLINE },
385 { 0xc4550, 1, RI_ALL_ONLINE }, { 0xc4558, 1, RI_ALL_ONLINE }, 540 { 0xc2550, 1, RI_ALL_ONLINE }, { 0xc2558, 1, RI_ALL_ONLINE },
386 { 0xc4560, 1, RI_ALL_ONLINE }, { 0xd016c, 8, RI_ALL_ONLINE }, 541 { 0xc4294, 1, RI_ALL_ONLINE }, { 0xc42a4, 1, RI_ALL_ONLINE },
387 { 0xd01d8, 1, RI_ALL_ONLINE }, { 0xd01e8, 1, RI_ALL_ONLINE }, 542 { 0xc42b4, 1, RI_ALL_ONLINE }, { 0xc4550, 1, RI_ALL_ONLINE },
388 { 0xd0204, 1, RI_ALL_ONLINE }, { 0xd020c, 3, RI_ALL_ONLINE }, 543 { 0xc4558, 1, RI_ALL_ONLINE }, { 0xc4560, 1, RI_ALL_ONLINE },
389 { 0xe0154, 8, RI_ALL_ONLINE }, { 0xe01c8, 1, RI_ALL_ONLINE }, 544 { 0xd016c, 8, RI_ALL_ONLINE }, { 0xd01d8, 1, RI_ALL_ONLINE },
390 { 0xe01d8, 1, RI_ALL_ONLINE }, { 0xe0204, 1, RI_ALL_ONLINE }, 545 { 0xd01e8, 1, RI_ALL_ONLINE }, { 0xd0204, 1, RI_ALL_ONLINE },
391 { 0xe020c, 2, RI_ALL_ONLINE }, { 0xe021c, 2, RI_ALL_ONLINE }, 546 { 0xd020c, 3, RI_ALL_ONLINE }, { 0xe0154, 8, RI_ALL_ONLINE },
392 { 0x101014, 1, RI_ALL_ONLINE }, { 0x101030, 1, RI_ALL_ONLINE }, 547 { 0xe01c8, 1, RI_ALL_ONLINE }, { 0xe01d8, 1, RI_ALL_ONLINE },
393 { 0x101040, 1, RI_ALL_ONLINE }, { 0x102058, 1, RI_ALL_ONLINE }, 548 { 0xe0204, 1, RI_ALL_ONLINE }, { 0xe020c, 2, RI_ALL_ONLINE },
394 { 0x102080, 16, RI_ALL_ONLINE }, { 0x103004, 2, RI_ALL_ONLINE }, 549 { 0xe021c, 2, RI_ALL_ONLINE }, { 0x101014, 1, RI_ALL_ONLINE },
395 { 0x103068, 1, RI_ALL_ONLINE }, { 0x103078, 1, RI_ALL_ONLINE }, 550 { 0x101030, 1, RI_ALL_ONLINE }, { 0x101040, 1, RI_ALL_ONLINE },
396 { 0x103088, 1, RI_ALL_ONLINE }, { 0x10309c, 2, RI_E1H_ONLINE }, 551 { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE },
552 { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE },
553 { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE },
554 { 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE },
555 { 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE },
397 { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE }, 556 { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE },
398 { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE }, 557 { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE },
399 { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE }, 558 { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE },
400 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE }, 559 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE },
401 { 0x105000, 3, RI_ALL_ONLINE }, { 0x105010, 3, RI_ALL_ONLINE }, 560 { 0x105000, 256, RI_ALL_ONLINE }, { 0x108094, 1, RI_E1E1H_ONLINE },
402 { 0x105020, 3, RI_ALL_ONLINE }, { 0x105030, 3, RI_ALL_ONLINE }, 561 { 0x1201b0, 2, RI_ALL_ONLINE }, { 0x12032c, 1, RI_ALL_ONLINE },
403 { 0x105040, 3, RI_ALL_ONLINE }, { 0x105050, 3, RI_ALL_ONLINE }, 562 { 0x12036c, 3, RI_ALL_ONLINE }, { 0x120408, 2, RI_ALL_ONLINE },
404 { 0x105060, 3, RI_ALL_ONLINE }, { 0x105070, 3, RI_ALL_ONLINE }, 563 { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE },
405 { 0x105080, 3, RI_ALL_ONLINE }, { 0x105090, 3, RI_ALL_ONLINE }, 564 { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE },
406 { 0x1050a0, 3, RI_ALL_ONLINE }, { 0x1050b0, 3, RI_ALL_ONLINE }, 565 { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE },
407 { 0x1050c0, 3, RI_ALL_ONLINE }, { 0x1050d0, 3, RI_ALL_ONLINE }, 566 { 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE },
408 { 0x1050e0, 3, RI_ALL_ONLINE }, { 0x1050f0, 3, RI_ALL_ONLINE }, 567 { 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE },
409 { 0x105100, 3, RI_ALL_ONLINE }, { 0x105110, 3, RI_ALL_ONLINE },
410 { 0x105120, 3, RI_ALL_ONLINE }, { 0x105130, 3, RI_ALL_ONLINE },
411 { 0x105140, 3, RI_ALL_ONLINE }, { 0x105150, 3, RI_ALL_ONLINE },
412 { 0x105160, 3, RI_ALL_ONLINE }, { 0x105170, 3, RI_ALL_ONLINE },
413 { 0x105180, 3, RI_ALL_ONLINE }, { 0x105190, 3, RI_ALL_ONLINE },
414 { 0x1051a0, 3, RI_ALL_ONLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
415 { 0x1051c0, 3, RI_ALL_ONLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
416 { 0x1051e0, 3, RI_ALL_ONLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
417 { 0x105200, 3, RI_ALL_ONLINE }, { 0x105210, 3, RI_ALL_ONLINE },
418 { 0x105220, 3, RI_ALL_ONLINE }, { 0x105230, 3, RI_ALL_ONLINE },
419 { 0x105240, 3, RI_ALL_ONLINE }, { 0x105250, 3, RI_ALL_ONLINE },
420 { 0x105260, 3, RI_ALL_ONLINE }, { 0x105270, 3, RI_ALL_ONLINE },
421 { 0x105280, 3, RI_ALL_ONLINE }, { 0x105290, 3, RI_ALL_ONLINE },
422 { 0x1052a0, 3, RI_ALL_ONLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
423 { 0x1052c0, 3, RI_ALL_ONLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
424 { 0x1052e0, 3, RI_ALL_ONLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
425 { 0x105300, 3, RI_ALL_ONLINE }, { 0x105310, 3, RI_ALL_ONLINE },
426 { 0x105320, 3, RI_ALL_ONLINE }, { 0x105330, 3, RI_ALL_ONLINE },
427 { 0x105340, 3, RI_ALL_ONLINE }, { 0x105350, 3, RI_ALL_ONLINE },
428 { 0x105360, 3, RI_ALL_ONLINE }, { 0x105370, 3, RI_ALL_ONLINE },
429 { 0x105380, 3, RI_ALL_ONLINE }, { 0x105390, 3, RI_ALL_ONLINE },
430 { 0x1053a0, 3, RI_ALL_ONLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
431 { 0x1053c0, 3, RI_ALL_ONLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
432 { 0x1053e0, 3, RI_ALL_ONLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
433 { 0x108094, 1, RI_ALL_ONLINE }, { 0x1201b0, 2, RI_ALL_ONLINE },
434 { 0x12032c, 1, RI_ALL_ONLINE }, { 0x12036c, 3, RI_ALL_ONLINE },
435 { 0x120408, 2, RI_ALL_ONLINE }, { 0x120414, 15, RI_ALL_ONLINE },
436 { 0x120478, 2, RI_ALL_ONLINE }, { 0x12052c, 1, RI_ALL_ONLINE },
437 { 0x120564, 3, RI_ALL_ONLINE }, { 0x12057c, 1, RI_ALL_ONLINE },
438 { 0x12058c, 1, RI_ALL_ONLINE }, { 0x120608, 1, RI_E1H_ONLINE },
439 { 0x120808, 1, RI_E1_ONLINE }, { 0x12080c, 2, RI_ALL_ONLINE },
440 { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE }, 568 { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE },
441 { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE }, 569 { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE },
442 { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE }, 570 { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE },
@@ -452,48 +580,50 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
452 { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE }, 580 { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE },
453 { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE }, 581 { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE },
454 { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE }, 582 { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE },
455 { 0x120908, 1, RI_ALL_ONLINE }, { 0x14005c, 2, RI_ALL_ONLINE }, 583 { 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE },
456 { 0x1400d0, 2, RI_ALL_ONLINE }, { 0x1400e0, 1, RI_ALL_ONLINE }, 584 { 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE },
457 { 0x1401c8, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE }, 585 { 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE },
458 { 0x16101c, 1, RI_ALL_ONLINE }, { 0x16102c, 1, RI_ALL_ONLINE }, 586 { 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE },
459 { 0x164014, 2, RI_ALL_ONLINE }, { 0x1640f0, 1, RI_ALL_ONLINE }, 587 { 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE },
460 { 0x166290, 1, RI_ALL_ONLINE }, { 0x1662a0, 1, RI_ALL_ONLINE }, 588 { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE },
461 { 0x1662b0, 1, RI_ALL_ONLINE }, { 0x166548, 1, RI_ALL_ONLINE }, 589 { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE },
462 { 0x166550, 1, RI_ALL_ONLINE }, { 0x166558, 1, RI_ALL_ONLINE }, 590 { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE },
463 { 0x168000, 1, RI_ALL_ONLINE }, { 0x168008, 1, RI_ALL_ONLINE }, 591 { 0x16102c, 1, RI_ALL_ONLINE }, { 0x164014, 2, RI_ALL_ONLINE },
464 { 0x168010, 1, RI_ALL_ONLINE }, { 0x168018, 1, RI_ALL_ONLINE }, 592 { 0x1640f0, 1, RI_ALL_ONLINE }, { 0x166290, 1, RI_ALL_ONLINE },
465 { 0x168028, 2, RI_ALL_ONLINE }, { 0x168058, 4, RI_ALL_ONLINE }, 593 { 0x1662a0, 1, RI_ALL_ONLINE }, { 0x1662b0, 1, RI_ALL_ONLINE },
466 { 0x168070, 1, RI_ALL_ONLINE }, { 0x168238, 1, RI_ALL_ONLINE }, 594 { 0x166548, 1, RI_ALL_ONLINE }, { 0x166550, 1, RI_ALL_ONLINE },
467 { 0x1682d0, 2, RI_ALL_ONLINE }, { 0x1682e0, 1, RI_ALL_ONLINE }, 595 { 0x166558, 1, RI_ALL_ONLINE }, { 0x168000, 1, RI_ALL_ONLINE },
468 { 0x168300, 67, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE }, 596 { 0x168008, 1, RI_ALL_ONLINE }, { 0x168010, 1, RI_ALL_ONLINE },
597 { 0x168018, 1, RI_ALL_ONLINE }, { 0x168028, 2, RI_ALL_ONLINE },
598 { 0x168058, 4, RI_ALL_ONLINE }, { 0x168070, 1, RI_ALL_ONLINE },
599 { 0x168238, 1, RI_ALL_ONLINE }, { 0x1682d0, 2, RI_ALL_ONLINE },
600 { 0x1682e0, 1, RI_ALL_ONLINE }, { 0x168300, 2, RI_E1E1H_ONLINE },
601 { 0x168308, 65, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
469 { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE }, 602 { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE },
470 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE }, 603 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE },
471 { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 70, RI_E1H_ONLINE }, 604 { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE },
472 { 0x1700a4, 1, RI_ALL_ONLINE }, { 0x1700ac, 2, RI_ALL_ONLINE }, 605 { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE },
473 { 0x1700c0, 1, RI_ALL_ONLINE }, { 0x170174, 1, RI_ALL_ONLINE }, 606 { 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE },
474 { 0x170184, 1, RI_ALL_ONLINE }, { 0x1800f4, 1, RI_ALL_ONLINE }, 607 { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE },
475 { 0x180104, 1, RI_ALL_ONLINE }, { 0x180114, 1, RI_ALL_ONLINE }, 608 { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE },
476 { 0x180124, 1, RI_ALL_ONLINE }, { 0x18026c, 1, RI_ALL_ONLINE }, 609 { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE },
477 { 0x1802a0, 1, RI_ALL_ONLINE }, { 0x1a1000, 1, RI_ALL_ONLINE }, 610 { 0x180114, 1, RI_ALL_ONLINE }, { 0x180124, 1, RI_ALL_ONLINE },
478 { 0x1aa000, 1, RI_E1H_ONLINE }, { 0x1b8000, 1, RI_ALL_ONLINE }, 611 { 0x18026c, 1, RI_ALL_ONLINE }, { 0x1802a0, 1, RI_ALL_ONLINE },
479 { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE }, 612 { 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE },
480 { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x200104, 1, RI_ALL_ONLINE }, 613 { 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE },
481 { 0x200114, 1, RI_ALL_ONLINE }, { 0x200124, 1, RI_ALL_ONLINE }, 614 { 0x200104, 1, RI_ALL_ONLINE }, { 0x200114, 1, RI_ALL_ONLINE },
482 { 0x200134, 1, RI_ALL_ONLINE }, { 0x20026c, 1, RI_ALL_ONLINE }, 615 { 0x200124, 1, RI_ALL_ONLINE }, { 0x200134, 1, RI_ALL_ONLINE },
483 { 0x2002a0, 1, RI_ALL_ONLINE }, { 0x221000, 1, RI_ALL_ONLINE }, 616 { 0x20026c, 1, RI_ALL_ONLINE }, { 0x2002a0, 1, RI_ALL_ONLINE },
484 { 0x227000, 1, RI_E1H_ONLINE }, { 0x238000, 1, RI_ALL_ONLINE }, 617 { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
485 { 0x238040, 1, RI_ALL_ONLINE }, { 0x238080, 1, RI_ALL_ONLINE }, 618 { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
486 { 0x2380c0, 1, RI_ALL_ONLINE }, { 0x280104, 1, RI_ALL_ONLINE }, 619 { 0x280104, 1, RI_ALL_ONLINE }, { 0x280114, 1, RI_ALL_ONLINE },
487 { 0x280114, 1, RI_ALL_ONLINE }, { 0x280124, 1, RI_ALL_ONLINE }, 620 { 0x280124, 1, RI_ALL_ONLINE }, { 0x280134, 1, RI_ALL_ONLINE },
488 { 0x280134, 1, RI_ALL_ONLINE }, { 0x28026c, 1, RI_ALL_ONLINE }, 621 { 0x28026c, 1, RI_ALL_ONLINE }, { 0x2802a0, 1, RI_ALL_ONLINE },
489 { 0x2802a0, 1, RI_ALL_ONLINE }, { 0x2a1000, 1, RI_ALL_ONLINE }, 622 { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
490 { 0x2a9000, 1, RI_E1H_ONLINE }, { 0x2b8000, 1, RI_ALL_ONLINE }, 623 { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
491 { 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE },
492 { 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
493 { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE }, 624 { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE },
494 { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE }, 625 { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE },
495 { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x321000, 1, RI_ALL_ONLINE }, 626 { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
496 { 0x328960, 1, RI_E1H_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
497 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, 627 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
498 { 0x3380c0, 1, RI_ALL_ONLINE } 628 { 0x3380c0, 1, RI_ALL_ONLINE }
499}; 629};
@@ -505,7 +635,6 @@ static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = {
505 { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE } 635 { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE }
506}; 636};
507 637
508
509#define WREGS_COUNT_E1H 1 638#define WREGS_COUNT_E1H 1
510static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 }; 639static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 };
511 640
@@ -513,22 +642,72 @@ static const struct wreg_addr wreg_addrs_e1h[WREGS_COUNT_E1H] = {
513 { 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE } 642 { 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE }
514}; 643};
515 644
645#define WREGS_COUNT_E2 1
646static const u32 read_reg_e2_0[] = { 0x1b1040, 0x1b1000 };
516 647
517static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 }; 648static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = {
649 { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
650};
518 651
652static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a };
519 653
520#define TIMER_REGS_COUNT_E1 2 654#define TIMER_REGS_COUNT_E1 2
521static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] =
522 { 0x164014, 0x164018 };
523static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] =
524 { 0x1640d0, 0x1640d4 };
525 655
656static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = {
657 0x164014, 0x164018 };
658static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = {
659 0x1640d0, 0x1640d4 };
526 660
527#define TIMER_REGS_COUNT_E1H 2 661#define TIMER_REGS_COUNT_E1H 2
528static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] =
529 { 0x164014, 0x164018 };
530static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
531 { 0x1640d0, 0x1640d4 };
532 662
663static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = {
664 0x164014, 0x164018 };
665static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = {
666 0x1640d0, 0x1640d4 };
667
668#define TIMER_REGS_COUNT_E2 2
669
670static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = {
671 0x164014, 0x164018 };
672static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = {
673 0x1640d0, 0x1640d4 };
674
675#define PAGE_MODE_VALUES_E1 0
676
677#define PAGE_READ_REGS_E1 0
678
679#define PAGE_WRITE_REGS_E1 0
680
681static const u32 page_vals_e1[] = { 0 };
682
683static const u32 page_write_regs_e1[] = { 0 };
684
685static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } };
686
687#define PAGE_MODE_VALUES_E1H 0
688
689#define PAGE_READ_REGS_E1H 0
690
691#define PAGE_WRITE_REGS_E1H 0
692
693static const u32 page_vals_e1h[] = { 0 };
694
695static const u32 page_write_regs_e1h[] = { 0 };
696
697static const struct reg_addr page_read_regs_e1h[] = {
698 { 0x0, 0, RI_E1H_ONLINE } };
699
700#define PAGE_MODE_VALUES_E2 2
701
702#define PAGE_READ_REGS_E2 1
703
704#define PAGE_WRITE_REGS_E2 1
705
706static const u32 page_vals_e2[PAGE_MODE_VALUES_E2] = { 0, 128 };
707
708static const u32 page_write_regs_e2[PAGE_WRITE_REGS_E2] = { 328476 };
709
710static const struct reg_addr page_read_regs_e2[PAGE_READ_REGS_E2] = {
711 { 0x58000, 4608, RI_E2_ONLINE } };
533 712
534#endif /* BNX2X_DUMP_H */ 713#endif /* BNX2X_DUMP_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 8b75b05e34c5..727fe89ff37f 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -1,6 +1,6 @@
1/* bnx2x_ethtool.c: Broadcom Everest network driver. 1/* bnx2x_ethtool.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -24,71 +24,181 @@
24#include "bnx2x.h" 24#include "bnx2x.h"
25#include "bnx2x_cmn.h" 25#include "bnx2x_cmn.h"
26#include "bnx2x_dump.h" 26#include "bnx2x_dump.h"
27#include "bnx2x_init.h"
27 28
29/* Note: in the format strings below %s is replaced by the queue-name which is
30 * either its index or 'fcoe' for the fcoe queue. Make sure the format string
31 * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2
32 */
33#define MAX_QUEUE_NAME_LEN 4
34static const struct {
35 long offset;
36 int size;
37 char string[ETH_GSTRING_LEN];
38} bnx2x_q_stats_arr[] = {
39/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
40 { Q_STATS_OFFSET32(error_bytes_received_hi),
41 8, "[%s]: rx_error_bytes" },
42 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
43 8, "[%s]: rx_ucast_packets" },
44 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
45 8, "[%s]: rx_mcast_packets" },
46 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
47 8, "[%s]: rx_bcast_packets" },
48 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
49 { Q_STATS_OFFSET32(rx_err_discard_pkt),
50 4, "[%s]: rx_phy_ip_err_discards"},
51 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
52 4, "[%s]: rx_skb_alloc_discard" },
53 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
54
55/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
56 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
57 8, "[%s]: tx_ucast_packets" },
58 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
59 8, "[%s]: tx_mcast_packets" },
60 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
61 8, "[%s]: tx_bcast_packets" }
62};
63
64#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
65
66static const struct {
67 long offset;
68 int size;
69 u32 flags;
70#define STATS_FLAGS_PORT 1
71#define STATS_FLAGS_FUNC 2
72#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
73 char string[ETH_GSTRING_LEN];
74} bnx2x_stats_arr[] = {
75/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
76 8, STATS_FLAGS_BOTH, "rx_bytes" },
77 { STATS_OFFSET32(error_bytes_received_hi),
78 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
79 { STATS_OFFSET32(total_unicast_packets_received_hi),
80 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
81 { STATS_OFFSET32(total_multicast_packets_received_hi),
82 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
83 { STATS_OFFSET32(total_broadcast_packets_received_hi),
84 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
85 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
86 8, STATS_FLAGS_PORT, "rx_crc_errors" },
87 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
88 8, STATS_FLAGS_PORT, "rx_align_errors" },
89 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
90 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
91 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
92 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
93/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
94 8, STATS_FLAGS_PORT, "rx_fragments" },
95 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
96 8, STATS_FLAGS_PORT, "rx_jabbers" },
97 { STATS_OFFSET32(no_buff_discard_hi),
98 8, STATS_FLAGS_BOTH, "rx_discards" },
99 { STATS_OFFSET32(mac_filter_discard),
100 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
101 { STATS_OFFSET32(xxoverflow_discard),
102 4, STATS_FLAGS_PORT, "rx_fw_discards" },
103 { STATS_OFFSET32(brb_drop_hi),
104 8, STATS_FLAGS_PORT, "rx_brb_discard" },
105 { STATS_OFFSET32(brb_truncate_hi),
106 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
107 { STATS_OFFSET32(pause_frames_received_hi),
108 8, STATS_FLAGS_PORT, "rx_pause_frames" },
109 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
110 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
111 { STATS_OFFSET32(nig_timer_max),
112 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
113/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
114 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
115 { STATS_OFFSET32(rx_skb_alloc_failed),
116 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
117 { STATS_OFFSET32(hw_csum_err),
118 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
119
120 { STATS_OFFSET32(total_bytes_transmitted_hi),
121 8, STATS_FLAGS_BOTH, "tx_bytes" },
122 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
123 8, STATS_FLAGS_PORT, "tx_error_bytes" },
124 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
125 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
126 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
127 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
128 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
129 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
130 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
131 8, STATS_FLAGS_PORT, "tx_mac_errors" },
132 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
133 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
134/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
135 8, STATS_FLAGS_PORT, "tx_single_collisions" },
136 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
137 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
138 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
139 8, STATS_FLAGS_PORT, "tx_deferred" },
140 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
141 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
142 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
143 8, STATS_FLAGS_PORT, "tx_late_collisions" },
144 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
145 8, STATS_FLAGS_PORT, "tx_total_collisions" },
146 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
147 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
148 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
149 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
150 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
151 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
152 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
153 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
154/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
155 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
156 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
157 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
158 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
159 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
160 { STATS_OFFSET32(pause_frames_sent_hi),
161 8, STATS_FLAGS_PORT, "tx_pause_frames" }
162};
163
164#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
28 165
29static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 166static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
30{ 167{
31 struct bnx2x *bp = netdev_priv(dev); 168 struct bnx2x *bp = netdev_priv(dev);
169 int cfg_idx = bnx2x_get_link_cfg_idx(bp);
32 170
33 cmd->supported = bp->port.supported; 171 /* Dual Media boards present all available port types */
34 cmd->advertising = bp->port.advertising; 172 cmd->supported = bp->port.supported[cfg_idx] |
173 (bp->port.supported[cfg_idx ^ 1] &
174 (SUPPORTED_TP | SUPPORTED_FIBRE));
175 cmd->advertising = bp->port.advertising[cfg_idx];
35 176
36 if ((bp->state == BNX2X_STATE_OPEN) && 177 if ((bp->state == BNX2X_STATE_OPEN) &&
37 !(bp->flags & MF_FUNC_DIS) && 178 !(bp->flags & MF_FUNC_DIS) &&
38 (bp->link_vars.link_up)) { 179 (bp->link_vars.link_up)) {
39 cmd->speed = bp->link_vars.line_speed; 180 ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
40 cmd->duplex = bp->link_vars.duplex; 181 cmd->duplex = bp->link_vars.duplex;
41 if (IS_E1HMF(bp)) {
42 u16 vn_max_rate;
43
44 vn_max_rate =
45 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
46 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
47 if (vn_max_rate < cmd->speed)
48 cmd->speed = vn_max_rate;
49 }
50 } else { 182 } else {
51 cmd->speed = -1; 183 ethtool_cmd_speed_set(
52 cmd->duplex = -1; 184 cmd, bp->link_params.req_line_speed[cfg_idx]);
185 cmd->duplex = bp->link_params.req_duplex[cfg_idx];
53 } 186 }
54 187
55 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { 188 if (IS_MF(bp))
56 u32 ext_phy_type = 189 ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
57 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
58
59 switch (ext_phy_type) {
60 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
61 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
62 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
63 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
64 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
65 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
66 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
67 cmd->port = PORT_FIBRE;
68 break;
69
70 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
71 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
72 cmd->port = PORT_TP;
73 break;
74 190
75 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: 191 if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
76 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
77 bp->link_params.ext_phy_config);
78 break;
79
80 default:
81 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
82 bp->link_params.ext_phy_config);
83 break;
84 }
85 } else
86 cmd->port = PORT_TP; 192 cmd->port = PORT_TP;
193 else if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
194 cmd->port = PORT_FIBRE;
195 else
196 BNX2X_ERR("XGXS PHY Failure detected\n");
87 197
88 cmd->phy_address = bp->mdio.prtad; 198 cmd->phy_address = bp->mdio.prtad;
89 cmd->transceiver = XCVR_INTERNAL; 199 cmd->transceiver = XCVR_INTERNAL;
90 200
91 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) 201 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
92 cmd->autoneg = AUTONEG_ENABLE; 202 cmd->autoneg = AUTONEG_ENABLE;
93 else 203 else
94 cmd->autoneg = AUTONEG_DISABLE; 204 cmd->autoneg = AUTONEG_DISABLE;
@@ -97,10 +207,11 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
97 cmd->maxrxpkt = 0; 207 cmd->maxrxpkt = 0;
98 208
99 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" 209 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
100 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" 210 DP_LEVEL " supported 0x%x advertising 0x%x speed %u\n"
101 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" 211 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
102 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", 212 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
103 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, 213 cmd->cmd, cmd->supported, cmd->advertising,
214 ethtool_cmd_speed(cmd),
104 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, 215 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
105 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); 216 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
106 217
@@ -110,39 +221,129 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
110static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 221static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
111{ 222{
112 struct bnx2x *bp = netdev_priv(dev); 223 struct bnx2x *bp = netdev_priv(dev);
113 u32 advertising; 224 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
225 u32 speed;
114 226
115 if (IS_E1HMF(bp)) 227 if (IS_MF_SD(bp))
116 return 0; 228 return 0;
117 229
118 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" 230 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
119 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" 231 " supported 0x%x advertising 0x%x speed %u\n"
120 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" 232 " duplex %d port %d phy_address %d transceiver %d\n"
121 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", 233 " autoneg %d maxtxpkt %d maxrxpkt %d\n",
122 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, 234 cmd->cmd, cmd->supported, cmd->advertising,
235 ethtool_cmd_speed(cmd),
123 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, 236 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
124 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); 237 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
125 238
239 speed = ethtool_cmd_speed(cmd);
240
241 if (IS_MF_SI(bp)) {
242 u32 part;
243 u32 line_speed = bp->link_vars.line_speed;
244
245 /* use 10G if no link detected */
246 if (!line_speed)
247 line_speed = 10000;
248
249 if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
250 BNX2X_DEV_INFO("To set speed BC %X or higher "
251 "is required, please upgrade BC\n",
252 REQ_BC_VER_4_SET_MF_BW);
253 return -EINVAL;
254 }
255
256 part = (speed * 100) / line_speed;
257
258 if (line_speed < speed || !part) {
259 BNX2X_DEV_INFO("Speed setting should be in a range "
260 "from 1%% to 100%% "
261 "of actual line speed\n");
262 return -EINVAL;
263 }
264
265 if (bp->state != BNX2X_STATE_OPEN)
266 /* store value for following "load" */
267 bp->pending_max = part;
268 else
269 bnx2x_update_max_mf_config(bp, part);
270
271 return 0;
272 }
273
274 cfg_idx = bnx2x_get_link_cfg_idx(bp);
275 old_multi_phy_config = bp->link_params.multi_phy_config;
276 switch (cmd->port) {
277 case PORT_TP:
278 if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
279 break; /* no port change */
280
281 if (!(bp->port.supported[0] & SUPPORTED_TP ||
282 bp->port.supported[1] & SUPPORTED_TP)) {
283 DP(NETIF_MSG_LINK, "Unsupported port type\n");
284 return -EINVAL;
285 }
286 bp->link_params.multi_phy_config &=
287 ~PORT_HW_CFG_PHY_SELECTION_MASK;
288 if (bp->link_params.multi_phy_config &
289 PORT_HW_CFG_PHY_SWAPPED_ENABLED)
290 bp->link_params.multi_phy_config |=
291 PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
292 else
293 bp->link_params.multi_phy_config |=
294 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
295 break;
296 case PORT_FIBRE:
297 if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
298 break; /* no port change */
299
300 if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
301 bp->port.supported[1] & SUPPORTED_FIBRE)) {
302 DP(NETIF_MSG_LINK, "Unsupported port type\n");
303 return -EINVAL;
304 }
305 bp->link_params.multi_phy_config &=
306 ~PORT_HW_CFG_PHY_SELECTION_MASK;
307 if (bp->link_params.multi_phy_config &
308 PORT_HW_CFG_PHY_SWAPPED_ENABLED)
309 bp->link_params.multi_phy_config |=
310 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
311 else
312 bp->link_params.multi_phy_config |=
313 PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
314 break;
315 default:
316 DP(NETIF_MSG_LINK, "Unsupported port type\n");
317 return -EINVAL;
318 }
319 /* Save new config in case command complete successuly */
320 new_multi_phy_config = bp->link_params.multi_phy_config;
321 /* Get the new cfg_idx */
322 cfg_idx = bnx2x_get_link_cfg_idx(bp);
323 /* Restore old config in case command failed */
324 bp->link_params.multi_phy_config = old_multi_phy_config;
325 DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
326
126 if (cmd->autoneg == AUTONEG_ENABLE) { 327 if (cmd->autoneg == AUTONEG_ENABLE) {
127 if (!(bp->port.supported & SUPPORTED_Autoneg)) { 328 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
128 DP(NETIF_MSG_LINK, "Autoneg not supported\n"); 329 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
129 return -EINVAL; 330 return -EINVAL;
130 } 331 }
131 332
132 /* advertise the requested speed and duplex if supported */ 333 /* advertise the requested speed and duplex if supported */
133 cmd->advertising &= bp->port.supported; 334 cmd->advertising &= bp->port.supported[cfg_idx];
134 335
135 bp->link_params.req_line_speed = SPEED_AUTO_NEG; 336 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
136 bp->link_params.req_duplex = DUPLEX_FULL; 337 bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL;
137 bp->port.advertising |= (ADVERTISED_Autoneg | 338 bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg |
138 cmd->advertising); 339 cmd->advertising);
139 340
140 } else { /* forced speed */ 341 } else { /* forced speed */
141 /* advertise the requested speed and duplex if supported */ 342 /* advertise the requested speed and duplex if supported */
142 switch (cmd->speed) { 343 switch (speed) {
143 case SPEED_10: 344 case SPEED_10:
144 if (cmd->duplex == DUPLEX_FULL) { 345 if (cmd->duplex == DUPLEX_FULL) {
145 if (!(bp->port.supported & 346 if (!(bp->port.supported[cfg_idx] &
146 SUPPORTED_10baseT_Full)) { 347 SUPPORTED_10baseT_Full)) {
147 DP(NETIF_MSG_LINK, 348 DP(NETIF_MSG_LINK,
148 "10M full not supported\n"); 349 "10M full not supported\n");
@@ -152,7 +353,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
152 advertising = (ADVERTISED_10baseT_Full | 353 advertising = (ADVERTISED_10baseT_Full |
153 ADVERTISED_TP); 354 ADVERTISED_TP);
154 } else { 355 } else {
155 if (!(bp->port.supported & 356 if (!(bp->port.supported[cfg_idx] &
156 SUPPORTED_10baseT_Half)) { 357 SUPPORTED_10baseT_Half)) {
157 DP(NETIF_MSG_LINK, 358 DP(NETIF_MSG_LINK,
158 "10M half not supported\n"); 359 "10M half not supported\n");
@@ -166,7 +367,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
166 367
167 case SPEED_100: 368 case SPEED_100:
168 if (cmd->duplex == DUPLEX_FULL) { 369 if (cmd->duplex == DUPLEX_FULL) {
169 if (!(bp->port.supported & 370 if (!(bp->port.supported[cfg_idx] &
170 SUPPORTED_100baseT_Full)) { 371 SUPPORTED_100baseT_Full)) {
171 DP(NETIF_MSG_LINK, 372 DP(NETIF_MSG_LINK,
172 "100M full not supported\n"); 373 "100M full not supported\n");
@@ -176,7 +377,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
176 advertising = (ADVERTISED_100baseT_Full | 377 advertising = (ADVERTISED_100baseT_Full |
177 ADVERTISED_TP); 378 ADVERTISED_TP);
178 } else { 379 } else {
179 if (!(bp->port.supported & 380 if (!(bp->port.supported[cfg_idx] &
180 SUPPORTED_100baseT_Half)) { 381 SUPPORTED_100baseT_Half)) {
181 DP(NETIF_MSG_LINK, 382 DP(NETIF_MSG_LINK,
182 "100M half not supported\n"); 383 "100M half not supported\n");
@@ -194,7 +395,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
194 return -EINVAL; 395 return -EINVAL;
195 } 396 }
196 397
197 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) { 398 if (!(bp->port.supported[cfg_idx] &
399 SUPPORTED_1000baseT_Full)) {
198 DP(NETIF_MSG_LINK, "1G full not supported\n"); 400 DP(NETIF_MSG_LINK, "1G full not supported\n");
199 return -EINVAL; 401 return -EINVAL;
200 } 402 }
@@ -210,7 +412,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
210 return -EINVAL; 412 return -EINVAL;
211 } 413 }
212 414
213 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) { 415 if (!(bp->port.supported[cfg_idx]
416 & SUPPORTED_2500baseX_Full)) {
214 DP(NETIF_MSG_LINK, 417 DP(NETIF_MSG_LINK,
215 "2.5G full not supported\n"); 418 "2.5G full not supported\n");
216 return -EINVAL; 419 return -EINVAL;
@@ -226,7 +429,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
226 return -EINVAL; 429 return -EINVAL;
227 } 430 }
228 431
229 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) { 432 if (!(bp->port.supported[cfg_idx]
433 & SUPPORTED_10000baseT_Full)) {
230 DP(NETIF_MSG_LINK, "10G full not supported\n"); 434 DP(NETIF_MSG_LINK, "10G full not supported\n");
231 return -EINVAL; 435 return -EINVAL;
232 } 436 }
@@ -236,20 +440,23 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
236 break; 440 break;
237 441
238 default: 442 default:
239 DP(NETIF_MSG_LINK, "Unsupported speed\n"); 443 DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed);
240 return -EINVAL; 444 return -EINVAL;
241 } 445 }
242 446
243 bp->link_params.req_line_speed = cmd->speed; 447 bp->link_params.req_line_speed[cfg_idx] = speed;
244 bp->link_params.req_duplex = cmd->duplex; 448 bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
245 bp->port.advertising = advertising; 449 bp->port.advertising[cfg_idx] = advertising;
246 } 450 }
247 451
248 DP(NETIF_MSG_LINK, "req_line_speed %d\n" 452 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
249 DP_LEVEL " req_duplex %d advertising 0x%x\n", 453 DP_LEVEL " req_duplex %d advertising 0x%x\n",
250 bp->link_params.req_line_speed, bp->link_params.req_duplex, 454 bp->link_params.req_line_speed[cfg_idx],
251 bp->port.advertising); 455 bp->link_params.req_duplex[cfg_idx],
456 bp->port.advertising[cfg_idx]);
252 457
458 /* Set new config */
459 bp->link_params.multi_phy_config = new_multi_phy_config;
253 if (netif_running(dev)) { 460 if (netif_running(dev)) {
254 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 461 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
255 bnx2x_link_set(bp); 462 bnx2x_link_set(bp);
@@ -260,12 +467,13 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
260 467
261#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE) 468#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
262#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE) 469#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
470#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
263 471
264static int bnx2x_get_regs_len(struct net_device *dev) 472static int bnx2x_get_regs_len(struct net_device *dev)
265{ 473{
266 struct bnx2x *bp = netdev_priv(dev); 474 struct bnx2x *bp = netdev_priv(dev);
267 int regdump_len = 0; 475 int regdump_len = 0;
268 int i; 476 int i, j, k;
269 477
270 if (CHIP_IS_E1(bp)) { 478 if (CHIP_IS_E1(bp)) {
271 for (i = 0; i < REGS_COUNT; i++) 479 for (i = 0; i < REGS_COUNT; i++)
@@ -277,7 +485,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
277 regdump_len += wreg_addrs_e1[i].size * 485 regdump_len += wreg_addrs_e1[i].size *
278 (1 + wreg_addrs_e1[i].read_regs_count); 486 (1 + wreg_addrs_e1[i].read_regs_count);
279 487
280 } else { /* E1H */ 488 } else if (CHIP_IS_E1H(bp)) {
281 for (i = 0; i < REGS_COUNT; i++) 489 for (i = 0; i < REGS_COUNT; i++)
282 if (IS_E1H_ONLINE(reg_addrs[i].info)) 490 if (IS_E1H_ONLINE(reg_addrs[i].info))
283 regdump_len += reg_addrs[i].size; 491 regdump_len += reg_addrs[i].size;
@@ -286,6 +494,24 @@ static int bnx2x_get_regs_len(struct net_device *dev)
286 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info)) 494 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
287 regdump_len += wreg_addrs_e1h[i].size * 495 regdump_len += wreg_addrs_e1h[i].size *
288 (1 + wreg_addrs_e1h[i].read_regs_count); 496 (1 + wreg_addrs_e1h[i].read_regs_count);
497 } else if (CHIP_IS_E2(bp)) {
498 for (i = 0; i < REGS_COUNT; i++)
499 if (IS_E2_ONLINE(reg_addrs[i].info))
500 regdump_len += reg_addrs[i].size;
501
502 for (i = 0; i < WREGS_COUNT_E2; i++)
503 if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
504 regdump_len += wreg_addrs_e2[i].size *
505 (1 + wreg_addrs_e2[i].read_regs_count);
506
507 for (i = 0; i < PAGE_MODE_VALUES_E2; i++)
508 for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
509 for (k = 0; k < PAGE_READ_REGS_E2; k++)
510 if (IS_E2_ONLINE(page_read_regs_e2[k].
511 info))
512 regdump_len +=
513 page_read_regs_e2[k].size;
514 }
289 } 515 }
290 regdump_len *= 4; 516 regdump_len *= 4;
291 regdump_len += sizeof(struct dump_hdr); 517 regdump_len += sizeof(struct dump_hdr);
@@ -293,6 +519,23 @@ static int bnx2x_get_regs_len(struct net_device *dev)
293 return regdump_len; 519 return regdump_len;
294} 520}
295 521
522static inline void bnx2x_read_pages_regs_e2(struct bnx2x *bp, u32 *p)
523{
524 u32 i, j, k, n;
525
526 for (i = 0; i < PAGE_MODE_VALUES_E2; i++) {
527 for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
528 REG_WR(bp, page_write_regs_e2[j], page_vals_e2[i]);
529 for (k = 0; k < PAGE_READ_REGS_E2; k++)
530 if (IS_E2_ONLINE(page_read_regs_e2[k].info))
531 for (n = 0; n <
532 page_read_regs_e2[k].size; n++)
533 *p++ = REG_RD(bp,
534 page_read_regs_e2[k].addr + n*4);
535 }
536 }
537}
538
296static void bnx2x_get_regs(struct net_device *dev, 539static void bnx2x_get_regs(struct net_device *dev,
297 struct ethtool_regs *regs, void *_p) 540 struct ethtool_regs *regs, void *_p)
298{ 541{
@@ -306,13 +549,26 @@ static void bnx2x_get_regs(struct net_device *dev,
306 if (!netif_running(bp->dev)) 549 if (!netif_running(bp->dev))
307 return; 550 return;
308 551
552 /* Disable parity attentions as long as following dump may
553 * cause false alarms by reading never written registers. We
554 * will re-enable parity attentions right after the dump.
555 */
556 bnx2x_disable_blocks_parity(bp);
557
309 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; 558 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
310 dump_hdr.dump_sign = dump_sign_all; 559 dump_hdr.dump_sign = dump_sign_all;
311 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR); 560 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
312 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR); 561 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
313 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR); 562 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
314 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR); 563 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
315 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE; 564
565 if (CHIP_IS_E1(bp))
566 dump_hdr.info = RI_E1_ONLINE;
567 else if (CHIP_IS_E1H(bp))
568 dump_hdr.info = RI_E1H_ONLINE;
569 else if (CHIP_IS_E2(bp))
570 dump_hdr.info = RI_E2_ONLINE |
571 (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
316 572
317 memcpy(p, &dump_hdr, sizeof(struct dump_hdr)); 573 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
318 p += dump_hdr.hdr_size + 1; 574 p += dump_hdr.hdr_size + 1;
@@ -324,16 +580,29 @@ static void bnx2x_get_regs(struct net_device *dev,
324 *p++ = REG_RD(bp, 580 *p++ = REG_RD(bp,
325 reg_addrs[i].addr + j*4); 581 reg_addrs[i].addr + j*4);
326 582
327 } else { /* E1H */ 583 } else if (CHIP_IS_E1H(bp)) {
328 for (i = 0; i < REGS_COUNT; i++) 584 for (i = 0; i < REGS_COUNT; i++)
329 if (IS_E1H_ONLINE(reg_addrs[i].info)) 585 if (IS_E1H_ONLINE(reg_addrs[i].info))
330 for (j = 0; j < reg_addrs[i].size; j++) 586 for (j = 0; j < reg_addrs[i].size; j++)
331 *p++ = REG_RD(bp, 587 *p++ = REG_RD(bp,
332 reg_addrs[i].addr + j*4); 588 reg_addrs[i].addr + j*4);
589
590 } else if (CHIP_IS_E2(bp)) {
591 for (i = 0; i < REGS_COUNT; i++)
592 if (IS_E2_ONLINE(reg_addrs[i].info))
593 for (j = 0; j < reg_addrs[i].size; j++)
594 *p++ = REG_RD(bp,
595 reg_addrs[i].addr + j*4);
596
597 bnx2x_read_pages_regs_e2(bp, p);
333 } 598 }
599 /* Re-enable parity attentions */
600 bnx2x_clear_blocks_parity(bp);
601 if (CHIP_PARITY_ENABLED(bp))
602 bnx2x_enable_blocks_parity(bp);
334} 603}
335 604
336#define PHY_FW_VER_LEN 10 605#define PHY_FW_VER_LEN 20
337 606
338static void bnx2x_get_drvinfo(struct net_device *dev, 607static void bnx2x_get_drvinfo(struct net_device *dev,
339 struct ethtool_drvinfo *info) 608 struct ethtool_drvinfo *info)
@@ -436,7 +705,7 @@ static u32 bnx2x_get_link(struct net_device *dev)
436{ 705{
437 struct bnx2x *bp = netdev_priv(dev); 706 struct bnx2x *bp = netdev_priv(dev);
438 707
439 if (bp->flags & MF_FUNC_DIS) 708 if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
440 return 0; 709 return 0;
441 710
442 return bp->link_vars.link_up; 711 return bp->link_vars.link_up;
@@ -811,7 +1080,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
811 struct bnx2x *bp = netdev_priv(dev); 1080 struct bnx2x *bp = netdev_priv(dev);
812 int port = BP_PORT(bp); 1081 int port = BP_PORT(bp);
813 int rc = 0; 1082 int rc = 0;
814 1083 u32 ext_phy_config;
815 if (!netif_running(dev)) 1084 if (!netif_running(dev))
816 return -EAGAIN; 1085 return -EAGAIN;
817 1086
@@ -827,6 +1096,10 @@ static int bnx2x_set_eeprom(struct net_device *dev,
827 !bp->port.pmf) 1096 !bp->port.pmf)
828 return -EINVAL; 1097 return -EINVAL;
829 1098
1099 ext_phy_config =
1100 SHMEM_RD(bp,
1101 dev_info.port_hw_config[port].external_phy_config);
1102
830 if (eeprom->magic == 0x50485950) { 1103 if (eeprom->magic == 0x50485950) {
831 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */ 1104 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
832 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 1105 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -834,7 +1107,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
834 bnx2x_acquire_phy_lock(bp); 1107 bnx2x_acquire_phy_lock(bp);
835 rc |= bnx2x_link_reset(&bp->link_params, 1108 rc |= bnx2x_link_reset(&bp->link_params,
836 &bp->link_vars, 0); 1109 &bp->link_vars, 0);
837 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == 1110 if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
838 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) 1111 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
839 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 1112 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
840 MISC_REGISTERS_GPIO_HIGH, port); 1113 MISC_REGISTERS_GPIO_HIGH, port);
@@ -855,10 +1128,8 @@ static int bnx2x_set_eeprom(struct net_device *dev,
855 } 1128 }
856 } else if (eeprom->magic == 0x53985943) { 1129 } else if (eeprom->magic == 0x53985943) {
857 /* 'PHYC' (0x53985943): PHY FW upgrade completed */ 1130 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
858 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == 1131 if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
859 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) { 1132 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
860 u8 ext_phy_addr =
861 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
862 1133
863 /* DSP Remove Download Mode */ 1134 /* DSP Remove Download Mode */
864 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 1135 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
@@ -866,7 +1137,8 @@ static int bnx2x_set_eeprom(struct net_device *dev,
866 1137
867 bnx2x_acquire_phy_lock(bp); 1138 bnx2x_acquire_phy_lock(bp);
868 1139
869 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); 1140 bnx2x_sfx7101_sp_sw_reset(bp,
1141 &bp->link_params.phy[EXT_PHY1]);
870 1142
871 /* wait 0.5 sec to allow it to run */ 1143 /* wait 0.5 sec to allow it to run */
872 msleep(500); 1144 msleep(500);
@@ -879,6 +1151,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
879 1151
880 return rc; 1152 return rc;
881} 1153}
1154
882static int bnx2x_get_coalesce(struct net_device *dev, 1155static int bnx2x_get_coalesce(struct net_device *dev,
883 struct ethtool_coalesce *coal) 1156 struct ethtool_coalesce *coal)
884{ 1157{
@@ -920,7 +1193,14 @@ static void bnx2x_get_ringparam(struct net_device *dev,
920 ering->rx_mini_max_pending = 0; 1193 ering->rx_mini_max_pending = 0;
921 ering->rx_jumbo_max_pending = 0; 1194 ering->rx_jumbo_max_pending = 0;
922 1195
923 ering->rx_pending = bp->rx_ring_size; 1196 if (bp->rx_ring_size)
1197 ering->rx_pending = bp->rx_ring_size;
1198 else
1199 if (bp->state == BNX2X_STATE_OPEN && bp->num_queues)
1200 ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
1201 else
1202 ering->rx_pending = MAX_RX_AVAIL;
1203
924 ering->rx_mini_pending = 0; 1204 ering->rx_mini_pending = 0;
925 ering->rx_jumbo_pending = 0; 1205 ering->rx_jumbo_pending = 0;
926 1206
@@ -940,6 +1220,8 @@ static int bnx2x_set_ringparam(struct net_device *dev,
940 } 1220 }
941 1221
942 if ((ering->rx_pending > MAX_RX_AVAIL) || 1222 if ((ering->rx_pending > MAX_RX_AVAIL) ||
1223 (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
1224 MIN_RX_SIZE_TPA)) ||
943 (ering->tx_pending > MAX_TX_AVAIL) || 1225 (ering->tx_pending > MAX_TX_AVAIL) ||
944 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) 1226 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
945 return -EINVAL; 1227 return -EINVAL;
@@ -959,10 +1241,9 @@ static void bnx2x_get_pauseparam(struct net_device *dev,
959 struct ethtool_pauseparam *epause) 1241 struct ethtool_pauseparam *epause)
960{ 1242{
961 struct bnx2x *bp = netdev_priv(dev); 1243 struct bnx2x *bp = netdev_priv(dev);
962 1244 int cfg_idx = bnx2x_get_link_cfg_idx(bp);
963 epause->autoneg = (bp->link_params.req_flow_ctrl == 1245 epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
964 BNX2X_FLOW_CTRL_AUTO) && 1246 BNX2X_FLOW_CTRL_AUTO);
965 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
966 1247
967 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) == 1248 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
968 BNX2X_FLOW_CTRL_RX); 1249 BNX2X_FLOW_CTRL_RX);
@@ -978,37 +1259,39 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
978 struct ethtool_pauseparam *epause) 1259 struct ethtool_pauseparam *epause)
979{ 1260{
980 struct bnx2x *bp = netdev_priv(dev); 1261 struct bnx2x *bp = netdev_priv(dev);
981 1262 u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
982 if (IS_E1HMF(bp)) 1263 if (IS_MF(bp))
983 return 0; 1264 return 0;
984 1265
985 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" 1266 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
986 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", 1267 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
987 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); 1268 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
988 1269
989 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; 1270 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
990 1271
991 if (epause->rx_pause) 1272 if (epause->rx_pause)
992 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX; 1273 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;
993 1274
994 if (epause->tx_pause) 1275 if (epause->tx_pause)
995 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX; 1276 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;
996 1277
997 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) 1278 if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
998 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE; 1279 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;
999 1280
1000 if (epause->autoneg) { 1281 if (epause->autoneg) {
1001 if (!(bp->port.supported & SUPPORTED_Autoneg)) { 1282 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
1002 DP(NETIF_MSG_LINK, "autoneg not supported\n"); 1283 DP(NETIF_MSG_LINK, "autoneg not supported\n");
1003 return -EINVAL; 1284 return -EINVAL;
1004 } 1285 }
1005 1286
1006 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) 1287 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
1007 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; 1288 bp->link_params.req_flow_ctrl[cfg_idx] =
1289 BNX2X_FLOW_CTRL_AUTO;
1290 }
1008 } 1291 }
1009 1292
1010 DP(NETIF_MSG_LINK, 1293 DP(NETIF_MSG_LINK,
1011 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl); 1294 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
1012 1295
1013 if (netif_running(dev)) { 1296 if (netif_running(dev)) {
1014 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 1297 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -1018,92 +1301,6 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
1018 return 0; 1301 return 0;
1019} 1302}
1020 1303
1021static int bnx2x_set_flags(struct net_device *dev, u32 data)
1022{
1023 struct bnx2x *bp = netdev_priv(dev);
1024 int changed = 0;
1025 int rc = 0;
1026
1027 if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
1028 return -EINVAL;
1029
1030 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1031 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
1032 return -EAGAIN;
1033 }
1034
1035 /* TPA requires Rx CSUM offloading */
1036 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
1037 if (!bp->disable_tpa) {
1038 if (!(dev->features & NETIF_F_LRO)) {
1039 dev->features |= NETIF_F_LRO;
1040 bp->flags |= TPA_ENABLE_FLAG;
1041 changed = 1;
1042 }
1043 } else
1044 rc = -EINVAL;
1045 } else if (dev->features & NETIF_F_LRO) {
1046 dev->features &= ~NETIF_F_LRO;
1047 bp->flags &= ~TPA_ENABLE_FLAG;
1048 changed = 1;
1049 }
1050
1051 if (data & ETH_FLAG_RXHASH)
1052 dev->features |= NETIF_F_RXHASH;
1053 else
1054 dev->features &= ~NETIF_F_RXHASH;
1055
1056 if (changed && netif_running(dev)) {
1057 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
1058 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
1059 }
1060
1061 return rc;
1062}
1063
1064static u32 bnx2x_get_rx_csum(struct net_device *dev)
1065{
1066 struct bnx2x *bp = netdev_priv(dev);
1067
1068 return bp->rx_csum;
1069}
1070
1071static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
1072{
1073 struct bnx2x *bp = netdev_priv(dev);
1074 int rc = 0;
1075
1076 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1077 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
1078 return -EAGAIN;
1079 }
1080
1081 bp->rx_csum = data;
1082
1083 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
1084 TPA'ed packets will be discarded due to wrong TCP CSUM */
1085 if (!data) {
1086 u32 flags = ethtool_op_get_flags(dev);
1087
1088 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
1089 }
1090
1091 return rc;
1092}
1093
1094static int bnx2x_set_tso(struct net_device *dev, u32 data)
1095{
1096 if (data) {
1097 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
1098 dev->features |= NETIF_F_TSO6;
1099 } else {
1100 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
1101 dev->features &= ~NETIF_F_TSO6;
1102 }
1103
1104 return 0;
1105}
1106
1107static const struct { 1304static const struct {
1108 char string[ETH_GSTRING_LEN]; 1305 char string[ETH_GSTRING_LEN];
1109} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = { 1306} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
@@ -1185,13 +1382,17 @@ static int bnx2x_test_registers(struct bnx2x *bp)
1185 1382
1186 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { 1383 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
1187 u32 offset, mask, save_val, val; 1384 u32 offset, mask, save_val, val;
1385 if (CHIP_IS_E2(bp) &&
1386 reg_tbl[i].offset0 == HC_REG_AGG_INT_0)
1387 continue;
1188 1388
1189 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; 1389 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
1190 mask = reg_tbl[i].mask; 1390 mask = reg_tbl[i].mask;
1191 1391
1192 save_val = REG_RD(bp, offset); 1392 save_val = REG_RD(bp, offset);
1193 1393
1194 REG_WR(bp, offset, (wr_val & mask)); 1394 REG_WR(bp, offset, wr_val & mask);
1395
1195 val = REG_RD(bp, offset); 1396 val = REG_RD(bp, offset);
1196 1397
1197 /* Restore the original register's value */ 1398 /* Restore the original register's value */
@@ -1236,20 +1437,33 @@ static int bnx2x_test_memory(struct bnx2x *bp)
1236 u32 offset; 1437 u32 offset;
1237 u32 e1_mask; 1438 u32 e1_mask;
1238 u32 e1h_mask; 1439 u32 e1h_mask;
1440 u32 e2_mask;
1239 } prty_tbl[] = { 1441 } prty_tbl[] = {
1240 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 }, 1442 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0, 0 },
1241 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 }, 1443 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2, 0 },
1242 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 }, 1444 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0, 0 },
1243 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 }, 1445 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0, 0 },
1244 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 }, 1446 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0, 0 },
1245 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 }, 1447 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0, 0 },
1246 1448
1247 { NULL, 0xffffffff, 0, 0 } 1449 { NULL, 0xffffffff, 0, 0, 0 }
1248 }; 1450 };
1249 1451
1250 if (!netif_running(bp->dev)) 1452 if (!netif_running(bp->dev))
1251 return rc; 1453 return rc;
1252 1454
1455 /* pre-Check the parity status */
1456 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
1457 val = REG_RD(bp, prty_tbl[i].offset);
1458 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
1459 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
1460 (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
1461 DP(NETIF_MSG_HW,
1462 "%s is 0x%x\n", prty_tbl[i].name, val);
1463 goto test_mem_exit;
1464 }
1465 }
1466
1253 /* Go through all the memories */ 1467 /* Go through all the memories */
1254 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) 1468 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
1255 for (j = 0; j < mem_tbl[i].size; j++) 1469 for (j = 0; j < mem_tbl[i].size; j++)
@@ -1259,7 +1473,8 @@ static int bnx2x_test_memory(struct bnx2x *bp)
1259 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { 1473 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
1260 val = REG_RD(bp, prty_tbl[i].offset); 1474 val = REG_RD(bp, prty_tbl[i].offset);
1261 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) || 1475 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
1262 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) { 1476 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
1477 (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
1263 DP(NETIF_MSG_HW, 1478 DP(NETIF_MSG_HW,
1264 "%s is 0x%x\n", prty_tbl[i].name, val); 1479 "%s is 0x%x\n", prty_tbl[i].name, val);
1265 goto test_mem_exit; 1480 goto test_mem_exit;
@@ -1272,12 +1487,12 @@ test_mem_exit:
1272 return rc; 1487 return rc;
1273} 1488}
1274 1489
1275static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up) 1490static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
1276{ 1491{
1277 int cnt = 1000; 1492 int cnt = 1400;
1278 1493
1279 if (link_up) 1494 if (link_up)
1280 while (bnx2x_link_test(bp) && cnt--) 1495 while (bnx2x_link_test(bp, is_serdes) && cnt--)
1281 msleep(10); 1496 msleep(10);
1282} 1497}
1283 1498
@@ -1293,7 +1508,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1293 u16 pkt_prod, bd_prod; 1508 u16 pkt_prod, bd_prod;
1294 struct sw_tx_bd *tx_buf; 1509 struct sw_tx_bd *tx_buf;
1295 struct eth_tx_start_bd *tx_start_bd; 1510 struct eth_tx_start_bd *tx_start_bd;
1296 struct eth_tx_parse_bd *pbd = NULL; 1511 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1512 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1297 dma_addr_t mapping; 1513 dma_addr_t mapping;
1298 union eth_rx_cqe *cqe; 1514 union eth_rx_cqe *cqe;
1299 u8 cqe_fp_flags; 1515 u8 cqe_fp_flags;
@@ -1304,7 +1520,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1304 /* check the loopback mode */ 1520 /* check the loopback mode */
1305 switch (loopback_mode) { 1521 switch (loopback_mode) {
1306 case BNX2X_PHY_LOOPBACK: 1522 case BNX2X_PHY_LOOPBACK:
1307 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10) 1523 if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
1308 return -EINVAL; 1524 return -EINVAL;
1309 break; 1525 break;
1310 case BNX2X_MAC_LOOPBACK: 1526 case BNX2X_MAC_LOOPBACK:
@@ -1318,7 +1534,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1318 /* prepare the loopback packet */ 1534 /* prepare the loopback packet */
1319 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ? 1535 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
1320 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN); 1536 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
1321 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); 1537 skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
1322 if (!skb) { 1538 if (!skb) {
1323 rc = -ENOMEM; 1539 rc = -ENOMEM;
1324 goto test_loopback_exit; 1540 goto test_loopback_exit;
@@ -1349,16 +1565,23 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1349 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 1565 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1350 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ 1566 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
1351 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); 1567 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
1352 tx_start_bd->vlan = cpu_to_le16(pkt_prod); 1568 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
1353 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 1569 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1354 tx_start_bd->general_data = ((UNICAST_ADDRESS << 1570 SET_FLAG(tx_start_bd->general_data,
1355 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1); 1571 ETH_TX_START_BD_ETH_ADDR_TYPE,
1572 UNICAST_ADDRESS);
1573 SET_FLAG(tx_start_bd->general_data,
1574 ETH_TX_START_BD_HDR_NBDS,
1575 1);
1356 1576
1357 /* turn on parsing and get a BD */ 1577 /* turn on parsing and get a BD */
1358 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 1578 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1359 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
1360 1579
1361 memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); 1580 pbd_e1x = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e1x;
1581 pbd_e2 = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e2;
1582
1583 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
1584 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
1362 1585
1363 wmb(); 1586 wmb();
1364 1587
@@ -1377,6 +1600,20 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1377 if (tx_idx != tx_start_idx + num_pkts) 1600 if (tx_idx != tx_start_idx + num_pkts)
1378 goto test_loopback_exit; 1601 goto test_loopback_exit;
1379 1602
1603 /* Unlike HC IGU won't generate an interrupt for status block
1604 * updates that have been performed while interrupts were
1605 * disabled.
1606 */
1607 if (bp->common.int_block == INT_BLOCK_IGU) {
1608 /* Disable local BHes to prevent a dead-lock situation between
1609 * sch_direct_xmit() and bnx2x_run_loopback() (calling
1610 * bnx2x_tx_int()), as both are taking netif_tx_lock().
1611 */
1612 local_bh_disable();
1613 bnx2x_tx_int(fp_tx);
1614 local_bh_enable();
1615 }
1616
1380 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb); 1617 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
1381 if (rx_idx != rx_start_idx + num_pkts) 1618 if (rx_idx != rx_start_idx + num_pkts)
1382 goto test_loopback_exit; 1619 goto test_loopback_exit;
@@ -1460,9 +1697,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
1460 { 0x100, 0x350 }, /* manuf_info */ 1697 { 0x100, 0x350 }, /* manuf_info */
1461 { 0x450, 0xf0 }, /* feature_info */ 1698 { 0x450, 0xf0 }, /* feature_info */
1462 { 0x640, 0x64 }, /* upgrade_key_info */ 1699 { 0x640, 0x64 }, /* upgrade_key_info */
1463 { 0x6a4, 0x64 },
1464 { 0x708, 0x70 }, /* manuf_key_info */ 1700 { 0x708, 0x70 }, /* manuf_key_info */
1465 { 0x778, 0x70 },
1466 { 0, 0 } 1701 { 0, 0 }
1467 }; 1702 };
1468 __be32 buf[0x350 / 4]; 1703 __be32 buf[0x350 / 4];
@@ -1519,18 +1754,17 @@ static int bnx2x_test_intr(struct bnx2x *bp)
1519 1754
1520 config->hdr.length = 0; 1755 config->hdr.length = 0;
1521 if (CHIP_IS_E1(bp)) 1756 if (CHIP_IS_E1(bp))
1522 /* use last unicast entries */ 1757 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
1523 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
1524 else 1758 else
1525 config->hdr.offset = BP_FUNC(bp); 1759 config->hdr.offset = BP_FUNC(bp);
1526 config->hdr.client_id = bp->fp->cl_id; 1760 config->hdr.client_id = bp->fp->cl_id;
1527 config->hdr.reserved1 = 0; 1761 config->hdr.reserved1 = 0;
1528 1762
1529 bp->set_mac_pending++; 1763 bp->set_mac_pending = 1;
1530 smp_wmb(); 1764 smp_wmb();
1531 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 1765 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
1532 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 1766 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
1533 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 1767 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
1534 if (rc == 0) { 1768 if (rc == 0) {
1535 for (i = 0; i < 10; i++) { 1769 for (i = 0; i < 10; i++) {
1536 if (!bp->set_mac_pending) 1770 if (!bp->set_mac_pending)
@@ -1549,7 +1783,7 @@ static void bnx2x_self_test(struct net_device *dev,
1549 struct ethtool_test *etest, u64 *buf) 1783 struct ethtool_test *etest, u64 *buf)
1550{ 1784{
1551 struct bnx2x *bp = netdev_priv(dev); 1785 struct bnx2x *bp = netdev_priv(dev);
1552 1786 u8 is_serdes;
1553 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 1787 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1554 printk(KERN_ERR "Handling parity error recovery. Try again later\n"); 1788 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
1555 etest->flags |= ETH_TEST_FL_FAILED; 1789 etest->flags |= ETH_TEST_FL_FAILED;
@@ -1562,8 +1796,9 @@ static void bnx2x_self_test(struct net_device *dev,
1562 return; 1796 return;
1563 1797
1564 /* offline tests are not supported in MF mode */ 1798 /* offline tests are not supported in MF mode */
1565 if (IS_E1HMF(bp)) 1799 if (IS_MF(bp))
1566 etest->flags &= ~ETH_TEST_FL_OFFLINE; 1800 etest->flags &= ~ETH_TEST_FL_OFFLINE;
1801 is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
1567 1802
1568 if (etest->flags & ETH_TEST_FL_OFFLINE) { 1803 if (etest->flags & ETH_TEST_FL_OFFLINE) {
1569 int port = BP_PORT(bp); 1804 int port = BP_PORT(bp);
@@ -1575,11 +1810,12 @@ static void bnx2x_self_test(struct net_device *dev,
1575 /* disable input for TX port IF */ 1810 /* disable input for TX port IF */
1576 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); 1811 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
1577 1812
1578 link_up = (bnx2x_link_test(bp) == 0); 1813 link_up = bp->link_vars.link_up;
1814
1579 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 1815 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
1580 bnx2x_nic_load(bp, LOAD_DIAG); 1816 bnx2x_nic_load(bp, LOAD_DIAG);
1581 /* wait until link state is restored */ 1817 /* wait until link state is restored */
1582 bnx2x_wait_for_link(bp, link_up); 1818 bnx2x_wait_for_link(bp, link_up, is_serdes);
1583 1819
1584 if (bnx2x_test_registers(bp) != 0) { 1820 if (bnx2x_test_registers(bp) != 0) {
1585 buf[0] = 1; 1821 buf[0] = 1;
@@ -1589,6 +1825,7 @@ static void bnx2x_self_test(struct net_device *dev,
1589 buf[1] = 1; 1825 buf[1] = 1;
1590 etest->flags |= ETH_TEST_FL_FAILED; 1826 etest->flags |= ETH_TEST_FL_FAILED;
1591 } 1827 }
1828
1592 buf[2] = bnx2x_test_loopback(bp, link_up); 1829 buf[2] = bnx2x_test_loopback(bp, link_up);
1593 if (buf[2] != 0) 1830 if (buf[2] != 0)
1594 etest->flags |= ETH_TEST_FL_FAILED; 1831 etest->flags |= ETH_TEST_FL_FAILED;
@@ -1600,7 +1837,7 @@ static void bnx2x_self_test(struct net_device *dev,
1600 1837
1601 bnx2x_nic_load(bp, LOAD_NORMAL); 1838 bnx2x_nic_load(bp, LOAD_NORMAL);
1602 /* wait until link state is restored */ 1839 /* wait until link state is restored */
1603 bnx2x_wait_for_link(bp, link_up); 1840 bnx2x_wait_for_link(bp, link_up, is_serdes);
1604 } 1841 }
1605 if (bnx2x_test_nvram(bp) != 0) { 1842 if (bnx2x_test_nvram(bp) != 0) {
1606 buf[3] = 1; 1843 buf[3] = 1;
@@ -1610,150 +1847,22 @@ static void bnx2x_self_test(struct net_device *dev,
1610 buf[4] = 1; 1847 buf[4] = 1;
1611 etest->flags |= ETH_TEST_FL_FAILED; 1848 etest->flags |= ETH_TEST_FL_FAILED;
1612 } 1849 }
1613 if (bp->port.pmf) 1850
1614 if (bnx2x_link_test(bp) != 0) { 1851 if (bnx2x_link_test(bp, is_serdes) != 0) {
1615 buf[5] = 1; 1852 buf[5] = 1;
1616 etest->flags |= ETH_TEST_FL_FAILED; 1853 etest->flags |= ETH_TEST_FL_FAILED;
1617 } 1854 }
1618 1855
1619#ifdef BNX2X_EXTRA_DEBUG 1856#ifdef BNX2X_EXTRA_DEBUG
1620 bnx2x_panic_dump(bp); 1857 bnx2x_panic_dump(bp);
1621#endif 1858#endif
1622} 1859}
1623 1860
1624static const struct {
1625 long offset;
1626 int size;
1627 u8 string[ETH_GSTRING_LEN];
1628} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
1629/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
1630 { Q_STATS_OFFSET32(error_bytes_received_hi),
1631 8, "[%d]: rx_error_bytes" },
1632 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
1633 8, "[%d]: rx_ucast_packets" },
1634 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
1635 8, "[%d]: rx_mcast_packets" },
1636 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
1637 8, "[%d]: rx_bcast_packets" },
1638 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
1639 { Q_STATS_OFFSET32(rx_err_discard_pkt),
1640 4, "[%d]: rx_phy_ip_err_discards"},
1641 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
1642 4, "[%d]: rx_skb_alloc_discard" },
1643 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
1644
1645/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
1646 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
1647 8, "[%d]: tx_ucast_packets" },
1648 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
1649 8, "[%d]: tx_mcast_packets" },
1650 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
1651 8, "[%d]: tx_bcast_packets" }
1652};
1653
1654static const struct {
1655 long offset;
1656 int size;
1657 u32 flags;
1658#define STATS_FLAGS_PORT 1
1659#define STATS_FLAGS_FUNC 2
1660#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
1661 u8 string[ETH_GSTRING_LEN];
1662} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
1663/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
1664 8, STATS_FLAGS_BOTH, "rx_bytes" },
1665 { STATS_OFFSET32(error_bytes_received_hi),
1666 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
1667 { STATS_OFFSET32(total_unicast_packets_received_hi),
1668 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
1669 { STATS_OFFSET32(total_multicast_packets_received_hi),
1670 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
1671 { STATS_OFFSET32(total_broadcast_packets_received_hi),
1672 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
1673 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
1674 8, STATS_FLAGS_PORT, "rx_crc_errors" },
1675 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
1676 8, STATS_FLAGS_PORT, "rx_align_errors" },
1677 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
1678 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
1679 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
1680 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
1681/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
1682 8, STATS_FLAGS_PORT, "rx_fragments" },
1683 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
1684 8, STATS_FLAGS_PORT, "rx_jabbers" },
1685 { STATS_OFFSET32(no_buff_discard_hi),
1686 8, STATS_FLAGS_BOTH, "rx_discards" },
1687 { STATS_OFFSET32(mac_filter_discard),
1688 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
1689 { STATS_OFFSET32(xxoverflow_discard),
1690 4, STATS_FLAGS_PORT, "rx_fw_discards" },
1691 { STATS_OFFSET32(brb_drop_hi),
1692 8, STATS_FLAGS_PORT, "rx_brb_discard" },
1693 { STATS_OFFSET32(brb_truncate_hi),
1694 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
1695 { STATS_OFFSET32(pause_frames_received_hi),
1696 8, STATS_FLAGS_PORT, "rx_pause_frames" },
1697 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
1698 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
1699 { STATS_OFFSET32(nig_timer_max),
1700 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
1701/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
1702 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
1703 { STATS_OFFSET32(rx_skb_alloc_failed),
1704 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
1705 { STATS_OFFSET32(hw_csum_err),
1706 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
1707
1708 { STATS_OFFSET32(total_bytes_transmitted_hi),
1709 8, STATS_FLAGS_BOTH, "tx_bytes" },
1710 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
1711 8, STATS_FLAGS_PORT, "tx_error_bytes" },
1712 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
1713 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
1714 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
1715 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
1716 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
1717 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
1718 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
1719 8, STATS_FLAGS_PORT, "tx_mac_errors" },
1720 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
1721 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
1722/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
1723 8, STATS_FLAGS_PORT, "tx_single_collisions" },
1724 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
1725 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
1726 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
1727 8, STATS_FLAGS_PORT, "tx_deferred" },
1728 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
1729 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
1730 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
1731 8, STATS_FLAGS_PORT, "tx_late_collisions" },
1732 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
1733 8, STATS_FLAGS_PORT, "tx_total_collisions" },
1734 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
1735 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
1736 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
1737 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
1738 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
1739 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
1740 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
1741 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
1742/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
1743 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
1744 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
1745 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
1746 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
1747 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
1748 { STATS_OFFSET32(pause_frames_sent_hi),
1749 8, STATS_FLAGS_PORT, "tx_pause_frames" }
1750};
1751
1752#define IS_PORT_STAT(i) \ 1861#define IS_PORT_STAT(i) \
1753 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) 1862 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
1754#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) 1863#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
1755#define IS_E1HMF_MODE_STAT(bp) \ 1864#define IS_MF_MODE_STAT(bp) \
1756 (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) 1865 (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
1757 1866
1758static int bnx2x_get_sset_count(struct net_device *dev, int stringset) 1867static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
1759{ 1868{
@@ -1763,11 +1872,12 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
1763 switch (stringset) { 1872 switch (stringset) {
1764 case ETH_SS_STATS: 1873 case ETH_SS_STATS:
1765 if (is_multi(bp)) { 1874 if (is_multi(bp)) {
1766 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues; 1875 num_stats = BNX2X_NUM_STAT_QUEUES(bp) *
1767 if (!IS_E1HMF_MODE_STAT(bp)) 1876 BNX2X_NUM_Q_STATS;
1877 if (!IS_MF_MODE_STAT(bp))
1768 num_stats += BNX2X_NUM_STATS; 1878 num_stats += BNX2X_NUM_STATS;
1769 } else { 1879 } else {
1770 if (IS_E1HMF_MODE_STAT(bp)) { 1880 if (IS_MF_MODE_STAT(bp)) {
1771 num_stats = 0; 1881 num_stats = 0;
1772 for (i = 0; i < BNX2X_NUM_STATS; i++) 1882 for (i = 0; i < BNX2X_NUM_STATS; i++)
1773 if (IS_FUNC_STAT(i)) 1883 if (IS_FUNC_STAT(i))
@@ -1789,25 +1899,35 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
1789{ 1899{
1790 struct bnx2x *bp = netdev_priv(dev); 1900 struct bnx2x *bp = netdev_priv(dev);
1791 int i, j, k; 1901 int i, j, k;
1902 char queue_name[MAX_QUEUE_NAME_LEN+1];
1792 1903
1793 switch (stringset) { 1904 switch (stringset) {
1794 case ETH_SS_STATS: 1905 case ETH_SS_STATS:
1795 if (is_multi(bp)) { 1906 if (is_multi(bp)) {
1796 k = 0; 1907 k = 0;
1797 for_each_queue(bp, i) { 1908 for_each_napi_queue(bp, i) {
1909 memset(queue_name, 0, sizeof(queue_name));
1910
1911 if (IS_FCOE_IDX(i))
1912 sprintf(queue_name, "fcoe");
1913 else
1914 sprintf(queue_name, "%d", i);
1915
1798 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) 1916 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
1799 sprintf(buf + (k + j)*ETH_GSTRING_LEN, 1917 snprintf(buf + (k + j)*ETH_GSTRING_LEN,
1800 bnx2x_q_stats_arr[j].string, i); 1918 ETH_GSTRING_LEN,
1919 bnx2x_q_stats_arr[j].string,
1920 queue_name);
1801 k += BNX2X_NUM_Q_STATS; 1921 k += BNX2X_NUM_Q_STATS;
1802 } 1922 }
1803 if (IS_E1HMF_MODE_STAT(bp)) 1923 if (IS_MF_MODE_STAT(bp))
1804 break; 1924 break;
1805 for (j = 0; j < BNX2X_NUM_STATS; j++) 1925 for (j = 0; j < BNX2X_NUM_STATS; j++)
1806 strcpy(buf + (k + j)*ETH_GSTRING_LEN, 1926 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
1807 bnx2x_stats_arr[j].string); 1927 bnx2x_stats_arr[j].string);
1808 } else { 1928 } else {
1809 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 1929 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
1810 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i)) 1930 if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
1811 continue; 1931 continue;
1812 strcpy(buf + j*ETH_GSTRING_LEN, 1932 strcpy(buf + j*ETH_GSTRING_LEN,
1813 bnx2x_stats_arr[i].string); 1933 bnx2x_stats_arr[i].string);
@@ -1831,7 +1951,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
1831 1951
1832 if (is_multi(bp)) { 1952 if (is_multi(bp)) {
1833 k = 0; 1953 k = 0;
1834 for_each_queue(bp, i) { 1954 for_each_napi_queue(bp, i) {
1835 hw_stats = (u32 *)&bp->fp[i].eth_q_stats; 1955 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
1836 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { 1956 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
1837 if (bnx2x_q_stats_arr[j].size == 0) { 1957 if (bnx2x_q_stats_arr[j].size == 0) {
@@ -1851,7 +1971,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
1851 } 1971 }
1852 k += BNX2X_NUM_Q_STATS; 1972 k += BNX2X_NUM_Q_STATS;
1853 } 1973 }
1854 if (IS_E1HMF_MODE_STAT(bp)) 1974 if (IS_MF_MODE_STAT(bp))
1855 return; 1975 return;
1856 hw_stats = (u32 *)&bp->eth_stats; 1976 hw_stats = (u32 *)&bp->eth_stats;
1857 for (j = 0; j < BNX2X_NUM_STATS; j++) { 1977 for (j = 0; j < BNX2X_NUM_STATS; j++) {
@@ -1872,7 +1992,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
1872 } else { 1992 } else {
1873 hw_stats = (u32 *)&bp->eth_stats; 1993 hw_stats = (u32 *)&bp->eth_stats;
1874 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 1994 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
1875 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i)) 1995 if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
1876 continue; 1996 continue;
1877 if (bnx2x_stats_arr[i].size == 0) { 1997 if (bnx2x_stats_arr[i].size == 0) {
1878 /* skip this counter */ 1998 /* skip this counter */
@@ -1894,36 +2014,91 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
1894 } 2014 }
1895} 2015}
1896 2016
1897static int bnx2x_phys_id(struct net_device *dev, u32 data) 2017static int bnx2x_set_phys_id(struct net_device *dev,
2018 enum ethtool_phys_id_state state)
1898{ 2019{
1899 struct bnx2x *bp = netdev_priv(dev); 2020 struct bnx2x *bp = netdev_priv(dev);
1900 int i;
1901 2021
1902 if (!netif_running(dev)) 2022 if (!netif_running(dev))
1903 return 0; 2023 return -EAGAIN;
1904 2024
1905 if (!bp->port.pmf) 2025 if (!bp->port.pmf)
1906 return 0; 2026 return -EOPNOTSUPP;
1907 2027
1908 if (data == 0) 2028 switch (state) {
1909 data = 2; 2029 case ETHTOOL_ID_ACTIVE:
2030 return 1; /* cycle on/off once per second */
1910 2031
1911 for (i = 0; i < (data * 2); i++) { 2032 case ETHTOOL_ID_ON:
1912 if ((i % 2) == 0) 2033 bnx2x_set_led(&bp->link_params, &bp->link_vars,
1913 bnx2x_set_led(&bp->link_params, LED_MODE_OPER, 2034 LED_MODE_ON, SPEED_1000);
1914 SPEED_1000); 2035 break;
1915 else
1916 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
1917 2036
1918 msleep_interruptible(500); 2037 case ETHTOOL_ID_OFF:
1919 if (signal_pending(current)) 2038 bnx2x_set_led(&bp->link_params, &bp->link_vars,
1920 break; 2039 LED_MODE_FRONT_PANEL_OFF, 0);
1921 } 2040
2041 break;
1922 2042
1923 if (bp->link_vars.link_up) 2043 case ETHTOOL_ID_INACTIVE:
1924 bnx2x_set_led(&bp->link_params, LED_MODE_OPER, 2044 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2045 LED_MODE_OPER,
1925 bp->link_vars.line_speed); 2046 bp->link_vars.line_speed);
2047 }
2048
2049 return 0;
2050}
2051
2052static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2053 void *rules __always_unused)
2054{
2055 struct bnx2x *bp = netdev_priv(dev);
2056
2057 switch (info->cmd) {
2058 case ETHTOOL_GRXRINGS:
2059 info->data = BNX2X_NUM_ETH_QUEUES(bp);
2060 return 0;
2061
2062 default:
2063 return -EOPNOTSUPP;
2064 }
2065}
2066
2067static int bnx2x_get_rxfh_indir(struct net_device *dev,
2068 struct ethtool_rxfh_indir *indir)
2069{
2070 struct bnx2x *bp = netdev_priv(dev);
2071 size_t copy_size =
2072 min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
2073
2074 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
2075 return -EOPNOTSUPP;
2076
2077 indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
2078 memcpy(indir->ring_index, bp->rx_indir_table,
2079 copy_size * sizeof(bp->rx_indir_table[0]));
2080 return 0;
2081}
2082
2083static int bnx2x_set_rxfh_indir(struct net_device *dev,
2084 const struct ethtool_rxfh_indir *indir)
2085{
2086 struct bnx2x *bp = netdev_priv(dev);
2087 size_t i;
2088
2089 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
2090 return -EOPNOTSUPP;
2091
2092 /* Validate size and indices */
2093 if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
2094 return -EINVAL;
2095 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
2096 if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
2097 return -EINVAL;
1926 2098
2099 memcpy(bp->rx_indir_table, indir->ring_index,
2100 indir->size * sizeof(bp->rx_indir_table[0]));
2101 bnx2x_push_indir_table(bp);
1927 return 0; 2102 return 0;
1928} 2103}
1929 2104
@@ -1948,21 +2123,14 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
1948 .set_ringparam = bnx2x_set_ringparam, 2123 .set_ringparam = bnx2x_set_ringparam,
1949 .get_pauseparam = bnx2x_get_pauseparam, 2124 .get_pauseparam = bnx2x_get_pauseparam,
1950 .set_pauseparam = bnx2x_set_pauseparam, 2125 .set_pauseparam = bnx2x_set_pauseparam,
1951 .get_rx_csum = bnx2x_get_rx_csum,
1952 .set_rx_csum = bnx2x_set_rx_csum,
1953 .get_tx_csum = ethtool_op_get_tx_csum,
1954 .set_tx_csum = ethtool_op_set_tx_hw_csum,
1955 .set_flags = bnx2x_set_flags,
1956 .get_flags = ethtool_op_get_flags,
1957 .get_sg = ethtool_op_get_sg,
1958 .set_sg = ethtool_op_set_sg,
1959 .get_tso = ethtool_op_get_tso,
1960 .set_tso = bnx2x_set_tso,
1961 .self_test = bnx2x_self_test, 2126 .self_test = bnx2x_self_test,
1962 .get_sset_count = bnx2x_get_sset_count, 2127 .get_sset_count = bnx2x_get_sset_count,
1963 .get_strings = bnx2x_get_strings, 2128 .get_strings = bnx2x_get_strings,
1964 .phys_id = bnx2x_phys_id, 2129 .set_phys_id = bnx2x_set_phys_id,
1965 .get_ethtool_stats = bnx2x_get_ethtool_stats, 2130 .get_ethtool_stats = bnx2x_get_ethtool_stats,
2131 .get_rxnfc = bnx2x_get_rxnfc,
2132 .get_rxfh_indir = bnx2x_get_rxfh_indir,
2133 .set_rxfh_indir = bnx2x_set_rxfh_indir,
1966}; 2134};
1967 2135
1968void bnx2x_set_ethtool_ops(struct net_device *netdev) 2136void bnx2x_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h
index 08d71bf438d6..9fe367836a57 100644
--- a/drivers/net/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x/bnx2x_fw_defs.h
@@ -1,375 +1,278 @@
1/* bnx2x_fw_defs.h: Broadcom Everest network driver. 1/* bnx2x_fw_defs.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 */ 8 */
9 9
10 10#ifndef BNX2X_FW_DEFS_H
11#define CSTORM_ASSERT_LIST_INDEX_OFFSET \ 11#define BNX2X_FW_DEFS_H
12 (IS_E1H_OFFSET ? 0x7000 : 0x1000) 12
13#define CSTORM_ASSERT_LIST_OFFSET(idx) \ 13#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[142].base)
14 (IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 14#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
15#define CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(function, index) \ 15 (IRO[141].base + ((assertListEntry) * IRO[141].m1))
16 (IS_E1H_OFFSET ? (0x8622 + ((function>>1) * 0x40) + \ 16#define CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
17 ((function&1) * 0x100) + (index * 0x4)) : (0x3562 + (function * \ 17 (IRO[144].base + ((pfId) * IRO[144].m1))
18 0x40) + (index * 0x4))) 18#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
19#define CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(function, index) \ 19 (IRO[149].base + (((pfId)>>1) * IRO[149].m1) + (((pfId)&1) * \
20 (IS_E1H_OFFSET ? (0x8822 + ((function>>1) * 0x80) + \ 20 IRO[149].m2))
21 ((function&1) * 0x200) + (index * 0x4)) : (0x35e2 + (function * \ 21#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
22 0x80) + (index * 0x4))) 22 (IRO[150].base + (((pfId)>>1) * IRO[150].m1) + (((pfId)&1) * \
23#define CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(function) \ 23 IRO[150].m2))
24 (IS_E1H_OFFSET ? (0x8600 + ((function>>1) * 0x40) + \ 24#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
25 ((function&1) * 0x100)) : (0x3540 + (function * 0x40))) 25 (IRO[156].base + ((funcId) * IRO[156].m1))
26#define CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(function) \ 26#define CSTORM_FUNC_EN_OFFSET(funcId) \
27 (IS_E1H_OFFSET ? (0x8800 + ((function>>1) * 0x80) + \ 27 (IRO[146].base + ((funcId) * IRO[146].m1))
28 ((function&1) * 0x200)) : (0x35c0 + (function * 0x80))) 28#define CSTORM_FUNCTION_MODE_OFFSET (IRO[153].base)
29#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(function) \ 29#define CSTORM_IGU_MODE_OFFSET (IRO[154].base)
30 (IS_E1H_OFFSET ? (0x8608 + ((function>>1) * 0x40) + \ 30#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
31 ((function&1) * 0x100)) : (0x3548 + (function * 0x40))) 31 (IRO[311].base + ((pfId) * IRO[311].m1))
32#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(function) \ 32#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
33 (IS_E1H_OFFSET ? (0x8808 + ((function>>1) * 0x80) + \ 33 (IRO[312].base + ((pfId) * IRO[312].m1))
34 ((function&1) * 0x200)) : (0x35c8 + (function * 0x80))) 34 #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
35#define CSTORM_FUNCTION_MODE_OFFSET \ 35 (IRO[304].base + ((pfId) * IRO[304].m1) + ((iscsiEqId) * \
36 (IS_E1H_OFFSET ? 0x11e8 : 0xffffffff) 36 IRO[304].m2))
37#define CSTORM_HC_BTR_C_OFFSET(port) \ 37 #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
38 (IS_E1H_OFFSET ? (0x8c04 + (port * 0xf0)) : (0x36c4 + (port * 0xc0))) 38 (IRO[306].base + ((pfId) * IRO[306].m1) + ((iscsiEqId) * \
39#define CSTORM_HC_BTR_U_OFFSET(port) \ 39 IRO[306].m2))
40 (IS_E1H_OFFSET ? (0x8de4 + (port * 0xf0)) : (0x3844 + (port * 0xc0))) 40 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
41#define CSTORM_ISCSI_CQ_SIZE_OFFSET(function) \ 41 (IRO[305].base + ((pfId) * IRO[305].m1) + ((iscsiEqId) * \
42 (IS_E1H_OFFSET ? (0x6680 + (function * 0x8)) : (0x25a0 + \ 42 IRO[305].m2))
43 (function * 0x8))) 43 #define \
44#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \ 44 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
45 (IS_E1H_OFFSET ? (0x66c0 + (function * 0x8)) : (0x25b0 + \ 45 (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * \
46 (function * 0x8))) 46 IRO[307].m2))
47#define CSTORM_ISCSI_EQ_CONS_OFFSET(function, eqIdx) \ 47 #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
48 (IS_E1H_OFFSET ? (0x6040 + (function * 0xc0) + (eqIdx * 0x18)) : \ 48 (IRO[303].base + ((pfId) * IRO[303].m1) + ((iscsiEqId) * \
49 (0x2410 + (function * 0xc0) + (eqIdx * 0x18))) 49 IRO[303].m2))
50#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(function, eqIdx) \ 50 #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
51 (IS_E1H_OFFSET ? (0x6044 + (function * 0xc0) + (eqIdx * 0x18)) : \ 51 (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * \
52 (0x2414 + (function * 0xc0) + (eqIdx * 0x18))) 52 IRO[309].m2))
53#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(function, eqIdx) \ 53 #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
54 (IS_E1H_OFFSET ? (0x604c + (function * 0xc0) + (eqIdx * 0x18)) : \ 54 (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * \
55 (0x241c + (function * 0xc0) + (eqIdx * 0x18))) 55 IRO[308].m2))
56#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(function, eqIdx) \ 56#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
57 (IS_E1H_OFFSET ? (0x6057 + (function * 0xc0) + (eqIdx * 0x18)) : \ 57 (IRO[310].base + ((pfId) * IRO[310].m1))
58 (0x2427 + (function * 0xc0) + (eqIdx * 0x18))) 58#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
59#define CSTORM_ISCSI_EQ_PROD_OFFSET(function, eqIdx) \ 59 (IRO[302].base + ((pfId) * IRO[302].m1))
60 (IS_E1H_OFFSET ? (0x6042 + (function * 0xc0) + (eqIdx * 0x18)) : \ 60#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
61 (0x2412 + (function * 0xc0) + (eqIdx * 0x18))) 61 (IRO[301].base + ((pfId) * IRO[301].m1))
62#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(function, eqIdx) \ 62#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
63 (IS_E1H_OFFSET ? (0x6056 + (function * 0xc0) + (eqIdx * 0x18)) : \ 63 (IRO[300].base + ((pfId) * IRO[300].m1))
64 (0x2426 + (function * 0xc0) + (eqIdx * 0x18))) 64#define CSTORM_PATH_ID_OFFSET (IRO[159].base)
65#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(function, eqIdx) \ 65#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
66 (IS_E1H_OFFSET ? (0x6054 + (function * 0xc0) + (eqIdx * 0x18)) : \ 66 (IRO[137].base + ((pfId) * IRO[137].m1))
67 (0x2424 + (function * 0xc0) + (eqIdx * 0x18))) 67#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
68#define CSTORM_ISCSI_HQ_SIZE_OFFSET(function) \ 68 (IRO[136].base + ((pfId) * IRO[136].m1))
69 (IS_E1H_OFFSET ? (0x6640 + (function * 0x8)) : (0x2590 + \ 69#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[136].size)
70 (function * 0x8))) 70#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
71#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \ 71 (IRO[138].base + ((pfId) * IRO[138].m1))
72 (IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x2404 + \ 72#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[138].size)
73 (function * 0x8))) 73#define CSTORM_STATS_FLAGS_OFFSET(pfId) \
74#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \ 74 (IRO[143].base + ((pfId) * IRO[143].m1))
75 (IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x2402 + \ 75#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
76 (function * 0x8))) 76 (IRO[129].base + ((sbId) * IRO[129].m1))
77#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \ 77#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
78 (IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x2400 + \ 78 (IRO[128].base + ((sbId) * IRO[128].m1))
79 (function * 0x8))) 79#define CSTORM_STATUS_BLOCK_SIZE (IRO[128].size)
80#define CSTORM_SB_HC_DISABLE_C_OFFSET(port, cpu_id, index) \ 80#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
81 (IS_E1H_OFFSET ? (0x811a + (port * 0x280) + (cpu_id * 0x28) + \ 81 (IRO[132].base + ((sbId) * IRO[132].m1))
82 (index * 0x4)) : (0x305a + (port * 0x280) + (cpu_id * 0x28) + \ 82#define CSTORM_SYNC_BLOCK_SIZE (IRO[132].size)
83 (index * 0x4))) 83#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
84#define CSTORM_SB_HC_DISABLE_U_OFFSET(port, cpu_id, index) \ 84 (IRO[151].base + ((vfId) * IRO[151].m1))
85 (IS_E1H_OFFSET ? (0xb01a + (port * 0x800) + (cpu_id * 0x80) + \ 85#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
86 (index * 0x4)) : (0x401a + (port * 0x800) + (cpu_id * 0x80) + \ 86 (IRO[152].base + ((vfId) * IRO[152].m1))
87 (index * 0x4))) 87#define CSTORM_VF_TO_PF_OFFSET(funcId) \
88#define CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, cpu_id, index) \ 88 (IRO[147].base + ((funcId) * IRO[147].m1))
89 (IS_E1H_OFFSET ? (0x8118 + (port * 0x280) + (cpu_id * 0x28) + \ 89#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[199].base)
90 (index * 0x4)) : (0x3058 + (port * 0x280) + (cpu_id * 0x28) + \ 90#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
91 (index * 0x4))) 91 (IRO[198].base + ((pfId) * IRO[198].m1))
92#define CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, cpu_id, index) \ 92#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[99].base)
93 (IS_E1H_OFFSET ? (0xb018 + (port * 0x800) + (cpu_id * 0x80) + \ 93#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
94 (index * 0x4)) : (0x4018 + (port * 0x800) + (cpu_id * 0x80) + \ 94 (IRO[98].base + ((assertListEntry) * IRO[98].m1))
95 (index * 0x4))) 95 #define TSTORM_CLIENT_CONFIG_OFFSET(portId, clientId) \
96#define CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, cpu_id) \ 96 (IRO[197].base + ((portId) * IRO[197].m1) + ((clientId) * \
97 (IS_E1H_OFFSET ? (0x8100 + (port * 0x280) + (cpu_id * 0x28)) : \ 97 IRO[197].m2))
98 (0x3040 + (port * 0x280) + (cpu_id * 0x28))) 98#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[104].base)
99#define CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, cpu_id) \
100 (IS_E1H_OFFSET ? (0xb000 + (port * 0x800) + (cpu_id * 0x80)) : \
101 (0x4000 + (port * 0x800) + (cpu_id * 0x80)))
102#define CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, cpu_id) \
103 (IS_E1H_OFFSET ? (0x8108 + (port * 0x280) + (cpu_id * 0x28)) : \
104 (0x3048 + (port * 0x280) + (cpu_id * 0x28)))
105#define CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, cpu_id) \
106 (IS_E1H_OFFSET ? (0xb008 + (port * 0x800) + (cpu_id * 0x80)) : \
107 (0x4008 + (port * 0x800) + (cpu_id * 0x80)))
108#define CSTORM_SB_STATUS_BLOCK_C_SIZE 0x10
109#define CSTORM_SB_STATUS_BLOCK_U_SIZE 0x60
110#define CSTORM_STATS_FLAGS_OFFSET(function) \
111 (IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
112 (function * 0x8)))
113#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \
114 (IS_E1H_OFFSET ? (0x3200 + (function * 0x20)) : 0xffffffff)
115#define TSTORM_ASSERT_LIST_INDEX_OFFSET \
116 (IS_E1H_OFFSET ? 0xa000 : 0x1000)
117#define TSTORM_ASSERT_LIST_OFFSET(idx) \
118 (IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
119#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \
120 (IS_E1H_OFFSET ? (0x33a0 + (port * 0x1a0) + (client_id * 0x10)) \
121 : (0x9c0 + (port * 0x120) + (client_id * 0x10)))
122#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET \
123 (IS_E1H_OFFSET ? 0x1ed8 : 0xffffffff)
124#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \ 99#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
125 (IS_E1H_OFFSET ? 0x1eda : 0xffffffff) 100 (IRO[105].base)
126#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 101#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
127 (IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \ 102 (IRO[96].base + ((pfId) * IRO[96].m1))
128 ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \ 103#define TSTORM_FUNC_EN_OFFSET(funcId) \
129 0x28) + (index * 0x4))) 104 (IRO[101].base + ((funcId) * IRO[101].m1))
130#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 105#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
131 (IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \ 106 (IRO[195].base + ((pfId) * IRO[195].m1))
132 ((function&1) * 0xa0)) : (0x1400 + (function * 0x28))) 107#define TSTORM_FUNCTION_MODE_OFFSET (IRO[103].base)
133#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 108#define TSTORM_INDIRECTION_TABLE_OFFSET(pfId) \
134 (IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \ 109 (IRO[91].base + ((pfId) * IRO[91].m1))
135 ((function&1) * 0xa0)) : (0x1408 + (function * 0x28))) 110#define TSTORM_INDIRECTION_TABLE_SIZE (IRO[91].size)
136#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ 111 #define \
137 (IS_E1H_OFFSET ? (0x2940 + (function * 0x8)) : (0x4928 + \ 112 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfId, iscsiConBufPblEntry) \
138 (function * 0x8))) 113 (IRO[260].base + ((pfId) * IRO[260].m1) + ((iscsiConBufPblEntry) \
139#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \ 114 * IRO[260].m2))
140 (IS_E1H_OFFSET ? (0x3000 + (function * 0x40)) : (0x1500 + \ 115#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
141 (function * 0x40))) 116 (IRO[264].base + ((pfId) * IRO[264].m1))
142#define TSTORM_FUNCTION_MODE_OFFSET \ 117#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
143 (IS_E1H_OFFSET ? 0x1ed0 : 0xffffffff) 118 (IRO[265].base + ((pfId) * IRO[265].m1))
144#define TSTORM_HC_BTR_OFFSET(port) \ 119#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
145 (IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18))) 120 (IRO[266].base + ((pfId) * IRO[266].m1))
146#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \ 121#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
147 (IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \ 122 (IRO[267].base + ((pfId) * IRO[267].m1))
148 (function * 0x80))) 123#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
149#define TSTORM_INDIRECTION_TABLE_SIZE 0x80 124 (IRO[263].base + ((pfId) * IRO[263].m1))
150#define TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(function, pblEntry) \ 125#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
151 (IS_E1H_OFFSET ? (0x60c0 + (function * 0x40) + (pblEntry * 0x8)) \ 126 (IRO[262].base + ((pfId) * IRO[262].m1))
152 : (0x4c30 + (function * 0x40) + (pblEntry * 0x8))) 127#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
153#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \ 128 (IRO[261].base + ((pfId) * IRO[261].m1))
154 (IS_E1H_OFFSET ? (0x6340 + (function * 0x8)) : (0x4cd0 + \ 129#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
155 (function * 0x8))) 130 (IRO[259].base + ((pfId) * IRO[259].m1))
156#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \ 131#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
157 (IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x4c04 + \ 132 (IRO[269].base + ((pfId) * IRO[269].m1))
158 (function * 0x8))) 133#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
159#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \ 134 (IRO[256].base + ((pfId) * IRO[256].m1))
160 (IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x4c02 + \ 135#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
161 (function * 0x8))) 136 (IRO[257].base + ((pfId) * IRO[257].m1))
162#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \ 137#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
163 (IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x4c00 + \ 138 (IRO[258].base + ((pfId) * IRO[258].m1))
164 (function * 0x8))) 139#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
165#define TSTORM_ISCSI_RQ_SIZE_OFFSET(function) \ 140 (IRO[196].base + ((pfId) * IRO[196].m1))
166 (IS_E1H_OFFSET ? (0x6080 + (function * 0x8)) : (0x4c20 + \ 141 #define TSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, tStatCntId) \
167 (function * 0x8))) 142 (IRO[100].base + ((portId) * IRO[100].m1) + ((tStatCntId) * \
168#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \ 143 IRO[100].m2))
169 (IS_E1H_OFFSET ? (0x6040 + (function * 0x8)) : (0x4c10 + \ 144#define TSTORM_STATS_FLAGS_OFFSET(pfId) \
170 (function * 0x8))) 145 (IRO[95].base + ((pfId) * IRO[95].m1))
171#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(function) \ 146#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
172 (IS_E1H_OFFSET ? (0x6042 + (function * 0x8)) : (0x4c12 + \ 147 (IRO[211].base + ((pfId) * IRO[211].m1))
173 (function * 0x8))) 148#define TSTORM_VF_TO_PF_OFFSET(funcId) \
174#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(function) \ 149 (IRO[102].base + ((funcId) * IRO[102].m1))
175 (IS_E1H_OFFSET ? (0x6044 + (function * 0x8)) : (0x4c14 + \ 150#define USTORM_AGG_DATA_OFFSET (IRO[201].base)
176 (function * 0x8))) 151#define USTORM_AGG_DATA_SIZE (IRO[201].size)
177#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \ 152#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[170].base)
178 (IS_E1H_OFFSET ? (0x3008 + (function * 0x40)) : (0x1508 + \ 153#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
179 (function * 0x40))) 154 (IRO[169].base + ((assertListEntry) * IRO[169].m1))
180#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \ 155#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
181 (IS_E1H_OFFSET ? (0x2010 + (port * 0x490) + (stats_counter_id * \ 156 (IRO[178].base + ((portId) * IRO[178].m1))
182 0x40)) : (0x4010 + (port * 0x490) + (stats_counter_id * 0x40))) 157#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
183#define TSTORM_STATS_FLAGS_OFFSET(function) \ 158 (IRO[172].base + ((pfId) * IRO[172].m1))
184 (IS_E1H_OFFSET ? (0x29c0 + (function * 0x8)) : (0x4948 + \ 159#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
185 (function * 0x8))) 160 (IRO[313].base + ((pfId) * IRO[313].m1))
186#define TSTORM_TCP_MAX_CWND_OFFSET(function) \ 161#define USTORM_FUNC_EN_OFFSET(funcId) \
187 (IS_E1H_OFFSET ? (0x4004 + (function * 0x8)) : (0x1fb4 + \ 162 (IRO[174].base + ((funcId) * IRO[174].m1))
188 (function * 0x8))) 163#define USTORM_FUNCTION_MODE_OFFSET (IRO[177].base)
189#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa000 : 0x3000) 164#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
190#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2000 : 0x1000) 165 (IRO[277].base + ((pfId) * IRO[277].m1))
191#define USTORM_ASSERT_LIST_INDEX_OFFSET \ 166#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
192 (IS_E1H_OFFSET ? 0x8000 : 0x1000) 167 (IRO[278].base + ((pfId) * IRO[278].m1))
193#define USTORM_ASSERT_LIST_OFFSET(idx) \ 168#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
194 (IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 169 (IRO[282].base + ((pfId) * IRO[282].m1))
195#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \ 170#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
196 (IS_E1H_OFFSET ? (0x1010 + (port * 0x680) + (clientId * 0x40)) : \ 171 (IRO[279].base + ((pfId) * IRO[279].m1))
197 (0x4010 + (port * 0x360) + (clientId * 0x30))) 172#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
198#define USTORM_CQE_PAGE_NEXT_OFFSET(port, clientId) \ 173 (IRO[275].base + ((pfId) * IRO[275].m1))
199 (IS_E1H_OFFSET ? (0x1028 + (port * 0x680) + (clientId * 0x40)) : \ 174#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
200 (0x4028 + (port * 0x360) + (clientId * 0x30))) 175 (IRO[274].base + ((pfId) * IRO[274].m1))
201#define USTORM_ETH_PAUSE_ENABLED_OFFSET(port) \ 176#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
202 (IS_E1H_OFFSET ? (0x2ad4 + (port * 0x8)) : 0xffffffff) 177 (IRO[273].base + ((pfId) * IRO[273].m1))
203#define USTORM_ETH_RING_PAUSE_DATA_OFFSET(port, clientId) \ 178#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
204 (IS_E1H_OFFSET ? (0x1030 + (port * 0x680) + (clientId * 0x40)) : \ 179 (IRO[276].base + ((pfId) * IRO[276].m1))
205 0xffffffff) 180#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
206#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ 181 (IRO[280].base + ((pfId) * IRO[280].m1))
207 (IS_E1H_OFFSET ? (0x2a50 + (function * 0x8)) : (0x1dd0 + \ 182#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
208 (function * 0x8))) 183 (IRO[281].base + ((pfId) * IRO[281].m1))
209#define USTORM_FUNCTION_MODE_OFFSET \ 184#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
210 (IS_E1H_OFFSET ? 0x2448 : 0xffffffff) 185 (IRO[176].base + ((pfId) * IRO[176].m1))
211#define USTORM_ISCSI_CQ_SIZE_OFFSET(function) \ 186 #define USTORM_PER_COUNTER_ID_STATS_OFFSET(portId, uStatCntId) \
212 (IS_E1H_OFFSET ? (0x7044 + (function * 0x8)) : (0x2414 + \ 187 (IRO[173].base + ((portId) * IRO[173].m1) + ((uStatCntId) * \
213 (function * 0x8))) 188 IRO[173].m2))
214#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \ 189 #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
215 (IS_E1H_OFFSET ? (0x7046 + (function * 0x8)) : (0x2416 + \ 190 (IRO[204].base + ((portId) * IRO[204].m1) + ((clientId) * \
216 (function * 0x8))) 191 IRO[204].m2))
217#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \ 192#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
218 (IS_E1H_OFFSET ? (0x7688 + (function * 0x8)) : (0x29c8 + \ 193 (IRO[205].base + ((qzoneId) * IRO[205].m1))
219 (function * 0x8))) 194#define USTORM_STATS_FLAGS_OFFSET(pfId) \
220#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(function) \ 195 (IRO[171].base + ((pfId) * IRO[171].m1))
221 (IS_E1H_OFFSET ? (0x7648 + (function * 0x8)) : (0x29b8 + \ 196#define USTORM_TPA_BTR_OFFSET (IRO[202].base)
222 (function * 0x8))) 197#define USTORM_TPA_BTR_SIZE (IRO[202].size)
223#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \ 198#define USTORM_VF_TO_PF_OFFSET(funcId) \
224 (IS_E1H_OFFSET ? (0x7004 + (function * 0x8)) : (0x2404 + \ 199 (IRO[175].base + ((funcId) * IRO[175].m1))
225 (function * 0x8))) 200#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[59].base)
226#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \ 201#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[58].base)
227 (IS_E1H_OFFSET ? (0x7002 + (function * 0x8)) : (0x2402 + \ 202#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[54].base)
228 (function * 0x8))) 203#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
229#define USTORM_ISCSI_PAGE_SIZE_OFFSET(function) \ 204 (IRO[53].base + ((assertListEntry) * IRO[53].m1))
230 (IS_E1H_OFFSET ? (0x7000 + (function * 0x8)) : (0x2400 + \ 205#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
231 (function * 0x8))) 206 (IRO[47].base + ((portId) * IRO[47].m1))
232#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \ 207#define XSTORM_E1HOV_OFFSET(pfId) \
233 (IS_E1H_OFFSET ? (0x7040 + (function * 0x8)) : (0x2410 + \ 208 (IRO[55].base + ((pfId) * IRO[55].m1))
234 (function * 0x8))) 209#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
235#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(function) \ 210 (IRO[45].base + ((pfId) * IRO[45].m1))
236 (IS_E1H_OFFSET ? (0x7080 + (function * 0x8)) : (0x2420 + \ 211#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
237 (function * 0x8))) 212 (IRO[49].base + ((pfId) * IRO[49].m1))
238#define USTORM_ISCSI_RQ_SIZE_OFFSET(function) \ 213#define XSTORM_FUNC_EN_OFFSET(funcId) \
239 (IS_E1H_OFFSET ? (0x7084 + (function * 0x8)) : (0x2424 + \ 214 (IRO[51].base + ((funcId) * IRO[51].m1))
240 (function * 0x8))) 215#define XSTORM_FUNCTION_MODE_OFFSET (IRO[56].base)
241#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \ 216#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
242 (IS_E1H_OFFSET ? (0x1018 + (port * 0x680) + (clientId * 0x40)) : \ 217 (IRO[290].base + ((pfId) * IRO[290].m1))
243 (0x4018 + (port * 0x360) + (clientId * 0x30))) 218#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
244#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \ 219 (IRO[293].base + ((pfId) * IRO[293].m1))
245 (IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x1da8 + \ 220#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
246 (function * 0x8))) 221 (IRO[294].base + ((pfId) * IRO[294].m1))
247#define USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \ 222#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
248 (IS_E1H_OFFSET ? (0x2450 + (port * 0x2d0) + (stats_counter_id * \ 223 (IRO[295].base + ((pfId) * IRO[295].m1))
249 0x28)) : (0x1500 + (port * 0x2d0) + (stats_counter_id * 0x28))) 224#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
250#define USTORM_RX_PRODS_OFFSET(port, client_id) \ 225 (IRO[296].base + ((pfId) * IRO[296].m1))
251 (IS_E1H_OFFSET ? (0x1000 + (port * 0x680) + (client_id * 0x40)) \ 226#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
252 : (0x4000 + (port * 0x360) + (client_id * 0x30))) 227 (IRO[297].base + ((pfId) * IRO[297].m1))
253#define USTORM_STATS_FLAGS_OFFSET(function) \ 228#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
254 (IS_E1H_OFFSET ? (0x29f0 + (function * 0x8)) : (0x1db8 + \ 229 (IRO[298].base + ((pfId) * IRO[298].m1))
255 (function * 0x8))) 230#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
256#define USTORM_TPA_BTR_OFFSET (IS_E1H_OFFSET ? 0x3da5 : 0x5095) 231 (IRO[299].base + ((pfId) * IRO[299].m1))
257#define USTORM_TPA_BTR_SIZE 0x1 232#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
258#define XSTORM_ASSERT_LIST_INDEX_OFFSET \ 233 (IRO[289].base + ((pfId) * IRO[289].m1))
259 (IS_E1H_OFFSET ? 0x9000 : 0x1000) 234#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
260#define XSTORM_ASSERT_LIST_OFFSET(idx) \ 235 (IRO[288].base + ((pfId) * IRO[288].m1))
261 (IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 236#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
262#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \ 237 (IRO[287].base + ((pfId) * IRO[287].m1))
263 (IS_E1H_OFFSET ? (0x24a8 + (port * 0x50)) : (0x3a80 + (port * 0x50))) 238#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
264#define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 239 (IRO[292].base + ((pfId) * IRO[292].m1))
265 (IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \ 240#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
266 ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \ 241 (IRO[291].base + ((pfId) * IRO[291].m1))
267 0x28) + (index * 0x4))) 242#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
268#define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 243 (IRO[286].base + ((pfId) * IRO[286].m1))
269 (IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \ 244#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
270 ((function&1) * 0xa0)) : (0x1400 + (function * 0x28))) 245 (IRO[285].base + ((pfId) * IRO[285].m1))
271#define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 246#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
272 (IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \ 247 (IRO[284].base + ((pfId) * IRO[284].m1))
273 ((function&1) * 0xa0)) : (0x1408 + (function * 0x28))) 248#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
274#define XSTORM_E1HOV_OFFSET(function) \ 249 (IRO[283].base + ((pfId) * IRO[283].m1))
275 (IS_E1H_OFFSET ? (0x2c10 + (function * 0x8)) : 0xffffffff) 250#define XSTORM_PATH_ID_OFFSET (IRO[65].base)
276#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ 251 #define XSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, xStatCntId) \
277 (IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3a50 + \ 252 (IRO[50].base + ((portId) * IRO[50].m1) + ((xStatCntId) * \
278 (function * 0x8))) 253 IRO[50].m2))
279#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \ 254#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
280 (IS_E1H_OFFSET ? (0x2588 + (function * 0x90)) : (0x3b60 + \ 255 (IRO[48].base + ((pfId) * IRO[48].m1))
281 (function * 0x90))) 256#define XSTORM_SPQ_DATA_OFFSET(funcId) \
282#define XSTORM_FUNCTION_MODE_OFFSET \ 257 (IRO[32].base + ((funcId) * IRO[32].m1))
283 (IS_E1H_OFFSET ? 0x2c50 : 0xffffffff) 258#define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
284#define XSTORM_HC_BTR_OFFSET(port) \ 259#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \
285 (IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18))) 260 (IRO[30].base + ((funcId) * IRO[30].m1))
286#define XSTORM_ISCSI_HQ_SIZE_OFFSET(function) \ 261#define XSTORM_SPQ_PROD_OFFSET(funcId) \
287 (IS_E1H_OFFSET ? (0x80c0 + (function * 0x8)) : (0x1c30 + \ 262 (IRO[31].base + ((funcId) * IRO[31].m1))
288 (function * 0x8))) 263#define XSTORM_STATS_FLAGS_OFFSET(pfId) \
289#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(function) \ 264 (IRO[43].base + ((pfId) * IRO[43].m1))
290 (IS_E1H_OFFSET ? (0x8080 + (function * 0x8)) : (0x1c20 + \ 265#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
291 (function * 0x8))) 266 (IRO[206].base + ((portId) * IRO[206].m1))
292#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(function) \ 267#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
293 (IS_E1H_OFFSET ? (0x8081 + (function * 0x8)) : (0x1c21 + \ 268 (IRO[207].base + ((portId) * IRO[207].m1))
294 (function * 0x8))) 269#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
295#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(function) \ 270 (IRO[209].base + (((pfId)>>1) * IRO[209].m1) + (((pfId)&1) * \
296 (IS_E1H_OFFSET ? (0x8082 + (function * 0x8)) : (0x1c22 + \ 271 IRO[209].m2))
297 (function * 0x8))) 272#define XSTORM_VF_TO_PF_OFFSET(funcId) \
298#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(function) \ 273 (IRO[52].base + ((funcId) * IRO[52].m1))
299 (IS_E1H_OFFSET ? (0x8083 + (function * 0x8)) : (0x1c23 + \
300 (function * 0x8)))
301#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(function) \
302 (IS_E1H_OFFSET ? (0x8084 + (function * 0x8)) : (0x1c24 + \
303 (function * 0x8)))
304#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(function) \
305 (IS_E1H_OFFSET ? (0x8085 + (function * 0x8)) : (0x1c25 + \
306 (function * 0x8)))
307#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(function) \
308 (IS_E1H_OFFSET ? (0x8086 + (function * 0x8)) : (0x1c26 + \
309 (function * 0x8)))
310#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
311 (IS_E1H_OFFSET ? (0x8004 + (function * 0x8)) : (0x1c04 + \
312 (function * 0x8)))
313#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
314 (IS_E1H_OFFSET ? (0x8002 + (function * 0x8)) : (0x1c02 + \
315 (function * 0x8)))
316#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
317 (IS_E1H_OFFSET ? (0x8000 + (function * 0x8)) : (0x1c00 + \
318 (function * 0x8)))
319#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \
320 (IS_E1H_OFFSET ? (0x80c4 + (function * 0x8)) : (0x1c34 + \
321 (function * 0x8)))
322#define XSTORM_ISCSI_SQ_SIZE_OFFSET(function) \
323 (IS_E1H_OFFSET ? (0x80c2 + (function * 0x8)) : (0x1c32 + \
324 (function * 0x8)))
325#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(function) \
326 (IS_E1H_OFFSET ? (0x8043 + (function * 0x8)) : (0x1c13 + \
327 (function * 0x8)))
328#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \
329 (IS_E1H_OFFSET ? (0x8042 + (function * 0x8)) : (0x1c12 + \
330 (function * 0x8)))
331#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(function) \
332 (IS_E1H_OFFSET ? (0x8041 + (function * 0x8)) : (0x1c11 + \
333 (function * 0x8)))
334#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(function) \
335 (IS_E1H_OFFSET ? (0x8040 + (function * 0x8)) : (0x1c10 + \
336 (function * 0x8)))
337#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
338 (IS_E1H_OFFSET ? (0xc000 + (port * 0x360) + (stats_counter_id * \
339 0x30)) : (0x3378 + (port * 0x360) + (stats_counter_id * 0x30)))
340#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \
341 (IS_E1H_OFFSET ? (0x2548 + (function * 0x90)) : (0x3b20 + \
342 (function * 0x90)))
343#define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \
344 (IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \
345 (function * 0x10)))
346#define XSTORM_SPQ_PROD_OFFSET(function) \
347 (IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \
348 (function * 0x10)))
349#define XSTORM_STATS_FLAGS_OFFSET(function) \
350 (IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3a40 + \
351 (function * 0x8)))
352#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port) \
353 (IS_E1H_OFFSET ? (0x4000 + (port * 0x8)) : (0x1960 + (port * 0x8)))
354#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port) \
355 (IS_E1H_OFFSET ? (0x4001 + (port * 0x8)) : (0x1961 + (port * 0x8)))
356#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(function) \
357 (IS_E1H_OFFSET ? (0x4060 + ((function>>1) * 0x8) + ((function&1) \
358 * 0x4)) : (0x1978 + (function * 0x4)))
359#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 274#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
360 275
361/**
362* This file defines HSI constants for the ETH flow
363*/
364#ifdef _EVEREST_MICROCODE
365#include "microcode_constants.h"
366#include "eth_rx_bd.h"
367#include "eth_tx_bd.h"
368#include "eth_rx_cqe.h"
369#include "eth_rx_sge.h"
370#include "eth_rx_cqe_next_page.h"
371#endif
372
373/* RSS hash types */ 276/* RSS hash types */
374#define DEFAULT_HASH_TYPE 0 277#define DEFAULT_HASH_TYPE 0
375#define IPV4_HASH_TYPE 1 278#define IPV4_HASH_TYPE 1
@@ -389,11 +292,17 @@
389#define U_ETH_NUM_OF_SGES_TO_FETCH 8 292#define U_ETH_NUM_OF_SGES_TO_FETCH 8
390#define U_ETH_MAX_SGES_FOR_PACKET 3 293#define U_ETH_MAX_SGES_FOR_PACKET 3
391 294
295/*Tx params*/
296#define X_ETH_NO_VLAN 0
297#define X_ETH_OUTBAND_VLAN 1
298#define X_ETH_INBAND_VLAN 2
392/* Rx ring params */ 299/* Rx ring params */
393#define U_ETH_LOCAL_BD_RING_SIZE 8 300#define U_ETH_LOCAL_BD_RING_SIZE 8
394#define U_ETH_LOCAL_SGE_RING_SIZE 10 301#define U_ETH_LOCAL_SGE_RING_SIZE 10
395#define U_ETH_SGL_SIZE 8 302#define U_ETH_SGL_SIZE 8
396 303 /* The fw will padd the buffer with this value, so the IP header \
304 will be align to 4 Byte */
305#define IP_HEADER_ALIGNMENT_PADDING 2
397 306
398#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \ 307#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
399 (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1)) 308 (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
@@ -409,16 +318,15 @@
409#define U_ETH_UNDEFINED_Q 0xFF 318#define U_ETH_UNDEFINED_Q 0xFF
410 319
411/* values of command IDs in the ramrod message */ 320/* values of command IDs in the ramrod message */
412#define RAMROD_CMD_ID_ETH_PORT_SETUP 80 321#define RAMROD_CMD_ID_ETH_UNUSED 0
413#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 85 322#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 1
414#define RAMROD_CMD_ID_ETH_STAT_QUERY 90 323#define RAMROD_CMD_ID_ETH_UPDATE 2
415#define RAMROD_CMD_ID_ETH_UPDATE 100 324#define RAMROD_CMD_ID_ETH_HALT 3
416#define RAMROD_CMD_ID_ETH_HALT 105 325#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 4
417#define RAMROD_CMD_ID_ETH_SET_MAC 110 326#define RAMROD_CMD_ID_ETH_ACTIVATE 5
418#define RAMROD_CMD_ID_ETH_CFC_DEL 115 327#define RAMROD_CMD_ID_ETH_DEACTIVATE 6
419#define RAMROD_CMD_ID_ETH_PORT_DEL 120 328#define RAMROD_CMD_ID_ETH_EMPTY 7
420#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 125 329#define RAMROD_CMD_ID_ETH_TERMINATE 8
421
422 330
423/* command values for set mac command */ 331/* command values for set mac command */
424#define T_ETH_MAC_COMMAND_SET 0 332#define T_ETH_MAC_COMMAND_SET 0
@@ -431,7 +339,9 @@
431 339
432/* Maximal L2 clients supported */ 340/* Maximal L2 clients supported */
433#define ETH_MAX_RX_CLIENTS_E1 18 341#define ETH_MAX_RX_CLIENTS_E1 18
434#define ETH_MAX_RX_CLIENTS_E1H 26 342#define ETH_MAX_RX_CLIENTS_E1H 28
343
344#define MAX_STAT_COUNTER_ID ETH_MAX_RX_CLIENTS_E1H
435 345
436/* Maximal aggregation queues supported */ 346/* Maximal aggregation queues supported */
437#define ETH_MAX_AGGREGATION_QUEUES_E1 32 347#define ETH_MAX_AGGREGATION_QUEUES_E1 32
@@ -443,6 +353,20 @@
443#define ETH_RSS_MODE_VLAN_PRI 2 353#define ETH_RSS_MODE_VLAN_PRI 2
444#define ETH_RSS_MODE_E1HOV_PRI 3 354#define ETH_RSS_MODE_E1HOV_PRI 3
445#define ETH_RSS_MODE_IP_DSCP 4 355#define ETH_RSS_MODE_IP_DSCP 4
356#define ETH_RSS_MODE_E2_INTEG 5
357
358
359/* ETH vlan filtering modes */
360#define ETH_VLAN_FILTER_ANY_VLAN 0 /* Don't filter by vlan */
361#define ETH_VLAN_FILTER_SPECIFIC_VLAN \
362 1 /* Only the vlan_id is allowed */
363#define ETH_VLAN_FILTER_CLASSIFY \
364 2 /* vlan will be added to CAM for classification */
365
366/* Fast path CQE selection */
367#define ETH_FP_CQE_REGULAR 0
368#define ETH_FP_CQE_SGL 1
369#define ETH_FP_CQE_RAW 2
446 370
447 371
448/** 372/**
@@ -458,6 +382,7 @@
458#define RESERVED_CONNECTION_TYPE_0 5 382#define RESERVED_CONNECTION_TYPE_0 5
459#define RESERVED_CONNECTION_TYPE_1 6 383#define RESERVED_CONNECTION_TYPE_1 6
460#define RESERVED_CONNECTION_TYPE_2 7 384#define RESERVED_CONNECTION_TYPE_2 7
385#define NONE_CONNECTION_TYPE 8
461 386
462 387
463#define PROTOCOL_STATE_BIT_OFFSET 6 388#define PROTOCOL_STATE_BIT_OFFSET 6
@@ -466,6 +391,16 @@
466#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) 391#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
467#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) 392#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
468 393
394/* values of command IDs in the ramrod message */
395#define RAMROD_CMD_ID_COMMON_FUNCTION_START 1
396#define RAMROD_CMD_ID_COMMON_FUNCTION_STOP 2
397#define RAMROD_CMD_ID_COMMON_CFC_DEL 3
398#define RAMROD_CMD_ID_COMMON_CFC_DEL_WB 4
399#define RAMROD_CMD_ID_COMMON_SET_MAC 5
400#define RAMROD_CMD_ID_COMMON_STAT_QUERY 6
401#define RAMROD_CMD_ID_COMMON_STOP_TRAFFIC 7
402#define RAMROD_CMD_ID_COMMON_START_TRAFFIC 8
403
469/* microcode fixed page page size 4K (chains and ring segments) */ 404/* microcode fixed page page size 4K (chains and ring segments) */
470#define MC_PAGE_SIZE 4096 405#define MC_PAGE_SIZE 4096
471 406
@@ -473,46 +408,26 @@
473/* Host coalescing constants */ 408/* Host coalescing constants */
474#define HC_IGU_BC_MODE 0 409#define HC_IGU_BC_MODE 0
475#define HC_IGU_NBC_MODE 1 410#define HC_IGU_NBC_MODE 1
411/* Host coalescing constants. E1 includes E1H as well */
412
413/* Number of indices per slow-path SB */
414#define HC_SP_SB_MAX_INDICES 16
415
416/* Number of indices per SB */
417#define HC_SB_MAX_INDICES_E1X 8
418#define HC_SB_MAX_INDICES_E2 8
419
420#define HC_SB_MAX_SB_E1X 32
421#define HC_SB_MAX_SB_E2 136
422
423#define HC_SP_SB_ID 0xde
476 424
477#define HC_REGULAR_SEGMENT 0 425#define HC_REGULAR_SEGMENT 0
478#define HC_DEFAULT_SEGMENT 1 426#define HC_DEFAULT_SEGMENT 1
427#define HC_SB_MAX_SM 2
479 428
480/* index numbers */ 429#define HC_SB_MAX_DYNAMIC_INDICES 4
481#define HC_USTORM_DEF_SB_NUM_INDICES 8 430#define HC_FUNCTION_DISABLED 0xff
482#define HC_CSTORM_DEF_SB_NUM_INDICES 8
483#define HC_XSTORM_DEF_SB_NUM_INDICES 4
484#define HC_TSTORM_DEF_SB_NUM_INDICES 4
485#define HC_USTORM_SB_NUM_INDICES 4
486#define HC_CSTORM_SB_NUM_INDICES 4
487
488/* index values - which counter to update */
489
490#define HC_INDEX_U_TOE_RX_CQ_CONS 0
491#define HC_INDEX_U_ETH_RX_CQ_CONS 1
492#define HC_INDEX_U_ETH_RX_BD_CONS 2
493#define HC_INDEX_U_FCOE_EQ_CONS 3
494
495#define HC_INDEX_C_TOE_TX_CQ_CONS 0
496#define HC_INDEX_C_ETH_TX_CQ_CONS 1
497#define HC_INDEX_C_ISCSI_EQ_CONS 2
498
499#define HC_INDEX_DEF_X_SPQ_CONS 0
500
501#define HC_INDEX_DEF_C_RDMA_EQ_CONS 0
502#define HC_INDEX_DEF_C_RDMA_NAL_PROD 1
503#define HC_INDEX_DEF_C_ETH_FW_TX_CQ_CONS 2
504#define HC_INDEX_DEF_C_ETH_SLOW_PATH 3
505#define HC_INDEX_DEF_C_ETH_RDMA_CQ_CONS 4
506#define HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS 5
507#define HC_INDEX_DEF_C_ETH_FCOE_CQ_CONS 6
508
509#define HC_INDEX_DEF_U_ETH_RDMA_RX_CQ_CONS 0
510#define HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS 1
511#define HC_INDEX_DEF_U_ETH_RDMA_RX_BD_CONS 2
512#define HC_INDEX_DEF_U_ETH_ISCSI_RX_BD_CONS 3
513#define HC_INDEX_DEF_U_ETH_FCOE_RX_CQ_CONS 4
514#define HC_INDEX_DEF_U_ETH_FCOE_RX_BD_CONS 5
515
516/* used by the driver to get the SB offset */ 431/* used by the driver to get the SB offset */
517#define USTORM_ID 0 432#define USTORM_ID 0
518#define CSTORM_ID 1 433#define CSTORM_ID 1
@@ -529,45 +444,17 @@
529 444
530 445
531/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ 446/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
532#define EMULATION_FREQUENCY_FACTOR 1600
533#define FPGA_FREQUENCY_FACTOR 100
534 447
535#define TIMERS_TICK_SIZE_CHIP (1e-3) 448#define TIMERS_TICK_SIZE_CHIP (1e-3)
536#define TIMERS_TICK_SIZE_EMUL \
537 ((TIMERS_TICK_SIZE_CHIP)/((EMULATION_FREQUENCY_FACTOR)))
538#define TIMERS_TICK_SIZE_FPGA \
539 ((TIMERS_TICK_SIZE_CHIP)/((FPGA_FREQUENCY_FACTOR)))
540 449
541#define TSEMI_CLK1_RESUL_CHIP (1e-3) 450#define TSEMI_CLK1_RESUL_CHIP (1e-3)
542#define TSEMI_CLK1_RESUL_EMUL \
543 ((TSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
544#define TSEMI_CLK1_RESUL_FPGA \
545 ((TSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
546
547#define USEMI_CLK1_RESUL_CHIP (TIMERS_TICK_SIZE_CHIP)
548#define USEMI_CLK1_RESUL_EMUL (TIMERS_TICK_SIZE_EMUL)
549#define USEMI_CLK1_RESUL_FPGA (TIMERS_TICK_SIZE_FPGA)
550 451
551#define XSEMI_CLK1_RESUL_CHIP (1e-3) 452#define XSEMI_CLK1_RESUL_CHIP (1e-3)
552#define XSEMI_CLK1_RESUL_EMUL \
553 ((XSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
554#define XSEMI_CLK1_RESUL_FPGA \
555 ((XSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
556
557#define XSEMI_CLK2_RESUL_CHIP (1e-6)
558#define XSEMI_CLK2_RESUL_EMUL \
559 ((XSEMI_CLK2_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
560#define XSEMI_CLK2_RESUL_FPGA \
561 ((XSEMI_CLK2_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
562 453
563#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6)) 454#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6))
564#define SDM_TIMER_TICK_RESUL_EMUL \
565 ((SDM_TIMER_TICK_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
566#define SDM_TIMER_TICK_RESUL_FPGA \
567 ((SDM_TIMER_TICK_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
568
569 455
570/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ 456/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
457
571#define XSTORM_IP_ID_ROLL_HALF 0x8000 458#define XSTORM_IP_ID_ROLL_HALF 0x8000
572#define XSTORM_IP_ID_ROLL_ALL 0 459#define XSTORM_IP_ID_ROLL_ALL 0
573 460
@@ -576,10 +463,36 @@
576#define NUM_OF_PROTOCOLS 4 463#define NUM_OF_PROTOCOLS 4
577#define NUM_OF_SAFC_BITS 16 464#define NUM_OF_SAFC_BITS 16
578#define MAX_COS_NUMBER 4 465#define MAX_COS_NUMBER 4
579#define MAX_T_STAT_COUNTER_ID 18
580#define MAX_X_STAT_COUNTER_ID 18
581#define MAX_U_STAT_COUNTER_ID 18
582 466
467#define FAIRNESS_COS_WRR_MODE 0
468#define FAIRNESS_COS_ETS_MODE 1
469
470
471/* Priority Flow Control (PFC) */
472#define MAX_PFC_PRIORITIES 8
473#define MAX_PFC_TRAFFIC_TYPES 8
474
475/* Available Traffic Types for Link Layer Flow Control */
476#define LLFC_TRAFFIC_TYPE_NW 0
477#define LLFC_TRAFFIC_TYPE_FCOE 1
478#define LLFC_TRAFFIC_TYPE_ISCSI 2
479 /***************** START OF E2 INTEGRATION \
480 CODE***************************************/
481#define LLFC_TRAFFIC_TYPE_NW_COS1_E2INTEG 3
482 /***************** END OF E2 INTEGRATION \
483 CODE***************************************/
484#define LLFC_TRAFFIC_TYPE_MAX 4
485
486 /* used by array traffic_type_to_priority[] to mark traffic type \
487 that is not mapped to priority*/
488#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
489
490#define LLFC_MODE_NONE 0
491#define LLFC_MODE_PFC 1
492#define LLFC_MODE_SAFC 2
493
494#define DCB_DISABLED 0
495#define DCB_ENABLED 1
583 496
584#define UNKNOWN_ADDRESS 0 497#define UNKNOWN_ADDRESS 0
585#define UNICAST_ADDRESS 1 498#define UNICAST_ADDRESS 1
@@ -587,8 +500,32 @@
587#define BROADCAST_ADDRESS 3 500#define BROADCAST_ADDRESS 3
588 501
589#define SINGLE_FUNCTION 0 502#define SINGLE_FUNCTION 0
590#define MULTI_FUNCTION 1 503#define MULTI_FUNCTION_SD 1
504#define MULTI_FUNCTION_SI 2
591 505
592#define IP_V4 0 506#define IP_V4 0
593#define IP_V6 1 507#define IP_V6 1
594 508
509
510#define C_ERES_PER_PAGE \
511 (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
512#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
513
514#define EVENT_RING_OPCODE_VF_PF_CHANNEL 0
515#define EVENT_RING_OPCODE_FUNCTION_START 1
516#define EVENT_RING_OPCODE_FUNCTION_STOP 2
517#define EVENT_RING_OPCODE_CFC_DEL 3
518#define EVENT_RING_OPCODE_CFC_DEL_WB 4
519#define EVENT_RING_OPCODE_SET_MAC 5
520#define EVENT_RING_OPCODE_STAT_QUERY 6
521#define EVENT_RING_OPCODE_STOP_TRAFFIC 7
522#define EVENT_RING_OPCODE_START_TRAFFIC 8
523#define EVENT_RING_OPCODE_FORWARD_SETUP 9
524
525#define VF_PF_CHANNEL_STATE_READY 0
526#define VF_PF_CHANNEL_STATE_WAITING_FOR_ACK 1
527
528#define VF_PF_CHANNEL_STATE_MAX_NUMBER 2
529
530
531#endif /* BNX2X_FW_DEFS_H */
diff --git a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
index 3f5ee5d7cc2a..f4a07fbaed05 100644
--- a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
@@ -1,6 +1,6 @@
1/* bnx2x_fw_file_hdr.h: FW binary file header structure. 1/* bnx2x_fw_file_hdr.h: FW binary file header structure.
2 * 2 *
3 * Copyright (c) 2007-2009 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -31,6 +31,7 @@ struct bnx2x_fw_file_hdr {
31 struct bnx2x_fw_file_section csem_pram_data; 31 struct bnx2x_fw_file_section csem_pram_data;
32 struct bnx2x_fw_file_section xsem_int_table_data; 32 struct bnx2x_fw_file_section xsem_int_table_data;
33 struct bnx2x_fw_file_section xsem_pram_data; 33 struct bnx2x_fw_file_section xsem_pram_data;
34 struct bnx2x_fw_file_section iro_arr;
34 struct bnx2x_fw_file_section fw_version; 35 struct bnx2x_fw_file_section fw_version;
35}; 36};
36 37
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index fd1f29e0317d..cdf19fe7c7f6 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -1,26 +1,37 @@
1/* bnx2x_hsi.h: Broadcom Everest network driver. 1/* bnx2x_hsi.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 */ 8 */
9#ifndef BNX2X_HSI_H
10#define BNX2X_HSI_H
11
12#include "bnx2x_fw_defs.h"
13
14#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
9 15
10struct license_key { 16struct license_key {
11 u32 reserved[6]; 17 u32 reserved[6];
12 18
13#if defined(__BIG_ENDIAN) 19 u32 max_iscsi_conn;
14 u16 max_iscsi_init_conn; 20#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF
15 u16 max_iscsi_trgt_conn; 21#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT 0
16#elif defined(__LITTLE_ENDIAN) 22#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000
17 u16 max_iscsi_trgt_conn; 23#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT 16
18 u16 max_iscsi_init_conn;
19#endif
20 24
21 u32 reserved_a[6]; 25 u32 reserved_a;
22}; 26
27 u32 max_fcoe_conn;
28#define BNX2X_MAX_FCOE_TRGT_CONN_MASK 0xFFFF
29#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0
30#define BNX2X_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000
31#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16
23 32
33 u32 reserved_b[4];
34};
24 35
25#define PORT_0 0 36#define PORT_0 0
26#define PORT_1 1 37#define PORT_1 1
@@ -78,6 +89,8 @@ struct shared_hw_cfg { /* NVRAM Offset */
78#define SHARED_HW_CFG_LED_PHY11 0x000b0000 89#define SHARED_HW_CFG_LED_PHY11 0x000b0000
79#define SHARED_HW_CFG_LED_MAC4 0x000c0000 90#define SHARED_HW_CFG_LED_MAC4 0x000c0000
80#define SHARED_HW_CFG_LED_PHY8 0x000d0000 91#define SHARED_HW_CFG_LED_PHY8 0x000d0000
92#define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
93
81 94
82#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000 95#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
83#define SHARED_HW_CFG_AN_ENABLE_SHIFT 24 96#define SHARED_HW_CFG_AN_ENABLE_SHIFT 24
@@ -120,6 +133,23 @@ struct shared_hw_cfg { /* NVRAM Offset */
120#define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000 133#define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000
121#define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000 134#define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000
122 135
136 /* Set the MDC/MDIO access for the first external phy */
137#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000
138#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26
139#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000
140#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000
141#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000
142#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000
143#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000
144
145 /* Set the MDC/MDIO access for the second external phy */
146#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000
147#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29
148#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000
149#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000
150#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000
151#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000
152#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000
123 u32 power_dissipated; /* 0x11c */ 153 u32 power_dissipated; /* 0x11c */
124#define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 154#define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000
125#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24 155#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24
@@ -214,18 +244,191 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
214#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 244#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
215 245
216 246
217 u32 Reserved0[16]; /* 0x158 */ 247 u32 Reserved0[3]; /* 0x158 */
218 248 /* Controls the TX laser of the SFP+ module */
249 u32 sfp_ctrl; /* 0x164 */
250#define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
251#define PORT_HW_CFG_TX_LASER_SHIFT 0
252#define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
253#define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
254#define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
255#define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
256#define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
257
258 /* Controls the fault module LED of the SFP+ */
259#define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
260#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
261#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
262#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
263#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
264#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
265#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
266 u32 Reserved01[12]; /* 0x158 */
219 /* for external PHY, or forced mode or during AN */ 267 /* for external PHY, or forced mode or during AN */
220 u16 xgxs_config_rx[4]; /* 0x198 */ 268 u16 xgxs_config_rx[4]; /* 0x198 */
221 269
222 u16 xgxs_config_tx[4]; /* 0x1A0 */ 270 u16 xgxs_config_tx[4]; /* 0x1A0 */
223 271
224 u32 Reserved1[64]; /* 0x1A8 */ 272 u32 Reserved1[56]; /* 0x1A8 */
273 u32 default_cfg; /* 0x288 */
274#define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
275#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
276#define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
277#define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
278#define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
279#define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
280
281#define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
282#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
283#define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
284#define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
285#define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
286#define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
287
288#define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
289#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
290#define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
291#define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
292#define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
293#define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
294
295#define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
296#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
297#define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
298#define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
299#define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
300#define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
301
302 /*
303 * When KR link is required to be set to force which is not
304 * KR-compliant, this parameter determine what is the trigger for it.
305 * When GPIO is selected, low input will force the speed. Currently
306 * default speed is 1G. In the future, it may be widen to select the
307 * forced speed in with another parameter. Note when force-1G is
308 * enabled, it override option 56: Link Speed option.
309 */
310#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
311#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
312#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
313#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
314#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
315#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
316#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
317#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
318#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
319#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
320#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
321#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
322 /* Enable to determine with which GPIO to reset the external phy */
323#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
324#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
325#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
326#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
327#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
328#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
329#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
330#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
331#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
332#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
333#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
334 /* Enable BAM on KR */
335#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
336#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
337#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
338#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
339
340 /* Enable Common Mode Sense */
341#define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
342#define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
343#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
344#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
345
346 u32 speed_capability_mask2; /* 0x28C */
347#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
348#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
349#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001
350#define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002
351#define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004
352#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008
353#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010
354#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020
355#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040
356#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12G 0x00000080
357#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12_DOT_5G 0x00000100
358#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_13G 0x00000200
359#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_15G 0x00000400
360#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_16G 0x00000800
361
362#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000
363#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16
364#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000
365#define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000
366#define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000
367#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000
368#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000
369#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000
370#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000
371#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12G 0x00800000
372#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12_DOT_5G 0x01000000
373#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_13G 0x02000000
374#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_15G 0x04000000
375#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_16G 0x08000000
376
377 /* In the case where two media types (e.g. copper and fiber) are
378 present and electrically active at the same time, PHY Selection
379 will determine which of the two PHYs will be designated as the
380 Active PHY and used for a connection to the network. */
381 u32 multi_phy_config; /* 0x290 */
382#define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007
383#define PORT_HW_CFG_PHY_SELECTION_SHIFT 0
384#define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000
385#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001
386#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002
387#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
388#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
389
390 /* When enabled, all second phy nvram parameters will be swapped
391 with the first phy parameters */
392#define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008
393#define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3
394#define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000
395#define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008
396
397
398 /* Address of the second external phy */
399 u32 external_phy_config2; /* 0x294 */
400#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF
401#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0
402
403 /* The second XGXS external PHY type */
404#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00
405#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8
406#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000
407#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100
408#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200
409#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300
410#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400
411#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500
412#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600
413#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700
414#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800
415#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900
416#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00
417#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00
418#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00
419#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00
420#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
421#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
422
423 /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as
424 8706, 8726 and 8727) not all 4 values are needed. */
425 u16 xgxs_config2_rx[4]; /* 0x296 */
426 u16 xgxs_config2_tx[4]; /* 0x2A0 */
225 427
226 u32 lane_config; 428 u32 lane_config;
227#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff 429#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff
228#define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0 430#define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0
431
229#define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff 432#define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff
230#define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 433#define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
231#define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00 434#define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00
@@ -240,6 +443,10 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
240#define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 443#define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8
241 /* forced only */ 444 /* forced only */
242#define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 445#define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4
446 /* Indicate whether to swap the external phy polarity */
447#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000
448#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000
449#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000
243 450
244 u32 external_phy_config; 451 u32 external_phy_config;
245#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000 452#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000
@@ -265,6 +472,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
265#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 472#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
266#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 473#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
267#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00 474#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
475#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00
268#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 476#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
269#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 477#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
270 478
@@ -322,7 +530,12 @@ struct shared_feat_cfg { /* NVRAM Offset */
322#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000 530#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000
323#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002 531#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002
324 532
325#define SHARED_FEATURE_MF_MODE_DISABLED 0x00000100 533#define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700
534#define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8
535#define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000
536#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
537#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
538#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
326 539
327}; 540};
328 541
@@ -515,10 +728,17 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
515#define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400 728#define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400
516 729
517 /* The default for MCP link configuration, 730 /* The default for MCP link configuration,
518 uses the same defines as link_config */ 731 uses the same defines as link_config */
519 u32 mfw_wol_link_cfg; 732 u32 mfw_wol_link_cfg;
733 /* The default for the driver of the second external phy,
734 uses the same defines as link_config */
735 u32 link_config2; /* 0x47C */
520 736
521 u32 reserved[19]; 737 /* The default for MCP of the second external phy,
738 uses the same defines as link_config */
739 u32 mfw_wol_link_cfg2; /* 0x480 */
740
741 u32 Reserved2[17]; /* 0x484 */
522 742
523}; 743};
524 744
@@ -551,6 +771,7 @@ struct shm_dev_info { /* size */
551#define FUNC_7 7 771#define FUNC_7 7
552#define E1_FUNC_MAX 2 772#define E1_FUNC_MAX 2
553#define E1H_FUNC_MAX 8 773#define E1H_FUNC_MAX 8
774#define E2_FUNC_MAX 4 /* per path */
554 775
555#define VN_0 0 776#define VN_0 0
556#define VN_1 1 777#define VN_1 1
@@ -559,7 +780,7 @@ struct shm_dev_info { /* size */
559#define E1VN_MAX 1 780#define E1VN_MAX 1
560#define E1HVN_MAX 4 781#define E1HVN_MAX 4
561 782
562 783#define E2_VF_MAX 64
563/* This value (in milliseconds) determines the frequency of the driver 784/* This value (in milliseconds) determines the frequency of the driver
564 * issuing the PULSE message code. The firmware monitors this periodic 785 * issuing the PULSE message code. The firmware monitors this periodic
565 * pulse to determine when to switch to an OS-absent mode. */ 786 * pulse to determine when to switch to an OS-absent mode. */
@@ -686,9 +907,20 @@ struct drv_func_mb {
686 * The optic module verification commands require bootcode 907 * The optic module verification commands require bootcode
687 * v5.0.6 or later 908 * v5.0.6 or later
688 */ 909 */
689#define DRV_MSG_CODE_VRFY_OPT_MDL 0xa0000000 910#define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000
690#define REQ_BC_VER_4_VRFY_OPT_MDL 0x00050006 911#define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
691 912 /*
913 * The specific optic module verification command requires bootcode
914 * v5.2.12 or later
915 */
916#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
917#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
918
919#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
920#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
921#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
922#define REQ_BC_VER_4_SET_MF_BW 0x00060202
923#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
692#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 924#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
693#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 925#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
694#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 926#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
@@ -703,6 +935,9 @@ struct drv_func_mb {
703#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 935#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
704#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 936#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
705#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 937#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
938 /* Load common chip is supported from bc 6.0.0 */
939#define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000
940#define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000
706#define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000 941#define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
707#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 942#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
708#define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000 943#define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
@@ -759,6 +994,7 @@ struct drv_func_mb {
759 994
760 u32 drv_status; 995 u32 drv_status;
761#define DRV_STATUS_PMF 0x00000001 996#define DRV_STATUS_PMF 0x00000001
997#define DRV_STATUS_SET_MF_BW 0x00000004
762 998
763#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00 999#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
764#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100 1000#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
@@ -767,6 +1003,8 @@ struct drv_func_mb {
767#define DRV_STATUS_DCC_RESERVED1 0x00000800 1003#define DRV_STATUS_DCC_RESERVED1 0x00000800
768#define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000 1004#define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
769#define DRV_STATUS_DCC_SET_PRIORITY 0x00002000 1005#define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
1006#define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
1007#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
770 1008
771 u32 virt_mac_upper; 1009 u32 virt_mac_upper;
772#define VIRT_MAC_SIGN_MASK 0xffff0000 1010#define VIRT_MAC_SIGN_MASK 0xffff0000
@@ -859,12 +1097,43 @@ struct func_mf_cfg {
859 1097
860}; 1098};
861 1099
1100/* This structure is not applicable and should not be accessed on 57711 */
1101struct func_ext_cfg {
1102 u32 func_cfg;
1103#define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF
1104#define MACP_FUNC_CFG_FLAGS_SHIFT 0
1105#define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
1106#define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
1107#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
1108#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
1109
1110 u32 iscsi_mac_addr_upper;
1111 u32 iscsi_mac_addr_lower;
1112
1113 u32 fcoe_mac_addr_upper;
1114 u32 fcoe_mac_addr_lower;
1115
1116 u32 fcoe_wwn_port_name_upper;
1117 u32 fcoe_wwn_port_name_lower;
1118
1119 u32 fcoe_wwn_node_name_upper;
1120 u32 fcoe_wwn_node_name_lower;
1121
1122 u32 preserve_data;
1123#define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0)
1124#define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1)
1125#define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2)
1126#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3)
1127#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4)
1128};
1129
862struct mf_cfg { 1130struct mf_cfg {
863 1131
864 struct shared_mf_cfg shared_mf_config; 1132 struct shared_mf_cfg shared_mf_config;
865 struct port_mf_cfg port_mf_config[PORT_MAX]; 1133 struct port_mf_cfg port_mf_config[PORT_MAX];
866 struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; 1134 struct func_mf_cfg func_mf_config[E1H_FUNC_MAX];
867 1135
1136 struct func_ext_cfg func_ext_config[E1H_FUNC_MAX];
868}; 1137};
869 1138
870 1139
@@ -903,12 +1172,268 @@ struct shmem_region { /* SharedMem Offset (size) */
903 struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */ 1172 struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
904 1173
905 struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */ 1174 struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
906 struct drv_func_mb func_mb[E1H_FUNC_MAX]; 1175 struct drv_func_mb func_mb[]; /* 0x684
1176 (44*2/4/8=0x58/0xb0/0x160) */
1177
1178}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */
907 1179
908 struct mf_cfg mf_cfg; 1180struct fw_flr_ack {
1181 u32 pf_ack;
1182 u32 vf_ack[1];
1183 u32 iov_dis_ack;
1184};
1185
1186struct fw_flr_mb {
1187 u32 aggint;
1188 u32 opgen_addr;
1189 struct fw_flr_ack ack;
1190};
909 1191
910}; /* 0x6dc */ 1192/**** SUPPORT FOR SHMEM ARRRAYS ***
1193 * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
1194 * define arrays with storage types smaller then unsigned dwords.
1195 * The macros below add generic support for SHMEM arrays with numeric elements
1196 * that can span 2,4,8 or 16 bits. The array underlying type is a 32 bit dword
1197 * array with individual bit-filed elements accessed using shifts and masks.
1198 *
1199 */
911 1200
1201/* eb is the bitwidth of a single element */
1202#define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1)
1203#define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb)))
1204
1205/* the bit-position macro allows the used to flip the order of the arrays
1206 * elements on a per byte or word boundary.
1207 *
1208 * example: an array with 8 entries each 4 bit wide. This array will fit into
1209 * a single dword. The diagrmas below show the array order of the nibbles.
1210 *
1211 * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the stadard ordering:
1212 *
1213 * | | | |
1214 * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
1215 * | | | |
1216 *
1217 * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
1218 *
1219 * | | | |
1220 * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 |
1221 * | | | |
1222 *
1223 * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
1224 *
1225 * | | | |
1226 * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 |
1227 * | | | |
1228 */
1229#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
1230 ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
1231 (((i)%((fb)/(eb))) * (eb)))
1232
1233#define SHMEM_ARRAY_GET(a, i, eb, fb) \
1234 ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
1235 SHMEM_ARRAY_MASK(eb))
1236
1237#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
1238do { \
1239 a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
1240 SHMEM_ARRAY_BITPOS(i, eb, fb)); \
1241 a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
1242 SHMEM_ARRAY_BITPOS(i, eb, fb)); \
1243} while (0)
1244
1245
1246/****START OF DCBX STRUCTURES DECLARATIONS****/
1247#define DCBX_MAX_NUM_PRI_PG_ENTRIES 8
1248#define DCBX_PRI_PG_BITWIDTH 4
1249#define DCBX_PRI_PG_FBITS 8
1250#define DCBX_PRI_PG_GET(a, i) \
1251 SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS)
1252#define DCBX_PRI_PG_SET(a, i, val) \
1253 SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val)
1254#define DCBX_MAX_NUM_PG_BW_ENTRIES 8
1255#define DCBX_BW_PG_BITWIDTH 8
1256#define DCBX_PG_BW_GET(a, i) \
1257 SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH)
1258#define DCBX_PG_BW_SET(a, i, val) \
1259 SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val)
1260#define DCBX_STRICT_PRI_PG 15
1261#define DCBX_MAX_APP_PROTOCOL 16
1262#define FCOE_APP_IDX 0
1263#define ISCSI_APP_IDX 1
1264#define PREDEFINED_APP_IDX_MAX 2
1265
1266struct dcbx_ets_feature {
1267 u32 enabled;
1268 u32 pg_bw_tbl[2];
1269 u32 pri_pg_tbl[1];
1270};
1271
1272struct dcbx_pfc_feature {
1273#ifdef __BIG_ENDIAN
1274 u8 pri_en_bitmap;
1275#define DCBX_PFC_PRI_0 0x01
1276#define DCBX_PFC_PRI_1 0x02
1277#define DCBX_PFC_PRI_2 0x04
1278#define DCBX_PFC_PRI_3 0x08
1279#define DCBX_PFC_PRI_4 0x10
1280#define DCBX_PFC_PRI_5 0x20
1281#define DCBX_PFC_PRI_6 0x40
1282#define DCBX_PFC_PRI_7 0x80
1283 u8 pfc_caps;
1284 u8 reserved;
1285 u8 enabled;
1286#elif defined(__LITTLE_ENDIAN)
1287 u8 enabled;
1288 u8 reserved;
1289 u8 pfc_caps;
1290 u8 pri_en_bitmap;
1291#define DCBX_PFC_PRI_0 0x01
1292#define DCBX_PFC_PRI_1 0x02
1293#define DCBX_PFC_PRI_2 0x04
1294#define DCBX_PFC_PRI_3 0x08
1295#define DCBX_PFC_PRI_4 0x10
1296#define DCBX_PFC_PRI_5 0x20
1297#define DCBX_PFC_PRI_6 0x40
1298#define DCBX_PFC_PRI_7 0x80
1299#endif
1300};
1301
1302struct dcbx_app_priority_entry {
1303#ifdef __BIG_ENDIAN
1304 u16 app_id;
1305 u8 pri_bitmap;
1306 u8 appBitfield;
1307#define DCBX_APP_ENTRY_VALID 0x01
1308#define DCBX_APP_ENTRY_SF_MASK 0x30
1309#define DCBX_APP_ENTRY_SF_SHIFT 4
1310#define DCBX_APP_SF_ETH_TYPE 0x10
1311#define DCBX_APP_SF_PORT 0x20
1312#elif defined(__LITTLE_ENDIAN)
1313 u8 appBitfield;
1314#define DCBX_APP_ENTRY_VALID 0x01
1315#define DCBX_APP_ENTRY_SF_MASK 0x30
1316#define DCBX_APP_ENTRY_SF_SHIFT 4
1317#define DCBX_APP_SF_ETH_TYPE 0x10
1318#define DCBX_APP_SF_PORT 0x20
1319 u8 pri_bitmap;
1320 u16 app_id;
1321#endif
1322};
1323
1324struct dcbx_app_priority_feature {
1325#ifdef __BIG_ENDIAN
1326 u8 reserved;
1327 u8 default_pri;
1328 u8 tc_supported;
1329 u8 enabled;
1330#elif defined(__LITTLE_ENDIAN)
1331 u8 enabled;
1332 u8 tc_supported;
1333 u8 default_pri;
1334 u8 reserved;
1335#endif
1336 struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
1337};
1338
1339struct dcbx_features {
1340 struct dcbx_ets_feature ets;
1341 struct dcbx_pfc_feature pfc;
1342 struct dcbx_app_priority_feature app;
1343};
1344
1345struct lldp_params {
1346#ifdef __BIG_ENDIAN
1347 u8 msg_fast_tx_interval;
1348 u8 msg_tx_hold;
1349 u8 msg_tx_interval;
1350 u8 admin_status;
1351#define LLDP_TX_ONLY 0x01
1352#define LLDP_RX_ONLY 0x02
1353#define LLDP_TX_RX 0x03
1354#define LLDP_DISABLED 0x04
1355 u8 reserved1;
1356 u8 tx_fast;
1357 u8 tx_crd_max;
1358 u8 tx_crd;
1359#elif defined(__LITTLE_ENDIAN)
1360 u8 admin_status;
1361#define LLDP_TX_ONLY 0x01
1362#define LLDP_RX_ONLY 0x02
1363#define LLDP_TX_RX 0x03
1364#define LLDP_DISABLED 0x04
1365 u8 msg_tx_interval;
1366 u8 msg_tx_hold;
1367 u8 msg_fast_tx_interval;
1368 u8 tx_crd;
1369 u8 tx_crd_max;
1370 u8 tx_fast;
1371 u8 reserved1;
1372#endif
1373#define REM_CHASSIS_ID_STAT_LEN 4
1374#define REM_PORT_ID_STAT_LEN 4
1375 u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN];
1376 u32 peer_port_id[REM_PORT_ID_STAT_LEN];
1377};
1378
1379struct lldp_dcbx_stat {
1380#define LOCAL_CHASSIS_ID_STAT_LEN 2
1381#define LOCAL_PORT_ID_STAT_LEN 2
1382 u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN];
1383 u32 local_port_id[LOCAL_PORT_ID_STAT_LEN];
1384 u32 num_tx_dcbx_pkts;
1385 u32 num_rx_dcbx_pkts;
1386};
1387
1388struct lldp_admin_mib {
1389 u32 ver_cfg_flags;
1390#define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001
1391#define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002
1392#define DCBX_APP_CONFIG_TX_ENABLED 0x00000004
1393#define DCBX_ETS_RECO_TX_ENABLED 0x00000008
1394#define DCBX_ETS_RECO_VALID 0x00000010
1395#define DCBX_ETS_WILLING 0x00000020
1396#define DCBX_PFC_WILLING 0x00000040
1397#define DCBX_APP_WILLING 0x00000080
1398#define DCBX_VERSION_CEE 0x00000100
1399#define DCBX_VERSION_IEEE 0x00000200
1400#define DCBX_DCBX_ENABLED 0x00000400
1401#define DCBX_CEE_VERSION_MASK 0x0000f000
1402#define DCBX_CEE_VERSION_SHIFT 12
1403#define DCBX_CEE_MAX_VERSION_MASK 0x000f0000
1404#define DCBX_CEE_MAX_VERSION_SHIFT 16
1405 struct dcbx_features features;
1406};
1407
1408struct lldp_remote_mib {
1409 u32 prefix_seq_num;
1410 u32 flags;
1411#define DCBX_ETS_TLV_RX 0x00000001
1412#define DCBX_PFC_TLV_RX 0x00000002
1413#define DCBX_APP_TLV_RX 0x00000004
1414#define DCBX_ETS_RX_ERROR 0x00000010
1415#define DCBX_PFC_RX_ERROR 0x00000020
1416#define DCBX_APP_RX_ERROR 0x00000040
1417#define DCBX_ETS_REM_WILLING 0x00000100
1418#define DCBX_PFC_REM_WILLING 0x00000200
1419#define DCBX_APP_REM_WILLING 0x00000400
1420#define DCBX_REMOTE_ETS_RECO_VALID 0x00001000
1421 struct dcbx_features features;
1422 u32 suffix_seq_num;
1423};
1424
1425struct lldp_local_mib {
1426 u32 prefix_seq_num;
1427 u32 error;
1428#define DCBX_LOCAL_ETS_ERROR 0x00000001
1429#define DCBX_LOCAL_PFC_ERROR 0x00000002
1430#define DCBX_LOCAL_APP_ERROR 0x00000004
1431#define DCBX_LOCAL_PFC_MISMATCH 0x00000010
1432#define DCBX_LOCAL_APP_MISMATCH 0x00000020
1433 struct dcbx_features features;
1434 u32 suffix_seq_num;
1435};
1436/***END OF DCBX STRUCTURES DECLARATIONS***/
912 1437
913struct shmem2_region { 1438struct shmem2_region {
914 1439
@@ -922,7 +1447,34 @@ struct shmem2_region {
922#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040 1447#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040
923#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080 1448#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
924#define SHMEM_DCC_SUPPORT_DEFAULT SHMEM_DCC_SUPPORT_NONE 1449#define SHMEM_DCC_SUPPORT_DEFAULT SHMEM_DCC_SUPPORT_NONE
925 1450 u32 ext_phy_fw_version2[PORT_MAX];
1451 /*
1452 * For backwards compatibility, if the mf_cfg_addr does not exist
1453 * (the size filed is smaller than 0xc) the mf_cfg resides at the
1454 * end of struct shmem_region
1455 */
1456 u32 mf_cfg_addr;
1457#define SHMEM_MF_CFG_ADDR_NONE 0x00000000
1458
1459 struct fw_flr_mb flr_mb;
1460 u32 dcbx_lldp_params_offset;
1461#define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000
1462 u32 dcbx_neg_res_offset;
1463#define SHMEM_DCBX_NEG_RES_NONE 0x00000000
1464 u32 dcbx_remote_mib_offset;
1465#define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000
1466 /*
1467 * The other shmemX_base_addr holds the other path's shmem address
1468 * required for example in case of common phy init, or for path1 to know
1469 * the address of mcp debug trace which is located in offset from shmem
1470 * of path0
1471 */
1472 u32 other_shmem_base_addr;
1473 u32 other_shmem2_base_addr;
1474 u32 reserved1[E2_VF_MAX / 32];
1475 u32 reserved2[E2_FUNC_MAX][E2_VF_MAX / 32];
1476 u32 dcbx_lldp_dcbx_stat_offset;
1477#define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000
926}; 1478};
927 1479
928 1480
@@ -978,7 +1530,7 @@ struct emac_stats {
978}; 1530};
979 1531
980 1532
981struct bmac_stats { 1533struct bmac1_stats {
982 u32 tx_stat_gtpkt_lo; 1534 u32 tx_stat_gtpkt_lo;
983 u32 tx_stat_gtpkt_hi; 1535 u32 tx_stat_gtpkt_hi;
984 u32 tx_stat_gtxpf_lo; 1536 u32 tx_stat_gtxpf_lo;
@@ -1082,10 +1634,126 @@ struct bmac_stats {
1082 u32 rx_stat_gripj_hi; 1634 u32 rx_stat_gripj_hi;
1083}; 1635};
1084 1636
1637struct bmac2_stats {
1638 u32 tx_stat_gtpk_lo; /* gtpok */
1639 u32 tx_stat_gtpk_hi; /* gtpok */
1640 u32 tx_stat_gtxpf_lo; /* gtpf */
1641 u32 tx_stat_gtxpf_hi; /* gtpf */
1642 u32 tx_stat_gtpp_lo; /* NEW BMAC2 */
1643 u32 tx_stat_gtpp_hi; /* NEW BMAC2 */
1644 u32 tx_stat_gtfcs_lo;
1645 u32 tx_stat_gtfcs_hi;
1646 u32 tx_stat_gtuca_lo; /* NEW BMAC2 */
1647 u32 tx_stat_gtuca_hi; /* NEW BMAC2 */
1648 u32 tx_stat_gtmca_lo;
1649 u32 tx_stat_gtmca_hi;
1650 u32 tx_stat_gtbca_lo;
1651 u32 tx_stat_gtbca_hi;
1652 u32 tx_stat_gtovr_lo;
1653 u32 tx_stat_gtovr_hi;
1654 u32 tx_stat_gtfrg_lo;
1655 u32 tx_stat_gtfrg_hi;
1656 u32 tx_stat_gtpkt1_lo; /* gtpkt */
1657 u32 tx_stat_gtpkt1_hi; /* gtpkt */
1658 u32 tx_stat_gt64_lo;
1659 u32 tx_stat_gt64_hi;
1660 u32 tx_stat_gt127_lo;
1661 u32 tx_stat_gt127_hi;
1662 u32 tx_stat_gt255_lo;
1663 u32 tx_stat_gt255_hi;
1664 u32 tx_stat_gt511_lo;
1665 u32 tx_stat_gt511_hi;
1666 u32 tx_stat_gt1023_lo;
1667 u32 tx_stat_gt1023_hi;
1668 u32 tx_stat_gt1518_lo;
1669 u32 tx_stat_gt1518_hi;
1670 u32 tx_stat_gt2047_lo;
1671 u32 tx_stat_gt2047_hi;
1672 u32 tx_stat_gt4095_lo;
1673 u32 tx_stat_gt4095_hi;
1674 u32 tx_stat_gt9216_lo;
1675 u32 tx_stat_gt9216_hi;
1676 u32 tx_stat_gt16383_lo;
1677 u32 tx_stat_gt16383_hi;
1678 u32 tx_stat_gtmax_lo;
1679 u32 tx_stat_gtmax_hi;
1680 u32 tx_stat_gtufl_lo;
1681 u32 tx_stat_gtufl_hi;
1682 u32 tx_stat_gterr_lo;
1683 u32 tx_stat_gterr_hi;
1684 u32 tx_stat_gtbyt_lo;
1685 u32 tx_stat_gtbyt_hi;
1686
1687 u32 rx_stat_gr64_lo;
1688 u32 rx_stat_gr64_hi;
1689 u32 rx_stat_gr127_lo;
1690 u32 rx_stat_gr127_hi;
1691 u32 rx_stat_gr255_lo;
1692 u32 rx_stat_gr255_hi;
1693 u32 rx_stat_gr511_lo;
1694 u32 rx_stat_gr511_hi;
1695 u32 rx_stat_gr1023_lo;
1696 u32 rx_stat_gr1023_hi;
1697 u32 rx_stat_gr1518_lo;
1698 u32 rx_stat_gr1518_hi;
1699 u32 rx_stat_gr2047_lo;
1700 u32 rx_stat_gr2047_hi;
1701 u32 rx_stat_gr4095_lo;
1702 u32 rx_stat_gr4095_hi;
1703 u32 rx_stat_gr9216_lo;
1704 u32 rx_stat_gr9216_hi;
1705 u32 rx_stat_gr16383_lo;
1706 u32 rx_stat_gr16383_hi;
1707 u32 rx_stat_grmax_lo;
1708 u32 rx_stat_grmax_hi;
1709 u32 rx_stat_grpkt_lo;
1710 u32 rx_stat_grpkt_hi;
1711 u32 rx_stat_grfcs_lo;
1712 u32 rx_stat_grfcs_hi;
1713 u32 rx_stat_gruca_lo;
1714 u32 rx_stat_gruca_hi;
1715 u32 rx_stat_grmca_lo;
1716 u32 rx_stat_grmca_hi;
1717 u32 rx_stat_grbca_lo;
1718 u32 rx_stat_grbca_hi;
1719 u32 rx_stat_grxpf_lo; /* grpf */
1720 u32 rx_stat_grxpf_hi; /* grpf */
1721 u32 rx_stat_grpp_lo;
1722 u32 rx_stat_grpp_hi;
1723 u32 rx_stat_grxuo_lo; /* gruo */
1724 u32 rx_stat_grxuo_hi; /* gruo */
1725 u32 rx_stat_grjbr_lo;
1726 u32 rx_stat_grjbr_hi;
1727 u32 rx_stat_grovr_lo;
1728 u32 rx_stat_grovr_hi;
1729 u32 rx_stat_grxcf_lo; /* grcf */
1730 u32 rx_stat_grxcf_hi; /* grcf */
1731 u32 rx_stat_grflr_lo;
1732 u32 rx_stat_grflr_hi;
1733 u32 rx_stat_grpok_lo;
1734 u32 rx_stat_grpok_hi;
1735 u32 rx_stat_grmeg_lo;
1736 u32 rx_stat_grmeg_hi;
1737 u32 rx_stat_grmeb_lo;
1738 u32 rx_stat_grmeb_hi;
1739 u32 rx_stat_grbyt_lo;
1740 u32 rx_stat_grbyt_hi;
1741 u32 rx_stat_grund_lo;
1742 u32 rx_stat_grund_hi;
1743 u32 rx_stat_grfrg_lo;
1744 u32 rx_stat_grfrg_hi;
1745 u32 rx_stat_grerb_lo; /* grerrbyt */
1746 u32 rx_stat_grerb_hi; /* grerrbyt */
1747 u32 rx_stat_grfre_lo; /* grfrerr */
1748 u32 rx_stat_grfre_hi; /* grfrerr */
1749 u32 rx_stat_gripj_lo;
1750 u32 rx_stat_gripj_hi;
1751};
1085 1752
1086union mac_stats { 1753union mac_stats {
1087 struct emac_stats emac_stats; 1754 struct emac_stats emac_stats;
1088 struct bmac_stats bmac_stats; 1755 struct bmac1_stats bmac1_stats;
1756 struct bmac2_stats bmac2_stats;
1089}; 1757};
1090 1758
1091 1759
@@ -1259,17 +1927,17 @@ struct host_func_stats {
1259}; 1927};
1260 1928
1261 1929
1262#define BCM_5710_FW_MAJOR_VERSION 5 1930#define BCM_5710_FW_MAJOR_VERSION 6
1263#define BCM_5710_FW_MINOR_VERSION 2 1931#define BCM_5710_FW_MINOR_VERSION 2
1264#define BCM_5710_FW_REVISION_VERSION 13 1932#define BCM_5710_FW_REVISION_VERSION 9
1265#define BCM_5710_FW_ENGINEERING_VERSION 0 1933#define BCM_5710_FW_ENGINEERING_VERSION 0
1266#define BCM_5710_FW_COMPILE_FLAGS 1 1934#define BCM_5710_FW_COMPILE_FLAGS 1
1267 1935
1268 1936
1269/* 1937/*
1270 * attention bits 1938 * attention bits
1271 */ 1939 */
1272struct atten_def_status_block { 1940struct atten_sp_status_block {
1273 __le32 attn_bits; 1941 __le32 attn_bits;
1274 __le32 attn_bits_ack; 1942 __le32 attn_bits_ack;
1275 u8 status_block_id; 1943 u8 status_block_id;
@@ -1327,7 +1995,60 @@ struct doorbell_set_prod {
1327 1995
1328 1996
1329/* 1997/*
1330 * IGU driver acknowledgement register 1998 * 3 lines. status block
1999 */
2000struct hc_status_block_e1x {
2001 __le16 index_values[HC_SB_MAX_INDICES_E1X];
2002 __le16 running_index[HC_SB_MAX_SM];
2003 u32 rsrv;
2004};
2005
2006/*
2007 * host status block
2008 */
2009struct host_hc_status_block_e1x {
2010 struct hc_status_block_e1x sb;
2011};
2012
2013
2014/*
2015 * 3 lines. status block
2016 */
2017struct hc_status_block_e2 {
2018 __le16 index_values[HC_SB_MAX_INDICES_E2];
2019 __le16 running_index[HC_SB_MAX_SM];
2020 u32 reserved;
2021};
2022
2023/*
2024 * host status block
2025 */
2026struct host_hc_status_block_e2 {
2027 struct hc_status_block_e2 sb;
2028};
2029
2030
2031/*
2032 * 5 lines. slow-path status block
2033 */
2034struct hc_sp_status_block {
2035 __le16 index_values[HC_SP_SB_MAX_INDICES];
2036 __le16 running_index;
2037 __le16 rsrv;
2038 u32 rsrv1;
2039};
2040
2041/*
2042 * host status block
2043 */
2044struct host_sp_status_block {
2045 struct atten_sp_status_block atten_status_block;
2046 struct hc_sp_status_block sp_sb;
2047};
2048
2049
2050/*
2051 * IGU driver acknowledgment register
1331 */ 2052 */
1332struct igu_ack_register { 2053struct igu_ack_register {
1333#if defined(__BIG_ENDIAN) 2054#if defined(__BIG_ENDIAN)
@@ -1417,6 +2138,24 @@ union igu_consprod_reg {
1417 2138
1418 2139
1419/* 2140/*
2141 * Control register for the IGU command register
2142 */
2143struct igu_ctrl_reg {
2144 u32 ctrl_data;
2145#define IGU_CTRL_REG_ADDRESS (0xFFF<<0)
2146#define IGU_CTRL_REG_ADDRESS_SHIFT 0
2147#define IGU_CTRL_REG_FID (0x7F<<12)
2148#define IGU_CTRL_REG_FID_SHIFT 12
2149#define IGU_CTRL_REG_RESERVED (0x1<<19)
2150#define IGU_CTRL_REG_RESERVED_SHIFT 19
2151#define IGU_CTRL_REG_TYPE (0x1<<20)
2152#define IGU_CTRL_REG_TYPE_SHIFT 20
2153#define IGU_CTRL_REG_UNUSED (0x7FF<<21)
2154#define IGU_CTRL_REG_UNUSED_SHIFT 21
2155};
2156
2157
2158/*
1420 * Parser parsing flags field 2159 * Parser parsing flags field
1421 */ 2160 */
1422struct parsing_flags { 2161struct parsing_flags {
@@ -1485,8 +2224,14 @@ struct dmae_command {
1485#define DMAE_COMMAND_DST_RESET_SHIFT 14 2224#define DMAE_COMMAND_DST_RESET_SHIFT 14
1486#define DMAE_COMMAND_E1HVN (0x3<<15) 2225#define DMAE_COMMAND_E1HVN (0x3<<15)
1487#define DMAE_COMMAND_E1HVN_SHIFT 15 2226#define DMAE_COMMAND_E1HVN_SHIFT 15
1488#define DMAE_COMMAND_RESERVED0 (0x7FFF<<17) 2227#define DMAE_COMMAND_DST_VN (0x3<<17)
1489#define DMAE_COMMAND_RESERVED0_SHIFT 17 2228#define DMAE_COMMAND_DST_VN_SHIFT 17
2229#define DMAE_COMMAND_C_FUNC (0x1<<19)
2230#define DMAE_COMMAND_C_FUNC_SHIFT 19
2231#define DMAE_COMMAND_ERR_POLICY (0x3<<20)
2232#define DMAE_COMMAND_ERR_POLICY_SHIFT 20
2233#define DMAE_COMMAND_RESERVED0 (0x3FF<<22)
2234#define DMAE_COMMAND_RESERVED0_SHIFT 22
1490 u32 src_addr_lo; 2235 u32 src_addr_lo;
1491 u32 src_addr_hi; 2236 u32 src_addr_hi;
1492 u32 dst_addr_lo; 2237 u32 dst_addr_lo;
@@ -1511,11 +2256,11 @@ struct dmae_command {
1511 u16 crc16_c; 2256 u16 crc16_c;
1512#endif 2257#endif
1513#if defined(__BIG_ENDIAN) 2258#if defined(__BIG_ENDIAN)
1514 u16 reserved2; 2259 u16 reserved3;
1515 u16 crc_t10; 2260 u16 crc_t10;
1516#elif defined(__LITTLE_ENDIAN) 2261#elif defined(__LITTLE_ENDIAN)
1517 u16 crc_t10; 2262 u16 crc_t10;
1518 u16 reserved2; 2263 u16 reserved3;
1519#endif 2264#endif
1520#if defined(__BIG_ENDIAN) 2265#if defined(__BIG_ENDIAN)
1521 u16 xsum8; 2266 u16 xsum8;
@@ -1536,96 +2281,20 @@ struct double_regpair {
1536 2281
1537 2282
1538/* 2283/*
1539 * The eth storm context of Ustorm (configuration part) 2284 * SDM operation gen command (generate aggregative interrupt)
1540 */ 2285 */
1541struct ustorm_eth_st_context_config { 2286struct sdm_op_gen {
1542#if defined(__BIG_ENDIAN) 2287 __le32 command;
1543 u8 flags; 2288#define SDM_OP_GEN_COMP_PARAM (0x1F<<0)
1544#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0) 2289#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
1545#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0 2290#define SDM_OP_GEN_COMP_TYPE (0x7<<5)
1546#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1) 2291#define SDM_OP_GEN_COMP_TYPE_SHIFT 5
1547#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1 2292#define SDM_OP_GEN_AGG_VECT_IDX (0xFF<<8)
1548#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2) 2293#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8
1549#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2 2294#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1<<16)
1550#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<3) 2295#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16
1551#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 3 2296#define SDM_OP_GEN_RESERVED (0x7FFF<<17)
1552#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4) 2297#define SDM_OP_GEN_RESERVED_SHIFT 17
1553#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
1554 u8 status_block_id;
1555 u8 clientId;
1556 u8 sb_index_numbers;
1557#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0)
1558#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0
1559#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4)
1560#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4
1561#elif defined(__LITTLE_ENDIAN)
1562 u8 sb_index_numbers;
1563#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0)
1564#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0
1565#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4)
1566#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4
1567 u8 clientId;
1568 u8 status_block_id;
1569 u8 flags;
1570#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0)
1571#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0
1572#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1)
1573#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1
1574#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2)
1575#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2
1576#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<3)
1577#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 3
1578#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4)
1579#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
1580#endif
1581#if defined(__BIG_ENDIAN)
1582 u16 bd_buff_size;
1583 u8 statistics_counter_id;
1584 u8 mc_alignment_log_size;
1585#elif defined(__LITTLE_ENDIAN)
1586 u8 mc_alignment_log_size;
1587 u8 statistics_counter_id;
1588 u16 bd_buff_size;
1589#endif
1590#if defined(__BIG_ENDIAN)
1591 u8 __local_sge_prod;
1592 u8 __local_bd_prod;
1593 u16 sge_buff_size;
1594#elif defined(__LITTLE_ENDIAN)
1595 u16 sge_buff_size;
1596 u8 __local_bd_prod;
1597 u8 __local_sge_prod;
1598#endif
1599#if defined(__BIG_ENDIAN)
1600 u16 __sdm_bd_expected_counter;
1601 u8 cstorm_agg_int;
1602 u8 __expected_bds_on_ram;
1603#elif defined(__LITTLE_ENDIAN)
1604 u8 __expected_bds_on_ram;
1605 u8 cstorm_agg_int;
1606 u16 __sdm_bd_expected_counter;
1607#endif
1608#if defined(__BIG_ENDIAN)
1609 u16 __ring_data_ram_addr;
1610 u16 __hc_cstorm_ram_addr;
1611#elif defined(__LITTLE_ENDIAN)
1612 u16 __hc_cstorm_ram_addr;
1613 u16 __ring_data_ram_addr;
1614#endif
1615#if defined(__BIG_ENDIAN)
1616 u8 reserved1;
1617 u8 max_sges_for_packet;
1618 u16 __bd_ring_ram_addr;
1619#elif defined(__LITTLE_ENDIAN)
1620 u16 __bd_ring_ram_addr;
1621 u8 max_sges_for_packet;
1622 u8 reserved1;
1623#endif
1624 u32 bd_page_base_lo;
1625 u32 bd_page_base_hi;
1626 u32 sge_page_base_lo;
1627 u32 sge_page_base_hi;
1628 struct regpair reserved2;
1629}; 2298};
1630 2299
1631/* 2300/*
@@ -1644,20 +2313,13 @@ struct eth_rx_sge {
1644 __le32 addr_hi; 2313 __le32 addr_hi;
1645}; 2314};
1646 2315
1647/* 2316
1648 * Local BDs and SGEs rings (in ETH)
1649 */
1650struct eth_local_rx_rings {
1651 struct eth_rx_bd __local_bd_ring[8];
1652 struct eth_rx_sge __local_sge_ring[10];
1653};
1654 2317
1655/* 2318/*
1656 * The eth storm context of Ustorm 2319 * The eth storm context of Ustorm
1657 */ 2320 */
1658struct ustorm_eth_st_context { 2321struct ustorm_eth_st_context {
1659 struct ustorm_eth_st_context_config common; 2322 u32 reserved0[48];
1660 struct eth_local_rx_rings __rings;
1661}; 2323};
1662 2324
1663/* 2325/*
@@ -1668,337 +2330,53 @@ struct tstorm_eth_st_context {
1668}; 2330};
1669 2331
1670/* 2332/*
1671 * The eth aggregative context section of Xstorm
1672 */
1673struct xstorm_eth_extra_ag_context_section {
1674#if defined(__BIG_ENDIAN)
1675 u8 __tcp_agg_vars1;
1676 u8 __reserved50;
1677 u16 __mss;
1678#elif defined(__LITTLE_ENDIAN)
1679 u16 __mss;
1680 u8 __reserved50;
1681 u8 __tcp_agg_vars1;
1682#endif
1683 u32 __snd_nxt;
1684 u32 __tx_wnd;
1685 u32 __snd_una;
1686 u32 __reserved53;
1687#if defined(__BIG_ENDIAN)
1688 u8 __agg_val8_th;
1689 u8 __agg_val8;
1690 u16 __tcp_agg_vars2;
1691#elif defined(__LITTLE_ENDIAN)
1692 u16 __tcp_agg_vars2;
1693 u8 __agg_val8;
1694 u8 __agg_val8_th;
1695#endif
1696 u32 __reserved58;
1697 u32 __reserved59;
1698 u32 __reserved60;
1699 u32 __reserved61;
1700#if defined(__BIG_ENDIAN)
1701 u16 __agg_val7_th;
1702 u16 __agg_val7;
1703#elif defined(__LITTLE_ENDIAN)
1704 u16 __agg_val7;
1705 u16 __agg_val7_th;
1706#endif
1707#if defined(__BIG_ENDIAN)
1708 u8 __tcp_agg_vars5;
1709 u8 __tcp_agg_vars4;
1710 u8 __tcp_agg_vars3;
1711 u8 __reserved62;
1712#elif defined(__LITTLE_ENDIAN)
1713 u8 __reserved62;
1714 u8 __tcp_agg_vars3;
1715 u8 __tcp_agg_vars4;
1716 u8 __tcp_agg_vars5;
1717#endif
1718 u32 __tcp_agg_vars6;
1719#if defined(__BIG_ENDIAN)
1720 u16 __agg_misc6;
1721 u16 __tcp_agg_vars7;
1722#elif defined(__LITTLE_ENDIAN)
1723 u16 __tcp_agg_vars7;
1724 u16 __agg_misc6;
1725#endif
1726 u32 __agg_val10;
1727 u32 __agg_val10_th;
1728#if defined(__BIG_ENDIAN)
1729 u16 __reserved3;
1730 u8 __reserved2;
1731 u8 __da_only_cnt;
1732#elif defined(__LITTLE_ENDIAN)
1733 u8 __da_only_cnt;
1734 u8 __reserved2;
1735 u16 __reserved3;
1736#endif
1737};
1738
1739/*
1740 * The eth aggregative context of Xstorm 2333 * The eth aggregative context of Xstorm
1741 */ 2334 */
1742struct xstorm_eth_ag_context { 2335struct xstorm_eth_ag_context {
1743#if defined(__BIG_ENDIAN) 2336 u32 reserved0;
1744 u16 agg_val1;
1745 u8 __agg_vars1;
1746 u8 __state;
1747#elif defined(__LITTLE_ENDIAN)
1748 u8 __state;
1749 u8 __agg_vars1;
1750 u16 agg_val1;
1751#endif
1752#if defined(__BIG_ENDIAN) 2337#if defined(__BIG_ENDIAN)
1753 u8 cdu_reserved; 2338 u8 cdu_reserved;
1754 u8 __agg_vars4; 2339 u8 reserved2;
1755 u8 __agg_vars3; 2340 u16 reserved1;
1756 u8 __agg_vars2;
1757#elif defined(__LITTLE_ENDIAN) 2341#elif defined(__LITTLE_ENDIAN)
1758 u8 __agg_vars2; 2342 u16 reserved1;
1759 u8 __agg_vars3; 2343 u8 reserved2;
1760 u8 __agg_vars4;
1761 u8 cdu_reserved; 2344 u8 cdu_reserved;
1762#endif 2345#endif
1763 u32 __bd_prod; 2346 u32 reserved3[30];
1764#if defined(__BIG_ENDIAN)
1765 u16 __agg_vars5;
1766 u16 __agg_val4_th;
1767#elif defined(__LITTLE_ENDIAN)
1768 u16 __agg_val4_th;
1769 u16 __agg_vars5;
1770#endif
1771 struct xstorm_eth_extra_ag_context_section __extra_section;
1772#if defined(__BIG_ENDIAN)
1773 u16 __agg_vars7;
1774 u8 __agg_val3_th;
1775 u8 __agg_vars6;
1776#elif defined(__LITTLE_ENDIAN)
1777 u8 __agg_vars6;
1778 u8 __agg_val3_th;
1779 u16 __agg_vars7;
1780#endif
1781#if defined(__BIG_ENDIAN)
1782 u16 __agg_val11_th;
1783 u16 __agg_val11;
1784#elif defined(__LITTLE_ENDIAN)
1785 u16 __agg_val11;
1786 u16 __agg_val11_th;
1787#endif
1788#if defined(__BIG_ENDIAN)
1789 u8 __reserved1;
1790 u8 __agg_val6_th;
1791 u16 __agg_val9;
1792#elif defined(__LITTLE_ENDIAN)
1793 u16 __agg_val9;
1794 u8 __agg_val6_th;
1795 u8 __reserved1;
1796#endif
1797#if defined(__BIG_ENDIAN)
1798 u16 __agg_val2_th;
1799 u16 __agg_val2;
1800#elif defined(__LITTLE_ENDIAN)
1801 u16 __agg_val2;
1802 u16 __agg_val2_th;
1803#endif
1804 u32 __agg_vars8;
1805#if defined(__BIG_ENDIAN)
1806 u16 __agg_misc0;
1807 u16 __agg_val4;
1808#elif defined(__LITTLE_ENDIAN)
1809 u16 __agg_val4;
1810 u16 __agg_misc0;
1811#endif
1812#if defined(__BIG_ENDIAN)
1813 u8 __agg_val3;
1814 u8 __agg_val6;
1815 u8 __agg_val5_th;
1816 u8 __agg_val5;
1817#elif defined(__LITTLE_ENDIAN)
1818 u8 __agg_val5;
1819 u8 __agg_val5_th;
1820 u8 __agg_val6;
1821 u8 __agg_val3;
1822#endif
1823#if defined(__BIG_ENDIAN)
1824 u16 __agg_misc1;
1825 u16 __bd_ind_max_val;
1826#elif defined(__LITTLE_ENDIAN)
1827 u16 __bd_ind_max_val;
1828 u16 __agg_misc1;
1829#endif
1830 u32 __reserved57;
1831 u32 __agg_misc4;
1832 u32 __agg_misc5;
1833};
1834
1835/*
1836 * The eth extra aggregative context section of Tstorm
1837 */
1838struct tstorm_eth_extra_ag_context_section {
1839 u32 __agg_val1;
1840#if defined(__BIG_ENDIAN)
1841 u8 __tcp_agg_vars2;
1842 u8 __agg_val3;
1843 u16 __agg_val2;
1844#elif defined(__LITTLE_ENDIAN)
1845 u16 __agg_val2;
1846 u8 __agg_val3;
1847 u8 __tcp_agg_vars2;
1848#endif
1849#if defined(__BIG_ENDIAN)
1850 u16 __agg_val5;
1851 u8 __agg_val6;
1852 u8 __tcp_agg_vars3;
1853#elif defined(__LITTLE_ENDIAN)
1854 u8 __tcp_agg_vars3;
1855 u8 __agg_val6;
1856 u16 __agg_val5;
1857#endif
1858 u32 __reserved63;
1859 u32 __reserved64;
1860 u32 __reserved65;
1861 u32 __reserved66;
1862 u32 __reserved67;
1863 u32 __tcp_agg_vars1;
1864 u32 __reserved61;
1865 u32 __reserved62;
1866 u32 __reserved2;
1867}; 2347};
1868 2348
1869/* 2349/*
1870 * The eth aggregative context of Tstorm 2350 * The eth aggregative context of Tstorm
1871 */ 2351 */
1872struct tstorm_eth_ag_context { 2352struct tstorm_eth_ag_context {
1873#if defined(__BIG_ENDIAN) 2353 u32 __reserved0[14];
1874 u16 __reserved54;
1875 u8 __agg_vars1;
1876 u8 __state;
1877#elif defined(__LITTLE_ENDIAN)
1878 u8 __state;
1879 u8 __agg_vars1;
1880 u16 __reserved54;
1881#endif
1882#if defined(__BIG_ENDIAN)
1883 u16 __agg_val4;
1884 u16 __agg_vars2;
1885#elif defined(__LITTLE_ENDIAN)
1886 u16 __agg_vars2;
1887 u16 __agg_val4;
1888#endif
1889 struct tstorm_eth_extra_ag_context_section __extra_section;
1890}; 2354};
1891 2355
2356
1892/* 2357/*
1893 * The eth aggregative context of Cstorm 2358 * The eth aggregative context of Cstorm
1894 */ 2359 */
1895struct cstorm_eth_ag_context { 2360struct cstorm_eth_ag_context {
1896 u32 __agg_vars1; 2361 u32 __reserved0[10];
1897#if defined(__BIG_ENDIAN)
1898 u8 __aux1_th;
1899 u8 __aux1_val;
1900 u16 __agg_vars2;
1901#elif defined(__LITTLE_ENDIAN)
1902 u16 __agg_vars2;
1903 u8 __aux1_val;
1904 u8 __aux1_th;
1905#endif
1906 u32 __num_of_treated_packet;
1907 u32 __last_packet_treated;
1908#if defined(__BIG_ENDIAN)
1909 u16 __reserved58;
1910 u16 __reserved57;
1911#elif defined(__LITTLE_ENDIAN)
1912 u16 __reserved57;
1913 u16 __reserved58;
1914#endif
1915#if defined(__BIG_ENDIAN)
1916 u8 __reserved62;
1917 u8 __reserved61;
1918 u8 __reserved60;
1919 u8 __reserved59;
1920#elif defined(__LITTLE_ENDIAN)
1921 u8 __reserved59;
1922 u8 __reserved60;
1923 u8 __reserved61;
1924 u8 __reserved62;
1925#endif
1926#if defined(__BIG_ENDIAN)
1927 u16 __reserved64;
1928 u16 __reserved63;
1929#elif defined(__LITTLE_ENDIAN)
1930 u16 __reserved63;
1931 u16 __reserved64;
1932#endif
1933 u32 __reserved65;
1934#if defined(__BIG_ENDIAN)
1935 u16 __agg_vars3;
1936 u16 __rq_inv_cnt;
1937#elif defined(__LITTLE_ENDIAN)
1938 u16 __rq_inv_cnt;
1939 u16 __agg_vars3;
1940#endif
1941#if defined(__BIG_ENDIAN)
1942 u16 __packet_index_th;
1943 u16 __packet_index;
1944#elif defined(__LITTLE_ENDIAN)
1945 u16 __packet_index;
1946 u16 __packet_index_th;
1947#endif
1948}; 2362};
1949 2363
2364
1950/* 2365/*
1951 * The eth aggregative context of Ustorm 2366 * The eth aggregative context of Ustorm
1952 */ 2367 */
1953struct ustorm_eth_ag_context { 2368struct ustorm_eth_ag_context {
1954#if defined(__BIG_ENDIAN) 2369 u32 __reserved0;
1955 u8 __aux_counter_flags;
1956 u8 __agg_vars2;
1957 u8 __agg_vars1;
1958 u8 __state;
1959#elif defined(__LITTLE_ENDIAN)
1960 u8 __state;
1961 u8 __agg_vars1;
1962 u8 __agg_vars2;
1963 u8 __aux_counter_flags;
1964#endif
1965#if defined(__BIG_ENDIAN) 2370#if defined(__BIG_ENDIAN)
1966 u8 cdu_usage; 2371 u8 cdu_usage;
1967 u8 __agg_misc2; 2372 u8 __reserved2;
1968 u16 __agg_misc1; 2373 u16 __reserved1;
1969#elif defined(__LITTLE_ENDIAN) 2374#elif defined(__LITTLE_ENDIAN)
1970 u16 __agg_misc1; 2375 u16 __reserved1;
1971 u8 __agg_misc2; 2376 u8 __reserved2;
1972 u8 cdu_usage; 2377 u8 cdu_usage;
1973#endif 2378#endif
1974 u32 __agg_misc4; 2379 u32 __reserved3[6];
1975#if defined(__BIG_ENDIAN)
1976 u8 __agg_val3_th;
1977 u8 __agg_val3;
1978 u16 __agg_misc3;
1979#elif defined(__LITTLE_ENDIAN)
1980 u16 __agg_misc3;
1981 u8 __agg_val3;
1982 u8 __agg_val3_th;
1983#endif
1984 u32 __agg_val1;
1985 u32 __agg_misc4_th;
1986#if defined(__BIG_ENDIAN)
1987 u16 __agg_val2_th;
1988 u16 __agg_val2;
1989#elif defined(__LITTLE_ENDIAN)
1990 u16 __agg_val2;
1991 u16 __agg_val2_th;
1992#endif
1993#if defined(__BIG_ENDIAN)
1994 u16 __reserved2;
1995 u8 __decision_rules;
1996 u8 __decision_rule_enable_bits;
1997#elif defined(__LITTLE_ENDIAN)
1998 u8 __decision_rule_enable_bits;
1999 u8 __decision_rules;
2000 u16 __reserved2;
2001#endif
2002}; 2380};
2003 2381
2004/* 2382/*
@@ -2022,18 +2400,16 @@ struct timers_block_context {
2022 */ 2400 */
2023struct eth_tx_bd_flags { 2401struct eth_tx_bd_flags {
2024 u8 as_bitfield; 2402 u8 as_bitfield;
2025#define ETH_TX_BD_FLAGS_VLAN_TAG (0x1<<0) 2403#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0)
2026#define ETH_TX_BD_FLAGS_VLAN_TAG_SHIFT 0 2404#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0
2027#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<1) 2405#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1)
2028#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 1 2406#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1
2029#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<2) 2407#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2)
2030#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 2 2408#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2
2031#define ETH_TX_BD_FLAGS_END_BD (0x1<<3)
2032#define ETH_TX_BD_FLAGS_END_BD_SHIFT 3
2033#define ETH_TX_BD_FLAGS_START_BD (0x1<<4) 2409#define ETH_TX_BD_FLAGS_START_BD (0x1<<4)
2034#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4 2410#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4
2035#define ETH_TX_BD_FLAGS_HDR_POOL (0x1<<5) 2411#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5)
2036#define ETH_TX_BD_FLAGS_HDR_POOL_SHIFT 5 2412#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5
2037#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6) 2413#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6)
2038#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6 2414#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6
2039#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7) 2415#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7)
@@ -2048,7 +2424,7 @@ struct eth_tx_start_bd {
2048 __le32 addr_hi; 2424 __le32 addr_hi;
2049 __le16 nbd; 2425 __le16 nbd;
2050 __le16 nbytes; 2426 __le16 nbytes;
2051 __le16 vlan; 2427 __le16 vlan_or_ethertype;
2052 struct eth_tx_bd_flags bd_flags; 2428 struct eth_tx_bd_flags bd_flags;
2053 u8 general_data; 2429 u8 general_data;
2054#define ETH_TX_START_BD_HDR_NBDS (0x3F<<0) 2430#define ETH_TX_START_BD_HDR_NBDS (0x3F<<0)
@@ -2061,48 +2437,48 @@ struct eth_tx_start_bd {
2061 * Tx regular BD structure 2437 * Tx regular BD structure
2062 */ 2438 */
2063struct eth_tx_bd { 2439struct eth_tx_bd {
2064 u32 addr_lo; 2440 __le32 addr_lo;
2065 u32 addr_hi; 2441 __le32 addr_hi;
2066 u16 total_pkt_bytes; 2442 __le16 total_pkt_bytes;
2067 u16 nbytes; 2443 __le16 nbytes;
2068 u8 reserved[4]; 2444 u8 reserved[4];
2069}; 2445};
2070 2446
2071/* 2447/*
2072 * Tx parsing BD structure for ETH,Relevant in START 2448 * Tx parsing BD structure for ETH E1/E1h
2073 */ 2449 */
2074struct eth_tx_parse_bd { 2450struct eth_tx_parse_bd_e1x {
2075 u8 global_data; 2451 u8 global_data;
2076#define ETH_TX_PARSE_BD_IP_HDR_START_OFFSET (0xF<<0) 2452#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
2077#define ETH_TX_PARSE_BD_IP_HDR_START_OFFSET_SHIFT 0 2453#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
2078#define ETH_TX_PARSE_BD_UDP_CS_FLG (0x1<<4) 2454#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4)
2079#define ETH_TX_PARSE_BD_UDP_CS_FLG_SHIFT 4 2455#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4
2080#define ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN (0x1<<5) 2456#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
2081#define ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN_SHIFT 5 2457#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
2082#define ETH_TX_PARSE_BD_LLC_SNAP_EN (0x1<<6) 2458#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6)
2083#define ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT 6 2459#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6
2084#define ETH_TX_PARSE_BD_NS_FLG (0x1<<7) 2460#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7)
2085#define ETH_TX_PARSE_BD_NS_FLG_SHIFT 7 2461#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7
2086 u8 tcp_flags; 2462 u8 tcp_flags;
2087#define ETH_TX_PARSE_BD_FIN_FLG (0x1<<0) 2463#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
2088#define ETH_TX_PARSE_BD_FIN_FLG_SHIFT 0 2464#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
2089#define ETH_TX_PARSE_BD_SYN_FLG (0x1<<1) 2465#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1)
2090#define ETH_TX_PARSE_BD_SYN_FLG_SHIFT 1 2466#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1
2091#define ETH_TX_PARSE_BD_RST_FLG (0x1<<2) 2467#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2)
2092#define ETH_TX_PARSE_BD_RST_FLG_SHIFT 2 2468#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2
2093#define ETH_TX_PARSE_BD_PSH_FLG (0x1<<3) 2469#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3)
2094#define ETH_TX_PARSE_BD_PSH_FLG_SHIFT 3 2470#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3
2095#define ETH_TX_PARSE_BD_ACK_FLG (0x1<<4) 2471#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4)
2096#define ETH_TX_PARSE_BD_ACK_FLG_SHIFT 4 2472#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4
2097#define ETH_TX_PARSE_BD_URG_FLG (0x1<<5) 2473#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5)
2098#define ETH_TX_PARSE_BD_URG_FLG_SHIFT 5 2474#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5
2099#define ETH_TX_PARSE_BD_ECE_FLG (0x1<<6) 2475#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6)
2100#define ETH_TX_PARSE_BD_ECE_FLG_SHIFT 6 2476#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6
2101#define ETH_TX_PARSE_BD_CWR_FLG (0x1<<7) 2477#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
2102#define ETH_TX_PARSE_BD_CWR_FLG_SHIFT 7 2478#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
2103 u8 ip_hlen; 2479 u8 ip_hlen_w;
2104 s8 reserved; 2480 s8 reserved;
2105 __le16 total_hlen; 2481 __le16 total_hlen_w;
2106 __le16 tcp_pseudo_csum; 2482 __le16 tcp_pseudo_csum;
2107 __le16 lso_mss; 2483 __le16 lso_mss;
2108 __le16 ip_id; 2484 __le16 ip_id;
@@ -2110,6 +2486,27 @@ struct eth_tx_parse_bd {
2110}; 2486};
2111 2487
2112/* 2488/*
2489 * Tx parsing BD structure for ETH E2
2490 */
2491struct eth_tx_parse_bd_e2 {
2492 __le16 dst_mac_addr_lo;
2493 __le16 dst_mac_addr_mid;
2494 __le16 dst_mac_addr_hi;
2495 __le16 src_mac_addr_lo;
2496 __le16 src_mac_addr_mid;
2497 __le16 src_mac_addr_hi;
2498 __le32 parsing_data;
2499#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0)
2500#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
2501#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13)
2502#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13
2503#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17)
2504#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17
2505#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31)
2506#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31
2507};
2508
2509/*
2113 * The last BD in the BD memory will hold a pointer to the next BD memory 2510 * The last BD in the BD memory will hold a pointer to the next BD memory
2114 */ 2511 */
2115struct eth_tx_next_bd { 2512struct eth_tx_next_bd {
@@ -2124,79 +2521,24 @@ struct eth_tx_next_bd {
2124union eth_tx_bd_types { 2521union eth_tx_bd_types {
2125 struct eth_tx_start_bd start_bd; 2522 struct eth_tx_start_bd start_bd;
2126 struct eth_tx_bd reg_bd; 2523 struct eth_tx_bd reg_bd;
2127 struct eth_tx_parse_bd parse_bd; 2524 struct eth_tx_parse_bd_e1x parse_bd_e1x;
2525 struct eth_tx_parse_bd_e2 parse_bd_e2;
2128 struct eth_tx_next_bd next_bd; 2526 struct eth_tx_next_bd next_bd;
2129}; 2527};
2130 2528
2529
2131/* 2530/*
2132 * The eth storm context of Xstorm 2531 * The eth storm context of Xstorm
2133 */ 2532 */
2134struct xstorm_eth_st_context { 2533struct xstorm_eth_st_context {
2135 u32 tx_bd_page_base_lo; 2534 u32 reserved0[60];
2136 u32 tx_bd_page_base_hi;
2137#if defined(__BIG_ENDIAN)
2138 u16 tx_bd_cons;
2139 u8 statistics_data;
2140#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0)
2141#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0
2142#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7)
2143#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7
2144 u8 __local_tx_bd_prod;
2145#elif defined(__LITTLE_ENDIAN)
2146 u8 __local_tx_bd_prod;
2147 u8 statistics_data;
2148#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0)
2149#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0
2150#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7)
2151#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7
2152 u16 tx_bd_cons;
2153#endif
2154 u32 __reserved1;
2155 u32 __reserved2;
2156#if defined(__BIG_ENDIAN)
2157 u8 __ram_cache_index;
2158 u8 __double_buffer_client;
2159 u16 __pkt_cons;
2160#elif defined(__LITTLE_ENDIAN)
2161 u16 __pkt_cons;
2162 u8 __double_buffer_client;
2163 u8 __ram_cache_index;
2164#endif
2165#if defined(__BIG_ENDIAN)
2166 u16 __statistics_address;
2167 u16 __gso_next;
2168#elif defined(__LITTLE_ENDIAN)
2169 u16 __gso_next;
2170 u16 __statistics_address;
2171#endif
2172#if defined(__BIG_ENDIAN)
2173 u8 __local_tx_bd_cons;
2174 u8 safc_group_num;
2175 u8 safc_group_en;
2176 u8 __is_eth_conn;
2177#elif defined(__LITTLE_ENDIAN)
2178 u8 __is_eth_conn;
2179 u8 safc_group_en;
2180 u8 safc_group_num;
2181 u8 __local_tx_bd_cons;
2182#endif
2183 union eth_tx_bd_types __bds[13];
2184}; 2535};
2185 2536
2186/* 2537/*
2187 * The eth storm context of Cstorm 2538 * The eth storm context of Cstorm
2188 */ 2539 */
2189struct cstorm_eth_st_context { 2540struct cstorm_eth_st_context {
2190#if defined(__BIG_ENDIAN) 2541 u32 __reserved0[4];
2191 u16 __reserved0;
2192 u8 sb_index_number;
2193 u8 status_block_id;
2194#elif defined(__LITTLE_ENDIAN)
2195 u8 status_block_id;
2196 u8 sb_index_number;
2197 u16 __reserved0;
2198#endif
2199 u32 __reserved1[3];
2200}; 2542};
2201 2543
2202/* 2544/*
@@ -2244,103 +2586,114 @@ struct eth_tx_doorbell {
2244 2586
2245 2587
2246/* 2588/*
2247 * cstorm default status block, generated by ustorm 2589 * client init fc data
2248 */
2249struct cstorm_def_status_block_u {
2250 __le16 index_values[HC_USTORM_DEF_SB_NUM_INDICES];
2251 __le16 status_block_index;
2252 u8 func;
2253 u8 status_block_id;
2254 __le32 __flags;
2255};
2256
2257/*
2258 * cstorm default status block, generated by cstorm
2259 */
2260struct cstorm_def_status_block_c {
2261 __le16 index_values[HC_CSTORM_DEF_SB_NUM_INDICES];
2262 __le16 status_block_index;
2263 u8 func;
2264 u8 status_block_id;
2265 __le32 __flags;
2266};
2267
2268/*
2269 * xstorm status block
2270 */ 2590 */
2271struct xstorm_def_status_block { 2591struct client_init_fc_data {
2272 __le16 index_values[HC_XSTORM_DEF_SB_NUM_INDICES]; 2592 __le16 cqe_pause_thr_low;
2273 __le16 status_block_index; 2593 __le16 cqe_pause_thr_high;
2274 u8 func; 2594 __le16 bd_pause_thr_low;
2275 u8 status_block_id; 2595 __le16 bd_pause_thr_high;
2276 __le32 __flags; 2596 __le16 sge_pause_thr_low;
2597 __le16 sge_pause_thr_high;
2598 __le16 rx_cos_mask;
2599 u8 safc_group_num;
2600 u8 safc_group_en_flg;
2601 u8 traffic_type;
2602 u8 reserved0;
2603 __le16 reserved1;
2604 __le32 reserved2;
2277}; 2605};
2278 2606
2279/*
2280 * tstorm status block
2281 */
2282struct tstorm_def_status_block {
2283 __le16 index_values[HC_TSTORM_DEF_SB_NUM_INDICES];
2284 __le16 status_block_index;
2285 u8 func;
2286 u8 status_block_id;
2287 __le32 __flags;
2288};
2289 2607
2290/* 2608/*
2291 * host status block 2609 * client init ramrod data
2292 */ 2610 */
2293struct host_def_status_block { 2611struct client_init_general_data {
2294 struct atten_def_status_block atten_status_block; 2612 u8 client_id;
2295 struct cstorm_def_status_block_u u_def_status_block; 2613 u8 statistics_counter_id;
2296 struct cstorm_def_status_block_c c_def_status_block; 2614 u8 statistics_en_flg;
2297 struct xstorm_def_status_block x_def_status_block; 2615 u8 is_fcoe_flg;
2298 struct tstorm_def_status_block t_def_status_block; 2616 u8 activate_flg;
2617 u8 sp_client_id;
2618 __le16 reserved0;
2619 __le32 reserved1[2];
2299}; 2620};
2300 2621
2301 2622
2302/* 2623/*
2303 * cstorm status block, generated by ustorm 2624 * client init rx data
2304 */ 2625 */
2305struct cstorm_status_block_u { 2626struct client_init_rx_data {
2306 __le16 index_values[HC_USTORM_SB_NUM_INDICES]; 2627 u8 tpa_en_flg;
2307 __le16 status_block_index; 2628 u8 vmqueue_mode_en_flg;
2308 u8 func; 2629 u8 extra_data_over_sgl_en_flg;
2630 u8 cache_line_alignment_log_size;
2631 u8 enable_dynamic_hc;
2632 u8 max_sges_for_packet;
2633 u8 client_qzone_id;
2634 u8 drop_ip_cs_err_flg;
2635 u8 drop_tcp_cs_err_flg;
2636 u8 drop_ttl0_flg;
2637 u8 drop_udp_cs_err_flg;
2638 u8 inner_vlan_removal_enable_flg;
2639 u8 outer_vlan_removal_enable_flg;
2309 u8 status_block_id; 2640 u8 status_block_id;
2310 __le32 __flags; 2641 u8 rx_sb_index_number;
2642 u8 reserved0[3];
2643 __le16 bd_buff_size;
2644 __le16 sge_buff_size;
2645 __le16 mtu;
2646 struct regpair bd_page_base;
2647 struct regpair sge_page_base;
2648 struct regpair cqe_page_base;
2649 u8 is_leading_rss;
2650 u8 is_approx_mcast;
2651 __le16 max_agg_size;
2652 __le32 reserved2[3];
2653};
2654
2655/*
2656 * client init tx data
2657 */
2658struct client_init_tx_data {
2659 u8 enforce_security_flg;
2660 u8 tx_status_block_id;
2661 u8 tx_sb_index_number;
2662 u8 reserved0;
2663 __le16 mtu;
2664 __le16 reserved1;
2665 struct regpair tx_bd_page_base;
2666 __le32 reserved2[2];
2311}; 2667};
2312 2668
2313/* 2669/*
2314 * cstorm status block, generated by cstorm 2670 * client init ramrod data
2315 */ 2671 */
2316struct cstorm_status_block_c { 2672struct client_init_ramrod_data {
2317 __le16 index_values[HC_CSTORM_SB_NUM_INDICES]; 2673 struct client_init_general_data general;
2318 __le16 status_block_index; 2674 struct client_init_rx_data rx;
2319 u8 func; 2675 struct client_init_tx_data tx;
2320 u8 status_block_id; 2676 struct client_init_fc_data fc;
2321 __le32 __flags;
2322}; 2677};
2323 2678
2679
2324/* 2680/*
2325 * host status block 2681 * The data contain client ID need to the ramrod
2326 */ 2682 */
2327struct host_status_block { 2683struct eth_common_ramrod_data {
2328 struct cstorm_status_block_u u_status_block; 2684 u32 client_id;
2329 struct cstorm_status_block_c c_status_block; 2685 u32 reserved1;
2330}; 2686};
2331 2687
2332 2688
2333/* 2689/*
2334 * The data for RSS setup ramrod 2690 * union for sgl and raw data.
2335 */ 2691 */
2336struct eth_client_setup_ramrod_data { 2692union eth_sgl_or_raw_data {
2337 u32 client_id; 2693 __le16 sgl[8];
2338 u8 is_rdma; 2694 u32 raw_data[4];
2339 u8 is_fcoe;
2340 u16 reserved1;
2341}; 2695};
2342 2696
2343
2344/* 2697/*
2345 * regular eth FP CQE parameters struct 2698 * regular eth FP CQE parameters struct
2346 */ 2699 */
@@ -2358,8 +2711,8 @@ struct eth_fast_path_rx_cqe {
2358#define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4 2711#define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4
2359#define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5) 2712#define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5)
2360#define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5 2713#define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5
2361#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) 2714#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x3<<6)
2362#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6 2715#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 6
2363 u8 status_flags; 2716 u8 status_flags;
2364#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) 2717#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
2365#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 2718#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
@@ -2380,7 +2733,7 @@ struct eth_fast_path_rx_cqe {
2380 __le16 pkt_len; 2733 __le16 pkt_len;
2381 __le16 len_on_bd; 2734 __le16 len_on_bd;
2382 struct parsing_flags pars_flags; 2735 struct parsing_flags pars_flags;
2383 __le16 sgl[8]; 2736 union eth_sgl_or_raw_data sgl_or_raw_data;
2384}; 2737};
2385 2738
2386 2739
@@ -2392,11 +2745,10 @@ struct eth_halt_ramrod_data {
2392 u32 reserved0; 2745 u32 reserved0;
2393}; 2746};
2394 2747
2395
2396/* 2748/*
2397 * The data for statistics query ramrod 2749 * The data for statistics query ramrod
2398 */ 2750 */
2399struct eth_query_ramrod_data { 2751struct common_query_ramrod_data {
2400#if defined(__BIG_ENDIAN) 2752#if defined(__BIG_ENDIAN)
2401 u8 reserved0; 2753 u8 reserved0;
2402 u8 collect_port; 2754 u8 collect_port;
@@ -2479,9 +2831,9 @@ struct spe_hdr {
2479 __le16 type; 2831 __le16 type;
2480#define SPE_HDR_CONN_TYPE (0xFF<<0) 2832#define SPE_HDR_CONN_TYPE (0xFF<<0)
2481#define SPE_HDR_CONN_TYPE_SHIFT 0 2833#define SPE_HDR_CONN_TYPE_SHIFT 0
2482#define SPE_HDR_COMMON_RAMROD (0xFF<<8) 2834#define SPE_HDR_FUNCTION_ID (0xFF<<8)
2483#define SPE_HDR_COMMON_RAMROD_SHIFT 8 2835#define SPE_HDR_FUNCTION_ID_SHIFT 8
2484 __le16 reserved; 2836 __le16 reserved1;
2485}; 2837};
2486 2838
2487/* 2839/*
@@ -2489,12 +2841,10 @@ struct spe_hdr {
2489 */ 2841 */
2490union eth_specific_data { 2842union eth_specific_data {
2491 u8 protocol_data[8]; 2843 u8 protocol_data[8];
2492 struct regpair mac_config_addr; 2844 struct regpair client_init_ramrod_init_data;
2493 struct eth_client_setup_ramrod_data client_setup_ramrod_data;
2494 struct eth_halt_ramrod_data halt_ramrod_data; 2845 struct eth_halt_ramrod_data halt_ramrod_data;
2495 struct regpair leading_cqe_addr;
2496 struct regpair update_data_addr; 2846 struct regpair update_data_addr;
2497 struct eth_query_ramrod_data query_ramrod_data; 2847 struct eth_common_ramrod_data common_ramrod_data;
2498}; 2848};
2499 2849
2500/* 2850/*
@@ -2519,7 +2869,7 @@ struct eth_tx_bds_array {
2519 */ 2869 */
2520struct tstorm_eth_function_common_config { 2870struct tstorm_eth_function_common_config {
2521#if defined(__BIG_ENDIAN) 2871#if defined(__BIG_ENDIAN)
2522 u8 leading_client_id; 2872 u8 reserved1;
2523 u8 rss_result_mask; 2873 u8 rss_result_mask;
2524 u16 config_flags; 2874 u16 config_flags;
2525#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) 2875#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
@@ -2532,16 +2882,12 @@ struct tstorm_eth_function_common_config {
2532#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 2882#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
2533#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) 2883#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
2534#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 2884#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
2535#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<7) 2885#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7)
2536#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 7 2886#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7
2537#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<8) 2887#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8)
2538#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8 2888#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8
2539#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9) 2889#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9)
2540#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9 2890#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9
2541#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<10)
2542#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 10
2543#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1F<<11)
2544#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 11
2545#elif defined(__LITTLE_ENDIAN) 2891#elif defined(__LITTLE_ENDIAN)
2546 u16 config_flags; 2892 u16 config_flags;
2547#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) 2893#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
@@ -2554,18 +2900,14 @@ struct tstorm_eth_function_common_config {
2554#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 2900#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
2555#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) 2901#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
2556#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 2902#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
2557#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<7) 2903#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7)
2558#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 7 2904#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7
2559#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<8) 2905#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8)
2560#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8 2906#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8
2561#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9) 2907#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9)
2562#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9 2908#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9
2563#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<10)
2564#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 10
2565#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1F<<11)
2566#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 11
2567 u8 rss_result_mask; 2909 u8 rss_result_mask;
2568 u8 leading_client_id; 2910 u8 reserved1;
2569#endif 2911#endif
2570 u16 vlan_id[2]; 2912 u16 vlan_id[2];
2571}; 2913};
@@ -2613,90 +2955,42 @@ struct mac_configuration_hdr {
2613 u8 length; 2955 u8 length;
2614 u8 offset; 2956 u8 offset;
2615 u16 client_id; 2957 u16 client_id;
2616 u32 reserved1; 2958 u16 echo;
2617}; 2959 u16 reserved1;
2618
2619/*
2620 * MAC address in list for ramrod
2621 */
2622struct tstorm_cam_entry {
2623 __le16 lsb_mac_addr;
2624 __le16 middle_mac_addr;
2625 __le16 msb_mac_addr;
2626 __le16 flags;
2627#define TSTORM_CAM_ENTRY_PORT_ID (0x1<<0)
2628#define TSTORM_CAM_ENTRY_PORT_ID_SHIFT 0
2629#define TSTORM_CAM_ENTRY_RSRVVAL0 (0x7<<1)
2630#define TSTORM_CAM_ENTRY_RSRVVAL0_SHIFT 1
2631#define TSTORM_CAM_ENTRY_RESERVED0 (0xFFF<<4)
2632#define TSTORM_CAM_ENTRY_RESERVED0_SHIFT 4
2633};
2634
2635/*
2636 * MAC filtering: CAM target table entry
2637 */
2638struct tstorm_cam_target_table_entry {
2639 u8 flags;
2640#define TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST (0x1<<0)
2641#define TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST_SHIFT 0
2642#define TSTORM_CAM_TARGET_TABLE_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<1)
2643#define TSTORM_CAM_TARGET_TABLE_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 1
2644#define TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE (0x1<<2)
2645#define TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE_SHIFT 2
2646#define TSTORM_CAM_TARGET_TABLE_ENTRY_RDMA_MAC (0x1<<3)
2647#define TSTORM_CAM_TARGET_TABLE_ENTRY_RDMA_MAC_SHIFT 3
2648#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0 (0xF<<4)
2649#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0_SHIFT 4
2650 u8 reserved1;
2651 u16 vlan_id;
2652 u32 clients_bit_vector;
2653}; 2960};
2654 2961
2655/* 2962/*
2656 * MAC address in list for ramrod 2963 * MAC address in list for ramrod
2657 */ 2964 */
2658struct mac_configuration_entry { 2965struct mac_configuration_entry {
2659 struct tstorm_cam_entry cam_entry;
2660 struct tstorm_cam_target_table_entry target_table_entry;
2661};
2662
2663/*
2664 * MAC filtering configuration command
2665 */
2666struct mac_configuration_cmd {
2667 struct mac_configuration_hdr hdr;
2668 struct mac_configuration_entry config_table[64];
2669};
2670
2671
2672/*
2673 * MAC address in list for ramrod
2674 */
2675struct mac_configuration_entry_e1h {
2676 __le16 lsb_mac_addr; 2966 __le16 lsb_mac_addr;
2677 __le16 middle_mac_addr; 2967 __le16 middle_mac_addr;
2678 __le16 msb_mac_addr; 2968 __le16 msb_mac_addr;
2679 __le16 vlan_id; 2969 __le16 vlan_id;
2680 __le16 e1hov_id; 2970 u8 pf_id;
2681 u8 reserved0;
2682 u8 flags; 2971 u8 flags;
2683#define MAC_CONFIGURATION_ENTRY_E1H_PORT (0x1<<0) 2972#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1<<0)
2684#define MAC_CONFIGURATION_ENTRY_E1H_PORT_SHIFT 0 2973#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0
2685#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE (0x1<<1) 2974#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1<<1)
2686#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE_SHIFT 1 2975#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1
2687#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC (0x1<<2) 2976#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2)
2688#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC_SHIFT 2 2977#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2
2689#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED1 (0x1F<<3) 2978#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4)
2690#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED1_SHIFT 3 2979#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4
2980#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5)
2981#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5
2982#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6)
2983#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6
2984 u16 reserved0;
2691 u32 clients_bit_vector; 2985 u32 clients_bit_vector;
2692}; 2986};
2693 2987
2694/* 2988/*
2695 * MAC filtering configuration command 2989 * MAC filtering configuration command
2696 */ 2990 */
2697struct mac_configuration_cmd_e1h { 2991struct mac_configuration_cmd {
2698 struct mac_configuration_hdr hdr; 2992 struct mac_configuration_hdr hdr;
2699 struct mac_configuration_entry_e1h config_table[32]; 2993 struct mac_configuration_entry config_table[64];
2700}; 2994};
2701 2995
2702 2996
@@ -2709,65 +3003,6 @@ struct tstorm_eth_approximate_match_multicast_filtering {
2709 3003
2710 3004
2711/* 3005/*
2712 * Configuration parameters per client in Tstorm
2713 */
2714struct tstorm_eth_client_config {
2715#if defined(__BIG_ENDIAN)
2716 u8 reserved0;
2717 u8 statistics_counter_id;
2718 u16 mtu;
2719#elif defined(__LITTLE_ENDIAN)
2720 u16 mtu;
2721 u8 statistics_counter_id;
2722 u8 reserved0;
2723#endif
2724#if defined(__BIG_ENDIAN)
2725 u16 drop_flags;
2726#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0)
2727#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0
2728#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1)
2729#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1
2730#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2)
2731#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
2732#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
2733#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
2734#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2 (0xFFF<<4)
2735#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2_SHIFT 4
2736 u16 config_flags;
2737#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
2738#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0
2739#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE (0x1<<1)
2740#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
2741#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
2742#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
2743#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x1FFF<<3)
2744#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 3
2745#elif defined(__LITTLE_ENDIAN)
2746 u16 config_flags;
2747#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
2748#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0
2749#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE (0x1<<1)
2750#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
2751#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
2752#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
2753#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x1FFF<<3)
2754#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 3
2755 u16 drop_flags;
2756#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0)
2757#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0
2758#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1)
2759#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1
2760#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2)
2761#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
2762#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
2763#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
2764#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2 (0xFFF<<4)
2765#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2_SHIFT 4
2766#endif
2767};
2768
2769
2770/*
2771 * MAC filtering configuration parameters per port in Tstorm 3006 * MAC filtering configuration parameters per port in Tstorm
2772 */ 3007 */
2773struct tstorm_eth_mac_filter_config { 3008struct tstorm_eth_mac_filter_config {
@@ -2777,14 +3012,14 @@ struct tstorm_eth_mac_filter_config {
2777 u32 mcast_accept_all; 3012 u32 mcast_accept_all;
2778 u32 bcast_drop_all; 3013 u32 bcast_drop_all;
2779 u32 bcast_accept_all; 3014 u32 bcast_accept_all;
2780 u32 strict_vlan;
2781 u32 vlan_filter[2]; 3015 u32 vlan_filter[2];
3016 u32 unmatched_unicast;
2782 u32 reserved; 3017 u32 reserved;
2783}; 3018};
2784 3019
2785 3020
2786/* 3021/*
2787 * common flag to indicate existance of TPA. 3022 * common flag to indicate existence of TPA.
2788 */ 3023 */
2789struct tstorm_eth_tpa_exist { 3024struct tstorm_eth_tpa_exist {
2790#if defined(__BIG_ENDIAN) 3025#if defined(__BIG_ENDIAN)
@@ -2801,41 +3036,6 @@ struct tstorm_eth_tpa_exist {
2801 3036
2802 3037
2803/* 3038/*
2804 * rx rings pause data for E1h only
2805 */
2806struct ustorm_eth_rx_pause_data_e1h {
2807#if defined(__BIG_ENDIAN)
2808 u16 bd_thr_low;
2809 u16 cqe_thr_low;
2810#elif defined(__LITTLE_ENDIAN)
2811 u16 cqe_thr_low;
2812 u16 bd_thr_low;
2813#endif
2814#if defined(__BIG_ENDIAN)
2815 u16 cos;
2816 u16 sge_thr_low;
2817#elif defined(__LITTLE_ENDIAN)
2818 u16 sge_thr_low;
2819 u16 cos;
2820#endif
2821#if defined(__BIG_ENDIAN)
2822 u16 bd_thr_high;
2823 u16 cqe_thr_high;
2824#elif defined(__LITTLE_ENDIAN)
2825 u16 cqe_thr_high;
2826 u16 bd_thr_high;
2827#endif
2828#if defined(__BIG_ENDIAN)
2829 u16 reserved0;
2830 u16 sge_thr_high;
2831#elif defined(__LITTLE_ENDIAN)
2832 u16 sge_thr_high;
2833 u16 reserved0;
2834#endif
2835};
2836
2837
2838/*
2839 * Three RX producers for ETH 3039 * Three RX producers for ETH
2840 */ 3040 */
2841struct ustorm_eth_rx_producers { 3041struct ustorm_eth_rx_producers {
@@ -2857,6 +3057,18 @@ struct ustorm_eth_rx_producers {
2857 3057
2858 3058
2859/* 3059/*
3060 * cfc delete event data
3061 */
3062struct cfc_del_event_data {
3063 u32 cid;
3064 u8 error;
3065 u8 reserved0;
3066 u16 reserved1;
3067 u32 reserved2;
3068};
3069
3070
3071/*
2860 * per-port SAFC demo variables 3072 * per-port SAFC demo variables
2861 */ 3073 */
2862struct cmng_flags_per_port { 3074struct cmng_flags_per_port {
@@ -2872,8 +3084,10 @@ struct cmng_flags_per_port {
2872#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL_SHIFT 3 3084#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL_SHIFT 3
2873#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<4) 3085#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<4)
2874#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 4 3086#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 4
2875#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0x7FFFFFF<<5) 3087#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<5)
2876#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 5 3088#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 5
3089#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0x3FFFFFF<<6)
3090#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 6
2877}; 3091};
2878 3092
2879 3093
@@ -2907,30 +3121,92 @@ struct safc_struct_per_port {
2907 u8 __reserved0; 3121 u8 __reserved0;
2908 u16 __reserved1; 3122 u16 __reserved1;
2909#endif 3123#endif
3124 u8 cos_to_traffic_types[MAX_COS_NUMBER];
3125 u32 __reserved2;
2910 u16 cos_to_pause_mask[NUM_OF_SAFC_BITS]; 3126 u16 cos_to_pause_mask[NUM_OF_SAFC_BITS];
2911}; 3127};
2912 3128
2913/* 3129/*
3130 * per-port PFC variables
3131 */
3132struct pfc_struct_per_port {
3133 u8 priority_to_traffic_types[MAX_PFC_PRIORITIES];
3134#if defined(__BIG_ENDIAN)
3135 u16 pfc_pause_quanta_in_nanosec;
3136 u8 __reserved0;
3137 u8 priority_non_pausable_mask;
3138#elif defined(__LITTLE_ENDIAN)
3139 u8 priority_non_pausable_mask;
3140 u8 __reserved0;
3141 u16 pfc_pause_quanta_in_nanosec;
3142#endif
3143};
3144
3145/*
3146 * Priority and cos
3147 */
3148struct priority_cos {
3149#if defined(__BIG_ENDIAN)
3150 u16 reserved1;
3151 u8 cos;
3152 u8 priority;
3153#elif defined(__LITTLE_ENDIAN)
3154 u8 priority;
3155 u8 cos;
3156 u16 reserved1;
3157#endif
3158 u32 reserved2;
3159};
3160
3161/*
2914 * Per-port congestion management variables 3162 * Per-port congestion management variables
2915 */ 3163 */
2916struct cmng_struct_per_port { 3164struct cmng_struct_per_port {
2917 struct rate_shaping_vars_per_port rs_vars; 3165 struct rate_shaping_vars_per_port rs_vars;
2918 struct fairness_vars_per_port fair_vars; 3166 struct fairness_vars_per_port fair_vars;
2919 struct safc_struct_per_port safc_vars; 3167 struct safc_struct_per_port safc_vars;
3168 struct pfc_struct_per_port pfc_vars;
3169#if defined(__BIG_ENDIAN)
3170 u16 __reserved1;
3171 u8 dcb_enabled;
3172 u8 llfc_mode;
3173#elif defined(__LITTLE_ENDIAN)
3174 u8 llfc_mode;
3175 u8 dcb_enabled;
3176 u16 __reserved1;
3177#endif
3178 struct priority_cos
3179 traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES];
2920 struct cmng_flags_per_port flags; 3180 struct cmng_flags_per_port flags;
2921}; 3181};
2922 3182
2923 3183
3184
3185/*
3186 * Dynamic HC counters set by the driver
3187 */
3188struct hc_dynamic_drv_counter {
3189 u32 val[HC_SB_MAX_DYNAMIC_INDICES];
3190};
3191
3192/*
3193 * zone A per-queue data
3194 */
3195struct cstorm_queue_zone_data {
3196 struct hc_dynamic_drv_counter hc_dyn_drv_cnt;
3197 struct regpair reserved[2];
3198};
3199
2924/* 3200/*
2925 * Dynamic host coalescing init parameters 3201 * Dynamic host coalescing init parameters
2926 */ 3202 */
2927struct dynamic_hc_config { 3203struct dynamic_hc_config {
2928 u32 threshold[3]; 3204 u32 threshold[3];
2929 u8 shift_per_protocol[HC_USTORM_SB_NUM_INDICES]; 3205 u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES];
2930 u8 hc_timeout0[HC_USTORM_SB_NUM_INDICES]; 3206 u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES];
2931 u8 hc_timeout1[HC_USTORM_SB_NUM_INDICES]; 3207 u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES];
2932 u8 hc_timeout2[HC_USTORM_SB_NUM_INDICES]; 3208 u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES];
2933 u8 hc_timeout3[HC_USTORM_SB_NUM_INDICES]; 3209 u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES];
2934}; 3210};
2935 3211
2936 3212
@@ -2954,7 +3230,7 @@ struct xstorm_per_client_stats {
2954 * Common statistics collected by the Xstorm (per port) 3230 * Common statistics collected by the Xstorm (per port)
2955 */ 3231 */
2956struct xstorm_common_stats { 3232struct xstorm_common_stats {
2957 struct xstorm_per_client_stats client_statistics[MAX_X_STAT_COUNTER_ID]; 3233 struct xstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
2958}; 3234};
2959 3235
2960/* 3236/*
@@ -2991,7 +3267,7 @@ struct tstorm_per_client_stats {
2991 */ 3267 */
2992struct tstorm_common_stats { 3268struct tstorm_common_stats {
2993 struct tstorm_per_port_stats port_statistics; 3269 struct tstorm_per_port_stats port_statistics;
2994 struct tstorm_per_client_stats client_statistics[MAX_T_STAT_COUNTER_ID]; 3270 struct tstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
2995}; 3271};
2996 3272
2997/* 3273/*
@@ -3012,7 +3288,7 @@ struct ustorm_per_client_stats {
3012 * Protocol-common statistics collected by the Ustorm 3288 * Protocol-common statistics collected by the Ustorm
3013 */ 3289 */
3014struct ustorm_common_stats { 3290struct ustorm_common_stats {
3015 struct ustorm_per_client_stats client_statistics[MAX_U_STAT_COUNTER_ID]; 3291 struct ustorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
3016}; 3292};
3017 3293
3018/* 3294/*
@@ -3026,6 +3302,70 @@ struct eth_stats_query {
3026 3302
3027 3303
3028/* 3304/*
3305 * set mac event data
3306 */
3307struct set_mac_event_data {
3308 u16 echo;
3309 u16 reserved0;
3310 u32 reserved1;
3311 u32 reserved2;
3312};
3313
3314/*
3315 * union for all event ring message types
3316 */
3317union event_data {
3318 struct set_mac_event_data set_mac_event;
3319 struct cfc_del_event_data cfc_del_event;
3320};
3321
3322
3323/*
3324 * per PF event ring data
3325 */
3326struct event_ring_data {
3327 struct regpair base_addr;
3328#if defined(__BIG_ENDIAN)
3329 u8 index_id;
3330 u8 sb_id;
3331 u16 producer;
3332#elif defined(__LITTLE_ENDIAN)
3333 u16 producer;
3334 u8 sb_id;
3335 u8 index_id;
3336#endif
3337 u32 reserved0;
3338};
3339
3340
3341/*
3342 * event ring message element (each element is 128 bits)
3343 */
3344struct event_ring_msg {
3345 u8 opcode;
3346 u8 reserved0;
3347 u16 reserved1;
3348 union event_data data;
3349};
3350
3351/*
3352 * event ring next page element (128 bits)
3353 */
3354struct event_ring_next {
3355 struct regpair addr;
3356 u32 reserved[2];
3357};
3358
3359/*
3360 * union for event ring element types (each element is 128 bits)
3361 */
3362union event_ring_elem {
3363 struct event_ring_msg message;
3364 struct event_ring_next next_page;
3365};
3366
3367
3368/*
3029 * per-vnic fairness variables 3369 * per-vnic fairness variables
3030 */ 3370 */
3031struct fairness_vars_per_vn { 3371struct fairness_vars_per_vn {
@@ -3037,6 +3377,25 @@ struct fairness_vars_per_vn {
3037 3377
3038 3378
3039/* 3379/*
3380 * The data for flow control configuration
3381 */
3382struct flow_control_configuration {
3383 struct priority_cos
3384 traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES];
3385#if defined(__BIG_ENDIAN)
3386 u16 reserved1;
3387 u8 dcb_version;
3388 u8 dcb_enabled;
3389#elif defined(__LITTLE_ENDIAN)
3390 u8 dcb_enabled;
3391 u8 dcb_version;
3392 u16 reserved1;
3393#endif
3394 u32 reserved2;
3395};
3396
3397
3398/*
3040 * FW version stored in the Xstorm RAM 3399 * FW version stored in the Xstorm RAM
3041 */ 3400 */
3042struct fw_version { 3401struct fw_version {
@@ -3064,6 +3423,137 @@ struct fw_version {
3064 3423
3065 3424
3066/* 3425/*
3426 * Dynamic Host-Coalescing - Driver(host) counters
3427 */
3428struct hc_dynamic_sb_drv_counters {
3429 u32 dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES];
3430};
3431
3432
3433/*
3434 * 2 bytes. configuration/state parameters for a single protocol index
3435 */
3436struct hc_index_data {
3437#if defined(__BIG_ENDIAN)
3438 u8 flags;
3439#define HC_INDEX_DATA_SM_ID (0x1<<0)
3440#define HC_INDEX_DATA_SM_ID_SHIFT 0
3441#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
3442#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
3443#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
3444#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
3445#define HC_INDEX_DATA_RESERVE (0x1F<<3)
3446#define HC_INDEX_DATA_RESERVE_SHIFT 3
3447 u8 timeout;
3448#elif defined(__LITTLE_ENDIAN)
3449 u8 timeout;
3450 u8 flags;
3451#define HC_INDEX_DATA_SM_ID (0x1<<0)
3452#define HC_INDEX_DATA_SM_ID_SHIFT 0
3453#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
3454#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
3455#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
3456#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
3457#define HC_INDEX_DATA_RESERVE (0x1F<<3)
3458#define HC_INDEX_DATA_RESERVE_SHIFT 3
3459#endif
3460};
3461
3462
3463/*
3464 * HC state-machine
3465 */
3466struct hc_status_block_sm {
3467#if defined(__BIG_ENDIAN)
3468 u8 igu_seg_id;
3469 u8 igu_sb_id;
3470 u8 timer_value;
3471 u8 __flags;
3472#elif defined(__LITTLE_ENDIAN)
3473 u8 __flags;
3474 u8 timer_value;
3475 u8 igu_sb_id;
3476 u8 igu_seg_id;
3477#endif
3478 u32 time_to_expire;
3479};
3480
3481/*
3482 * hold PCI identification variables- used in various places in firmware
3483 */
3484struct pci_entity {
3485#if defined(__BIG_ENDIAN)
3486 u8 vf_valid;
3487 u8 vf_id;
3488 u8 vnic_id;
3489 u8 pf_id;
3490#elif defined(__LITTLE_ENDIAN)
3491 u8 pf_id;
3492 u8 vnic_id;
3493 u8 vf_id;
3494 u8 vf_valid;
3495#endif
3496};
3497
3498/*
3499 * The fast-path status block meta-data, common to all chips
3500 */
3501struct hc_sb_data {
3502 struct regpair host_sb_addr;
3503 struct hc_status_block_sm state_machine[HC_SB_MAX_SM];
3504 struct pci_entity p_func;
3505#if defined(__BIG_ENDIAN)
3506 u8 rsrv0;
3507 u8 dhc_qzone_id;
3508 u8 __dynamic_hc_level;
3509 u8 same_igu_sb_1b;
3510#elif defined(__LITTLE_ENDIAN)
3511 u8 same_igu_sb_1b;
3512 u8 __dynamic_hc_level;
3513 u8 dhc_qzone_id;
3514 u8 rsrv0;
3515#endif
3516 struct regpair rsrv1[2];
3517};
3518
3519
3520/*
3521 * The fast-path status block meta-data
3522 */
3523struct hc_sp_status_block_data {
3524 struct regpair host_sb_addr;
3525#if defined(__BIG_ENDIAN)
3526 u16 rsrv;
3527 u8 igu_seg_id;
3528 u8 igu_sb_id;
3529#elif defined(__LITTLE_ENDIAN)
3530 u8 igu_sb_id;
3531 u8 igu_seg_id;
3532 u16 rsrv;
3533#endif
3534 struct pci_entity p_func;
3535};
3536
3537
3538/*
3539 * The fast-path status block meta-data
3540 */
3541struct hc_status_block_data_e1x {
3542 struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X];
3543 struct hc_sb_data common;
3544};
3545
3546
3547/*
3548 * The fast-path status block meta-data
3549 */
3550struct hc_status_block_data_e2 {
3551 struct hc_index_data index_data[HC_SB_MAX_INDICES_E2];
3552 struct hc_sb_data common;
3553};
3554
3555
3556/*
3067 * FW version stored in first line of pram 3557 * FW version stored in first line of pram
3068 */ 3558 */
3069struct pram_fw_version { 3559struct pram_fw_version {
@@ -3086,11 +3576,21 @@ struct pram_fw_version {
3086 3576
3087 3577
3088/* 3578/*
3579 * Ethernet slow path element
3580 */
3581union protocol_common_specific_data {
3582 u8 protocol_data[8];
3583 struct regpair phy_address;
3584 struct regpair mac_config_addr;
3585 struct common_query_ramrod_data query_ramrod_data;
3586};
3587
3588/*
3089 * The send queue element 3589 * The send queue element
3090 */ 3590 */
3091struct protocol_common_spe { 3591struct protocol_common_spe {
3092 struct spe_hdr hdr; 3592 struct spe_hdr hdr;
3093 struct regpair phy_address; 3593 union protocol_common_specific_data data;
3094}; 3594};
3095 3595
3096 3596
@@ -3123,7 +3623,7 @@ struct rate_shaping_vars_per_vn {
3123 */ 3623 */
3124struct slow_path_element { 3624struct slow_path_element {
3125 struct spe_hdr hdr; 3625 struct spe_hdr hdr;
3126 u8 protocol_data[8]; 3626 struct regpair protocol_data;
3127}; 3627};
3128 3628
3129 3629
@@ -3136,3 +3636,97 @@ struct stats_indication_flags {
3136}; 3636};
3137 3637
3138 3638
3639/*
3640 * per-port PFC variables
3641 */
3642struct storm_pfc_struct_per_port {
3643#if defined(__BIG_ENDIAN)
3644 u16 mid_mac_addr;
3645 u16 msb_mac_addr;
3646#elif defined(__LITTLE_ENDIAN)
3647 u16 msb_mac_addr;
3648 u16 mid_mac_addr;
3649#endif
3650#if defined(__BIG_ENDIAN)
3651 u16 pfc_pause_quanta_in_nanosec;
3652 u16 lsb_mac_addr;
3653#elif defined(__LITTLE_ENDIAN)
3654 u16 lsb_mac_addr;
3655 u16 pfc_pause_quanta_in_nanosec;
3656#endif
3657};
3658
3659/*
3660 * Per-port congestion management variables
3661 */
3662struct storm_cmng_struct_per_port {
3663 struct storm_pfc_struct_per_port pfc_vars;
3664};
3665
3666
3667/*
3668 * zone A per-queue data
3669 */
3670struct tstorm_queue_zone_data {
3671 struct regpair reserved[4];
3672};
3673
3674
3675/*
3676 * zone B per-VF data
3677 */
3678struct tstorm_vf_zone_data {
3679 struct regpair reserved;
3680};
3681
3682
3683/*
3684 * zone A per-queue data
3685 */
3686struct ustorm_queue_zone_data {
3687 struct ustorm_eth_rx_producers eth_rx_producers;
3688 struct regpair reserved[3];
3689};
3690
3691
3692/*
3693 * zone B per-VF data
3694 */
3695struct ustorm_vf_zone_data {
3696 struct regpair reserved;
3697};
3698
3699
3700/*
3701 * data per VF-PF channel
3702 */
3703struct vf_pf_channel_data {
3704#if defined(__BIG_ENDIAN)
3705 u16 reserved0;
3706 u8 valid;
3707 u8 state;
3708#elif defined(__LITTLE_ENDIAN)
3709 u8 state;
3710 u8 valid;
3711 u16 reserved0;
3712#endif
3713 u32 reserved1;
3714};
3715
3716
3717/*
3718 * zone A per-queue data
3719 */
3720struct xstorm_queue_zone_data {
3721 struct regpair reserved[4];
3722};
3723
3724
3725/*
3726 * zone B per-VF data
3727 */
3728struct xstorm_vf_zone_data {
3729 struct regpair reserved;
3730};
3731
3732#endif /* BNX2X_HSI_H */
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index 65b26cbfe3e7..d5399206f66e 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -1,7 +1,7 @@
1/* bnx2x_init.h: Broadcom Everest network driver. 1/* bnx2x_init.h: Broadcom Everest network driver.
2 * Structures and macroes needed during the initialization. 2 * Structures and macroes needed during the initialization.
3 * 3 *
4 * Copyright (c) 2007-2009 Broadcom Corporation 4 * Copyright (c) 2007-2011 Broadcom Corporation
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -97,6 +97,9 @@
97#define MISC_AEU_BLOCK 35 97#define MISC_AEU_BLOCK 35
98#define PGLUE_B_BLOCK 36 98#define PGLUE_B_BLOCK 36
99#define IGU_BLOCK 37 99#define IGU_BLOCK 37
100#define ATC_BLOCK 38
101#define QM_4PORT_BLOCK 39
102#define XSEM_4PORT_BLOCK 40
100 103
101 104
102/* Returns the index of start or end of a specific block stage in ops array*/ 105/* Returns the index of start or end of a specific block stage in ops array*/
@@ -148,5 +151,266 @@ union init_op {
148 struct raw_op raw; 151 struct raw_op raw;
149}; 152};
150 153
154#define INITOP_SET 0 /* set the HW directly */
155#define INITOP_CLEAR 1 /* clear the HW directly */
156#define INITOP_INIT 2 /* set the init-value array */
157
158/****************************************************************************
159* ILT management
160****************************************************************************/
161struct ilt_line {
162 dma_addr_t page_mapping;
163 void *page;
164 u32 size;
165};
166
167struct ilt_client_info {
168 u32 page_size;
169 u16 start;
170 u16 end;
171 u16 client_num;
172 u16 flags;
173#define ILT_CLIENT_SKIP_INIT 0x1
174#define ILT_CLIENT_SKIP_MEM 0x2
175};
176
177struct bnx2x_ilt {
178 u32 start_line;
179 struct ilt_line *lines;
180 struct ilt_client_info clients[4];
181#define ILT_CLIENT_CDU 0
182#define ILT_CLIENT_QM 1
183#define ILT_CLIENT_SRC 2
184#define ILT_CLIENT_TM 3
185};
186
187/****************************************************************************
188* SRC configuration
189****************************************************************************/
190struct src_ent {
191 u8 opaque[56];
192 u64 next;
193};
194
195/****************************************************************************
196* Parity configuration
197****************************************************************************/
198#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \
199{ \
200 block##_REG_##block##_PRTY_MASK, \
201 block##_REG_##block##_PRTY_STS_CLR, \
202 en_mask, {m1, m1h, m2}, #block \
203}
204
205#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \
206{ \
207 block##_REG_##block##_PRTY_MASK_0, \
208 block##_REG_##block##_PRTY_STS_CLR_0, \
209 en_mask, {m1, m1h, m2}, #block"_0" \
210}
211
212#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \
213{ \
214 block##_REG_##block##_PRTY_MASK_1, \
215 block##_REG_##block##_PRTY_STS_CLR_1, \
216 en_mask, {m1, m1h, m2}, #block"_1" \
217}
218
219static const struct {
220 u32 mask_addr;
221 u32 sts_clr_addr;
222 u32 en_mask; /* Mask to enable parity attentions */
223 struct {
224 u32 e1; /* 57710 */
225 u32 e1h; /* 57711 */
226 u32 e2; /* 57712 */
227 } reg_mask; /* Register mask (all valid bits) */
228 char name[7]; /* Block's longest name is 6 characters long
229 * (name + suffix)
230 */
231} bnx2x_blocks_parity_data[] = {
232 /* bit 19 masked */
233 /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
234 /* bit 5,18,20-31 */
235 /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
236 /* bit 5 */
237 /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
238 /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
239 /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
240
241 /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
242 * want to handle "system kill" flow at the moment.
243 */
244 BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff),
245 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
246 BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
247 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
248 BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff),
249 BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
250 BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff),
251 BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
252 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
253 GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0,
254 {0xf, 0xf, 0xf}, "UPB"},
255 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
256 GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
257 {0xf, 0xf, 0xf}, "XPB"},
258 BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7),
259 BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f),
260 BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf),
261 BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1),
262 BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
263 BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
264 BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
265 BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
266 BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
267 BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
268 BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
269 BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
270 BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f),
271 BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
272 BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f),
273 BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
274 BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f),
275 BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
276 BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f),
277};
278
279
280/* [28] MCP Latched rom_parity
281 * [29] MCP Latched ump_rx_parity
282 * [30] MCP Latched ump_tx_parity
283 * [31] MCP Latched scpad_parity
284 */
285#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
286 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
287 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
288 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
289 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
290
291/* Below registers control the MCP parity attention output. When
292 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
293 * enabled, when cleared - disabled.
294 */
295static const u32 mcp_attn_ctl_regs[] = {
296 MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
297 MISC_REG_AEU_ENABLE4_NIG_0,
298 MISC_REG_AEU_ENABLE4_PXP_0,
299 MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
300 MISC_REG_AEU_ENABLE4_NIG_1,
301 MISC_REG_AEU_ENABLE4_PXP_1
302};
303
304static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
305{
306 int i;
307 u32 reg_val;
308
309 for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
310 reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
311
312 if (enable)
313 reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
314 else
315 reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
316
317 REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
318 }
319}
320
321static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
322{
323 if (CHIP_IS_E1(bp))
324 return bnx2x_blocks_parity_data[idx].reg_mask.e1;
325 else if (CHIP_IS_E1H(bp))
326 return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
327 else
328 return bnx2x_blocks_parity_data[idx].reg_mask.e2;
329}
330
331static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
332{
333 int i;
334
335 for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
336 u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
337
338 if (dis_mask) {
339 REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
340 dis_mask);
341 DP(NETIF_MSG_HW, "Setting parity mask "
342 "for %s to\t\t0x%x\n",
343 bnx2x_blocks_parity_data[i].name, dis_mask);
344 }
345 }
346
347 /* Disable MCP parity attentions */
348 bnx2x_set_mcp_parity(bp, false);
349}
350
351/**
352 * Clear the parity error status registers.
353 */
354static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
355{
356 int i;
357 u32 reg_val, mcp_aeu_bits =
358 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
359 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
360 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
361 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
362
363 /* Clear SEM_FAST parities */
364 REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
365 REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
366 REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
367 REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
368
369 for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
370 u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
371
372 if (reg_mask) {
373 reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
374 sts_clr_addr);
375 if (reg_val & reg_mask)
376 DP(NETIF_MSG_HW,
377 "Parity errors in %s: 0x%x\n",
378 bnx2x_blocks_parity_data[i].name,
379 reg_val & reg_mask);
380 }
381 }
382
383 /* Check if there were parity attentions in MCP */
384 reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
385 if (reg_val & mcp_aeu_bits)
386 DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
387 reg_val & mcp_aeu_bits);
388
389 /* Clear parity attentions in MCP:
390 * [7] clears Latched rom_parity
391 * [8] clears Latched ump_rx_parity
392 * [9] clears Latched ump_tx_parity
393 * [10] clears Latched scpad_parity (both ports)
394 */
395 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
396}
397
398static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
399{
400 int i;
401
402 for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
403 u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
404
405 if (reg_mask)
406 REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
407 bnx2x_blocks_parity_data[i].en_mask & reg_mask);
408 }
409
410 /* Enable MCP parity attentions */
411 bnx2x_set_mcp_parity(bp, true);
412}
413
414
151#endif /* BNX2X_INIT_H */ 415#endif /* BNX2X_INIT_H */
152 416
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index 2b1363a6fe78..aafd0232393f 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -2,7 +2,7 @@
2 * Static functions needed during the initialization. 2 * Static functions needed during the initialization.
3 * This file is "included" in bnx2x_main.c. 3 * This file is "included" in bnx2x_main.c.
4 * 4 *
5 * Copyright (c) 2007-2010 Broadcom Corporation 5 * Copyright (c) 2007-2011 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -16,7 +16,9 @@
16#define BNX2X_INIT_OPS_H 16#define BNX2X_INIT_OPS_H
17 17
18static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len); 18static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
19 19static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
20static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
21 u32 addr, u32 len);
20 22
21static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data, 23static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
22 u32 len) 24 u32 len)
@@ -151,6 +153,15 @@ static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
151 bnx2x_init_ind_wr(bp, addr, data, len); 153 bnx2x_init_ind_wr(bp, addr, data, len);
152} 154}
153 155
156static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi)
157{
158 u32 wb_write[2];
159
160 wb_write[0] = val_lo;
161 wb_write[1] = val_hi;
162 REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
163}
164
154static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off) 165static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
155{ 166{
156 const u8 *data = NULL; 167 const u8 *data = NULL;
@@ -477,18 +488,30 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
477 REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order); 488 REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
478 REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order); 489 REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
479 490
480 if (r_order == MAX_RD_ORD) 491 if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
481 REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00); 492 REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
482 493
483 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); 494 if (CHIP_IS_E2(bp))
495 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
496 else
497 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
484 498
485 if (CHIP_IS_E1H(bp)) { 499 if (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) {
486 /* MPS w_order optimal TH presently TH 500 /* MPS w_order optimal TH presently TH
487 * 128 0 0 2 501 * 128 0 0 2
488 * 256 1 1 3 502 * 256 1 1 3
489 * >=512 2 2 3 503 * >=512 2 2 3
490 */ 504 */
491 val = ((w_order == 0) ? 2 : 3); 505 /* DMAE is special */
506 if (CHIP_IS_E2(bp)) {
507 /* E2 can use optimal TH */
508 val = w_order;
509 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
510 } else {
511 val = ((w_order == 0) ? 2 : 3);
512 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
513 }
514
492 REG_WR(bp, PXP2_REG_WR_HC_MPS, val); 515 REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
493 REG_WR(bp, PXP2_REG_WR_USDM_MPS, val); 516 REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
494 REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val); 517 REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
@@ -498,9 +521,346 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
498 REG_WR(bp, PXP2_REG_WR_TM_MPS, val); 521 REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
499 REG_WR(bp, PXP2_REG_WR_SRC_MPS, val); 522 REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
500 REG_WR(bp, PXP2_REG_WR_DBG_MPS, val); 523 REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
501 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
502 REG_WR(bp, PXP2_REG_WR_CDU_MPS, val); 524 REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
503 } 525 }
526
527 /* Validate number of tags suppoted by device */
528#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980
529 val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
530 val &= 0xFF;
531 if (val <= 0x20)
532 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
533}
534
535/****************************************************************************
536* ILT management
537****************************************************************************/
538/*
539 * This codes hides the low level HW interaction for ILT management and
540 * configuration. The API consists of a shadow ILT table which is set by the
541 * driver and a set of routines to use it to configure the HW.
542 *
543 */
544
545/* ILT HW init operations */
546
547/* ILT memory management operations */
548#define ILT_MEMOP_ALLOC 0
549#define ILT_MEMOP_FREE 1
550
551/* the phys address is shifted right 12 bits and has an added
552 * 1=valid bit added to the 53rd bit
553 * then since this is a wide register(TM)
554 * we split it into two 32 bit writes
555 */
556#define ILT_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
557#define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
558#define ILT_RANGE(f, l) (((l) << 10) | f)
559
560static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line,
561 u32 size, u8 memop)
562{
563 if (memop == ILT_MEMOP_FREE) {
564 BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
565 return 0;
566 }
567 BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
568 if (!line->page)
569 return -1;
570 line->size = size;
571 return 0;
572}
573
574
575static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
576{
577 int i, rc;
578 struct bnx2x_ilt *ilt = BP_ILT(bp);
579 struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
580
581 if (!ilt || !ilt->lines)
582 return -1;
583
584 if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
585 return 0;
586
587 for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
588 rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i],
589 ilt_cli->page_size, memop);
590 }
591 return rc;
592}
593
594static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
595{
596 int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
597 if (!rc)
598 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
599 if (!rc)
600 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
601 if (!rc)
602 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
603
604 return rc;
605}
606
607static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
608 dma_addr_t page_mapping)
609{
610 u32 reg;
611
612 if (CHIP_IS_E1(bp))
613 reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8;
614 else
615 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
616
617 bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
618}
619
620static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
621 int idx, u8 initop)
622{
623 dma_addr_t null_mapping;
624 int abs_idx = ilt->start_line + idx;
625
626
627 switch (initop) {
628 case INITOP_INIT:
629 /* set in the init-value array */
630 case INITOP_SET:
631 bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);
632 break;
633 case INITOP_CLEAR:
634 null_mapping = 0;
635 bnx2x_ilt_line_wr(bp, abs_idx, null_mapping);
636 break;
637 }
638}
639
640static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
641 struct ilt_client_info *ilt_cli,
642 u32 ilt_start, u8 initop)
643{
644 u32 start_reg = 0;
645 u32 end_reg = 0;
646
647 /* The boundary is either SET or INIT,
648 CLEAR => SET and for now SET ~~ INIT */
649
650 /* find the appropriate regs */
651 if (CHIP_IS_E1(bp)) {
652 switch (ilt_cli->client_num) {
653 case ILT_CLIENT_CDU:
654 start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
655 break;
656 case ILT_CLIENT_QM:
657 start_reg = PXP2_REG_PSWRQ_QM0_L2P;
658 break;
659 case ILT_CLIENT_SRC:
660 start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
661 break;
662 case ILT_CLIENT_TM:
663 start_reg = PXP2_REG_PSWRQ_TM0_L2P;
664 break;
665 }
666 REG_WR(bp, start_reg + BP_FUNC(bp)*4,
667 ILT_RANGE((ilt_start + ilt_cli->start),
668 (ilt_start + ilt_cli->end)));
669 } else {
670 switch (ilt_cli->client_num) {
671 case ILT_CLIENT_CDU:
672 start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
673 end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
674 break;
675 case ILT_CLIENT_QM:
676 start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
677 end_reg = PXP2_REG_RQ_QM_LAST_ILT;
678 break;
679 case ILT_CLIENT_SRC:
680 start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
681 end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
682 break;
683 case ILT_CLIENT_TM:
684 start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
685 end_reg = PXP2_REG_RQ_TM_LAST_ILT;
686 break;
687 }
688 REG_WR(bp, start_reg, (ilt_start + ilt_cli->start));
689 REG_WR(bp, end_reg, (ilt_start + ilt_cli->end));
690 }
691}
692
693static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
694 struct bnx2x_ilt *ilt,
695 struct ilt_client_info *ilt_cli,
696 u8 initop)
697{
698 int i;
699
700 if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
701 return;
702
703 for (i = ilt_cli->start; i <= ilt_cli->end; i++)
704 bnx2x_ilt_line_init_op(bp, ilt, i, initop);
705
706 /* init/clear the ILT boundries */
707 bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
708}
709
710static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
711 struct ilt_client_info *ilt_cli, u8 initop)
712{
713 struct bnx2x_ilt *ilt = BP_ILT(bp);
714
715 bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop);
716}
717
718static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
719 int cli_num, u8 initop)
720{
721 struct bnx2x_ilt *ilt = BP_ILT(bp);
722 struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
723
724 bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
725}
726
727static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
728{
729 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
730 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
731 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
732 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
733}
734
735static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
736 u32 psz_reg, u8 initop)
737{
738 struct bnx2x_ilt *ilt = BP_ILT(bp);
739 struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
740
741 if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
742 return;
743
744 switch (initop) {
745 case INITOP_INIT:
746 /* set in the init-value array */
747 case INITOP_SET:
748 REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12));
749 break;
750 case INITOP_CLEAR:
751 break;
752 }
753}
754
755/*
756 * called during init common stage, ilt clients should be initialized
757 * prioir to calling this function
758 */
759static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
760{
761 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
762 PXP2_REG_RQ_CDU_P_SIZE, initop);
763 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM,
764 PXP2_REG_RQ_QM_P_SIZE, initop);
765 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC,
766 PXP2_REG_RQ_SRC_P_SIZE, initop);
767 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM,
768 PXP2_REG_RQ_TM_P_SIZE, initop);
769}
770
771/****************************************************************************
772* QM initializations
773****************************************************************************/
774#define QM_QUEUES_PER_FUNC 16 /* E1 has 32, but only 16 are used */
775#define QM_INIT_MIN_CID_COUNT 31
776#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
777
778/* called during init port stage */
779static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
780 u8 initop)
781{
782 int port = BP_PORT(bp);
783
784 if (QM_INIT(qm_cid_count)) {
785 switch (initop) {
786 case INITOP_INIT:
787 /* set in the init-value array */
788 case INITOP_SET:
789 REG_WR(bp, QM_REG_CONNNUM_0 + port*4,
790 qm_cid_count/16 - 1);
791 break;
792 case INITOP_CLEAR:
793 break;
794 }
795 }
796}
797
798static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
799{
800 int i;
801 u32 wb_data[2];
802
803 wb_data[0] = wb_data[1] = 0;
804
805 for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
806 REG_WR(bp, QM_REG_BASEADDR + i*4,
807 qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
808 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8,
809 wb_data, 2);
810
811 if (CHIP_IS_E1H(bp)) {
812 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4,
813 qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
814 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
815 wb_data, 2);
816 }
817 }
818}
819
820/* called during init common stage */
821static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
822 u8 initop)
823{
824 if (!QM_INIT(qm_cid_count))
825 return;
826
827 switch (initop) {
828 case INITOP_INIT:
829 /* set in the init-value array */
830 case INITOP_SET:
831 bnx2x_qm_set_ptr_table(bp, qm_cid_count);
832 break;
833 case INITOP_CLEAR:
834 break;
835 }
504} 836}
505 837
838/****************************************************************************
839* SRC initializations
840****************************************************************************/
841#ifdef BCM_CNIC
842/* called during init func stage */
843static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
844 dma_addr_t t2_mapping, int src_cid_count)
845{
846 int i;
847 int port = BP_PORT(bp);
848
849 /* Initialize T2 */
850 for (i = 0; i < src_cid_count-1; i++)
851 t2[i].next = (u64)(t2_mapping + (i+1)*sizeof(struct src_ent));
852
853 /* tell the searcher where the T2 table is */
854 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
855
856 bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16,
857 U64_LO(t2_mapping), U64_HI(t2_mapping));
858
859 bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16,
860 U64_LO((u64)t2_mapping +
861 (src_cid_count-1) * sizeof(struct src_ent)),
862 U64_HI((u64)t2_mapping +
863 (src_cid_count-1) * sizeof(struct src_ent)));
864}
865#endif
506#endif /* BNX2X_INIT_OPS_H */ 866#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 0383e3066313..076e11f5769f 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
1/* Copyright 2008-2009 Broadcom Corporation 1/* Copyright 2008-2011 Broadcom Corporation
2 * 2 *
3 * Unless you and Broadcom execute a separate written software license 3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you 4 * agreement governing use of this software, this software is licensed to you
@@ -28,12 +28,13 @@
28 28
29/********************************************************/ 29/********************************************************/
30#define ETH_HLEN 14 30#define ETH_HLEN 14
31#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/ 31/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
32#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
32#define ETH_MIN_PACKET_SIZE 60 33#define ETH_MIN_PACKET_SIZE 60
33#define ETH_MAX_PACKET_SIZE 1500 34#define ETH_MAX_PACKET_SIZE 1500
34#define ETH_MAX_JUMBO_PACKET_SIZE 9600 35#define ETH_MAX_JUMBO_PACKET_SIZE 9600
35#define MDIO_ACCESS_TIMEOUT 1000 36#define MDIO_ACCESS_TIMEOUT 1000
36#define BMAC_CONTROL_RX_ENABLE 2 37#define BMAC_CONTROL_RX_ENABLE 2
37 38
38/***********************************************************/ 39/***********************************************************/
39/* Shortcut definitions */ 40/* Shortcut definitions */
@@ -79,7 +80,7 @@
79 80
80#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 81#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
81#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 82#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
82#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM 83#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
83#define AUTONEG_PARALLEL \ 84#define AUTONEG_PARALLEL \
84 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 85 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
85#define AUTONEG_SGMII_FIBER_AUTODET \ 86#define AUTONEG_SGMII_FIBER_AUTODET \
@@ -112,10 +113,10 @@
112#define GP_STATUS_10G_KX4 \ 113#define GP_STATUS_10G_KX4 \
113 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 114 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
114 115
115#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD 116#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
116#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD 117#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
117#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD 118#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
118#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4 119#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
119#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD 120#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
120#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD 121#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
121#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD 122#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
@@ -123,18 +124,18 @@
123#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD 124#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
124#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD 125#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
125#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD 126#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
126#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD 127#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
127#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD 128#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
128#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD 129#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
129#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD 130#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
130#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD 131#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
131#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD 132#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
132#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD 133#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
133#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD 134#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
134#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD 135#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
135#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD 136#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
136#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD 137#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
137#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD 138#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
138 139
139#define PHY_XGXS_FLAG 0x1 140#define PHY_XGXS_FLAG 0x1
140#define PHY_SGMII_FLAG 0x2 141#define PHY_SGMII_FLAG 0x2
@@ -142,7 +143,7 @@
142 143
143/* */ 144/* */
144#define SFP_EEPROM_CON_TYPE_ADDR 0x2 145#define SFP_EEPROM_CON_TYPE_ADDR 0x2
145 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 146 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
146 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 147 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
147 148
148 149
@@ -153,85 +154,318 @@
153 154
154#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 155#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
155 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 156 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
156 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8 157 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
157 158
158#define SFP_EEPROM_OPTIONS_ADDR 0x40 159#define SFP_EEPROM_OPTIONS_ADDR 0x40
159 #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1 160 #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
160#define SFP_EEPROM_OPTIONS_SIZE 2 161#define SFP_EEPROM_OPTIONS_SIZE 2
161
162#define EDC_MODE_LINEAR 0x0022
163#define EDC_MODE_LIMITING 0x0044
164#define EDC_MODE_PASSIVE_DAC 0x0055
165 162
163#define EDC_MODE_LINEAR 0x0022
164#define EDC_MODE_LIMITING 0x0044
165#define EDC_MODE_PASSIVE_DAC 0x0055
166 166
167 167
168#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
169#define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000)
168/**********************************************************/ 170/**********************************************************/
169/* INTERFACE */ 171/* INTERFACE */
170/**********************************************************/ 172/**********************************************************/
171#define CL45_WR_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \ 173
172 bnx2x_cl45_write(_bp, _port, 0, _phy_addr, \ 174#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
173 DEFAULT_PHY_DEV_ADDR, \ 175 bnx2x_cl45_write(_bp, _phy, \
176 (_phy)->def_md_devad, \
174 (_bank + (_addr & 0xf)), \ 177 (_bank + (_addr & 0xf)), \
175 _val) 178 _val)
176 179
177#define CL45_RD_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \ 180#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
178 bnx2x_cl45_read(_bp, _port, 0, _phy_addr, \ 181 bnx2x_cl45_read(_bp, _phy, \
179 DEFAULT_PHY_DEV_ADDR, \ 182 (_phy)->def_md_devad, \
180 (_bank + (_addr & 0xf)), \ 183 (_bank + (_addr & 0xf)), \
181 _val) 184 _val)
182 185
183static void bnx2x_set_serdes_access(struct link_params *params) 186static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
187{
188 u32 val = REG_RD(bp, reg);
189
190 val |= bits;
191 REG_WR(bp, reg, val);
192 return val;
193}
194
195static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
184{ 196{
197 u32 val = REG_RD(bp, reg);
198
199 val &= ~bits;
200 REG_WR(bp, reg, val);
201 return val;
202}
203
204/******************************************************************/
205/* ETS section */
206/******************************************************************/
207void bnx2x_ets_disabled(struct link_params *params)
208{
209 /* ETS disabled configuration*/
185 struct bnx2x *bp = params->bp; 210 struct bnx2x *bp = params->bp;
186 u32 emac_base = (params->port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
187 211
188 /* Set Clause 22 */ 212 DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
189 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + params->port*0x10, 1); 213
190 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000); 214 /*
191 udelay(500); 215 * mapping between entry priority to client number (0,1,2 -debug and
192 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f); 216 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
193 udelay(500); 217 * 3bits client num.
194 /* Set Clause 45 */ 218 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
195 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + params->port*0x10, 0); 219 * cos1-100 cos0-011 dbg1-010 dbg0-001 MCP-000
220 */
221
222 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
223 /*
224 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
225 * as strict. Bits 0,1,2 - debug and management entries, 3 -
226 * COS0 entry, 4 - COS1 entry.
227 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
228 * bit4 bit3 bit2 bit1 bit0
229 * MCP and debug are strict
230 */
231
232 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
233 /* defines which entries (clients) are subjected to WFQ arbitration */
234 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
235 /*
236 * For strict priority entries defines the number of consecutive
237 * slots for the highest priority.
238 */
239 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
240 /*
241 * mapping between the CREDIT_WEIGHT registers and actual client
242 * numbers
243 */
244 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
245 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
246 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
247
248 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
249 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
250 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
251 /* ETS mode disable */
252 REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
253 /*
254 * If ETS mode is enabled (there is no strict priority) defines a WFQ
255 * weight for COS0/COS1.
256 */
257 REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
258 REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
259 /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
260 REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
261 REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
262 /* Defines the number of consecutive slots for the strict priority */
263 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
264}
265
266static void bnx2x_ets_bw_limit_common(const struct link_params *params)
267{
268 /* ETS disabled configuration */
269 struct bnx2x *bp = params->bp;
270 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
271 /*
272 * defines which entries (clients) are subjected to WFQ arbitration
273 * COS0 0x8
274 * COS1 0x10
275 */
276 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
277 /*
278 * mapping between the ARB_CREDIT_WEIGHT registers and actual
279 * client numbers (WEIGHT_0 does not actually have to represent
280 * client 0)
281 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
282 * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
283 */
284 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
285
286 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
287 ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
288 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
289 ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
290
291 /* ETS mode enabled*/
292 REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
293
294 /* Defines the number of consecutive slots for the strict priority */
295 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
296 /*
297 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
298 * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
299 * entry, 4 - COS1 entry.
300 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
301 * bit4 bit3 bit2 bit1 bit0
302 * MCP and debug are strict
303 */
304 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
305
306 /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
307 REG_WR(bp, PBF_REG_COS0_UPPER_BOUND,
308 ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
309 REG_WR(bp, PBF_REG_COS1_UPPER_BOUND,
310 ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
196} 311}
197static void bnx2x_set_phy_mdio(struct link_params *params, u8 phy_flags) 312
313void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
314 const u32 cos1_bw)
198{ 315{
316 /* ETS disabled configuration*/
199 struct bnx2x *bp = params->bp; 317 struct bnx2x *bp = params->bp;
318 const u32 total_bw = cos0_bw + cos1_bw;
319 u32 cos0_credit_weight = 0;
320 u32 cos1_credit_weight = 0;
200 321
201 if (phy_flags & PHY_XGXS_FLAG) { 322 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
202 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
203 params->port*0x18, 0);
204 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
205 DEFAULT_PHY_DEV_ADDR);
206 } else {
207 bnx2x_set_serdes_access(params);
208 323
209 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + 324 if ((0 == total_bw) ||
210 params->port*0x10, 325 (0 == cos0_bw) ||
211 DEFAULT_PHY_DEV_ADDR); 326 (0 == cos1_bw)) {
327 DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
328 return;
212 } 329 }
330
331 cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
332 total_bw;
333 cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
334 total_bw;
335
336 bnx2x_ets_bw_limit_common(params);
337
338 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
339 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
340
341 REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
342 REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
213} 343}
214 344
215static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) 345u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
216{ 346{
217 u32 val = REG_RD(bp, reg); 347 /* ETS disabled configuration*/
348 struct bnx2x *bp = params->bp;
349 u32 val = 0;
218 350
219 val |= bits; 351 DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
220 REG_WR(bp, reg, val); 352 /*
221 return val; 353 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
354 * as strict. Bits 0,1,2 - debug and management entries,
355 * 3 - COS0 entry, 4 - COS1 entry.
356 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
357 * bit4 bit3 bit2 bit1 bit0
358 * MCP and debug are strict
359 */
360 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
361 /*
362 * For strict priority entries defines the number of consecutive slots
363 * for the highest priority.
364 */
365 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
366 /* ETS mode disable */
367 REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
368 /* Defines the number of consecutive slots for the strict priority */
369 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
370
371 /* Defines the number of consecutive slots for the strict priority */
372 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
373
374 /*
375 * mapping between entry priority to client number (0,1,2 -debug and
376 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
377 * 3bits client num.
378 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
379 * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
380 * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
381 */
382 val = (0 == strict_cos) ? 0x2318 : 0x22E0;
383 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
384
385 return 0;
222} 386}
387/******************************************************************/
388/* PFC section */
389/******************************************************************/
223 390
224static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits) 391static void bnx2x_bmac2_get_pfc_stat(struct link_params *params,
392 u32 pfc_frames_sent[2],
393 u32 pfc_frames_received[2])
225{ 394{
226 u32 val = REG_RD(bp, reg); 395 /* Read pfc statistic */
396 struct bnx2x *bp = params->bp;
397 u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
398 NIG_REG_INGRESS_BMAC0_MEM;
227 399
228 val &= ~bits; 400 DP(NETIF_MSG_LINK, "pfc statistic read from BMAC\n");
229 REG_WR(bp, reg, val); 401
230 return val; 402 REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_STAT_GTPP,
403 pfc_frames_sent, 2);
404
405 REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_STAT_GRPP,
406 pfc_frames_received, 2);
407
408}
409static void bnx2x_emac_get_pfc_stat(struct link_params *params,
410 u32 pfc_frames_sent[2],
411 u32 pfc_frames_received[2])
412{
413 /* Read pfc statistic */
414 struct bnx2x *bp = params->bp;
415 u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
416 u32 val_xon = 0;
417 u32 val_xoff = 0;
418
419 DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
420
421 /* PFC received frames */
422 val_xoff = REG_RD(bp, emac_base +
423 EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
424 val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
425 val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
426 val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
427
428 pfc_frames_received[0] = val_xon + val_xoff;
429
430 /* PFC received sent */
431 val_xoff = REG_RD(bp, emac_base +
432 EMAC_REG_RX_PFC_STATS_XOFF_SENT);
433 val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
434 val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
435 val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
436
437 pfc_frames_sent[0] = val_xon + val_xoff;
231} 438}
232 439
440void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
441 u32 pfc_frames_sent[2],
442 u32 pfc_frames_received[2])
443{
444 /* Read pfc statistic */
445 struct bnx2x *bp = params->bp;
446 u32 val = 0;
447 DP(NETIF_MSG_LINK, "pfc statistic\n");
448
449 if (!vars->link_up)
450 return;
451
452 val = REG_RD(bp, MISC_REG_RESET_REG_2);
453 if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
454 == 0) {
455 DP(NETIF_MSG_LINK, "About to read stats from EMAC\n");
456 bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
457 pfc_frames_received);
458 } else {
459 DP(NETIF_MSG_LINK, "About to read stats from BMAC\n");
460 bnx2x_bmac2_get_pfc_stat(params, pfc_frames_sent,
461 pfc_frames_received);
462 }
463}
464/******************************************************************/
465/* MAC/PBF section */
466/******************************************************************/
233static void bnx2x_emac_init(struct link_params *params, 467static void bnx2x_emac_init(struct link_params *params,
234 struct link_vars *vars) 468 struct link_vars *vars)
235{ 469{
236 /* reset and unreset the emac core */ 470 /* reset and unreset the emac core */
237 struct bnx2x *bp = params->bp; 471 struct bnx2x *bp = params->bp;
@@ -241,10 +475,10 @@ static void bnx2x_emac_init(struct link_params *params,
241 u16 timeout; 475 u16 timeout;
242 476
243 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 477 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
244 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); 478 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
245 udelay(5); 479 udelay(5);
246 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 480 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
247 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); 481 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
248 482
249 /* init emac - use read-modify-write */ 483 /* init emac - use read-modify-write */
250 /* self clear reset */ 484 /* self clear reset */
@@ -275,7 +509,7 @@ static void bnx2x_emac_init(struct link_params *params,
275} 509}
276 510
277static u8 bnx2x_emac_enable(struct link_params *params, 511static u8 bnx2x_emac_enable(struct link_params *params,
278 struct link_vars *vars, u8 lb) 512 struct link_vars *vars, u8 lb)
279{ 513{
280 struct bnx2x *bp = params->bp; 514 struct bnx2x *bp = params->bp;
281 u8 port = params->port; 515 u8 port = params->port;
@@ -287,77 +521,86 @@ static u8 bnx2x_emac_enable(struct link_params *params,
287 /* enable emac and not bmac */ 521 /* enable emac and not bmac */
288 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1); 522 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
289 523
290 /* for paladium */
291 if (CHIP_REV_IS_EMUL(bp)) {
292 /* Use lane 1 (of lanes 0-3) */
293 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
294 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
295 port*4, 1);
296 }
297 /* for fpga */
298 else
299
300 if (CHIP_REV_IS_FPGA(bp)) {
301 /* Use lane 1 (of lanes 0-3) */
302 DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n");
303
304 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
305 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4,
306 0);
307 } else
308 /* ASIC */ 524 /* ASIC */
309 if (vars->phy_flags & PHY_XGXS_FLAG) { 525 if (vars->phy_flags & PHY_XGXS_FLAG) {
310 u32 ser_lane = ((params->lane_config & 526 u32 ser_lane = ((params->lane_config &
311 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 527 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
312 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 528 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
313 529
314 DP(NETIF_MSG_LINK, "XGXS\n"); 530 DP(NETIF_MSG_LINK, "XGXS\n");
315 /* select the master lanes (out of 0-3) */ 531 /* select the master lanes (out of 0-3) */
316 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + 532 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
317 port*4, ser_lane);
318 /* select XGXS */ 533 /* select XGXS */
319 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + 534 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
320 port*4, 1);
321 535
322 } else { /* SerDes */ 536 } else { /* SerDes */
323 DP(NETIF_MSG_LINK, "SerDes\n"); 537 DP(NETIF_MSG_LINK, "SerDes\n");
324 /* select SerDes */ 538 /* select SerDes */
325 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + 539 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
326 port*4, 0);
327 } 540 }
328 541
329 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE, 542 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
330 EMAC_RX_MODE_RESET); 543 EMAC_RX_MODE_RESET);
331 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, 544 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
332 EMAC_TX_MODE_RESET); 545 EMAC_TX_MODE_RESET);
333 546
334 if (CHIP_REV_IS_SLOW(bp)) { 547 if (CHIP_REV_IS_SLOW(bp)) {
335 /* config GMII mode */ 548 /* config GMII mode */
336 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 549 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
337 EMAC_WR(bp, EMAC_REG_EMAC_MODE, 550 EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
338 (val | EMAC_MODE_PORT_GMII));
339 } else { /* ASIC */ 551 } else { /* ASIC */
340 /* pause enable/disable */ 552 /* pause enable/disable */
341 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, 553 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
342 EMAC_RX_MODE_FLOW_EN); 554 EMAC_RX_MODE_FLOW_EN);
343 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
344 bnx2x_bits_en(bp, emac_base +
345 EMAC_REG_EMAC_RX_MODE,
346 EMAC_RX_MODE_FLOW_EN);
347 555
348 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE, 556 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
349 (EMAC_TX_MODE_EXT_PAUSE_EN | 557 (EMAC_TX_MODE_EXT_PAUSE_EN |
350 EMAC_TX_MODE_FLOW_EN)); 558 EMAC_TX_MODE_FLOW_EN));
351 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) 559 if (!(params->feature_config_flags &
352 bnx2x_bits_en(bp, emac_base + 560 FEATURE_CONFIG_PFC_ENABLED)) {
353 EMAC_REG_EMAC_TX_MODE, 561 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
354 (EMAC_TX_MODE_EXT_PAUSE_EN | 562 bnx2x_bits_en(bp, emac_base +
355 EMAC_TX_MODE_FLOW_EN)); 563 EMAC_REG_EMAC_RX_MODE,
564 EMAC_RX_MODE_FLOW_EN);
565
566 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
567 bnx2x_bits_en(bp, emac_base +
568 EMAC_REG_EMAC_TX_MODE,
569 (EMAC_TX_MODE_EXT_PAUSE_EN |
570 EMAC_TX_MODE_FLOW_EN));
571 } else
572 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
573 EMAC_TX_MODE_FLOW_EN);
356 } 574 }
357 575
358 /* KEEP_VLAN_TAG, promiscuous */ 576 /* KEEP_VLAN_TAG, promiscuous */
359 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); 577 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
360 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; 578 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
579
580 /*
581 * Setting this bit causes MAC control frames (except for pause
582 * frames) to be passed on for processing. This setting has no
583 * affect on the operation of the pause frames. This bit effects
584 * all packets regardless of RX Parser packet sorting logic.
585 * Turn the PFC off to make sure we are in Xon state before
586 * enabling it.
587 */
588 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
589 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
590 DP(NETIF_MSG_LINK, "PFC is enabled\n");
591 /* Enable PFC again */
592 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE,
593 EMAC_REG_RX_PFC_MODE_RX_EN |
594 EMAC_REG_RX_PFC_MODE_TX_EN |
595 EMAC_REG_RX_PFC_MODE_PRIORITIES);
596
597 EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM,
598 ((0x0101 <<
599 EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
600 (0x00ff <<
601 EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
602 val |= EMAC_RX_MODE_KEEP_MAC_CONTROL;
603 }
361 EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val); 604 EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
362 605
363 /* Set Loopback */ 606 /* Set Loopback */
@@ -387,31 +630,381 @@ static u8 bnx2x_emac_enable(struct link_params *params,
387 /* enable the NIG in/out to the emac */ 630 /* enable the NIG in/out to the emac */
388 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1); 631 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
389 val = 0; 632 val = 0;
390 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) 633 if ((params->feature_config_flags &
634 FEATURE_CONFIG_PFC_ENABLED) ||
635 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
391 val = 1; 636 val = 1;
392 637
393 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val); 638 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
394 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1); 639 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
395 640
396 if (CHIP_REV_IS_EMUL(bp)) { 641 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
397 /* take the BigMac out of reset */
398 REG_WR(bp,
399 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
400 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
401
402 /* enable access for bmac registers */
403 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
404 } else
405 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
406 642
407 vars->mac_type = MAC_TYPE_EMAC; 643 vars->mac_type = MAC_TYPE_EMAC;
408 return 0; 644 return 0;
409} 645}
410 646
647static void bnx2x_update_pfc_bmac1(struct link_params *params,
648 struct link_vars *vars)
649{
650 u32 wb_data[2];
651 struct bnx2x *bp = params->bp;
652 u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
653 NIG_REG_INGRESS_BMAC0_MEM;
654
655 u32 val = 0x14;
656 if ((!(params->feature_config_flags &
657 FEATURE_CONFIG_PFC_ENABLED)) &&
658 (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
659 /* Enable BigMAC to react on received Pause packets */
660 val |= (1<<5);
661 wb_data[0] = val;
662 wb_data[1] = 0;
663 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
664
665 /* tx control */
666 val = 0xc0;
667 if (!(params->feature_config_flags &
668 FEATURE_CONFIG_PFC_ENABLED) &&
669 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
670 val |= 0x800000;
671 wb_data[0] = val;
672 wb_data[1] = 0;
673 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2);
674}
675
676static void bnx2x_update_pfc_bmac2(struct link_params *params,
677 struct link_vars *vars,
678 u8 is_lb)
679{
680 /*
681 * Set rx control: Strip CRC and enable BigMAC to relay
682 * control packets to the system as well
683 */
684 u32 wb_data[2];
685 struct bnx2x *bp = params->bp;
686 u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
687 NIG_REG_INGRESS_BMAC0_MEM;
688 u32 val = 0x14;
689
690 if ((!(params->feature_config_flags &
691 FEATURE_CONFIG_PFC_ENABLED)) &&
692 (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
693 /* Enable BigMAC to react on received Pause packets */
694 val |= (1<<5);
695 wb_data[0] = val;
696 wb_data[1] = 0;
697 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
698 udelay(30);
699
700 /* Tx control */
701 val = 0xc0;
702 if (!(params->feature_config_flags &
703 FEATURE_CONFIG_PFC_ENABLED) &&
704 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
705 val |= 0x800000;
706 wb_data[0] = val;
707 wb_data[1] = 0;
708 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
709
710 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
711 DP(NETIF_MSG_LINK, "PFC is enabled\n");
712 /* Enable PFC RX & TX & STATS and set 8 COS */
713 wb_data[0] = 0x0;
714 wb_data[0] |= (1<<0); /* RX */
715 wb_data[0] |= (1<<1); /* TX */
716 wb_data[0] |= (1<<2); /* Force initial Xon */
717 wb_data[0] |= (1<<3); /* 8 cos */
718 wb_data[0] |= (1<<5); /* STATS */
719 wb_data[1] = 0;
720 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
721 wb_data, 2);
722 /* Clear the force Xon */
723 wb_data[0] &= ~(1<<2);
724 } else {
725 DP(NETIF_MSG_LINK, "PFC is disabled\n");
726 /* disable PFC RX & TX & STATS and set 8 COS */
727 wb_data[0] = 0x8;
728 wb_data[1] = 0;
729 }
730
731 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
732
733 /*
734 * Set Time (based unit is 512 bit time) between automatic
735 * re-sending of PP packets amd enable automatic re-send of
736 * Per-Priroity Packet as long as pp_gen is asserted and
737 * pp_disable is low.
738 */
739 val = 0x8000;
740 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
741 val |= (1<<16); /* enable automatic re-send */
742
743 wb_data[0] = val;
744 wb_data[1] = 0;
745 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
746 wb_data, 2);
411 747
748 /* mac control */
749 val = 0x3; /* Enable RX and TX */
750 if (is_lb) {
751 val |= 0x4; /* Local loopback */
752 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
753 }
754 /* When PFC enabled, Pass pause frames towards the NIG. */
755 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
756 val |= ((1<<6)|(1<<5));
757
758 wb_data[0] = val;
759 wb_data[1] = 0;
760 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
761}
412 762
413static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars, 763static void bnx2x_update_pfc_brb(struct link_params *params,
414 u8 is_lb) 764 struct link_vars *vars,
765 struct bnx2x_nig_brb_pfc_port_params *pfc_params)
766{
767 struct bnx2x *bp = params->bp;
768 int set_pfc = params->feature_config_flags &
769 FEATURE_CONFIG_PFC_ENABLED;
770
771 /* default - pause configuration */
772 u32 pause_xoff_th = PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE;
773 u32 pause_xon_th = PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE;
774 u32 full_xoff_th = PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE;
775 u32 full_xon_th = PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE;
776
777 if (set_pfc && pfc_params)
778 /* First COS */
779 if (!pfc_params->cos0_pauseable) {
780 pause_xoff_th =
781 PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE;
782 pause_xon_th =
783 PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE;
784 full_xoff_th =
785 PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE;
786 full_xon_th =
787 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
788 }
789 /*
790 * The number of free blocks below which the pause signal to class 0
791 * of MAC #n is asserted. n=0,1
792 */
793 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
794 /*
795 * The number of free blocks above which the pause signal to class 0
796 * of MAC #n is de-asserted. n=0,1
797 */
798 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
799 /*
800 * The number of free blocks below which the full signal to class 0
801 * of MAC #n is asserted. n=0,1
802 */
803 REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
804 /*
805 * The number of free blocks above which the full signal to class 0
806 * of MAC #n is de-asserted. n=0,1
807 */
808 REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
809
810 if (set_pfc && pfc_params) {
811 /* Second COS */
812 if (pfc_params->cos1_pauseable) {
813 pause_xoff_th =
814 PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE;
815 pause_xon_th =
816 PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE;
817 full_xoff_th =
818 PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE;
819 full_xon_th =
820 PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE;
821 } else {
822 pause_xoff_th =
823 PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE;
824 pause_xon_th =
825 PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE;
826 full_xoff_th =
827 PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE;
828 full_xon_th =
829 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
830 }
831 /*
832 * The number of free blocks below which the pause signal to
833 * class 1 of MAC #n is asserted. n=0,1
834 */
835 REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
836 /*
837 * The number of free blocks above which the pause signal to
838 * class 1 of MAC #n is de-asserted. n=0,1
839 */
840 REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
841 /*
842 * The number of free blocks below which the full signal to
843 * class 1 of MAC #n is asserted. n=0,1
844 */
845 REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
846 /*
847 * The number of free blocks above which the full signal to
848 * class 1 of MAC #n is de-asserted. n=0,1
849 */
850 REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
851 }
852}
853
854static void bnx2x_update_pfc_nig(struct link_params *params,
855 struct link_vars *vars,
856 struct bnx2x_nig_brb_pfc_port_params *nig_params)
857{
858 u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
859 u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
860 u32 pkt_priority_to_cos = 0;
861 u32 val;
862 struct bnx2x *bp = params->bp;
863 int port = params->port;
864 int set_pfc = params->feature_config_flags &
865 FEATURE_CONFIG_PFC_ENABLED;
866 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
867
868 /*
869 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
870 * MAC control frames (that are not pause packets)
871 * will be forwarded to the XCM.
872 */
873 xcm_mask = REG_RD(bp,
874 port ? NIG_REG_LLH1_XCM_MASK :
875 NIG_REG_LLH0_XCM_MASK);
876 /*
877 * nig params will override non PFC params, since it's possible to
878 * do transition from PFC to SAFC
879 */
880 if (set_pfc) {
881 pause_enable = 0;
882 llfc_out_en = 0;
883 llfc_enable = 0;
884 ppp_enable = 1;
885 xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
886 NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
887 xcm0_out_en = 0;
888 p0_hwpfc_enable = 1;
889 } else {
890 if (nig_params) {
891 llfc_out_en = nig_params->llfc_out_en;
892 llfc_enable = nig_params->llfc_enable;
893 pause_enable = nig_params->pause_enable;
894 } else /*defaul non PFC mode - PAUSE */
895 pause_enable = 1;
896
897 xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
898 NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
899 xcm0_out_en = 1;
900 }
901
902 REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
903 NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
904 REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
905 NIG_REG_LLFC_ENABLE_0, llfc_enable);
906 REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
907 NIG_REG_PAUSE_ENABLE_0, pause_enable);
908
909 REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
910 NIG_REG_PPP_ENABLE_0, ppp_enable);
911
912 REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
913 NIG_REG_LLH0_XCM_MASK, xcm_mask);
914
915 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
916
917 /* output enable for RX_XCM # IF */
918 REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
919
920 /* HW PFC TX enable */
921 REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
922
923 /* 0x2 = BMAC, 0x1= EMAC */
924 switch (vars->mac_type) {
925 case MAC_TYPE_EMAC:
926 val = 1;
927 break;
928 case MAC_TYPE_BMAC:
929 val = 0;
930 break;
931 default:
932 val = 0;
933 break;
934 }
935 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT, val);
936
937 if (nig_params) {
938 pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
939
940 REG_WR(bp, port ? NIG_REG_P1_RX_COS0_PRIORITY_MASK :
941 NIG_REG_P0_RX_COS0_PRIORITY_MASK,
942 nig_params->rx_cos0_priority_mask);
943
944 REG_WR(bp, port ? NIG_REG_P1_RX_COS1_PRIORITY_MASK :
945 NIG_REG_P0_RX_COS1_PRIORITY_MASK,
946 nig_params->rx_cos1_priority_mask);
947
948 REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
949 NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
950 nig_params->llfc_high_priority_classes);
951
952 REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
953 NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
954 nig_params->llfc_low_priority_classes);
955 }
956 REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
957 NIG_REG_P0_PKT_PRIORITY_TO_COS,
958 pkt_priority_to_cos);
959}
960
961
962void bnx2x_update_pfc(struct link_params *params,
963 struct link_vars *vars,
964 struct bnx2x_nig_brb_pfc_port_params *pfc_params)
965{
966 /*
967 * The PFC and pause are orthogonal to one another, meaning when
968 * PFC is enabled, the pause are disabled, and when PFC is
969 * disabled, pause are set according to the pause result.
970 */
971 u32 val;
972 struct bnx2x *bp = params->bp;
973
974 /* update NIG params */
975 bnx2x_update_pfc_nig(params, vars, pfc_params);
976
977 /* update BRB params */
978 bnx2x_update_pfc_brb(params, vars, pfc_params);
979
980 if (!vars->link_up)
981 return;
982
983 val = REG_RD(bp, MISC_REG_RESET_REG_2);
984 if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
985 == 0) {
986 DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
987 bnx2x_emac_enable(params, vars, 0);
988 return;
989 }
990
991 DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
992 if (CHIP_IS_E2(bp))
993 bnx2x_update_pfc_bmac2(params, vars, 0);
994 else
995 bnx2x_update_pfc_bmac1(params, vars);
996
997 val = 0;
998 if ((params->feature_config_flags &
999 FEATURE_CONFIG_PFC_ENABLED) ||
1000 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
1001 val = 1;
1002 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
1003}
1004
1005static u8 bnx2x_bmac1_enable(struct link_params *params,
1006 struct link_vars *vars,
1007 u8 is_lb)
415{ 1008{
416 struct bnx2x *bp = params->bp; 1009 struct bnx2x *bp = params->bp;
417 u8 port = params->port; 1010 u8 port = params->port;
@@ -420,24 +1013,13 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
420 u32 wb_data[2]; 1013 u32 wb_data[2];
421 u32 val; 1014 u32 val;
422 1015
423 DP(NETIF_MSG_LINK, "Enabling BigMAC\n"); 1016 DP(NETIF_MSG_LINK, "Enabling BigMAC1\n");
424 /* reset and unreset the BigMac */
425 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
426 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
427 msleep(1);
428
429 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
430 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
431
432 /* enable access for bmac registers */
433 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
434 1017
435 /* XGXS control */ 1018 /* XGXS control */
436 wb_data[0] = 0x3c; 1019 wb_data[0] = 0x3c;
437 wb_data[1] = 0; 1020 wb_data[1] = 0;
438 REG_WR_DMAE(bp, bmac_addr + 1021 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
439 BIGMAC_REGISTER_BMAC_XGXS_CONTROL, 1022 wb_data, 2);
440 wb_data, 2);
441 1023
442 /* tx MAC SA */ 1024 /* tx MAC SA */
443 wb_data[0] = ((params->mac_addr[2] << 24) | 1025 wb_data[0] = ((params->mac_addr[2] << 24) |
@@ -446,17 +1028,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
446 params->mac_addr[5]); 1028 params->mac_addr[5]);
447 wb_data[1] = ((params->mac_addr[0] << 8) | 1029 wb_data[1] = ((params->mac_addr[0] << 8) |
448 params->mac_addr[1]); 1030 params->mac_addr[1]);
449 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, 1031 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
450 wb_data, 2);
451
452 /* tx control */
453 val = 0xc0;
454 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
455 val |= 0x800000;
456 wb_data[0] = val;
457 wb_data[1] = 0;
458 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL,
459 wb_data, 2);
460 1032
461 /* mac control */ 1033 /* mac control */
462 val = 0x3; 1034 val = 0x3;
@@ -466,238 +1038,155 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
466 } 1038 }
467 wb_data[0] = val; 1039 wb_data[0] = val;
468 wb_data[1] = 0; 1040 wb_data[1] = 0;
469 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, 1041 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
470 wb_data, 2);
471 1042
472 /* set rx mtu */ 1043 /* set rx mtu */
473 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1044 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
474 wb_data[1] = 0; 1045 wb_data[1] = 0;
475 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, 1046 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
476 wb_data, 2);
477 1047
478 /* rx control set to don't strip crc */ 1048 bnx2x_update_pfc_bmac1(params, vars);
479 val = 0x14;
480 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
481 val |= 0x20;
482 wb_data[0] = val;
483 wb_data[1] = 0;
484 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL,
485 wb_data, 2);
486 1049
487 /* set tx mtu */ 1050 /* set tx mtu */
488 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1051 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
489 wb_data[1] = 0; 1052 wb_data[1] = 0;
490 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, 1053 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
491 wb_data, 2);
492 1054
493 /* set cnt max size */ 1055 /* set cnt max size */
494 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1056 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
495 wb_data[1] = 0; 1057 wb_data[1] = 0;
496 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, 1058 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
497 wb_data, 2);
498 1059
499 /* configure safc */ 1060 /* configure safc */
500 wb_data[0] = 0x1000200; 1061 wb_data[0] = 0x1000200;
501 wb_data[1] = 0; 1062 wb_data[1] = 0;
502 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, 1063 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
503 wb_data, 2); 1064 wb_data, 2);
504 /* fix for emulation */
505 if (CHIP_REV_IS_EMUL(bp)) {
506 wb_data[0] = 0xf000;
507 wb_data[1] = 0;
508 REG_WR_DMAE(bp,
509 bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
510 wb_data, 2);
511 }
512
513 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
514 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
515 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
516 val = 0;
517 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
518 val = 1;
519 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
520 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
521 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
522 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
523 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
524 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
525 1065
526 vars->mac_type = MAC_TYPE_BMAC;
527 return 0; 1066 return 0;
528} 1067}
529 1068
530static void bnx2x_phy_deassert(struct link_params *params, u8 phy_flags) 1069static u8 bnx2x_bmac2_enable(struct link_params *params,
531{ 1070 struct link_vars *vars,
532 struct bnx2x *bp = params->bp; 1071 u8 is_lb)
533 u32 val;
534
535 if (phy_flags & PHY_XGXS_FLAG) {
536 DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:XGXS\n");
537 val = XGXS_RESET_BITS;
538
539 } else { /* SerDes */
540 DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:SerDes\n");
541 val = SERDES_RESET_BITS;
542 }
543
544 val = val << (params->port*16);
545
546 /* reset and unreset the SerDes/XGXS */
547 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
548 val);
549 udelay(500);
550 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET,
551 val);
552 bnx2x_set_phy_mdio(params, phy_flags);
553}
554
555void bnx2x_link_status_update(struct link_params *params,
556 struct link_vars *vars)
557{ 1072{
558 struct bnx2x *bp = params->bp; 1073 struct bnx2x *bp = params->bp;
559 u8 link_10g;
560 u8 port = params->port; 1074 u8 port = params->port;
1075 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
1076 NIG_REG_INGRESS_BMAC0_MEM;
1077 u32 wb_data[2];
561 1078
562 if (params->switch_cfg == SWITCH_CFG_1G) 1079 DP(NETIF_MSG_LINK, "Enabling BigMAC2\n");
563 vars->phy_flags = PHY_SERDES_FLAG;
564 else
565 vars->phy_flags = PHY_XGXS_FLAG;
566 vars->link_status = REG_RD(bp, params->shmem_base +
567 offsetof(struct shmem_region,
568 port_mb[port].link_status));
569
570 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
571
572 if (vars->link_up) {
573 DP(NETIF_MSG_LINK, "phy link up\n");
574
575 vars->phy_link_up = 1;
576 vars->duplex = DUPLEX_FULL;
577 switch (vars->link_status &
578 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
579 case LINK_10THD:
580 vars->duplex = DUPLEX_HALF;
581 /* fall thru */
582 case LINK_10TFD:
583 vars->line_speed = SPEED_10;
584 break;
585
586 case LINK_100TXHD:
587 vars->duplex = DUPLEX_HALF;
588 /* fall thru */
589 case LINK_100T4:
590 case LINK_100TXFD:
591 vars->line_speed = SPEED_100;
592 break;
593
594 case LINK_1000THD:
595 vars->duplex = DUPLEX_HALF;
596 /* fall thru */
597 case LINK_1000TFD:
598 vars->line_speed = SPEED_1000;
599 break;
600
601 case LINK_2500THD:
602 vars->duplex = DUPLEX_HALF;
603 /* fall thru */
604 case LINK_2500TFD:
605 vars->line_speed = SPEED_2500;
606 break;
607
608 case LINK_10GTFD:
609 vars->line_speed = SPEED_10000;
610 break;
611 1080
612 case LINK_12GTFD: 1081 wb_data[0] = 0;
613 vars->line_speed = SPEED_12000; 1082 wb_data[1] = 0;
614 break; 1083 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
615 1084 udelay(30);
616 case LINK_12_5GTFD:
617 vars->line_speed = SPEED_12500;
618 break;
619 1085
620 case LINK_13GTFD: 1086 /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
621 vars->line_speed = SPEED_13000; 1087 wb_data[0] = 0x3c;
622 break; 1088 wb_data[1] = 0;
1089 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
1090 wb_data, 2);
623 1091
624 case LINK_15GTFD: 1092 udelay(30);
625 vars->line_speed = SPEED_15000;
626 break;
627 1093
628 case LINK_16GTFD: 1094 /* tx MAC SA */
629 vars->line_speed = SPEED_16000; 1095 wb_data[0] = ((params->mac_addr[2] << 24) |
630 break; 1096 (params->mac_addr[3] << 16) |
1097 (params->mac_addr[4] << 8) |
1098 params->mac_addr[5]);
1099 wb_data[1] = ((params->mac_addr[0] << 8) |
1100 params->mac_addr[1]);
1101 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
1102 wb_data, 2);
631 1103
632 default: 1104 udelay(30);
633 break;
634 }
635 1105
636 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED) 1106 /* Configure SAFC */
637 vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX; 1107 wb_data[0] = 0x1000200;
638 else 1108 wb_data[1] = 0;
639 vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_TX; 1109 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
1110 wb_data, 2);
1111 udelay(30);
640 1112
641 if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED) 1113 /* set rx mtu */
642 vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX; 1114 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
643 else 1115 wb_data[1] = 0;
644 vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_RX; 1116 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
1117 udelay(30);
645 1118
646 if (vars->phy_flags & PHY_XGXS_FLAG) { 1119 /* set tx mtu */
647 if (vars->line_speed && 1120 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
648 ((vars->line_speed == SPEED_10) || 1121 wb_data[1] = 0;
649 (vars->line_speed == SPEED_100))) { 1122 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
650 vars->phy_flags |= PHY_SGMII_FLAG; 1123 udelay(30);
651 } else { 1124 /* set cnt max size */
652 vars->phy_flags &= ~PHY_SGMII_FLAG; 1125 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
653 } 1126 wb_data[1] = 0;
654 } 1127 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
1128 udelay(30);
1129 bnx2x_update_pfc_bmac2(params, vars, is_lb);
655 1130
656 /* anything 10 and over uses the bmac */ 1131 return 0;
657 link_10g = ((vars->line_speed == SPEED_10000) || 1132}
658 (vars->line_speed == SPEED_12000) ||
659 (vars->line_speed == SPEED_12500) ||
660 (vars->line_speed == SPEED_13000) ||
661 (vars->line_speed == SPEED_15000) ||
662 (vars->line_speed == SPEED_16000));
663 if (link_10g)
664 vars->mac_type = MAC_TYPE_BMAC;
665 else
666 vars->mac_type = MAC_TYPE_EMAC;
667 1133
668 } else { /* link down */ 1134static u8 bnx2x_bmac_enable(struct link_params *params,
669 DP(NETIF_MSG_LINK, "phy link down\n"); 1135 struct link_vars *vars,
1136 u8 is_lb)
1137{
1138 u8 rc, port = params->port;
1139 struct bnx2x *bp = params->bp;
1140 u32 val;
1141 /* reset and unreset the BigMac */
1142 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1143 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1144 msleep(1);
670 1145
671 vars->phy_link_up = 0; 1146 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1147 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
672 1148
673 vars->line_speed = 0; 1149 /* enable access for bmac registers */
674 vars->duplex = DUPLEX_FULL; 1150 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
675 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
676 1151
677 /* indicate no mac active */ 1152 /* Enable BMAC according to BMAC type*/
678 vars->mac_type = MAC_TYPE_NONE; 1153 if (CHIP_IS_E2(bp))
679 } 1154 rc = bnx2x_bmac2_enable(params, vars, is_lb);
1155 else
1156 rc = bnx2x_bmac1_enable(params, vars, is_lb);
1157 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
1158 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
1159 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
1160 val = 0;
1161 if ((params->feature_config_flags &
1162 FEATURE_CONFIG_PFC_ENABLED) ||
1163 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
1164 val = 1;
1165 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
1166 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
1167 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
1168 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
1169 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
1170 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
680 1171
681 DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n", 1172 vars->mac_type = MAC_TYPE_BMAC;
682 vars->link_status, vars->phy_link_up); 1173 return rc;
683 DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
684 vars->line_speed, vars->duplex, vars->flow_ctrl);
685} 1174}
686 1175
1176
687static void bnx2x_update_mng(struct link_params *params, u32 link_status) 1177static void bnx2x_update_mng(struct link_params *params, u32 link_status)
688{ 1178{
689 struct bnx2x *bp = params->bp; 1179 struct bnx2x *bp = params->bp;
690 1180
691 REG_WR(bp, params->shmem_base + 1181 REG_WR(bp, params->shmem_base +
692 offsetof(struct shmem_region, 1182 offsetof(struct shmem_region,
693 port_mb[params->port].link_status), 1183 port_mb[params->port].link_status), link_status);
694 link_status);
695} 1184}
696 1185
697static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) 1186static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
698{ 1187{
699 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : 1188 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
700 NIG_REG_INGRESS_BMAC0_MEM; 1189 NIG_REG_INGRESS_BMAC0_MEM;
701 u32 wb_data[2]; 1190 u32 wb_data[2];
702 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); 1191 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
703 1192
@@ -706,19 +1195,31 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
706 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) && 1195 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
707 nig_bmac_enable) { 1196 nig_bmac_enable) {
708 1197
709 /* Clear Rx Enable bit in BMAC_CONTROL register */ 1198 if (CHIP_IS_E2(bp)) {
710 REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, 1199 /* Clear Rx Enable bit in BMAC_CONTROL register */
711 wb_data, 2); 1200 REG_RD_DMAE(bp, bmac_addr +
712 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 1201 BIGMAC2_REGISTER_BMAC_CONTROL,
713 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, 1202 wb_data, 2);
714 wb_data, 2); 1203 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
715 1204 REG_WR_DMAE(bp, bmac_addr +
1205 BIGMAC2_REGISTER_BMAC_CONTROL,
1206 wb_data, 2);
1207 } else {
1208 /* Clear Rx Enable bit in BMAC_CONTROL register */
1209 REG_RD_DMAE(bp, bmac_addr +
1210 BIGMAC_REGISTER_BMAC_CONTROL,
1211 wb_data, 2);
1212 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
1213 REG_WR_DMAE(bp, bmac_addr +
1214 BIGMAC_REGISTER_BMAC_CONTROL,
1215 wb_data, 2);
1216 }
716 msleep(1); 1217 msleep(1);
717 } 1218 }
718} 1219}
719 1220
720static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, 1221static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
721 u32 line_speed) 1222 u32 line_speed)
722{ 1223{
723 struct bnx2x *bp = params->bp; 1224 struct bnx2x *bp = params->bp;
724 u8 port = params->port; 1225 u8 port = params->port;
@@ -755,7 +1256,7 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
755 /* update threshold */ 1256 /* update threshold */
756 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); 1257 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
757 /* update init credit */ 1258 /* update init credit */
758 init_crd = 778; /* (800-18-4) */ 1259 init_crd = 778; /* (800-18-4) */
759 1260
760 } else { 1261 } else {
761 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + 1262 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
@@ -800,62 +1301,86 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
800 return 0; 1301 return 0;
801} 1302}
802 1303
803static u32 bnx2x_get_emac_base(struct bnx2x *bp, u32 ext_phy_type, u8 port) 1304/**
1305 * bnx2x_get_emac_base - retrive emac base address
1306 *
1307 * @bp: driver handle
1308 * @mdc_mdio_access: access type
1309 * @port: port id
1310 *
1311 * This function selects the MDC/MDIO access (through emac0 or
1312 * emac1) depend on the mdc_mdio_access, port, port swapped. Each
1313 * phy has a default access mode, which could also be overridden
1314 * by nvram configuration. This parameter, whether this is the
1315 * default phy configuration, or the nvram overrun
1316 * configuration, is passed here as mdc_mdio_access and selects
1317 * the emac_base for the CL45 read/writes operations
1318 */
1319static u32 bnx2x_get_emac_base(struct bnx2x *bp,
1320 u32 mdc_mdio_access, u8 port)
804{ 1321{
805 u32 emac_base; 1322 u32 emac_base = 0;
806 1323 switch (mdc_mdio_access) {
807 switch (ext_phy_type) { 1324 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE:
808 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: 1325 break;
809 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 1326 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0:
810 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 1327 if (REG_RD(bp, NIG_REG_PORT_SWAP))
811 /* All MDC/MDIO is directed through single EMAC */ 1328 emac_base = GRCBASE_EMAC1;
1329 else
1330 emac_base = GRCBASE_EMAC0;
1331 break;
1332 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1:
812 if (REG_RD(bp, NIG_REG_PORT_SWAP)) 1333 if (REG_RD(bp, NIG_REG_PORT_SWAP))
813 emac_base = GRCBASE_EMAC0; 1334 emac_base = GRCBASE_EMAC0;
814 else 1335 else
815 emac_base = GRCBASE_EMAC1; 1336 emac_base = GRCBASE_EMAC1;
816 break; 1337 break;
817 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 1338 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH:
1339 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1340 break;
1341 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED:
818 emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1; 1342 emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
819 break; 1343 break;
820 default: 1344 default:
821 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
822 break; 1345 break;
823 } 1346 }
824 return emac_base; 1347 return emac_base;
825 1348
826} 1349}
827 1350
828u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type, 1351/******************************************************************/
829 u8 phy_addr, u8 devad, u16 reg, u16 val) 1352/* CL45 access functions */
1353/******************************************************************/
1354static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1355 u8 devad, u16 reg, u16 val)
830{ 1356{
831 u32 tmp, saved_mode; 1357 u32 tmp, saved_mode;
832 u8 i, rc = 0; 1358 u8 i, rc = 0;
833 u32 mdio_ctrl = bnx2x_get_emac_base(bp, ext_phy_type, port); 1359 /*
834 1360 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
835 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
836 * (a value of 49==0x31) and make sure that the AUTO poll is off 1361 * (a value of 49==0x31) and make sure that the AUTO poll is off
837 */ 1362 */
838 1363
839 saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 1364 saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
840 tmp = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL | 1365 tmp = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL |
841 EMAC_MDIO_MODE_CLOCK_CNT); 1366 EMAC_MDIO_MODE_CLOCK_CNT);
842 tmp |= (EMAC_MDIO_MODE_CLAUSE_45 | 1367 tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
843 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); 1368 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
844 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp); 1369 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
845 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 1370 REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
846 udelay(40); 1371 udelay(40);
847 1372
848 /* address */ 1373 /* address */
849 1374
850 tmp = ((phy_addr << 21) | (devad << 16) | reg | 1375 tmp = ((phy->addr << 21) | (devad << 16) | reg |
851 EMAC_MDIO_COMM_COMMAND_ADDRESS | 1376 EMAC_MDIO_COMM_COMMAND_ADDRESS |
852 EMAC_MDIO_COMM_START_BUSY); 1377 EMAC_MDIO_COMM_START_BUSY);
853 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); 1378 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
854 1379
855 for (i = 0; i < 50; i++) { 1380 for (i = 0; i < 50; i++) {
856 udelay(10); 1381 udelay(10);
857 1382
858 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); 1383 tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
859 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 1384 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
860 udelay(5); 1385 udelay(5);
861 break; 1386 break;
@@ -863,19 +1388,20 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
863 } 1388 }
864 if (tmp & EMAC_MDIO_COMM_START_BUSY) { 1389 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
865 DP(NETIF_MSG_LINK, "write phy register failed\n"); 1390 DP(NETIF_MSG_LINK, "write phy register failed\n");
1391 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
866 rc = -EFAULT; 1392 rc = -EFAULT;
867 } else { 1393 } else {
868 /* data */ 1394 /* data */
869 tmp = ((phy_addr << 21) | (devad << 16) | val | 1395 tmp = ((phy->addr << 21) | (devad << 16) | val |
870 EMAC_MDIO_COMM_COMMAND_WRITE_45 | 1396 EMAC_MDIO_COMM_COMMAND_WRITE_45 |
871 EMAC_MDIO_COMM_START_BUSY); 1397 EMAC_MDIO_COMM_START_BUSY);
872 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); 1398 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
873 1399
874 for (i = 0; i < 50; i++) { 1400 for (i = 0; i < 50; i++) {
875 udelay(10); 1401 udelay(10);
876 1402
877 tmp = REG_RD(bp, mdio_ctrl + 1403 tmp = REG_RD(bp, phy->mdio_ctrl +
878 EMAC_REG_EMAC_MDIO_COMM); 1404 EMAC_REG_EMAC_MDIO_COMM);
879 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 1405 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
880 udelay(5); 1406 udelay(5);
881 break; 1407 break;
@@ -883,47 +1409,47 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
883 } 1409 }
884 if (tmp & EMAC_MDIO_COMM_START_BUSY) { 1410 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
885 DP(NETIF_MSG_LINK, "write phy register failed\n"); 1411 DP(NETIF_MSG_LINK, "write phy register failed\n");
1412 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
886 rc = -EFAULT; 1413 rc = -EFAULT;
887 } 1414 }
888 } 1415 }
889 1416
890 /* Restore the saved mode */ 1417 /* Restore the saved mode */
891 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode); 1418 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
892 1419
893 return rc; 1420 return rc;
894} 1421}
895 1422
896u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type, 1423static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
897 u8 phy_addr, u8 devad, u16 reg, u16 *ret_val) 1424 u8 devad, u16 reg, u16 *ret_val)
898{ 1425{
899 u32 val, saved_mode; 1426 u32 val, saved_mode;
900 u16 i; 1427 u16 i;
901 u8 rc = 0; 1428 u8 rc = 0;
902 1429 /*
903 u32 mdio_ctrl = bnx2x_get_emac_base(bp, ext_phy_type, port); 1430 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
904 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
905 * (a value of 49==0x31) and make sure that the AUTO poll is off 1431 * (a value of 49==0x31) and make sure that the AUTO poll is off
906 */ 1432 */
907 1433
908 saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 1434 saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
909 val = saved_mode & ((EMAC_MDIO_MODE_AUTO_POLL | 1435 val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
910 EMAC_MDIO_MODE_CLOCK_CNT)); 1436 EMAC_MDIO_MODE_CLOCK_CNT));
911 val |= (EMAC_MDIO_MODE_CLAUSE_45 | 1437 val |= (EMAC_MDIO_MODE_CLAUSE_45 |
912 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); 1438 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
913 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val); 1439 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
914 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 1440 REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
915 udelay(40); 1441 udelay(40);
916 1442
917 /* address */ 1443 /* address */
918 val = ((phy_addr << 21) | (devad << 16) | reg | 1444 val = ((phy->addr << 21) | (devad << 16) | reg |
919 EMAC_MDIO_COMM_COMMAND_ADDRESS | 1445 EMAC_MDIO_COMM_COMMAND_ADDRESS |
920 EMAC_MDIO_COMM_START_BUSY); 1446 EMAC_MDIO_COMM_START_BUSY);
921 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); 1447 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
922 1448
923 for (i = 0; i < 50; i++) { 1449 for (i = 0; i < 50; i++) {
924 udelay(10); 1450 udelay(10);
925 1451
926 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); 1452 val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
927 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 1453 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
928 udelay(5); 1454 udelay(5);
929 break; 1455 break;
@@ -931,22 +1457,22 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
931 } 1457 }
932 if (val & EMAC_MDIO_COMM_START_BUSY) { 1458 if (val & EMAC_MDIO_COMM_START_BUSY) {
933 DP(NETIF_MSG_LINK, "read phy register failed\n"); 1459 DP(NETIF_MSG_LINK, "read phy register failed\n");
934 1460 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
935 *ret_val = 0; 1461 *ret_val = 0;
936 rc = -EFAULT; 1462 rc = -EFAULT;
937 1463
938 } else { 1464 } else {
939 /* data */ 1465 /* data */
940 val = ((phy_addr << 21) | (devad << 16) | 1466 val = ((phy->addr << 21) | (devad << 16) |
941 EMAC_MDIO_COMM_COMMAND_READ_45 | 1467 EMAC_MDIO_COMM_COMMAND_READ_45 |
942 EMAC_MDIO_COMM_START_BUSY); 1468 EMAC_MDIO_COMM_START_BUSY);
943 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); 1469 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
944 1470
945 for (i = 0; i < 50; i++) { 1471 for (i = 0; i < 50; i++) {
946 udelay(10); 1472 udelay(10);
947 1473
948 val = REG_RD(bp, mdio_ctrl + 1474 val = REG_RD(bp, phy->mdio_ctrl +
949 EMAC_REG_EMAC_MDIO_COMM); 1475 EMAC_REG_EMAC_MDIO_COMM);
950 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 1476 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
951 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); 1477 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
952 break; 1478 break;
@@ -954,91 +1480,314 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
954 } 1480 }
955 if (val & EMAC_MDIO_COMM_START_BUSY) { 1481 if (val & EMAC_MDIO_COMM_START_BUSY) {
956 DP(NETIF_MSG_LINK, "read phy register failed\n"); 1482 DP(NETIF_MSG_LINK, "read phy register failed\n");
957 1483 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
958 *ret_val = 0; 1484 *ret_val = 0;
959 rc = -EFAULT; 1485 rc = -EFAULT;
960 } 1486 }
961 } 1487 }
962 1488
963 /* Restore the saved mode */ 1489 /* Restore the saved mode */
964 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode); 1490 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
965 1491
966 return rc; 1492 return rc;
967} 1493}
968 1494
969static void bnx2x_set_aer_mmd(struct link_params *params, 1495u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
970 struct link_vars *vars) 1496 u8 devad, u16 reg, u16 *ret_val)
971{ 1497{
972 struct bnx2x *bp = params->bp; 1498 u8 phy_index;
973 u32 ser_lane; 1499 /*
974 u16 offset; 1500 * Probe for the phy according to the given phy_addr, and execute
1501 * the read request on it
1502 */
1503 for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
1504 if (params->phy[phy_index].addr == phy_addr) {
1505 return bnx2x_cl45_read(params->bp,
1506 &params->phy[phy_index], devad,
1507 reg, ret_val);
1508 }
1509 }
1510 return -EINVAL;
1511}
1512
1513u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
1514 u8 devad, u16 reg, u16 val)
1515{
1516 u8 phy_index;
1517 /*
1518 * Probe for the phy according to the given phy_addr, and execute
1519 * the write request on it
1520 */
1521 for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
1522 if (params->phy[phy_index].addr == phy_addr) {
1523 return bnx2x_cl45_write(params->bp,
1524 &params->phy[phy_index], devad,
1525 reg, val);
1526 }
1527 }
1528 return -EINVAL;
1529}
975 1530
1531static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
1532 struct bnx2x_phy *phy)
1533{
1534 u32 ser_lane;
1535 u16 offset, aer_val;
1536 struct bnx2x *bp = params->bp;
976 ser_lane = ((params->lane_config & 1537 ser_lane = ((params->lane_config &
977 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 1538 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
978 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 1539 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
979 1540
980 offset = (vars->phy_flags & PHY_XGXS_FLAG) ? 1541 offset = phy->addr + ser_lane;
981 (params->phy_addr + ser_lane) : 0; 1542 if (CHIP_IS_E2(bp))
1543 aer_val = 0x3800 + offset - 1;
1544 else
1545 aer_val = 0x3800 + offset;
1546 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
1547 MDIO_AER_BLOCK_AER_REG, aer_val);
1548}
1549static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
1550 struct bnx2x_phy *phy)
1551{
1552 CL22_WR_OVER_CL45(bp, phy,
1553 MDIO_REG_BANK_AER_BLOCK,
1554 MDIO_AER_BLOCK_AER_REG, 0x3800);
1555}
1556
1557/******************************************************************/
1558/* Internal phy section */
1559/******************************************************************/
1560
1561static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port)
1562{
1563 u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1564
1565 /* Set Clause 22 */
1566 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1);
1567 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
1568 udelay(500);
1569 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
1570 udelay(500);
1571 /* Set Clause 45 */
1572 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0);
1573}
1574
1575static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
1576{
1577 u32 val;
1578
1579 DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n");
1580
1581 val = SERDES_RESET_BITS << (port*16);
1582
1583 /* reset and unreset the SerDes/XGXS */
1584 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
1585 udelay(500);
1586 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
1587
1588 bnx2x_set_serdes_access(bp, port);
982 1589
983 CL45_WR_OVER_CL22(bp, params->port, 1590 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
984 params->phy_addr, 1591 DEFAULT_PHY_DEV_ADDR);
985 MDIO_REG_BANK_AER_BLOCK,
986 MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
987} 1592}
988 1593
989static void bnx2x_set_master_ln(struct link_params *params) 1594static void bnx2x_xgxs_deassert(struct link_params *params)
1595{
1596 struct bnx2x *bp = params->bp;
1597 u8 port;
1598 u32 val;
1599 DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n");
1600 port = params->port;
1601
1602 val = XGXS_RESET_BITS << (port*16);
1603
1604 /* reset and unreset the SerDes/XGXS */
1605 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
1606 udelay(500);
1607 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
1608
1609 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
1610 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
1611 params->phy[INT_PHY].def_md_devad);
1612}
1613
1614
1615void bnx2x_link_status_update(struct link_params *params,
1616 struct link_vars *vars)
1617{
1618 struct bnx2x *bp = params->bp;
1619 u8 link_10g;
1620 u8 port = params->port;
1621
1622 vars->link_status = REG_RD(bp, params->shmem_base +
1623 offsetof(struct shmem_region,
1624 port_mb[port].link_status));
1625
1626 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
1627
1628 if (vars->link_up) {
1629 DP(NETIF_MSG_LINK, "phy link up\n");
1630
1631 vars->phy_link_up = 1;
1632 vars->duplex = DUPLEX_FULL;
1633 switch (vars->link_status &
1634 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
1635 case LINK_10THD:
1636 vars->duplex = DUPLEX_HALF;
1637 /* fall thru */
1638 case LINK_10TFD:
1639 vars->line_speed = SPEED_10;
1640 break;
1641
1642 case LINK_100TXHD:
1643 vars->duplex = DUPLEX_HALF;
1644 /* fall thru */
1645 case LINK_100T4:
1646 case LINK_100TXFD:
1647 vars->line_speed = SPEED_100;
1648 break;
1649
1650 case LINK_1000THD:
1651 vars->duplex = DUPLEX_HALF;
1652 /* fall thru */
1653 case LINK_1000TFD:
1654 vars->line_speed = SPEED_1000;
1655 break;
1656
1657 case LINK_2500THD:
1658 vars->duplex = DUPLEX_HALF;
1659 /* fall thru */
1660 case LINK_2500TFD:
1661 vars->line_speed = SPEED_2500;
1662 break;
1663
1664 case LINK_10GTFD:
1665 vars->line_speed = SPEED_10000;
1666 break;
1667
1668 case LINK_12GTFD:
1669 vars->line_speed = SPEED_12000;
1670 break;
1671
1672 case LINK_12_5GTFD:
1673 vars->line_speed = SPEED_12500;
1674 break;
1675
1676 case LINK_13GTFD:
1677 vars->line_speed = SPEED_13000;
1678 break;
1679
1680 case LINK_15GTFD:
1681 vars->line_speed = SPEED_15000;
1682 break;
1683
1684 case LINK_16GTFD:
1685 vars->line_speed = SPEED_16000;
1686 break;
1687
1688 default:
1689 break;
1690 }
1691 vars->flow_ctrl = 0;
1692 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
1693 vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
1694
1695 if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
1696 vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
1697
1698 if (!vars->flow_ctrl)
1699 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1700
1701 if (vars->line_speed &&
1702 ((vars->line_speed == SPEED_10) ||
1703 (vars->line_speed == SPEED_100))) {
1704 vars->phy_flags |= PHY_SGMII_FLAG;
1705 } else {
1706 vars->phy_flags &= ~PHY_SGMII_FLAG;
1707 }
1708
1709 /* anything 10 and over uses the bmac */
1710 link_10g = ((vars->line_speed == SPEED_10000) ||
1711 (vars->line_speed == SPEED_12000) ||
1712 (vars->line_speed == SPEED_12500) ||
1713 (vars->line_speed == SPEED_13000) ||
1714 (vars->line_speed == SPEED_15000) ||
1715 (vars->line_speed == SPEED_16000));
1716 if (link_10g)
1717 vars->mac_type = MAC_TYPE_BMAC;
1718 else
1719 vars->mac_type = MAC_TYPE_EMAC;
1720
1721 } else { /* link down */
1722 DP(NETIF_MSG_LINK, "phy link down\n");
1723
1724 vars->phy_link_up = 0;
1725
1726 vars->line_speed = 0;
1727 vars->duplex = DUPLEX_FULL;
1728 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1729
1730 /* indicate no mac active */
1731 vars->mac_type = MAC_TYPE_NONE;
1732 }
1733
1734 DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n",
1735 vars->link_status, vars->phy_link_up);
1736 DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
1737 vars->line_speed, vars->duplex, vars->flow_ctrl);
1738}
1739
1740
1741static void bnx2x_set_master_ln(struct link_params *params,
1742 struct bnx2x_phy *phy)
990{ 1743{
991 struct bnx2x *bp = params->bp; 1744 struct bnx2x *bp = params->bp;
992 u16 new_master_ln, ser_lane; 1745 u16 new_master_ln, ser_lane;
993 ser_lane = ((params->lane_config & 1746 ser_lane = ((params->lane_config &
994 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 1747 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
995 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 1748 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
996 1749
997 /* set the master_ln for AN */ 1750 /* set the master_ln for AN */
998 CL45_RD_OVER_CL22(bp, params->port, 1751 CL22_RD_OVER_CL45(bp, phy,
999 params->phy_addr, 1752 MDIO_REG_BANK_XGXS_BLOCK2,
1000 MDIO_REG_BANK_XGXS_BLOCK2, 1753 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1001 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 1754 &new_master_ln);
1002 &new_master_ln); 1755
1003 1756 CL22_WR_OVER_CL45(bp, phy,
1004 CL45_WR_OVER_CL22(bp, params->port, 1757 MDIO_REG_BANK_XGXS_BLOCK2 ,
1005 params->phy_addr, 1758 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1006 MDIO_REG_BANK_XGXS_BLOCK2 , 1759 (new_master_ln | ser_lane));
1007 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1008 (new_master_ln | ser_lane));
1009} 1760}
1010 1761
1011static u8 bnx2x_reset_unicore(struct link_params *params) 1762static u8 bnx2x_reset_unicore(struct link_params *params,
1763 struct bnx2x_phy *phy,
1764 u8 set_serdes)
1012{ 1765{
1013 struct bnx2x *bp = params->bp; 1766 struct bnx2x *bp = params->bp;
1014 u16 mii_control; 1767 u16 mii_control;
1015 u16 i; 1768 u16 i;
1016 1769 CL22_RD_OVER_CL45(bp, phy,
1017 CL45_RD_OVER_CL22(bp, params->port, 1770 MDIO_REG_BANK_COMBO_IEEE0,
1018 params->phy_addr, 1771 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
1019 MDIO_REG_BANK_COMBO_IEEE0,
1020 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
1021 1772
1022 /* reset the unicore */ 1773 /* reset the unicore */
1023 CL45_WR_OVER_CL22(bp, params->port, 1774 CL22_WR_OVER_CL45(bp, phy,
1024 params->phy_addr, 1775 MDIO_REG_BANK_COMBO_IEEE0,
1025 MDIO_REG_BANK_COMBO_IEEE0, 1776 MDIO_COMBO_IEEE0_MII_CONTROL,
1026 MDIO_COMBO_IEEE0_MII_CONTROL, 1777 (mii_control |
1027 (mii_control | 1778 MDIO_COMBO_IEEO_MII_CONTROL_RESET));
1028 MDIO_COMBO_IEEO_MII_CONTROL_RESET)); 1779 if (set_serdes)
1029 if (params->switch_cfg == SWITCH_CFG_1G) 1780 bnx2x_set_serdes_access(bp, params->port);
1030 bnx2x_set_serdes_access(params);
1031 1781
1032 /* wait for the reset to self clear */ 1782 /* wait for the reset to self clear */
1033 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) { 1783 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
1034 udelay(5); 1784 udelay(5);
1035 1785
1036 /* the reset erased the previous bank value */ 1786 /* the reset erased the previous bank value */
1037 CL45_RD_OVER_CL22(bp, params->port, 1787 CL22_RD_OVER_CL45(bp, phy,
1038 params->phy_addr, 1788 MDIO_REG_BANK_COMBO_IEEE0,
1039 MDIO_REG_BANK_COMBO_IEEE0, 1789 MDIO_COMBO_IEEE0_MII_CONTROL,
1040 MDIO_COMBO_IEEE0_MII_CONTROL, 1790 &mii_control);
1041 &mii_control);
1042 1791
1043 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) { 1792 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
1044 udelay(5); 1793 udelay(5);
@@ -1046,131 +1795,125 @@ static u8 bnx2x_reset_unicore(struct link_params *params)
1046 } 1795 }
1047 } 1796 }
1048 1797
1798 netdev_err(bp->dev, "Warning: PHY was not initialized,"
1799 " Port %d\n",
1800 params->port);
1049 DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n"); 1801 DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
1050 return -EINVAL; 1802 return -EINVAL;
1051 1803
1052} 1804}
1053 1805
1054static void bnx2x_set_swap_lanes(struct link_params *params) 1806static void bnx2x_set_swap_lanes(struct link_params *params,
1807 struct bnx2x_phy *phy)
1055{ 1808{
1056 struct bnx2x *bp = params->bp; 1809 struct bnx2x *bp = params->bp;
1057 /* Each two bits represents a lane number: 1810 /*
1058 No swap is 0123 => 0x1b no need to enable the swap */ 1811 * Each two bits represents a lane number:
1812 * No swap is 0123 => 0x1b no need to enable the swap
1813 */
1059 u16 ser_lane, rx_lane_swap, tx_lane_swap; 1814 u16 ser_lane, rx_lane_swap, tx_lane_swap;
1060 1815
1061 ser_lane = ((params->lane_config & 1816 ser_lane = ((params->lane_config &
1062 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 1817 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
1063 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 1818 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
1064 rx_lane_swap = ((params->lane_config & 1819 rx_lane_swap = ((params->lane_config &
1065 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> 1820 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
1066 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); 1821 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
1067 tx_lane_swap = ((params->lane_config & 1822 tx_lane_swap = ((params->lane_config &
1068 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >> 1823 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
1069 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); 1824 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
1070 1825
1071 if (rx_lane_swap != 0x1b) { 1826 if (rx_lane_swap != 0x1b) {
1072 CL45_WR_OVER_CL22(bp, params->port, 1827 CL22_WR_OVER_CL45(bp, phy,
1073 params->phy_addr, 1828 MDIO_REG_BANK_XGXS_BLOCK2,
1074 MDIO_REG_BANK_XGXS_BLOCK2, 1829 MDIO_XGXS_BLOCK2_RX_LN_SWAP,
1075 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 1830 (rx_lane_swap |
1076 (rx_lane_swap | 1831 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
1077 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | 1832 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
1078 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
1079 } else { 1833 } else {
1080 CL45_WR_OVER_CL22(bp, params->port, 1834 CL22_WR_OVER_CL45(bp, phy,
1081 params->phy_addr, 1835 MDIO_REG_BANK_XGXS_BLOCK2,
1082 MDIO_REG_BANK_XGXS_BLOCK2, 1836 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
1083 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
1084 } 1837 }
1085 1838
1086 if (tx_lane_swap != 0x1b) { 1839 if (tx_lane_swap != 0x1b) {
1087 CL45_WR_OVER_CL22(bp, params->port, 1840 CL22_WR_OVER_CL45(bp, phy,
1088 params->phy_addr, 1841 MDIO_REG_BANK_XGXS_BLOCK2,
1089 MDIO_REG_BANK_XGXS_BLOCK2, 1842 MDIO_XGXS_BLOCK2_TX_LN_SWAP,
1090 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 1843 (tx_lane_swap |
1091 (tx_lane_swap | 1844 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
1092 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
1093 } else { 1845 } else {
1094 CL45_WR_OVER_CL22(bp, params->port, 1846 CL22_WR_OVER_CL45(bp, phy,
1095 params->phy_addr, 1847 MDIO_REG_BANK_XGXS_BLOCK2,
1096 MDIO_REG_BANK_XGXS_BLOCK2, 1848 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
1097 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
1098 } 1849 }
1099} 1850}
1100 1851
1101static void bnx2x_set_parallel_detection(struct link_params *params, 1852static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
1102 u8 phy_flags) 1853 struct link_params *params)
1103{ 1854{
1104 struct bnx2x *bp = params->bp; 1855 struct bnx2x *bp = params->bp;
1105 u16 control2; 1856 u16 control2;
1106 1857 CL22_RD_OVER_CL45(bp, phy,
1107 CL45_RD_OVER_CL22(bp, params->port, 1858 MDIO_REG_BANK_SERDES_DIGITAL,
1108 params->phy_addr, 1859 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1109 MDIO_REG_BANK_SERDES_DIGITAL, 1860 &control2);
1110 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1861 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1111 &control2);
1112 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1113 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1862 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1114 else 1863 else
1115 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1864 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1116 DP(NETIF_MSG_LINK, "params->speed_cap_mask = 0x%x, control2 = 0x%x\n", 1865 DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
1117 params->speed_cap_mask, control2); 1866 phy->speed_cap_mask, control2);
1118 CL45_WR_OVER_CL22(bp, params->port, 1867 CL22_WR_OVER_CL45(bp, phy,
1119 params->phy_addr, 1868 MDIO_REG_BANK_SERDES_DIGITAL,
1120 MDIO_REG_BANK_SERDES_DIGITAL, 1869 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1121 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1870 control2);
1122 control2); 1871
1123 1872 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
1124 if ((phy_flags & PHY_XGXS_FLAG) && 1873 (phy->speed_cap_mask &
1125 (params->speed_cap_mask &
1126 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 1874 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
1127 DP(NETIF_MSG_LINK, "XGXS\n"); 1875 DP(NETIF_MSG_LINK, "XGXS\n");
1128 1876
1129 CL45_WR_OVER_CL22(bp, params->port, 1877 CL22_WR_OVER_CL45(bp, phy,
1130 params->phy_addr, 1878 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1131 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1879 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
1132 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, 1880 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
1133 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
1134 1881
1135 CL45_RD_OVER_CL22(bp, params->port, 1882 CL22_RD_OVER_CL45(bp, phy,
1136 params->phy_addr, 1883 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1137 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1884 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1138 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 1885 &control2);
1139 &control2);
1140 1886
1141 1887
1142 control2 |= 1888 control2 |=
1143 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; 1889 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
1144 1890
1145 CL45_WR_OVER_CL22(bp, params->port, 1891 CL22_WR_OVER_CL45(bp, phy,
1146 params->phy_addr, 1892 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1147 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1893 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1148 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 1894 control2);
1149 control2);
1150 1895
1151 /* Disable parallel detection of HiG */ 1896 /* Disable parallel detection of HiG */
1152 CL45_WR_OVER_CL22(bp, params->port, 1897 CL22_WR_OVER_CL45(bp, phy,
1153 params->phy_addr, 1898 MDIO_REG_BANK_XGXS_BLOCK2,
1154 MDIO_REG_BANK_XGXS_BLOCK2, 1899 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
1155 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, 1900 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
1156 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | 1901 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
1157 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
1158 } 1902 }
1159} 1903}
1160 1904
1161static void bnx2x_set_autoneg(struct link_params *params, 1905static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1162 struct link_vars *vars, 1906 struct link_params *params,
1163 u8 enable_cl73) 1907 struct link_vars *vars,
1908 u8 enable_cl73)
1164{ 1909{
1165 struct bnx2x *bp = params->bp; 1910 struct bnx2x *bp = params->bp;
1166 u16 reg_val; 1911 u16 reg_val;
1167 1912
1168 /* CL37 Autoneg */ 1913 /* CL37 Autoneg */
1169 1914 CL22_RD_OVER_CL45(bp, phy,
1170 CL45_RD_OVER_CL22(bp, params->port, 1915 MDIO_REG_BANK_COMBO_IEEE0,
1171 params->phy_addr, 1916 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1172 MDIO_REG_BANK_COMBO_IEEE0,
1173 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1174 1917
1175 /* CL37 Autoneg Enabled */ 1918 /* CL37 Autoneg Enabled */
1176 if (vars->line_speed == SPEED_AUTO_NEG) 1919 if (vars->line_speed == SPEED_AUTO_NEG)
@@ -1179,17 +1922,15 @@ static void bnx2x_set_autoneg(struct link_params *params,
1179 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 1922 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1180 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN); 1923 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
1181 1924
1182 CL45_WR_OVER_CL22(bp, params->port, 1925 CL22_WR_OVER_CL45(bp, phy,
1183 params->phy_addr, 1926 MDIO_REG_BANK_COMBO_IEEE0,
1184 MDIO_REG_BANK_COMBO_IEEE0, 1927 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
1185 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
1186 1928
1187 /* Enable/Disable Autodetection */ 1929 /* Enable/Disable Autodetection */
1188 1930
1189 CL45_RD_OVER_CL22(bp, params->port, 1931 CL22_RD_OVER_CL45(bp, phy,
1190 params->phy_addr, 1932 MDIO_REG_BANK_SERDES_DIGITAL,
1191 MDIO_REG_BANK_SERDES_DIGITAL, 1933 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
1192 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
1193 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN | 1934 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
1194 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT); 1935 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
1195 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE; 1936 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
@@ -1198,16 +1939,14 @@ static void bnx2x_set_autoneg(struct link_params *params,
1198 else 1939 else
1199 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; 1940 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
1200 1941
1201 CL45_WR_OVER_CL22(bp, params->port, 1942 CL22_WR_OVER_CL45(bp, phy,
1202 params->phy_addr, 1943 MDIO_REG_BANK_SERDES_DIGITAL,
1203 MDIO_REG_BANK_SERDES_DIGITAL, 1944 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
1204 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
1205 1945
1206 /* Enable TetonII and BAM autoneg */ 1946 /* Enable TetonII and BAM autoneg */
1207 CL45_RD_OVER_CL22(bp, params->port, 1947 CL22_RD_OVER_CL45(bp, phy,
1208 params->phy_addr, 1948 MDIO_REG_BANK_BAM_NEXT_PAGE,
1209 MDIO_REG_BANK_BAM_NEXT_PAGE, 1949 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1210 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1211 &reg_val); 1950 &reg_val);
1212 if (vars->line_speed == SPEED_AUTO_NEG) { 1951 if (vars->line_speed == SPEED_AUTO_NEG) {
1213 /* Enable BAM aneg Mode and TetonII aneg Mode */ 1952 /* Enable BAM aneg Mode and TetonII aneg Mode */
@@ -1218,23 +1957,20 @@ static void bnx2x_set_autoneg(struct link_params *params,
1218 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | 1957 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
1219 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); 1958 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
1220 } 1959 }
1221 CL45_WR_OVER_CL22(bp, params->port, 1960 CL22_WR_OVER_CL45(bp, phy,
1222 params->phy_addr, 1961 MDIO_REG_BANK_BAM_NEXT_PAGE,
1223 MDIO_REG_BANK_BAM_NEXT_PAGE, 1962 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1224 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1963 reg_val);
1225 reg_val);
1226 1964
1227 if (enable_cl73) { 1965 if (enable_cl73) {
1228 /* Enable Cl73 FSM status bits */ 1966 /* Enable Cl73 FSM status bits */
1229 CL45_WR_OVER_CL22(bp, params->port, 1967 CL22_WR_OVER_CL45(bp, phy,
1230 params->phy_addr, 1968 MDIO_REG_BANK_CL73_USERB0,
1231 MDIO_REG_BANK_CL73_USERB0, 1969 MDIO_CL73_USERB0_CL73_UCTRL,
1232 MDIO_CL73_USERB0_CL73_UCTRL, 1970 0xe);
1233 0xe);
1234 1971
1235 /* Enable BAM Station Manager*/ 1972 /* Enable BAM Station Manager*/
1236 CL45_WR_OVER_CL22(bp, params->port, 1973 CL22_WR_OVER_CL45(bp, phy,
1237 params->phy_addr,
1238 MDIO_REG_BANK_CL73_USERB0, 1974 MDIO_REG_BANK_CL73_USERB0,
1239 MDIO_CL73_USERB0_CL73_BAM_CTRL1, 1975 MDIO_CL73_USERB0_CL73_BAM_CTRL1,
1240 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN | 1976 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -1242,23 +1978,21 @@ static void bnx2x_set_autoneg(struct link_params *params,
1242 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); 1978 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
1243 1979
1244 /* Advertise CL73 link speeds */ 1980 /* Advertise CL73 link speeds */
1245 CL45_RD_OVER_CL22(bp, params->port, 1981 CL22_RD_OVER_CL45(bp, phy,
1246 params->phy_addr, 1982 MDIO_REG_BANK_CL73_IEEEB1,
1247 MDIO_REG_BANK_CL73_IEEEB1, 1983 MDIO_CL73_IEEEB1_AN_ADV2,
1248 MDIO_CL73_IEEEB1_AN_ADV2, 1984 &reg_val);
1249 &reg_val); 1985 if (phy->speed_cap_mask &
1250 if (params->speed_cap_mask &
1251 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 1986 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
1252 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4; 1987 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
1253 if (params->speed_cap_mask & 1988 if (phy->speed_cap_mask &
1254 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 1989 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1255 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX; 1990 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
1256 1991
1257 CL45_WR_OVER_CL22(bp, params->port, 1992 CL22_WR_OVER_CL45(bp, phy,
1258 params->phy_addr, 1993 MDIO_REG_BANK_CL73_IEEEB1,
1259 MDIO_REG_BANK_CL73_IEEEB1, 1994 MDIO_CL73_IEEEB1_AN_ADV2,
1260 MDIO_CL73_IEEEB1_AN_ADV2, 1995 reg_val);
1261 reg_val);
1262 1996
1263 /* CL73 Autoneg Enabled */ 1997 /* CL73 Autoneg Enabled */
1264 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN; 1998 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
@@ -1266,40 +2000,39 @@ static void bnx2x_set_autoneg(struct link_params *params,
1266 } else /* CL73 Autoneg Disabled */ 2000 } else /* CL73 Autoneg Disabled */
1267 reg_val = 0; 2001 reg_val = 0;
1268 2002
1269 CL45_WR_OVER_CL22(bp, params->port, 2003 CL22_WR_OVER_CL45(bp, phy,
1270 params->phy_addr, 2004 MDIO_REG_BANK_CL73_IEEEB0,
1271 MDIO_REG_BANK_CL73_IEEEB0, 2005 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
1272 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
1273} 2006}
1274 2007
1275/* program SerDes, forced speed */ 2008/* program SerDes, forced speed */
1276static void bnx2x_program_serdes(struct link_params *params, 2009static void bnx2x_program_serdes(struct bnx2x_phy *phy,
1277 struct link_vars *vars) 2010 struct link_params *params,
2011 struct link_vars *vars)
1278{ 2012{
1279 struct bnx2x *bp = params->bp; 2013 struct bnx2x *bp = params->bp;
1280 u16 reg_val; 2014 u16 reg_val;
1281 2015
1282 /* program duplex, disable autoneg and sgmii*/ 2016 /* program duplex, disable autoneg and sgmii*/
1283 CL45_RD_OVER_CL22(bp, params->port, 2017 CL22_RD_OVER_CL45(bp, phy,
1284 params->phy_addr, 2018 MDIO_REG_BANK_COMBO_IEEE0,
1285 MDIO_REG_BANK_COMBO_IEEE0, 2019 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1286 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1287 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX | 2020 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
1288 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 2021 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1289 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK); 2022 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
1290 if (params->req_duplex == DUPLEX_FULL) 2023 if (phy->req_duplex == DUPLEX_FULL)
1291 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 2024 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
1292 CL45_WR_OVER_CL22(bp, params->port, 2025 CL22_WR_OVER_CL45(bp, phy,
1293 params->phy_addr, 2026 MDIO_REG_BANK_COMBO_IEEE0,
1294 MDIO_REG_BANK_COMBO_IEEE0, 2027 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
1295 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 2028
1296 2029 /*
1297 /* program speed 2030 * program speed
1298 - needed only if the speed is greater than 1G (2.5G or 10G) */ 2031 * - needed only if the speed is greater than 1G (2.5G or 10G)
1299 CL45_RD_OVER_CL22(bp, params->port, 2032 */
1300 params->phy_addr, 2033 CL22_RD_OVER_CL45(bp, phy,
1301 MDIO_REG_BANK_SERDES_DIGITAL, 2034 MDIO_REG_BANK_SERDES_DIGITAL,
1302 MDIO_SERDES_DIGITAL_MISC1, &reg_val); 2035 MDIO_SERDES_DIGITAL_MISC1, &reg_val);
1303 /* clearing the speed value before setting the right speed */ 2036 /* clearing the speed value before setting the right speed */
1304 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); 2037 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
1305 2038
@@ -1320,14 +2053,14 @@ static void bnx2x_program_serdes(struct link_params *params,
1320 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; 2053 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
1321 } 2054 }
1322 2055
1323 CL45_WR_OVER_CL22(bp, params->port, 2056 CL22_WR_OVER_CL45(bp, phy,
1324 params->phy_addr, 2057 MDIO_REG_BANK_SERDES_DIGITAL,
1325 MDIO_REG_BANK_SERDES_DIGITAL, 2058 MDIO_SERDES_DIGITAL_MISC1, reg_val);
1326 MDIO_SERDES_DIGITAL_MISC1, reg_val);
1327 2059
1328} 2060}
1329 2061
1330static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params) 2062static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
2063 struct link_params *params)
1331{ 2064{
1332 struct bnx2x *bp = params->bp; 2065 struct bnx2x *bp = params->bp;
1333 u16 val = 0; 2066 u16 val = 0;
@@ -1335,41 +2068,39 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
1335 /* configure the 48 bits for BAM AN */ 2068 /* configure the 48 bits for BAM AN */
1336 2069
1337 /* set extended capabilities */ 2070 /* set extended capabilities */
1338 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) 2071 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
1339 val |= MDIO_OVER_1G_UP1_2_5G; 2072 val |= MDIO_OVER_1G_UP1_2_5G;
1340 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2073 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
1341 val |= MDIO_OVER_1G_UP1_10G; 2074 val |= MDIO_OVER_1G_UP1_10G;
1342 CL45_WR_OVER_CL22(bp, params->port, 2075 CL22_WR_OVER_CL45(bp, phy,
1343 params->phy_addr, 2076 MDIO_REG_BANK_OVER_1G,
1344 MDIO_REG_BANK_OVER_1G, 2077 MDIO_OVER_1G_UP1, val);
1345 MDIO_OVER_1G_UP1, val);
1346 2078
1347 CL45_WR_OVER_CL22(bp, params->port, 2079 CL22_WR_OVER_CL45(bp, phy,
1348 params->phy_addr, 2080 MDIO_REG_BANK_OVER_1G,
1349 MDIO_REG_BANK_OVER_1G, 2081 MDIO_OVER_1G_UP3, 0x400);
1350 MDIO_OVER_1G_UP3, 0x400);
1351} 2082}
1352 2083
1353static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc) 2084static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
2085 struct link_params *params, u16 *ieee_fc)
1354{ 2086{
1355 struct bnx2x *bp = params->bp; 2087 struct bnx2x *bp = params->bp;
1356 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 2088 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
1357 /* resolve pause mode and advertisement 2089 /*
1358 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 2090 * Resolve pause mode and advertisement.
2091 * Please refer to Table 28B-3 of the 802.3ab-1999 spec
2092 */
1359 2093
1360 switch (params->req_flow_ctrl) { 2094 switch (phy->req_flow_ctrl) {
1361 case BNX2X_FLOW_CTRL_AUTO: 2095 case BNX2X_FLOW_CTRL_AUTO:
1362 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) { 2096 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
1363 *ieee_fc |= 2097 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
1364 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 2098 else
1365 } else {
1366 *ieee_fc |= 2099 *ieee_fc |=
1367 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 2100 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
1368 }
1369 break; 2101 break;
1370 case BNX2X_FLOW_CTRL_TX: 2102 case BNX2X_FLOW_CTRL_TX:
1371 *ieee_fc |= 2103 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
1372 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
1373 break; 2104 break;
1374 2105
1375 case BNX2X_FLOW_CTRL_RX: 2106 case BNX2X_FLOW_CTRL_RX:
@@ -1385,30 +2116,30 @@ static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
1385 DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc); 2116 DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
1386} 2117}
1387 2118
1388static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params, 2119static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
1389 u16 ieee_fc) 2120 struct link_params *params,
2121 u16 ieee_fc)
1390{ 2122{
1391 struct bnx2x *bp = params->bp; 2123 struct bnx2x *bp = params->bp;
1392 u16 val; 2124 u16 val;
1393 /* for AN, we are always publishing full duplex */ 2125 /* for AN, we are always publishing full duplex */
1394 2126
1395 CL45_WR_OVER_CL22(bp, params->port, 2127 CL22_WR_OVER_CL45(bp, phy,
1396 params->phy_addr, 2128 MDIO_REG_BANK_COMBO_IEEE0,
1397 MDIO_REG_BANK_COMBO_IEEE0, 2129 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
1398 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); 2130 CL22_RD_OVER_CL45(bp, phy,
1399 CL45_RD_OVER_CL22(bp, params->port, 2131 MDIO_REG_BANK_CL73_IEEEB1,
1400 params->phy_addr, 2132 MDIO_CL73_IEEEB1_AN_ADV1, &val);
1401 MDIO_REG_BANK_CL73_IEEEB1,
1402 MDIO_CL73_IEEEB1_AN_ADV1, &val);
1403 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH; 2133 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
1404 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK); 2134 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
1405 CL45_WR_OVER_CL22(bp, params->port, 2135 CL22_WR_OVER_CL45(bp, phy,
1406 params->phy_addr, 2136 MDIO_REG_BANK_CL73_IEEEB1,
1407 MDIO_REG_BANK_CL73_IEEEB1, 2137 MDIO_CL73_IEEEB1_AN_ADV1, val);
1408 MDIO_CL73_IEEEB1_AN_ADV1, val);
1409} 2138}
1410 2139
1411static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73) 2140static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
2141 struct link_params *params,
2142 u8 enable_cl73)
1412{ 2143{
1413 struct bnx2x *bp = params->bp; 2144 struct bnx2x *bp = params->bp;
1414 u16 mii_control; 2145 u16 mii_control;
@@ -1417,73 +2148,67 @@ static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
1417 /* Enable and restart BAM/CL37 aneg */ 2148 /* Enable and restart BAM/CL37 aneg */
1418 2149
1419 if (enable_cl73) { 2150 if (enable_cl73) {
1420 CL45_RD_OVER_CL22(bp, params->port, 2151 CL22_RD_OVER_CL45(bp, phy,
1421 params->phy_addr, 2152 MDIO_REG_BANK_CL73_IEEEB0,
1422 MDIO_REG_BANK_CL73_IEEEB0, 2153 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1423 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2154 &mii_control);
1424 &mii_control); 2155
1425 2156 CL22_WR_OVER_CL45(bp, phy,
1426 CL45_WR_OVER_CL22(bp, params->port, 2157 MDIO_REG_BANK_CL73_IEEEB0,
1427 params->phy_addr, 2158 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1428 MDIO_REG_BANK_CL73_IEEEB0, 2159 (mii_control |
1429 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2160 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
1430 (mii_control | 2161 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
1431 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
1432 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
1433 } else { 2162 } else {
1434 2163
1435 CL45_RD_OVER_CL22(bp, params->port, 2164 CL22_RD_OVER_CL45(bp, phy,
1436 params->phy_addr, 2165 MDIO_REG_BANK_COMBO_IEEE0,
1437 MDIO_REG_BANK_COMBO_IEEE0, 2166 MDIO_COMBO_IEEE0_MII_CONTROL,
1438 MDIO_COMBO_IEEE0_MII_CONTROL, 2167 &mii_control);
1439 &mii_control);
1440 DP(NETIF_MSG_LINK, 2168 DP(NETIF_MSG_LINK,
1441 "bnx2x_restart_autoneg mii_control before = 0x%x\n", 2169 "bnx2x_restart_autoneg mii_control before = 0x%x\n",
1442 mii_control); 2170 mii_control);
1443 CL45_WR_OVER_CL22(bp, params->port, 2171 CL22_WR_OVER_CL45(bp, phy,
1444 params->phy_addr, 2172 MDIO_REG_BANK_COMBO_IEEE0,
1445 MDIO_REG_BANK_COMBO_IEEE0, 2173 MDIO_COMBO_IEEE0_MII_CONTROL,
1446 MDIO_COMBO_IEEE0_MII_CONTROL, 2174 (mii_control |
1447 (mii_control | 2175 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1448 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 2176 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
1449 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
1450 } 2177 }
1451} 2178}
1452 2179
1453static void bnx2x_initialize_sgmii_process(struct link_params *params, 2180static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
1454 struct link_vars *vars) 2181 struct link_params *params,
2182 struct link_vars *vars)
1455{ 2183{
1456 struct bnx2x *bp = params->bp; 2184 struct bnx2x *bp = params->bp;
1457 u16 control1; 2185 u16 control1;
1458 2186
1459 /* in SGMII mode, the unicore is always slave */ 2187 /* in SGMII mode, the unicore is always slave */
1460 2188
1461 CL45_RD_OVER_CL22(bp, params->port, 2189 CL22_RD_OVER_CL45(bp, phy,
1462 params->phy_addr, 2190 MDIO_REG_BANK_SERDES_DIGITAL,
1463 MDIO_REG_BANK_SERDES_DIGITAL, 2191 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
1464 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 2192 &control1);
1465 &control1);
1466 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; 2193 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
1467 /* set sgmii mode (and not fiber) */ 2194 /* set sgmii mode (and not fiber) */
1468 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | 2195 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
1469 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | 2196 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
1470 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); 2197 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
1471 CL45_WR_OVER_CL22(bp, params->port, 2198 CL22_WR_OVER_CL45(bp, phy,
1472 params->phy_addr, 2199 MDIO_REG_BANK_SERDES_DIGITAL,
1473 MDIO_REG_BANK_SERDES_DIGITAL, 2200 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
1474 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 2201 control1);
1475 control1);
1476 2202
1477 /* if forced speed */ 2203 /* if forced speed */
1478 if (!(vars->line_speed == SPEED_AUTO_NEG)) { 2204 if (!(vars->line_speed == SPEED_AUTO_NEG)) {
1479 /* set speed, disable autoneg */ 2205 /* set speed, disable autoneg */
1480 u16 mii_control; 2206 u16 mii_control;
1481 2207
1482 CL45_RD_OVER_CL22(bp, params->port, 2208 CL22_RD_OVER_CL45(bp, phy,
1483 params->phy_addr, 2209 MDIO_REG_BANK_COMBO_IEEE0,
1484 MDIO_REG_BANK_COMBO_IEEE0, 2210 MDIO_COMBO_IEEE0_MII_CONTROL,
1485 MDIO_COMBO_IEEE0_MII_CONTROL, 2211 &mii_control);
1486 &mii_control);
1487 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 2212 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1488 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| 2213 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
1489 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); 2214 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
@@ -1508,18 +2233,17 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
1508 } 2233 }
1509 2234
1510 /* setting the full duplex */ 2235 /* setting the full duplex */
1511 if (params->req_duplex == DUPLEX_FULL) 2236 if (phy->req_duplex == DUPLEX_FULL)
1512 mii_control |= 2237 mii_control |=
1513 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 2238 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
1514 CL45_WR_OVER_CL22(bp, params->port, 2239 CL22_WR_OVER_CL45(bp, phy,
1515 params->phy_addr, 2240 MDIO_REG_BANK_COMBO_IEEE0,
1516 MDIO_REG_BANK_COMBO_IEEE0, 2241 MDIO_COMBO_IEEE0_MII_CONTROL,
1517 MDIO_COMBO_IEEE0_MII_CONTROL, 2242 mii_control);
1518 mii_control);
1519 2243
1520 } else { /* AN mode */ 2244 } else { /* AN mode */
1521 /* enable and restart AN */ 2245 /* enable and restart AN */
1522 bnx2x_restart_autoneg(params, 0); 2246 bnx2x_restart_autoneg(phy, params, 0);
1523 } 2247 }
1524} 2248}
1525 2249
@@ -1530,124 +2254,56 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
1530 2254
1531static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) 2255static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
1532{ /* LD LP */ 2256{ /* LD LP */
1533 switch (pause_result) { /* ASYM P ASYM P */ 2257 switch (pause_result) { /* ASYM P ASYM P */
1534 case 0xb: /* 1 0 1 1 */ 2258 case 0xb: /* 1 0 1 1 */
1535 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; 2259 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
1536 break; 2260 break;
1537 2261
1538 case 0xe: /* 1 1 1 0 */ 2262 case 0xe: /* 1 1 1 0 */
1539 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; 2263 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
1540 break; 2264 break;
1541 2265
1542 case 0x5: /* 0 1 0 1 */ 2266 case 0x5: /* 0 1 0 1 */
1543 case 0x7: /* 0 1 1 1 */ 2267 case 0x7: /* 0 1 1 1 */
1544 case 0xd: /* 1 1 0 1 */ 2268 case 0xd: /* 1 1 0 1 */
1545 case 0xf: /* 1 1 1 1 */ 2269 case 0xf: /* 1 1 1 1 */
1546 vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; 2270 vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
1547 break; 2271 break;
1548 2272
1549 default: 2273 default:
1550 break; 2274 break;
1551 } 2275 }
2276 if (pause_result & (1<<0))
2277 vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
2278 if (pause_result & (1<<1))
2279 vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
1552} 2280}
1553 2281
1554static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params, 2282static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
1555 struct link_vars *vars) 2283 struct link_params *params)
1556{
1557 struct bnx2x *bp = params->bp;
1558 u8 ext_phy_addr;
1559 u16 ld_pause; /* local */
1560 u16 lp_pause; /* link partner */
1561 u16 an_complete; /* AN complete */
1562 u16 pause_result;
1563 u8 ret = 0;
1564 u32 ext_phy_type;
1565 u8 port = params->port;
1566 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
1567 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
1568 /* read twice */
1569
1570 bnx2x_cl45_read(bp, port,
1571 ext_phy_type,
1572 ext_phy_addr,
1573 MDIO_AN_DEVAD,
1574 MDIO_AN_REG_STATUS, &an_complete);
1575 bnx2x_cl45_read(bp, port,
1576 ext_phy_type,
1577 ext_phy_addr,
1578 MDIO_AN_DEVAD,
1579 MDIO_AN_REG_STATUS, &an_complete);
1580
1581 if (an_complete & MDIO_AN_REG_STATUS_AN_COMPLETE) {
1582 ret = 1;
1583 bnx2x_cl45_read(bp, port,
1584 ext_phy_type,
1585 ext_phy_addr,
1586 MDIO_AN_DEVAD,
1587 MDIO_AN_REG_ADV_PAUSE, &ld_pause);
1588 bnx2x_cl45_read(bp, port,
1589 ext_phy_type,
1590 ext_phy_addr,
1591 MDIO_AN_DEVAD,
1592 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
1593 pause_result = (ld_pause &
1594 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
1595 pause_result |= (lp_pause &
1596 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
1597 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
1598 pause_result);
1599 bnx2x_pause_resolve(vars, pause_result);
1600 if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE &&
1601 ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
1602 bnx2x_cl45_read(bp, port,
1603 ext_phy_type,
1604 ext_phy_addr,
1605 MDIO_AN_DEVAD,
1606 MDIO_AN_REG_CL37_FC_LD, &ld_pause);
1607
1608 bnx2x_cl45_read(bp, port,
1609 ext_phy_type,
1610 ext_phy_addr,
1611 MDIO_AN_DEVAD,
1612 MDIO_AN_REG_CL37_FC_LP, &lp_pause);
1613 pause_result = (ld_pause &
1614 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
1615 pause_result |= (lp_pause &
1616 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
1617
1618 bnx2x_pause_resolve(vars, pause_result);
1619 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
1620 pause_result);
1621 }
1622 }
1623 return ret;
1624}
1625
1626static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
1627{ 2284{
1628 struct bnx2x *bp = params->bp; 2285 struct bnx2x *bp = params->bp;
1629 u16 pd_10g, status2_1000x; 2286 u16 pd_10g, status2_1000x;
1630 CL45_RD_OVER_CL22(bp, params->port, 2287 if (phy->req_line_speed != SPEED_AUTO_NEG)
1631 params->phy_addr, 2288 return 0;
1632 MDIO_REG_BANK_SERDES_DIGITAL, 2289 CL22_RD_OVER_CL45(bp, phy,
1633 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 2290 MDIO_REG_BANK_SERDES_DIGITAL,
1634 &status2_1000x); 2291 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1635 CL45_RD_OVER_CL22(bp, params->port, 2292 &status2_1000x);
1636 params->phy_addr, 2293 CL22_RD_OVER_CL45(bp, phy,
1637 MDIO_REG_BANK_SERDES_DIGITAL, 2294 MDIO_REG_BANK_SERDES_DIGITAL,
1638 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 2295 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1639 &status2_1000x); 2296 &status2_1000x);
1640 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) { 2297 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
1641 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n", 2298 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
1642 params->port); 2299 params->port);
1643 return 1; 2300 return 1;
1644 } 2301 }
1645 2302
1646 CL45_RD_OVER_CL22(bp, params->port, 2303 CL22_RD_OVER_CL45(bp, phy,
1647 params->phy_addr, 2304 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1648 MDIO_REG_BANK_10G_PARALLEL_DETECT, 2305 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
1649 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, 2306 &pd_10g);
1650 &pd_10g);
1651 2307
1652 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) { 2308 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
1653 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n", 2309 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
@@ -1657,9 +2313,10 @@ static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
1657 return 0; 2313 return 0;
1658} 2314}
1659 2315
1660static void bnx2x_flow_ctrl_resolve(struct link_params *params, 2316static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
1661 struct link_vars *vars, 2317 struct link_params *params,
1662 u32 gp_status) 2318 struct link_vars *vars,
2319 u32 gp_status)
1663{ 2320{
1664 struct bnx2x *bp = params->bp; 2321 struct bnx2x *bp = params->bp;
1665 u16 ld_pause; /* local driver */ 2322 u16 ld_pause; /* local driver */
@@ -1669,12 +2326,13 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1669 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 2326 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1670 2327
1671 /* resolve from gp_status in case of AN complete and not sgmii */ 2328 /* resolve from gp_status in case of AN complete and not sgmii */
1672 if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) && 2329 if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
1673 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) && 2330 vars->flow_ctrl = phy->req_flow_ctrl;
1674 (!(vars->phy_flags & PHY_SGMII_FLAG)) && 2331 else if (phy->req_line_speed != SPEED_AUTO_NEG)
1675 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 2332 vars->flow_ctrl = params->req_fc_auto_adv;
1676 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) { 2333 else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
1677 if (bnx2x_direct_parallel_detect_used(params)) { 2334 (!(vars->phy_flags & PHY_SGMII_FLAG))) {
2335 if (bnx2x_direct_parallel_detect_used(phy, params)) {
1678 vars->flow_ctrl = params->req_fc_auto_adv; 2336 vars->flow_ctrl = params->req_fc_auto_adv;
1679 return; 2337 return;
1680 } 2338 }
@@ -1684,16 +2342,14 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1684 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | 2342 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
1685 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { 2343 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
1686 2344
1687 CL45_RD_OVER_CL22(bp, params->port, 2345 CL22_RD_OVER_CL45(bp, phy,
1688 params->phy_addr, 2346 MDIO_REG_BANK_CL73_IEEEB1,
1689 MDIO_REG_BANK_CL73_IEEEB1, 2347 MDIO_CL73_IEEEB1_AN_ADV1,
1690 MDIO_CL73_IEEEB1_AN_ADV1, 2348 &ld_pause);
1691 &ld_pause); 2349 CL22_RD_OVER_CL45(bp, phy,
1692 CL45_RD_OVER_CL22(bp, params->port, 2350 MDIO_REG_BANK_CL73_IEEEB1,
1693 params->phy_addr, 2351 MDIO_CL73_IEEEB1_AN_LP_ADV1,
1694 MDIO_REG_BANK_CL73_IEEEB1, 2352 &lp_pause);
1695 MDIO_CL73_IEEEB1_AN_LP_ADV1,
1696 &lp_pause);
1697 pause_result = (ld_pause & 2353 pause_result = (ld_pause &
1698 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) 2354 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
1699 >> 8; 2355 >> 8;
@@ -1703,65 +2359,52 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1703 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", 2359 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
1704 pause_result); 2360 pause_result);
1705 } else { 2361 } else {
1706 2362 CL22_RD_OVER_CL45(bp, phy,
1707 CL45_RD_OVER_CL22(bp, params->port, 2363 MDIO_REG_BANK_COMBO_IEEE0,
1708 params->phy_addr, 2364 MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
1709 MDIO_REG_BANK_COMBO_IEEE0, 2365 &ld_pause);
1710 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, 2366 CL22_RD_OVER_CL45(bp, phy,
1711 &ld_pause); 2367 MDIO_REG_BANK_COMBO_IEEE0,
1712 CL45_RD_OVER_CL22(bp, params->port, 2368 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
1713 params->phy_addr, 2369 &lp_pause);
1714 MDIO_REG_BANK_COMBO_IEEE0,
1715 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
1716 &lp_pause);
1717 pause_result = (ld_pause & 2370 pause_result = (ld_pause &
1718 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; 2371 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
1719 pause_result |= (lp_pause & 2372 pause_result |= (lp_pause &
1720 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; 2373 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
1721 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", 2374 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
1722 pause_result); 2375 pause_result);
1723 } 2376 }
1724 bnx2x_pause_resolve(vars, pause_result); 2377 bnx2x_pause_resolve(vars, pause_result);
1725 } else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
1726 (bnx2x_ext_phy_resolve_fc(params, vars))) {
1727 return;
1728 } else {
1729 if (params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
1730 vars->flow_ctrl = params->req_fc_auto_adv;
1731 else
1732 vars->flow_ctrl = params->req_flow_ctrl;
1733 } 2378 }
1734 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl); 2379 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
1735} 2380}
1736 2381
1737static void bnx2x_check_fallback_to_cl37(struct link_params *params) 2382static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
2383 struct link_params *params)
1738{ 2384{
1739 struct bnx2x *bp = params->bp; 2385 struct bnx2x *bp = params->bp;
1740 u16 rx_status, ustat_val, cl37_fsm_recieved; 2386 u16 rx_status, ustat_val, cl37_fsm_recieved;
1741 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); 2387 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
1742 /* Step 1: Make sure signal is detected */ 2388 /* Step 1: Make sure signal is detected */
1743 CL45_RD_OVER_CL22(bp, params->port, 2389 CL22_RD_OVER_CL45(bp, phy,
1744 params->phy_addr, 2390 MDIO_REG_BANK_RX0,
1745 MDIO_REG_BANK_RX0, 2391 MDIO_RX0_RX_STATUS,
1746 MDIO_RX0_RX_STATUS, 2392 &rx_status);
1747 &rx_status);
1748 if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) != 2393 if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
1749 (MDIO_RX0_RX_STATUS_SIGDET)) { 2394 (MDIO_RX0_RX_STATUS_SIGDET)) {
1750 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73." 2395 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
1751 "rx_status(0x80b0) = 0x%x\n", rx_status); 2396 "rx_status(0x80b0) = 0x%x\n", rx_status);
1752 CL45_WR_OVER_CL22(bp, params->port, 2397 CL22_WR_OVER_CL45(bp, phy,
1753 params->phy_addr, 2398 MDIO_REG_BANK_CL73_IEEEB0,
1754 MDIO_REG_BANK_CL73_IEEEB0, 2399 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1755 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2400 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
1756 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
1757 return; 2401 return;
1758 } 2402 }
1759 /* Step 2: Check CL73 state machine */ 2403 /* Step 2: Check CL73 state machine */
1760 CL45_RD_OVER_CL22(bp, params->port, 2404 CL22_RD_OVER_CL45(bp, phy,
1761 params->phy_addr, 2405 MDIO_REG_BANK_CL73_USERB0,
1762 MDIO_REG_BANK_CL73_USERB0, 2406 MDIO_CL73_USERB0_CL73_USTAT1,
1763 MDIO_CL73_USERB0_CL73_USTAT1, 2407 &ustat_val);
1764 &ustat_val);
1765 if ((ustat_val & 2408 if ((ustat_val &
1766 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | 2409 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
1767 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) != 2410 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
@@ -1771,13 +2414,14 @@ static void bnx2x_check_fallback_to_cl37(struct link_params *params)
1771 "ustat_val(0x8371) = 0x%x\n", ustat_val); 2414 "ustat_val(0x8371) = 0x%x\n", ustat_val);
1772 return; 2415 return;
1773 } 2416 }
1774 /* Step 3: Check CL37 Message Pages received to indicate LP 2417 /*
1775 supports only CL37 */ 2418 * Step 3: Check CL37 Message Pages received to indicate LP
1776 CL45_RD_OVER_CL22(bp, params->port, 2419 * supports only CL37
1777 params->phy_addr, 2420 */
1778 MDIO_REG_BANK_REMOTE_PHY, 2421 CL22_RD_OVER_CL45(bp, phy,
1779 MDIO_REMOTE_PHY_MISC_RX_STATUS, 2422 MDIO_REG_BANK_REMOTE_PHY,
1780 &cl37_fsm_recieved); 2423 MDIO_REMOTE_PHY_MISC_RX_STATUS,
2424 &cl37_fsm_recieved);
1781 if ((cl37_fsm_recieved & 2425 if ((cl37_fsm_recieved &
1782 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | 2426 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
1783 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != 2427 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
@@ -1788,29 +2432,53 @@ static void bnx2x_check_fallback_to_cl37(struct link_params *params)
1788 cl37_fsm_recieved); 2432 cl37_fsm_recieved);
1789 return; 2433 return;
1790 } 2434 }
1791 /* The combined cl37/cl73 fsm state information indicating that we are 2435 /*
1792 connected to a device which does not support cl73, but does support 2436 * The combined cl37/cl73 fsm state information indicating that
1793 cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */ 2437 * we are connected to a device which does not support cl73, but
2438 * does support cl37 BAM. In this case we disable cl73 and
2439 * restart cl37 auto-neg
2440 */
2441
1794 /* Disable CL73 */ 2442 /* Disable CL73 */
1795 CL45_WR_OVER_CL22(bp, params->port, 2443 CL22_WR_OVER_CL45(bp, phy,
1796 params->phy_addr, 2444 MDIO_REG_BANK_CL73_IEEEB0,
1797 MDIO_REG_BANK_CL73_IEEEB0, 2445 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1798 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2446 0);
1799 0);
1800 /* Restart CL37 autoneg */ 2447 /* Restart CL37 autoneg */
1801 bnx2x_restart_autoneg(params, 0); 2448 bnx2x_restart_autoneg(phy, params, 0);
1802 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n"); 2449 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
1803} 2450}
1804static u8 bnx2x_link_settings_status(struct link_params *params, 2451
1805 struct link_vars *vars, 2452static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy,
1806 u32 gp_status, 2453 struct link_params *params,
1807 u8 ext_phy_link_up) 2454 struct link_vars *vars,
2455 u32 gp_status)
2456{
2457 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
2458 vars->link_status |=
2459 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
2460
2461 if (bnx2x_direct_parallel_detect_used(phy, params))
2462 vars->link_status |=
2463 LINK_STATUS_PARALLEL_DETECTION_USED;
2464}
2465
2466static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
2467 struct link_params *params,
2468 struct link_vars *vars)
1808{ 2469{
1809 struct bnx2x *bp = params->bp; 2470 struct bnx2x *bp = params->bp;
1810 u16 new_line_speed; 2471 u16 new_line_speed, gp_status;
1811 u8 rc = 0; 2472 u8 rc = 0;
1812 vars->link_status = 0;
1813 2473
2474 /* Read gp_status */
2475 CL22_RD_OVER_CL45(bp, phy,
2476 MDIO_REG_BANK_GP_STATUS,
2477 MDIO_GP_STATUS_TOP_AN_STATUS1,
2478 &gp_status);
2479
2480 if (phy->req_line_speed == SPEED_AUTO_NEG)
2481 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
1814 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { 2482 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
1815 DP(NETIF_MSG_LINK, "phy link up gp_status=0x%x\n", 2483 DP(NETIF_MSG_LINK, "phy link up gp_status=0x%x\n",
1816 gp_status); 2484 gp_status);
@@ -1823,7 +2491,12 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1823 else 2491 else
1824 vars->duplex = DUPLEX_HALF; 2492 vars->duplex = DUPLEX_HALF;
1825 2493
1826 bnx2x_flow_ctrl_resolve(params, vars, gp_status); 2494 if (SINGLE_MEDIA_DIRECT(params)) {
2495 bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
2496 if (phy->req_line_speed == SPEED_AUTO_NEG)
2497 bnx2x_xgxs_an_resolve(phy, params, vars,
2498 gp_status);
2499 }
1827 2500
1828 switch (gp_status & GP_STATUS_SPEED_MASK) { 2501 switch (gp_status & GP_STATUS_SPEED_MASK) {
1829 case GP_STATUS_10M: 2502 case GP_STATUS_10M:
@@ -1905,56 +2578,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1905 return -EINVAL; 2578 return -EINVAL;
1906 } 2579 }
1907 2580
1908 /* Upon link speed change set the NIG into drain mode.
1909 Comes to deals with possible FIFO glitch due to clk change
1910 when speed is decreased without link down indicator */
1911 if (new_line_speed != vars->line_speed) {
1912 if (XGXS_EXT_PHY_TYPE(params->ext_phy_config) !=
1913 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT &&
1914 ext_phy_link_up) {
1915 DP(NETIF_MSG_LINK, "Internal link speed %d is"
1916 " different than the external"
1917 " link speed %d\n", new_line_speed,
1918 vars->line_speed);
1919 vars->phy_link_up = 0;
1920 return 0;
1921 }
1922 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
1923 + params->port*4, 0);
1924 msleep(1);
1925 }
1926 vars->line_speed = new_line_speed; 2581 vars->line_speed = new_line_speed;
1927 vars->link_status |= LINK_STATUS_SERDES_LINK;
1928
1929 if ((params->req_line_speed == SPEED_AUTO_NEG) &&
1930 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1931 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
1932 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1933 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
1934 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1935 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
1936 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1937 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726))) {
1938 vars->autoneg = AUTO_NEG_ENABLED;
1939
1940 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
1941 vars->autoneg |= AUTO_NEG_COMPLETE;
1942 vars->link_status |=
1943 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
1944 }
1945
1946 vars->autoneg |= AUTO_NEG_PARALLEL_DETECTION_USED;
1947 vars->link_status |=
1948 LINK_STATUS_PARALLEL_DETECTION_USED;
1949
1950 }
1951 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
1952 vars->link_status |=
1953 LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1954
1955 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
1956 vars->link_status |=
1957 LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1958 2582
1959 } else { /* link_down */ 2583 } else { /* link_down */
1960 DP(NETIF_MSG_LINK, "phy link down\n"); 2584 DP(NETIF_MSG_LINK, "phy link down\n");
@@ -1963,40 +2587,34 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1963 2587
1964 vars->duplex = DUPLEX_FULL; 2588 vars->duplex = DUPLEX_FULL;
1965 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 2589 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1966 vars->autoneg = AUTO_NEG_DISABLED;
1967 vars->mac_type = MAC_TYPE_NONE; 2590 vars->mac_type = MAC_TYPE_NONE;
1968 2591
1969 if ((params->req_line_speed == SPEED_AUTO_NEG) && 2592 if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
1970 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 2593 SINGLE_MEDIA_DIRECT(params)) {
1971 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT))) {
1972 /* Check signal is detected */ 2594 /* Check signal is detected */
1973 bnx2x_check_fallback_to_cl37(params); 2595 bnx2x_check_fallback_to_cl37(phy, params);
1974 } 2596 }
1975 } 2597 }
1976 2598
1977 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n", 2599 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n",
1978 gp_status, vars->phy_link_up, vars->line_speed); 2600 gp_status, vars->phy_link_up, vars->line_speed);
1979 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x" 2601 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
1980 " autoneg 0x%x\n", 2602 vars->duplex, vars->flow_ctrl, vars->link_status);
1981 vars->duplex,
1982 vars->flow_ctrl, vars->autoneg);
1983 DP(NETIF_MSG_LINK, "link_status 0x%x\n", vars->link_status);
1984
1985 return rc; 2603 return rc;
1986} 2604}
1987 2605
1988static void bnx2x_set_gmii_tx_driver(struct link_params *params) 2606static void bnx2x_set_gmii_tx_driver(struct link_params *params)
1989{ 2607{
1990 struct bnx2x *bp = params->bp; 2608 struct bnx2x *bp = params->bp;
2609 struct bnx2x_phy *phy = &params->phy[INT_PHY];
1991 u16 lp_up2; 2610 u16 lp_up2;
1992 u16 tx_driver; 2611 u16 tx_driver;
1993 u16 bank; 2612 u16 bank;
1994 2613
1995 /* read precomp */ 2614 /* read precomp */
1996 CL45_RD_OVER_CL22(bp, params->port, 2615 CL22_RD_OVER_CL45(bp, phy,
1997 params->phy_addr, 2616 MDIO_REG_BANK_OVER_1G,
1998 MDIO_REG_BANK_OVER_1G, 2617 MDIO_OVER_1G_LP_UP2, &lp_up2);
1999 MDIO_OVER_1G_LP_UP2, &lp_up2);
2000 2618
2001 /* bits [10:7] at lp_up2, positioned at [15:12] */ 2619 /* bits [10:7] at lp_up2, positioned at [15:12] */
2002 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> 2620 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
@@ -2008,26 +2626,24 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
2008 2626
2009 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3; 2627 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
2010 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) { 2628 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
2011 CL45_RD_OVER_CL22(bp, params->port, 2629 CL22_RD_OVER_CL45(bp, phy,
2012 params->phy_addr, 2630 bank,
2013 bank, 2631 MDIO_TX0_TX_DRIVER, &tx_driver);
2014 MDIO_TX0_TX_DRIVER, &tx_driver);
2015 2632
2016 /* replace tx_driver bits [15:12] */ 2633 /* replace tx_driver bits [15:12] */
2017 if (lp_up2 != 2634 if (lp_up2 !=
2018 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { 2635 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
2019 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; 2636 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2020 tx_driver |= lp_up2; 2637 tx_driver |= lp_up2;
2021 CL45_WR_OVER_CL22(bp, params->port, 2638 CL22_WR_OVER_CL45(bp, phy,
2022 params->phy_addr, 2639 bank,
2023 bank, 2640 MDIO_TX0_TX_DRIVER, tx_driver);
2024 MDIO_TX0_TX_DRIVER, tx_driver);
2025 } 2641 }
2026 } 2642 }
2027} 2643}
2028 2644
2029static u8 bnx2x_emac_program(struct link_params *params, 2645static u8 bnx2x_emac_program(struct link_params *params,
2030 u32 line_speed, u32 duplex) 2646 struct link_vars *vars)
2031{ 2647{
2032 struct bnx2x *bp = params->bp; 2648 struct bnx2x *bp = params->bp;
2033 u8 port = params->port; 2649 u8 port = params->port;
@@ -2035,11 +2651,11 @@ static u8 bnx2x_emac_program(struct link_params *params,
2035 2651
2036 DP(NETIF_MSG_LINK, "setting link speed & duplex\n"); 2652 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2037 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + 2653 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
2038 EMAC_REG_EMAC_MODE, 2654 EMAC_REG_EMAC_MODE,
2039 (EMAC_MODE_25G_MODE | 2655 (EMAC_MODE_25G_MODE |
2040 EMAC_MODE_PORT_MII_10M | 2656 EMAC_MODE_PORT_MII_10M |
2041 EMAC_MODE_HALF_DUPLEX)); 2657 EMAC_MODE_HALF_DUPLEX));
2042 switch (line_speed) { 2658 switch (vars->line_speed) {
2043 case SPEED_10: 2659 case SPEED_10:
2044 mode |= EMAC_MODE_PORT_MII_10M; 2660 mode |= EMAC_MODE_PORT_MII_10M;
2045 break; 2661 break;
@@ -2058,384 +2674,1276 @@ static u8 bnx2x_emac_program(struct link_params *params,
2058 2674
2059 default: 2675 default:
2060 /* 10G not valid for EMAC */ 2676 /* 10G not valid for EMAC */
2061 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", line_speed); 2677 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
2678 vars->line_speed);
2062 return -EINVAL; 2679 return -EINVAL;
2063 } 2680 }
2064 2681
2065 if (duplex == DUPLEX_HALF) 2682 if (vars->duplex == DUPLEX_HALF)
2066 mode |= EMAC_MODE_HALF_DUPLEX; 2683 mode |= EMAC_MODE_HALF_DUPLEX;
2067 bnx2x_bits_en(bp, 2684 bnx2x_bits_en(bp,
2068 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, 2685 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2069 mode); 2686 mode);
2070 2687
2071 bnx2x_set_led(params, LED_MODE_OPER, line_speed); 2688 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
2072 return 0; 2689 return 0;
2073} 2690}
2074 2691
2075/*****************************************************************************/ 2692static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
2076/* External Phy section */ 2693 struct link_params *params)
2077/*****************************************************************************/
2078void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
2079{ 2694{
2080 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 2695
2081 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 2696 u16 bank, i = 0;
2082 msleep(1); 2697 struct bnx2x *bp = params->bp;
2083 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 2698
2084 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); 2699 for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
2700 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
2701 CL22_WR_OVER_CL45(bp, phy,
2702 bank,
2703 MDIO_RX0_RX_EQ_BOOST,
2704 phy->rx_preemphasis[i]);
2705 }
2706
2707 for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
2708 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
2709 CL22_WR_OVER_CL45(bp, phy,
2710 bank,
2711 MDIO_TX0_TX_DRIVER,
2712 phy->tx_preemphasis[i]);
2713 }
2085} 2714}
2086 2715
2087static void bnx2x_ext_phy_reset(struct link_params *params, 2716static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
2088 struct link_vars *vars) 2717 struct link_params *params,
2718 struct link_vars *vars)
2089{ 2719{
2090 struct bnx2x *bp = params->bp; 2720 struct bnx2x *bp = params->bp;
2091 u32 ext_phy_type; 2721 u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) ||
2092 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 2722 (params->loopback_mode == LOOPBACK_XGXS));
2723 if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
2724 if (SINGLE_MEDIA_DIRECT(params) &&
2725 (params->feature_config_flags &
2726 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
2727 bnx2x_set_preemphasis(phy, params);
2093 2728
2094 DP(NETIF_MSG_LINK, "Port %x: bnx2x_ext_phy_reset\n", params->port); 2729 /* forced speed requested? */
2095 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 2730 if (vars->line_speed != SPEED_AUTO_NEG ||
2096 /* The PHY reset is controled by GPIO 1 2731 (SINGLE_MEDIA_DIRECT(params) &&
2097 * Give it 1ms of reset pulse 2732 params->loopback_mode == LOOPBACK_EXT)) {
2098 */ 2733 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
2099 if (vars->phy_flags & PHY_XGXS_FLAG) {
2100 2734
2101 switch (ext_phy_type) { 2735 /* disable autoneg */
2102 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 2736 bnx2x_set_autoneg(phy, params, vars, 0);
2103 DP(NETIF_MSG_LINK, "XGXS Direct\n");
2104 break;
2105 2737
2106 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 2738 /* program speed and duplex */
2107 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 2739 bnx2x_program_serdes(phy, params, vars);
2108 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
2109 2740
2110 /* Restore normal power mode*/ 2741 } else { /* AN_mode */
2111 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2742 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
2112 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2113 params->port);
2114 2743
2115 /* HW reset */ 2744 /* AN enabled */
2116 bnx2x_ext_phy_hw_reset(bp, params->port); 2745 bnx2x_set_brcm_cl37_advertisment(phy, params);
2117 2746
2118 bnx2x_cl45_write(bp, params->port, 2747 /* program duplex & pause advertisement (for aneg) */
2119 ext_phy_type, 2748 bnx2x_set_ieee_aneg_advertisment(phy, params,
2120 ext_phy_addr, 2749 vars->ieee_fc);
2121 MDIO_PMA_DEVAD,
2122 MDIO_PMA_REG_CTRL, 0xa040);
2123 break;
2124 2750
2125 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 2751 /* enable autoneg */
2126 break; 2752 bnx2x_set_autoneg(phy, params, vars, enable_cl73);
2127 2753
2128 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 2754 /* enable and restart AN */
2755 bnx2x_restart_autoneg(phy, params, enable_cl73);
2756 }
2129 2757
2130 /* Restore normal power mode*/ 2758 } else { /* SGMII mode */
2131 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2759 DP(NETIF_MSG_LINK, "SGMII\n");
2132 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2133 params->port);
2134 2760
2135 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 2761 bnx2x_initialize_sgmii_process(phy, params, vars);
2136 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 2762 }
2137 params->port); 2763}
2138 2764
2139 bnx2x_cl45_write(bp, params->port, 2765static u8 bnx2x_init_serdes(struct bnx2x_phy *phy,
2140 ext_phy_type, 2766 struct link_params *params,
2141 ext_phy_addr, 2767 struct link_vars *vars)
2142 MDIO_PMA_DEVAD, 2768{
2143 MDIO_PMA_REG_CTRL, 2769 u8 rc;
2144 1<<15); 2770 vars->phy_flags |= PHY_SGMII_FLAG;
2145 break; 2771 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
2772 bnx2x_set_aer_mmd_serdes(params->bp, phy);
2773 rc = bnx2x_reset_unicore(params, phy, 1);
2774 /* reset the SerDes and wait for reset bit return low */
2775 if (rc != 0)
2776 return rc;
2777 bnx2x_set_aer_mmd_serdes(params->bp, phy);
2146 2778
2147 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: 2779 return rc;
2148 DP(NETIF_MSG_LINK, "XGXS 8072\n"); 2780}
2149
2150 /* Unset Low Power Mode and SW reset */
2151 /* Restore normal power mode*/
2152 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2153 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2154 params->port);
2155
2156 bnx2x_cl45_write(bp, params->port,
2157 ext_phy_type,
2158 ext_phy_addr,
2159 MDIO_PMA_DEVAD,
2160 MDIO_PMA_REG_CTRL,
2161 1<<15);
2162 break;
2163 2781
2164 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 2782static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
2165 DP(NETIF_MSG_LINK, "XGXS 8073\n"); 2783 struct link_params *params,
2784 struct link_vars *vars)
2785{
2786 u8 rc;
2787 vars->phy_flags = PHY_XGXS_FLAG;
2788 if ((phy->req_line_speed &&
2789 ((phy->req_line_speed == SPEED_100) ||
2790 (phy->req_line_speed == SPEED_10))) ||
2791 (!phy->req_line_speed &&
2792 (phy->speed_cap_mask >=
2793 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
2794 (phy->speed_cap_mask <
2795 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
2796 ))
2797 vars->phy_flags |= PHY_SGMII_FLAG;
2798 else
2799 vars->phy_flags &= ~PHY_SGMII_FLAG;
2166 2800
2167 /* Restore normal power mode*/ 2801 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
2168 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2802 bnx2x_set_aer_mmd_xgxs(params, phy);
2169 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 2803 bnx2x_set_master_ln(params, phy);
2170 params->port);
2171 2804
2172 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 2805 rc = bnx2x_reset_unicore(params, phy, 0);
2173 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 2806 /* reset the SerDes and wait for reset bit return low */
2174 params->port); 2807 if (rc != 0)
2175 break; 2808 return rc;
2176 2809
2177 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 2810 bnx2x_set_aer_mmd_xgxs(params, phy);
2178 DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
2179 2811
2180 /* Restore normal power mode*/ 2812 /* setting the masterLn_def again after the reset */
2181 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2813 bnx2x_set_master_ln(params, phy);
2182 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 2814 bnx2x_set_swap_lanes(params, phy);
2183 params->port);
2184 2815
2185 /* HW reset */ 2816 return rc;
2186 bnx2x_ext_phy_hw_reset(bp, params->port); 2817}
2187 break;
2188 2818
2189 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: 2819static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
2190 /* Restore normal power mode*/ 2820 struct bnx2x_phy *phy,
2191 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2821 struct link_params *params)
2192 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 2822{
2193 params->port); 2823 u16 cnt, ctrl;
2194 2824 /* Wait for soft reset to get cleared up to 1 sec */
2195 /* HW reset */ 2825 for (cnt = 0; cnt < 1000; cnt++) {
2196 bnx2x_ext_phy_hw_reset(bp, params->port); 2826 bnx2x_cl45_read(bp, phy,
2197 2827 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, &ctrl);
2198 bnx2x_cl45_write(bp, params->port, 2828 if (!(ctrl & (1<<15)))
2199 ext_phy_type,
2200 ext_phy_addr,
2201 MDIO_PMA_DEVAD,
2202 MDIO_PMA_REG_CTRL,
2203 1<<15);
2204 break;
2205 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
2206 break;
2207 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
2208 DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n");
2209 break; 2829 break;
2830 msleep(1);
2831 }
2210 2832
2211 default: 2833 if (cnt == 1000)
2212 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", 2834 netdev_err(bp->dev, "Warning: PHY was not initialized,"
2213 params->ext_phy_config); 2835 " Port %d\n",
2214 break; 2836 params->port);
2837 DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
2838 return cnt;
2839}
2840
2841static void bnx2x_link_int_enable(struct link_params *params)
2842{
2843 u8 port = params->port;
2844 u32 mask;
2845 struct bnx2x *bp = params->bp;
2846
2847 /* Setting the status to report on link up for either XGXS or SerDes */
2848 if (params->switch_cfg == SWITCH_CFG_10G) {
2849 mask = (NIG_MASK_XGXS0_LINK10G |
2850 NIG_MASK_XGXS0_LINK_STATUS);
2851 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
2852 if (!(SINGLE_MEDIA_DIRECT(params)) &&
2853 params->phy[INT_PHY].type !=
2854 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
2855 mask |= NIG_MASK_MI_INT;
2856 DP(NETIF_MSG_LINK, "enabled external phy int\n");
2215 } 2857 }
2216 2858
2217 } else { /* SerDes */ 2859 } else { /* SerDes */
2218 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config); 2860 mask = NIG_MASK_SERDES0_LINK_STATUS;
2219 switch (ext_phy_type) { 2861 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
2220 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: 2862 if (!(SINGLE_MEDIA_DIRECT(params)) &&
2221 DP(NETIF_MSG_LINK, "SerDes Direct\n"); 2863 params->phy[INT_PHY].type !=
2222 break; 2864 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
2865 mask |= NIG_MASK_MI_INT;
2866 DP(NETIF_MSG_LINK, "enabled external phy int\n");
2867 }
2868 }
2869 bnx2x_bits_en(bp,
2870 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2871 mask);
2872
2873 DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
2874 (params->switch_cfg == SWITCH_CFG_10G),
2875 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
2876 DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
2877 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
2878 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
2879 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
2880 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
2881 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2882 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
2883}
2884
2885static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
2886 u8 exp_mi_int)
2887{
2888 u32 latch_status = 0;
2889
2890 /*
2891 * Disable the MI INT ( external phy int ) by writing 1 to the
2892 * status register. Link down indication is high-active-signal,
2893 * so in this case we need to write the status to clear the XOR
2894 */
2895 /* Read Latched signals */
2896 latch_status = REG_RD(bp,
2897 NIG_REG_LATCH_STATUS_0 + port*8);
2898 DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status);
2899 /* Handle only those with latched-signal=up.*/
2900 if (exp_mi_int)
2901 bnx2x_bits_en(bp,
2902 NIG_REG_STATUS_INTERRUPT_PORT0
2903 + port*4,
2904 NIG_STATUS_EMAC0_MI_INT);
2905 else
2906 bnx2x_bits_dis(bp,
2907 NIG_REG_STATUS_INTERRUPT_PORT0
2908 + port*4,
2909 NIG_STATUS_EMAC0_MI_INT);
2910
2911 if (latch_status & 1) {
2912
2913 /* For all latched-signal=up : Re-Arm Latch signals */
2914 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
2915 (latch_status & 0xfffe) | (latch_status & 1));
2916 }
2917 /* For all latched-signal=up,Write original_signal to status */
2918}
2919
2920static void bnx2x_link_int_ack(struct link_params *params,
2921 struct link_vars *vars, u8 is_10g)
2922{
2923 struct bnx2x *bp = params->bp;
2924 u8 port = params->port;
2925
2926 /*
2927 * First reset all status we assume only one line will be
2928 * change at a time
2929 */
2930 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2931 (NIG_STATUS_XGXS0_LINK10G |
2932 NIG_STATUS_XGXS0_LINK_STATUS |
2933 NIG_STATUS_SERDES0_LINK_STATUS));
2934 if (vars->phy_link_up) {
2935 if (is_10g) {
2936 /*
2937 * Disable the 10G link interrupt by writing 1 to the
2938 * status register
2939 */
2940 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
2941 bnx2x_bits_en(bp,
2942 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2943 NIG_STATUS_XGXS0_LINK10G);
2944
2945 } else if (params->switch_cfg == SWITCH_CFG_10G) {
2946 /*
2947 * Disable the link interrupt by writing 1 to the
2948 * relevant lane in the status register
2949 */
2950 u32 ser_lane = ((params->lane_config &
2951 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
2952 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
2953
2954 DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n",
2955 vars->line_speed);
2956 bnx2x_bits_en(bp,
2957 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2958 ((1 << ser_lane) <<
2959 NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
2960
2961 } else { /* SerDes */
2962 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
2963 /*
2964 * Disable the link interrupt by writing 1 to the status
2965 * register
2966 */
2967 bnx2x_bits_en(bp,
2968 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2969 NIG_STATUS_SERDES0_LINK_STATUS);
2970 }
2971
2972 }
2973}
2974
2975static u8 bnx2x_format_ver(u32 num, u8 *str, u16 *len)
2976{
2977 u8 *str_ptr = str;
2978 u32 mask = 0xf0000000;
2979 u8 shift = 8*4;
2980 u8 digit;
2981 u8 remove_leading_zeros = 1;
2982 if (*len < 10) {
2983 /* Need more than 10chars for this format */
2984 *str_ptr = '\0';
2985 (*len)--;
2986 return -EINVAL;
2987 }
2988 while (shift > 0) {
2989
2990 shift -= 4;
2991 digit = ((num & mask) >> shift);
2992 if (digit == 0 && remove_leading_zeros) {
2993 mask = mask >> 4;
2994 continue;
2995 } else if (digit < 0xa)
2996 *str_ptr = digit + '0';
2997 else
2998 *str_ptr = digit - 0xa + 'a';
2999 remove_leading_zeros = 0;
3000 str_ptr++;
3001 (*len)--;
3002 mask = mask >> 4;
3003 if (shift == 4*4) {
3004 *str_ptr = '.';
3005 str_ptr++;
3006 (*len)--;
3007 remove_leading_zeros = 1;
3008 }
3009 }
3010 return 0;
3011}
3012
3013
3014static u8 bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
3015{
3016 str[0] = '\0';
3017 (*len)--;
3018 return 0;
3019}
3020
3021u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
3022 u8 *version, u16 len)
3023{
3024 struct bnx2x *bp;
3025 u32 spirom_ver = 0;
3026 u8 status = 0;
3027 u8 *ver_p = version;
3028 u16 remain_len = len;
3029 if (version == NULL || params == NULL)
3030 return -EINVAL;
3031 bp = params->bp;
3032
3033 /* Extract first external phy*/
3034 version[0] = '\0';
3035 spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr);
3036
3037 if (params->phy[EXT_PHY1].format_fw_ver) {
3038 status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver,
3039 ver_p,
3040 &remain_len);
3041 ver_p += (len - remain_len);
3042 }
3043 if ((params->num_phys == MAX_PHYS) &&
3044 (params->phy[EXT_PHY2].ver_addr != 0)) {
3045 spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
3046 if (params->phy[EXT_PHY2].format_fw_ver) {
3047 *ver_p = '/';
3048 ver_p++;
3049 remain_len--;
3050 status |= params->phy[EXT_PHY2].format_fw_ver(
3051 spirom_ver,
3052 ver_p,
3053 &remain_len);
3054 ver_p = version + (len - remain_len);
3055 }
3056 }
3057 *ver_p = '\0';
3058 return status;
3059}
3060
3061static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
3062 struct link_params *params)
3063{
3064 u8 port = params->port;
3065 struct bnx2x *bp = params->bp;
2223 3066
2224 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: 3067 if (phy->req_line_speed != SPEED_1000) {
2225 DP(NETIF_MSG_LINK, "SerDes 5482\n"); 3068 u32 md_devad;
2226 bnx2x_ext_phy_hw_reset(bp, params->port); 3069
3070 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3071
3072 /* change the uni_phy_addr in the nig */
3073 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
3074 port*0x18));
3075
3076 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3077
3078 bnx2x_cl45_write(bp, phy,
3079 5,
3080 (MDIO_REG_BANK_AER_BLOCK +
3081 (MDIO_AER_BLOCK_AER_REG & 0xf)),
3082 0x2800);
3083
3084 bnx2x_cl45_write(bp, phy,
3085 5,
3086 (MDIO_REG_BANK_CL73_IEEEB0 +
3087 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
3088 0x6041);
3089 msleep(200);
3090 /* set aer mmd back */
3091 bnx2x_set_aer_mmd_xgxs(params, phy);
3092
3093 /* and md_devad */
3094 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3095 } else {
3096 u16 mii_ctrl;
3097 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
3098 bnx2x_cl45_read(bp, phy, 5,
3099 (MDIO_REG_BANK_COMBO_IEEE0 +
3100 (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
3101 &mii_ctrl);
3102 bnx2x_cl45_write(bp, phy, 5,
3103 (MDIO_REG_BANK_COMBO_IEEE0 +
3104 (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
3105 mii_ctrl |
3106 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK);
3107 }
3108}
3109
3110u8 bnx2x_set_led(struct link_params *params,
3111 struct link_vars *vars, u8 mode, u32 speed)
3112{
3113 u8 port = params->port;
3114 u16 hw_led_mode = params->hw_led_mode;
3115 u8 rc = 0, phy_idx;
3116 u32 tmp;
3117 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
3118 struct bnx2x *bp = params->bp;
3119 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
3120 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
3121 speed, hw_led_mode);
3122 /* In case */
3123 for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) {
3124 if (params->phy[phy_idx].set_link_led) {
3125 params->phy[phy_idx].set_link_led(
3126 &params->phy[phy_idx], params, mode);
3127 }
3128 }
3129
3130 switch (mode) {
3131 case LED_MODE_FRONT_PANEL_OFF:
3132 case LED_MODE_OFF:
3133 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
3134 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
3135 SHARED_HW_CFG_LED_MAC1);
3136
3137 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3138 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
3139 break;
3140
3141 case LED_MODE_OPER:
3142 /*
3143 * For all other phys, OPER mode is same as ON, so in case
3144 * link is down, do nothing
3145 */
3146 if (!vars->link_up)
2227 break; 3147 break;
3148 case LED_MODE_ON:
3149 if (params->phy[EXT_PHY1].type ==
3150 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
3151 CHIP_IS_E2(bp) && params->num_phys == 2) {
3152 /*
3153 * This is a work-around for E2+8727 Configurations
3154 */
3155 if (mode == LED_MODE_ON ||
3156 speed == SPEED_10000){
3157 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
3158 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
3159
3160 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3161 EMAC_WR(bp, EMAC_REG_EMAC_LED,
3162 (tmp | EMAC_LED_OVERRIDE));
3163 return rc;
3164 }
3165 } else if (SINGLE_MEDIA_DIRECT(params)) {
3166 /*
3167 * This is a work-around for HW issue found when link
3168 * is up in CL73
3169 */
3170 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
3171 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
3172 } else {
3173 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
3174 }
2228 3175
2229 default: 3176 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
2230 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n", 3177 /* Set blinking rate to ~15.9Hz */
2231 params->ext_phy_config); 3178 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
3179 LED_BLINK_RATE_VAL);
3180 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
3181 port*4, 1);
3182 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3183 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
3184
3185 if (CHIP_IS_E1(bp) &&
3186 ((speed == SPEED_2500) ||
3187 (speed == SPEED_1000) ||
3188 (speed == SPEED_100) ||
3189 (speed == SPEED_10))) {
3190 /*
3191 * On Everest 1 Ax chip versions for speeds less than
3192 * 10G LED scheme is different
3193 */
3194 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
3195 + port*4, 1);
3196 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
3197 port*4, 0);
3198 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
3199 port*4, 1);
3200 }
3201 break;
3202
3203 default:
3204 rc = -EINVAL;
3205 DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
3206 mode);
3207 break;
3208 }
3209 return rc;
3210
3211}
3212
3213/*
3214 * This function comes to reflect the actual link state read DIRECTLY from the
3215 * HW
3216 */
3217u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
3218 u8 is_serdes)
3219{
3220 struct bnx2x *bp = params->bp;
3221 u16 gp_status = 0, phy_index = 0;
3222 u8 ext_phy_link_up = 0, serdes_phy_type;
3223 struct link_vars temp_vars;
3224
3225 CL22_RD_OVER_CL45(bp, &params->phy[INT_PHY],
3226 MDIO_REG_BANK_GP_STATUS,
3227 MDIO_GP_STATUS_TOP_AN_STATUS1,
3228 &gp_status);
3229 /* link is up only if both local phy and external phy are up */
3230 if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
3231 return -ESRCH;
3232
3233 switch (params->num_phys) {
3234 case 1:
3235 /* No external PHY */
3236 return 0;
3237 case 2:
3238 ext_phy_link_up = params->phy[EXT_PHY1].read_status(
3239 &params->phy[EXT_PHY1],
3240 params, &temp_vars);
3241 break;
3242 case 3: /* Dual Media */
3243 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
3244 phy_index++) {
3245 serdes_phy_type = ((params->phy[phy_index].media_type ==
3246 ETH_PHY_SFP_FIBER) ||
3247 (params->phy[phy_index].media_type ==
3248 ETH_PHY_XFP_FIBER));
3249
3250 if (is_serdes != serdes_phy_type)
3251 continue;
3252 if (params->phy[phy_index].read_status) {
3253 ext_phy_link_up |=
3254 params->phy[phy_index].read_status(
3255 &params->phy[phy_index],
3256 params, &temp_vars);
3257 }
3258 }
3259 break;
3260 }
3261 if (ext_phy_link_up)
3262 return 0;
3263 return -ESRCH;
3264}
3265
3266static u8 bnx2x_link_initialize(struct link_params *params,
3267 struct link_vars *vars)
3268{
3269 u8 rc = 0;
3270 u8 phy_index, non_ext_phy;
3271 struct bnx2x *bp = params->bp;
3272 /*
3273 * In case of external phy existence, the line speed would be the
3274 * line speed linked up by the external phy. In case it is direct
3275 * only, then the line_speed during initialization will be
3276 * equal to the req_line_speed
3277 */
3278 vars->line_speed = params->phy[INT_PHY].req_line_speed;
3279
3280 /*
3281 * Initialize the internal phy in case this is a direct board
3282 * (no external phys), or this board has external phy which requires
3283 * to first.
3284 */
3285
3286 if (params->phy[INT_PHY].config_init)
3287 params->phy[INT_PHY].config_init(
3288 &params->phy[INT_PHY],
3289 params, vars);
3290
3291 /* init ext phy and enable link state int */
3292 non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
3293 (params->loopback_mode == LOOPBACK_XGXS));
3294
3295 if (non_ext_phy ||
3296 (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
3297 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
3298 struct bnx2x_phy *phy = &params->phy[INT_PHY];
3299 if (vars->line_speed == SPEED_AUTO_NEG)
3300 bnx2x_set_parallel_detection(phy, params);
3301 bnx2x_init_internal_phy(phy, params, vars);
3302 }
3303
3304 /* Init external phy*/
3305 if (!non_ext_phy)
3306 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
3307 phy_index++) {
3308 /*
3309 * No need to initialize second phy in case of first
3310 * phy only selection. In case of second phy, we do
3311 * need to initialize the first phy, since they are
3312 * connected.
3313 */
3314 if (phy_index == EXT_PHY2 &&
3315 (bnx2x_phy_selection(params) ==
3316 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
3317 DP(NETIF_MSG_LINK, "Ignoring second phy\n");
3318 continue;
3319 }
3320 params->phy[phy_index].config_init(
3321 &params->phy[phy_index],
3322 params, vars);
3323 }
3324
3325 /* Reset the interrupt indication after phy was initialized */
3326 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 +
3327 params->port*4,
3328 (NIG_STATUS_XGXS0_LINK10G |
3329 NIG_STATUS_XGXS0_LINK_STATUS |
3330 NIG_STATUS_SERDES0_LINK_STATUS |
3331 NIG_MASK_MI_INT));
3332 return rc;
3333}
3334
3335static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
3336 struct link_params *params)
3337{
3338 /* reset the SerDes/XGXS */
3339 REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3340 (0x1ff << (params->port*16)));
3341}
3342
3343static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
3344 struct link_params *params)
3345{
3346 struct bnx2x *bp = params->bp;
3347 u8 gpio_port;
3348 /* HW reset */
3349 if (CHIP_IS_E2(bp))
3350 gpio_port = BP_PATH(bp);
3351 else
3352 gpio_port = params->port;
3353 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3354 MISC_REGISTERS_GPIO_OUTPUT_LOW,
3355 gpio_port);
3356 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3357 MISC_REGISTERS_GPIO_OUTPUT_LOW,
3358 gpio_port);
3359 DP(NETIF_MSG_LINK, "reset external PHY\n");
3360}
3361
3362static u8 bnx2x_update_link_down(struct link_params *params,
3363 struct link_vars *vars)
3364{
3365 struct bnx2x *bp = params->bp;
3366 u8 port = params->port;
3367
3368 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
3369 bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
3370
3371 /* indicate no mac active */
3372 vars->mac_type = MAC_TYPE_NONE;
3373
3374 /* update shared memory */
3375 vars->link_status = 0;
3376 vars->line_speed = 0;
3377 bnx2x_update_mng(params, vars->link_status);
3378
3379 /* activate nig drain */
3380 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
3381
3382 /* disable emac */
3383 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
3384
3385 msleep(10);
3386
3387 /* reset BigMac */
3388 bnx2x_bmac_rx_disable(bp, params->port);
3389 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
3390 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3391 return 0;
3392}
3393
3394static u8 bnx2x_update_link_up(struct link_params *params,
3395 struct link_vars *vars,
3396 u8 link_10g)
3397{
3398 struct bnx2x *bp = params->bp;
3399 u8 port = params->port;
3400 u8 rc = 0;
3401
3402 vars->link_status |= LINK_STATUS_LINK_UP;
3403
3404 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
3405 vars->link_status |=
3406 LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
3407
3408 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
3409 vars->link_status |=
3410 LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
3411
3412 if (link_10g) {
3413 bnx2x_bmac_enable(params, vars, 0);
3414 bnx2x_set_led(params, vars,
3415 LED_MODE_OPER, SPEED_10000);
3416 } else {
3417 rc = bnx2x_emac_program(params, vars);
3418
3419 bnx2x_emac_enable(params, vars, 0);
3420
3421 /* AN complete? */
3422 if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
3423 && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
3424 SINGLE_MEDIA_DIRECT(params))
3425 bnx2x_set_gmii_tx_driver(params);
3426 }
3427
3428 /* PBF - link up */
3429 if (!(CHIP_IS_E2(bp)))
3430 rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
3431 vars->line_speed);
3432
3433 /* disable drain */
3434 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
3435
3436 /* update shared memory */
3437 bnx2x_update_mng(params, vars->link_status);
3438 msleep(20);
3439 return rc;
3440}
3441/*
3442 * The bnx2x_link_update function should be called upon link
3443 * interrupt.
3444 * Link is considered up as follows:
3445 * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
3446 * to be up
3447 * - SINGLE_MEDIA - The link between the 577xx and the external
3448 * phy (XGXS) need to up as well as the external link of the
3449 * phy (PHY_EXT1)
3450 * - DUAL_MEDIA - The link between the 577xx and the first
3451 * external phy needs to be up, and at least one of the 2
3452 * external phy link must be up.
3453 */
3454u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3455{
3456 struct bnx2x *bp = params->bp;
3457 struct link_vars phy_vars[MAX_PHYS];
3458 u8 port = params->port;
3459 u8 link_10g, phy_index;
3460 u8 ext_phy_link_up = 0, cur_link_up, rc = 0;
3461 u8 is_mi_int = 0;
3462 u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
3463 u8 active_external_phy = INT_PHY;
3464 vars->link_status = 0;
3465 for (phy_index = INT_PHY; phy_index < params->num_phys;
3466 phy_index++) {
3467 phy_vars[phy_index].flow_ctrl = 0;
3468 phy_vars[phy_index].link_status = 0;
3469 phy_vars[phy_index].line_speed = 0;
3470 phy_vars[phy_index].duplex = DUPLEX_FULL;
3471 phy_vars[phy_index].phy_link_up = 0;
3472 phy_vars[phy_index].link_up = 0;
3473 }
3474
3475 DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
3476 port, (vars->phy_flags & PHY_XGXS_FLAG),
3477 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
3478
3479 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
3480 port*0x18) > 0);
3481 DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
3482 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3483 is_mi_int,
3484 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
3485
3486 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
3487 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
3488 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
3489
3490 /* disable emac */
3491 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
3492
3493 /*
3494 * Step 1:
3495 * Check external link change only for external phys, and apply
3496 * priority selection between them in case the link on both phys
3497 * is up. Note that the instead of the common vars, a temporary
3498 * vars argument is used since each phy may have different link/
3499 * speed/duplex result
3500 */
3501 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
3502 phy_index++) {
3503 struct bnx2x_phy *phy = &params->phy[phy_index];
3504 if (!phy->read_status)
3505 continue;
3506 /* Read link status and params of this ext phy */
3507 cur_link_up = phy->read_status(phy, params,
3508 &phy_vars[phy_index]);
3509 if (cur_link_up) {
3510 DP(NETIF_MSG_LINK, "phy in index %d link is up\n",
3511 phy_index);
3512 } else {
3513 DP(NETIF_MSG_LINK, "phy in index %d link is down\n",
3514 phy_index);
3515 continue;
3516 }
3517
3518 if (!ext_phy_link_up) {
3519 ext_phy_link_up = 1;
3520 active_external_phy = phy_index;
3521 } else {
3522 switch (bnx2x_phy_selection(params)) {
3523 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3524 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3525 /*
3526 * In this option, the first PHY makes sure to pass the
3527 * traffic through itself only.
3528 * Its not clear how to reset the link on the second phy
3529 */
3530 active_external_phy = EXT_PHY1;
3531 break;
3532 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3533 /*
3534 * In this option, the first PHY makes sure to pass the
3535 * traffic through the second PHY.
3536 */
3537 active_external_phy = EXT_PHY2;
3538 break;
3539 default:
3540 /*
3541 * Link indication on both PHYs with the following cases
3542 * is invalid:
3543 * - FIRST_PHY means that second phy wasn't initialized,
3544 * hence its link is expected to be down
3545 * - SECOND_PHY means that first phy should not be able
3546 * to link up by itself (using configuration)
3547 * - DEFAULT should be overriden during initialiazation
3548 */
3549 DP(NETIF_MSG_LINK, "Invalid link indication"
3550 "mpc=0x%x. DISABLING LINK !!!\n",
3551 params->multi_phy_config);
3552 ext_phy_link_up = 0;
3553 break;
3554 }
3555 }
3556 }
3557 prev_line_speed = vars->line_speed;
3558 /*
3559 * Step 2:
3560 * Read the status of the internal phy. In case of
3561 * DIRECT_SINGLE_MEDIA board, this link is the external link,
3562 * otherwise this is the link between the 577xx and the first
3563 * external phy
3564 */
3565 if (params->phy[INT_PHY].read_status)
3566 params->phy[INT_PHY].read_status(
3567 &params->phy[INT_PHY],
3568 params, vars);
3569 /*
3570 * The INT_PHY flow control reside in the vars. This include the
3571 * case where the speed or flow control are not set to AUTO.
3572 * Otherwise, the active external phy flow control result is set
3573 * to the vars. The ext_phy_line_speed is needed to check if the
3574 * speed is different between the internal phy and external phy.
3575 * This case may be result of intermediate link speed change.
3576 */
3577 if (active_external_phy > INT_PHY) {
3578 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
3579 /*
3580 * Link speed is taken from the XGXS. AN and FC result from
3581 * the external phy.
3582 */
3583 vars->link_status |= phy_vars[active_external_phy].link_status;
3584
3585 /*
3586 * if active_external_phy is first PHY and link is up - disable
3587 * disable TX on second external PHY
3588 */
3589 if (active_external_phy == EXT_PHY1) {
3590 if (params->phy[EXT_PHY2].phy_specific_func) {
3591 DP(NETIF_MSG_LINK, "Disabling TX on"
3592 " EXT_PHY2\n");
3593 params->phy[EXT_PHY2].phy_specific_func(
3594 &params->phy[EXT_PHY2],
3595 params, DISABLE_TX);
3596 }
3597 }
3598
3599 ext_phy_line_speed = phy_vars[active_external_phy].line_speed;
3600 vars->duplex = phy_vars[active_external_phy].duplex;
3601 if (params->phy[active_external_phy].supported &
3602 SUPPORTED_FIBRE)
3603 vars->link_status |= LINK_STATUS_SERDES_LINK;
3604 DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
3605 active_external_phy);
3606 }
3607
3608 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
3609 phy_index++) {
3610 if (params->phy[phy_index].flags &
3611 FLAGS_REARM_LATCH_SIGNAL) {
3612 bnx2x_rearm_latch_signal(bp, port,
3613 phy_index ==
3614 active_external_phy);
2232 break; 3615 break;
2233 } 3616 }
2234 } 3617 }
3618 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
3619 " ext_phy_line_speed = %d\n", vars->flow_ctrl,
3620 vars->link_status, ext_phy_line_speed);
3621 /*
3622 * Upon link speed change set the NIG into drain mode. Comes to
3623 * deals with possible FIFO glitch due to clk change when speed
3624 * is decreased without link down indicator
3625 */
3626
3627 if (vars->phy_link_up) {
3628 if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
3629 (ext_phy_line_speed != vars->line_speed)) {
3630 DP(NETIF_MSG_LINK, "Internal link speed %d is"
3631 " different than the external"
3632 " link speed %d\n", vars->line_speed,
3633 ext_phy_line_speed);
3634 vars->phy_link_up = 0;
3635 } else if (prev_line_speed != vars->line_speed) {
3636 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
3637 0);
3638 msleep(1);
3639 }
3640 }
3641
3642 /* anything 10 and over uses the bmac */
3643 link_10g = ((vars->line_speed == SPEED_10000) ||
3644 (vars->line_speed == SPEED_12000) ||
3645 (vars->line_speed == SPEED_12500) ||
3646 (vars->line_speed == SPEED_13000) ||
3647 (vars->line_speed == SPEED_15000) ||
3648 (vars->line_speed == SPEED_16000));
3649
3650 bnx2x_link_int_ack(params, vars, link_10g);
3651
3652 /*
3653 * In case external phy link is up, and internal link is down
3654 * (not initialized yet probably after link initialization, it
3655 * needs to be initialized.
3656 * Note that after link down-up as result of cable plug, the xgxs
3657 * link would probably become up again without the need
3658 * initialize it
3659 */
3660 if (!(SINGLE_MEDIA_DIRECT(params))) {
3661 DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
3662 " init_preceding = %d\n", ext_phy_link_up,
3663 vars->phy_link_up,
3664 params->phy[EXT_PHY1].flags &
3665 FLAGS_INIT_XGXS_FIRST);
3666 if (!(params->phy[EXT_PHY1].flags &
3667 FLAGS_INIT_XGXS_FIRST)
3668 && ext_phy_link_up && !vars->phy_link_up) {
3669 vars->line_speed = ext_phy_line_speed;
3670 if (vars->line_speed < SPEED_1000)
3671 vars->phy_flags |= PHY_SGMII_FLAG;
3672 else
3673 vars->phy_flags &= ~PHY_SGMII_FLAG;
3674 bnx2x_init_internal_phy(&params->phy[INT_PHY],
3675 params,
3676 vars);
3677 }
3678 }
3679 /*
3680 * Link is up only if both local phy and external phy (in case of
3681 * non-direct board) are up
3682 */
3683 vars->link_up = (vars->phy_link_up &&
3684 (ext_phy_link_up ||
3685 SINGLE_MEDIA_DIRECT(params)));
3686
3687 if (vars->link_up)
3688 rc = bnx2x_update_link_up(params, vars, link_10g);
3689 else
3690 rc = bnx2x_update_link_down(params, vars);
3691
3692 return rc;
3693}
3694
3695
3696/*****************************************************************************/
3697/* External Phy section */
3698/*****************************************************************************/
3699void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
3700{
3701 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3702 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3703 msleep(1);
3704 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3705 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
2235} 3706}
2236 3707
2237static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port, 3708static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
2238 u32 shmem_base, u32 spirom_ver) 3709 u32 spirom_ver, u32 ver_addr)
2239{ 3710{
2240 DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n", 3711 DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n",
2241 (u16)(spirom_ver>>16), (u16)spirom_ver, port); 3712 (u16)(spirom_ver>>16), (u16)spirom_ver, port);
2242 REG_WR(bp, shmem_base + 3713
2243 offsetof(struct shmem_region, 3714 if (ver_addr)
2244 port_mb[port].ext_phy_fw_version), 3715 REG_WR(bp, ver_addr, spirom_ver);
2245 spirom_ver);
2246} 3716}
2247 3717
2248static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, u8 port, 3718static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
2249 u32 ext_phy_type, u8 ext_phy_addr, 3719 struct bnx2x_phy *phy,
2250 u32 shmem_base) 3720 u8 port)
2251{ 3721{
2252 u16 fw_ver1, fw_ver2; 3722 u16 fw_ver1, fw_ver2;
2253 3723
2254 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, 3724 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
2255 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 3725 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
2256 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD, 3726 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
2257 MDIO_PMA_REG_ROM_VER2, &fw_ver2); 3727 MDIO_PMA_REG_ROM_VER2, &fw_ver2);
2258 bnx2x_save_spirom_version(bp, port, shmem_base, 3728 bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
2259 (u32)(fw_ver1<<16 | fw_ver2)); 3729 phy->ver_addr);
2260} 3730}
2261 3731
2262 3732static void bnx2x_ext_phy_set_pause(struct link_params *params,
2263static void bnx2x_save_8481_spirom_version(struct bnx2x *bp, u8 port, 3733 struct bnx2x_phy *phy,
2264 u8 ext_phy_addr, u32 shmem_base) 3734 struct link_vars *vars)
2265{ 3735{
2266 u16 val, fw_ver1, fw_ver2, cnt; 3736 u16 val;
2267 /* For the 32 bits registers in 8481, access via MDIO2ARM interface.*/ 3737 struct bnx2x *bp = params->bp;
2268 /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ 3738 /* read modify write pause advertizing */
2269 bnx2x_cl45_write(bp, port, 3739 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
2270 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2271 ext_phy_addr, MDIO_PMA_DEVAD,
2272 0xA819, 0x0014);
2273 bnx2x_cl45_write(bp, port,
2274 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2275 ext_phy_addr,
2276 MDIO_PMA_DEVAD,
2277 0xA81A,
2278 0xc200);
2279 bnx2x_cl45_write(bp, port,
2280 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2281 ext_phy_addr,
2282 MDIO_PMA_DEVAD,
2283 0xA81B,
2284 0x0000);
2285 bnx2x_cl45_write(bp, port,
2286 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2287 ext_phy_addr,
2288 MDIO_PMA_DEVAD,
2289 0xA81C,
2290 0x0300);
2291 bnx2x_cl45_write(bp, port,
2292 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2293 ext_phy_addr,
2294 MDIO_PMA_DEVAD,
2295 0xA817,
2296 0x0009);
2297 3740
2298 for (cnt = 0; cnt < 100; cnt++) { 3741 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
2299 bnx2x_cl45_read(bp, port, 3742
2300 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 3743 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
2301 ext_phy_addr, 3744 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
2302 MDIO_PMA_DEVAD, 3745 if ((vars->ieee_fc &
2303 0xA818, 3746 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
2304 &val); 3747 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
2305 if (val & 1) 3748 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
2306 break;
2307 udelay(5);
2308 } 3749 }
2309 if (cnt == 100) { 3750 if ((vars->ieee_fc &
2310 DP(NETIF_MSG_LINK, "Unable to read 8481 phy fw version(1)\n"); 3751 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
2311 bnx2x_save_spirom_version(bp, port, 3752 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
2312 shmem_base, 0); 3753 val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
2313 return;
2314 } 3754 }
3755 DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val);
3756 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
3757}
2315 3758
3759static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3760 struct link_params *params,
3761 struct link_vars *vars)
3762{
3763 struct bnx2x *bp = params->bp;
3764 u16 ld_pause; /* local */
3765 u16 lp_pause; /* link partner */
3766 u16 pause_result;
3767 u8 ret = 0;
3768 /* read twice */
2316 3769
2317 /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ 3770 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2318 bnx2x_cl45_write(bp, port, 3771
2319 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 3772 if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
2320 ext_phy_addr, MDIO_PMA_DEVAD, 3773 vars->flow_ctrl = phy->req_flow_ctrl;
2321 0xA819, 0x0000); 3774 else if (phy->req_line_speed != SPEED_AUTO_NEG)
2322 bnx2x_cl45_write(bp, port, 3775 vars->flow_ctrl = params->req_fc_auto_adv;
2323 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 3776 else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
2324 ext_phy_addr, MDIO_PMA_DEVAD, 3777 ret = 1;
2325 0xA81A, 0xc200); 3778 bnx2x_cl45_read(bp, phy,
2326 bnx2x_cl45_write(bp, port, 3779 MDIO_AN_DEVAD,
2327 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 3780 MDIO_AN_REG_ADV_PAUSE, &ld_pause);
2328 ext_phy_addr, MDIO_PMA_DEVAD, 3781 bnx2x_cl45_read(bp, phy,
2329 0xA817, 0x000A); 3782 MDIO_AN_DEVAD,
2330 for (cnt = 0; cnt < 100; cnt++) { 3783 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
2331 bnx2x_cl45_read(bp, port, 3784 pause_result = (ld_pause &
2332 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 3785 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
2333 ext_phy_addr, 3786 pause_result |= (lp_pause &
2334 MDIO_PMA_DEVAD, 3787 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
2335 0xA818, 3788 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
2336 &val); 3789 pause_result);
2337 if (val & 1) 3790 bnx2x_pause_resolve(vars, pause_result);
2338 break;
2339 udelay(5);
2340 } 3791 }
2341 if (cnt == 100) { 3792 return ret;
2342 DP(NETIF_MSG_LINK, "Unable to read 8481 phy fw version(2)\n"); 3793}
2343 bnx2x_save_spirom_version(bp, port, 3794
2344 shmem_base, 0); 3795static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp,
3796 struct bnx2x_phy *phy,
3797 struct link_vars *vars)
3798{
3799 u16 val;
3800 bnx2x_cl45_read(bp, phy,
3801 MDIO_AN_DEVAD,
3802 MDIO_AN_REG_STATUS, &val);
3803 bnx2x_cl45_read(bp, phy,
3804 MDIO_AN_DEVAD,
3805 MDIO_AN_REG_STATUS, &val);
3806 if (val & (1<<5))
3807 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
3808 if ((val & (1<<0)) == 0)
3809 vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED;
3810}
3811
3812/******************************************************************/
3813/* common BCM8073/BCM8727 PHY SECTION */
3814/******************************************************************/
3815static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
3816 struct link_params *params,
3817 struct link_vars *vars)
3818{
3819 struct bnx2x *bp = params->bp;
3820 if (phy->req_line_speed == SPEED_10 ||
3821 phy->req_line_speed == SPEED_100) {
3822 vars->flow_ctrl = phy->req_flow_ctrl;
2345 return; 3823 return;
2346 } 3824 }
2347 3825
2348 /* lower 16 bits of the register SPI_FW_STATUS */ 3826 if (bnx2x_ext_phy_resolve_fc(phy, params, vars) &&
2349 bnx2x_cl45_read(bp, port, 3827 (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) {
2350 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 3828 u16 pause_result;
2351 ext_phy_addr, 3829 u16 ld_pause; /* local */
2352 MDIO_PMA_DEVAD, 3830 u16 lp_pause; /* link partner */
2353 0xA81B, 3831 bnx2x_cl45_read(bp, phy,
2354 &fw_ver1); 3832 MDIO_AN_DEVAD,
2355 /* upper 16 bits of register SPI_FW_STATUS */ 3833 MDIO_AN_REG_CL37_FC_LD, &ld_pause);
2356 bnx2x_cl45_read(bp, port, 3834
2357 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 3835 bnx2x_cl45_read(bp, phy,
2358 ext_phy_addr, 3836 MDIO_AN_DEVAD,
2359 MDIO_PMA_DEVAD, 3837 MDIO_AN_REG_CL37_FC_LP, &lp_pause);
2360 0xA81C, 3838 pause_result = (ld_pause &
2361 &fw_ver2); 3839 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
3840 pause_result |= (lp_pause &
3841 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
2362 3842
2363 bnx2x_save_spirom_version(bp, port, 3843 bnx2x_pause_resolve(vars, pause_result);
2364 shmem_base, (fw_ver2<<16) | fw_ver1); 3844 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
3845 pause_result);
3846 }
2365} 3847}
2366 3848static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
2367static void bnx2x_bcm8072_external_rom_boot(struct link_params *params) 3849 struct bnx2x_phy *phy,
3850 u8 port)
2368{ 3851{
2369 struct bnx2x *bp = params->bp; 3852 u32 count = 0;
2370 u8 port = params->port; 3853 u16 fw_ver1, fw_msgout;
2371 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 3854 u8 rc = 0;
2372 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2373 3855
2374 /* Need to wait 200ms after reset */ 3856 /* Boot port from external ROM */
2375 msleep(200); 3857 /* EDC grst */
2376 /* Boot port from external ROM 3858 bnx2x_cl45_write(bp, phy,
2377 * Set ser_boot_ctl bit in the MISC_CTRL1 register 3859 MDIO_PMA_DEVAD,
2378 */ 3860 MDIO_PMA_REG_GEN_CTRL,
2379 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 3861 0x0001);
2380 MDIO_PMA_DEVAD, 3862
2381 MDIO_PMA_REG_MISC_CTRL1, 0x0001); 3863 /* ucode reboot and rst */
3864 bnx2x_cl45_write(bp, phy,
3865 MDIO_PMA_DEVAD,
3866 MDIO_PMA_REG_GEN_CTRL,
3867 0x008c);
3868
3869 bnx2x_cl45_write(bp, phy,
3870 MDIO_PMA_DEVAD,
3871 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2382 3872
2383 /* Reset internal microprocessor */ 3873 /* Reset internal microprocessor */
2384 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 3874 bnx2x_cl45_write(bp, phy,
2385 MDIO_PMA_DEVAD, 3875 MDIO_PMA_DEVAD,
2386 MDIO_PMA_REG_GEN_CTRL, 3876 MDIO_PMA_REG_GEN_CTRL,
2387 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 3877 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2388 /* set micro reset = 0 */ 3878
2389 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 3879 /* Release srst bit */
2390 MDIO_PMA_DEVAD, 3880 bnx2x_cl45_write(bp, phy,
2391 MDIO_PMA_REG_GEN_CTRL, 3881 MDIO_PMA_DEVAD,
2392 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); 3882 MDIO_PMA_REG_GEN_CTRL,
2393 /* Reset internal microprocessor */ 3883 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2394 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 3884
2395 MDIO_PMA_DEVAD, 3885 /* Delay 100ms per the PHY specifications */
2396 MDIO_PMA_REG_GEN_CTRL,
2397 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2398 /* wait for 100ms for code download via SPI port */
2399 msleep(100); 3886 msleep(100);
2400 3887
3888 /* 8073 sometimes taking longer to download */
3889 do {
3890 count++;
3891 if (count > 300) {
3892 DP(NETIF_MSG_LINK,
3893 "bnx2x_8073_8727_external_rom_boot port %x:"
3894 "Download failed. fw version = 0x%x\n",
3895 port, fw_ver1);
3896 rc = -EINVAL;
3897 break;
3898 }
3899
3900 bnx2x_cl45_read(bp, phy,
3901 MDIO_PMA_DEVAD,
3902 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
3903 bnx2x_cl45_read(bp, phy,
3904 MDIO_PMA_DEVAD,
3905 MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
3906
3907 msleep(1);
3908 } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
3909 ((fw_msgout & 0xff) != 0x03 && (phy->type ==
3910 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
3911
2401 /* Clear ser_boot_ctl bit */ 3912 /* Clear ser_boot_ctl bit */
2402 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 3913 bnx2x_cl45_write(bp, phy,
2403 MDIO_PMA_DEVAD, 3914 MDIO_PMA_DEVAD,
2404 MDIO_PMA_REG_MISC_CTRL1, 0x0000); 3915 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2405 /* Wait 100ms */ 3916 bnx2x_save_bcm_spirom_ver(bp, phy, port);
2406 msleep(100);
2407 3917
2408 bnx2x_save_bcm_spirom_ver(bp, port, 3918 DP(NETIF_MSG_LINK,
2409 ext_phy_type, 3919 "bnx2x_8073_8727_external_rom_boot port %x:"
2410 ext_phy_addr, 3920 "Download complete. fw version = 0x%x\n",
2411 params->shmem_base); 3921 port, fw_ver1);
3922
3923 return rc;
2412} 3924}
2413 3925
2414static u8 bnx2x_8073_is_snr_needed(struct link_params *params) 3926/******************************************************************/
3927/* BCM8073 PHY SECTION */
3928/******************************************************************/
3929static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
2415{ 3930{
2416 /* This is only required for 8073A1, version 102 only */ 3931 /* This is only required for 8073A1, version 102 only */
2417
2418 struct bnx2x *bp = params->bp;
2419 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2420 u16 val; 3932 u16 val;
2421 3933
2422 /* Read 8073 HW revision*/ 3934 /* Read 8073 HW revision*/
2423 bnx2x_cl45_read(bp, params->port, 3935 bnx2x_cl45_read(bp, phy,
2424 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, 3936 MDIO_PMA_DEVAD,
2425 ext_phy_addr, 3937 MDIO_PMA_REG_8073_CHIP_REV, &val);
2426 MDIO_PMA_DEVAD,
2427 MDIO_PMA_REG_8073_CHIP_REV, &val);
2428 3938
2429 if (val != 1) { 3939 if (val != 1) {
2430 /* No need to workaround in 8073 A1 */ 3940 /* No need to workaround in 8073 A1 */
2431 return 0; 3941 return 0;
2432 } 3942 }
2433 3943
2434 bnx2x_cl45_read(bp, params->port, 3944 bnx2x_cl45_read(bp, phy,
2435 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, 3945 MDIO_PMA_DEVAD,
2436 ext_phy_addr, 3946 MDIO_PMA_REG_ROM_VER2, &val);
2437 MDIO_PMA_DEVAD,
2438 MDIO_PMA_REG_ROM_VER2, &val);
2439 3947
2440 /* SNR should be applied only for version 0x102 */ 3948 /* SNR should be applied only for version 0x102 */
2441 if (val != 0x102) 3949 if (val != 0x102)
@@ -2444,17 +3952,13 @@ static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
2444 return 1; 3952 return 1;
2445} 3953}
2446 3954
2447static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params) 3955static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
2448{ 3956{
2449 struct bnx2x *bp = params->bp;
2450 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2451 u16 val, cnt, cnt1 ; 3957 u16 val, cnt, cnt1 ;
2452 3958
2453 bnx2x_cl45_read(bp, params->port, 3959 bnx2x_cl45_read(bp, phy,
2454 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, 3960 MDIO_PMA_DEVAD,
2455 ext_phy_addr, 3961 MDIO_PMA_REG_8073_CHIP_REV, &val);
2456 MDIO_PMA_DEVAD,
2457 MDIO_PMA_REG_8073_CHIP_REV, &val);
2458 3962
2459 if (val > 0) { 3963 if (val > 0) {
2460 /* No need to workaround in 8073 A1 */ 3964 /* No need to workaround in 8073 A1 */
@@ -2462,32 +3966,34 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
2462 } 3966 }
2463 /* XAUI workaround in 8073 A0: */ 3967 /* XAUI workaround in 8073 A0: */
2464 3968
2465 /* After loading the boot ROM and restarting Autoneg, 3969 /*
2466 poll Dev1, Reg $C820: */ 3970 * After loading the boot ROM and restarting Autoneg, poll
3971 * Dev1, Reg $C820:
3972 */
2467 3973
2468 for (cnt = 0; cnt < 1000; cnt++) { 3974 for (cnt = 0; cnt < 1000; cnt++) {
2469 bnx2x_cl45_read(bp, params->port, 3975 bnx2x_cl45_read(bp, phy,
2470 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, 3976 MDIO_PMA_DEVAD,
2471 ext_phy_addr, 3977 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
2472 MDIO_PMA_DEVAD, 3978 &val);
2473 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, 3979 /*
2474 &val); 3980 * If bit [14] = 0 or bit [13] = 0, continue on with
2475 /* If bit [14] = 0 or bit [13] = 0, continue on with 3981 * system initialization (XAUI work-around not required, as
2476 system initialization (XAUI work-around not required, 3982 * these bits indicate 2.5G or 1G link up).
2477 as these bits indicate 2.5G or 1G link up). */ 3983 */
2478 if (!(val & (1<<14)) || !(val & (1<<13))) { 3984 if (!(val & (1<<14)) || !(val & (1<<13))) {
2479 DP(NETIF_MSG_LINK, "XAUI work-around not required\n"); 3985 DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
2480 return 0; 3986 return 0;
2481 } else if (!(val & (1<<15))) { 3987 } else if (!(val & (1<<15))) {
2482 DP(NETIF_MSG_LINK, "clc bit 15 went off\n"); 3988 DP(NETIF_MSG_LINK, "bit 15 went off\n");
2483 /* If bit 15 is 0, then poll Dev1, Reg $C841 until 3989 /*
2484 it's MSB (bit 15) goes to 1 (indicating that the 3990 * If bit 15 is 0, then poll Dev1, Reg $C841 until it's
2485 XAUI workaround has completed), 3991 * MSB (bit15) goes to 1 (indicating that the XAUI
2486 then continue on with system initialization.*/ 3992 * workaround has completed), then continue on with
3993 * system initialization.
3994 */
2487 for (cnt1 = 0; cnt1 < 1000; cnt1++) { 3995 for (cnt1 = 0; cnt1 < 1000; cnt1++) {
2488 bnx2x_cl45_read(bp, params->port, 3996 bnx2x_cl45_read(bp, phy,
2489 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2490 ext_phy_addr,
2491 MDIO_PMA_DEVAD, 3997 MDIO_PMA_DEVAD,
2492 MDIO_PMA_REG_8073_XAUI_WA, &val); 3998 MDIO_PMA_REG_8073_XAUI_WA, &val);
2493 if (val & (1<<15)) { 3999 if (val & (1<<15)) {
@@ -2505,206 +4011,527 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
2505 return -EINVAL; 4011 return -EINVAL;
2506} 4012}
2507 4013
2508static void bnx2x_bcm8073_bcm8727_external_rom_boot(struct bnx2x *bp, u8 port, 4014static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy)
2509 u8 ext_phy_addr,
2510 u32 ext_phy_type,
2511 u32 shmem_base)
2512{ 4015{
2513 /* Boot port from external ROM */ 4016 /* Force KR or KX */
2514 /* EDC grst */ 4017 bnx2x_cl45_write(bp, phy,
2515 bnx2x_cl45_write(bp, port, 4018 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
2516 ext_phy_type, 4019 bnx2x_cl45_write(bp, phy,
2517 ext_phy_addr, 4020 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b);
2518 MDIO_PMA_DEVAD, 4021 bnx2x_cl45_write(bp, phy,
2519 MDIO_PMA_REG_GEN_CTRL, 4022 MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000);
2520 0x0001); 4023 bnx2x_cl45_write(bp, phy,
2521 4024 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
2522 /* ucode reboot and rst */
2523 bnx2x_cl45_write(bp, port,
2524 ext_phy_type,
2525 ext_phy_addr,
2526 MDIO_PMA_DEVAD,
2527 MDIO_PMA_REG_GEN_CTRL,
2528 0x008c);
2529
2530 bnx2x_cl45_write(bp, port,
2531 ext_phy_type,
2532 ext_phy_addr,
2533 MDIO_PMA_DEVAD,
2534 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2535
2536 /* Reset internal microprocessor */
2537 bnx2x_cl45_write(bp, port,
2538 ext_phy_type,
2539 ext_phy_addr,
2540 MDIO_PMA_DEVAD,
2541 MDIO_PMA_REG_GEN_CTRL,
2542 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2543
2544 /* Release srst bit */
2545 bnx2x_cl45_write(bp, port,
2546 ext_phy_type,
2547 ext_phy_addr,
2548 MDIO_PMA_DEVAD,
2549 MDIO_PMA_REG_GEN_CTRL,
2550 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2551
2552 /* wait for 100ms for code download via SPI port */
2553 msleep(100);
2554
2555 /* Clear ser_boot_ctl bit */
2556 bnx2x_cl45_write(bp, port,
2557 ext_phy_type,
2558 ext_phy_addr,
2559 MDIO_PMA_DEVAD,
2560 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2561
2562 bnx2x_save_bcm_spirom_ver(bp, port,
2563 ext_phy_type,
2564 ext_phy_addr,
2565 shmem_base);
2566} 4025}
2567 4026
2568static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port, 4027static void bnx2x_8073_set_pause_cl37(struct link_params *params,
2569 u8 ext_phy_addr, 4028 struct bnx2x_phy *phy,
2570 u32 shmem_base) 4029 struct link_vars *vars)
2571{ 4030{
2572 bnx2x_bcm8073_bcm8727_external_rom_boot(bp, port, ext_phy_addr, 4031 u16 cl37_val;
2573 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, 4032 struct bnx2x *bp = params->bp;
2574 shmem_base); 4033 bnx2x_cl45_read(bp, phy,
2575} 4034 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val);
2576 4035
2577static void bnx2x_bcm8727_external_rom_boot(struct bnx2x *bp, u8 port, 4036 cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2578 u8 ext_phy_addr, 4037 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
2579 u32 shmem_base) 4038 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
2580{ 4039 if ((vars->ieee_fc &
2581 bnx2x_bcm8073_bcm8727_external_rom_boot(bp, port, ext_phy_addr, 4040 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
2582 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 4041 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
2583 shmem_base); 4042 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
4043 }
4044 if ((vars->ieee_fc &
4045 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
4046 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
4047 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
4048 }
4049 if ((vars->ieee_fc &
4050 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
4051 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
4052 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
4053 }
4054 DP(NETIF_MSG_LINK,
4055 "Ext phy AN advertize cl37 0x%x\n", cl37_val);
2584 4056
4057 bnx2x_cl45_write(bp, phy,
4058 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val);
4059 msleep(500);
2585} 4060}
2586 4061
2587static void bnx2x_bcm8726_external_rom_boot(struct link_params *params) 4062static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4063 struct link_params *params,
4064 struct link_vars *vars)
2588{ 4065{
2589 struct bnx2x *bp = params->bp; 4066 struct bnx2x *bp = params->bp;
2590 u8 port = params->port; 4067 u16 val = 0, tmp1;
2591 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 4068 u8 gpio_port;
2592 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 4069 DP(NETIF_MSG_LINK, "Init 8073\n");
2593 4070
2594 /* Need to wait 100ms after reset */ 4071 if (CHIP_IS_E2(bp))
2595 msleep(100); 4072 gpio_port = BP_PATH(bp);
4073 else
4074 gpio_port = params->port;
4075 /* Restore normal power mode*/
4076 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4077 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
2596 4078
2597 /* Micro controller re-boot */ 4079 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2598 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 4080 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
2599 MDIO_PMA_DEVAD, 4081
2600 MDIO_PMA_REG_GEN_CTRL, 4082 /* enable LASI */
2601 0x018B); 4083 bnx2x_cl45_write(bp, phy,
4084 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, (1<<2));
4085 bnx2x_cl45_write(bp, phy,
4086 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x0004);
4087
4088 bnx2x_8073_set_pause_cl37(params, phy, vars);
4089
4090 bnx2x_cl45_read(bp, phy,
4091 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
4092
4093 bnx2x_cl45_read(bp, phy,
4094 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1);
4095
4096 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
4097
4098 /* Swap polarity if required - Must be done only in non-1G mode */
4099 if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
4100 /* Configure the 8073 to swap _P and _N of the KR lines */
4101 DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
4102 /* 10G Rx/Tx and 1G Tx signal polarity swap */
4103 bnx2x_cl45_read(bp, phy,
4104 MDIO_PMA_DEVAD,
4105 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
4106 bnx2x_cl45_write(bp, phy,
4107 MDIO_PMA_DEVAD,
4108 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
4109 (val | (3<<9)));
4110 }
2602 4111
2603 /* Set soft reset */
2604 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2605 MDIO_PMA_DEVAD,
2606 MDIO_PMA_REG_GEN_CTRL,
2607 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2608 4112
2609 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 4113 /* Enable CL37 BAM */
2610 MDIO_PMA_DEVAD, 4114 if (REG_RD(bp, params->shmem_base +
2611 MDIO_PMA_REG_MISC_CTRL1, 0x0001); 4115 offsetof(struct shmem_region, dev_info.
4116 port_hw_config[params->port].default_cfg)) &
4117 PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
4118
4119 bnx2x_cl45_read(bp, phy,
4120 MDIO_AN_DEVAD,
4121 MDIO_AN_REG_8073_BAM, &val);
4122 bnx2x_cl45_write(bp, phy,
4123 MDIO_AN_DEVAD,
4124 MDIO_AN_REG_8073_BAM, val | 1);
4125 DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
4126 }
4127 if (params->loopback_mode == LOOPBACK_EXT) {
4128 bnx2x_807x_force_10G(bp, phy);
4129 DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
4130 return 0;
4131 } else {
4132 bnx2x_cl45_write(bp, phy,
4133 MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
4134 }
4135 if (phy->req_line_speed != SPEED_AUTO_NEG) {
4136 if (phy->req_line_speed == SPEED_10000) {
4137 val = (1<<7);
4138 } else if (phy->req_line_speed == SPEED_2500) {
4139 val = (1<<5);
4140 /*
4141 * Note that 2.5G works only when used with 1G
4142 * advertisement
4143 */
4144 } else
4145 val = (1<<5);
4146 } else {
4147 val = 0;
4148 if (phy->speed_cap_mask &
4149 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
4150 val |= (1<<7);
4151
4152 /* Note that 2.5G works only when used with 1G advertisement */
4153 if (phy->speed_cap_mask &
4154 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
4155 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
4156 val |= (1<<5);
4157 DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val);
4158 }
4159
4160 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
4161 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1);
4162
4163 if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
4164 (phy->req_line_speed == SPEED_AUTO_NEG)) ||
4165 (phy->req_line_speed == SPEED_2500)) {
4166 u16 phy_ver;
4167 /* Allow 2.5G for A1 and above */
4168 bnx2x_cl45_read(bp, phy,
4169 MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV,
4170 &phy_ver);
4171 DP(NETIF_MSG_LINK, "Add 2.5G\n");
4172 if (phy_ver > 0)
4173 tmp1 |= 1;
4174 else
4175 tmp1 &= 0xfffe;
4176 } else {
4177 DP(NETIF_MSG_LINK, "Disable 2.5G\n");
4178 tmp1 &= 0xfffe;
4179 }
2612 4180
2613 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 4181 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1);
2614 MDIO_PMA_DEVAD, 4182 /* Add support for CL37 (passive mode) II */
2615 MDIO_PMA_REG_GEN_CTRL,
2616 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2617 4183
2618 /* wait for 150ms for microcode load */ 4184 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1);
2619 msleep(150); 4185 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD,
4186 (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ?
4187 0x20 : 0x40)));
2620 4188
2621 /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ 4189 /* Add support for CL37 (passive mode) III */
2622 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 4190 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
2623 MDIO_PMA_DEVAD,
2624 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2625 4191
2626 msleep(200); 4192 /*
2627 bnx2x_save_bcm_spirom_ver(bp, port, 4193 * The SNR will improve about 2db by changing BW and FEE main
2628 ext_phy_type, 4194 * tap. Rest commands are executed after link is up
2629 ext_phy_addr, 4195 * Change FFE main cursor to 5 in EDC register
2630 params->shmem_base); 4196 */
4197 if (bnx2x_8073_is_snr_needed(bp, phy))
4198 bnx2x_cl45_write(bp, phy,
4199 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
4200 0xFB0C);
4201
4202 /* Enable FEC (Forware Error Correction) Request in the AN */
4203 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1);
4204 tmp1 |= (1<<15);
4205 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1);
4206
4207 bnx2x_ext_phy_set_pause(params, phy, vars);
4208
4209 /* Restart autoneg */
4210 msleep(500);
4211 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
4212 DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
4213 ((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
4214 return 0;
4215}
4216
4217static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
4218 struct link_params *params,
4219 struct link_vars *vars)
4220{
4221 struct bnx2x *bp = params->bp;
4222 u8 link_up = 0;
4223 u16 val1, val2;
4224 u16 link_status = 0;
4225 u16 an1000_status = 0;
4226
4227 bnx2x_cl45_read(bp, phy,
4228 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
4229
4230 DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
4231
4232 /* clear the interrupt LASI status register */
4233 bnx2x_cl45_read(bp, phy,
4234 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
4235 bnx2x_cl45_read(bp, phy,
4236 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
4237 DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1);
4238 /* Clear MSG-OUT */
4239 bnx2x_cl45_read(bp, phy,
4240 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
4241
4242 /* Check the LASI */
4243 bnx2x_cl45_read(bp, phy,
4244 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2);
4245
4246 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
4247
4248 /* Check the link status */
4249 bnx2x_cl45_read(bp, phy,
4250 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
4251 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
4252
4253 bnx2x_cl45_read(bp, phy,
4254 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
4255 bnx2x_cl45_read(bp, phy,
4256 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
4257 link_up = ((val1 & 4) == 4);
4258 DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
4259
4260 if (link_up &&
4261 ((phy->req_line_speed != SPEED_10000))) {
4262 if (bnx2x_8073_xaui_wa(bp, phy) != 0)
4263 return 0;
4264 }
4265 bnx2x_cl45_read(bp, phy,
4266 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
4267 bnx2x_cl45_read(bp, phy,
4268 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
4269
4270 /* Check the link status on 1.1.2 */
4271 bnx2x_cl45_read(bp, phy,
4272 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
4273 bnx2x_cl45_read(bp, phy,
4274 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
4275 DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
4276 "an_link_status=0x%x\n", val2, val1, an1000_status);
4277
4278 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
4279 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
4280 /*
4281 * The SNR will improve about 2dbby changing the BW and FEE main
4282 * tap. The 1st write to change FFE main tap is set before
4283 * restart AN. Change PLL Bandwidth in EDC register
4284 */
4285 bnx2x_cl45_write(bp, phy,
4286 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
4287 0x26BC);
4288
4289 /* Change CDR Bandwidth in EDC register */
4290 bnx2x_cl45_write(bp, phy,
4291 MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH,
4292 0x0333);
4293 }
4294 bnx2x_cl45_read(bp, phy,
4295 MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
4296 &link_status);
4297
4298 /* Bits 0..2 --> speed detected, bits 13..15--> link is down */
4299 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
4300 link_up = 1;
4301 vars->line_speed = SPEED_10000;
4302 DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
4303 params->port);
4304 } else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) {
4305 link_up = 1;
4306 vars->line_speed = SPEED_2500;
4307 DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n",
4308 params->port);
4309 } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
4310 link_up = 1;
4311 vars->line_speed = SPEED_1000;
4312 DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
4313 params->port);
4314 } else {
4315 link_up = 0;
4316 DP(NETIF_MSG_LINK, "port %x: External link is down\n",
4317 params->port);
4318 }
4319
4320 if (link_up) {
4321 /* Swap polarity if required */
4322 if (params->lane_config &
4323 PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
4324 /* Configure the 8073 to swap P and N of the KR lines */
4325 bnx2x_cl45_read(bp, phy,
4326 MDIO_XS_DEVAD,
4327 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
4328 /*
4329 * Set bit 3 to invert Rx in 1G mode and clear this bit
4330 * when it`s in 10G mode.
4331 */
4332 if (vars->line_speed == SPEED_1000) {
4333 DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
4334 "the 8073\n");
4335 val1 |= (1<<3);
4336 } else
4337 val1 &= ~(1<<3);
4338
4339 bnx2x_cl45_write(bp, phy,
4340 MDIO_XS_DEVAD,
4341 MDIO_XS_REG_8073_RX_CTRL_PCIE,
4342 val1);
4343 }
4344 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
4345 bnx2x_8073_resolve_fc(phy, params, vars);
4346 vars->duplex = DUPLEX_FULL;
4347 }
4348 return link_up;
4349}
4350
4351static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
4352 struct link_params *params)
4353{
4354 struct bnx2x *bp = params->bp;
4355 u8 gpio_port;
4356 if (CHIP_IS_E2(bp))
4357 gpio_port = BP_PATH(bp);
4358 else
4359 gpio_port = params->port;
4360 DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
4361 gpio_port);
4362 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4363 MISC_REGISTERS_GPIO_OUTPUT_LOW,
4364 gpio_port);
4365}
4366
4367/******************************************************************/
4368/* BCM8705 PHY SECTION */
4369/******************************************************************/
4370static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
4371 struct link_params *params,
4372 struct link_vars *vars)
4373{
4374 struct bnx2x *bp = params->bp;
4375 DP(NETIF_MSG_LINK, "init 8705\n");
4376 /* Restore normal power mode*/
4377 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4378 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
4379 /* HW reset */
4380 bnx2x_ext_phy_hw_reset(bp, params->port);
4381 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
4382 bnx2x_wait_reset_complete(bp, phy, params);
4383
4384 bnx2x_cl45_write(bp, phy,
4385 MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
4386 bnx2x_cl45_write(bp, phy,
4387 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf);
4388 bnx2x_cl45_write(bp, phy,
4389 MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100);
4390 bnx2x_cl45_write(bp, phy,
4391 MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
4392 /* BCM8705 doesn't have microcode, hence the 0 */
4393 bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
4394 return 0;
4395}
4396
4397static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
4398 struct link_params *params,
4399 struct link_vars *vars)
4400{
4401 u8 link_up = 0;
4402 u16 val1, rx_sd;
4403 struct bnx2x *bp = params->bp;
4404 DP(NETIF_MSG_LINK, "read status 8705\n");
4405 bnx2x_cl45_read(bp, phy,
4406 MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
4407 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
4408
4409 bnx2x_cl45_read(bp, phy,
4410 MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
4411 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
4412
4413 bnx2x_cl45_read(bp, phy,
4414 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
4415
4416 bnx2x_cl45_read(bp, phy,
4417 MDIO_PMA_DEVAD, 0xc809, &val1);
4418 bnx2x_cl45_read(bp, phy,
4419 MDIO_PMA_DEVAD, 0xc809, &val1);
4420
4421 DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
4422 link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0));
4423 if (link_up) {
4424 vars->line_speed = SPEED_10000;
4425 bnx2x_ext_phy_resolve_fc(phy, params, vars);
4426 }
4427 return link_up;
2631} 4428}
2632 4429
2633static void bnx2x_sfp_set_transmitter(struct bnx2x *bp, u8 port, 4430/******************************************************************/
2634 u32 ext_phy_type, u8 ext_phy_addr, 4431/* SFP+ module Section */
2635 u8 tx_en) 4432/******************************************************************/
4433static u8 bnx2x_get_gpio_port(struct link_params *params)
4434{
4435 u8 gpio_port;
4436 u32 swap_val, swap_override;
4437 struct bnx2x *bp = params->bp;
4438 if (CHIP_IS_E2(bp))
4439 gpio_port = BP_PATH(bp);
4440 else
4441 gpio_port = params->port;
4442 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4443 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4444 return gpio_port ^ (swap_val && swap_override);
4445}
4446static void bnx2x_sfp_set_transmitter(struct link_params *params,
4447 struct bnx2x_phy *phy,
4448 u8 tx_en)
2636{ 4449{
2637 u16 val; 4450 u16 val;
4451 u8 port = params->port;
4452 struct bnx2x *bp = params->bp;
4453 u32 tx_en_mode;
2638 4454
2639 DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
2640 tx_en, port);
2641 /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/ 4455 /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/
2642 bnx2x_cl45_read(bp, port, 4456 tx_en_mode = REG_RD(bp, params->shmem_base +
2643 ext_phy_type, 4457 offsetof(struct shmem_region,
2644 ext_phy_addr, 4458 dev_info.port_hw_config[port].sfp_ctrl)) &
2645 MDIO_PMA_DEVAD, 4459 PORT_HW_CFG_TX_LASER_MASK;
2646 MDIO_PMA_REG_PHY_IDENTIFIER, 4460 DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
2647 &val); 4461 "mode = %x\n", tx_en, port, tx_en_mode);
2648 4462 switch (tx_en_mode) {
2649 if (tx_en) 4463 case PORT_HW_CFG_TX_LASER_MDIO:
2650 val &= ~(1<<15); 4464
2651 else 4465 bnx2x_cl45_read(bp, phy,
2652 val |= (1<<15); 4466 MDIO_PMA_DEVAD,
4467 MDIO_PMA_REG_PHY_IDENTIFIER,
4468 &val);
4469
4470 if (tx_en)
4471 val &= ~(1<<15);
4472 else
4473 val |= (1<<15);
4474
4475 bnx2x_cl45_write(bp, phy,
4476 MDIO_PMA_DEVAD,
4477 MDIO_PMA_REG_PHY_IDENTIFIER,
4478 val);
4479 break;
4480 case PORT_HW_CFG_TX_LASER_GPIO0:
4481 case PORT_HW_CFG_TX_LASER_GPIO1:
4482 case PORT_HW_CFG_TX_LASER_GPIO2:
4483 case PORT_HW_CFG_TX_LASER_GPIO3:
4484 {
4485 u16 gpio_pin;
4486 u8 gpio_port, gpio_mode;
4487 if (tx_en)
4488 gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
4489 else
4490 gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
2653 4491
2654 bnx2x_cl45_write(bp, port, 4492 gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
2655 ext_phy_type, 4493 gpio_port = bnx2x_get_gpio_port(params);
2656 ext_phy_addr, 4494 bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
2657 MDIO_PMA_DEVAD, 4495 break;
2658 MDIO_PMA_REG_PHY_IDENTIFIER, 4496 }
2659 val); 4497 default:
4498 DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
4499 break;
4500 }
2660} 4501}
2661 4502
2662static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params, 4503static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
2663 u16 addr, u8 byte_cnt, u8 *o_buf) 4504 struct link_params *params,
4505 u16 addr, u8 byte_cnt, u8 *o_buf)
2664{ 4506{
2665 struct bnx2x *bp = params->bp; 4507 struct bnx2x *bp = params->bp;
2666 u16 val = 0; 4508 u16 val = 0;
2667 u16 i; 4509 u16 i;
2668 u8 port = params->port;
2669 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2670 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2671
2672 if (byte_cnt > 16) { 4510 if (byte_cnt > 16) {
2673 DP(NETIF_MSG_LINK, "Reading from eeprom is" 4511 DP(NETIF_MSG_LINK, "Reading from eeprom is"
2674 " is limited to 0xf\n"); 4512 " is limited to 0xf\n");
2675 return -EINVAL; 4513 return -EINVAL;
2676 } 4514 }
2677 /* Set the read command byte count */ 4515 /* Set the read command byte count */
2678 bnx2x_cl45_write(bp, port, 4516 bnx2x_cl45_write(bp, phy,
2679 ext_phy_type, 4517 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
2680 ext_phy_addr, 4518 (byte_cnt | 0xa000));
2681 MDIO_PMA_DEVAD,
2682 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
2683 (byte_cnt | 0xa000));
2684 4519
2685 /* Set the read command address */ 4520 /* Set the read command address */
2686 bnx2x_cl45_write(bp, port, 4521 bnx2x_cl45_write(bp, phy,
2687 ext_phy_type, 4522 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
2688 ext_phy_addr, 4523 addr);
2689 MDIO_PMA_DEVAD,
2690 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
2691 addr);
2692 4524
2693 /* Activate read command */ 4525 /* Activate read command */
2694 bnx2x_cl45_write(bp, port, 4526 bnx2x_cl45_write(bp, phy,
2695 ext_phy_type, 4527 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
2696 ext_phy_addr, 4528 0x2c0f);
2697 MDIO_PMA_DEVAD,
2698 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
2699 0x2c0f);
2700 4529
2701 /* Wait up to 500us for command complete status */ 4530 /* Wait up to 500us for command complete status */
2702 for (i = 0; i < 100; i++) { 4531 for (i = 0; i < 100; i++) {
2703 bnx2x_cl45_read(bp, port, 4532 bnx2x_cl45_read(bp, phy,
2704 ext_phy_type, 4533 MDIO_PMA_DEVAD,
2705 ext_phy_addr, 4534 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2706 MDIO_PMA_DEVAD,
2707 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2708 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4535 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
2709 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) 4536 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
2710 break; 4537 break;
@@ -2721,36 +4548,30 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params,
2721 4548
2722 /* Read the buffer */ 4549 /* Read the buffer */
2723 for (i = 0; i < byte_cnt; i++) { 4550 for (i = 0; i < byte_cnt; i++) {
2724 bnx2x_cl45_read(bp, port, 4551 bnx2x_cl45_read(bp, phy,
2725 ext_phy_type, 4552 MDIO_PMA_DEVAD,
2726 ext_phy_addr, 4553 MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
2727 MDIO_PMA_DEVAD,
2728 MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
2729 o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK); 4554 o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
2730 } 4555 }
2731 4556
2732 for (i = 0; i < 100; i++) { 4557 for (i = 0; i < 100; i++) {
2733 bnx2x_cl45_read(bp, port, 4558 bnx2x_cl45_read(bp, phy,
2734 ext_phy_type, 4559 MDIO_PMA_DEVAD,
2735 ext_phy_addr, 4560 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2736 MDIO_PMA_DEVAD,
2737 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2738 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4561 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
2739 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 4562 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
2740 return 0;; 4563 return 0;
2741 msleep(1); 4564 msleep(1);
2742 } 4565 }
2743 return -EINVAL; 4566 return -EINVAL;
2744} 4567}
2745 4568
2746static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params, 4569static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
2747 u16 addr, u8 byte_cnt, u8 *o_buf) 4570 struct link_params *params,
4571 u16 addr, u8 byte_cnt, u8 *o_buf)
2748{ 4572{
2749 struct bnx2x *bp = params->bp; 4573 struct bnx2x *bp = params->bp;
2750 u16 val, i; 4574 u16 val, i;
2751 u8 port = params->port;
2752 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2753 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2754 4575
2755 if (byte_cnt > 16) { 4576 if (byte_cnt > 16) {
2756 DP(NETIF_MSG_LINK, "Reading from eeprom is" 4577 DP(NETIF_MSG_LINK, "Reading from eeprom is"
@@ -2759,54 +4580,44 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
2759 } 4580 }
2760 4581
2761 /* Need to read from 1.8000 to clear it */ 4582 /* Need to read from 1.8000 to clear it */
2762 bnx2x_cl45_read(bp, port, 4583 bnx2x_cl45_read(bp, phy,
2763 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 4584 MDIO_PMA_DEVAD,
2764 ext_phy_addr, 4585 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
2765 MDIO_PMA_DEVAD, 4586 &val);
2766 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
2767 &val);
2768 4587
2769 /* Set the read command byte count */ 4588 /* Set the read command byte count */
2770 bnx2x_cl45_write(bp, port, 4589 bnx2x_cl45_write(bp, phy,
2771 ext_phy_type, 4590 MDIO_PMA_DEVAD,
2772 ext_phy_addr, 4591 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
2773 MDIO_PMA_DEVAD, 4592 ((byte_cnt < 2) ? 2 : byte_cnt));
2774 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
2775 ((byte_cnt < 2) ? 2 : byte_cnt));
2776 4593
2777 /* Set the read command address */ 4594 /* Set the read command address */
2778 bnx2x_cl45_write(bp, port, 4595 bnx2x_cl45_write(bp, phy,
2779 ext_phy_type, 4596 MDIO_PMA_DEVAD,
2780 ext_phy_addr, 4597 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
2781 MDIO_PMA_DEVAD, 4598 addr);
2782 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
2783 addr);
2784 /* Set the destination address */ 4599 /* Set the destination address */
2785 bnx2x_cl45_write(bp, port, 4600 bnx2x_cl45_write(bp, phy,
2786 ext_phy_type, 4601 MDIO_PMA_DEVAD,
2787 ext_phy_addr, 4602 0x8004,
2788 MDIO_PMA_DEVAD, 4603 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
2789 0x8004,
2790 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
2791 4604
2792 /* Activate read command */ 4605 /* Activate read command */
2793 bnx2x_cl45_write(bp, port, 4606 bnx2x_cl45_write(bp, phy,
2794 ext_phy_type, 4607 MDIO_PMA_DEVAD,
2795 ext_phy_addr, 4608 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
2796 MDIO_PMA_DEVAD, 4609 0x8002);
2797 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 4610 /*
2798 0x8002); 4611 * Wait appropriate time for two-wire command to finish before
2799 /* Wait appropriate time for two-wire command to finish before 4612 * polling the status register
2800 polling the status register */ 4613 */
2801 msleep(1); 4614 msleep(1);
2802 4615
2803 /* Wait up to 500us for command complete status */ 4616 /* Wait up to 500us for command complete status */
2804 for (i = 0; i < 100; i++) { 4617 for (i = 0; i < 100; i++) {
2805 bnx2x_cl45_read(bp, port, 4618 bnx2x_cl45_read(bp, phy,
2806 ext_phy_type, 4619 MDIO_PMA_DEVAD,
2807 ext_phy_addr, 4620 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2808 MDIO_PMA_DEVAD,
2809 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2810 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4621 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
2811 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) 4622 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
2812 break; 4623 break;
@@ -2818,60 +4629,57 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
2818 DP(NETIF_MSG_LINK, 4629 DP(NETIF_MSG_LINK,
2819 "Got bad status 0x%x when reading from SFP+ EEPROM\n", 4630 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
2820 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); 4631 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
2821 return -EINVAL; 4632 return -EFAULT;
2822 } 4633 }
2823 4634
2824 /* Read the buffer */ 4635 /* Read the buffer */
2825 for (i = 0; i < byte_cnt; i++) { 4636 for (i = 0; i < byte_cnt; i++) {
2826 bnx2x_cl45_read(bp, port, 4637 bnx2x_cl45_read(bp, phy,
2827 ext_phy_type, 4638 MDIO_PMA_DEVAD,
2828 ext_phy_addr, 4639 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
2829 MDIO_PMA_DEVAD,
2830 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
2831 o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK); 4640 o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
2832 } 4641 }
2833 4642
2834 for (i = 0; i < 100; i++) { 4643 for (i = 0; i < 100; i++) {
2835 bnx2x_cl45_read(bp, port, 4644 bnx2x_cl45_read(bp, phy,
2836 ext_phy_type, 4645 MDIO_PMA_DEVAD,
2837 ext_phy_addr, 4646 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2838 MDIO_PMA_DEVAD,
2839 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2840 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4647 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
2841 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 4648 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
2842 return 0;; 4649 return 0;
2843 msleep(1); 4650 msleep(1);
2844 } 4651 }
2845 4652
2846 return -EINVAL; 4653 return -EINVAL;
2847} 4654}
2848 4655
2849u8 bnx2x_read_sfp_module_eeprom(struct link_params *params, u16 addr, 4656u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
2850 u8 byte_cnt, u8 *o_buf) 4657 struct link_params *params, u16 addr,
4658 u8 byte_cnt, u8 *o_buf)
2851{ 4659{
2852 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 4660 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
2853 4661 return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
2854 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) 4662 byte_cnt, o_buf);
2855 return bnx2x_8726_read_sfp_module_eeprom(params, addr, 4663 else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
2856 byte_cnt, o_buf); 4664 return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
2857 else if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) 4665 byte_cnt, o_buf);
2858 return bnx2x_8727_read_sfp_module_eeprom(params, addr,
2859 byte_cnt, o_buf);
2860 return -EINVAL; 4666 return -EINVAL;
2861} 4667}
2862 4668
2863static u8 bnx2x_get_edc_mode(struct link_params *params, 4669static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
2864 u16 *edc_mode) 4670 struct link_params *params,
4671 u16 *edc_mode)
2865{ 4672{
2866 struct bnx2x *bp = params->bp; 4673 struct bnx2x *bp = params->bp;
2867 u8 val, check_limiting_mode = 0; 4674 u8 val, check_limiting_mode = 0;
2868 *edc_mode = EDC_MODE_LIMITING; 4675 *edc_mode = EDC_MODE_LIMITING;
2869 4676
2870 /* First check for copper cable */ 4677 /* First check for copper cable */
2871 if (bnx2x_read_sfp_module_eeprom(params, 4678 if (bnx2x_read_sfp_module_eeprom(phy,
2872 SFP_EEPROM_CON_TYPE_ADDR, 4679 params,
2873 1, 4680 SFP_EEPROM_CON_TYPE_ADDR,
2874 &val) != 0) { 4681 1,
4682 &val) != 0) {
2875 DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); 4683 DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
2876 return -EINVAL; 4684 return -EINVAL;
2877 } 4685 }
@@ -2881,9 +4689,12 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
2881 { 4689 {
2882 u8 copper_module_type; 4690 u8 copper_module_type;
2883 4691
2884 /* Check if its active cable( includes SFP+ module) 4692 /*
2885 of passive cable*/ 4693 * Check if its active cable (includes SFP+ module)
2886 if (bnx2x_read_sfp_module_eeprom(params, 4694 * of passive cable
4695 */
4696 if (bnx2x_read_sfp_module_eeprom(phy,
4697 params,
2887 SFP_EEPROM_FC_TX_TECH_ADDR, 4698 SFP_EEPROM_FC_TX_TECH_ADDR,
2888 1, 4699 1,
2889 &copper_module_type) != 4700 &copper_module_type) !=
@@ -2923,10 +4734,11 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
2923 4734
2924 if (check_limiting_mode) { 4735 if (check_limiting_mode) {
2925 u8 options[SFP_EEPROM_OPTIONS_SIZE]; 4736 u8 options[SFP_EEPROM_OPTIONS_SIZE];
2926 if (bnx2x_read_sfp_module_eeprom(params, 4737 if (bnx2x_read_sfp_module_eeprom(phy,
2927 SFP_EEPROM_OPTIONS_ADDR, 4738 params,
2928 SFP_EEPROM_OPTIONS_SIZE, 4739 SFP_EEPROM_OPTIONS_ADDR,
2929 options) != 0) { 4740 SFP_EEPROM_OPTIONS_SIZE,
4741 options) != 0) {
2930 DP(NETIF_MSG_LINK, "Failed to read Option" 4742 DP(NETIF_MSG_LINK, "Failed to read Option"
2931 " field from module EEPROM\n"); 4743 " field from module EEPROM\n");
2932 return -EINVAL; 4744 return -EINVAL;
@@ -2939,17 +4751,19 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
2939 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); 4751 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
2940 return 0; 4752 return 0;
2941} 4753}
2942 4754/*
2943/* This function read the relevant field from the module ( SFP+ ), 4755 * This function read the relevant field from the module (SFP+), and verify it
2944 and verify it is compliant with this board */ 4756 * is compliant with this board
2945static u8 bnx2x_verify_sfp_module(struct link_params *params) 4757 */
4758static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
4759 struct link_params *params)
2946{ 4760{
2947 struct bnx2x *bp = params->bp; 4761 struct bnx2x *bp = params->bp;
2948 u32 val; 4762 u32 val, cmd;
2949 u32 fw_resp; 4763 u32 fw_resp, fw_cmd_param;
2950 char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1]; 4764 char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1];
2951 char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1]; 4765 char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1];
2952 4766 phy->flags &= ~FLAGS_SFP_NOT_APPROVED;
2953 val = REG_RD(bp, params->shmem_base + 4767 val = REG_RD(bp, params->shmem_base +
2954 offsetof(struct shmem_region, dev_info. 4768 offsetof(struct shmem_region, dev_info.
2955 port_feature_config[params->port].config)); 4769 port_feature_config[params->port].config));
@@ -2959,162 +4773,72 @@ static u8 bnx2x_verify_sfp_module(struct link_params *params)
2959 return 0; 4773 return 0;
2960 } 4774 }
2961 4775
2962 /* Ask the FW to validate the module */ 4776 if (params->feature_config_flags &
2963 if (!(params->feature_config_flags & 4777 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) {
2964 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY)) { 4778 /* Use specific phy request */
4779 cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL;
4780 } else if (params->feature_config_flags &
4781 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
4782 /* Use first phy request only in case of non-dual media*/
4783 if (DUAL_MEDIA(params)) {
4784 DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
4785 "verification\n");
4786 return -EINVAL;
4787 }
4788 cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
4789 } else {
4790 /* No support in OPT MDL detection */
2965 DP(NETIF_MSG_LINK, "FW does not support OPT MDL " 4791 DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
2966 "verification\n"); 4792 "verification\n");
2967 return -EINVAL; 4793 return -EINVAL;
2968 } 4794 }
2969 4795
2970 fw_resp = bnx2x_fw_command(bp, DRV_MSG_CODE_VRFY_OPT_MDL); 4796 fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
4797 fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
2971 if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) { 4798 if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
2972 DP(NETIF_MSG_LINK, "Approved module\n"); 4799 DP(NETIF_MSG_LINK, "Approved module\n");
2973 return 0; 4800 return 0;
2974 } 4801 }
2975 4802
2976 /* format the warning message */ 4803 /* format the warning message */
2977 if (bnx2x_read_sfp_module_eeprom(params, 4804 if (bnx2x_read_sfp_module_eeprom(phy,
2978 SFP_EEPROM_VENDOR_NAME_ADDR, 4805 params,
2979 SFP_EEPROM_VENDOR_NAME_SIZE, 4806 SFP_EEPROM_VENDOR_NAME_ADDR,
2980 (u8 *)vendor_name)) 4807 SFP_EEPROM_VENDOR_NAME_SIZE,
4808 (u8 *)vendor_name))
2981 vendor_name[0] = '\0'; 4809 vendor_name[0] = '\0';
2982 else 4810 else
2983 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0'; 4811 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
2984 if (bnx2x_read_sfp_module_eeprom(params, 4812 if (bnx2x_read_sfp_module_eeprom(phy,
2985 SFP_EEPROM_PART_NO_ADDR, 4813 params,
2986 SFP_EEPROM_PART_NO_SIZE, 4814 SFP_EEPROM_PART_NO_ADDR,
2987 (u8 *)vendor_pn)) 4815 SFP_EEPROM_PART_NO_SIZE,
4816 (u8 *)vendor_pn))
2988 vendor_pn[0] = '\0'; 4817 vendor_pn[0] = '\0';
2989 else 4818 else
2990 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0'; 4819 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
2991 4820
2992 netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected, Port %d from %s part number %s\n", 4821 netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
2993 params->port, vendor_name, vendor_pn); 4822 " Port %d from %s part number %s\n",
4823 params->port, vendor_name, vendor_pn);
4824 phy->flags |= FLAGS_SFP_NOT_APPROVED;
2994 return -EINVAL; 4825 return -EINVAL;
2995} 4826}
2996 4827
2997static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params, 4828static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
2998 u16 edc_mode) 4829 struct link_params *params)
2999{
3000 struct bnx2x *bp = params->bp;
3001 u8 port = params->port;
3002 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3003 u16 cur_limiting_mode;
3004
3005 bnx2x_cl45_read(bp, port,
3006 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3007 ext_phy_addr,
3008 MDIO_PMA_DEVAD,
3009 MDIO_PMA_REG_ROM_VER2,
3010 &cur_limiting_mode);
3011 DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
3012 cur_limiting_mode);
3013
3014 if (edc_mode == EDC_MODE_LIMITING) {
3015 DP(NETIF_MSG_LINK,
3016 "Setting LIMITING MODE\n");
3017 bnx2x_cl45_write(bp, port,
3018 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3019 ext_phy_addr,
3020 MDIO_PMA_DEVAD,
3021 MDIO_PMA_REG_ROM_VER2,
3022 EDC_MODE_LIMITING);
3023 } else { /* LRM mode ( default )*/
3024
3025 DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
3026
3027 /* Changing to LRM mode takes quite few seconds.
3028 So do it only if current mode is limiting
3029 ( default is LRM )*/
3030 if (cur_limiting_mode != EDC_MODE_LIMITING)
3031 return 0;
3032
3033 bnx2x_cl45_write(bp, port,
3034 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3035 ext_phy_addr,
3036 MDIO_PMA_DEVAD,
3037 MDIO_PMA_REG_LRM_MODE,
3038 0);
3039 bnx2x_cl45_write(bp, port,
3040 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3041 ext_phy_addr,
3042 MDIO_PMA_DEVAD,
3043 MDIO_PMA_REG_ROM_VER2,
3044 0x128);
3045 bnx2x_cl45_write(bp, port,
3046 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3047 ext_phy_addr,
3048 MDIO_PMA_DEVAD,
3049 MDIO_PMA_REG_MISC_CTRL0,
3050 0x4008);
3051 bnx2x_cl45_write(bp, port,
3052 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3053 ext_phy_addr,
3054 MDIO_PMA_DEVAD,
3055 MDIO_PMA_REG_LRM_MODE,
3056 0xaaaa);
3057 }
3058 return 0;
3059}
3060
3061static u8 bnx2x_bcm8727_set_limiting_mode(struct link_params *params,
3062 u16 edc_mode)
3063{
3064 struct bnx2x *bp = params->bp;
3065 u8 port = params->port;
3066 u16 phy_identifier;
3067 u16 rom_ver2_val;
3068 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3069
3070 bnx2x_cl45_read(bp, port,
3071 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3072 ext_phy_addr,
3073 MDIO_PMA_DEVAD,
3074 MDIO_PMA_REG_PHY_IDENTIFIER,
3075 &phy_identifier);
3076
3077 bnx2x_cl45_write(bp, port,
3078 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3079 ext_phy_addr,
3080 MDIO_PMA_DEVAD,
3081 MDIO_PMA_REG_PHY_IDENTIFIER,
3082 (phy_identifier & ~(1<<9)));
3083
3084 bnx2x_cl45_read(bp, port,
3085 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3086 ext_phy_addr,
3087 MDIO_PMA_DEVAD,
3088 MDIO_PMA_REG_ROM_VER2,
3089 &rom_ver2_val);
3090 /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
3091 bnx2x_cl45_write(bp, port,
3092 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3093 ext_phy_addr,
3094 MDIO_PMA_DEVAD,
3095 MDIO_PMA_REG_ROM_VER2,
3096 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
3097
3098 bnx2x_cl45_write(bp, port,
3099 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3100 ext_phy_addr,
3101 MDIO_PMA_DEVAD,
3102 MDIO_PMA_REG_PHY_IDENTIFIER,
3103 (phy_identifier | (1<<9)));
3104
3105 return 0;
3106}
3107
3108 4830
3109static u8 bnx2x_wait_for_sfp_module_initialized(struct link_params *params)
3110{ 4831{
3111 u8 val; 4832 u8 val;
3112 struct bnx2x *bp = params->bp; 4833 struct bnx2x *bp = params->bp;
3113 u16 timeout; 4834 u16 timeout;
3114 /* Initialization time after hot-plug may take up to 300ms for some 4835 /*
3115 phys type ( e.g. JDSU ) */ 4836 * Initialization time after hot-plug may take up to 300ms for
4837 * some phys type ( e.g. JDSU )
4838 */
4839
3116 for (timeout = 0; timeout < 60; timeout++) { 4840 for (timeout = 0; timeout < 60; timeout++) {
3117 if (bnx2x_read_sfp_module_eeprom(params, 1, 1, &val) 4841 if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
3118 == 0) { 4842 == 0) {
3119 DP(NETIF_MSG_LINK, "SFP+ module initialization " 4843 DP(NETIF_MSG_LINK, "SFP+ module initialization "
3120 "took %d ms\n", timeout * 5); 4844 "took %d ms\n", timeout * 5);
@@ -3126,28 +4850,26 @@ static u8 bnx2x_wait_for_sfp_module_initialized(struct link_params *params)
3126} 4850}
3127 4851
3128static void bnx2x_8727_power_module(struct bnx2x *bp, 4852static void bnx2x_8727_power_module(struct bnx2x *bp,
3129 struct link_params *params, 4853 struct bnx2x_phy *phy,
3130 u8 ext_phy_addr, u8 is_power_up) { 4854 u8 is_power_up) {
3131 /* Make sure GPIOs are not using for LED mode */ 4855 /* Make sure GPIOs are not using for LED mode */
3132 u16 val; 4856 u16 val;
3133 u8 port = params->port;
3134 /* 4857 /*
3135 * In the GPIO register, bit 4 is use to detemine if the GPIOs are 4858 * In the GPIO register, bit 4 is use to determine if the GPIOs are
3136 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for 4859 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
3137 * output 4860 * output
3138 * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0 4861 * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
3139 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1 4862 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
3140 * where the 1st bit is the over-current(only input), and 2nd bit is 4863 * where the 1st bit is the over-current(only input), and 2nd bit is
3141 * for power( only output ) 4864 * for power( only output )
3142 */ 4865 *
3143
3144 /*
3145 * In case of NOC feature is disabled and power is up, set GPIO control 4866 * In case of NOC feature is disabled and power is up, set GPIO control
3146 * as input to enable listening of over-current indication 4867 * as input to enable listening of over-current indication
3147 */ 4868 */
3148 4869 if (phy->flags & FLAGS_NOC)
3149 if (!(params->feature_config_flags & 4870 return;
3150 FEATURE_CONFIG_BCM8727_NOC) && is_power_up) 4871 if (!(phy->flags &
4872 FLAGS_NOC) && is_power_up)
3151 val = (1<<4); 4873 val = (1<<4);
3152 else 4874 else
3153 /* 4875 /*
@@ -3156,21 +4878,156 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
3156 */ 4878 */
3157 val = ((!(is_power_up)) << 1); 4879 val = ((!(is_power_up)) << 1);
3158 4880
3159 bnx2x_cl45_write(bp, port, 4881 bnx2x_cl45_write(bp, phy,
3160 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 4882 MDIO_PMA_DEVAD,
3161 ext_phy_addr, 4883 MDIO_PMA_REG_8727_GPIO_CTRL,
3162 MDIO_PMA_DEVAD, 4884 val);
3163 MDIO_PMA_REG_8727_GPIO_CTRL, 4885}
3164 val); 4886
4887static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
4888 struct bnx2x_phy *phy,
4889 u16 edc_mode)
4890{
4891 u16 cur_limiting_mode;
4892
4893 bnx2x_cl45_read(bp, phy,
4894 MDIO_PMA_DEVAD,
4895 MDIO_PMA_REG_ROM_VER2,
4896 &cur_limiting_mode);
4897 DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
4898 cur_limiting_mode);
4899
4900 if (edc_mode == EDC_MODE_LIMITING) {
4901 DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
4902 bnx2x_cl45_write(bp, phy,
4903 MDIO_PMA_DEVAD,
4904 MDIO_PMA_REG_ROM_VER2,
4905 EDC_MODE_LIMITING);
4906 } else { /* LRM mode ( default )*/
4907
4908 DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
4909
4910 /*
4911 * Changing to LRM mode takes quite few seconds. So do it only
4912 * if current mode is limiting (default is LRM)
4913 */
4914 if (cur_limiting_mode != EDC_MODE_LIMITING)
4915 return 0;
4916
4917 bnx2x_cl45_write(bp, phy,
4918 MDIO_PMA_DEVAD,
4919 MDIO_PMA_REG_LRM_MODE,
4920 0);
4921 bnx2x_cl45_write(bp, phy,
4922 MDIO_PMA_DEVAD,
4923 MDIO_PMA_REG_ROM_VER2,
4924 0x128);
4925 bnx2x_cl45_write(bp, phy,
4926 MDIO_PMA_DEVAD,
4927 MDIO_PMA_REG_MISC_CTRL0,
4928 0x4008);
4929 bnx2x_cl45_write(bp, phy,
4930 MDIO_PMA_DEVAD,
4931 MDIO_PMA_REG_LRM_MODE,
4932 0xaaaa);
4933 }
4934 return 0;
4935}
4936
4937static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
4938 struct bnx2x_phy *phy,
4939 u16 edc_mode)
4940{
4941 u16 phy_identifier;
4942 u16 rom_ver2_val;
4943 bnx2x_cl45_read(bp, phy,
4944 MDIO_PMA_DEVAD,
4945 MDIO_PMA_REG_PHY_IDENTIFIER,
4946 &phy_identifier);
4947
4948 bnx2x_cl45_write(bp, phy,
4949 MDIO_PMA_DEVAD,
4950 MDIO_PMA_REG_PHY_IDENTIFIER,
4951 (phy_identifier & ~(1<<9)));
4952
4953 bnx2x_cl45_read(bp, phy,
4954 MDIO_PMA_DEVAD,
4955 MDIO_PMA_REG_ROM_VER2,
4956 &rom_ver2_val);
4957 /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
4958 bnx2x_cl45_write(bp, phy,
4959 MDIO_PMA_DEVAD,
4960 MDIO_PMA_REG_ROM_VER2,
4961 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
4962
4963 bnx2x_cl45_write(bp, phy,
4964 MDIO_PMA_DEVAD,
4965 MDIO_PMA_REG_PHY_IDENTIFIER,
4966 (phy_identifier | (1<<9)));
4967
4968 return 0;
3165} 4969}
3166 4970
3167static u8 bnx2x_sfp_module_detection(struct link_params *params) 4971static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
4972 struct link_params *params,
4973 u32 action)
4974{
4975 struct bnx2x *bp = params->bp;
4976
4977 switch (action) {
4978 case DISABLE_TX:
4979 bnx2x_sfp_set_transmitter(params, phy, 0);
4980 break;
4981 case ENABLE_TX:
4982 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
4983 bnx2x_sfp_set_transmitter(params, phy, 1);
4984 break;
4985 default:
4986 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
4987 action);
4988 return;
4989 }
4990}
4991
4992static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
4993 u8 gpio_mode)
4994{
4995 struct bnx2x *bp = params->bp;
4996
4997 u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
4998 offsetof(struct shmem_region,
4999 dev_info.port_hw_config[params->port].sfp_ctrl)) &
5000 PORT_HW_CFG_FAULT_MODULE_LED_MASK;
5001 switch (fault_led_gpio) {
5002 case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
5003 return;
5004 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
5005 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
5006 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
5007 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
5008 {
5009 u8 gpio_port = bnx2x_get_gpio_port(params);
5010 u16 gpio_pin = fault_led_gpio -
5011 PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
5012 DP(NETIF_MSG_LINK, "Set fault module-detected led "
5013 "pin %x port %x mode %x\n",
5014 gpio_pin, gpio_port, gpio_mode);
5015 bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
5016 }
5017 break;
5018 default:
5019 DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
5020 fault_led_gpio);
5021 }
5022}
5023
5024static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
5025 struct link_params *params)
3168{ 5026{
3169 struct bnx2x *bp = params->bp; 5027 struct bnx2x *bp = params->bp;
3170 u16 edc_mode; 5028 u16 edc_mode;
3171 u8 rc = 0; 5029 u8 rc = 0;
3172 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 5030
3173 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3174 u32 val = REG_RD(bp, params->shmem_base + 5031 u32 val = REG_RD(bp, params->shmem_base +
3175 offsetof(struct shmem_region, dev_info. 5032 offsetof(struct shmem_region, dev_info.
3176 port_feature_config[params->port].config)); 5033 port_feature_config[params->port].config));
@@ -3178,45 +5035,42 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
3178 DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n", 5035 DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
3179 params->port); 5036 params->port);
3180 5037
3181 if (bnx2x_get_edc_mode(params, &edc_mode) != 0) { 5038 if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
3182 DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); 5039 DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
3183 return -EINVAL; 5040 return -EINVAL;
3184 } else if (bnx2x_verify_sfp_module(params) != 5041 } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
3185 0) {
3186 /* check SFP+ module compatibility */ 5042 /* check SFP+ module compatibility */
3187 DP(NETIF_MSG_LINK, "Module verification failed!!\n"); 5043 DP(NETIF_MSG_LINK, "Module verification failed!!\n");
3188 rc = -EINVAL; 5044 rc = -EINVAL;
3189 /* Turn on fault module-detected led */ 5045 /* Turn on fault module-detected led */
3190 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 5046 bnx2x_set_sfp_module_fault_led(params,
3191 MISC_REGISTERS_GPIO_HIGH, 5047 MISC_REGISTERS_GPIO_HIGH);
3192 params->port); 5048
3193 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) && 5049 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
3194 ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 5050 ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
3195 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) { 5051 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
3196 /* Shutdown SFP+ module */ 5052 /* Shutdown SFP+ module */
3197 DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n"); 5053 DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
3198 bnx2x_8727_power_module(bp, params, 5054 bnx2x_8727_power_module(bp, phy, 0);
3199 ext_phy_addr, 0);
3200 return rc; 5055 return rc;
3201 } 5056 }
3202 } else { 5057 } else {
3203 /* Turn off fault module-detected led */ 5058 /* Turn off fault module-detected led */
3204 DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n"); 5059 bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
3205 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
3206 MISC_REGISTERS_GPIO_LOW,
3207 params->port);
3208 } 5060 }
3209 5061
3210 /* power up the SFP module */ 5062 /* power up the SFP module */
3211 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) 5063 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
3212 bnx2x_8727_power_module(bp, params, ext_phy_addr, 1); 5064 bnx2x_8727_power_module(bp, phy, 1);
3213 5065
3214 /* Check and set limiting mode / LRM mode on 8726. 5066 /*
3215 On 8727 it is done automatically */ 5067 * Check and set limiting mode / LRM mode on 8726. On 8727 it
3216 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) 5068 * is done automatically
3217 bnx2x_bcm8726_set_limiting_mode(params, edc_mode); 5069 */
5070 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
5071 bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
3218 else 5072 else
3219 bnx2x_bcm8727_set_limiting_mode(params, edc_mode); 5073 bnx2x_8727_set_limiting_mode(bp, phy, edc_mode);
3220 /* 5074 /*
3221 * Enable transmit for this module if the module is approved, or 5075 * Enable transmit for this module if the module is approved, or
3222 * if unapproved modules should also enable the Tx laser 5076 * if unapproved modules should also enable the Tx laser
@@ -3224,11 +5078,9 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
3224 if (rc == 0 || 5078 if (rc == 0 ||
3225 (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) != 5079 (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
3226 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) 5080 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
3227 bnx2x_sfp_set_transmitter(bp, params->port, 5081 bnx2x_sfp_set_transmitter(params, phy, 1);
3228 ext_phy_type, ext_phy_addr, 1);
3229 else 5082 else
3230 bnx2x_sfp_set_transmitter(bp, params->port, 5083 bnx2x_sfp_set_transmitter(params, phy, 0);
3231 ext_phy_type, ext_phy_addr, 0);
3232 5084
3233 return rc; 5085 return rc;
3234} 5086}
@@ -3236,2729 +5088,2537 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
3236void bnx2x_handle_module_detect_int(struct link_params *params) 5088void bnx2x_handle_module_detect_int(struct link_params *params)
3237{ 5089{
3238 struct bnx2x *bp = params->bp; 5090 struct bnx2x *bp = params->bp;
5091 struct bnx2x_phy *phy = &params->phy[EXT_PHY1];
3239 u32 gpio_val; 5092 u32 gpio_val;
3240 u8 port = params->port; 5093 u8 port = params->port;
3241 5094
3242 /* Set valid module led off */ 5095 /* Set valid module led off */
3243 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 5096 bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
3244 MISC_REGISTERS_GPIO_HIGH,
3245 params->port);
3246 5097
3247 /* Get current gpio val refelecting module plugged in / out*/ 5098 /* Get current gpio val reflecting module plugged in / out*/
3248 gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port); 5099 gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
3249 5100
3250 /* Call the handling function in case module is detected */ 5101 /* Call the handling function in case module is detected */
3251 if (gpio_val == 0) { 5102 if (gpio_val == 0) {
3252 5103
3253 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3, 5104 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
3254 MISC_REGISTERS_GPIO_INT_OUTPUT_CLR, 5105 MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
3255 port); 5106 port);
3256 5107
3257 if (bnx2x_wait_for_sfp_module_initialized(params) == 5108 if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
3258 0) 5109 bnx2x_sfp_module_detection(phy, params);
3259 bnx2x_sfp_module_detection(params);
3260 else 5110 else
3261 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); 5111 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
3262 } else { 5112 } else {
3263 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3264
3265 u32 ext_phy_type =
3266 XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3267 u32 val = REG_RD(bp, params->shmem_base + 5113 u32 val = REG_RD(bp, params->shmem_base +
3268 offsetof(struct shmem_region, dev_info. 5114 offsetof(struct shmem_region, dev_info.
3269 port_feature_config[params->port]. 5115 port_feature_config[params->port].
3270 config)); 5116 config));
3271 5117
3272 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3, 5118 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
3273 MISC_REGISTERS_GPIO_INT_OUTPUT_SET, 5119 MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
3274 port); 5120 port);
3275 /* Module was plugged out. */ 5121 /*
3276 /* Disable transmit for this module */ 5122 * Module was plugged out.
5123 * Disable transmit for this module
5124 */
3277 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 5125 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
3278 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) 5126 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
3279 bnx2x_sfp_set_transmitter(bp, params->port, 5127 bnx2x_sfp_set_transmitter(params, phy, 0);
3280 ext_phy_type, ext_phy_addr, 0);
3281 } 5128 }
3282} 5129}
3283 5130
3284static void bnx2x_bcm807x_force_10G(struct link_params *params) 5131/******************************************************************/
3285{ 5132/* common BCM8706/BCM8726 PHY SECTION */
3286 struct bnx2x *bp = params->bp; 5133/******************************************************************/
3287 u8 port = params->port; 5134static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
3288 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 5135 struct link_params *params,
3289 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 5136 struct link_vars *vars)
3290
3291 /* Force KR or KX */
3292 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3293 MDIO_PMA_DEVAD,
3294 MDIO_PMA_REG_CTRL,
3295 0x2040);
3296 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3297 MDIO_PMA_DEVAD,
3298 MDIO_PMA_REG_10G_CTRL2,
3299 0x000b);
3300 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3301 MDIO_PMA_DEVAD,
3302 MDIO_PMA_REG_BCM_CTRL,
3303 0x0000);
3304 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3305 MDIO_AN_DEVAD,
3306 MDIO_AN_REG_CTRL,
3307 0x0000);
3308}
3309
3310static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
3311{ 5137{
5138 u8 link_up = 0;
5139 u16 val1, val2, rx_sd, pcs_status;
3312 struct bnx2x *bp = params->bp; 5140 struct bnx2x *bp = params->bp;
3313 u8 port = params->port; 5141 DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
3314 u16 val; 5142 /* Clear RX Alarm*/
3315 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 5143 bnx2x_cl45_read(bp, phy,
3316 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 5144 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2);
3317 5145 /* clear LASI indication*/
3318 bnx2x_cl45_read(bp, params->port, 5146 bnx2x_cl45_read(bp, phy,
3319 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, 5147 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
3320 ext_phy_addr, 5148 bnx2x_cl45_read(bp, phy,
3321 MDIO_PMA_DEVAD, 5149 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2);
3322 MDIO_PMA_REG_8073_CHIP_REV, &val); 5150 DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2);
3323 5151
3324 if (val == 0) { 5152 bnx2x_cl45_read(bp, phy,
3325 /* Mustn't set low power mode in 8073 A0 */ 5153 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
3326 return; 5154 bnx2x_cl45_read(bp, phy,
5155 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status);
5156 bnx2x_cl45_read(bp, phy,
5157 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
5158 bnx2x_cl45_read(bp, phy,
5159 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
5160
5161 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
5162 " link_status 0x%x\n", rx_sd, pcs_status, val2);
5163 /*
5164 * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
5165 * are set, or if the autoneg bit 1 is set
5166 */
5167 link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
5168 if (link_up) {
5169 if (val2 & (1<<1))
5170 vars->line_speed = SPEED_1000;
5171 else
5172 vars->line_speed = SPEED_10000;
5173 bnx2x_ext_phy_resolve_fc(phy, params, vars);
5174 vars->duplex = DUPLEX_FULL;
3327 } 5175 }
3328 5176 return link_up;
3329 /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
3330 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
3331 MDIO_XS_DEVAD,
3332 MDIO_XS_PLL_SEQUENCER, &val);
3333 val &= ~(1<<13);
3334 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3335 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3336
3337 /* PLL controls */
3338 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3339 MDIO_XS_DEVAD, 0x805E, 0x1077);
3340 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3341 MDIO_XS_DEVAD, 0x805D, 0x0000);
3342 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3343 MDIO_XS_DEVAD, 0x805C, 0x030B);
3344 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3345 MDIO_XS_DEVAD, 0x805B, 0x1240);
3346 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3347 MDIO_XS_DEVAD, 0x805A, 0x2490);
3348
3349 /* Tx Controls */
3350 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3351 MDIO_XS_DEVAD, 0x80A7, 0x0C74);
3352 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3353 MDIO_XS_DEVAD, 0x80A6, 0x9041);
3354 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3355 MDIO_XS_DEVAD, 0x80A5, 0x4640);
3356
3357 /* Rx Controls */
3358 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3359 MDIO_XS_DEVAD, 0x80FE, 0x01C4);
3360 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3361 MDIO_XS_DEVAD, 0x80FD, 0x9249);
3362 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3363 MDIO_XS_DEVAD, 0x80FC, 0x2015);
3364
3365 /* Enable PLL sequencer (use read-modify-write to set bit 13) */
3366 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
3367 MDIO_XS_DEVAD,
3368 MDIO_XS_PLL_SEQUENCER, &val);
3369 val |= (1<<13);
3370 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3371 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3372} 5177}
3373 5178
3374static void bnx2x_8073_set_pause_cl37(struct link_params *params, 5179/******************************************************************/
3375 struct link_vars *vars) 5180/* BCM8706 PHY SECTION */
5181/******************************************************************/
5182static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
5183 struct link_params *params,
5184 struct link_vars *vars)
3376{ 5185{
5186 u32 tx_en_mode;
5187 u16 cnt, val, tmp1;
3377 struct bnx2x *bp = params->bp; 5188 struct bnx2x *bp = params->bp;
3378 u16 cl37_val; 5189 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3379 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 5190 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
3380 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 5191 /* HW reset */
3381 5192 bnx2x_ext_phy_hw_reset(bp, params->port);
3382 bnx2x_cl45_read(bp, params->port, 5193 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
3383 ext_phy_type, 5194 bnx2x_wait_reset_complete(bp, phy, params);
3384 ext_phy_addr,
3385 MDIO_AN_DEVAD,
3386 MDIO_AN_REG_CL37_FC_LD, &cl37_val);
3387
3388 cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3389 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3390 5195
3391 if ((vars->ieee_fc & 5196 /* Wait until fw is loaded */
3392 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) == 5197 for (cnt = 0; cnt < 100; cnt++) {
3393 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) { 5198 bnx2x_cl45_read(bp, phy,
3394 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC; 5199 MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
5200 if (val)
5201 break;
5202 msleep(10);
3395 } 5203 }
3396 if ((vars->ieee_fc & 5204 DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
3397 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == 5205 if ((params->feature_config_flags &
3398 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { 5206 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
3399 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 5207 u8 i;
5208 u16 reg;
5209 for (i = 0; i < 4; i++) {
5210 reg = MDIO_XS_8706_REG_BANK_RX0 +
5211 i*(MDIO_XS_8706_REG_BANK_RX1 -
5212 MDIO_XS_8706_REG_BANK_RX0);
5213 bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val);
5214 /* Clear first 3 bits of the control */
5215 val &= ~0x7;
5216 /* Set control bits according to configuration */
5217 val |= (phy->rx_preemphasis[i] & 0x7);
5218 DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706"
5219 " reg 0x%x <-- val 0x%x\n", reg, val);
5220 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val);
5221 }
3400 } 5222 }
3401 if ((vars->ieee_fc & 5223 /* Force speed */
3402 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == 5224 if (phy->req_line_speed == SPEED_10000) {
3403 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { 5225 DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
3404 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 5226
5227 bnx2x_cl45_write(bp, phy,
5228 MDIO_PMA_DEVAD,
5229 MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
5230 bnx2x_cl45_write(bp, phy,
5231 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1);
5232 } else {
5233 /* Force 1Gbps using autoneg with 1G advertisement */
5234
5235 /* Allow CL37 through CL73 */
5236 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
5237 bnx2x_cl45_write(bp, phy,
5238 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
5239
5240 /* Enable Full-Duplex advertisement on CL37 */
5241 bnx2x_cl45_write(bp, phy,
5242 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020);
5243 /* Enable CL37 AN */
5244 bnx2x_cl45_write(bp, phy,
5245 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
5246 /* 1G support */
5247 bnx2x_cl45_write(bp, phy,
5248 MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5));
5249
5250 /* Enable clause 73 AN */
5251 bnx2x_cl45_write(bp, phy,
5252 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
5253 bnx2x_cl45_write(bp, phy,
5254 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
5255 0x0400);
5256 bnx2x_cl45_write(bp, phy,
5257 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL,
5258 0x0004);
3405 } 5259 }
3406 DP(NETIF_MSG_LINK, 5260 bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
3407 "Ext phy AN advertize cl37 0x%x\n", cl37_val);
3408
3409 bnx2x_cl45_write(bp, params->port,
3410 ext_phy_type,
3411 ext_phy_addr,
3412 MDIO_AN_DEVAD,
3413 MDIO_AN_REG_CL37_FC_LD, cl37_val);
3414 msleep(500);
3415}
3416
3417static void bnx2x_ext_phy_set_pause(struct link_params *params,
3418 struct link_vars *vars)
3419{
3420 struct bnx2x *bp = params->bp;
3421 u16 val;
3422 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3423 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3424
3425 /* read modify write pause advertizing */
3426 bnx2x_cl45_read(bp, params->port,
3427 ext_phy_type,
3428 ext_phy_addr,
3429 MDIO_AN_DEVAD,
3430 MDIO_AN_REG_ADV_PAUSE, &val);
3431 5261
3432 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; 5262 /*
3433 5263 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
3434 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ 5264 * power mode, if TX Laser is disabled
3435 5265 */
3436 if ((vars->ieee_fc &
3437 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
3438 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
3439 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
3440 }
3441 if ((vars->ieee_fc &
3442 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
3443 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
3444 val |=
3445 MDIO_AN_REG_ADV_PAUSE_PAUSE;
3446 }
3447 DP(NETIF_MSG_LINK,
3448 "Ext phy AN advertize 0x%x\n", val);
3449 bnx2x_cl45_write(bp, params->port,
3450 ext_phy_type,
3451 ext_phy_addr,
3452 MDIO_AN_DEVAD,
3453 MDIO_AN_REG_ADV_PAUSE, val);
3454}
3455static void bnx2x_set_preemphasis(struct link_params *params)
3456{
3457 u16 bank, i = 0;
3458 struct bnx2x *bp = params->bp;
3459 5266
3460 for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3; 5267 tx_en_mode = REG_RD(bp, params->shmem_base +
3461 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) { 5268 offsetof(struct shmem_region,
3462 CL45_WR_OVER_CL22(bp, params->port, 5269 dev_info.port_hw_config[params->port].sfp_ctrl))
3463 params->phy_addr, 5270 & PORT_HW_CFG_TX_LASER_MASK;
3464 bank, 5271
3465 MDIO_RX0_RX_EQ_BOOST, 5272 if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
3466 params->xgxs_config_rx[i]); 5273 DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
5274 bnx2x_cl45_read(bp, phy,
5275 MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
5276 tmp1 |= 0x1;
5277 bnx2x_cl45_write(bp, phy,
5278 MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
3467 } 5279 }
3468 5280
3469 for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3; 5281 return 0;
3470 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
3471 CL45_WR_OVER_CL22(bp, params->port,
3472 params->phy_addr,
3473 bank,
3474 MDIO_TX0_TX_DRIVER,
3475 params->xgxs_config_tx[i]);
3476 }
3477} 5282}
3478 5283
3479 5284static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy,
3480static void bnx2x_8481_set_led4(struct link_params *params, 5285 struct link_params *params,
3481 u32 ext_phy_type, u8 ext_phy_addr) 5286 struct link_vars *vars)
3482{
3483 struct bnx2x *bp = params->bp;
3484
3485 /* PHYC_CTL_LED_CTL */
3486 bnx2x_cl45_write(bp, params->port,
3487 ext_phy_type,
3488 ext_phy_addr,
3489 MDIO_PMA_DEVAD,
3490 MDIO_PMA_REG_8481_LINK_SIGNAL, 0xa482);
3491
3492 /* Unmask LED4 for 10G link */
3493 bnx2x_cl45_write(bp, params->port,
3494 ext_phy_type,
3495 ext_phy_addr,
3496 MDIO_PMA_DEVAD,
3497 MDIO_PMA_REG_8481_SIGNAL_MASK, (1<<6));
3498 /* 'Interrupt Mask' */
3499 bnx2x_cl45_write(bp, params->port,
3500 ext_phy_type,
3501 ext_phy_addr,
3502 MDIO_AN_DEVAD,
3503 0xFFFB, 0xFFFD);
3504}
3505static void bnx2x_8481_set_legacy_led_mode(struct link_params *params,
3506 u32 ext_phy_type, u8 ext_phy_addr)
3507{ 5287{
3508 struct bnx2x *bp = params->bp; 5288 return bnx2x_8706_8726_read_status(phy, params, vars);
3509
3510 /* LED1 (10G Link): Disable LED1 when 10/100/1000 link */
3511 /* LED2 (1G/100/10 Link): Enable LED2 when 10/100/1000 link) */
3512 bnx2x_cl45_write(bp, params->port,
3513 ext_phy_type,
3514 ext_phy_addr,
3515 MDIO_AN_DEVAD,
3516 MDIO_AN_REG_8481_LEGACY_SHADOW,
3517 (1<<15) | (0xd << 10) | (0xc<<4) | 0xe);
3518} 5289}
3519 5290
3520static void bnx2x_8481_set_10G_led_mode(struct link_params *params, 5291/******************************************************************/
3521 u32 ext_phy_type, u8 ext_phy_addr) 5292/* BCM8726 PHY SECTION */
5293/******************************************************************/
5294static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy,
5295 struct link_params *params)
3522{ 5296{
3523 struct bnx2x *bp = params->bp; 5297 struct bnx2x *bp = params->bp;
3524 u16 val1; 5298 DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
5299 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
5300}
3525 5301
3526 /* LED1 (10G Link) */ 5302static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
3527 /* Enable continuse based on source 7(10G-link) */ 5303 struct link_params *params)
3528 bnx2x_cl45_read(bp, params->port,
3529 ext_phy_type,
3530 ext_phy_addr,
3531 MDIO_PMA_DEVAD,
3532 MDIO_PMA_REG_8481_LINK_SIGNAL,
3533 &val1);
3534 /* Set bit 2 to 0, and bits [1:0] to 10 */
3535 val1 &= ~((1<<0) | (1<<2) | (1<<7)); /* Clear bits 0,2,7*/
3536 val1 |= ((1<<1) | (1<<6)); /* Set bit 1, 6 */
3537
3538 bnx2x_cl45_write(bp, params->port,
3539 ext_phy_type,
3540 ext_phy_addr,
3541 MDIO_PMA_DEVAD,
3542 MDIO_PMA_REG_8481_LINK_SIGNAL,
3543 val1);
3544
3545 /* Unmask LED1 for 10G link */
3546 bnx2x_cl45_read(bp, params->port,
3547 ext_phy_type,
3548 ext_phy_addr,
3549 MDIO_PMA_DEVAD,
3550 MDIO_PMA_REG_8481_LED1_MASK,
3551 &val1);
3552 /* Set bit 2 to 0, and bits [1:0] to 10 */
3553 val1 |= (1<<7);
3554 bnx2x_cl45_write(bp, params->port,
3555 ext_phy_type,
3556 ext_phy_addr,
3557 MDIO_PMA_DEVAD,
3558 MDIO_PMA_REG_8481_LED1_MASK,
3559 val1);
3560
3561 /* LED2 (1G/100/10G Link) */
3562 /* Mask LED2 for 10G link */
3563 bnx2x_cl45_write(bp, params->port,
3564 ext_phy_type,
3565 ext_phy_addr,
3566 MDIO_PMA_DEVAD,
3567 MDIO_PMA_REG_8481_LED2_MASK,
3568 0);
3569
3570 /* Unmask LED3 for 10G link */
3571 bnx2x_cl45_write(bp, params->port,
3572 ext_phy_type,
3573 ext_phy_addr,
3574 MDIO_PMA_DEVAD,
3575 MDIO_PMA_REG_8481_LED3_MASK,
3576 0x6);
3577 bnx2x_cl45_write(bp, params->port,
3578 ext_phy_type,
3579 ext_phy_addr,
3580 MDIO_PMA_DEVAD,
3581 MDIO_PMA_REG_8481_LED3_BLINK,
3582 0);
3583}
3584
3585
3586static void bnx2x_init_internal_phy(struct link_params *params,
3587 struct link_vars *vars,
3588 u8 enable_cl73)
3589{ 5304{
3590 struct bnx2x *bp = params->bp; 5305 struct bnx2x *bp = params->bp;
5306 /* Need to wait 100ms after reset */
5307 msleep(100);
3591 5308
3592 if (!(vars->phy_flags & PHY_SGMII_FLAG)) { 5309 /* Micro controller re-boot */
3593 if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 5310 bnx2x_cl45_write(bp, phy,
3594 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && 5311 MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B);
3595 (params->feature_config_flags &
3596 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
3597 bnx2x_set_preemphasis(params);
3598 5312
3599 /* forced speed requested? */ 5313 /* Set soft reset */
3600 if (vars->line_speed != SPEED_AUTO_NEG || 5314 bnx2x_cl45_write(bp, phy,
3601 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 5315 MDIO_PMA_DEVAD,
3602 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && 5316 MDIO_PMA_REG_GEN_CTRL,
3603 params->loopback_mode == LOOPBACK_EXT)) { 5317 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
3604 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3605 5318
3606 /* disable autoneg */ 5319 bnx2x_cl45_write(bp, phy,
3607 bnx2x_set_autoneg(params, vars, 0); 5320 MDIO_PMA_DEVAD,
5321 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
3608 5322
3609 /* program speed and duplex */ 5323 bnx2x_cl45_write(bp, phy,
3610 bnx2x_program_serdes(params, vars); 5324 MDIO_PMA_DEVAD,
5325 MDIO_PMA_REG_GEN_CTRL,
5326 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
3611 5327
3612 } else { /* AN_mode */ 5328 /* wait for 150ms for microcode load */
3613 DP(NETIF_MSG_LINK, "not SGMII, AN\n"); 5329 msleep(150);
3614
3615 /* AN enabled */
3616 bnx2x_set_brcm_cl37_advertisment(params);
3617 5330
3618 /* program duplex & pause advertisement (for aneg) */ 5331 /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
3619 bnx2x_set_ieee_aneg_advertisment(params, 5332 bnx2x_cl45_write(bp, phy,
3620 vars->ieee_fc); 5333 MDIO_PMA_DEVAD,
5334 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
3621 5335
3622 /* enable autoneg */ 5336 msleep(200);
3623 bnx2x_set_autoneg(params, vars, enable_cl73); 5337 bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
5338}
3624 5339
3625 /* enable and restart AN */ 5340static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
3626 bnx2x_restart_autoneg(params, enable_cl73); 5341 struct link_params *params,
5342 struct link_vars *vars)
5343{
5344 struct bnx2x *bp = params->bp;
5345 u16 val1;
5346 u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars);
5347 if (link_up) {
5348 bnx2x_cl45_read(bp, phy,
5349 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
5350 &val1);
5351 if (val1 & (1<<15)) {
5352 DP(NETIF_MSG_LINK, "Tx is disabled\n");
5353 link_up = 0;
5354 vars->line_speed = 0;
3627 } 5355 }
3628
3629 } else { /* SGMII mode */
3630 DP(NETIF_MSG_LINK, "SGMII\n");
3631
3632 bnx2x_initialize_sgmii_process(params, vars);
3633 } 5356 }
5357 return link_up;
3634} 5358}
3635 5359
3636static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) 5360
5361static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
5362 struct link_params *params,
5363 struct link_vars *vars)
3637{ 5364{
3638 struct bnx2x *bp = params->bp; 5365 struct bnx2x *bp = params->bp;
3639 u32 ext_phy_type; 5366 u32 val;
3640 u8 ext_phy_addr; 5367 u32 swap_val, swap_override, aeu_gpio_mask, offset;
3641 u16 cnt; 5368 DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
3642 u16 ctrl = 0;
3643 u16 val = 0;
3644 u8 rc = 0;
3645 5369
3646 if (vars->phy_flags & PHY_XGXS_FLAG) { 5370 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
3647 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 5371 bnx2x_wait_reset_complete(bp, phy, params);
3648 5372
3649 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 5373 bnx2x_8726_external_rom_boot(phy, params);
3650 /* Make sure that the soft reset is off (expect for the 8072:
3651 * due to the lock, it will be done inside the specific
3652 * handling)
3653 */
3654 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3655 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3656 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3657 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) &&
3658 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) {
3659 /* Wait for soft reset to get cleared upto 1 sec */
3660 for (cnt = 0; cnt < 1000; cnt++) {
3661 bnx2x_cl45_read(bp, params->port,
3662 ext_phy_type,
3663 ext_phy_addr,
3664 MDIO_PMA_DEVAD,
3665 MDIO_PMA_REG_CTRL, &ctrl);
3666 if (!(ctrl & (1<<15)))
3667 break;
3668 msleep(1);
3669 }
3670 DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n",
3671 ctrl, cnt);
3672 }
3673 5374
3674 switch (ext_phy_type) { 5375 /*
3675 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 5376 * Need to call module detected on initialization since the module
3676 break; 5377 * detection triggered by actual module insertion might occur before
5378 * driver is loaded, and when driver is loaded, it reset all
5379 * registers, including the transmitter
5380 */
5381 bnx2x_sfp_module_detection(phy, params);
5382
5383 if (phy->req_line_speed == SPEED_1000) {
5384 DP(NETIF_MSG_LINK, "Setting 1G force\n");
5385 bnx2x_cl45_write(bp, phy,
5386 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
5387 bnx2x_cl45_write(bp, phy,
5388 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
5389 bnx2x_cl45_write(bp, phy,
5390 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x5);
5391 bnx2x_cl45_write(bp, phy,
5392 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
5393 0x400);
5394 } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
5395 (phy->speed_cap_mask &
5396 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
5397 ((phy->speed_cap_mask &
5398 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
5399 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
5400 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
5401 /* Set Flow control */
5402 bnx2x_ext_phy_set_pause(params, phy, vars);
5403 bnx2x_cl45_write(bp, phy,
5404 MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
5405 bnx2x_cl45_write(bp, phy,
5406 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
5407 bnx2x_cl45_write(bp, phy,
5408 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020);
5409 bnx2x_cl45_write(bp, phy,
5410 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
5411 bnx2x_cl45_write(bp, phy,
5412 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
5413 /*
5414 * Enable RX-ALARM control to receive interrupt for 1G speed
5415 * change
5416 */
5417 bnx2x_cl45_write(bp, phy,
5418 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
5419 bnx2x_cl45_write(bp, phy,
5420 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
5421 0x400);
5422
5423 } else { /* Default 10G. Set only LASI control */
5424 bnx2x_cl45_write(bp, phy,
5425 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1);
5426 }
3677 5427
3678 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 5428 /* Set TX PreEmphasis if needed */
3679 DP(NETIF_MSG_LINK, "XGXS 8705\n"); 5429 if ((params->feature_config_flags &
3680 5430 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
3681 bnx2x_cl45_write(bp, params->port, 5431 DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
3682 ext_phy_type, 5432 "TX_CTRL2 0x%x\n",
3683 ext_phy_addr, 5433 phy->tx_preemphasis[0],
3684 MDIO_PMA_DEVAD, 5434 phy->tx_preemphasis[1]);
3685 MDIO_PMA_REG_MISC_CTRL, 5435 bnx2x_cl45_write(bp, phy,
3686 0x8288); 5436 MDIO_PMA_DEVAD,
3687 bnx2x_cl45_write(bp, params->port, 5437 MDIO_PMA_REG_8726_TX_CTRL1,
3688 ext_phy_type, 5438 phy->tx_preemphasis[0]);
3689 ext_phy_addr, 5439
3690 MDIO_PMA_DEVAD, 5440 bnx2x_cl45_write(bp, phy,
3691 MDIO_PMA_REG_PHY_IDENTIFIER, 5441 MDIO_PMA_DEVAD,
3692 0x7fbf); 5442 MDIO_PMA_REG_8726_TX_CTRL2,
3693 bnx2x_cl45_write(bp, params->port, 5443 phy->tx_preemphasis[1]);
3694 ext_phy_type, 5444 }
3695 ext_phy_addr,
3696 MDIO_PMA_DEVAD,
3697 MDIO_PMA_REG_CMU_PLL_BYPASS,
3698 0x0100);
3699 bnx2x_cl45_write(bp, params->port,
3700 ext_phy_type,
3701 ext_phy_addr,
3702 MDIO_WIS_DEVAD,
3703 MDIO_WIS_REG_LASI_CNTL, 0x1);
3704
3705 /* BCM8705 doesn't have microcode, hence the 0 */
3706 bnx2x_save_spirom_version(bp, params->port,
3707 params->shmem_base, 0);
3708 break;
3709 5445
3710 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 5446 /* Set GPIO3 to trigger SFP+ module insertion/removal */
3711 /* Wait until fw is loaded */ 5447 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
3712 for (cnt = 0; cnt < 100; cnt++) { 5448 MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
3713 bnx2x_cl45_read(bp, params->port, ext_phy_type,
3714 ext_phy_addr, MDIO_PMA_DEVAD,
3715 MDIO_PMA_REG_ROM_VER1, &val);
3716 if (val)
3717 break;
3718 msleep(10);
3719 }
3720 DP(NETIF_MSG_LINK, "XGXS 8706 is initialized "
3721 "after %d ms\n", cnt);
3722 if ((params->feature_config_flags &
3723 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
3724 u8 i;
3725 u16 reg;
3726 for (i = 0; i < 4; i++) {
3727 reg = MDIO_XS_8706_REG_BANK_RX0 +
3728 i*(MDIO_XS_8706_REG_BANK_RX1 -
3729 MDIO_XS_8706_REG_BANK_RX0);
3730 bnx2x_cl45_read(bp, params->port,
3731 ext_phy_type,
3732 ext_phy_addr,
3733 MDIO_XS_DEVAD,
3734 reg, &val);
3735 /* Clear first 3 bits of the control */
3736 val &= ~0x7;
3737 /* Set control bits according to
3738 configuation */
3739 val |= (params->xgxs_config_rx[i] &
3740 0x7);
3741 DP(NETIF_MSG_LINK, "Setting RX"
3742 "Equalizer to BCM8706 reg 0x%x"
3743 " <-- val 0x%x\n", reg, val);
3744 bnx2x_cl45_write(bp, params->port,
3745 ext_phy_type,
3746 ext_phy_addr,
3747 MDIO_XS_DEVAD,
3748 reg, val);
3749 }
3750 }
3751 /* Force speed */
3752 if (params->req_line_speed == SPEED_10000) {
3753 DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
3754
3755 bnx2x_cl45_write(bp, params->port,
3756 ext_phy_type,
3757 ext_phy_addr,
3758 MDIO_PMA_DEVAD,
3759 MDIO_PMA_REG_DIGITAL_CTRL,
3760 0x400);
3761 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3762 ext_phy_addr, MDIO_PMA_DEVAD,
3763 MDIO_PMA_REG_LASI_CTRL, 1);
3764 } else {
3765 /* Force 1Gbps using autoneg with 1G
3766 advertisment */
3767
3768 /* Allow CL37 through CL73 */
3769 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3770 bnx2x_cl45_write(bp, params->port,
3771 ext_phy_type,
3772 ext_phy_addr,
3773 MDIO_AN_DEVAD,
3774 MDIO_AN_REG_CL37_CL73,
3775 0x040c);
3776
3777 /* Enable Full-Duplex advertisment on CL37 */
3778 bnx2x_cl45_write(bp, params->port,
3779 ext_phy_type,
3780 ext_phy_addr,
3781 MDIO_AN_DEVAD,
3782 MDIO_AN_REG_CL37_FC_LP,
3783 0x0020);
3784 /* Enable CL37 AN */
3785 bnx2x_cl45_write(bp, params->port,
3786 ext_phy_type,
3787 ext_phy_addr,
3788 MDIO_AN_DEVAD,
3789 MDIO_AN_REG_CL37_AN,
3790 0x1000);
3791 /* 1G support */
3792 bnx2x_cl45_write(bp, params->port,
3793 ext_phy_type,
3794 ext_phy_addr,
3795 MDIO_AN_DEVAD,
3796 MDIO_AN_REG_ADV, (1<<5));
3797
3798 /* Enable clause 73 AN */
3799 bnx2x_cl45_write(bp, params->port,
3800 ext_phy_type,
3801 ext_phy_addr,
3802 MDIO_AN_DEVAD,
3803 MDIO_AN_REG_CTRL,
3804 0x1200);
3805 bnx2x_cl45_write(bp, params->port,
3806 ext_phy_type,
3807 ext_phy_addr,
3808 MDIO_PMA_DEVAD,
3809 MDIO_PMA_REG_RX_ALARM_CTRL,
3810 0x0400);
3811 bnx2x_cl45_write(bp, params->port,
3812 ext_phy_type,
3813 ext_phy_addr,
3814 MDIO_PMA_DEVAD,
3815 MDIO_PMA_REG_LASI_CTRL, 0x0004);
3816 5449
3817 } 5450 /* The GPIO should be swapped if the swap register is set and active */
3818 bnx2x_save_bcm_spirom_ver(bp, params->port, 5451 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
3819 ext_phy_type, 5452 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
3820 ext_phy_addr,
3821 params->shmem_base);
3822 break;
3823 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
3824 DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
3825 bnx2x_bcm8726_external_rom_boot(params);
3826
3827 /* Need to call module detected on initialization since
3828 the module detection triggered by actual module
3829 insertion might occur before driver is loaded, and when
3830 driver is loaded, it reset all registers, including the
3831 transmitter */
3832 bnx2x_sfp_module_detection(params);
3833
3834 /* Set Flow control */
3835 bnx2x_ext_phy_set_pause(params, vars);
3836 if (params->req_line_speed == SPEED_1000) {
3837 DP(NETIF_MSG_LINK, "Setting 1G force\n");
3838 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3839 ext_phy_addr, MDIO_PMA_DEVAD,
3840 MDIO_PMA_REG_CTRL, 0x40);
3841 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3842 ext_phy_addr, MDIO_PMA_DEVAD,
3843 MDIO_PMA_REG_10G_CTRL2, 0xD);
3844 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3845 ext_phy_addr, MDIO_PMA_DEVAD,
3846 MDIO_PMA_REG_LASI_CTRL, 0x5);
3847 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3848 ext_phy_addr, MDIO_PMA_DEVAD,
3849 MDIO_PMA_REG_RX_ALARM_CTRL,
3850 0x400);
3851 } else if ((params->req_line_speed ==
3852 SPEED_AUTO_NEG) &&
3853 ((params->speed_cap_mask &
3854 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
3855 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
3856 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3857 ext_phy_addr, MDIO_AN_DEVAD,
3858 MDIO_AN_REG_ADV, 0x20);
3859 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3860 ext_phy_addr, MDIO_AN_DEVAD,
3861 MDIO_AN_REG_CL37_CL73, 0x040c);
3862 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3863 ext_phy_addr, MDIO_AN_DEVAD,
3864 MDIO_AN_REG_CL37_FC_LD, 0x0020);
3865 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3866 ext_phy_addr, MDIO_AN_DEVAD,
3867 MDIO_AN_REG_CL37_AN, 0x1000);
3868 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3869 ext_phy_addr, MDIO_AN_DEVAD,
3870 MDIO_AN_REG_CTRL, 0x1200);
3871
3872 /* Enable RX-ALARM control to receive
3873 interrupt for 1G speed change */
3874 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3875 ext_phy_addr, MDIO_PMA_DEVAD,
3876 MDIO_PMA_REG_LASI_CTRL, 0x4);
3877 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3878 ext_phy_addr, MDIO_PMA_DEVAD,
3879 MDIO_PMA_REG_RX_ALARM_CTRL,
3880 0x400);
3881
3882 } else { /* Default 10G. Set only LASI control */
3883 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3884 ext_phy_addr, MDIO_PMA_DEVAD,
3885 MDIO_PMA_REG_LASI_CTRL, 1);
3886 }
3887 5453
3888 /* Set TX PreEmphasis if needed */ 5454 /* Select function upon port-swap configuration */
3889 if ((params->feature_config_flags & 5455 if (params->port == 0) {
3890 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { 5456 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
3891 DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x," 5457 aeu_gpio_mask = (swap_val && swap_override) ?
3892 "TX_CTRL2 0x%x\n", 5458 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
3893 params->xgxs_config_tx[0], 5459 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
3894 params->xgxs_config_tx[1]); 5460 } else {
3895 bnx2x_cl45_write(bp, params->port, 5461 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
3896 ext_phy_type, 5462 aeu_gpio_mask = (swap_val && swap_override) ?
3897 ext_phy_addr, 5463 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
3898 MDIO_PMA_DEVAD, 5464 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
3899 MDIO_PMA_REG_8726_TX_CTRL1, 5465 }
3900 params->xgxs_config_tx[0]); 5466 val = REG_RD(bp, offset);
3901 5467 /* add GPIO3 to group */
3902 bnx2x_cl45_write(bp, params->port, 5468 val |= aeu_gpio_mask;
3903 ext_phy_type, 5469 REG_WR(bp, offset, val);
3904 ext_phy_addr, 5470 return 0;
3905 MDIO_PMA_DEVAD,
3906 MDIO_PMA_REG_8726_TX_CTRL2,
3907 params->xgxs_config_tx[1]);
3908 }
3909 break;
3910 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3911 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
3912 {
3913 u16 tmp1;
3914 u16 rx_alarm_ctrl_val;
3915 u16 lasi_ctrl_val;
3916 if (ext_phy_type ==
3917 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
3918 rx_alarm_ctrl_val = 0x400;
3919 lasi_ctrl_val = 0x0004;
3920 } else {
3921 rx_alarm_ctrl_val = (1<<2);
3922 lasi_ctrl_val = 0x0004;
3923 }
3924 5471
3925 /* enable LASI */ 5472}
3926 bnx2x_cl45_write(bp, params->port,
3927 ext_phy_type,
3928 ext_phy_addr,
3929 MDIO_PMA_DEVAD,
3930 MDIO_PMA_REG_RX_ALARM_CTRL,
3931 rx_alarm_ctrl_val);
3932
3933 bnx2x_cl45_write(bp, params->port,
3934 ext_phy_type,
3935 ext_phy_addr,
3936 MDIO_PMA_DEVAD,
3937 MDIO_PMA_REG_LASI_CTRL,
3938 lasi_ctrl_val);
3939
3940 bnx2x_8073_set_pause_cl37(params, vars);
3941
3942 if (ext_phy_type ==
3943 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)
3944 bnx2x_bcm8072_external_rom_boot(params);
3945 else
3946 /* In case of 8073 with long xaui lines,
3947 don't set the 8073 xaui low power*/
3948 bnx2x_bcm8073_set_xaui_low_power_mode(params);
3949
3950 bnx2x_cl45_read(bp, params->port,
3951 ext_phy_type,
3952 ext_phy_addr,
3953 MDIO_PMA_DEVAD,
3954 MDIO_PMA_REG_M8051_MSGOUT_REG,
3955 &tmp1);
3956
3957 bnx2x_cl45_read(bp, params->port,
3958 ext_phy_type,
3959 ext_phy_addr,
3960 MDIO_PMA_DEVAD,
3961 MDIO_PMA_REG_RX_ALARM, &tmp1);
3962
3963 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1):"
3964 "0x%x\n", tmp1);
3965
3966 /* If this is forced speed, set to KR or KX
3967 * (all other are not supported)
3968 */
3969 if (params->loopback_mode == LOOPBACK_EXT) {
3970 bnx2x_bcm807x_force_10G(params);
3971 DP(NETIF_MSG_LINK,
3972 "Forced speed 10G on 807X\n");
3973 break;
3974 } else {
3975 bnx2x_cl45_write(bp, params->port,
3976 ext_phy_type, ext_phy_addr,
3977 MDIO_PMA_DEVAD,
3978 MDIO_PMA_REG_BCM_CTRL,
3979 0x0002);
3980 }
3981 if (params->req_line_speed != SPEED_AUTO_NEG) {
3982 if (params->req_line_speed == SPEED_10000) {
3983 val = (1<<7);
3984 } else if (params->req_line_speed ==
3985 SPEED_2500) {
3986 val = (1<<5);
3987 /* Note that 2.5G works only
3988 when used with 1G advertisment */
3989 } else
3990 val = (1<<5);
3991 } else {
3992
3993 val = 0;
3994 if (params->speed_cap_mask &
3995 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
3996 val |= (1<<7);
3997
3998 /* Note that 2.5G works only when
3999 used with 1G advertisment */
4000 if (params->speed_cap_mask &
4001 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
4002 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
4003 val |= (1<<5);
4004 DP(NETIF_MSG_LINK,
4005 "807x autoneg val = 0x%x\n", val);
4006 }
4007 5473
4008 bnx2x_cl45_write(bp, params->port, 5474static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
4009 ext_phy_type, 5475 struct link_params *params)
4010 ext_phy_addr, 5476{
4011 MDIO_AN_DEVAD, 5477 struct bnx2x *bp = params->bp;
4012 MDIO_AN_REG_ADV, val); 5478 DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port);
4013 if (ext_phy_type == 5479 /* Set serial boot control for external load */
4014 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { 5480 bnx2x_cl45_write(bp, phy,
4015 bnx2x_cl45_read(bp, params->port, 5481 MDIO_PMA_DEVAD,
4016 ext_phy_type, 5482 MDIO_PMA_REG_GEN_CTRL, 0x0001);
4017 ext_phy_addr, 5483}
4018 MDIO_AN_DEVAD,
4019 MDIO_AN_REG_8073_2_5G, &tmp1);
4020
4021 if (((params->speed_cap_mask &
4022 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
4023 (params->req_line_speed ==
4024 SPEED_AUTO_NEG)) ||
4025 (params->req_line_speed ==
4026 SPEED_2500)) {
4027 u16 phy_ver;
4028 /* Allow 2.5G for A1 and above */
4029 bnx2x_cl45_read(bp, params->port,
4030 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4031 ext_phy_addr,
4032 MDIO_PMA_DEVAD,
4033 MDIO_PMA_REG_8073_CHIP_REV, &phy_ver);
4034 DP(NETIF_MSG_LINK, "Add 2.5G\n");
4035 if (phy_ver > 0)
4036 tmp1 |= 1;
4037 else
4038 tmp1 &= 0xfffe;
4039 } else {
4040 DP(NETIF_MSG_LINK, "Disable 2.5G\n");
4041 tmp1 &= 0xfffe;
4042 }
4043
4044 bnx2x_cl45_write(bp, params->port,
4045 ext_phy_type,
4046 ext_phy_addr,
4047 MDIO_AN_DEVAD,
4048 MDIO_AN_REG_8073_2_5G, tmp1);
4049 }
4050 5484
4051 /* Add support for CL37 (passive mode) II */ 5485/******************************************************************/
4052 5486/* BCM8727 PHY SECTION */
4053 bnx2x_cl45_read(bp, params->port, 5487/******************************************************************/
4054 ext_phy_type,
4055 ext_phy_addr,
4056 MDIO_AN_DEVAD,
4057 MDIO_AN_REG_CL37_FC_LD,
4058 &tmp1);
4059
4060 bnx2x_cl45_write(bp, params->port,
4061 ext_phy_type,
4062 ext_phy_addr,
4063 MDIO_AN_DEVAD,
4064 MDIO_AN_REG_CL37_FC_LD, (tmp1 |
4065 ((params->req_duplex == DUPLEX_FULL) ?
4066 0x20 : 0x40)));
4067
4068 /* Add support for CL37 (passive mode) III */
4069 bnx2x_cl45_write(bp, params->port,
4070 ext_phy_type,
4071 ext_phy_addr,
4072 MDIO_AN_DEVAD,
4073 MDIO_AN_REG_CL37_AN, 0x1000);
4074
4075 if (ext_phy_type ==
4076 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
4077 /* The SNR will improve about 2db by changing
4078 BW and FEE main tap. Rest commands are executed
4079 after link is up*/
4080 /*Change FFE main cursor to 5 in EDC register*/
4081 if (bnx2x_8073_is_snr_needed(params))
4082 bnx2x_cl45_write(bp, params->port,
4083 ext_phy_type,
4084 ext_phy_addr,
4085 MDIO_PMA_DEVAD,
4086 MDIO_PMA_REG_EDC_FFE_MAIN,
4087 0xFB0C);
4088
4089 /* Enable FEC (Forware Error Correction)
4090 Request in the AN */
4091 bnx2x_cl45_read(bp, params->port,
4092 ext_phy_type,
4093 ext_phy_addr,
4094 MDIO_AN_DEVAD,
4095 MDIO_AN_REG_ADV2, &tmp1);
4096
4097 tmp1 |= (1<<15);
4098
4099 bnx2x_cl45_write(bp, params->port,
4100 ext_phy_type,
4101 ext_phy_addr,
4102 MDIO_AN_DEVAD,
4103 MDIO_AN_REG_ADV2, tmp1);
4104 5488
4105 } 5489static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy,
5490 struct link_params *params, u8 mode)
5491{
5492 struct bnx2x *bp = params->bp;
5493 u16 led_mode_bitmask = 0;
5494 u16 gpio_pins_bitmask = 0;
5495 u16 val;
5496 /* Only NOC flavor requires to set the LED specifically */
5497 if (!(phy->flags & FLAGS_NOC))
5498 return;
5499 switch (mode) {
5500 case LED_MODE_FRONT_PANEL_OFF:
5501 case LED_MODE_OFF:
5502 led_mode_bitmask = 0;
5503 gpio_pins_bitmask = 0x03;
5504 break;
5505 case LED_MODE_ON:
5506 led_mode_bitmask = 0;
5507 gpio_pins_bitmask = 0x02;
5508 break;
5509 case LED_MODE_OPER:
5510 led_mode_bitmask = 0x60;
5511 gpio_pins_bitmask = 0x11;
5512 break;
5513 }
5514 bnx2x_cl45_read(bp, phy,
5515 MDIO_PMA_DEVAD,
5516 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
5517 &val);
5518 val &= 0xff8f;
5519 val |= led_mode_bitmask;
5520 bnx2x_cl45_write(bp, phy,
5521 MDIO_PMA_DEVAD,
5522 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
5523 val);
5524 bnx2x_cl45_read(bp, phy,
5525 MDIO_PMA_DEVAD,
5526 MDIO_PMA_REG_8727_GPIO_CTRL,
5527 &val);
5528 val &= 0xffe0;
5529 val |= gpio_pins_bitmask;
5530 bnx2x_cl45_write(bp, phy,
5531 MDIO_PMA_DEVAD,
5532 MDIO_PMA_REG_8727_GPIO_CTRL,
5533 val);
5534}
5535static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
5536 struct link_params *params) {
5537 u32 swap_val, swap_override;
5538 u8 port;
5539 /*
5540 * The PHY reset is controlled by GPIO 1. Fake the port number
5541 * to cancel the swap done in set_gpio()
5542 */
5543 struct bnx2x *bp = params->bp;
5544 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5545 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5546 port = (swap_val && swap_override) ^ 1;
5547 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
5548 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
5549}
4106 5550
4107 bnx2x_ext_phy_set_pause(params, vars); 5551static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
4108 5552 struct link_params *params,
4109 /* Restart autoneg */ 5553 struct link_vars *vars)
4110 msleep(500); 5554{
4111 bnx2x_cl45_write(bp, params->port, 5555 u32 tx_en_mode;
4112 ext_phy_type, 5556 u16 tmp1, val, mod_abs, tmp2;
4113 ext_phy_addr, 5557 u16 rx_alarm_ctrl_val;
4114 MDIO_AN_DEVAD, 5558 u16 lasi_ctrl_val;
4115 MDIO_AN_REG_CTRL, 0x1200); 5559 struct bnx2x *bp = params->bp;
4116 DP(NETIF_MSG_LINK, "807x Autoneg Restart: " 5560 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
4117 "Advertise 1G=%x, 10G=%x\n",
4118 ((val & (1<<5)) > 0),
4119 ((val & (1<<7)) > 0));
4120 break;
4121 }
4122 5561
4123 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 5562 bnx2x_wait_reset_complete(bp, phy, params);
4124 { 5563 rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
4125 u16 tmp1; 5564 lasi_ctrl_val = 0x0004;
4126 u16 rx_alarm_ctrl_val;
4127 u16 lasi_ctrl_val;
4128
4129 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
4130
4131 u16 mod_abs;
4132 rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
4133 lasi_ctrl_val = 0x0004;
4134
4135 DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
4136 /* enable LASI */
4137 bnx2x_cl45_write(bp, params->port,
4138 ext_phy_type,
4139 ext_phy_addr,
4140 MDIO_PMA_DEVAD,
4141 MDIO_PMA_REG_RX_ALARM_CTRL,
4142 rx_alarm_ctrl_val);
4143
4144 bnx2x_cl45_write(bp, params->port,
4145 ext_phy_type,
4146 ext_phy_addr,
4147 MDIO_PMA_DEVAD,
4148 MDIO_PMA_REG_LASI_CTRL,
4149 lasi_ctrl_val);
4150
4151 /* Initially configure MOD_ABS to interrupt when
4152 module is presence( bit 8) */
4153 bnx2x_cl45_read(bp, params->port,
4154 ext_phy_type,
4155 ext_phy_addr,
4156 MDIO_PMA_DEVAD,
4157 MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
4158 /* Set EDC off by setting OPTXLOS signal input to low
4159 (bit 9).
4160 When the EDC is off it locks onto a reference clock and
4161 avoids becoming 'lost'.*/
4162 mod_abs &= ~((1<<8) | (1<<9));
4163 bnx2x_cl45_write(bp, params->port,
4164 ext_phy_type,
4165 ext_phy_addr,
4166 MDIO_PMA_DEVAD,
4167 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4168
4169 /* Make MOD_ABS give interrupt on change */
4170 bnx2x_cl45_read(bp, params->port,
4171 ext_phy_type,
4172 ext_phy_addr,
4173 MDIO_PMA_DEVAD,
4174 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4175 &val);
4176 val |= (1<<12);
4177 bnx2x_cl45_write(bp, params->port,
4178 ext_phy_type,
4179 ext_phy_addr,
4180 MDIO_PMA_DEVAD,
4181 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4182 val);
4183
4184 /* Set 8727 GPIOs to input to allow reading from the
4185 8727 GPIO0 status which reflect SFP+ module
4186 over-current */
4187
4188 bnx2x_cl45_read(bp, params->port,
4189 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4190 ext_phy_addr,
4191 MDIO_PMA_DEVAD,
4192 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4193 &val);
4194 val &= 0xff8f; /* Reset bits 4-6 */
4195 bnx2x_cl45_write(bp, params->port,
4196 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4197 ext_phy_addr,
4198 MDIO_PMA_DEVAD,
4199 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4200 val);
4201
4202 bnx2x_8727_power_module(bp, params, ext_phy_addr, 1);
4203 bnx2x_bcm8073_set_xaui_low_power_mode(params);
4204
4205 bnx2x_cl45_read(bp, params->port,
4206 ext_phy_type,
4207 ext_phy_addr,
4208 MDIO_PMA_DEVAD,
4209 MDIO_PMA_REG_M8051_MSGOUT_REG,
4210 &tmp1);
4211
4212 bnx2x_cl45_read(bp, params->port,
4213 ext_phy_type,
4214 ext_phy_addr,
4215 MDIO_PMA_DEVAD,
4216 MDIO_PMA_REG_RX_ALARM, &tmp1);
4217
4218 /* Set option 1G speed */
4219 if (params->req_line_speed == SPEED_1000) {
4220
4221 DP(NETIF_MSG_LINK, "Setting 1G force\n");
4222 bnx2x_cl45_write(bp, params->port,
4223 ext_phy_type,
4224 ext_phy_addr,
4225 MDIO_PMA_DEVAD,
4226 MDIO_PMA_REG_CTRL, 0x40);
4227 bnx2x_cl45_write(bp, params->port,
4228 ext_phy_type,
4229 ext_phy_addr,
4230 MDIO_PMA_DEVAD,
4231 MDIO_PMA_REG_10G_CTRL2, 0xD);
4232 bnx2x_cl45_read(bp, params->port,
4233 ext_phy_type,
4234 ext_phy_addr,
4235 MDIO_PMA_DEVAD,
4236 MDIO_PMA_REG_10G_CTRL2, &tmp1);
4237 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
4238
4239 } else if ((params->req_line_speed ==
4240 SPEED_AUTO_NEG) &&
4241 ((params->speed_cap_mask &
4242 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
4243
4244 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
4245 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4246 ext_phy_addr, MDIO_AN_DEVAD,
4247 MDIO_PMA_REG_8727_MISC_CTRL, 0);
4248 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4249 ext_phy_addr, MDIO_AN_DEVAD,
4250 MDIO_AN_REG_CL37_AN, 0x1300);
4251 } else {
4252 /* Since the 8727 has only single reset pin,
4253 need to set the 10G registers although it is
4254 default */
4255 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4256 ext_phy_addr, MDIO_AN_DEVAD,
4257 MDIO_AN_REG_CTRL, 0x0020);
4258 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4259 ext_phy_addr, MDIO_AN_DEVAD,
4260 0x7, 0x0100);
4261 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4262 ext_phy_addr, MDIO_PMA_DEVAD,
4263 MDIO_PMA_REG_CTRL, 0x2040);
4264 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4265 ext_phy_addr, MDIO_PMA_DEVAD,
4266 MDIO_PMA_REG_10G_CTRL2, 0x0008);
4267 }
4268 5565
4269 /* Set 2-wire transfer rate of SFP+ module EEPROM 5566 DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
4270 * to 100Khz since some DACs(direct attached cables) do 5567 /* enable LASI */
4271 * not work at 400Khz. 5568 bnx2x_cl45_write(bp, phy,
4272 */ 5569 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
4273 bnx2x_cl45_write(bp, params->port, 5570 rx_alarm_ctrl_val);
4274 ext_phy_type,
4275 ext_phy_addr,
4276 MDIO_PMA_DEVAD,
4277 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
4278 0xa001);
4279
4280 /* Set TX PreEmphasis if needed */
4281 if ((params->feature_config_flags &
4282 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
4283 DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
4284 "TX_CTRL2 0x%x\n",
4285 params->xgxs_config_tx[0],
4286 params->xgxs_config_tx[1]);
4287 bnx2x_cl45_write(bp, params->port,
4288 ext_phy_type,
4289 ext_phy_addr,
4290 MDIO_PMA_DEVAD,
4291 MDIO_PMA_REG_8727_TX_CTRL1,
4292 params->xgxs_config_tx[0]);
4293
4294 bnx2x_cl45_write(bp, params->port,
4295 ext_phy_type,
4296 ext_phy_addr,
4297 MDIO_PMA_DEVAD,
4298 MDIO_PMA_REG_8727_TX_CTRL2,
4299 params->xgxs_config_tx[1]);
4300 }
4301 5571
4302 break; 5572 bnx2x_cl45_write(bp, phy,
4303 } 5573 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
4304 5574
4305 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 5575 /*
4306 { 5576 * Initially configure MOD_ABS to interrupt when module is
4307 u16 fw_ver1, fw_ver2; 5577 * presence( bit 8)
4308 DP(NETIF_MSG_LINK, 5578 */
4309 "Setting the SFX7101 LASI indication\n"); 5579 bnx2x_cl45_read(bp, phy,
5580 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
5581 /*
5582 * Set EDC off by setting OPTXLOS signal input to low (bit 9).
5583 * When the EDC is off it locks onto a reference clock and avoids
5584 * becoming 'lost'
5585 */
5586 mod_abs &= ~(1<<8);
5587 if (!(phy->flags & FLAGS_NOC))
5588 mod_abs &= ~(1<<9);
5589 bnx2x_cl45_write(bp, phy,
5590 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4310 5591
4311 bnx2x_cl45_write(bp, params->port,
4312 ext_phy_type,
4313 ext_phy_addr,
4314 MDIO_PMA_DEVAD,
4315 MDIO_PMA_REG_LASI_CTRL, 0x1);
4316 DP(NETIF_MSG_LINK,
4317 "Setting the SFX7101 LED to blink on traffic\n");
4318 bnx2x_cl45_write(bp, params->port,
4319 ext_phy_type,
4320 ext_phy_addr,
4321 MDIO_PMA_DEVAD,
4322 MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
4323
4324 bnx2x_ext_phy_set_pause(params, vars);
4325 /* Restart autoneg */
4326 bnx2x_cl45_read(bp, params->port,
4327 ext_phy_type,
4328 ext_phy_addr,
4329 MDIO_AN_DEVAD,
4330 MDIO_AN_REG_CTRL, &val);
4331 val |= 0x200;
4332 bnx2x_cl45_write(bp, params->port,
4333 ext_phy_type,
4334 ext_phy_addr,
4335 MDIO_AN_DEVAD,
4336 MDIO_AN_REG_CTRL, val);
4337
4338 /* Save spirom version */
4339 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4340 ext_phy_addr, MDIO_PMA_DEVAD,
4341 MDIO_PMA_REG_7101_VER1, &fw_ver1);
4342
4343 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4344 ext_phy_addr, MDIO_PMA_DEVAD,
4345 MDIO_PMA_REG_7101_VER2, &fw_ver2);
4346
4347 bnx2x_save_spirom_version(params->bp, params->port,
4348 params->shmem_base,
4349 (u32)(fw_ver1<<16 | fw_ver2));
4350 break;
4351 }
4352 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
4353 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
4354 /* This phy uses the NIG latch mechanism since link
4355 indication arrives through its LED4 and not via
4356 its LASI signal, so we get steady signal
4357 instead of clear on read */
4358 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
4359 1 << NIG_LATCH_BC_ENABLE_MI_INT);
4360
4361 bnx2x_cl45_write(bp, params->port,
4362 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
4363 ext_phy_addr,
4364 MDIO_PMA_DEVAD,
4365 MDIO_PMA_REG_CTRL, 0x0000);
4366
4367 bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr);
4368 if (params->req_line_speed == SPEED_AUTO_NEG) {
4369
4370 u16 autoneg_val, an_1000_val, an_10_100_val;
4371 /* set 1000 speed advertisement */
4372 bnx2x_cl45_read(bp, params->port,
4373 ext_phy_type,
4374 ext_phy_addr,
4375 MDIO_AN_DEVAD,
4376 MDIO_AN_REG_8481_1000T_CTRL,
4377 &an_1000_val);
4378
4379 if (params->speed_cap_mask &
4380 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) {
4381 an_1000_val |= (1<<8);
4382 if (params->req_duplex == DUPLEX_FULL)
4383 an_1000_val |= (1<<9);
4384 DP(NETIF_MSG_LINK, "Advertising 1G\n");
4385 } else
4386 an_1000_val &= ~((1<<8) | (1<<9));
4387
4388 bnx2x_cl45_write(bp, params->port,
4389 ext_phy_type,
4390 ext_phy_addr,
4391 MDIO_AN_DEVAD,
4392 MDIO_AN_REG_8481_1000T_CTRL,
4393 an_1000_val);
4394
4395 /* set 100 speed advertisement */
4396 bnx2x_cl45_read(bp, params->port,
4397 ext_phy_type,
4398 ext_phy_addr,
4399 MDIO_AN_DEVAD,
4400 MDIO_AN_REG_8481_LEGACY_AN_ADV,
4401 &an_10_100_val);
4402
4403 if (params->speed_cap_mask &
4404 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
4405 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
4406 an_10_100_val |= (1<<7);
4407 if (params->req_duplex == DUPLEX_FULL)
4408 an_10_100_val |= (1<<8);
4409 DP(NETIF_MSG_LINK,
4410 "Advertising 100M\n");
4411 } else
4412 an_10_100_val &= ~((1<<7) | (1<<8));
4413
4414 /* set 10 speed advertisement */
4415 if (params->speed_cap_mask &
4416 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
4417 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
4418 an_10_100_val |= (1<<5);
4419 if (params->req_duplex == DUPLEX_FULL)
4420 an_10_100_val |= (1<<6);
4421 DP(NETIF_MSG_LINK, "Advertising 10M\n");
4422 }
4423 else
4424 an_10_100_val &= ~((1<<5) | (1<<6));
4425
4426 bnx2x_cl45_write(bp, params->port,
4427 ext_phy_type,
4428 ext_phy_addr,
4429 MDIO_AN_DEVAD,
4430 MDIO_AN_REG_8481_LEGACY_AN_ADV,
4431 an_10_100_val);
4432
4433 bnx2x_cl45_read(bp, params->port,
4434 ext_phy_type,
4435 ext_phy_addr,
4436 MDIO_AN_DEVAD,
4437 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4438 &autoneg_val);
4439
4440 /* Disable forced speed */
4441 autoneg_val &= ~(1<<6|1<<13);
4442
4443 /* Enable autoneg and restart autoneg
4444 for legacy speeds */
4445 autoneg_val |= (1<<9|1<<12);
4446
4447 if (params->req_duplex == DUPLEX_FULL)
4448 autoneg_val |= (1<<8);
4449 else
4450 autoneg_val &= ~(1<<8);
4451
4452 bnx2x_cl45_write(bp, params->port,
4453 ext_phy_type,
4454 ext_phy_addr,
4455 MDIO_AN_DEVAD,
4456 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4457 autoneg_val);
4458
4459 if (params->speed_cap_mask &
4460 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
4461 DP(NETIF_MSG_LINK, "Advertising 10G\n");
4462 /* Restart autoneg for 10G*/
4463
4464 bnx2x_cl45_write(bp, params->port,
4465 ext_phy_type,
4466 ext_phy_addr,
4467 MDIO_AN_DEVAD,
4468 MDIO_AN_REG_CTRL, 0x3200);
4469 }
4470 } else {
4471 /* Force speed */
4472 u16 autoneg_ctrl, pma_ctrl;
4473 bnx2x_cl45_read(bp, params->port,
4474 ext_phy_type,
4475 ext_phy_addr,
4476 MDIO_AN_DEVAD,
4477 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4478 &autoneg_ctrl);
4479
4480 /* Disable autoneg */
4481 autoneg_ctrl &= ~(1<<12);
4482
4483 /* Set 1000 force */
4484 switch (params->req_line_speed) {
4485 case SPEED_10000:
4486 DP(NETIF_MSG_LINK,
4487 "Unable to set 10G force !\n");
4488 break;
4489 case SPEED_1000:
4490 bnx2x_cl45_read(bp, params->port,
4491 ext_phy_type,
4492 ext_phy_addr,
4493 MDIO_PMA_DEVAD,
4494 MDIO_PMA_REG_CTRL,
4495 &pma_ctrl);
4496 autoneg_ctrl &= ~(1<<13);
4497 autoneg_ctrl |= (1<<6);
4498 pma_ctrl &= ~(1<<13);
4499 pma_ctrl |= (1<<6);
4500 DP(NETIF_MSG_LINK,
4501 "Setting 1000M force\n");
4502 bnx2x_cl45_write(bp, params->port,
4503 ext_phy_type,
4504 ext_phy_addr,
4505 MDIO_PMA_DEVAD,
4506 MDIO_PMA_REG_CTRL,
4507 pma_ctrl);
4508 break;
4509 case SPEED_100:
4510 autoneg_ctrl |= (1<<13);
4511 autoneg_ctrl &= ~(1<<6);
4512 DP(NETIF_MSG_LINK,
4513 "Setting 100M force\n");
4514 break;
4515 case SPEED_10:
4516 autoneg_ctrl &= ~(1<<13);
4517 autoneg_ctrl &= ~(1<<6);
4518 DP(NETIF_MSG_LINK,
4519 "Setting 10M force\n");
4520 break;
4521 }
4522 5592
4523 /* Duplex mode */ 5593 /* Make MOD_ABS give interrupt on change */
4524 if (params->req_duplex == DUPLEX_FULL) { 5594 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4525 autoneg_ctrl |= (1<<8); 5595 &val);
4526 DP(NETIF_MSG_LINK, 5596 val |= (1<<12);
4527 "Setting full duplex\n"); 5597 if (phy->flags & FLAGS_NOC)
4528 } else 5598 val |= (3<<5);
4529 autoneg_ctrl &= ~(1<<8);
4530
4531 /* Update autoneg ctrl and pma ctrl */
4532 bnx2x_cl45_write(bp, params->port,
4533 ext_phy_type,
4534 ext_phy_addr,
4535 MDIO_AN_DEVAD,
4536 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4537 autoneg_ctrl);
4538 }
4539 5599
4540 /* Save spirom version */ 5600 /*
4541 bnx2x_save_8481_spirom_version(bp, params->port, 5601 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
4542 ext_phy_addr, 5602 * status which reflect SFP+ module over-current
4543 params->shmem_base); 5603 */
4544 break; 5604 if (!(phy->flags & FLAGS_NOC))
4545 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: 5605 val &= 0xff8f; /* Reset bits 4-6 */
4546 DP(NETIF_MSG_LINK, 5606 bnx2x_cl45_write(bp, phy,
4547 "XGXS PHY Failure detected 0x%x\n", 5607 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
4548 params->ext_phy_config); 5608
4549 rc = -EINVAL; 5609 bnx2x_8727_power_module(bp, phy, 1);
4550 break; 5610
4551 default: 5611 bnx2x_cl45_read(bp, phy,
4552 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", 5612 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
4553 params->ext_phy_config); 5613
4554 rc = -EINVAL; 5614 bnx2x_cl45_read(bp, phy,
4555 break; 5615 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1);
5616
5617 /* Set option 1G speed */
5618 if (phy->req_line_speed == SPEED_1000) {
5619 DP(NETIF_MSG_LINK, "Setting 1G force\n");
5620 bnx2x_cl45_write(bp, phy,
5621 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
5622 bnx2x_cl45_write(bp, phy,
5623 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
5624 bnx2x_cl45_read(bp, phy,
5625 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
5626 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
5627 /*
5628 * Power down the XAUI until link is up in case of dual-media
5629 * and 1G
5630 */
5631 if (DUAL_MEDIA(params)) {
5632 bnx2x_cl45_read(bp, phy,
5633 MDIO_PMA_DEVAD,
5634 MDIO_PMA_REG_8727_PCS_GP, &val);
5635 val |= (3<<10);
5636 bnx2x_cl45_write(bp, phy,
5637 MDIO_PMA_DEVAD,
5638 MDIO_PMA_REG_8727_PCS_GP, val);
4556 } 5639 }
5640 } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
5641 ((phy->speed_cap_mask &
5642 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
5643 ((phy->speed_cap_mask &
5644 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
5645 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
5646
5647 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
5648 bnx2x_cl45_write(bp, phy,
5649 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
5650 bnx2x_cl45_write(bp, phy,
5651 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
5652 } else {
5653 /*
5654 * Since the 8727 has only single reset pin, need to set the 10G
5655 * registers although it is default
5656 */
5657 bnx2x_cl45_write(bp, phy,
5658 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
5659 0x0020);
5660 bnx2x_cl45_write(bp, phy,
5661 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
5662 bnx2x_cl45_write(bp, phy,
5663 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
5664 bnx2x_cl45_write(bp, phy,
5665 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
5666 0x0008);
5667 }
4557 5668
4558 } else { /* SerDes */ 5669 /*
4559 5670 * Set 2-wire transfer rate of SFP+ module EEPROM
4560 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config); 5671 * to 100Khz since some DACs(direct attached cables) do
4561 switch (ext_phy_type) { 5672 * not work at 400Khz.
4562 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: 5673 */
4563 DP(NETIF_MSG_LINK, "SerDes Direct\n"); 5674 bnx2x_cl45_write(bp, phy,
4564 break; 5675 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
4565 5676 0xa001);
4566 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: 5677
4567 DP(NETIF_MSG_LINK, "SerDes 5482\n"); 5678 /* Set TX PreEmphasis if needed */
4568 break; 5679 if ((params->feature_config_flags &
5680 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
5681 DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
5682 phy->tx_preemphasis[0],
5683 phy->tx_preemphasis[1]);
5684 bnx2x_cl45_write(bp, phy,
5685 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1,
5686 phy->tx_preemphasis[0]);
5687
5688 bnx2x_cl45_write(bp, phy,
5689 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2,
5690 phy->tx_preemphasis[1]);
5691 }
4569 5692
4570 default: 5693 /*
4571 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n", 5694 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
4572 params->ext_phy_config); 5695 * power mode, if TX Laser is disabled
4573 break; 5696 */
4574 } 5697 tx_en_mode = REG_RD(bp, params->shmem_base +
5698 offsetof(struct shmem_region,
5699 dev_info.port_hw_config[params->port].sfp_ctrl))
5700 & PORT_HW_CFG_TX_LASER_MASK;
5701
5702 if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
5703
5704 DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
5705 bnx2x_cl45_read(bp, phy,
5706 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
5707 tmp2 |= 0x1000;
5708 tmp2 &= 0xFFEF;
5709 bnx2x_cl45_write(bp, phy,
5710 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
4575 } 5711 }
4576 return rc; 5712
5713 return 0;
4577} 5714}
4578 5715
4579static void bnx2x_8727_handle_mod_abs(struct link_params *params) 5716static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
5717 struct link_params *params)
4580{ 5718{
4581 struct bnx2x *bp = params->bp; 5719 struct bnx2x *bp = params->bp;
4582 u16 mod_abs, rx_alarm_status; 5720 u16 mod_abs, rx_alarm_status;
4583 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
4584 u32 val = REG_RD(bp, params->shmem_base + 5721 u32 val = REG_RD(bp, params->shmem_base +
4585 offsetof(struct shmem_region, dev_info. 5722 offsetof(struct shmem_region, dev_info.
4586 port_feature_config[params->port]. 5723 port_feature_config[params->port].
4587 config)); 5724 config));
4588 bnx2x_cl45_read(bp, params->port, 5725 bnx2x_cl45_read(bp, phy,
4589 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 5726 MDIO_PMA_DEVAD,
4590 ext_phy_addr, 5727 MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
4591 MDIO_PMA_DEVAD,
4592 MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
4593 if (mod_abs & (1<<8)) { 5728 if (mod_abs & (1<<8)) {
4594 5729
4595 /* Module is absent */ 5730 /* Module is absent */
4596 DP(NETIF_MSG_LINK, "MOD_ABS indication " 5731 DP(NETIF_MSG_LINK, "MOD_ABS indication "
4597 "show module is absent\n"); 5732 "show module is absent\n");
4598 5733
4599 /* 1. Set mod_abs to detect next module 5734 /*
4600 presence event 5735 * 1. Set mod_abs to detect next module
4601 2. Set EDC off by setting OPTXLOS signal input to low 5736 * presence event
4602 (bit 9). 5737 * 2. Set EDC off by setting OPTXLOS signal input to low
4603 When the EDC is off it locks onto a reference clock and 5738 * (bit 9).
4604 avoids becoming 'lost'.*/ 5739 * When the EDC is off it locks onto a reference clock and
4605 mod_abs &= ~((1<<8)|(1<<9)); 5740 * avoids becoming 'lost'.
4606 bnx2x_cl45_write(bp, params->port, 5741 */
4607 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 5742 mod_abs &= ~(1<<8);
4608 ext_phy_addr, 5743 if (!(phy->flags & FLAGS_NOC))
4609 MDIO_PMA_DEVAD, 5744 mod_abs &= ~(1<<9);
4610 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 5745 bnx2x_cl45_write(bp, phy,
4611 5746 MDIO_PMA_DEVAD,
4612 /* Clear RX alarm since it stays up as long as 5747 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4613 the mod_abs wasn't changed */ 5748
4614 bnx2x_cl45_read(bp, params->port, 5749 /*
4615 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 5750 * Clear RX alarm since it stays up as long as
4616 ext_phy_addr, 5751 * the mod_abs wasn't changed
4617 MDIO_PMA_DEVAD, 5752 */
4618 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); 5753 bnx2x_cl45_read(bp, phy,
5754 MDIO_PMA_DEVAD,
5755 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4619 5756
4620 } else { 5757 } else {
4621 /* Module is present */ 5758 /* Module is present */
4622 DP(NETIF_MSG_LINK, "MOD_ABS indication " 5759 DP(NETIF_MSG_LINK, "MOD_ABS indication "
4623 "show module is present\n"); 5760 "show module is present\n");
4624 /* First thing, disable transmitter, 5761 /*
4625 and if the module is ok, the 5762 * First disable transmitter, and if the module is ok, the
4626 module_detection will enable it*/ 5763 * module_detection will enable it
4627 5764 * 1. Set mod_abs to detect next module absent event ( bit 8)
4628 /* 1. Set mod_abs to detect next module 5765 * 2. Restore the default polarity of the OPRXLOS signal and
4629 absent event ( bit 8) 5766 * this signal will then correctly indicate the presence or
4630 2. Restore the default polarity of the OPRXLOS signal and 5767 * absence of the Rx signal. (bit 9)
4631 this signal will then correctly indicate the presence or 5768 */
4632 absence of the Rx signal. (bit 9) */ 5769 mod_abs |= (1<<8);
4633 mod_abs |= ((1<<8)|(1<<9)); 5770 if (!(phy->flags & FLAGS_NOC))
4634 bnx2x_cl45_write(bp, params->port, 5771 mod_abs |= (1<<9);
4635 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 5772 bnx2x_cl45_write(bp, phy,
4636 ext_phy_addr, 5773 MDIO_PMA_DEVAD,
4637 MDIO_PMA_DEVAD, 5774 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4638 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 5775
4639 5776 /*
4640 /* Clear RX alarm since it stays up as long as 5777 * Clear RX alarm since it stays up as long as the mod_abs
4641 the mod_abs wasn't changed. This is need to be done 5778 * wasn't changed. This is need to be done before calling the
4642 before calling the module detection, otherwise it will clear 5779 * module detection, otherwise it will clear* the link update
4643 the link update alarm */ 5780 * alarm
4644 bnx2x_cl45_read(bp, params->port, 5781 */
4645 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 5782 bnx2x_cl45_read(bp, phy,
4646 ext_phy_addr, 5783 MDIO_PMA_DEVAD,
4647 MDIO_PMA_DEVAD, 5784 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4648 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4649 5785
4650 5786
4651 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 5787 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
4652 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) 5788 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
4653 bnx2x_sfp_set_transmitter(bp, params->port, 5789 bnx2x_sfp_set_transmitter(params, phy, 0);
4654 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4655 ext_phy_addr, 0);
4656 5790
4657 if (bnx2x_wait_for_sfp_module_initialized(params) 5791 if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
4658 == 0) 5792 bnx2x_sfp_module_detection(phy, params);
4659 bnx2x_sfp_module_detection(params);
4660 else 5793 else
4661 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); 5794 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
4662 } 5795 }
4663 5796
4664 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", 5797 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
4665 rx_alarm_status); 5798 rx_alarm_status);
4666 /* No need to check link status in case of 5799 /* No need to check link status in case of module plugged in/out */
4667 module plugged in/out */
4668} 5800}
4669 5801
5802static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
5803 struct link_params *params,
5804 struct link_vars *vars)
4670 5805
4671static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
4672 struct link_vars *vars,
4673 u8 is_mi_int)
4674{ 5806{
4675 struct bnx2x *bp = params->bp; 5807 struct bnx2x *bp = params->bp;
4676 u32 ext_phy_type; 5808 u8 link_up = 0;
4677 u8 ext_phy_addr; 5809 u16 link_status = 0;
4678 u16 val1 = 0, val2; 5810 u16 rx_alarm_status, lasi_ctrl, val1;
4679 u16 rx_sd, pcs_status; 5811
4680 u8 ext_phy_link_up = 0; 5812 /* If PHY is not initialized, do not check link status */
4681 u8 port = params->port; 5813 bnx2x_cl45_read(bp, phy,
5814 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL,
5815 &lasi_ctrl);
5816 if (!lasi_ctrl)
5817 return 0;
4682 5818
4683 if (vars->phy_flags & PHY_XGXS_FLAG) { 5819 /* Check the LASI */
4684 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 5820 bnx2x_cl45_read(bp, phy,
4685 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 5821 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM,
4686 switch (ext_phy_type) { 5822 &rx_alarm_status);
4687 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 5823 vars->line_speed = 0;
4688 DP(NETIF_MSG_LINK, "XGXS Direct\n"); 5824 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status);
4689 ext_phy_link_up = 1;
4690 break;
4691 5825
4692 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 5826 bnx2x_cl45_read(bp, phy,
4693 DP(NETIF_MSG_LINK, "XGXS 8705\n"); 5827 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
4694 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4695 ext_phy_addr,
4696 MDIO_WIS_DEVAD,
4697 MDIO_WIS_REG_LASI_STATUS, &val1);
4698 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
4699
4700 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4701 ext_phy_addr,
4702 MDIO_WIS_DEVAD,
4703 MDIO_WIS_REG_LASI_STATUS, &val1);
4704 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
4705
4706 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4707 ext_phy_addr,
4708 MDIO_PMA_DEVAD,
4709 MDIO_PMA_REG_RX_SD, &rx_sd);
4710
4711 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4712 ext_phy_addr,
4713 1,
4714 0xc809, &val1);
4715 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4716 ext_phy_addr,
4717 1,
4718 0xc809, &val1);
4719
4720 DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
4721 ext_phy_link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) &&
4722 ((val1 & (1<<8)) == 0));
4723 if (ext_phy_link_up)
4724 vars->line_speed = SPEED_10000;
4725 break;
4726 5828
4727 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 5829 DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1);
4728 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4729 DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
4730 /* Clear RX Alarm*/
4731 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4732 ext_phy_addr,
4733 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM,
4734 &val2);
4735 /* clear LASI indication*/
4736 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4737 ext_phy_addr,
4738 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS,
4739 &val1);
4740 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4741 ext_phy_addr,
4742 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS,
4743 &val2);
4744 DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x-->"
4745 "0x%x\n", val1, val2);
4746
4747 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4748 ext_phy_addr,
4749 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD,
4750 &rx_sd);
4751 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4752 ext_phy_addr,
4753 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS,
4754 &pcs_status);
4755 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4756 ext_phy_addr,
4757 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS,
4758 &val2);
4759 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4760 ext_phy_addr,
4761 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS,
4762 &val2);
4763
4764 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x"
4765 " pcs_status 0x%x 1Gbps link_status 0x%x\n",
4766 rx_sd, pcs_status, val2);
4767 /* link is up if both bit 0 of pmd_rx_sd and
4768 * bit 0 of pcs_status are set, or if the autoneg bit
4769 1 is set
4770 */
4771 ext_phy_link_up = ((rx_sd & pcs_status & 0x1) ||
4772 (val2 & (1<<1)));
4773 if (ext_phy_link_up) {
4774 if (ext_phy_type ==
4775 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
4776 /* If transmitter is disabled,
4777 ignore false link up indication */
4778 bnx2x_cl45_read(bp, params->port,
4779 ext_phy_type,
4780 ext_phy_addr,
4781 MDIO_PMA_DEVAD,
4782 MDIO_PMA_REG_PHY_IDENTIFIER,
4783 &val1);
4784 if (val1 & (1<<15)) {
4785 DP(NETIF_MSG_LINK, "Tx is "
4786 "disabled\n");
4787 ext_phy_link_up = 0;
4788 break;
4789 }
4790 }
4791 if (val2 & (1<<1))
4792 vars->line_speed = SPEED_1000;
4793 else
4794 vars->line_speed = SPEED_10000;
4795 }
4796 break;
4797 5830
4798 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 5831 /* Clear MSG-OUT */
4799 { 5832 bnx2x_cl45_read(bp, phy,
4800 u16 link_status = 0; 5833 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
4801 u16 rx_alarm_status;
4802 /* Check the LASI */
4803 bnx2x_cl45_read(bp, params->port,
4804 ext_phy_type,
4805 ext_phy_addr,
4806 MDIO_PMA_DEVAD,
4807 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4808
4809 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
4810 rx_alarm_status);
4811
4812 bnx2x_cl45_read(bp, params->port,
4813 ext_phy_type,
4814 ext_phy_addr,
4815 MDIO_PMA_DEVAD,
4816 MDIO_PMA_REG_LASI_STATUS, &val1);
4817 5834
4818 DP(NETIF_MSG_LINK, 5835 /*
4819 "8727 LASI status 0x%x\n", 5836 * If a module is present and there is need to check
4820 val1); 5837 * for over current
5838 */
5839 if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
5840 /* Check over-current using 8727 GPIO0 input*/
5841 bnx2x_cl45_read(bp, phy,
5842 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL,
5843 &val1);
5844
5845 if ((val1 & (1<<8)) == 0) {
5846 DP(NETIF_MSG_LINK, "8727 Power fault has been detected"
5847 " on port %d\n", params->port);
5848 netdev_err(bp->dev, "Error: Power fault on Port %d has"
5849 " been detected and the power to "
5850 "that SFP+ module has been removed"
5851 " to prevent failure of the card."
5852 " Please remove the SFP+ module and"
5853 " restart the system to clear this"
5854 " error.\n",
5855 params->port);
5856 /* Disable all RX_ALARMs except for mod_abs */
5857 bnx2x_cl45_write(bp, phy,
5858 MDIO_PMA_DEVAD,
5859 MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
4821 5860
4822 /* Clear MSG-OUT */ 5861 bnx2x_cl45_read(bp, phy,
4823 bnx2x_cl45_read(bp, params->port, 5862 MDIO_PMA_DEVAD,
4824 ext_phy_type, 5863 MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
4825 ext_phy_addr, 5864 /* Wait for module_absent_event */
4826 MDIO_PMA_DEVAD, 5865 val1 |= (1<<8);
4827 MDIO_PMA_REG_M8051_MSGOUT_REG, 5866 bnx2x_cl45_write(bp, phy,
4828 &val1); 5867 MDIO_PMA_DEVAD,
5868 MDIO_PMA_REG_PHY_IDENTIFIER, val1);
5869 /* Clear RX alarm */
5870 bnx2x_cl45_read(bp, phy,
5871 MDIO_PMA_DEVAD,
5872 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
5873 return 0;
5874 }
5875 } /* Over current check */
5876
5877 /* When module absent bit is set, check module */
5878 if (rx_alarm_status & (1<<5)) {
5879 bnx2x_8727_handle_mod_abs(phy, params);
5880 /* Enable all mod_abs and link detection bits */
5881 bnx2x_cl45_write(bp, phy,
5882 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
5883 ((1<<5) | (1<<2)));
5884 }
5885 DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
5886 bnx2x_8727_specific_func(phy, params, ENABLE_TX);
5887 /* If transmitter is disabled, ignore false link up indication */
5888 bnx2x_cl45_read(bp, phy,
5889 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
5890 if (val1 & (1<<15)) {
5891 DP(NETIF_MSG_LINK, "Tx is disabled\n");
5892 return 0;
5893 }
4829 5894
4830 /* 5895 bnx2x_cl45_read(bp, phy,
4831 * If a module is present and there is need to check 5896 MDIO_PMA_DEVAD,
4832 * for over current 5897 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
4833 */
4834 if (!(params->feature_config_flags &
4835 FEATURE_CONFIG_BCM8727_NOC) &&
4836 !(rx_alarm_status & (1<<5))) {
4837 /* Check over-current using 8727 GPIO0 input*/
4838 bnx2x_cl45_read(bp, params->port,
4839 ext_phy_type,
4840 ext_phy_addr,
4841 MDIO_PMA_DEVAD,
4842 MDIO_PMA_REG_8727_GPIO_CTRL,
4843 &val1);
4844
4845 if ((val1 & (1<<8)) == 0) {
4846 DP(NETIF_MSG_LINK, "8727 Power fault"
4847 " has been detected on "
4848 "port %d\n",
4849 params->port);
4850 netdev_err(bp->dev, "Error: Power fault on Port %d has been detected and the power to that SFP+ module has been removed to prevent failure of the card. Please remove the SFP+ module and restart the system to clear this error.\n",
4851 params->port);
4852 /*
4853 * Disable all RX_ALARMs except for
4854 * mod_abs
4855 */
4856 bnx2x_cl45_write(bp, params->port,
4857 ext_phy_type,
4858 ext_phy_addr,
4859 MDIO_PMA_DEVAD,
4860 MDIO_PMA_REG_RX_ALARM_CTRL,
4861 (1<<5));
4862
4863 bnx2x_cl45_read(bp, params->port,
4864 ext_phy_type,
4865 ext_phy_addr,
4866 MDIO_PMA_DEVAD,
4867 MDIO_PMA_REG_PHY_IDENTIFIER,
4868 &val1);
4869 /* Wait for module_absent_event */
4870 val1 |= (1<<8);
4871 bnx2x_cl45_write(bp, params->port,
4872 ext_phy_type,
4873 ext_phy_addr,
4874 MDIO_PMA_DEVAD,
4875 MDIO_PMA_REG_PHY_IDENTIFIER,
4876 val1);
4877 /* Clear RX alarm */
4878 bnx2x_cl45_read(bp, params->port,
4879 ext_phy_type,
4880 ext_phy_addr,
4881 MDIO_PMA_DEVAD,
4882 MDIO_PMA_REG_RX_ALARM,
4883 &rx_alarm_status);
4884 break;
4885 }
4886 } /* Over current check */
4887
4888 /* When module absent bit is set, check module */
4889 if (rx_alarm_status & (1<<5)) {
4890 bnx2x_8727_handle_mod_abs(params);
4891 /* Enable all mod_abs and link detection bits */
4892 bnx2x_cl45_write(bp, params->port,
4893 ext_phy_type,
4894 ext_phy_addr,
4895 MDIO_PMA_DEVAD,
4896 MDIO_PMA_REG_RX_ALARM_CTRL,
4897 ((1<<5) | (1<<2)));
4898 }
4899 5898
4900 /* If transmitter is disabled, 5899 /*
4901 ignore false link up indication */ 5900 * Bits 0..2 --> speed detected,
4902 bnx2x_cl45_read(bp, params->port, 5901 * Bits 13..15--> link is down
4903 ext_phy_type, 5902 */
4904 ext_phy_addr, 5903 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
4905 MDIO_PMA_DEVAD, 5904 link_up = 1;
4906 MDIO_PMA_REG_PHY_IDENTIFIER, 5905 vars->line_speed = SPEED_10000;
4907 &val1); 5906 DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
4908 if (val1 & (1<<15)) { 5907 params->port);
4909 DP(NETIF_MSG_LINK, "Tx is disabled\n"); 5908 } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
4910 ext_phy_link_up = 0; 5909 link_up = 1;
4911 break; 5910 vars->line_speed = SPEED_1000;
4912 } 5911 DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
5912 params->port);
5913 } else {
5914 link_up = 0;
5915 DP(NETIF_MSG_LINK, "port %x: External link is down\n",
5916 params->port);
5917 }
5918 if (link_up) {
5919 bnx2x_ext_phy_resolve_fc(phy, params, vars);
5920 vars->duplex = DUPLEX_FULL;
5921 DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
5922 }
4913 5923
4914 bnx2x_cl45_read(bp, params->port, 5924 if ((DUAL_MEDIA(params)) &&
4915 ext_phy_type, 5925 (phy->req_line_speed == SPEED_1000)) {
4916 ext_phy_addr, 5926 bnx2x_cl45_read(bp, phy,
4917 MDIO_PMA_DEVAD, 5927 MDIO_PMA_DEVAD,
4918 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, 5928 MDIO_PMA_REG_8727_PCS_GP, &val1);
4919 &link_status); 5929 /*
4920 5930 * In case of dual-media board and 1G, power up the XAUI side,
4921 /* Bits 0..2 --> speed detected, 5931 * otherwise power it down. For 10G it is done automatically
4922 bits 13..15--> link is down */ 5932 */
4923 if ((link_status & (1<<2)) && 5933 if (link_up)
4924 (!(link_status & (1<<15)))) { 5934 val1 &= ~(3<<10);
4925 ext_phy_link_up = 1; 5935 else
4926 vars->line_speed = SPEED_10000; 5936 val1 |= (3<<10);
4927 } else if ((link_status & (1<<0)) && 5937 bnx2x_cl45_write(bp, phy,
4928 (!(link_status & (1<<13)))) { 5938 MDIO_PMA_DEVAD,
4929 ext_phy_link_up = 1; 5939 MDIO_PMA_REG_8727_PCS_GP, val1);
4930 vars->line_speed = SPEED_1000; 5940 }
4931 DP(NETIF_MSG_LINK, 5941 return link_up;
4932 "port %x: External link" 5942}
4933 " up in 1G\n", params->port);
4934 } else {
4935 ext_phy_link_up = 0;
4936 DP(NETIF_MSG_LINK,
4937 "port %x: External link"
4938 " is down\n", params->port);
4939 }
4940 break;
4941 }
4942 5943
4943 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: 5944static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
4944 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 5945 struct link_params *params)
4945 { 5946{
4946 u16 link_status = 0; 5947 struct bnx2x *bp = params->bp;
4947 u16 an1000_status = 0; 5948 /* Disable Transmitter */
4948 5949 bnx2x_sfp_set_transmitter(params, phy, 0);
4949 if (ext_phy_type == 5950 /* Clear LASI */
4950 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) { 5951 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
4951 bnx2x_cl45_read(bp, params->port,
4952 ext_phy_type,
4953 ext_phy_addr,
4954 MDIO_PCS_DEVAD,
4955 MDIO_PCS_REG_LASI_STATUS, &val1);
4956 bnx2x_cl45_read(bp, params->port,
4957 ext_phy_type,
4958 ext_phy_addr,
4959 MDIO_PCS_DEVAD,
4960 MDIO_PCS_REG_LASI_STATUS, &val2);
4961 DP(NETIF_MSG_LINK,
4962 "870x LASI status 0x%x->0x%x\n",
4963 val1, val2);
4964 } else {
4965 /* In 8073, port1 is directed through emac0 and
4966 * port0 is directed through emac1
4967 */
4968 bnx2x_cl45_read(bp, params->port,
4969 ext_phy_type,
4970 ext_phy_addr,
4971 MDIO_PMA_DEVAD,
4972 MDIO_PMA_REG_LASI_STATUS, &val1);
4973
4974 DP(NETIF_MSG_LINK,
4975 "8703 LASI status 0x%x\n",
4976 val1);
4977 }
4978 5952
4979 /* clear the interrupt LASI status register */ 5953}
4980 bnx2x_cl45_read(bp, params->port,
4981 ext_phy_type,
4982 ext_phy_addr,
4983 MDIO_PCS_DEVAD,
4984 MDIO_PCS_REG_STATUS, &val2);
4985 bnx2x_cl45_read(bp, params->port,
4986 ext_phy_type,
4987 ext_phy_addr,
4988 MDIO_PCS_DEVAD,
4989 MDIO_PCS_REG_STATUS, &val1);
4990 DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n",
4991 val2, val1);
4992 /* Clear MSG-OUT */
4993 bnx2x_cl45_read(bp, params->port,
4994 ext_phy_type,
4995 ext_phy_addr,
4996 MDIO_PMA_DEVAD,
4997 MDIO_PMA_REG_M8051_MSGOUT_REG,
4998 &val1);
4999
5000 /* Check the LASI */
5001 bnx2x_cl45_read(bp, params->port,
5002 ext_phy_type,
5003 ext_phy_addr,
5004 MDIO_PMA_DEVAD,
5005 MDIO_PMA_REG_RX_ALARM, &val2);
5006
5007 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
5008
5009 /* Check the link status */
5010 bnx2x_cl45_read(bp, params->port,
5011 ext_phy_type,
5012 ext_phy_addr,
5013 MDIO_PCS_DEVAD,
5014 MDIO_PCS_REG_STATUS, &val2);
5015 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
5016
5017 bnx2x_cl45_read(bp, params->port,
5018 ext_phy_type,
5019 ext_phy_addr,
5020 MDIO_PMA_DEVAD,
5021 MDIO_PMA_REG_STATUS, &val2);
5022 bnx2x_cl45_read(bp, params->port,
5023 ext_phy_type,
5024 ext_phy_addr,
5025 MDIO_PMA_DEVAD,
5026 MDIO_PMA_REG_STATUS, &val1);
5027 ext_phy_link_up = ((val1 & 4) == 4);
5028 DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
5029 if (ext_phy_type ==
5030 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
5031
5032 if (ext_phy_link_up &&
5033 ((params->req_line_speed !=
5034 SPEED_10000))) {
5035 if (bnx2x_bcm8073_xaui_wa(params)
5036 != 0) {
5037 ext_phy_link_up = 0;
5038 break;
5039 }
5040 }
5041 bnx2x_cl45_read(bp, params->port,
5042 ext_phy_type,
5043 ext_phy_addr,
5044 MDIO_AN_DEVAD,
5045 MDIO_AN_REG_LINK_STATUS,
5046 &an1000_status);
5047 bnx2x_cl45_read(bp, params->port,
5048 ext_phy_type,
5049 ext_phy_addr,
5050 MDIO_AN_DEVAD,
5051 MDIO_AN_REG_LINK_STATUS,
5052 &an1000_status);
5053
5054 /* Check the link status on 1.1.2 */
5055 bnx2x_cl45_read(bp, params->port,
5056 ext_phy_type,
5057 ext_phy_addr,
5058 MDIO_PMA_DEVAD,
5059 MDIO_PMA_REG_STATUS, &val2);
5060 bnx2x_cl45_read(bp, params->port,
5061 ext_phy_type,
5062 ext_phy_addr,
5063 MDIO_PMA_DEVAD,
5064 MDIO_PMA_REG_STATUS, &val1);
5065 DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
5066 "an_link_status=0x%x\n",
5067 val2, val1, an1000_status);
5068
5069 ext_phy_link_up = (((val1 & 4) == 4) ||
5070 (an1000_status & (1<<1)));
5071 if (ext_phy_link_up &&
5072 bnx2x_8073_is_snr_needed(params)) {
5073 /* The SNR will improve about 2dbby
5074 changing the BW and FEE main tap.*/
5075
5076 /* The 1st write to change FFE main
5077 tap is set before restart AN */
5078 /* Change PLL Bandwidth in EDC
5079 register */
5080 bnx2x_cl45_write(bp, port, ext_phy_type,
5081 ext_phy_addr,
5082 MDIO_PMA_DEVAD,
5083 MDIO_PMA_REG_PLL_BANDWIDTH,
5084 0x26BC);
5085
5086 /* Change CDR Bandwidth in EDC
5087 register */
5088 bnx2x_cl45_write(bp, port, ext_phy_type,
5089 ext_phy_addr,
5090 MDIO_PMA_DEVAD,
5091 MDIO_PMA_REG_CDR_BANDWIDTH,
5092 0x0333);
5093 }
5094 bnx2x_cl45_read(bp, params->port,
5095 ext_phy_type,
5096 ext_phy_addr,
5097 MDIO_PMA_DEVAD,
5098 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
5099 &link_status);
5100
5101 /* Bits 0..2 --> speed detected,
5102 bits 13..15--> link is down */
5103 if ((link_status & (1<<2)) &&
5104 (!(link_status & (1<<15)))) {
5105 ext_phy_link_up = 1;
5106 vars->line_speed = SPEED_10000;
5107 DP(NETIF_MSG_LINK,
5108 "port %x: External link"
5109 " up in 10G\n", params->port);
5110 } else if ((link_status & (1<<1)) &&
5111 (!(link_status & (1<<14)))) {
5112 ext_phy_link_up = 1;
5113 vars->line_speed = SPEED_2500;
5114 DP(NETIF_MSG_LINK,
5115 "port %x: External link"
5116 " up in 2.5G\n", params->port);
5117 } else if ((link_status & (1<<0)) &&
5118 (!(link_status & (1<<13)))) {
5119 ext_phy_link_up = 1;
5120 vars->line_speed = SPEED_1000;
5121 DP(NETIF_MSG_LINK,
5122 "port %x: External link"
5123 " up in 1G\n", params->port);
5124 } else {
5125 ext_phy_link_up = 0;
5126 DP(NETIF_MSG_LINK,
5127 "port %x: External link"
5128 " is down\n", params->port);
5129 }
5130 } else {
5131 /* See if 1G link is up for the 8072 */
5132 bnx2x_cl45_read(bp, params->port,
5133 ext_phy_type,
5134 ext_phy_addr,
5135 MDIO_AN_DEVAD,
5136 MDIO_AN_REG_LINK_STATUS,
5137 &an1000_status);
5138 bnx2x_cl45_read(bp, params->port,
5139 ext_phy_type,
5140 ext_phy_addr,
5141 MDIO_AN_DEVAD,
5142 MDIO_AN_REG_LINK_STATUS,
5143 &an1000_status);
5144 if (an1000_status & (1<<1)) {
5145 ext_phy_link_up = 1;
5146 vars->line_speed = SPEED_1000;
5147 DP(NETIF_MSG_LINK,
5148 "port %x: External link"
5149 " up in 1G\n", params->port);
5150 } else if (ext_phy_link_up) {
5151 ext_phy_link_up = 1;
5152 vars->line_speed = SPEED_10000;
5153 DP(NETIF_MSG_LINK,
5154 "port %x: External link"
5155 " up in 10G\n", params->port);
5156 }
5157 }
5158 5954
5955/******************************************************************/
5956/* BCM8481/BCM84823/BCM84833 PHY SECTION */
5957/******************************************************************/
5958static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
5959 struct link_params *params)
5960{
5961 u16 val, fw_ver1, fw_ver2, cnt, adj;
5962 struct bnx2x *bp = params->bp;
5159 5963
5160 break; 5964 adj = 0;
5161 } 5965 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
5162 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 5966 adj = -1;
5163 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5164 ext_phy_addr,
5165 MDIO_PMA_DEVAD,
5166 MDIO_PMA_REG_LASI_STATUS, &val2);
5167 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5168 ext_phy_addr,
5169 MDIO_PMA_DEVAD,
5170 MDIO_PMA_REG_LASI_STATUS, &val1);
5171 DP(NETIF_MSG_LINK,
5172 "10G-base-T LASI status 0x%x->0x%x\n",
5173 val2, val1);
5174 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5175 ext_phy_addr,
5176 MDIO_PMA_DEVAD,
5177 MDIO_PMA_REG_STATUS, &val2);
5178 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5179 ext_phy_addr,
5180 MDIO_PMA_DEVAD,
5181 MDIO_PMA_REG_STATUS, &val1);
5182 DP(NETIF_MSG_LINK,
5183 "10G-base-T PMA status 0x%x->0x%x\n",
5184 val2, val1);
5185 ext_phy_link_up = ((val1 & 4) == 4);
5186 /* if link is up
5187 * print the AN outcome of the SFX7101 PHY
5188 */
5189 if (ext_phy_link_up) {
5190 bnx2x_cl45_read(bp, params->port,
5191 ext_phy_type,
5192 ext_phy_addr,
5193 MDIO_AN_DEVAD,
5194 MDIO_AN_REG_MASTER_STATUS,
5195 &val2);
5196 vars->line_speed = SPEED_10000;
5197 DP(NETIF_MSG_LINK,
5198 "SFX7101 AN status 0x%x->Master=%x\n",
5199 val2,
5200 (val2 & (1<<14)));
5201 }
5202 break;
5203 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
5204 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
5205 /* Check 10G-BaseT link status */
5206 /* Check PMD signal ok */
5207 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5208 ext_phy_addr,
5209 MDIO_AN_DEVAD,
5210 0xFFFA,
5211 &val1);
5212 bnx2x_cl45_read(bp, params->port, ext_phy_type,
5213 ext_phy_addr,
5214 MDIO_PMA_DEVAD,
5215 MDIO_PMA_REG_8481_PMD_SIGNAL,
5216 &val2);
5217 DP(NETIF_MSG_LINK, "PMD_SIGNAL 1.a811 = 0x%x\n", val2);
5218
5219 /* Check link 10G */
5220 if (val2 & (1<<11)) {
5221 vars->line_speed = SPEED_10000;
5222 ext_phy_link_up = 1;
5223 bnx2x_8481_set_10G_led_mode(params,
5224 ext_phy_type,
5225 ext_phy_addr);
5226 } else { /* Check Legacy speed link */
5227 u16 legacy_status, legacy_speed;
5228
5229 /* Enable expansion register 0x42
5230 (Operation mode status) */
5231 bnx2x_cl45_write(bp, params->port,
5232 ext_phy_type,
5233 ext_phy_addr,
5234 MDIO_AN_DEVAD,
5235 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS,
5236 0xf42);
5237
5238 /* Get legacy speed operation status */
5239 bnx2x_cl45_read(bp, params->port,
5240 ext_phy_type,
5241 ext_phy_addr,
5242 MDIO_AN_DEVAD,
5243 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
5244 &legacy_status);
5245
5246 DP(NETIF_MSG_LINK, "Legacy speed status"
5247 " = 0x%x\n", legacy_status);
5248 ext_phy_link_up = ((legacy_status & (1<<11))
5249 == (1<<11));
5250 if (ext_phy_link_up) {
5251 legacy_speed = (legacy_status & (3<<9));
5252 if (legacy_speed == (0<<9))
5253 vars->line_speed = SPEED_10;
5254 else if (legacy_speed == (1<<9))
5255 vars->line_speed =
5256 SPEED_100;
5257 else if (legacy_speed == (2<<9))
5258 vars->line_speed =
5259 SPEED_1000;
5260 else /* Should not happen */
5261 vars->line_speed = 0;
5262
5263 if (legacy_status & (1<<8))
5264 vars->duplex = DUPLEX_FULL;
5265 else
5266 vars->duplex = DUPLEX_HALF;
5267
5268 DP(NETIF_MSG_LINK, "Link is up "
5269 "in %dMbps, is_duplex_full"
5270 "= %d\n",
5271 vars->line_speed,
5272 (vars->duplex == DUPLEX_FULL));
5273 bnx2x_8481_set_legacy_led_mode(params,
5274 ext_phy_type,
5275 ext_phy_addr);
5276 }
5277 }
5278 break;
5279 default:
5280 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
5281 params->ext_phy_config);
5282 ext_phy_link_up = 0;
5283 break;
5284 }
5285 /* Set SGMII mode for external phy */
5286 if (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
5287 if (vars->line_speed < SPEED_1000)
5288 vars->phy_flags |= PHY_SGMII_FLAG;
5289 else
5290 vars->phy_flags &= ~PHY_SGMII_FLAG;
5291 }
5292 5967
5293 } else { /* SerDes */ 5968 /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
5294 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config); 5969 /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
5295 switch (ext_phy_type) { 5970 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014);
5296 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: 5971 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
5297 DP(NETIF_MSG_LINK, "SerDes Direct\n"); 5972 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000);
5298 ext_phy_link_up = 1; 5973 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300);
5299 break; 5974 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009);
5300 5975
5301 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: 5976 for (cnt = 0; cnt < 100; cnt++) {
5302 DP(NETIF_MSG_LINK, "SerDes 5482\n"); 5977 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
5303 ext_phy_link_up = 1; 5978 if (val & 1)
5304 break; 5979 break;
5980 udelay(5);
5981 }
5982 if (cnt == 100) {
5983 DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
5984 bnx2x_save_spirom_version(bp, params->port, 0,
5985 phy->ver_addr);
5986 return;
5987 }
5305 5988
5306 default: 5989
5307 DP(NETIF_MSG_LINK, 5990 /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
5308 "BAD SerDes ext_phy_config 0x%x\n", 5991 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000);
5309 params->ext_phy_config); 5992 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
5310 ext_phy_link_up = 0; 5993 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A);
5994 for (cnt = 0; cnt < 100; cnt++) {
5995 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
5996 if (val & 1)
5311 break; 5997 break;
5312 } 5998 udelay(5);
5313 } 5999 }
6000 if (cnt == 100) {
6001 DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
6002 bnx2x_save_spirom_version(bp, params->port, 0,
6003 phy->ver_addr);
6004 return;
6005 }
6006
6007 /* lower 16 bits of the register SPI_FW_STATUS */
6008 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1);
6009 /* upper 16 bits of register SPI_FW_STATUS */
6010 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2);
5314 6011
5315 return ext_phy_link_up; 6012 bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
6013 phy->ver_addr);
5316} 6014}
5317 6015
5318static void bnx2x_link_int_enable(struct link_params *params) 6016static void bnx2x_848xx_set_led(struct bnx2x *bp,
6017 struct bnx2x_phy *phy)
5319{ 6018{
5320 u8 port = params->port; 6019 u16 val, adj;
5321 u32 ext_phy_type;
5322 u32 mask;
5323 struct bnx2x *bp = params->bp;
5324 6020
5325 /* setting the status to report on link up 6021 adj = 0;
5326 for either XGXS or SerDes */ 6022 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
5327 6023 adj = -1;
5328 if (params->switch_cfg == SWITCH_CFG_10G) {
5329 mask = (NIG_MASK_XGXS0_LINK10G |
5330 NIG_MASK_XGXS0_LINK_STATUS);
5331 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
5332 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5333 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
5334 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
5335 (ext_phy_type !=
5336 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
5337 mask |= NIG_MASK_MI_INT;
5338 DP(NETIF_MSG_LINK, "enabled external phy int\n");
5339 }
5340 6024
5341 } else { /* SerDes */ 6025 /* PHYC_CTL_LED_CTL */
5342 mask = NIG_MASK_SERDES0_LINK_STATUS; 6026 bnx2x_cl45_read(bp, phy,
5343 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n"); 6027 MDIO_PMA_DEVAD,
5344 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config); 6028 MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val);
5345 if ((ext_phy_type != 6029 val &= 0xFE00;
5346 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) && 6030 val |= 0x0092;
5347 (ext_phy_type != 6031
5348 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) { 6032 bnx2x_cl45_write(bp, phy,
5349 mask |= NIG_MASK_MI_INT; 6033 MDIO_PMA_DEVAD,
5350 DP(NETIF_MSG_LINK, "enabled external phy int\n"); 6034 MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val);
5351 } 6035
5352 } 6036 bnx2x_cl45_write(bp, phy,
5353 bnx2x_bits_en(bp, 6037 MDIO_PMA_DEVAD,
5354 NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 6038 MDIO_PMA_REG_8481_LED1_MASK + adj,
5355 mask); 6039 0x80);
6040
6041 bnx2x_cl45_write(bp, phy,
6042 MDIO_PMA_DEVAD,
6043 MDIO_PMA_REG_8481_LED2_MASK + adj,
6044 0x18);
6045
6046 /* Select activity source by Tx and Rx, as suggested by PHY AE */
6047 bnx2x_cl45_write(bp, phy,
6048 MDIO_PMA_DEVAD,
6049 MDIO_PMA_REG_8481_LED3_MASK + adj,
6050 0x0006);
6051
6052 /* Select the closest activity blink rate to that in 10/100/1000 */
6053 bnx2x_cl45_write(bp, phy,
6054 MDIO_PMA_DEVAD,
6055 MDIO_PMA_REG_8481_LED3_BLINK + adj,
6056 0);
6057
6058 bnx2x_cl45_read(bp, phy,
6059 MDIO_PMA_DEVAD,
6060 MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val);
6061 val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
6062
6063 bnx2x_cl45_write(bp, phy,
6064 MDIO_PMA_DEVAD,
6065 MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val);
5356 6066
5357 DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port, 6067 /* 'Interrupt Mask' */
5358 (params->switch_cfg == SWITCH_CFG_10G), 6068 bnx2x_cl45_write(bp, phy,
5359 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); 6069 MDIO_AN_DEVAD,
5360 DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n", 6070 0xFFFB, 0xFFFD);
5361 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
5362 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
5363 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
5364 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
5365 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
5366 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
5367} 6071}
5368 6072
5369static void bnx2x_8481_rearm_latch_signal(struct bnx2x *bp, u8 port, 6073static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
5370 u8 is_mi_int) 6074 struct link_params *params,
6075 struct link_vars *vars)
5371{ 6076{
5372 u32 latch_status = 0, is_mi_int_status; 6077 struct bnx2x *bp = params->bp;
5373 /* Disable the MI INT ( external phy int ) 6078 u16 autoneg_val, an_1000_val, an_10_100_val;
5374 * by writing 1 to the status register. Link down indication 6079 /*
5375 * is high-active-signal, so in this case we need to write the 6080 * This phy uses the NIG latch mechanism since link indication
5376 * status to clear the XOR 6081 * arrives through its LED4 and not via its LASI signal, so we
6082 * get steady signal instead of clear on read
5377 */ 6083 */
5378 /* Read Latched signals */ 6084 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
5379 latch_status = REG_RD(bp, 6085 1 << NIG_LATCH_BC_ENABLE_MI_INT);
5380 NIG_REG_LATCH_STATUS_0 + port*8); 6086
5381 is_mi_int_status = REG_RD(bp, 6087 bnx2x_cl45_write(bp, phy,
5382 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4); 6088 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
5383 DP(NETIF_MSG_LINK, "original_signal = 0x%x, nig_status = 0x%x," 6089
5384 "latch_status = 0x%x\n", 6090 bnx2x_848xx_set_led(bp, phy);
5385 is_mi_int, is_mi_int_status, latch_status); 6091
5386 /* Handle only those with latched-signal=up.*/ 6092 /* set 1000 speed advertisement */
5387 if (latch_status & 1) { 6093 bnx2x_cl45_read(bp, phy,
5388 /* For all latched-signal=up,Write original_signal to status */ 6094 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
5389 if (is_mi_int) 6095 &an_1000_val);
5390 bnx2x_bits_en(bp, 6096
5391 NIG_REG_STATUS_INTERRUPT_PORT0 6097 bnx2x_ext_phy_set_pause(params, phy, vars);
5392 + port*4, 6098 bnx2x_cl45_read(bp, phy,
5393 NIG_STATUS_EMAC0_MI_INT); 6099 MDIO_AN_DEVAD,
5394 else 6100 MDIO_AN_REG_8481_LEGACY_AN_ADV,
5395 bnx2x_bits_dis(bp, 6101 &an_10_100_val);
5396 NIG_REG_STATUS_INTERRUPT_PORT0 6102 bnx2x_cl45_read(bp, phy,
5397 + port*4, 6103 MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL,
5398 NIG_STATUS_EMAC0_MI_INT); 6104 &autoneg_val);
5399 /* For all latched-signal=up : Re-Arm Latch signals */ 6105 /* Disable forced speed */
5400 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8, 6106 autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
5401 (latch_status & 0xfffe) | (latch_status & 1)); 6107 an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8));
6108
6109 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
6110 (phy->speed_cap_mask &
6111 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
6112 (phy->req_line_speed == SPEED_1000)) {
6113 an_1000_val |= (1<<8);
6114 autoneg_val |= (1<<9 | 1<<12);
6115 if (phy->req_duplex == DUPLEX_FULL)
6116 an_1000_val |= (1<<9);
6117 DP(NETIF_MSG_LINK, "Advertising 1G\n");
6118 } else
6119 an_1000_val &= ~((1<<8) | (1<<9));
6120
6121 bnx2x_cl45_write(bp, phy,
6122 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
6123 an_1000_val);
6124
6125 /* set 10 speed advertisement */
6126 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
6127 (phy->speed_cap_mask &
6128 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
6129 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
6130 an_10_100_val |= (1<<7);
6131 /* Enable autoneg and restart autoneg for legacy speeds */
6132 autoneg_val |= (1<<9 | 1<<12);
6133
6134 if (phy->req_duplex == DUPLEX_FULL)
6135 an_10_100_val |= (1<<8);
6136 DP(NETIF_MSG_LINK, "Advertising 100M\n");
6137 }
6138 /* set 10 speed advertisement */
6139 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
6140 (phy->speed_cap_mask &
6141 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
6142 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
6143 an_10_100_val |= (1<<5);
6144 autoneg_val |= (1<<9 | 1<<12);
6145 if (phy->req_duplex == DUPLEX_FULL)
6146 an_10_100_val |= (1<<6);
6147 DP(NETIF_MSG_LINK, "Advertising 10M\n");
6148 }
6149
6150 /* Only 10/100 are allowed to work in FORCE mode */
6151 if (phy->req_line_speed == SPEED_100) {
6152 autoneg_val |= (1<<13);
6153 /* Enabled AUTO-MDIX when autoneg is disabled */
6154 bnx2x_cl45_write(bp, phy,
6155 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
6156 (1<<15 | 1<<9 | 7<<0));
6157 DP(NETIF_MSG_LINK, "Setting 100M force\n");
5402 } 6158 }
6159 if (phy->req_line_speed == SPEED_10) {
6160 /* Enabled AUTO-MDIX when autoneg is disabled */
6161 bnx2x_cl45_write(bp, phy,
6162 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
6163 (1<<15 | 1<<9 | 7<<0));
6164 DP(NETIF_MSG_LINK, "Setting 10M force\n");
6165 }
6166
6167 bnx2x_cl45_write(bp, phy,
6168 MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV,
6169 an_10_100_val);
6170
6171 if (phy->req_duplex == DUPLEX_FULL)
6172 autoneg_val |= (1<<8);
6173
6174 bnx2x_cl45_write(bp, phy,
6175 MDIO_AN_DEVAD,
6176 MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
6177
6178 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
6179 (phy->speed_cap_mask &
6180 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
6181 (phy->req_line_speed == SPEED_10000)) {
6182 DP(NETIF_MSG_LINK, "Advertising 10G\n");
6183 /* Restart autoneg for 10G*/
6184
6185 bnx2x_cl45_write(bp, phy,
6186 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
6187 0x3200);
6188 } else if (phy->req_line_speed != SPEED_10 &&
6189 phy->req_line_speed != SPEED_100) {
6190 bnx2x_cl45_write(bp, phy,
6191 MDIO_AN_DEVAD,
6192 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
6193 1);
6194 }
6195 /* Save spirom version */
6196 bnx2x_save_848xx_spirom_version(phy, params);
6197
6198 return 0;
5403} 6199}
5404/* 6200
5405 * link management 6201static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
5406 */ 6202 struct link_params *params,
5407static void bnx2x_link_int_ack(struct link_params *params, 6203 struct link_vars *vars)
5408 struct link_vars *vars, u8 is_10g,
5409 u8 is_mi_int)
5410{ 6204{
5411 struct bnx2x *bp = params->bp; 6205 struct bnx2x *bp = params->bp;
5412 u8 port = params->port; 6206 /* Restore normal power mode*/
6207 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6208 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
5413 6209
5414 /* first reset all status 6210 /* HW reset */
5415 * we assume only one line will be change at a time */ 6211 bnx2x_ext_phy_hw_reset(bp, params->port);
5416 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 6212 bnx2x_wait_reset_complete(bp, phy, params);
5417 (NIG_STATUS_XGXS0_LINK10G |
5418 NIG_STATUS_XGXS0_LINK_STATUS |
5419 NIG_STATUS_SERDES0_LINK_STATUS));
5420 if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config)
5421 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) ||
5422 (XGXS_EXT_PHY_TYPE(params->ext_phy_config)
5423 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823)) {
5424 bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int);
5425 }
5426 if (vars->phy_link_up) {
5427 if (is_10g) {
5428 /* Disable the 10G link interrupt
5429 * by writing 1 to the status register
5430 */
5431 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
5432 bnx2x_bits_en(bp,
5433 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5434 NIG_STATUS_XGXS0_LINK10G);
5435 6213
5436 } else if (params->switch_cfg == SWITCH_CFG_10G) { 6214 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
5437 /* Disable the link interrupt 6215 return bnx2x_848xx_cmn_config_init(phy, params, vars);
5438 * by writing 1 to the relevant lane 6216}
5439 * in the status register
5440 */
5441 u32 ser_lane = ((params->lane_config &
5442 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
5443 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
5444 6217
5445 DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n", 6218static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
5446 vars->line_speed); 6219 struct link_params *params,
5447 bnx2x_bits_en(bp, 6220 struct link_vars *vars)
5448 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 6221{
5449 ((1 << ser_lane) << 6222 struct bnx2x *bp = params->bp;
5450 NIG_STATUS_XGXS0_LINK_STATUS_SIZE)); 6223 u8 port, initialize = 1;
6224 u16 val, adj;
6225 u16 temp;
6226 u32 actual_phy_selection, cms_enable;
6227 u8 rc = 0;
5451 6228
5452 } else { /* SerDes */ 6229 /* This is just for MDIO_CTL_REG_84823_MEDIA register. */
5453 DP(NETIF_MSG_LINK, "SerDes phy link up\n"); 6230 adj = 0;
5454 /* Disable the link interrupt 6231 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
5455 * by writing 1 to the status register 6232 adj = 3;
5456 */
5457 bnx2x_bits_en(bp,
5458 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
5459 NIG_STATUS_SERDES0_LINK_STATUS);
5460 }
5461 6233
5462 } else { /* link_down */ 6234 msleep(1);
6235 if (CHIP_IS_E2(bp))
6236 port = BP_PATH(bp);
6237 else
6238 port = params->port;
6239 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6240 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
6241 port);
6242 bnx2x_wait_reset_complete(bp, phy, params);
6243 /* Wait for GPHY to come out of reset */
6244 msleep(50);
6245 /*
6246 * BCM84823 requires that XGXS links up first @ 10G for normal behavior
6247 */
6248 temp = vars->line_speed;
6249 vars->line_speed = SPEED_10000;
6250 bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
6251 bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
6252 vars->line_speed = temp;
6253
6254 /* Set dual-media configuration according to configuration */
6255
6256 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
6257 MDIO_CTL_REG_84823_MEDIA + adj, &val);
6258 val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
6259 MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
6260 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
6261 MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
6262 MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
6263 val |= MDIO_CTL_REG_84823_CTRL_MAC_XFI |
6264 MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L;
6265
6266 actual_phy_selection = bnx2x_phy_selection(params);
6267
6268 switch (actual_phy_selection) {
6269 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
6270 /* Do nothing. Essentially this is like the priority copper */
6271 break;
6272 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
6273 val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER;
6274 break;
6275 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
6276 val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER;
6277 break;
6278 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
6279 /* Do nothing here. The first PHY won't be initialized at all */
6280 break;
6281 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
6282 val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN;
6283 initialize = 0;
6284 break;
5463 } 6285 }
6286 if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000)
6287 val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
6288
6289 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
6290 MDIO_CTL_REG_84823_MEDIA + adj, val);
6291 DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
6292 params->multi_phy_config, val);
6293
6294 if (initialize)
6295 rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
6296 else
6297 bnx2x_save_848xx_spirom_version(phy, params);
6298 cms_enable = REG_RD(bp, params->shmem_base +
6299 offsetof(struct shmem_region,
6300 dev_info.port_hw_config[params->port].default_cfg)) &
6301 PORT_HW_CFG_ENABLE_CMS_MASK;
6302
6303 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
6304 MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
6305 if (cms_enable)
6306 val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
6307 else
6308 val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
6309 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
6310 MDIO_CTL_REG_84823_USER_CTRL_REG, val);
6311
6312
6313 return rc;
5464} 6314}
5465 6315
5466static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len) 6316static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
6317 struct link_params *params,
6318 struct link_vars *vars)
5467{ 6319{
5468 u8 *str_ptr = str; 6320 struct bnx2x *bp = params->bp;
5469 u32 mask = 0xf0000000; 6321 u16 val, val1, val2, adj;
5470 u8 shift = 8*4; 6322 u8 link_up = 0;
5471 u8 digit; 6323
5472 if (len < 10) { 6324 /* Reg offset adjustment for 84833 */
5473 /* Need more than 10chars for this format */ 6325 adj = 0;
5474 *str_ptr = '\0'; 6326 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
5475 return -EINVAL; 6327 adj = -1;
5476 } 6328
5477 while (shift > 0) { 6329 /* Check 10G-BaseT link status */
6330 /* Check PMD signal ok */
6331 bnx2x_cl45_read(bp, phy,
6332 MDIO_AN_DEVAD, 0xFFFA, &val1);
6333 bnx2x_cl45_read(bp, phy,
6334 MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj,
6335 &val2);
6336 DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
6337
6338 /* Check link 10G */
6339 if (val2 & (1<<11)) {
6340 vars->line_speed = SPEED_10000;
6341 vars->duplex = DUPLEX_FULL;
6342 link_up = 1;
6343 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
6344 } else { /* Check Legacy speed link */
6345 u16 legacy_status, legacy_speed;
6346
6347 /* Enable expansion register 0x42 (Operation mode status) */
6348 bnx2x_cl45_write(bp, phy,
6349 MDIO_AN_DEVAD,
6350 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42);
6351
6352 /* Get legacy speed operation status */
6353 bnx2x_cl45_read(bp, phy,
6354 MDIO_AN_DEVAD,
6355 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
6356 &legacy_status);
6357
6358 DP(NETIF_MSG_LINK, "Legacy speed status"
6359 " = 0x%x\n", legacy_status);
6360 link_up = ((legacy_status & (1<<11)) == (1<<11));
6361 if (link_up) {
6362 legacy_speed = (legacy_status & (3<<9));
6363 if (legacy_speed == (0<<9))
6364 vars->line_speed = SPEED_10;
6365 else if (legacy_speed == (1<<9))
6366 vars->line_speed = SPEED_100;
6367 else if (legacy_speed == (2<<9))
6368 vars->line_speed = SPEED_1000;
6369 else /* Should not happen */
6370 vars->line_speed = 0;
5478 6371
5479 shift -= 4; 6372 if (legacy_status & (1<<8))
5480 digit = ((num & mask) >> shift); 6373 vars->duplex = DUPLEX_FULL;
5481 if (digit < 0xa) 6374 else
5482 *str_ptr = digit + '0'; 6375 vars->duplex = DUPLEX_HALF;
5483 else 6376
5484 *str_ptr = digit - 0xa + 'a'; 6377 DP(NETIF_MSG_LINK, "Link is up in %dMbps,"
5485 str_ptr++; 6378 " is_duplex_full= %d\n", vars->line_speed,
5486 mask = mask >> 4; 6379 (vars->duplex == DUPLEX_FULL));
5487 if (shift == 4*4) { 6380 /* Check legacy speed AN resolution */
5488 *str_ptr = ':'; 6381 bnx2x_cl45_read(bp, phy,
5489 str_ptr++; 6382 MDIO_AN_DEVAD,
6383 MDIO_AN_REG_8481_LEGACY_MII_STATUS,
6384 &val);
6385 if (val & (1<<5))
6386 vars->link_status |=
6387 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
6388 bnx2x_cl45_read(bp, phy,
6389 MDIO_AN_DEVAD,
6390 MDIO_AN_REG_8481_LEGACY_AN_EXPANSION,
6391 &val);
6392 if ((val & (1<<0)) == 0)
6393 vars->link_status |=
6394 LINK_STATUS_PARALLEL_DETECTION_USED;
5490 } 6395 }
5491 } 6396 }
5492 *str_ptr = '\0'; 6397 if (link_up) {
5493 return 0; 6398 DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
6399 vars->line_speed);
6400 bnx2x_ext_phy_resolve_fc(phy, params, vars);
6401 }
6402
6403 return link_up;
5494} 6404}
5495 6405
5496u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, 6406static u8 bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
5497 u8 *version, u16 len)
5498{ 6407{
5499 struct bnx2x *bp; 6408 u8 status = 0;
5500 u32 ext_phy_type = 0; 6409 u32 spirom_ver;
5501 u32 spirom_ver = 0; 6410 spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
5502 u8 status; 6411 status = bnx2x_format_ver(spirom_ver, str, len);
6412 return status;
6413}
5503 6414
5504 if (version == NULL || params == NULL) 6415static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
5505 return -EINVAL; 6416 struct link_params *params)
5506 bp = params->bp; 6417{
6418 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
6419 MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
6420 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
6421 MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
6422}
5507 6423
5508 spirom_ver = REG_RD(bp, params->shmem_base + 6424static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
5509 offsetof(struct shmem_region, 6425 struct link_params *params)
5510 port_mb[params->port].ext_phy_fw_version)); 6426{
6427 bnx2x_cl45_write(params->bp, phy,
6428 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
6429 bnx2x_cl45_write(params->bp, phy,
6430 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
6431}
5511 6432
5512 status = 0; 6433static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
5513 /* reset the returned value to zero */ 6434 struct link_params *params)
5514 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 6435{
5515 switch (ext_phy_type) { 6436 struct bnx2x *bp = params->bp;
5516 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 6437 u8 port;
6438 if (CHIP_IS_E2(bp))
6439 port = BP_PATH(bp);
6440 else
6441 port = params->port;
6442 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6443 MISC_REGISTERS_GPIO_OUTPUT_LOW,
6444 port);
6445}
5517 6446
5518 if (len < 5) 6447static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
5519 return -EINVAL; 6448 struct link_params *params, u8 mode)
6449{
6450 struct bnx2x *bp = params->bp;
6451 u16 val;
5520 6452
5521 version[0] = (spirom_ver & 0xFF); 6453 switch (mode) {
5522 version[1] = (spirom_ver & 0xFF00) >> 8; 6454 case LED_MODE_OFF:
5523 version[2] = (spirom_ver & 0xFF0000) >> 16;
5524 version[3] = (spirom_ver & 0xFF000000) >> 24;
5525 version[4] = '\0';
5526 6455
5527 break; 6456 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", params->port);
5528 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5529 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5530 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
5531 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
5532 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5533 status = bnx2x_format_ver(spirom_ver, version, len);
5534 break;
5535 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
5536 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
5537 spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 |
5538 (spirom_ver & 0x7F);
5539 status = bnx2x_format_ver(spirom_ver, version, len);
5540 break;
5541 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
5542 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
5543 version[0] = '\0';
5544 break;
5545 6457
5546 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: 6458 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
5547 DP(NETIF_MSG_LINK, "bnx2x_get_ext_phy_fw_version:" 6459 SHARED_HW_CFG_LED_EXTPHY1) {
5548 " type is FAILURE!\n"); 6460
5549 status = -EINVAL; 6461 /* Set LED masks */
6462 bnx2x_cl45_write(bp, phy,
6463 MDIO_PMA_DEVAD,
6464 MDIO_PMA_REG_8481_LED1_MASK,
6465 0x0);
6466
6467 bnx2x_cl45_write(bp, phy,
6468 MDIO_PMA_DEVAD,
6469 MDIO_PMA_REG_8481_LED2_MASK,
6470 0x0);
6471
6472 bnx2x_cl45_write(bp, phy,
6473 MDIO_PMA_DEVAD,
6474 MDIO_PMA_REG_8481_LED3_MASK,
6475 0x0);
6476
6477 bnx2x_cl45_write(bp, phy,
6478 MDIO_PMA_DEVAD,
6479 MDIO_PMA_REG_8481_LED5_MASK,
6480 0x0);
6481
6482 } else {
6483 bnx2x_cl45_write(bp, phy,
6484 MDIO_PMA_DEVAD,
6485 MDIO_PMA_REG_8481_LED1_MASK,
6486 0x0);
6487 }
5550 break; 6488 break;
6489 case LED_MODE_FRONT_PANEL_OFF:
5551 6490
5552 default: 6491 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n",
6492 params->port);
6493
6494 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
6495 SHARED_HW_CFG_LED_EXTPHY1) {
6496
6497 /* Set LED masks */
6498 bnx2x_cl45_write(bp, phy,
6499 MDIO_PMA_DEVAD,
6500 MDIO_PMA_REG_8481_LED1_MASK,
6501 0x0);
6502
6503 bnx2x_cl45_write(bp, phy,
6504 MDIO_PMA_DEVAD,
6505 MDIO_PMA_REG_8481_LED2_MASK,
6506 0x0);
6507
6508 bnx2x_cl45_write(bp, phy,
6509 MDIO_PMA_DEVAD,
6510 MDIO_PMA_REG_8481_LED3_MASK,
6511 0x0);
6512
6513 bnx2x_cl45_write(bp, phy,
6514 MDIO_PMA_DEVAD,
6515 MDIO_PMA_REG_8481_LED5_MASK,
6516 0x20);
6517
6518 } else {
6519 bnx2x_cl45_write(bp, phy,
6520 MDIO_PMA_DEVAD,
6521 MDIO_PMA_REG_8481_LED1_MASK,
6522 0x0);
6523 }
5553 break; 6524 break;
5554 } 6525 case LED_MODE_ON:
5555 return status;
5556}
5557 6526
5558static void bnx2x_set_xgxs_loopback(struct link_params *params, 6527 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", params->port);
5559 struct link_vars *vars,
5560 u8 is_10g)
5561{
5562 u8 port = params->port;
5563 struct bnx2x *bp = params->bp;
5564 6528
5565 if (is_10g) { 6529 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
5566 u32 md_devad; 6530 SHARED_HW_CFG_LED_EXTPHY1) {
6531 /* Set control reg */
6532 bnx2x_cl45_read(bp, phy,
6533 MDIO_PMA_DEVAD,
6534 MDIO_PMA_REG_8481_LINK_SIGNAL,
6535 &val);
6536 val &= 0x8000;
6537 val |= 0x2492;
5567 6538
5568 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); 6539 bnx2x_cl45_write(bp, phy,
6540 MDIO_PMA_DEVAD,
6541 MDIO_PMA_REG_8481_LINK_SIGNAL,
6542 val);
5569 6543
5570 /* change the uni_phy_addr in the nig */ 6544 /* Set LED masks */
5571 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + 6545 bnx2x_cl45_write(bp, phy,
5572 port*0x18)); 6546 MDIO_PMA_DEVAD,
6547 MDIO_PMA_REG_8481_LED1_MASK,
6548 0x0);
5573 6549
5574 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5); 6550 bnx2x_cl45_write(bp, phy,
6551 MDIO_PMA_DEVAD,
6552 MDIO_PMA_REG_8481_LED2_MASK,
6553 0x20);
5575 6554
5576 bnx2x_cl45_write(bp, port, 0, 6555 bnx2x_cl45_write(bp, phy,
5577 params->phy_addr, 6556 MDIO_PMA_DEVAD,
5578 5, 6557 MDIO_PMA_REG_8481_LED3_MASK,
5579 (MDIO_REG_BANK_AER_BLOCK + 6558 0x20);
5580 (MDIO_AER_BLOCK_AER_REG & 0xf)),
5581 0x2800);
5582
5583 bnx2x_cl45_write(bp, port, 0,
5584 params->phy_addr,
5585 5,
5586 (MDIO_REG_BANK_CL73_IEEEB0 +
5587 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
5588 0x6041);
5589 msleep(200);
5590 /* set aer mmd back */
5591 bnx2x_set_aer_mmd(params, vars);
5592 6559
5593 /* and md_devad */ 6560 bnx2x_cl45_write(bp, phy,
5594 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 6561 MDIO_PMA_DEVAD,
5595 md_devad); 6562 MDIO_PMA_REG_8481_LED5_MASK,
6563 0x0);
6564 } else {
6565 bnx2x_cl45_write(bp, phy,
6566 MDIO_PMA_DEVAD,
6567 MDIO_PMA_REG_8481_LED1_MASK,
6568 0x20);
6569 }
6570 break;
5596 6571
5597 } else { 6572 case LED_MODE_OPER:
5598 u16 mii_control;
5599 6573
5600 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n"); 6574 DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", params->port);
6575
6576 if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
6577 SHARED_HW_CFG_LED_EXTPHY1) {
6578
6579 /* Set control reg */
6580 bnx2x_cl45_read(bp, phy,
6581 MDIO_PMA_DEVAD,
6582 MDIO_PMA_REG_8481_LINK_SIGNAL,
6583 &val);
6584
6585 if (!((val &
6586 MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
6587 >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
6588 DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
6589 bnx2x_cl45_write(bp, phy,
6590 MDIO_PMA_DEVAD,
6591 MDIO_PMA_REG_8481_LINK_SIGNAL,
6592 0xa492);
6593 }
6594
6595 /* Set LED masks */
6596 bnx2x_cl45_write(bp, phy,
6597 MDIO_PMA_DEVAD,
6598 MDIO_PMA_REG_8481_LED1_MASK,
6599 0x10);
6600
6601 bnx2x_cl45_write(bp, phy,
6602 MDIO_PMA_DEVAD,
6603 MDIO_PMA_REG_8481_LED2_MASK,
6604 0x80);
5601 6605
5602 CL45_RD_OVER_CL22(bp, port, 6606 bnx2x_cl45_write(bp, phy,
5603 params->phy_addr, 6607 MDIO_PMA_DEVAD,
5604 MDIO_REG_BANK_COMBO_IEEE0, 6608 MDIO_PMA_REG_8481_LED3_MASK,
5605 MDIO_COMBO_IEEE0_MII_CONTROL, 6609 0x98);
5606 &mii_control); 6610
6611 bnx2x_cl45_write(bp, phy,
6612 MDIO_PMA_DEVAD,
6613 MDIO_PMA_REG_8481_LED5_MASK,
6614 0x40);
6615
6616 } else {
6617 bnx2x_cl45_write(bp, phy,
6618 MDIO_PMA_DEVAD,
6619 MDIO_PMA_REG_8481_LED1_MASK,
6620 0x80);
5607 6621
5608 CL45_WR_OVER_CL22(bp, port, 6622 /* Tell LED3 to blink on source */
5609 params->phy_addr, 6623 bnx2x_cl45_read(bp, phy,
5610 MDIO_REG_BANK_COMBO_IEEE0, 6624 MDIO_PMA_DEVAD,
5611 MDIO_COMBO_IEEE0_MII_CONTROL, 6625 MDIO_PMA_REG_8481_LINK_SIGNAL,
5612 (mii_control | 6626 &val);
5613 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK)); 6627 val &= ~(7<<6);
6628 val |= (1<<6); /* A83B[8:6]= 1 */
6629 bnx2x_cl45_write(bp, phy,
6630 MDIO_PMA_DEVAD,
6631 MDIO_PMA_REG_8481_LINK_SIGNAL,
6632 val);
6633 }
6634 break;
5614 } 6635 }
5615} 6636}
6637/******************************************************************/
6638/* SFX7101 PHY SECTION */
6639/******************************************************************/
6640static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
6641 struct link_params *params)
6642{
6643 struct bnx2x *bp = params->bp;
6644 /* SFX7101_XGXS_TEST1 */
6645 bnx2x_cl45_write(bp, phy,
6646 MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
6647}
5616 6648
5617 6649static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
5618static void bnx2x_ext_phy_loopback(struct link_params *params) 6650 struct link_params *params,
6651 struct link_vars *vars)
5619{ 6652{
6653 u16 fw_ver1, fw_ver2, val;
5620 struct bnx2x *bp = params->bp; 6654 struct bnx2x *bp = params->bp;
5621 u8 ext_phy_addr; 6655 DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n");
5622 u32 ext_phy_type;
5623 6656
5624 if (params->switch_cfg == SWITCH_CFG_10G) { 6657 /* Restore normal power mode*/
5625 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 6658 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
5626 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 6659 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
5627 /* CL37 Autoneg Enabled */ 6660 /* HW reset */
5628 switch (ext_phy_type) { 6661 bnx2x_ext_phy_hw_reset(bp, params->port);
5629 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 6662 bnx2x_wait_reset_complete(bp, phy, params);
5630 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN: 6663
5631 DP(NETIF_MSG_LINK, 6664 bnx2x_cl45_write(bp, phy,
5632 "ext_phy_loopback: We should not get here\n"); 6665 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
5633 break; 6666 DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n");
5634 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 6667 bnx2x_cl45_write(bp, phy,
5635 DP(NETIF_MSG_LINK, "ext_phy_loopback: 8705\n"); 6668 MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
5636 break; 6669
5637 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 6670 bnx2x_ext_phy_set_pause(params, phy, vars);
5638 DP(NETIF_MSG_LINK, "ext_phy_loopback: 8706\n"); 6671 /* Restart autoneg */
5639 break; 6672 bnx2x_cl45_read(bp, phy,
5640 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 6673 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
5641 DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n"); 6674 val |= 0x200;
5642 bnx2x_cl45_write(bp, params->port, ext_phy_type, 6675 bnx2x_cl45_write(bp, phy,
5643 ext_phy_addr, 6676 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
5644 MDIO_PMA_DEVAD, 6677
5645 MDIO_PMA_REG_CTRL, 6678 /* Save spirom version */
5646 0x0001); 6679 bnx2x_cl45_read(bp, phy,
5647 break; 6680 MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1);
5648 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 6681
5649 /* SFX7101_XGXS_TEST1 */ 6682 bnx2x_cl45_read(bp, phy,
5650 bnx2x_cl45_write(bp, params->port, ext_phy_type, 6683 MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
5651 ext_phy_addr, 6684 bnx2x_save_spirom_version(bp, params->port,
5652 MDIO_XS_DEVAD, 6685 (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
5653 MDIO_XS_SFX7101_XGXS_TEST1, 6686 return 0;
5654 0x100); 6687}
5655 DP(NETIF_MSG_LINK,
5656 "ext_phy_loopback: set ext phy loopback\n");
5657 break;
5658 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5659 6688
5660 break; 6689static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
5661 } /* switch external PHY type */ 6690 struct link_params *params,
5662 } else { 6691 struct link_vars *vars)
5663 /* serdes */ 6692{
5664 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config); 6693 struct bnx2x *bp = params->bp;
5665 ext_phy_addr = (params->ext_phy_config & 6694 u8 link_up;
5666 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) 6695 u16 val1, val2;
5667 >> PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT; 6696 bnx2x_cl45_read(bp, phy,
6697 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2);
6698 bnx2x_cl45_read(bp, phy,
6699 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
6700 DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
6701 val2, val1);
6702 bnx2x_cl45_read(bp, phy,
6703 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
6704 bnx2x_cl45_read(bp, phy,
6705 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
6706 DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
6707 val2, val1);
6708 link_up = ((val1 & 4) == 4);
6709 /* if link is up print the AN outcome of the SFX7101 PHY */
6710 if (link_up) {
6711 bnx2x_cl45_read(bp, phy,
6712 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
6713 &val2);
6714 vars->line_speed = SPEED_10000;
6715 vars->duplex = DUPLEX_FULL;
6716 DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
6717 val2, (val2 & (1<<14)));
6718 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
6719 bnx2x_ext_phy_resolve_fc(phy, params, vars);
5668 } 6720 }
6721 return link_up;
5669} 6722}
5670 6723
5671 6724
5672/* 6725static u8 bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
5673 *------------------------------------------------------------------------
5674 * bnx2x_override_led_value -
5675 *
5676 * Override the led value of the requsted led
5677 *
5678 *------------------------------------------------------------------------
5679 */
5680u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
5681 u32 led_idx, u32 value)
5682{ 6726{
5683 u32 reg_val; 6727 if (*len < 5)
6728 return -EINVAL;
6729 str[0] = (spirom_ver & 0xFF);
6730 str[1] = (spirom_ver & 0xFF00) >> 8;
6731 str[2] = (spirom_ver & 0xFF0000) >> 16;
6732 str[3] = (spirom_ver & 0xFF000000) >> 24;
6733 str[4] = '\0';
6734 *len -= 5;
6735 return 0;
6736}
5684 6737
5685 /* If port 0 then use EMAC0, else use EMAC1*/ 6738void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
5686 u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 6739{
6740 u16 val, cnt;
5687 6741
5688 DP(NETIF_MSG_LINK, 6742 bnx2x_cl45_read(bp, phy,
5689 "bnx2x_override_led_value() port %x led_idx %d value %d\n", 6743 MDIO_PMA_DEVAD,
5690 port, led_idx, value); 6744 MDIO_PMA_REG_7101_RESET, &val);
5691
5692 switch (led_idx) {
5693 case 0: /* 10MB led */
5694 /* Read the current value of the LED register in
5695 the EMAC block */
5696 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
5697 /* Set the OVERRIDE bit to 1 */
5698 reg_val |= EMAC_LED_OVERRIDE;
5699 /* If value is 1, set the 10M_OVERRIDE bit,
5700 otherwise reset it.*/
5701 reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
5702 (reg_val & ~EMAC_LED_10MB_OVERRIDE);
5703 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5704 break;
5705 case 1: /*100MB led */
5706 /*Read the current value of the LED register in
5707 the EMAC block */
5708 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
5709 /* Set the OVERRIDE bit to 1 */
5710 reg_val |= EMAC_LED_OVERRIDE;
5711 /* If value is 1, set the 100M_OVERRIDE bit,
5712 otherwise reset it.*/
5713 reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
5714 (reg_val & ~EMAC_LED_100MB_OVERRIDE);
5715 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5716 break;
5717 case 2: /* 1000MB led */
5718 /* Read the current value of the LED register in the
5719 EMAC block */
5720 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
5721 /* Set the OVERRIDE bit to 1 */
5722 reg_val |= EMAC_LED_OVERRIDE;
5723 /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
5724 reset it. */
5725 reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
5726 (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
5727 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5728 break;
5729 case 3: /* 2500MB led */
5730 /* Read the current value of the LED register in the
5731 EMAC block*/
5732 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
5733 /* Set the OVERRIDE bit to 1 */
5734 reg_val |= EMAC_LED_OVERRIDE;
5735 /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
5736 reset it.*/
5737 reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
5738 (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
5739 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5740 break;
5741 case 4: /*10G led */
5742 if (port == 0) {
5743 REG_WR(bp, NIG_REG_LED_10G_P0,
5744 value);
5745 } else {
5746 REG_WR(bp, NIG_REG_LED_10G_P1,
5747 value);
5748 }
5749 break;
5750 case 5: /* TRAFFIC led */
5751 /* Find if the traffic control is via BMAC or EMAC */
5752 if (port == 0)
5753 reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
5754 else
5755 reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
5756
5757 /* Override the traffic led in the EMAC:*/
5758 if (reg_val == 1) {
5759 /* Read the current value of the LED register in
5760 the EMAC block */
5761 reg_val = REG_RD(bp, emac_base +
5762 EMAC_REG_EMAC_LED);
5763 /* Set the TRAFFIC_OVERRIDE bit to 1 */
5764 reg_val |= EMAC_LED_OVERRIDE;
5765 /* If value is 1, set the TRAFFIC bit, otherwise
5766 reset it.*/
5767 reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
5768 (reg_val & ~EMAC_LED_TRAFFIC);
5769 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
5770 } else { /* Override the traffic led in the BMAC: */
5771 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
5772 + port*4, 1);
5773 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
5774 value);
5775 }
5776 break;
5777 default:
5778 DP(NETIF_MSG_LINK,
5779 "bnx2x_override_led_value() unknown led index %d "
5780 "(should be 0-5)\n", led_idx);
5781 return -EINVAL;
5782 }
5783 6745
5784 return 0; 6746 for (cnt = 0; cnt < 10; cnt++) {
6747 msleep(50);
6748 /* Writes a self-clearing reset */
6749 bnx2x_cl45_write(bp, phy,
6750 MDIO_PMA_DEVAD,
6751 MDIO_PMA_REG_7101_RESET,
6752 (val | (1<<15)));
6753 /* Wait for clear */
6754 bnx2x_cl45_read(bp, phy,
6755 MDIO_PMA_DEVAD,
6756 MDIO_PMA_REG_7101_RESET, &val);
6757
6758 if ((val & (1<<15)) == 0)
6759 break;
6760 }
5785} 6761}
5786 6762
6763static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
6764 struct link_params *params) {
6765 /* Low power mode is controlled by GPIO 2 */
6766 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
6767 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
6768 /* The PHY reset is controlled by GPIO 1 */
6769 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
6770 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
6771}
5787 6772
5788u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed) 6773static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
6774 struct link_params *params, u8 mode)
5789{ 6775{
5790 u8 port = params->port; 6776 u16 val = 0;
5791 u16 hw_led_mode = params->hw_led_mode;
5792 u8 rc = 0;
5793 u32 tmp;
5794 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5795 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5796 struct bnx2x *bp = params->bp; 6777 struct bnx2x *bp = params->bp;
5797 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
5798 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
5799 speed, hw_led_mode);
5800 switch (mode) { 6778 switch (mode) {
6779 case LED_MODE_FRONT_PANEL_OFF:
5801 case LED_MODE_OFF: 6780 case LED_MODE_OFF:
5802 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); 6781 val = 2;
5803 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 6782 break;
5804 SHARED_HW_CFG_LED_MAC1); 6783 case LED_MODE_ON:
5805 6784 val = 1;
5806 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
5807 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
5808 break; 6785 break;
5809
5810 case LED_MODE_OPER: 6786 case LED_MODE_OPER:
5811 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) { 6787 val = 0;
5812 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); 6788 break;
5813 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); 6789 }
6790 bnx2x_cl45_write(bp, phy,
6791 MDIO_PMA_DEVAD,
6792 MDIO_PMA_REG_7107_LINK_LED_CNTL,
6793 val);
6794}
6795
6796/******************************************************************/
6797/* STATIC PHY DECLARATION */
6798/******************************************************************/
6799
6800static struct bnx2x_phy phy_null = {
6801 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
6802 .addr = 0,
6803 .flags = FLAGS_INIT_XGXS_FIRST,
6804 .def_md_devad = 0,
6805 .reserved = 0,
6806 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6807 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6808 .mdio_ctrl = 0,
6809 .supported = 0,
6810 .media_type = ETH_PHY_NOT_PRESENT,
6811 .ver_addr = 0,
6812 .req_flow_ctrl = 0,
6813 .req_line_speed = 0,
6814 .speed_cap_mask = 0,
6815 .req_duplex = 0,
6816 .rsrv = 0,
6817 .config_init = (config_init_t)NULL,
6818 .read_status = (read_status_t)NULL,
6819 .link_reset = (link_reset_t)NULL,
6820 .config_loopback = (config_loopback_t)NULL,
6821 .format_fw_ver = (format_fw_ver_t)NULL,
6822 .hw_reset = (hw_reset_t)NULL,
6823 .set_link_led = (set_link_led_t)NULL,
6824 .phy_specific_func = (phy_specific_func_t)NULL
6825};
6826
6827static struct bnx2x_phy phy_serdes = {
6828 .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
6829 .addr = 0xff,
6830 .flags = 0,
6831 .def_md_devad = 0,
6832 .reserved = 0,
6833 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6834 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6835 .mdio_ctrl = 0,
6836 .supported = (SUPPORTED_10baseT_Half |
6837 SUPPORTED_10baseT_Full |
6838 SUPPORTED_100baseT_Half |
6839 SUPPORTED_100baseT_Full |
6840 SUPPORTED_1000baseT_Full |
6841 SUPPORTED_2500baseX_Full |
6842 SUPPORTED_TP |
6843 SUPPORTED_Autoneg |
6844 SUPPORTED_Pause |
6845 SUPPORTED_Asym_Pause),
6846 .media_type = ETH_PHY_UNSPECIFIED,
6847 .ver_addr = 0,
6848 .req_flow_ctrl = 0,
6849 .req_line_speed = 0,
6850 .speed_cap_mask = 0,
6851 .req_duplex = 0,
6852 .rsrv = 0,
6853 .config_init = (config_init_t)bnx2x_init_serdes,
6854 .read_status = (read_status_t)bnx2x_link_settings_status,
6855 .link_reset = (link_reset_t)bnx2x_int_link_reset,
6856 .config_loopback = (config_loopback_t)NULL,
6857 .format_fw_ver = (format_fw_ver_t)NULL,
6858 .hw_reset = (hw_reset_t)NULL,
6859 .set_link_led = (set_link_led_t)NULL,
6860 .phy_specific_func = (phy_specific_func_t)NULL
6861};
6862
6863static struct bnx2x_phy phy_xgxs = {
6864 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
6865 .addr = 0xff,
6866 .flags = 0,
6867 .def_md_devad = 0,
6868 .reserved = 0,
6869 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6870 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6871 .mdio_ctrl = 0,
6872 .supported = (SUPPORTED_10baseT_Half |
6873 SUPPORTED_10baseT_Full |
6874 SUPPORTED_100baseT_Half |
6875 SUPPORTED_100baseT_Full |
6876 SUPPORTED_1000baseT_Full |
6877 SUPPORTED_2500baseX_Full |
6878 SUPPORTED_10000baseT_Full |
6879 SUPPORTED_FIBRE |
6880 SUPPORTED_Autoneg |
6881 SUPPORTED_Pause |
6882 SUPPORTED_Asym_Pause),
6883 .media_type = ETH_PHY_UNSPECIFIED,
6884 .ver_addr = 0,
6885 .req_flow_ctrl = 0,
6886 .req_line_speed = 0,
6887 .speed_cap_mask = 0,
6888 .req_duplex = 0,
6889 .rsrv = 0,
6890 .config_init = (config_init_t)bnx2x_init_xgxs,
6891 .read_status = (read_status_t)bnx2x_link_settings_status,
6892 .link_reset = (link_reset_t)bnx2x_int_link_reset,
6893 .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
6894 .format_fw_ver = (format_fw_ver_t)NULL,
6895 .hw_reset = (hw_reset_t)NULL,
6896 .set_link_led = (set_link_led_t)NULL,
6897 .phy_specific_func = (phy_specific_func_t)NULL
6898};
6899
6900static struct bnx2x_phy phy_7101 = {
6901 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
6902 .addr = 0xff,
6903 .flags = FLAGS_FAN_FAILURE_DET_REQ,
6904 .def_md_devad = 0,
6905 .reserved = 0,
6906 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6907 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6908 .mdio_ctrl = 0,
6909 .supported = (SUPPORTED_10000baseT_Full |
6910 SUPPORTED_TP |
6911 SUPPORTED_Autoneg |
6912 SUPPORTED_Pause |
6913 SUPPORTED_Asym_Pause),
6914 .media_type = ETH_PHY_BASE_T,
6915 .ver_addr = 0,
6916 .req_flow_ctrl = 0,
6917 .req_line_speed = 0,
6918 .speed_cap_mask = 0,
6919 .req_duplex = 0,
6920 .rsrv = 0,
6921 .config_init = (config_init_t)bnx2x_7101_config_init,
6922 .read_status = (read_status_t)bnx2x_7101_read_status,
6923 .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
6924 .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback,
6925 .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver,
6926 .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset,
6927 .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led,
6928 .phy_specific_func = (phy_specific_func_t)NULL
6929};
6930static struct bnx2x_phy phy_8073 = {
6931 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6932 .addr = 0xff,
6933 .flags = FLAGS_HW_LOCK_REQUIRED,
6934 .def_md_devad = 0,
6935 .reserved = 0,
6936 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6937 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6938 .mdio_ctrl = 0,
6939 .supported = (SUPPORTED_10000baseT_Full |
6940 SUPPORTED_2500baseX_Full |
6941 SUPPORTED_1000baseT_Full |
6942 SUPPORTED_FIBRE |
6943 SUPPORTED_Autoneg |
6944 SUPPORTED_Pause |
6945 SUPPORTED_Asym_Pause),
6946 .media_type = ETH_PHY_UNSPECIFIED,
6947 .ver_addr = 0,
6948 .req_flow_ctrl = 0,
6949 .req_line_speed = 0,
6950 .speed_cap_mask = 0,
6951 .req_duplex = 0,
6952 .rsrv = 0,
6953 .config_init = (config_init_t)bnx2x_8073_config_init,
6954 .read_status = (read_status_t)bnx2x_8073_read_status,
6955 .link_reset = (link_reset_t)bnx2x_8073_link_reset,
6956 .config_loopback = (config_loopback_t)NULL,
6957 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
6958 .hw_reset = (hw_reset_t)NULL,
6959 .set_link_led = (set_link_led_t)NULL,
6960 .phy_specific_func = (phy_specific_func_t)NULL
6961};
6962static struct bnx2x_phy phy_8705 = {
6963 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
6964 .addr = 0xff,
6965 .flags = FLAGS_INIT_XGXS_FIRST,
6966 .def_md_devad = 0,
6967 .reserved = 0,
6968 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6969 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6970 .mdio_ctrl = 0,
6971 .supported = (SUPPORTED_10000baseT_Full |
6972 SUPPORTED_FIBRE |
6973 SUPPORTED_Pause |
6974 SUPPORTED_Asym_Pause),
6975 .media_type = ETH_PHY_XFP_FIBER,
6976 .ver_addr = 0,
6977 .req_flow_ctrl = 0,
6978 .req_line_speed = 0,
6979 .speed_cap_mask = 0,
6980 .req_duplex = 0,
6981 .rsrv = 0,
6982 .config_init = (config_init_t)bnx2x_8705_config_init,
6983 .read_status = (read_status_t)bnx2x_8705_read_status,
6984 .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
6985 .config_loopback = (config_loopback_t)NULL,
6986 .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver,
6987 .hw_reset = (hw_reset_t)NULL,
6988 .set_link_led = (set_link_led_t)NULL,
6989 .phy_specific_func = (phy_specific_func_t)NULL
6990};
6991static struct bnx2x_phy phy_8706 = {
6992 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
6993 .addr = 0xff,
6994 .flags = FLAGS_INIT_XGXS_FIRST,
6995 .def_md_devad = 0,
6996 .reserved = 0,
6997 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6998 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
6999 .mdio_ctrl = 0,
7000 .supported = (SUPPORTED_10000baseT_Full |
7001 SUPPORTED_1000baseT_Full |
7002 SUPPORTED_FIBRE |
7003 SUPPORTED_Pause |
7004 SUPPORTED_Asym_Pause),
7005 .media_type = ETH_PHY_SFP_FIBER,
7006 .ver_addr = 0,
7007 .req_flow_ctrl = 0,
7008 .req_line_speed = 0,
7009 .speed_cap_mask = 0,
7010 .req_duplex = 0,
7011 .rsrv = 0,
7012 .config_init = (config_init_t)bnx2x_8706_config_init,
7013 .read_status = (read_status_t)bnx2x_8706_read_status,
7014 .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
7015 .config_loopback = (config_loopback_t)NULL,
7016 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
7017 .hw_reset = (hw_reset_t)NULL,
7018 .set_link_led = (set_link_led_t)NULL,
7019 .phy_specific_func = (phy_specific_func_t)NULL
7020};
7021
7022static struct bnx2x_phy phy_8726 = {
7023 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
7024 .addr = 0xff,
7025 .flags = (FLAGS_HW_LOCK_REQUIRED |
7026 FLAGS_INIT_XGXS_FIRST),
7027 .def_md_devad = 0,
7028 .reserved = 0,
7029 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7030 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7031 .mdio_ctrl = 0,
7032 .supported = (SUPPORTED_10000baseT_Full |
7033 SUPPORTED_1000baseT_Full |
7034 SUPPORTED_Autoneg |
7035 SUPPORTED_FIBRE |
7036 SUPPORTED_Pause |
7037 SUPPORTED_Asym_Pause),
7038 .media_type = ETH_PHY_SFP_FIBER,
7039 .ver_addr = 0,
7040 .req_flow_ctrl = 0,
7041 .req_line_speed = 0,
7042 .speed_cap_mask = 0,
7043 .req_duplex = 0,
7044 .rsrv = 0,
7045 .config_init = (config_init_t)bnx2x_8726_config_init,
7046 .read_status = (read_status_t)bnx2x_8726_read_status,
7047 .link_reset = (link_reset_t)bnx2x_8726_link_reset,
7048 .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback,
7049 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
7050 .hw_reset = (hw_reset_t)NULL,
7051 .set_link_led = (set_link_led_t)NULL,
7052 .phy_specific_func = (phy_specific_func_t)NULL
7053};
7054
7055static struct bnx2x_phy phy_8727 = {
7056 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
7057 .addr = 0xff,
7058 .flags = FLAGS_FAN_FAILURE_DET_REQ,
7059 .def_md_devad = 0,
7060 .reserved = 0,
7061 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7062 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7063 .mdio_ctrl = 0,
7064 .supported = (SUPPORTED_10000baseT_Full |
7065 SUPPORTED_1000baseT_Full |
7066 SUPPORTED_FIBRE |
7067 SUPPORTED_Pause |
7068 SUPPORTED_Asym_Pause),
7069 .media_type = ETH_PHY_SFP_FIBER,
7070 .ver_addr = 0,
7071 .req_flow_ctrl = 0,
7072 .req_line_speed = 0,
7073 .speed_cap_mask = 0,
7074 .req_duplex = 0,
7075 .rsrv = 0,
7076 .config_init = (config_init_t)bnx2x_8727_config_init,
7077 .read_status = (read_status_t)bnx2x_8727_read_status,
7078 .link_reset = (link_reset_t)bnx2x_8727_link_reset,
7079 .config_loopback = (config_loopback_t)NULL,
7080 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
7081 .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset,
7082 .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led,
7083 .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
7084};
7085static struct bnx2x_phy phy_8481 = {
7086 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
7087 .addr = 0xff,
7088 .flags = FLAGS_FAN_FAILURE_DET_REQ |
7089 FLAGS_REARM_LATCH_SIGNAL,
7090 .def_md_devad = 0,
7091 .reserved = 0,
7092 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7093 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7094 .mdio_ctrl = 0,
7095 .supported = (SUPPORTED_10baseT_Half |
7096 SUPPORTED_10baseT_Full |
7097 SUPPORTED_100baseT_Half |
7098 SUPPORTED_100baseT_Full |
7099 SUPPORTED_1000baseT_Full |
7100 SUPPORTED_10000baseT_Full |
7101 SUPPORTED_TP |
7102 SUPPORTED_Autoneg |
7103 SUPPORTED_Pause |
7104 SUPPORTED_Asym_Pause),
7105 .media_type = ETH_PHY_BASE_T,
7106 .ver_addr = 0,
7107 .req_flow_ctrl = 0,
7108 .req_line_speed = 0,
7109 .speed_cap_mask = 0,
7110 .req_duplex = 0,
7111 .rsrv = 0,
7112 .config_init = (config_init_t)bnx2x_8481_config_init,
7113 .read_status = (read_status_t)bnx2x_848xx_read_status,
7114 .link_reset = (link_reset_t)bnx2x_8481_link_reset,
7115 .config_loopback = (config_loopback_t)NULL,
7116 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
7117 .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset,
7118 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
7119 .phy_specific_func = (phy_specific_func_t)NULL
7120};
7121
7122static struct bnx2x_phy phy_84823 = {
7123 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
7124 .addr = 0xff,
7125 .flags = FLAGS_FAN_FAILURE_DET_REQ |
7126 FLAGS_REARM_LATCH_SIGNAL,
7127 .def_md_devad = 0,
7128 .reserved = 0,
7129 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7130 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7131 .mdio_ctrl = 0,
7132 .supported = (SUPPORTED_10baseT_Half |
7133 SUPPORTED_10baseT_Full |
7134 SUPPORTED_100baseT_Half |
7135 SUPPORTED_100baseT_Full |
7136 SUPPORTED_1000baseT_Full |
7137 SUPPORTED_10000baseT_Full |
7138 SUPPORTED_TP |
7139 SUPPORTED_Autoneg |
7140 SUPPORTED_Pause |
7141 SUPPORTED_Asym_Pause),
7142 .media_type = ETH_PHY_BASE_T,
7143 .ver_addr = 0,
7144 .req_flow_ctrl = 0,
7145 .req_line_speed = 0,
7146 .speed_cap_mask = 0,
7147 .req_duplex = 0,
7148 .rsrv = 0,
7149 .config_init = (config_init_t)bnx2x_848x3_config_init,
7150 .read_status = (read_status_t)bnx2x_848xx_read_status,
7151 .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
7152 .config_loopback = (config_loopback_t)NULL,
7153 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
7154 .hw_reset = (hw_reset_t)NULL,
7155 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
7156 .phy_specific_func = (phy_specific_func_t)NULL
7157};
7158
7159static struct bnx2x_phy phy_84833 = {
7160 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
7161 .addr = 0xff,
7162 .flags = FLAGS_FAN_FAILURE_DET_REQ |
7163 FLAGS_REARM_LATCH_SIGNAL,
7164 .def_md_devad = 0,
7165 .reserved = 0,
7166 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7167 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7168 .mdio_ctrl = 0,
7169 .supported = (SUPPORTED_10baseT_Half |
7170 SUPPORTED_10baseT_Full |
7171 SUPPORTED_100baseT_Half |
7172 SUPPORTED_100baseT_Full |
7173 SUPPORTED_1000baseT_Full |
7174 SUPPORTED_10000baseT_Full |
7175 SUPPORTED_TP |
7176 SUPPORTED_Autoneg |
7177 SUPPORTED_Pause |
7178 SUPPORTED_Asym_Pause),
7179 .media_type = ETH_PHY_BASE_T,
7180 .ver_addr = 0,
7181 .req_flow_ctrl = 0,
7182 .req_line_speed = 0,
7183 .speed_cap_mask = 0,
7184 .req_duplex = 0,
7185 .rsrv = 0,
7186 .config_init = (config_init_t)bnx2x_848x3_config_init,
7187 .read_status = (read_status_t)bnx2x_848xx_read_status,
7188 .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
7189 .config_loopback = (config_loopback_t)NULL,
7190 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
7191 .hw_reset = (hw_reset_t)NULL,
7192 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
7193 .phy_specific_func = (phy_specific_func_t)NULL
7194};
7195
7196/*****************************************************************/
7197/* */
7198/* Populate the phy according. Main function: bnx2x_populate_phy */
7199/* */
7200/*****************************************************************/
7201
7202static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
7203 struct bnx2x_phy *phy, u8 port,
7204 u8 phy_index)
7205{
7206 /* Get the 4 lanes xgxs config rx and tx */
7207 u32 rx = 0, tx = 0, i;
7208 for (i = 0; i < 2; i++) {
7209 /*
7210 * INT_PHY and EXT_PHY1 share the same value location in the
7211 * shmem. When num_phys is greater than 1, than this value
7212 * applies only to EXT_PHY1
7213 */
7214 if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
7215 rx = REG_RD(bp, shmem_base +
7216 offsetof(struct shmem_region,
7217 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
7218
7219 tx = REG_RD(bp, shmem_base +
7220 offsetof(struct shmem_region,
7221 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
5814 } else { 7222 } else {
5815 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 7223 rx = REG_RD(bp, shmem_base +
5816 hw_led_mode); 7224 offsetof(struct shmem_region,
7225 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
7226
7227 tx = REG_RD(bp, shmem_base +
7228 offsetof(struct shmem_region,
7229 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
5817 } 7230 }
5818 7231
5819 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + 7232 phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
5820 port*4, 0); 7233 phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff);
5821 /* Set blinking rate to ~15.9Hz */
5822 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
5823 LED_BLINK_RATE_VAL);
5824 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
5825 port*4, 1);
5826 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
5827 EMAC_WR(bp, EMAC_REG_EMAC_LED,
5828 (tmp & (~EMAC_LED_OVERRIDE)));
5829 7234
5830 if (CHIP_IS_E1(bp) && 7235 phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff);
5831 ((speed == SPEED_2500) || 7236 phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
5832 (speed == SPEED_1000) || 7237 }
5833 (speed == SPEED_100) || 7238}
5834 (speed == SPEED_10))) {
5835 /* On Everest 1 Ax chip versions for speeds less than
5836 10G LED scheme is different */
5837 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
5838 + port*4, 1);
5839 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
5840 port*4, 0);
5841 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
5842 port*4, 1);
5843 }
5844 break;
5845 7239
5846 default: 7240static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base,
5847 rc = -EINVAL; 7241 u8 phy_index, u8 port)
5848 DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n", 7242{
5849 mode); 7243 u32 ext_phy_config = 0;
7244 switch (phy_index) {
7245 case EXT_PHY1:
7246 ext_phy_config = REG_RD(bp, shmem_base +
7247 offsetof(struct shmem_region,
7248 dev_info.port_hw_config[port].external_phy_config));
5850 break; 7249 break;
7250 case EXT_PHY2:
7251 ext_phy_config = REG_RD(bp, shmem_base +
7252 offsetof(struct shmem_region,
7253 dev_info.port_hw_config[port].external_phy_config2));
7254 break;
7255 default:
7256 DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index);
7257 return -EINVAL;
5851 } 7258 }
5852 return rc;
5853 7259
7260 return ext_phy_config;
5854} 7261}
5855 7262static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
5856u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars) 7263 struct bnx2x_phy *phy)
5857{ 7264{
5858 struct bnx2x *bp = params->bp; 7265 u32 phy_addr;
5859 u16 gp_status = 0; 7266 u32 chip_id;
7267 u32 switch_cfg = (REG_RD(bp, shmem_base +
7268 offsetof(struct shmem_region,
7269 dev_info.port_feature_config[port].link_config)) &
7270 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7271 chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
7272 switch (switch_cfg) {
7273 case SWITCH_CFG_1G:
7274 phy_addr = REG_RD(bp,
7275 NIG_REG_SERDES0_CTRL_PHY_ADDR +
7276 port * 0x10);
7277 *phy = phy_serdes;
7278 break;
7279 case SWITCH_CFG_10G:
7280 phy_addr = REG_RD(bp,
7281 NIG_REG_XGXS0_CTRL_PHY_ADDR +
7282 port * 0x18);
7283 *phy = phy_xgxs;
7284 break;
7285 default:
7286 DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
7287 return -EINVAL;
7288 }
7289 phy->addr = (u8)phy_addr;
7290 phy->mdio_ctrl = bnx2x_get_emac_base(bp,
7291 SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
7292 port);
7293 if (CHIP_IS_E2(bp))
7294 phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR;
7295 else
7296 phy->def_md_devad = DEFAULT_PHY_DEV_ADDR;
5860 7297
5861 CL45_RD_OVER_CL22(bp, params->port, 7298 DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n",
5862 params->phy_addr, 7299 port, phy->addr, phy->mdio_ctrl);
5863 MDIO_REG_BANK_GP_STATUS,
5864 MDIO_GP_STATUS_TOP_AN_STATUS1,
5865 &gp_status);
5866 /* link is up only if both local phy and external phy are up */
5867 if ((gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) &&
5868 bnx2x_ext_phy_is_link_up(params, vars, 1))
5869 return 0;
5870 7300
5871 return -ESRCH; 7301 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY);
7302 return 0;
5872} 7303}
5873 7304
5874static u8 bnx2x_link_initialize(struct link_params *params, 7305static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
5875 struct link_vars *vars) 7306 u8 phy_index,
7307 u32 shmem_base,
7308 u32 shmem2_base,
7309 u8 port,
7310 struct bnx2x_phy *phy)
5876{ 7311{
5877 struct bnx2x *bp = params->bp; 7312 u32 ext_phy_config, phy_type, config2;
5878 u8 port = params->port; 7313 u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
5879 u8 rc = 0; 7314 ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base,
5880 u8 non_ext_phy; 7315 phy_index, port);
5881 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 7316 phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
7317 /* Select the phy type */
7318 switch (phy_type) {
7319 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7320 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED;
7321 *phy = phy_8073;
7322 break;
7323 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7324 *phy = phy_8705;
7325 break;
7326 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7327 *phy = phy_8706;
7328 break;
7329 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7330 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
7331 *phy = phy_8726;
7332 break;
7333 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
7334 /* BCM8727_NOC => BCM8727 no over current */
7335 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
7336 *phy = phy_8727;
7337 phy->flags |= FLAGS_NOC;
7338 break;
7339 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
7340 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
7341 *phy = phy_8727;
7342 break;
7343 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7344 *phy = phy_8481;
7345 break;
7346 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
7347 *phy = phy_84823;
7348 break;
7349 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
7350 *phy = phy_84833;
7351 break;
7352 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7353 *phy = phy_7101;
7354 break;
7355 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7356 *phy = phy_null;
7357 return -EINVAL;
7358 default:
7359 *phy = phy_null;
7360 return 0;
7361 }
5882 7362
5883 /* Activate the external PHY */ 7363 phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
5884 bnx2x_ext_phy_reset(params, vars); 7364 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
5885 7365
5886 bnx2x_set_aer_mmd(params, vars); 7366 /*
7367 * The shmem address of the phy version is located on different
7368 * structures. In case this structure is too old, do not set
7369 * the address
7370 */
7371 config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
7372 dev_info.shared_hw_config.config2));
7373 if (phy_index == EXT_PHY1) {
7374 phy->ver_addr = shmem_base + offsetof(struct shmem_region,
7375 port_mb[port].ext_phy_fw_version);
7376
7377 /* Check specific mdc mdio settings */
7378 if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
7379 mdc_mdio_access = config2 &
7380 SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
7381 } else {
7382 u32 size = REG_RD(bp, shmem2_base);
5887 7383
5888 if (vars->phy_flags & PHY_XGXS_FLAG) 7384 if (size >
5889 bnx2x_set_master_ln(params); 7385 offsetof(struct shmem2_region, ext_phy_fw_version2)) {
7386 phy->ver_addr = shmem2_base +
7387 offsetof(struct shmem2_region,
7388 ext_phy_fw_version2[port]);
7389 }
7390 /* Check specific mdc mdio settings */
7391 if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
7392 mdc_mdio_access = (config2 &
7393 SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >>
7394 (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
7395 SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
7396 }
7397 phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
5890 7398
5891 rc = bnx2x_reset_unicore(params); 7399 /*
5892 /* reset the SerDes and wait for reset bit return low */ 7400 * In case mdc/mdio_access of the external phy is different than the
5893 if (rc != 0) 7401 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
5894 return rc; 7402 * to prevent one port interfere with another port's CL45 operations.
7403 */
7404 if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH)
7405 phy->flags |= FLAGS_HW_LOCK_REQUIRED;
7406 DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
7407 phy_type, port, phy_index);
7408 DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n",
7409 phy->addr, phy->mdio_ctrl);
7410 return 0;
7411}
5895 7412
5896 bnx2x_set_aer_mmd(params, vars); 7413static u8 bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
7414 u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
7415{
7416 u8 status = 0;
7417 phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
7418 if (phy_index == INT_PHY)
7419 return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
7420 status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
7421 port, phy);
7422 return status;
7423}
5897 7424
5898 /* setting the masterLn_def again after the reset */ 7425static void bnx2x_phy_def_cfg(struct link_params *params,
5899 if (vars->phy_flags & PHY_XGXS_FLAG) { 7426 struct bnx2x_phy *phy,
5900 bnx2x_set_master_ln(params); 7427 u8 phy_index)
5901 bnx2x_set_swap_lanes(params); 7428{
7429 struct bnx2x *bp = params->bp;
7430 u32 link_config;
7431 /* Populate the default phy configuration for MF mode */
7432 if (phy_index == EXT_PHY2) {
7433 link_config = REG_RD(bp, params->shmem_base +
7434 offsetof(struct shmem_region, dev_info.
7435 port_feature_config[params->port].link_config2));
7436 phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
7437 offsetof(struct shmem_region,
7438 dev_info.
7439 port_hw_config[params->port].speed_capability_mask2));
7440 } else {
7441 link_config = REG_RD(bp, params->shmem_base +
7442 offsetof(struct shmem_region, dev_info.
7443 port_feature_config[params->port].link_config));
7444 phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
7445 offsetof(struct shmem_region,
7446 dev_info.
7447 port_hw_config[params->port].speed_capability_mask));
7448 }
7449 DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
7450 " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
7451
7452 phy->req_duplex = DUPLEX_FULL;
7453 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7454 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7455 phy->req_duplex = DUPLEX_HALF;
7456 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7457 phy->req_line_speed = SPEED_10;
7458 break;
7459 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7460 phy->req_duplex = DUPLEX_HALF;
7461 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7462 phy->req_line_speed = SPEED_100;
7463 break;
7464 case PORT_FEATURE_LINK_SPEED_1G:
7465 phy->req_line_speed = SPEED_1000;
7466 break;
7467 case PORT_FEATURE_LINK_SPEED_2_5G:
7468 phy->req_line_speed = SPEED_2500;
7469 break;
7470 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7471 phy->req_line_speed = SPEED_10000;
7472 break;
7473 default:
7474 phy->req_line_speed = SPEED_AUTO_NEG;
7475 break;
5902 } 7476 }
5903 7477
5904 if (vars->phy_flags & PHY_XGXS_FLAG) { 7478 switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) {
5905 if ((params->req_line_speed && 7479 case PORT_FEATURE_FLOW_CONTROL_AUTO:
5906 ((params->req_line_speed == SPEED_100) || 7480 phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
5907 (params->req_line_speed == SPEED_10))) || 7481 break;
5908 (!params->req_line_speed && 7482 case PORT_FEATURE_FLOW_CONTROL_TX:
5909 (params->speed_cap_mask >= 7483 phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX;
5910 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && 7484 break;
5911 (params->speed_cap_mask < 7485 case PORT_FEATURE_FLOW_CONTROL_RX:
5912 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 7486 phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX;
5913 )) { 7487 break;
5914 vars->phy_flags |= PHY_SGMII_FLAG; 7488 case PORT_FEATURE_FLOW_CONTROL_BOTH:
5915 } else { 7489 phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
5916 vars->phy_flags &= ~PHY_SGMII_FLAG; 7490 break;
5917 } 7491 default:
7492 phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7493 break;
5918 } 7494 }
5919 /* In case of external phy existance, the line speed would be the 7495}
5920 line speed linked up by the external phy. In case it is direct only,
5921 then the line_speed during initialization will be equal to the
5922 req_line_speed*/
5923 vars->line_speed = params->req_line_speed;
5924 7496
5925 bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc); 7497u32 bnx2x_phy_selection(struct link_params *params)
7498{
7499 u32 phy_config_swapped, prio_cfg;
7500 u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;
7501
7502 phy_config_swapped = params->multi_phy_config &
7503 PORT_HW_CFG_PHY_SWAPPED_ENABLED;
7504
7505 prio_cfg = params->multi_phy_config &
7506 PORT_HW_CFG_PHY_SELECTION_MASK;
7507
7508 if (phy_config_swapped) {
7509 switch (prio_cfg) {
7510 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
7511 return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
7512 break;
7513 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
7514 return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
7515 break;
7516 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
7517 return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
7518 break;
7519 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
7520 return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
7521 break;
7522 }
7523 } else
7524 return_cfg = prio_cfg;
5926 7525
5927 /* init ext phy and enable link state int */ 7526 return return_cfg;
5928 non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) || 7527}
5929 (params->loopback_mode == LOOPBACK_XGXS_10));
5930 7528
5931 if (non_ext_phy || 7529
5932 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) || 7530u8 bnx2x_phy_probe(struct link_params *params)
5933 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) || 7531{
5934 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) || 7532 u8 phy_index, actual_phy_idx, link_cfg_idx;
5935 (params->loopback_mode == LOOPBACK_EXT_PHY)) { 7533 u32 phy_config_swapped;
5936 if (params->req_line_speed == SPEED_AUTO_NEG) 7534 struct bnx2x *bp = params->bp;
5937 bnx2x_set_parallel_detection(params, vars->phy_flags); 7535 struct bnx2x_phy *phy;
5938 bnx2x_init_internal_phy(params, vars, non_ext_phy); 7536 params->num_phys = 0;
7537 DP(NETIF_MSG_LINK, "Begin phy probe\n");
7538 phy_config_swapped = params->multi_phy_config &
7539 PORT_HW_CFG_PHY_SWAPPED_ENABLED;
7540
7541 for (phy_index = INT_PHY; phy_index < MAX_PHYS;
7542 phy_index++) {
7543 link_cfg_idx = LINK_CONFIG_IDX(phy_index);
7544 actual_phy_idx = phy_index;
7545 if (phy_config_swapped) {
7546 if (phy_index == EXT_PHY1)
7547 actual_phy_idx = EXT_PHY2;
7548 else if (phy_index == EXT_PHY2)
7549 actual_phy_idx = EXT_PHY1;
7550 }
7551 DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x,"
7552 " actual_phy_idx %x\n", phy_config_swapped,
7553 phy_index, actual_phy_idx);
7554 phy = &params->phy[actual_phy_idx];
7555 if (bnx2x_populate_phy(bp, phy_index, params->shmem_base,
7556 params->shmem2_base, params->port,
7557 phy) != 0) {
7558 params->num_phys = 0;
7559 DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n",
7560 phy_index);
7561 for (phy_index = INT_PHY;
7562 phy_index < MAX_PHYS;
7563 phy_index++)
7564 *phy = phy_null;
7565 return -EINVAL;
7566 }
7567 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
7568 break;
7569
7570 bnx2x_phy_def_cfg(params, phy, phy_index);
7571 params->num_phys++;
5939 } 7572 }
5940 7573
5941 if (!non_ext_phy) 7574 DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys);
5942 rc |= bnx2x_ext_phy_init(params, vars); 7575 return 0;
7576}
5943 7577
5944 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 7578static void set_phy_vars(struct link_params *params)
5945 (NIG_STATUS_XGXS0_LINK10G | 7579{
5946 NIG_STATUS_XGXS0_LINK_STATUS | 7580 struct bnx2x *bp = params->bp;
5947 NIG_STATUS_SERDES0_LINK_STATUS)); 7581 u8 actual_phy_idx, phy_index, link_cfg_idx;
7582 u8 phy_config_swapped = params->multi_phy_config &
7583 PORT_HW_CFG_PHY_SWAPPED_ENABLED;
7584 for (phy_index = INT_PHY; phy_index < params->num_phys;
7585 phy_index++) {
7586 link_cfg_idx = LINK_CONFIG_IDX(phy_index);
7587 actual_phy_idx = phy_index;
7588 if (phy_config_swapped) {
7589 if (phy_index == EXT_PHY1)
7590 actual_phy_idx = EXT_PHY2;
7591 else if (phy_index == EXT_PHY2)
7592 actual_phy_idx = EXT_PHY1;
7593 }
7594 params->phy[actual_phy_idx].req_flow_ctrl =
7595 params->req_flow_ctrl[link_cfg_idx];
5948 7596
5949 return rc; 7597 params->phy[actual_phy_idx].req_line_speed =
7598 params->req_line_speed[link_cfg_idx];
5950 7599
5951} 7600 params->phy[actual_phy_idx].speed_cap_mask =
7601 params->speed_cap_mask[link_cfg_idx];
5952 7602
7603 params->phy[actual_phy_idx].req_duplex =
7604 params->req_duplex[link_cfg_idx];
7605
7606 DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
7607 " speed_cap_mask %x\n",
7608 params->phy[actual_phy_idx].req_flow_ctrl,
7609 params->phy[actual_phy_idx].req_line_speed,
7610 params->phy[actual_phy_idx].speed_cap_mask);
7611 }
7612}
5953 7613
5954u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) 7614u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
5955{ 7615{
5956 struct bnx2x *bp = params->bp; 7616 struct bnx2x *bp = params->bp;
5957 u32 val;
5958
5959 DP(NETIF_MSG_LINK, "Phy Initialization started\n"); 7617 DP(NETIF_MSG_LINK, "Phy Initialization started\n");
5960 DP(NETIF_MSG_LINK, "req_speed %d, req_flowctrl %d\n", 7618 DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
5961 params->req_line_speed, params->req_flow_ctrl); 7619 params->req_line_speed[0], params->req_flow_ctrl[0]);
7620 DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
7621 params->req_line_speed[1], params->req_flow_ctrl[1]);
5962 vars->link_status = 0; 7622 vars->link_status = 0;
5963 vars->phy_link_up = 0; 7623 vars->phy_link_up = 0;
5964 vars->link_up = 0; 7624 vars->link_up = 0;
@@ -5966,11 +7626,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
5966 vars->duplex = DUPLEX_FULL; 7626 vars->duplex = DUPLEX_FULL;
5967 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 7627 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
5968 vars->mac_type = MAC_TYPE_NONE; 7628 vars->mac_type = MAC_TYPE_NONE;
5969 7629 vars->phy_flags = 0;
5970 if (params->switch_cfg == SWITCH_CFG_1G)
5971 vars->phy_flags = PHY_SERDES_FLAG;
5972 else
5973 vars->phy_flags = PHY_XGXS_FLAG;
5974 7630
5975 /* disable attentions */ 7631 /* disable attentions */
5976 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, 7632 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
@@ -5981,55 +7637,13 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
5981 7637
5982 bnx2x_emac_init(params, vars); 7638 bnx2x_emac_init(params, vars);
5983 7639
5984 if (CHIP_REV_IS_FPGA(bp)) { 7640 if (params->num_phys == 0) {
5985 7641 DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
5986 vars->link_up = 1; 7642 return -EINVAL;
5987 vars->line_speed = SPEED_10000; 7643 }
5988 vars->duplex = DUPLEX_FULL; 7644 set_phy_vars(params);
5989 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
5990 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
5991 /* enable on E1.5 FPGA */
5992 if (CHIP_IS_E1H(bp)) {
5993 vars->flow_ctrl |=
5994 (BNX2X_FLOW_CTRL_TX |
5995 BNX2X_FLOW_CTRL_RX);
5996 vars->link_status |=
5997 (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
5998 LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
5999 }
6000
6001 bnx2x_emac_enable(params, vars, 0);
6002 bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
6003 /* disable drain */
6004 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
6005
6006 /* update shared memory */
6007 bnx2x_update_mng(params, vars->link_status);
6008
6009 return 0;
6010
6011 } else
6012 if (CHIP_REV_IS_EMUL(bp)) {
6013
6014 vars->link_up = 1;
6015 vars->line_speed = SPEED_10000;
6016 vars->duplex = DUPLEX_FULL;
6017 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6018 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
6019
6020 bnx2x_bmac_enable(params, vars, 0);
6021
6022 bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
6023 /* Disable drain */
6024 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
6025 + params->port*4, 0);
6026
6027 /* update shared memory */
6028 bnx2x_update_mng(params, vars->link_status);
6029
6030 return 0;
6031 7645
6032 } else 7646 DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
6033 if (params->loopback_mode == LOOPBACK_BMAC) { 7647 if (params->loopback_mode == LOOPBACK_BMAC) {
6034 7648
6035 vars->link_up = 1; 7649 vars->link_up = 1;
@@ -6040,12 +7654,12 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
6040 7654
6041 vars->phy_flags = PHY_XGXS_FLAG; 7655 vars->phy_flags = PHY_XGXS_FLAG;
6042 7656
6043 bnx2x_phy_deassert(params, vars->phy_flags); 7657 bnx2x_xgxs_deassert(params);
7658
6044 /* set bmac loopback */ 7659 /* set bmac loopback */
6045 bnx2x_bmac_enable(params, vars, 1); 7660 bnx2x_bmac_enable(params, vars, 1);
6046 7661
6047 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + 7662 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
6048 params->port*4, 0);
6049 7663
6050 } else if (params->loopback_mode == LOOPBACK_EMAC) { 7664 } else if (params->loopback_mode == LOOPBACK_EMAC) {
6051 7665
@@ -6057,80 +7671,62 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
6057 7671
6058 vars->phy_flags = PHY_XGXS_FLAG; 7672 vars->phy_flags = PHY_XGXS_FLAG;
6059 7673
6060 bnx2x_phy_deassert(params, vars->phy_flags); 7674 bnx2x_xgxs_deassert(params);
6061 /* set bmac loopback */ 7675 /* set bmac loopback */
6062 bnx2x_emac_enable(params, vars, 1); 7676 bnx2x_emac_enable(params, vars, 1);
6063 bnx2x_emac_program(params, vars->line_speed, 7677 bnx2x_emac_program(params, vars);
6064 vars->duplex); 7678 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
6065 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
6066 params->port*4, 0);
6067 7679
6068 } else if ((params->loopback_mode == LOOPBACK_XGXS_10) || 7680 } else if ((params->loopback_mode == LOOPBACK_XGXS) ||
6069 (params->loopback_mode == LOOPBACK_EXT_PHY)) { 7681 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
6070 7682
6071 vars->link_up = 1; 7683 vars->link_up = 1;
6072 vars->line_speed = SPEED_10000;
6073 vars->duplex = DUPLEX_FULL;
6074 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 7684 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7685 vars->duplex = DUPLEX_FULL;
7686 if (params->req_line_speed[0] == SPEED_1000) {
7687 vars->line_speed = SPEED_1000;
7688 vars->mac_type = MAC_TYPE_EMAC;
7689 } else {
7690 vars->line_speed = SPEED_10000;
7691 vars->mac_type = MAC_TYPE_BMAC;
7692 }
6075 7693
6076 vars->phy_flags = PHY_XGXS_FLAG; 7694 bnx2x_xgxs_deassert(params);
6077
6078 val = REG_RD(bp,
6079 NIG_REG_XGXS0_CTRL_PHY_ADDR+
6080 params->port*0x18);
6081 params->phy_addr = (u8)val;
6082
6083 bnx2x_phy_deassert(params, vars->phy_flags);
6084 bnx2x_link_initialize(params, vars); 7695 bnx2x_link_initialize(params, vars);
6085 7696
6086 vars->mac_type = MAC_TYPE_BMAC; 7697 if (params->req_line_speed[0] == SPEED_1000) {
6087 7698 bnx2x_emac_program(params, vars);
6088 bnx2x_bmac_enable(params, vars, 0); 7699 bnx2x_emac_enable(params, vars, 0);
6089 7700 } else
6090 if (params->loopback_mode == LOOPBACK_XGXS_10) { 7701 bnx2x_bmac_enable(params, vars, 0);
7702 if (params->loopback_mode == LOOPBACK_XGXS) {
6091 /* set 10G XGXS loopback */ 7703 /* set 10G XGXS loopback */
6092 bnx2x_set_xgxs_loopback(params, vars, 1); 7704 params->phy[INT_PHY].config_loopback(
7705 &params->phy[INT_PHY],
7706 params);
7707
6093 } else { 7708 } else {
6094 /* set external phy loopback */ 7709 /* set external phy loopback */
6095 bnx2x_ext_phy_loopback(params); 7710 u8 phy_index;
7711 for (phy_index = EXT_PHY1;
7712 phy_index < params->num_phys; phy_index++) {
7713 if (params->phy[phy_index].config_loopback)
7714 params->phy[phy_index].config_loopback(
7715 &params->phy[phy_index],
7716 params);
7717 }
6096 } 7718 }
6097 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + 7719 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
6098 params->port*4, 0);
6099 7720
6100 bnx2x_set_led(params, LED_MODE_OPER, vars->line_speed); 7721 bnx2x_set_led(params, vars,
7722 LED_MODE_OPER, vars->line_speed);
6101 } else 7723 } else
6102 /* No loopback */ 7724 /* No loopback */
6103 { 7725 {
6104 bnx2x_phy_deassert(params, vars->phy_flags); 7726 if (params->switch_cfg == SWITCH_CFG_10G)
6105 switch (params->switch_cfg) { 7727 bnx2x_xgxs_deassert(params);
6106 case SWITCH_CFG_1G: 7728 else
6107 vars->phy_flags |= PHY_SERDES_FLAG; 7729 bnx2x_serdes_deassert(bp, params->port);
6108 if ((params->ext_phy_config &
6109 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK) ==
6110 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482) {
6111 vars->phy_flags |= PHY_SGMII_FLAG;
6112 }
6113
6114 val = REG_RD(bp,
6115 NIG_REG_SERDES0_CTRL_PHY_ADDR+
6116 params->port*0x10);
6117
6118 params->phy_addr = (u8)val;
6119
6120 break;
6121 case SWITCH_CFG_10G:
6122 vars->phy_flags |= PHY_XGXS_FLAG;
6123 val = REG_RD(bp,
6124 NIG_REG_XGXS0_CTRL_PHY_ADDR+
6125 params->port*0x18);
6126 params->phy_addr = (u8)val;
6127
6128 break;
6129 default:
6130 DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
6131 return -EINVAL;
6132 }
6133 DP(NETIF_MSG_LINK, "Phy address = 0x%x\n", params->phy_addr);
6134 7730
6135 bnx2x_link_initialize(params, vars); 7731 bnx2x_link_initialize(params, vars);
6136 msleep(30); 7732 msleep(30);
@@ -6138,38 +7734,20 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
6138 } 7734 }
6139 return 0; 7735 return 0;
6140} 7736}
6141
6142static void bnx2x_8726_reset_phy(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
6143{
6144 DP(NETIF_MSG_LINK, "bnx2x_8726_reset_phy port %d\n", port);
6145
6146 /* Set serial boot control for external load */
6147 bnx2x_cl45_write(bp, port,
6148 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, ext_phy_addr,
6149 MDIO_PMA_DEVAD,
6150 MDIO_PMA_REG_GEN_CTRL, 0x0001);
6151}
6152
6153u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, 7737u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6154 u8 reset_ext_phy) 7738 u8 reset_ext_phy)
6155{ 7739{
6156 struct bnx2x *bp = params->bp; 7740 struct bnx2x *bp = params->bp;
6157 u32 ext_phy_config = params->ext_phy_config; 7741 u8 phy_index, port = params->port, clear_latch_ind = 0;
6158 u8 port = params->port;
6159 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6160 u32 val = REG_RD(bp, params->shmem_base +
6161 offsetof(struct shmem_region, dev_info.
6162 port_feature_config[params->port].
6163 config));
6164 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port); 7742 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
6165 /* disable attentions */ 7743 /* disable attentions */
6166 vars->link_status = 0; 7744 vars->link_status = 0;
6167 bnx2x_update_mng(params, vars->link_status); 7745 bnx2x_update_mng(params, vars->link_status);
6168 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 7746 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
6169 (NIG_MASK_XGXS0_LINK_STATUS | 7747 (NIG_MASK_XGXS0_LINK_STATUS |
6170 NIG_MASK_XGXS0_LINK10G | 7748 NIG_MASK_XGXS0_LINK10G |
6171 NIG_MASK_SERDES0_LINK_STATUS | 7749 NIG_MASK_SERDES0_LINK_STATUS |
6172 NIG_MASK_MI_INT)); 7750 NIG_MASK_MI_INT));
6173 7751
6174 /* activate nig drain */ 7752 /* activate nig drain */
6175 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 7753 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
@@ -6185,77 +7763,34 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6185 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 7763 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6186 7764
6187 msleep(10); 7765 msleep(10);
6188 /* The PHY reset is controled by GPIO 1 7766 /* The PHY reset is controlled by GPIO 1
6189 * Hold it as vars low 7767 * Hold it as vars low
6190 */ 7768 */
6191 /* clear link led */ 7769 /* clear link led */
6192 bnx2x_set_led(params, LED_MODE_OFF, 0); 7770 bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
6193 if (reset_ext_phy) {
6194 switch (ext_phy_type) {
6195 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6196 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6197 break;
6198 7771
6199 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 7772 if (reset_ext_phy) {
6200 { 7773 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
6201 7774 phy_index++) {
6202 /* Disable Transmitter */ 7775 if (params->phy[phy_index].link_reset)
6203 u8 ext_phy_addr = 7776 params->phy[phy_index].link_reset(
6204 XGXS_EXT_PHY_ADDR(params->ext_phy_config); 7777 &params->phy[phy_index],
6205 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 7778 params);
6206 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) 7779 if (params->phy[phy_index].flags &
6207 bnx2x_sfp_set_transmitter(bp, port, 7780 FLAGS_REARM_LATCH_SIGNAL)
6208 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 7781 clear_latch_ind = 1;
6209 ext_phy_addr, 0);
6210 break;
6211 }
6212 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6213 DP(NETIF_MSG_LINK, "Setting 8073 port %d into "
6214 "low power mode\n",
6215 port);
6216 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6217 MISC_REGISTERS_GPIO_OUTPUT_LOW,
6218 port);
6219 break;
6220 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6221 {
6222 u8 ext_phy_addr =
6223 XGXS_EXT_PHY_ADDR(params->ext_phy_config);
6224 /* Set soft reset */
6225 bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr);
6226 break;
6227 }
6228 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6229 {
6230 u8 ext_phy_addr =
6231 XGXS_EXT_PHY_ADDR(params->ext_phy_config);
6232 bnx2x_cl45_write(bp, port,
6233 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
6234 ext_phy_addr,
6235 MDIO_AN_DEVAD,
6236 MDIO_AN_REG_CTRL, 0x0000);
6237 bnx2x_cl45_write(bp, port,
6238 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
6239 ext_phy_addr,
6240 MDIO_PMA_DEVAD,
6241 MDIO_PMA_REG_CTRL, 1);
6242 break;
6243 }
6244 default:
6245 /* HW reset */
6246 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
6247 MISC_REGISTERS_GPIO_OUTPUT_LOW,
6248 port);
6249 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6250 MISC_REGISTERS_GPIO_OUTPUT_LOW,
6251 port);
6252 DP(NETIF_MSG_LINK, "reset external PHY\n");
6253 } 7782 }
6254 } 7783 }
6255 /* reset the SerDes/XGXS */
6256 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
6257 (0x1ff << (port*16)));
6258 7784
7785 if (clear_latch_ind) {
7786 /* Clear latching indication */
7787 bnx2x_rearm_latch_signal(bp, port, 0);
7788 bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
7789 1 << NIG_LATCH_BC_ENABLE_MI_INT);
7790 }
7791 if (params->phy[INT_PHY].link_reset)
7792 params->phy[INT_PHY].link_reset(
7793 &params->phy[INT_PHY], params);
6259 /* reset BigMac */ 7794 /* reset BigMac */
6260 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 7795 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6261 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 7796 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -6269,467 +7804,454 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6269 return 0; 7804 return 0;
6270} 7805}
6271 7806
6272static u8 bnx2x_update_link_down(struct link_params *params, 7807/****************************************************************************/
6273 struct link_vars *vars) 7808/* Common function */
6274{ 7809/****************************************************************************/
6275 struct bnx2x *bp = params->bp; 7810static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
6276 u8 port = params->port; 7811 u32 shmem_base_path[],
6277 7812 u32 shmem2_base_path[], u8 phy_index,
6278 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); 7813 u32 chip_id)
6279 bnx2x_set_led(params, LED_MODE_OFF, 0);
6280
6281 /* indicate no mac active */
6282 vars->mac_type = MAC_TYPE_NONE;
6283
6284 /* update shared memory */
6285 vars->link_status = 0;
6286 vars->line_speed = 0;
6287 bnx2x_update_mng(params, vars->link_status);
6288
6289 /* activate nig drain */
6290 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
6291
6292 /* disable emac */
6293 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6294
6295 msleep(10);
6296
6297 /* reset BigMac */
6298 bnx2x_bmac_rx_disable(bp, params->port);
6299 REG_WR(bp, GRCBASE_MISC +
6300 MISC_REGISTERS_RESET_REG_2_CLEAR,
6301 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
6302 return 0;
6303}
6304
6305static u8 bnx2x_update_link_up(struct link_params *params,
6306 struct link_vars *vars,
6307 u8 link_10g, u32 gp_status)
6308{
6309 struct bnx2x *bp = params->bp;
6310 u8 port = params->port;
6311 u8 rc = 0;
6312
6313 vars->link_status |= LINK_STATUS_LINK_UP;
6314 if (link_10g) {
6315 bnx2x_bmac_enable(params, vars, 0);
6316 bnx2x_set_led(params, LED_MODE_OPER, SPEED_10000);
6317 } else {
6318 rc = bnx2x_emac_program(params, vars->line_speed,
6319 vars->duplex);
6320
6321 bnx2x_emac_enable(params, vars, 0);
6322
6323 /* AN complete? */
6324 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
6325 if (!(vars->phy_flags &
6326 PHY_SGMII_FLAG))
6327 bnx2x_set_gmii_tx_driver(params);
6328 }
6329 }
6330
6331 /* PBF - link up */
6332 rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
6333 vars->line_speed);
6334
6335 /* disable drain */
6336 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
6337
6338 /* update shared memory */
6339 bnx2x_update_mng(params, vars->link_status);
6340 msleep(20);
6341 return rc;
6342}
6343/* This function should called upon link interrupt */
6344/* In case vars->link_up, driver needs to
6345 1. Update the pbf
6346 2. Disable drain
6347 3. Update the shared memory
6348 4. Indicate link up
6349 5. Set LEDs
6350 Otherwise,
6351 1. Update shared memory
6352 2. Reset BigMac
6353 3. Report link down
6354 4. Unset LEDs
6355*/
6356u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6357{
6358 struct bnx2x *bp = params->bp;
6359 u8 port = params->port;
6360 u16 gp_status;
6361 u8 link_10g;
6362 u8 ext_phy_link_up, rc = 0;
6363 u32 ext_phy_type;
6364 u8 is_mi_int = 0;
6365
6366 DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
6367 port, (vars->phy_flags & PHY_XGXS_FLAG),
6368 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
6369
6370 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
6371 port*0x18) > 0);
6372 DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
6373 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
6374 is_mi_int,
6375 REG_RD(bp,
6376 NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
6377
6378 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
6379 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
6380 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
6381
6382 /* disable emac */
6383 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6384
6385 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
6386
6387 /* Check external link change only for non-direct */
6388 ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars, is_mi_int);
6389
6390 /* Read gp_status */
6391 CL45_RD_OVER_CL22(bp, port, params->phy_addr,
6392 MDIO_REG_BANK_GP_STATUS,
6393 MDIO_GP_STATUS_TOP_AN_STATUS1,
6394 &gp_status);
6395
6396 rc = bnx2x_link_settings_status(params, vars, gp_status,
6397 ext_phy_link_up);
6398 if (rc != 0)
6399 return rc;
6400
6401 /* anything 10 and over uses the bmac */
6402 link_10g = ((vars->line_speed == SPEED_10000) ||
6403 (vars->line_speed == SPEED_12000) ||
6404 (vars->line_speed == SPEED_12500) ||
6405 (vars->line_speed == SPEED_13000) ||
6406 (vars->line_speed == SPEED_15000) ||
6407 (vars->line_speed == SPEED_16000));
6408
6409 bnx2x_link_int_ack(params, vars, link_10g, is_mi_int);
6410
6411 /* In case external phy link is up, and internal link is down
6412 ( not initialized yet probably after link initialization, it needs
6413 to be initialized.
6414 Note that after link down-up as result of cable plug,
6415 the xgxs link would probably become up again without the need to
6416 initialize it*/
6417
6418 if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
6419 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
6420 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) &&
6421 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) &&
6422 (ext_phy_link_up && !vars->phy_link_up))
6423 bnx2x_init_internal_phy(params, vars, 0);
6424
6425 /* link is up only if both local phy and external phy are up */
6426 vars->link_up = (ext_phy_link_up && vars->phy_link_up);
6427
6428 if (vars->link_up)
6429 rc = bnx2x_update_link_up(params, vars, link_10g, gp_status);
6430 else
6431 rc = bnx2x_update_link_down(params, vars);
6432
6433 return rc;
6434}
6435
6436static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6437{ 7814{
6438 u8 ext_phy_addr[PORT_MAX]; 7815 struct bnx2x_phy phy[PORT_MAX];
7816 struct bnx2x_phy *phy_blk[PORT_MAX];
6439 u16 val; 7817 u16 val;
6440 s8 port; 7818 s8 port = 0;
6441 7819 s8 port_of_path = 0;
7820 u32 swap_val, swap_override;
7821 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7822 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7823 port ^= (swap_val && swap_override);
7824 bnx2x_ext_phy_hw_reset(bp, port);
6442 /* PART1 - Reset both phys */ 7825 /* PART1 - Reset both phys */
6443 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7826 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6444 /* Extract the ext phy address for the port */ 7827 u32 shmem_base, shmem2_base;
6445 u32 ext_phy_config = REG_RD(bp, shmem_base + 7828 /* In E2, same phy is using for port0 of the two paths */
6446 offsetof(struct shmem_region, 7829 if (CHIP_IS_E2(bp)) {
6447 dev_info.port_hw_config[port].external_phy_config)); 7830 shmem_base = shmem_base_path[port];
7831 shmem2_base = shmem2_base_path[port];
7832 port_of_path = 0;
7833 } else {
7834 shmem_base = shmem_base_path[0];
7835 shmem2_base = shmem2_base_path[0];
7836 port_of_path = port;
7837 }
6448 7838
7839 /* Extract the ext phy address for the port */
7840 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
7841 port_of_path, &phy[port]) !=
7842 0) {
7843 DP(NETIF_MSG_LINK, "populate_phy failed\n");
7844 return -EINVAL;
7845 }
6449 /* disable attentions */ 7846 /* disable attentions */
6450 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 7847 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
6451 (NIG_MASK_XGXS0_LINK_STATUS | 7848 port_of_path*4,
6452 NIG_MASK_XGXS0_LINK10G | 7849 (NIG_MASK_XGXS0_LINK_STATUS |
6453 NIG_MASK_SERDES0_LINK_STATUS | 7850 NIG_MASK_XGXS0_LINK10G |
6454 NIG_MASK_MI_INT)); 7851 NIG_MASK_SERDES0_LINK_STATUS |
6455 7852 NIG_MASK_MI_INT));
6456 ext_phy_addr[port] = XGXS_EXT_PHY_ADDR(ext_phy_config);
6457 7853
6458 /* Need to take the phy out of low power mode in order 7854 /* Need to take the phy out of low power mode in order
6459 to write to access its registers */ 7855 to write to access its registers */
6460 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 7856 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6461 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); 7857 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
7858 port);
6462 7859
6463 /* Reset the phy */ 7860 /* Reset the phy */
6464 bnx2x_cl45_write(bp, port, 7861 bnx2x_cl45_write(bp, &phy[port],
6465 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, 7862 MDIO_PMA_DEVAD,
6466 ext_phy_addr[port], 7863 MDIO_PMA_REG_CTRL,
6467 MDIO_PMA_DEVAD, 7864 1<<15);
6468 MDIO_PMA_REG_CTRL,
6469 1<<15);
6470 } 7865 }
6471 7866
6472 /* Add delay of 150ms after reset */ 7867 /* Add delay of 150ms after reset */
6473 msleep(150); 7868 msleep(150);
6474 7869
7870 if (phy[PORT_0].addr & 0x1) {
7871 phy_blk[PORT_0] = &(phy[PORT_1]);
7872 phy_blk[PORT_1] = &(phy[PORT_0]);
7873 } else {
7874 phy_blk[PORT_0] = &(phy[PORT_0]);
7875 phy_blk[PORT_1] = &(phy[PORT_1]);
7876 }
7877
6475 /* PART2 - Download firmware to both phys */ 7878 /* PART2 - Download firmware to both phys */
6476 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7879 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6477 u16 fw_ver1; 7880 if (CHIP_IS_E2(bp))
6478 7881 port_of_path = 0;
6479 bnx2x_bcm8073_external_rom_boot(bp, port, 7882 else
6480 ext_phy_addr[port], shmem_base); 7883 port_of_path = port;
6481 7884
6482 bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, 7885 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
6483 ext_phy_addr[port], 7886 phy_blk[port]->addr);
6484 MDIO_PMA_DEVAD, 7887 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
6485 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 7888 port_of_path))
6486 if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
6487 DP(NETIF_MSG_LINK,
6488 "bnx2x_8073_common_init_phy port %x:"
6489 "Download failed. fw version = 0x%x\n",
6490 port, fw_ver1);
6491 return -EINVAL; 7889 return -EINVAL;
6492 }
6493 7890
6494 /* Only set bit 10 = 1 (Tx power down) */ 7891 /* Only set bit 10 = 1 (Tx power down) */
6495 bnx2x_cl45_read(bp, port, 7892 bnx2x_cl45_read(bp, phy_blk[port],
6496 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, 7893 MDIO_PMA_DEVAD,
6497 ext_phy_addr[port], 7894 MDIO_PMA_REG_TX_POWER_DOWN, &val);
6498 MDIO_PMA_DEVAD,
6499 MDIO_PMA_REG_TX_POWER_DOWN, &val);
6500 7895
6501 /* Phase1 of TX_POWER_DOWN reset */ 7896 /* Phase1 of TX_POWER_DOWN reset */
6502 bnx2x_cl45_write(bp, port, 7897 bnx2x_cl45_write(bp, phy_blk[port],
6503 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, 7898 MDIO_PMA_DEVAD,
6504 ext_phy_addr[port], 7899 MDIO_PMA_REG_TX_POWER_DOWN,
6505 MDIO_PMA_DEVAD, 7900 (val | 1<<10));
6506 MDIO_PMA_REG_TX_POWER_DOWN,
6507 (val | 1<<10));
6508 } 7901 }
6509 7902
6510 /* Toggle Transmitter: Power down and then up with 600ms 7903 /*
6511 delay between */ 7904 * Toggle Transmitter: Power down and then up with 600ms delay
7905 * between
7906 */
6512 msleep(600); 7907 msleep(600);
6513 7908
6514 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */ 7909 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
6515 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7910 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6516 /* Phase2 of POWER_DOWN_RESET */ 7911 /* Phase2 of POWER_DOWN_RESET */
6517 /* Release bit 10 (Release Tx power down) */ 7912 /* Release bit 10 (Release Tx power down) */
6518 bnx2x_cl45_read(bp, port, 7913 bnx2x_cl45_read(bp, phy_blk[port],
6519 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, 7914 MDIO_PMA_DEVAD,
6520 ext_phy_addr[port], 7915 MDIO_PMA_REG_TX_POWER_DOWN, &val);
6521 MDIO_PMA_DEVAD, 7916
6522 MDIO_PMA_REG_TX_POWER_DOWN, &val); 7917 bnx2x_cl45_write(bp, phy_blk[port],
6523 7918 MDIO_PMA_DEVAD,
6524 bnx2x_cl45_write(bp, port, 7919 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
6525 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6526 ext_phy_addr[port],
6527 MDIO_PMA_DEVAD,
6528 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
6529 msleep(15); 7920 msleep(15);
6530 7921
6531 /* Read modify write the SPI-ROM version select register */ 7922 /* Read modify write the SPI-ROM version select register */
6532 bnx2x_cl45_read(bp, port, 7923 bnx2x_cl45_read(bp, phy_blk[port],
6533 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, 7924 MDIO_PMA_DEVAD,
6534 ext_phy_addr[port], 7925 MDIO_PMA_REG_EDC_FFE_MAIN, &val);
6535 MDIO_PMA_DEVAD, 7926 bnx2x_cl45_write(bp, phy_blk[port],
6536 MDIO_PMA_REG_EDC_FFE_MAIN, &val); 7927 MDIO_PMA_DEVAD,
6537 bnx2x_cl45_write(bp, port, 7928 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
6538 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
6539 ext_phy_addr[port],
6540 MDIO_PMA_DEVAD,
6541 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
6542 7929
6543 /* set GPIO2 back to LOW */ 7930 /* set GPIO2 back to LOW */
6544 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 7931 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6545 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 7932 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
6546 } 7933 }
6547 return 0; 7934 return 0;
6548
6549} 7935}
6550 7936static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
6551static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base) 7937 u32 shmem_base_path[],
7938 u32 shmem2_base_path[], u8 phy_index,
7939 u32 chip_id)
6552{ 7940{
6553 u8 ext_phy_addr[PORT_MAX]; 7941 u32 val;
6554 s8 port, first_port, i; 7942 s8 port;
6555 u32 swap_val, swap_override; 7943 struct bnx2x_phy phy;
6556 DP(NETIF_MSG_LINK, "Executing BCM8727 common init\n"); 7944 /* Use port1 because of the static port-swap */
6557 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); 7945 /* Enable the module detection interrupt */
6558 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); 7946 val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
7947 val |= ((1<<MISC_REGISTERS_GPIO_3)|
7948 (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
7949 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
6559 7950
6560 bnx2x_ext_phy_hw_reset(bp, 1 ^ (swap_val && swap_override)); 7951 bnx2x_ext_phy_hw_reset(bp, 0);
6561 msleep(5); 7952 msleep(5);
7953 for (port = 0; port < PORT_MAX; port++) {
7954 u32 shmem_base, shmem2_base;
6562 7955
6563 if (swap_val && swap_override) 7956 /* In E2, same phy is using for port0 of the two paths */
6564 first_port = PORT_0; 7957 if (CHIP_IS_E2(bp)) {
6565 else 7958 shmem_base = shmem_base_path[port];
6566 first_port = PORT_1; 7959 shmem2_base = shmem2_base_path[port];
6567 7960 } else {
6568 /* PART1 - Reset both phys */ 7961 shmem_base = shmem_base_path[0];
6569 for (i = 0, port = first_port; i < PORT_MAX; i++, port = !port) { 7962 shmem2_base = shmem2_base_path[0];
7963 }
6570 /* Extract the ext phy address for the port */ 7964 /* Extract the ext phy address for the port */
6571 u32 ext_phy_config = REG_RD(bp, shmem_base + 7965 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
6572 offsetof(struct shmem_region, 7966 port, &phy) !=
6573 dev_info.port_hw_config[port].external_phy_config)); 7967 0) {
7968 DP(NETIF_MSG_LINK, "populate phy failed\n");
7969 return -EINVAL;
7970 }
6574 7971
6575 /* disable attentions */ 7972 /* Reset phy*/
6576 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 7973 bnx2x_cl45_write(bp, &phy,
6577 (NIG_MASK_XGXS0_LINK_STATUS | 7974 MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
6578 NIG_MASK_XGXS0_LINK10G |
6579 NIG_MASK_SERDES0_LINK_STATUS |
6580 NIG_MASK_MI_INT));
6581 7975
6582 ext_phy_addr[port] = XGXS_EXT_PHY_ADDR(ext_phy_config);
6583 7976
6584 /* Reset the phy */ 7977 /* Set fault module detected LED on */
6585 bnx2x_cl45_write(bp, port, 7978 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
6586 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 7979 MISC_REGISTERS_GPIO_HIGH,
6587 ext_phy_addr[port], 7980 port);
6588 MDIO_PMA_DEVAD,
6589 MDIO_PMA_REG_CTRL,
6590 1<<15);
6591 } 7981 }
6592 7982
6593 /* Add delay of 150ms after reset */ 7983 return 0;
6594 msleep(150); 7984}
6595 7985static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
6596 /* PART2 - Download firmware to both phys */ 7986 u8 *io_gpio, u8 *io_port)
6597 for (i = 0, port = first_port; i < PORT_MAX; i++, port = !port) { 7987{
6598 u16 fw_ver1;
6599
6600 bnx2x_bcm8727_external_rom_boot(bp, port,
6601 ext_phy_addr[port], shmem_base);
6602 7988
6603 bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 7989 u32 phy_gpio_reset = REG_RD(bp, shmem_base +
6604 ext_phy_addr[port], 7990 offsetof(struct shmem_region,
6605 MDIO_PMA_DEVAD, 7991 dev_info.port_hw_config[PORT_0].default_cfg));
6606 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 7992 switch (phy_gpio_reset) {
6607 if (fw_ver1 == 0 || fw_ver1 == 0x4321) { 7993 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
6608 DP(NETIF_MSG_LINK, 7994 *io_gpio = 0;
6609 "bnx2x_8727_common_init_phy port %x:" 7995 *io_port = 0;
6610 "Download failed. fw version = 0x%x\n", 7996 break;
6611 port, fw_ver1); 7997 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
6612 return -EINVAL; 7998 *io_gpio = 1;
6613 } 7999 *io_port = 0;
8000 break;
8001 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
8002 *io_gpio = 2;
8003 *io_port = 0;
8004 break;
8005 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
8006 *io_gpio = 3;
8007 *io_port = 0;
8008 break;
8009 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
8010 *io_gpio = 0;
8011 *io_port = 1;
8012 break;
8013 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
8014 *io_gpio = 1;
8015 *io_port = 1;
8016 break;
8017 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
8018 *io_gpio = 2;
8019 *io_port = 1;
8020 break;
8021 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
8022 *io_gpio = 3;
8023 *io_port = 1;
8024 break;
8025 default:
8026 /* Don't override the io_gpio and io_port */
8027 break;
6614 } 8028 }
6615
6616 return 0;
6617} 8029}
8030static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
8031 u32 shmem_base_path[],
8032 u32 shmem2_base_path[], u8 phy_index,
8033 u32 chip_id)
8034{
8035 s8 port, reset_gpio;
8036 u32 swap_val, swap_override;
8037 struct bnx2x_phy phy[PORT_MAX];
8038 struct bnx2x_phy *phy_blk[PORT_MAX];
8039 s8 port_of_path;
8040 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8041 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6618 8042
8043 reset_gpio = MISC_REGISTERS_GPIO_1;
8044 port = 1;
6619 8045
6620static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base) 8046 /*
6621{ 8047 * Retrieve the reset gpio/port which control the reset.
6622 u8 ext_phy_addr; 8048 * Default is GPIO1, PORT1
6623 u32 val; 8049 */
6624 s8 port; 8050 bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
8051 (u8 *)&reset_gpio, (u8 *)&port);
6625 8052
6626 /* Use port1 because of the static port-swap */ 8053 /* Calculate the port based on port swap */
6627 /* Enable the module detection interrupt */ 8054 port ^= (swap_val && swap_override);
6628 val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN); 8055
6629 val |= ((1<<MISC_REGISTERS_GPIO_3)| 8056 /* Initiate PHY reset*/
6630 (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT))); 8057 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
6631 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); 8058 port);
8059 msleep(1);
8060 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
8061 port);
6632 8062
6633 bnx2x_ext_phy_hw_reset(bp, 1);
6634 msleep(5); 8063 msleep(5);
6635 for (port = 0; port < PORT_MAX; port++) { 8064
8065 /* PART1 - Reset both phys */
8066 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
8067 u32 shmem_base, shmem2_base;
8068
8069 /* In E2, same phy is using for port0 of the two paths */
8070 if (CHIP_IS_E2(bp)) {
8071 shmem_base = shmem_base_path[port];
8072 shmem2_base = shmem2_base_path[port];
8073 port_of_path = 0;
8074 } else {
8075 shmem_base = shmem_base_path[0];
8076 shmem2_base = shmem2_base_path[0];
8077 port_of_path = port;
8078 }
8079
6636 /* Extract the ext phy address for the port */ 8080 /* Extract the ext phy address for the port */
6637 u32 ext_phy_config = REG_RD(bp, shmem_base + 8081 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
6638 offsetof(struct shmem_region, 8082 port_of_path, &phy[port]) !=
6639 dev_info.port_hw_config[port].external_phy_config)); 8083 0) {
8084 DP(NETIF_MSG_LINK, "populate phy failed\n");
8085 return -EINVAL;
8086 }
8087 /* disable attentions */
8088 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
8089 port_of_path*4,
8090 (NIG_MASK_XGXS0_LINK_STATUS |
8091 NIG_MASK_XGXS0_LINK10G |
8092 NIG_MASK_SERDES0_LINK_STATUS |
8093 NIG_MASK_MI_INT));
6640 8094
6641 ext_phy_addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
6642 DP(NETIF_MSG_LINK, "8726_common_init : ext_phy_addr = 0x%x\n",
6643 ext_phy_addr);
6644 8095
6645 bnx2x_8726_reset_phy(bp, port, ext_phy_addr); 8096 /* Reset the phy */
8097 bnx2x_cl45_write(bp, &phy[port],
8098 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
8099 }
6646 8100
6647 /* Set fault module detected LED on */ 8101 /* Add delay of 150ms after reset */
6648 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 8102 msleep(150);
6649 MISC_REGISTERS_GPIO_HIGH, 8103 if (phy[PORT_0].addr & 0x1) {
6650 port); 8104 phy_blk[PORT_0] = &(phy[PORT_1]);
8105 phy_blk[PORT_1] = &(phy[PORT_0]);
8106 } else {
8107 phy_blk[PORT_0] = &(phy[PORT_0]);
8108 phy_blk[PORT_1] = &(phy[PORT_1]);
6651 } 8109 }
8110 /* PART2 - Download firmware to both phys */
8111 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
8112 if (CHIP_IS_E2(bp))
8113 port_of_path = 0;
8114 else
8115 port_of_path = port;
8116 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
8117 phy_blk[port]->addr);
8118 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
8119 port_of_path))
8120 return -EINVAL;
6652 8121
8122 }
6653 return 0; 8123 return 0;
6654} 8124}
6655 8125
6656 8126static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
6657static u8 bnx2x_84823_common_init_phy(struct bnx2x *bp, u32 shmem_base) 8127 u32 shmem2_base_path[], u8 phy_index,
6658{ 8128 u32 ext_phy_type, u32 chip_id)
6659 /* HW reset */
6660 bnx2x_ext_phy_hw_reset(bp, 1);
6661 return 0;
6662}
6663u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6664{ 8129{
6665 u8 rc = 0; 8130 u8 rc = 0;
6666 u32 ext_phy_type;
6667
6668 DP(NETIF_MSG_LINK, "Begin common phy init\n");
6669
6670 /* Read the ext_phy_type for arbitrary port(0) */
6671 ext_phy_type = XGXS_EXT_PHY_TYPE(
6672 REG_RD(bp, shmem_base +
6673 offsetof(struct shmem_region,
6674 dev_info.port_hw_config[0].external_phy_config)));
6675 8131
6676 switch (ext_phy_type) { 8132 switch (ext_phy_type) {
6677 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 8133 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6678 { 8134 rc = bnx2x_8073_common_init_phy(bp, shmem_base_path,
6679 rc = bnx2x_8073_common_init_phy(bp, shmem_base); 8135 shmem2_base_path,
8136 phy_index, chip_id);
6680 break; 8137 break;
6681 }
6682 8138
6683 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 8139 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6684 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC: 8140 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
6685 rc = bnx2x_8727_common_init_phy(bp, shmem_base); 8141 rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
8142 shmem2_base_path,
8143 phy_index, chip_id);
6686 break; 8144 break;
6687 8145
6688 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 8146 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6689 /* GPIO1 affects both ports, so there's need to pull 8147 /*
6690 it for single port alone */ 8148 * GPIO1 affects both ports, so there's need to pull
6691 rc = bnx2x_8726_common_init_phy(bp, shmem_base); 8149 * it for single port alone
8150 */
8151 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
8152 shmem2_base_path,
8153 phy_index, chip_id);
6692 break; 8154 break;
6693 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823: 8155 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6694 rc = bnx2x_84823_common_init_phy(bp, shmem_base); 8156 rc = -EINVAL;
6695 break; 8157 break;
6696 default: 8158 default:
6697 DP(NETIF_MSG_LINK, 8159 DP(NETIF_MSG_LINK,
6698 "bnx2x_common_init_phy: ext_phy 0x%x not required\n", 8160 "ext_phy 0x%x common init not required\n",
6699 ext_phy_type); 8161 ext_phy_type);
6700 break; 8162 break;
6701 } 8163 }
6702 8164
8165 if (rc != 0)
8166 netdev_err(bp->dev, "Warning: PHY was not initialized,"
8167 " Port %d\n",
8168 0);
6703 return rc; 8169 return rc;
6704} 8170}
6705 8171
6706void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr) 8172u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
8173 u32 shmem2_base_path[], u32 chip_id)
6707{ 8174{
6708 u16 val, cnt; 8175 u8 rc = 0;
8176 u32 phy_ver;
8177 u8 phy_index;
8178 u32 ext_phy_type, ext_phy_config;
8179 DP(NETIF_MSG_LINK, "Begin common phy init\n");
6709 8180
6710 bnx2x_cl45_read(bp, port, 8181 /* Check if common init was already done */
6711 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, 8182 phy_ver = REG_RD(bp, shmem_base_path[0] +
6712 phy_addr, 8183 offsetof(struct shmem_region,
6713 MDIO_PMA_DEVAD, 8184 port_mb[PORT_0].ext_phy_fw_version));
6714 MDIO_PMA_REG_7101_RESET, &val); 8185 if (phy_ver) {
8186 DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
8187 phy_ver);
8188 return 0;
8189 }
6715 8190
6716 for (cnt = 0; cnt < 10; cnt++) { 8191 /* Read the ext_phy_type for arbitrary port(0) */
6717 msleep(50); 8192 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
6718 /* Writes a self-clearing reset */ 8193 phy_index++) {
6719 bnx2x_cl45_write(bp, port, 8194 ext_phy_config = bnx2x_get_ext_phy_config(bp,
6720 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, 8195 shmem_base_path[0],
6721 phy_addr, 8196 phy_index, 0);
6722 MDIO_PMA_DEVAD, 8197 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6723 MDIO_PMA_REG_7101_RESET, 8198 rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path,
6724 (val | (1<<15))); 8199 shmem2_base_path,
6725 /* Wait for clear */ 8200 phy_index, ext_phy_type,
6726 bnx2x_cl45_read(bp, port, 8201 chip_id);
6727 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, 8202 }
6728 phy_addr, 8203 return rc;
6729 MDIO_PMA_DEVAD, 8204}
6730 MDIO_PMA_REG_7101_RESET, &val);
6731 8205
6732 if ((val & (1<<15)) == 0) 8206u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
6733 break; 8207{
8208 u8 phy_index;
8209 struct bnx2x_phy phy;
8210 for (phy_index = INT_PHY; phy_index < MAX_PHYS;
8211 phy_index++) {
8212 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
8213 0, &phy) != 0) {
8214 DP(NETIF_MSG_LINK, "populate phy failed\n");
8215 return 0;
8216 }
8217
8218 if (phy.flags & FLAGS_HW_LOCK_REQUIRED)
8219 return 1;
8220 }
8221 return 0;
8222}
8223
8224u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
8225 u32 shmem_base,
8226 u32 shmem2_base,
8227 u8 port)
8228{
8229 u8 phy_index, fan_failure_det_req = 0;
8230 struct bnx2x_phy phy;
8231 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
8232 phy_index++) {
8233 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
8234 port, &phy)
8235 != 0) {
8236 DP(NETIF_MSG_LINK, "populate phy failed\n");
8237 return 0;
8238 }
8239 fan_failure_det_req |= (phy.flags &
8240 FLAGS_FAN_FAILURE_DET_REQ);
8241 }
8242 return fan_failure_det_req;
8243}
8244
8245void bnx2x_hw_reset_phy(struct link_params *params)
8246{
8247 u8 phy_index;
8248 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
8249 phy_index++) {
8250 if (params->phy[phy_index].hw_reset) {
8251 params->phy[phy_index].hw_reset(
8252 &params->phy[phy_index],
8253 params);
8254 params->phy[phy_index] = phy_null;
8255 }
6734 } 8256 }
6735} 8257}
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 40c2981de8ed..92f36b6950dc 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
1/* Copyright 2008-2009 Broadcom Corporation 1/* Copyright 2008-2011 Broadcom Corporation
2 * 2 *
3 * Unless you and Broadcom execute a separate written software license 3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you 4 * agreement governing use of this software, this software is licensed to you
@@ -22,7 +22,8 @@
22/***********************************************************/ 22/***********************************************************/
23/* Defines */ 23/* Defines */
24/***********************************************************/ 24/***********************************************************/
25#define DEFAULT_PHY_DEV_ADDR 3 25#define DEFAULT_PHY_DEV_ADDR 3
26#define E2_DEFAULT_PHY_DEV_ADDR 5
26 27
27 28
28 29
@@ -32,7 +33,7 @@
32#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH 33#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
33#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE 34#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
34 35
35#define SPEED_AUTO_NEG 0 36#define SPEED_AUTO_NEG 0
36#define SPEED_12000 12000 37#define SPEED_12000 12000
37#define SPEED_12500 12500 38#define SPEED_12500 12500
38#define SPEED_13000 13000 39#define SPEED_13000 13000
@@ -43,12 +44,156 @@
43#define SFP_EEPROM_VENDOR_NAME_SIZE 16 44#define SFP_EEPROM_VENDOR_NAME_SIZE 16
44#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25 45#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
45#define SFP_EEPROM_VENDOR_OUI_SIZE 3 46#define SFP_EEPROM_VENDOR_OUI_SIZE 3
46#define SFP_EEPROM_PART_NO_ADDR 0x28 47#define SFP_EEPROM_PART_NO_ADDR 0x28
47#define SFP_EEPROM_PART_NO_SIZE 16 48#define SFP_EEPROM_PART_NO_SIZE 16
48#define PWR_FLT_ERR_MSG_LEN 250 49#define PWR_FLT_ERR_MSG_LEN 250
50
51#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
52 ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
53#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
54 (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
55 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
56#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
57 ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
58
59/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */
60#define SINGLE_MEDIA_DIRECT(params) (params->num_phys == 1)
61/* Single Media board contains single external phy */
62#define SINGLE_MEDIA(params) (params->num_phys == 2)
63/* Dual Media board contains two external phy with different media */
64#define DUAL_MEDIA(params) (params->num_phys == 3)
65#define FW_PARAM_MDIO_CTRL_OFFSET 16
66#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
67 (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
68
69#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE 170
70#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE 0
71
72#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE 250
73#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE 0
74
75#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE 10
76#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE 90
77
78#define PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE 50
79#define PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE 250
80
81#define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170
82#define PFC_BRB_FULL_LB_XON_THRESHOLD 250
83
49/***********************************************************/ 84/***********************************************************/
50/* Structs */ 85/* Structs */
51/***********************************************************/ 86/***********************************************************/
87#define INT_PHY 0
88#define EXT_PHY1 1
89#define EXT_PHY2 2
90#define MAX_PHYS 3
91
92/* Same configuration is shared between the XGXS and the first external phy */
93#define LINK_CONFIG_SIZE (MAX_PHYS - 1)
94#define LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == INT_PHY) ? \
95 0 : (_phy_idx - 1))
96/***********************************************************/
97/* bnx2x_phy struct */
98/* Defines the required arguments and function per phy */
99/***********************************************************/
100struct link_vars;
101struct link_params;
102struct bnx2x_phy;
103
104typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
105 struct link_vars *vars);
106typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
107 struct link_vars *vars);
108typedef void (*link_reset_t)(struct bnx2x_phy *phy,
109 struct link_params *params);
110typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
111 struct link_params *params);
112typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
113typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
114typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
115 struct link_params *params, u8 mode);
116typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
117 struct link_params *params, u32 action);
118
119struct bnx2x_phy {
120 u32 type;
121
122 /* Loaded during init */
123 u8 addr;
124
125 u8 flags;
126 /* Require HW lock */
127#define FLAGS_HW_LOCK_REQUIRED (1<<0)
128 /* No Over-Current detection */
129#define FLAGS_NOC (1<<1)
130 /* Fan failure detection required */
131#define FLAGS_FAN_FAILURE_DET_REQ (1<<2)
132 /* Initialize first the XGXS and only then the phy itself */
133#define FLAGS_INIT_XGXS_FIRST (1<<3)
134#define FLAGS_REARM_LATCH_SIGNAL (1<<6)
135#define FLAGS_SFP_NOT_APPROVED (1<<7)
136
137 u8 def_md_devad;
138 u8 reserved;
139 /* preemphasis values for the rx side */
140 u16 rx_preemphasis[4];
141
142 /* preemphasis values for the tx side */
143 u16 tx_preemphasis[4];
144
145 /* EMAC address for access MDIO */
146 u32 mdio_ctrl;
147
148 u32 supported;
149
150 u32 media_type;
151#define ETH_PHY_UNSPECIFIED 0x0
152#define ETH_PHY_SFP_FIBER 0x1
153#define ETH_PHY_XFP_FIBER 0x2
154#define ETH_PHY_DA_TWINAX 0x3
155#define ETH_PHY_BASE_T 0x4
156#define ETH_PHY_NOT_PRESENT 0xff
157
158 /* The address in which version is located*/
159 u32 ver_addr;
160
161 u16 req_flow_ctrl;
162
163 u16 req_line_speed;
164
165 u32 speed_cap_mask;
166
167 u16 req_duplex;
168 u16 rsrv;
169 /* Called per phy/port init, and it configures LASI, speed, autoneg,
170 duplex, flow control negotiation, etc. */
171 config_init_t config_init;
172
173 /* Called due to interrupt. It determines the link, speed */
174 read_status_t read_status;
175
176 /* Called when driver is unloading. Should reset the phy */
177 link_reset_t link_reset;
178
179 /* Set the loopback configuration for the phy */
180 config_loopback_t config_loopback;
181
182 /* Format the given raw number into str up to len */
183 format_fw_ver_t format_fw_ver;
184
185 /* Reset the phy (both ports) */
186 hw_reset_t hw_reset;
187
188 /* Set link led mode (on/off/oper)*/
189 set_link_led_t set_link_led;
190
191 /* PHY Specific tasks */
192 phy_specific_func_t phy_specific_func;
193#define DISABLE_TX 1
194#define ENABLE_TX 2
195};
196
52/* Inputs parameters to the CLC */ 197/* Inputs parameters to the CLC */
53struct link_params { 198struct link_params {
54 199
@@ -56,59 +201,57 @@ struct link_params {
56 201
57 /* Default / User Configuration */ 202 /* Default / User Configuration */
58 u8 loopback_mode; 203 u8 loopback_mode;
59#define LOOPBACK_NONE 0 204#define LOOPBACK_NONE 0
60#define LOOPBACK_EMAC 1 205#define LOOPBACK_EMAC 1
61#define LOOPBACK_BMAC 2 206#define LOOPBACK_BMAC 2
62#define LOOPBACK_XGXS_10 3 207#define LOOPBACK_XGXS 3
63#define LOOPBACK_EXT_PHY 4 208#define LOOPBACK_EXT_PHY 4
64#define LOOPBACK_EXT 5 209#define LOOPBACK_EXT 5
65 210#define LOOPBACK_UMAC 6
66 u16 req_duplex; 211#define LOOPBACK_XMAC 7
67 u16 req_flow_ctrl;
68 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
69 req_flow_ctrl is set to AUTO */
70 u16 req_line_speed; /* Also determine AutoNeg */
71 212
72 /* Device parameters */ 213 /* Device parameters */
73 u8 mac_addr[6]; 214 u8 mac_addr[6];
74 215
216 u16 req_duplex[LINK_CONFIG_SIZE];
217 u16 req_flow_ctrl[LINK_CONFIG_SIZE];
218
219 u16 req_line_speed[LINK_CONFIG_SIZE]; /* Also determine AutoNeg */
220
75 /* shmem parameters */ 221 /* shmem parameters */
76 u32 shmem_base; 222 u32 shmem_base;
77 u32 speed_cap_mask; 223 u32 shmem2_base;
224 u32 speed_cap_mask[LINK_CONFIG_SIZE];
78 u32 switch_cfg; 225 u32 switch_cfg;
79#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH 226#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH
80#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH 227#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH
81#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT 228#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT
82 229
83 u16 hw_led_mode; /* part of the hw_config read from the shmem */
84
85 /* phy_addr populated by the phy_init function */
86 u8 phy_addr;
87 /*u8 reserved1;*/
88
89 u32 lane_config; 230 u32 lane_config;
90 u32 ext_phy_config;
91#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
92 ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
93#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
94 (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
95 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
96#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
97 ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
98 231
99 /* Phy register parameter */ 232 /* Phy register parameter */
100 u32 chip_id; 233 u32 chip_id;
101 234
102 u16 xgxs_config_rx[4]; /* preemphasis values for the rx side */ 235 /* features */
103 u16 xgxs_config_tx[4]; /* preemphasis values for the tx side */
104
105 u32 feature_config_flags; 236 u32 feature_config_flags;
106#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0) 237#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
107#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) 238#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
108#define FEATURE_CONFIG_BCM8727_NOC (1<<3) 239#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
240#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
241 /* Will be populated during common init */
242 struct bnx2x_phy phy[MAX_PHYS];
243
244 /* Will be populated during common init */
245 u8 num_phys;
246
247 u8 rsrv;
248 u16 hw_led_mode; /* part of the hw_config read from the shmem */
249 u32 multi_phy_config;
109 250
110 /* Device pointer passed to all callback functions */ 251 /* Device pointer passed to all callback functions */
111 struct bnx2x *bp; 252 struct bnx2x *bp;
253 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
254 req_flow_ctrl is set to AUTO */
112}; 255};
113 256
114/* Output parameters */ 257/* Output parameters */
@@ -129,12 +272,6 @@ struct link_vars {
129 u16 flow_ctrl; 272 u16 flow_ctrl;
130 u16 ieee_fc; 273 u16 ieee_fc;
131 274
132 u32 autoneg;
133#define AUTO_NEG_DISABLED 0x0
134#define AUTO_NEG_ENABLED 0x1
135#define AUTO_NEG_COMPLETE 0x2
136#define AUTO_NEG_PARALLEL_DETECTION_USED 0x3
137
138 /* The same definitions as the shmem parameter */ 275 /* The same definitions as the shmem parameter */
139 u32 link_status; 276 u32 link_status;
140}; 277};
@@ -142,8 +279,6 @@ struct link_vars {
142/***********************************************************/ 279/***********************************************************/
143/* Functions */ 280/* Functions */
144/***********************************************************/ 281/***********************************************************/
145
146/* Initialize the phy */
147u8 bnx2x_phy_init(struct link_params *input, struct link_vars *output); 282u8 bnx2x_phy_init(struct link_params *input, struct link_vars *output);
148 283
149/* Reset the link. Should be called when driver or interface goes down 284/* Reset the link. Should be called when driver or interface goes down
@@ -155,17 +290,15 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
155/* bnx2x_link_update should be called upon link interrupt */ 290/* bnx2x_link_update should be called upon link interrupt */
156u8 bnx2x_link_update(struct link_params *input, struct link_vars *output); 291u8 bnx2x_link_update(struct link_params *input, struct link_vars *output);
157 292
158/* use the following cl45 functions to read/write from external_phy 293/* use the following phy functions to read/write from external_phy
159 In order to use it to read/write internal phy registers, use 294 In order to use it to read/write internal phy registers, use
160 DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as 295 DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
161 Use ext_phy_type of 0 in case of cl22 over cl45
162 the register */ 296 the register */
163u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type, 297u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
164 u8 phy_addr, u8 devad, u16 reg, u16 *ret_val); 298 u8 devad, u16 reg, u16 *ret_val);
165
166u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
167 u8 phy_addr, u8 devad, u16 reg, u16 val);
168 299
300u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
301 u8 devad, u16 reg, u16 val);
169/* Reads the link_status from the shmem, 302/* Reads the link_status from the shmem,
170 and update the link vars accordingly */ 303 and update the link vars accordingly */
171void bnx2x_link_status_update(struct link_params *input, 304void bnx2x_link_status_update(struct link_params *input,
@@ -178,11 +311,12 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
178 Basically, the CLC takes care of the led for the link, but in case one needs 311 Basically, the CLC takes care of the led for the link, but in case one needs
179 to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to 312 to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
180 blink the led, and LED_MODE_OFF to set the led off.*/ 313 blink the led, and LED_MODE_OFF to set the led off.*/
181u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed); 314u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
182#define LED_MODE_OFF 0 315 u8 mode, u32 speed);
183#define LED_MODE_OPER 2 316#define LED_MODE_OFF 0
184 317#define LED_MODE_ON 1
185u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value); 318#define LED_MODE_OPER 2
319#define LED_MODE_FRONT_PANEL_OFF 3
186 320
187/* bnx2x_handle_module_detect_int should be called upon module detection 321/* bnx2x_handle_module_detect_int should be called upon module detection
188 interrupt */ 322 interrupt */
@@ -190,17 +324,76 @@ void bnx2x_handle_module_detect_int(struct link_params *params);
190 324
191/* Get the actual link status. In case it returns 0, link is up, 325/* Get the actual link status. In case it returns 0, link is up,
192 otherwise link is down*/ 326 otherwise link is down*/
193u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars); 327u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars,
328 u8 is_serdes);
194 329
195/* One-time initialization for external phy after power up */ 330/* One-time initialization for external phy after power up */
196u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base); 331u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
332 u32 shmem2_base_path[], u32 chip_id);
197 333
198/* Reset the external PHY using GPIO */ 334/* Reset the external PHY using GPIO */
199void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port); 335void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
200 336
201void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr); 337/* Reset the external of SFX7101 */
338void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
339
340/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
341u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
342 struct link_params *params, u16 addr,
343 u8 byte_cnt, u8 *o_buf);
344
345void bnx2x_hw_reset_phy(struct link_params *params);
346
347/* Checks if HW lock is required for this phy/board type */
348u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
349 u32 shmem2_base);
350
351/* Check swap bit and adjust PHY order */
352u32 bnx2x_phy_selection(struct link_params *params);
353
354/* Probe the phys on board, and populate them in "params" */
355u8 bnx2x_phy_probe(struct link_params *params);
356/* Checks if fan failure detection is required on one of the phys on board */
357u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
358 u32 shmem2_base, u8 port);
359
360/* PFC port configuration params */
361struct bnx2x_nig_brb_pfc_port_params {
362 /* NIG */
363 u32 pause_enable;
364 u32 llfc_out_en;
365 u32 llfc_enable;
366 u32 pkt_priority_to_cos;
367 u32 rx_cos0_priority_mask;
368 u32 rx_cos1_priority_mask;
369 u32 llfc_high_priority_classes;
370 u32 llfc_low_priority_classes;
371 /* BRB */
372 u32 cos0_pauseable;
373 u32 cos1_pauseable;
374};
375
376/**
377 * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
378 * when link is already up
379 */
380void bnx2x_update_pfc(struct link_params *params,
381 struct link_vars *vars,
382 struct bnx2x_nig_brb_pfc_port_params *pfc_params);
383
384
385/* Used to configure the ETS to disable */
386void bnx2x_ets_disabled(struct link_params *params);
387
388/* Used to configure the ETS to BW limited */
389void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
390 const u32 cos1_bw);
202 391
203u8 bnx2x_read_sfp_module_eeprom(struct link_params *params, u16 addr, 392/* Used to configure the ETS to strict */
204 u8 byte_cnt, u8 *o_buf); 393u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
205 394
395/* Read pfc statistic*/
396void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
397 u32 pfc_frames_sent[2],
398 u32 pfc_frames_received[2]);
206#endif /* BNX2X_LINK_H */ 399#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index f8c3f08e4ce7..74be989f51c5 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -1,6 +1,6 @@
1/* bnx2x_main.c: Broadcom Everest network driver. 1/* bnx2x_main.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -23,7 +23,6 @@
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h> 26#include <linux/interrupt.h>
28#include <linux/pci.h> 27#include <linux/pci.h>
29#include <linux/init.h> 28#include <linux/init.h>
@@ -50,13 +49,14 @@
50#include <linux/zlib.h> 49#include <linux/zlib.h>
51#include <linux/io.h> 50#include <linux/io.h>
52#include <linux/stringify.h> 51#include <linux/stringify.h>
52#include <linux/vmalloc.h>
53 53
54#define BNX2X_MAIN 54#define BNX2X_MAIN
55#include "bnx2x.h" 55#include "bnx2x.h"
56#include "bnx2x_init.h" 56#include "bnx2x_init.h"
57#include "bnx2x_init_ops.h" 57#include "bnx2x_init_ops.h"
58#include "bnx2x_cmn.h" 58#include "bnx2x_cmn.h"
59 59#include "bnx2x_dcb.h"
60 60
61#include <linux/firmware.h> 61#include <linux/firmware.h>
62#include "bnx2x_fw_file_hdr.h" 62#include "bnx2x_fw_file_hdr.h"
@@ -66,8 +66,9 @@
66 __stringify(BCM_5710_FW_MINOR_VERSION) "." \ 66 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
67 __stringify(BCM_5710_FW_REVISION_VERSION) "." \ 67 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
68 __stringify(BCM_5710_FW_ENGINEERING_VERSION) 68 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
69#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw" 69#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
70#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw" 70#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
71#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
71 72
72/* Time in jiffies before concluding the transmitter is hung */ 73/* Time in jiffies before concluding the transmitter is hung */
73#define TX_TIMEOUT (5*HZ) 74#define TX_TIMEOUT (5*HZ)
@@ -77,18 +78,20 @@ static char version[] __devinitdata =
77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 78 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78 79
79MODULE_AUTHOR("Eliezer Tamir"); 80MODULE_AUTHOR("Eliezer Tamir");
80MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver"); 81MODULE_DESCRIPTION("Broadcom NetXtreme II "
82 "BCM57710/57711/57711E/57712/57712E Driver");
81MODULE_LICENSE("GPL"); 83MODULE_LICENSE("GPL");
82MODULE_VERSION(DRV_MODULE_VERSION); 84MODULE_VERSION(DRV_MODULE_VERSION);
83MODULE_FIRMWARE(FW_FILE_NAME_E1); 85MODULE_FIRMWARE(FW_FILE_NAME_E1);
84MODULE_FIRMWARE(FW_FILE_NAME_E1H); 86MODULE_FIRMWARE(FW_FILE_NAME_E1H);
87MODULE_FIRMWARE(FW_FILE_NAME_E2);
85 88
86static int multi_mode = 1; 89static int multi_mode = 1;
87module_param(multi_mode, int, 0); 90module_param(multi_mode, int, 0);
88MODULE_PARM_DESC(multi_mode, " Multi queue mode " 91MODULE_PARM_DESC(multi_mode, " Multi queue mode "
89 "(0 Disable; 1 Enable (default))"); 92 "(0 Disable; 1 Enable (default))");
90 93
91static int num_queues; 94int num_queues;
92module_param(num_queues, int, 0); 95module_param(num_queues, int, 0);
93MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1" 96MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
94 " (default is as a number of CPUs)"); 97 " (default is as a number of CPUs)");
@@ -120,10 +123,16 @@ MODULE_PARM_DESC(debug, " Default debug msglevel");
120 123
121static struct workqueue_struct *bnx2x_wq; 124static struct workqueue_struct *bnx2x_wq;
122 125
126#ifdef BCM_CNIC
127static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
128#endif
129
123enum bnx2x_board_type { 130enum bnx2x_board_type {
124 BCM57710 = 0, 131 BCM57710 = 0,
125 BCM57711 = 1, 132 BCM57711 = 1,
126 BCM57711E = 2, 133 BCM57711E = 2,
134 BCM57712 = 3,
135 BCM57712E = 4
127}; 136};
128 137
129/* indexed by board_type, above */ 138/* indexed by board_type, above */
@@ -132,14 +141,17 @@ static struct {
132} board_info[] __devinitdata = { 141} board_info[] __devinitdata = {
133 { "Broadcom NetXtreme II BCM57710 XGb" }, 142 { "Broadcom NetXtreme II BCM57710 XGb" },
134 { "Broadcom NetXtreme II BCM57711 XGb" }, 143 { "Broadcom NetXtreme II BCM57711 XGb" },
135 { "Broadcom NetXtreme II BCM57711E XGb" } 144 { "Broadcom NetXtreme II BCM57711E XGb" },
145 { "Broadcom NetXtreme II BCM57712 XGb" },
146 { "Broadcom NetXtreme II BCM57712E XGb" }
136}; 147};
137 148
138
139static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { 149static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
140 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, 150 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, 151 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, 152 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
153 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
154 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
143 { 0 } 155 { 0 }
144}; 156};
145 157
@@ -149,10 +161,248 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
149* General service functions 161* General service functions
150****************************************************************************/ 162****************************************************************************/
151 163
164static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
165 u32 addr, dma_addr_t mapping)
166{
167 REG_WR(bp, addr, U64_LO(mapping));
168 REG_WR(bp, addr + 4, U64_HI(mapping));
169}
170
171static inline void __storm_memset_fill(struct bnx2x *bp,
172 u32 addr, size_t size, u32 val)
173{
174 int i;
175 for (i = 0; i < size/4; i++)
176 REG_WR(bp, addr + (i * 4), val);
177}
178
179static inline void storm_memset_ustats_zero(struct bnx2x *bp,
180 u8 port, u16 stat_id)
181{
182 size_t size = sizeof(struct ustorm_per_client_stats);
183
184 u32 addr = BAR_USTRORM_INTMEM +
185 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
186
187 __storm_memset_fill(bp, addr, size, 0);
188}
189
190static inline void storm_memset_tstats_zero(struct bnx2x *bp,
191 u8 port, u16 stat_id)
192{
193 size_t size = sizeof(struct tstorm_per_client_stats);
194
195 u32 addr = BAR_TSTRORM_INTMEM +
196 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
197
198 __storm_memset_fill(bp, addr, size, 0);
199}
200
201static inline void storm_memset_xstats_zero(struct bnx2x *bp,
202 u8 port, u16 stat_id)
203{
204 size_t size = sizeof(struct xstorm_per_client_stats);
205
206 u32 addr = BAR_XSTRORM_INTMEM +
207 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
208
209 __storm_memset_fill(bp, addr, size, 0);
210}
211
212
213static inline void storm_memset_spq_addr(struct bnx2x *bp,
214 dma_addr_t mapping, u16 abs_fid)
215{
216 u32 addr = XSEM_REG_FAST_MEMORY +
217 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
218
219 __storm_memset_dma_mapping(bp, addr, mapping);
220}
221
222static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
223{
224 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
225}
226
227static inline void storm_memset_func_cfg(struct bnx2x *bp,
228 struct tstorm_eth_function_common_config *tcfg,
229 u16 abs_fid)
230{
231 size_t size = sizeof(struct tstorm_eth_function_common_config);
232
233 u32 addr = BAR_TSTRORM_INTMEM +
234 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
235
236 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
237}
238
239static inline void storm_memset_xstats_flags(struct bnx2x *bp,
240 struct stats_indication_flags *flags,
241 u16 abs_fid)
242{
243 size_t size = sizeof(struct stats_indication_flags);
244
245 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
246
247 __storm_memset_struct(bp, addr, size, (u32 *)flags);
248}
249
250static inline void storm_memset_tstats_flags(struct bnx2x *bp,
251 struct stats_indication_flags *flags,
252 u16 abs_fid)
253{
254 size_t size = sizeof(struct stats_indication_flags);
255
256 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
257
258 __storm_memset_struct(bp, addr, size, (u32 *)flags);
259}
260
261static inline void storm_memset_ustats_flags(struct bnx2x *bp,
262 struct stats_indication_flags *flags,
263 u16 abs_fid)
264{
265 size_t size = sizeof(struct stats_indication_flags);
266
267 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
268
269 __storm_memset_struct(bp, addr, size, (u32 *)flags);
270}
271
272static inline void storm_memset_cstats_flags(struct bnx2x *bp,
273 struct stats_indication_flags *flags,
274 u16 abs_fid)
275{
276 size_t size = sizeof(struct stats_indication_flags);
277
278 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
279
280 __storm_memset_struct(bp, addr, size, (u32 *)flags);
281}
282
283static inline void storm_memset_xstats_addr(struct bnx2x *bp,
284 dma_addr_t mapping, u16 abs_fid)
285{
286 u32 addr = BAR_XSTRORM_INTMEM +
287 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
288
289 __storm_memset_dma_mapping(bp, addr, mapping);
290}
291
292static inline void storm_memset_tstats_addr(struct bnx2x *bp,
293 dma_addr_t mapping, u16 abs_fid)
294{
295 u32 addr = BAR_TSTRORM_INTMEM +
296 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
297
298 __storm_memset_dma_mapping(bp, addr, mapping);
299}
300
301static inline void storm_memset_ustats_addr(struct bnx2x *bp,
302 dma_addr_t mapping, u16 abs_fid)
303{
304 u32 addr = BAR_USTRORM_INTMEM +
305 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
306
307 __storm_memset_dma_mapping(bp, addr, mapping);
308}
309
310static inline void storm_memset_cstats_addr(struct bnx2x *bp,
311 dma_addr_t mapping, u16 abs_fid)
312{
313 u32 addr = BAR_CSTRORM_INTMEM +
314 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
315
316 __storm_memset_dma_mapping(bp, addr, mapping);
317}
318
319static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
320 u16 pf_id)
321{
322 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
323 pf_id);
324 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
325 pf_id);
326 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
327 pf_id);
328 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
329 pf_id);
330}
331
332static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
333 u8 enable)
334{
335 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
336 enable);
337 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
338 enable);
339 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
340 enable);
341 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
342 enable);
343}
344
345static inline void storm_memset_eq_data(struct bnx2x *bp,
346 struct event_ring_data *eq_data,
347 u16 pfid)
348{
349 size_t size = sizeof(struct event_ring_data);
350
351 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
352
353 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
354}
355
356static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
357 u16 pfid)
358{
359 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
360 REG_WR16(bp, addr, eq_prod);
361}
362
363static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
364 u16 fw_sb_id, u8 sb_index,
365 u8 ticks)
366{
367
368 int index_offset = CHIP_IS_E2(bp) ?
369 offsetof(struct hc_status_block_data_e2, index_data) :
370 offsetof(struct hc_status_block_data_e1x, index_data);
371 u32 addr = BAR_CSTRORM_INTMEM +
372 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
373 index_offset +
374 sizeof(struct hc_index_data)*sb_index +
375 offsetof(struct hc_index_data, timeout);
376 REG_WR8(bp, addr, ticks);
377 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
378 port, fw_sb_id, sb_index, ticks);
379}
380static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
381 u16 fw_sb_id, u8 sb_index,
382 u8 disable)
383{
384 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
385 int index_offset = CHIP_IS_E2(bp) ?
386 offsetof(struct hc_status_block_data_e2, index_data) :
387 offsetof(struct hc_status_block_data_e1x, index_data);
388 u32 addr = BAR_CSTRORM_INTMEM +
389 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
390 index_offset +
391 sizeof(struct hc_index_data)*sb_index +
392 offsetof(struct hc_index_data, flags);
393 u16 flags = REG_RD16(bp, addr);
394 /* clear and set */
395 flags &= ~HC_INDEX_DATA_HC_ENABLED;
396 flags |= enable_flag;
397 REG_WR16(bp, addr, flags);
398 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
399 port, fw_sb_id, sb_index, disable);
400}
401
152/* used only at init 402/* used only at init
153 * locking is done by mcp 403 * locking is done by mcp
154 */ 404 */
155void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val) 405static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
156{ 406{
157 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); 407 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val); 408 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
@@ -172,6 +422,76 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
172 return val; 422 return val;
173} 423}
174 424
425#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
426#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
427#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
428#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
429#define DMAE_DP_DST_NONE "dst_addr [none]"
430
431static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
432 int msglvl)
433{
434 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
435
436 switch (dmae->opcode & DMAE_COMMAND_DST) {
437 case DMAE_CMD_DST_PCI:
438 if (src_type == DMAE_CMD_SRC_PCI)
439 DP(msglvl, "DMAE: opcode 0x%08x\n"
440 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
441 "comp_addr [%x:%08x], comp_val 0x%08x\n",
442 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
443 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
444 dmae->comp_addr_hi, dmae->comp_addr_lo,
445 dmae->comp_val);
446 else
447 DP(msglvl, "DMAE: opcode 0x%08x\n"
448 "src [%08x], len [%d*4], dst [%x:%08x]\n"
449 "comp_addr [%x:%08x], comp_val 0x%08x\n",
450 dmae->opcode, dmae->src_addr_lo >> 2,
451 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
452 dmae->comp_addr_hi, dmae->comp_addr_lo,
453 dmae->comp_val);
454 break;
455 case DMAE_CMD_DST_GRC:
456 if (src_type == DMAE_CMD_SRC_PCI)
457 DP(msglvl, "DMAE: opcode 0x%08x\n"
458 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
459 "comp_addr [%x:%08x], comp_val 0x%08x\n",
460 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
461 dmae->len, dmae->dst_addr_lo >> 2,
462 dmae->comp_addr_hi, dmae->comp_addr_lo,
463 dmae->comp_val);
464 else
465 DP(msglvl, "DMAE: opcode 0x%08x\n"
466 "src [%08x], len [%d*4], dst [%08x]\n"
467 "comp_addr [%x:%08x], comp_val 0x%08x\n",
468 dmae->opcode, dmae->src_addr_lo >> 2,
469 dmae->len, dmae->dst_addr_lo >> 2,
470 dmae->comp_addr_hi, dmae->comp_addr_lo,
471 dmae->comp_val);
472 break;
473 default:
474 if (src_type == DMAE_CMD_SRC_PCI)
475 DP(msglvl, "DMAE: opcode 0x%08x\n"
476 DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
477 "dst_addr [none]\n"
478 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
479 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
480 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
481 dmae->comp_val);
482 else
483 DP(msglvl, "DMAE: opcode 0x%08x\n"
484 DP_LEVEL "src_addr [%08x] len [%d * 4] "
485 "dst_addr [none]\n"
486 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
487 dmae->opcode, dmae->src_addr_lo >> 2,
488 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
489 dmae->comp_val);
490 break;
491 }
492
493}
494
175const u32 dmae_reg_go_c[] = { 495const u32 dmae_reg_go_c[] = {
176 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, 496 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
177 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, 497 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
@@ -195,85 +515,139 @@ void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
195 REG_WR(bp, dmae_reg_go_c[idx], 1); 515 REG_WR(bp, dmae_reg_go_c[idx], 1);
196} 516}
197 517
198void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, 518u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
199 u32 len32)
200{ 519{
201 struct dmae_command dmae; 520 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
202 u32 *wb_comp = bnx2x_sp(bp, wb_comp); 521 DMAE_CMD_C_ENABLE);
203 int cnt = 200; 522}
204 523
205 if (!bp->dmae_ready) { 524u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
206 u32 *data = bnx2x_sp(bp, wb_data[0]); 525{
526 return opcode & ~DMAE_CMD_SRC_RESET;
527}
207 528
208 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)" 529u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
209 " using indirect\n", dst_addr, len32); 530 bool with_comp, u8 comp_type)
210 bnx2x_init_ind_wr(bp, dst_addr, data, len32); 531{
211 return; 532 u32 opcode = 0;
212 }
213 533
214 memset(&dmae, 0, sizeof(struct dmae_command)); 534 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
535 (dst_type << DMAE_COMMAND_DST_SHIFT));
536
537 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
538
539 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
540 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
541 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
542 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
215 543
216 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
217 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
218 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
219#ifdef __BIG_ENDIAN 544#ifdef __BIG_ENDIAN
220 DMAE_CMD_ENDIANITY_B_DW_SWAP | 545 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
221#else 546#else
222 DMAE_CMD_ENDIANITY_DW_SWAP | 547 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
223#endif 548#endif
224 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | 549 if (with_comp)
225 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); 550 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
226 dmae.src_addr_lo = U64_LO(dma_addr); 551 return opcode;
227 dmae.src_addr_hi = U64_HI(dma_addr); 552}
228 dmae.dst_addr_lo = dst_addr >> 2; 553
229 dmae.dst_addr_hi = 0; 554static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
230 dmae.len = len32; 555 struct dmae_command *dmae,
231 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp)); 556 u8 src_type, u8 dst_type)
232 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp)); 557{
233 dmae.comp_val = DMAE_COMP_VAL; 558 memset(dmae, 0, sizeof(struct dmae_command));
234 559
235 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n" 560 /* set the opcode */
236 DP_LEVEL "src_addr [%x:%08x] len [%d *4] " 561 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
237 "dst_addr [%x:%08x (%08x)]\n" 562 true, DMAE_COMP_PCI);
238 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n", 563
239 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo, 564 /* fill in the completion parameters */
240 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr, 565 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
241 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val); 566 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
242 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n", 567 dmae->comp_val = DMAE_COMP_VAL;
568}
569
570/* issue a dmae command over the init-channel and wailt for completion */
571static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
572 struct dmae_command *dmae)
573{
574 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
575 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
576 int rc = 0;
577
578 DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
243 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], 579 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
244 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); 580 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
245 581
246 mutex_lock(&bp->dmae_mutex); 582 /* lock the dmae channel */
583 spin_lock_bh(&bp->dmae_lock);
247 584
585 /* reset completion */
248 *wb_comp = 0; 586 *wb_comp = 0;
249 587
250 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp)); 588 /* post the command on the channel used for initializations */
589 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
251 590
591 /* wait for completion */
252 udelay(5); 592 udelay(5);
253 593 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
254 while (*wb_comp != DMAE_COMP_VAL) {
255 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp); 594 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
256 595
257 if (!cnt) { 596 if (!cnt) {
258 BNX2X_ERR("DMAE timeout!\n"); 597 BNX2X_ERR("DMAE timeout!\n");
259 break; 598 rc = DMAE_TIMEOUT;
599 goto unlock;
260 } 600 }
261 cnt--; 601 cnt--;
262 /* adjust delay for emulation/FPGA */ 602 udelay(50);
263 if (CHIP_REV_IS_SLOW(bp)) 603 }
264 msleep(100); 604 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
265 else 605 BNX2X_ERR("DMAE PCI error!\n");
266 udelay(5); 606 rc = DMAE_PCI_ERROR;
267 } 607 }
268 608
269 mutex_unlock(&bp->dmae_mutex); 609 DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
610 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
611 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
612
613unlock:
614 spin_unlock_bh(&bp->dmae_lock);
615 return rc;
616}
617
618void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
619 u32 len32)
620{
621 struct dmae_command dmae;
622
623 if (!bp->dmae_ready) {
624 u32 *data = bnx2x_sp(bp, wb_data[0]);
625
626 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
627 " using indirect\n", dst_addr, len32);
628 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
629 return;
630 }
631
632 /* set opcode and fixed command fields */
633 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
634
635 /* fill in addresses and len */
636 dmae.src_addr_lo = U64_LO(dma_addr);
637 dmae.src_addr_hi = U64_HI(dma_addr);
638 dmae.dst_addr_lo = dst_addr >> 2;
639 dmae.dst_addr_hi = 0;
640 dmae.len = len32;
641
642 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
643
644 /* issue the command and wait for completion */
645 bnx2x_issue_dmae_with_comp(bp, &dmae);
270} 646}
271 647
272void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) 648void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
273{ 649{
274 struct dmae_command dmae; 650 struct dmae_command dmae;
275 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
276 int cnt = 200;
277 651
278 if (!bp->dmae_ready) { 652 if (!bp->dmae_ready) {
279 u32 *data = bnx2x_sp(bp, wb_data[0]); 653 u32 *data = bnx2x_sp(bp, wb_data[0]);
@@ -286,66 +660,24 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
286 return; 660 return;
287 } 661 }
288 662
289 memset(&dmae, 0, sizeof(struct dmae_command)); 663 /* set opcode and fixed command fields */
664 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
290 665
291 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 666 /* fill in addresses and len */
292 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
293 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
294#ifdef __BIG_ENDIAN
295 DMAE_CMD_ENDIANITY_B_DW_SWAP |
296#else
297 DMAE_CMD_ENDIANITY_DW_SWAP |
298#endif
299 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
300 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
301 dmae.src_addr_lo = src_addr >> 2; 667 dmae.src_addr_lo = src_addr >> 2;
302 dmae.src_addr_hi = 0; 668 dmae.src_addr_hi = 0;
303 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data)); 669 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
304 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); 670 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
305 dmae.len = len32; 671 dmae.len = len32;
306 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
307 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
308 dmae.comp_val = DMAE_COMP_VAL;
309
310 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
311 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
312 "dst_addr [%x:%08x (%08x)]\n"
313 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
314 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
315 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
316 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
317
318 mutex_lock(&bp->dmae_mutex);
319 672
320 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4); 673 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
321 *wb_comp = 0;
322
323 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
324
325 udelay(5);
326
327 while (*wb_comp != DMAE_COMP_VAL) {
328
329 if (!cnt) {
330 BNX2X_ERR("DMAE timeout!\n");
331 break;
332 }
333 cnt--;
334 /* adjust delay for emulation/FPGA */
335 if (CHIP_REV_IS_SLOW(bp))
336 msleep(100);
337 else
338 udelay(5);
339 }
340 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
341 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
342 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
343 674
344 mutex_unlock(&bp->dmae_mutex); 675 /* issue the command and wait for completion */
676 bnx2x_issue_dmae_with_comp(bp, &dmae);
345} 677}
346 678
347void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, 679static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
348 u32 addr, u32 len) 680 u32 addr, u32 len)
349{ 681{
350 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp); 682 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
351 int offset = 0; 683 int offset = 0;
@@ -508,19 +840,24 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
508 u32 mark, offset; 840 u32 mark, offset;
509 __be32 data[9]; 841 __be32 data[9];
510 int word; 842 int word;
511 843 u32 trace_shmem_base;
512 if (BP_NOMCP(bp)) { 844 if (BP_NOMCP(bp)) {
513 BNX2X_ERR("NO MCP - can not dump\n"); 845 BNX2X_ERR("NO MCP - can not dump\n");
514 return; 846 return;
515 } 847 }
516 848
517 addr = bp->common.shmem_base - 0x0800 + 4; 849 if (BP_PATH(bp) == 0)
850 trace_shmem_base = bp->common.shmem_base;
851 else
852 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
853 addr = trace_shmem_base - 0x0800 + 4;
518 mark = REG_RD(bp, addr); 854 mark = REG_RD(bp, addr);
519 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000; 855 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
856 + ((mark + 0x3) & ~0x3) - 0x08000000;
520 pr_err("begin fw dump (mark 0x%x)\n", mark); 857 pr_err("begin fw dump (mark 0x%x)\n", mark);
521 858
522 pr_err(""); 859 pr_err("");
523 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) { 860 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
524 for (word = 0; word < 8; word++) 861 for (word = 0; word < 8; word++)
525 data[word] = htonl(REG_RD(bp, offset + 4*word)); 862 data[word] = htonl(REG_RD(bp, offset + 4*word));
526 data[8] = 0x0; 863 data[8] = 0x0;
@@ -538,7 +875,12 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
538void bnx2x_panic_dump(struct bnx2x *bp) 875void bnx2x_panic_dump(struct bnx2x *bp)
539{ 876{
540 int i; 877 int i;
541 u16 j, start, end; 878 u16 j;
879 struct hc_sp_status_block_data sp_sb_data;
880 int func = BP_FUNC(bp);
881#ifdef BNX2X_STOP_ON_ERROR
882 u16 start = 0, end = 0;
883#endif
542 884
543 bp->stats_state = STATS_STATE_DISABLED; 885 bp->stats_state = STATS_STATE_DISABLED;
544 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); 886 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
@@ -547,47 +889,150 @@ void bnx2x_panic_dump(struct bnx2x *bp)
547 889
548 /* Indices */ 890 /* Indices */
549 /* Common */ 891 /* Common */
550 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)" 892 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
551 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
552 " spq_prod_idx(0x%x)\n", 893 " spq_prod_idx(0x%x)\n",
553 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx, 894 bp->def_idx, bp->def_att_idx,
554 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); 895 bp->attn_state, bp->spq_prod_idx);
555 896 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
556 /* Rx */ 897 bp->def_status_blk->atten_status_block.attn_bits,
557 for_each_queue(bp, i) { 898 bp->def_status_blk->atten_status_block.attn_bits_ack,
899 bp->def_status_blk->atten_status_block.status_block_id,
900 bp->def_status_blk->atten_status_block.attn_bits_index);
901 BNX2X_ERR(" def (");
902 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
903 pr_cont("0x%x%s",
904 bp->def_status_blk->sp_sb.index_values[i],
905 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
906
907 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
908 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
909 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
910 i*sizeof(u32));
911
912 pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
913 "pf_id(0x%x) vnic_id(0x%x) "
914 "vf_id(0x%x) vf_valid (0x%x)\n",
915 sp_sb_data.igu_sb_id,
916 sp_sb_data.igu_seg_id,
917 sp_sb_data.p_func.pf_id,
918 sp_sb_data.p_func.vnic_id,
919 sp_sb_data.p_func.vf_id,
920 sp_sb_data.p_func.vf_valid);
921
922
923 for_each_eth_queue(bp, i) {
558 struct bnx2x_fastpath *fp = &bp->fp[i]; 924 struct bnx2x_fastpath *fp = &bp->fp[i];
559 925 int loop;
926 struct hc_status_block_data_e2 sb_data_e2;
927 struct hc_status_block_data_e1x sb_data_e1x;
928 struct hc_status_block_sm *hc_sm_p =
929 CHIP_IS_E2(bp) ?
930 sb_data_e2.common.state_machine :
931 sb_data_e1x.common.state_machine;
932 struct hc_index_data *hc_index_p =
933 CHIP_IS_E2(bp) ?
934 sb_data_e2.index_data :
935 sb_data_e1x.index_data;
936 int data_size;
937 u32 *sb_data_p;
938
939 /* Rx */
560 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)" 940 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
561 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)" 941 " rx_comp_prod(0x%x)"
562 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", 942 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
563 i, fp->rx_bd_prod, fp->rx_bd_cons, 943 i, fp->rx_bd_prod, fp->rx_bd_cons,
564 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod, 944 fp->rx_comp_prod,
565 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); 945 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
566 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)" 946 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
567 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n", 947 " fp_hc_idx(0x%x)\n",
568 fp->rx_sge_prod, fp->last_max_sge, 948 fp->rx_sge_prod, fp->last_max_sge,
569 le16_to_cpu(fp->fp_u_idx), 949 le16_to_cpu(fp->fp_hc_idx));
570 fp->status_blk->u_status_block.status_block_index);
571 }
572
573 /* Tx */
574 for_each_queue(bp, i) {
575 struct bnx2x_fastpath *fp = &bp->fp[i];
576 950
951 /* Tx */
577 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)" 952 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
578 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)" 953 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
579 " *tx_cons_sb(0x%x)\n", 954 " *tx_cons_sb(0x%x)\n",
580 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, 955 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
581 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); 956 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
582 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)" 957
583 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx), 958 loop = CHIP_IS_E2(bp) ?
584 fp->status_blk->c_status_block.status_block_index, 959 HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
585 fp->tx_db.data.prod); 960
961 /* host sb data */
962
963#ifdef BCM_CNIC
964 if (IS_FCOE_FP(fp))
965 continue;
966#endif
967 BNX2X_ERR(" run indexes (");
968 for (j = 0; j < HC_SB_MAX_SM; j++)
969 pr_cont("0x%x%s",
970 fp->sb_running_index[j],
971 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
972
973 BNX2X_ERR(" indexes (");
974 for (j = 0; j < loop; j++)
975 pr_cont("0x%x%s",
976 fp->sb_index_values[j],
977 (j == loop - 1) ? ")" : " ");
978 /* fw sb data */
979 data_size = CHIP_IS_E2(bp) ?
980 sizeof(struct hc_status_block_data_e2) :
981 sizeof(struct hc_status_block_data_e1x);
982 data_size /= sizeof(u32);
983 sb_data_p = CHIP_IS_E2(bp) ?
984 (u32 *)&sb_data_e2 :
985 (u32 *)&sb_data_e1x;
986 /* copy sb data in here */
987 for (j = 0; j < data_size; j++)
988 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
989 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
990 j * sizeof(u32));
991
992 if (CHIP_IS_E2(bp)) {
993 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
994 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
995 sb_data_e2.common.p_func.pf_id,
996 sb_data_e2.common.p_func.vf_id,
997 sb_data_e2.common.p_func.vf_valid,
998 sb_data_e2.common.p_func.vnic_id,
999 sb_data_e2.common.same_igu_sb_1b);
1000 } else {
1001 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
1002 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
1003 sb_data_e1x.common.p_func.pf_id,
1004 sb_data_e1x.common.p_func.vf_id,
1005 sb_data_e1x.common.p_func.vf_valid,
1006 sb_data_e1x.common.p_func.vnic_id,
1007 sb_data_e1x.common.same_igu_sb_1b);
1008 }
1009
1010 /* SB_SMs data */
1011 for (j = 0; j < HC_SB_MAX_SM; j++) {
1012 pr_cont("SM[%d] __flags (0x%x) "
1013 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
1014 "time_to_expire (0x%x) "
1015 "timer_value(0x%x)\n", j,
1016 hc_sm_p[j].__flags,
1017 hc_sm_p[j].igu_sb_id,
1018 hc_sm_p[j].igu_seg_id,
1019 hc_sm_p[j].time_to_expire,
1020 hc_sm_p[j].timer_value);
1021 }
1022
1023 /* Indecies data */
1024 for (j = 0; j < loop; j++) {
1025 pr_cont("INDEX[%d] flags (0x%x) "
1026 "timeout (0x%x)\n", j,
1027 hc_index_p[j].flags,
1028 hc_index_p[j].timeout);
1029 }
586 } 1030 }
587 1031
1032#ifdef BNX2X_STOP_ON_ERROR
588 /* Rings */ 1033 /* Rings */
589 /* Rx */ 1034 /* Rx */
590 for_each_queue(bp, i) { 1035 for_each_rx_queue(bp, i) {
591 struct bnx2x_fastpath *fp = &bp->fp[i]; 1036 struct bnx2x_fastpath *fp = &bp->fp[i];
592 1037
593 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); 1038 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -621,7 +1066,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
621 } 1066 }
622 1067
623 /* Tx */ 1068 /* Tx */
624 for_each_queue(bp, i) { 1069 for_each_tx_queue(bp, i) {
625 struct bnx2x_fastpath *fp = &bp->fp[i]; 1070 struct bnx2x_fastpath *fp = &bp->fp[i];
626 1071
627 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); 1072 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
@@ -642,13 +1087,13 @@ void bnx2x_panic_dump(struct bnx2x *bp)
642 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]); 1087 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
643 } 1088 }
644 } 1089 }
645 1090#endif
646 bnx2x_fw_dump(bp); 1091 bnx2x_fw_dump(bp);
647 bnx2x_mc_assert(bp); 1092 bnx2x_mc_assert(bp);
648 BNX2X_ERR("end crash dump -----------------\n"); 1093 BNX2X_ERR("end crash dump -----------------\n");
649} 1094}
650 1095
651void bnx2x_int_enable(struct bnx2x *bp) 1096static void bnx2x_hc_int_enable(struct bnx2x *bp)
652{ 1097{
653 int port = BP_PORT(bp); 1098 int port = BP_PORT(bp);
654 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 1099 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -672,14 +1117,19 @@ void bnx2x_int_enable(struct bnx2x *bp)
672 HC_CONFIG_0_REG_INT_LINE_EN_0 | 1117 HC_CONFIG_0_REG_INT_LINE_EN_0 |
673 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 1118 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
674 1119
675 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", 1120 if (!CHIP_IS_E1(bp)) {
676 val, port, addr); 1121 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1122 val, port, addr);
677 1123
678 REG_WR(bp, addr, val); 1124 REG_WR(bp, addr, val);
679 1125
680 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 1126 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1127 }
681 } 1128 }
682 1129
1130 if (CHIP_IS_E1(bp))
1131 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1132
683 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", 1133 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
684 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); 1134 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
685 1135
@@ -690,9 +1140,9 @@ void bnx2x_int_enable(struct bnx2x *bp)
690 mmiowb(); 1140 mmiowb();
691 barrier(); 1141 barrier();
692 1142
693 if (CHIP_IS_E1H(bp)) { 1143 if (!CHIP_IS_E1(bp)) {
694 /* init leading/trailing edge */ 1144 /* init leading/trailing edge */
695 if (IS_E1HMF(bp)) { 1145 if (IS_MF(bp)) {
696 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); 1146 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
697 if (bp->port.pmf) 1147 if (bp->port.pmf)
698 /* enable nig and gpio3 attention */ 1148 /* enable nig and gpio3 attention */
@@ -708,16 +1158,91 @@ void bnx2x_int_enable(struct bnx2x *bp)
708 mmiowb(); 1158 mmiowb();
709} 1159}
710 1160
711static void bnx2x_int_disable(struct bnx2x *bp) 1161static void bnx2x_igu_int_enable(struct bnx2x *bp)
1162{
1163 u32 val;
1164 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1165 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1166
1167 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1168
1169 if (msix) {
1170 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1171 IGU_PF_CONF_SINGLE_ISR_EN);
1172 val |= (IGU_PF_CONF_FUNC_EN |
1173 IGU_PF_CONF_MSI_MSIX_EN |
1174 IGU_PF_CONF_ATTN_BIT_EN);
1175 } else if (msi) {
1176 val &= ~IGU_PF_CONF_INT_LINE_EN;
1177 val |= (IGU_PF_CONF_FUNC_EN |
1178 IGU_PF_CONF_MSI_MSIX_EN |
1179 IGU_PF_CONF_ATTN_BIT_EN |
1180 IGU_PF_CONF_SINGLE_ISR_EN);
1181 } else {
1182 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1183 val |= (IGU_PF_CONF_FUNC_EN |
1184 IGU_PF_CONF_INT_LINE_EN |
1185 IGU_PF_CONF_ATTN_BIT_EN |
1186 IGU_PF_CONF_SINGLE_ISR_EN);
1187 }
1188
1189 DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
1190 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1191
1192 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1193
1194 barrier();
1195
1196 /* init leading/trailing edge */
1197 if (IS_MF(bp)) {
1198 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1199 if (bp->port.pmf)
1200 /* enable nig and gpio3 attention */
1201 val |= 0x1100;
1202 } else
1203 val = 0xffff;
1204
1205 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1206 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1207
1208 /* Make sure that interrupts are indeed enabled from here on */
1209 mmiowb();
1210}
1211
1212void bnx2x_int_enable(struct bnx2x *bp)
1213{
1214 if (bp->common.int_block == INT_BLOCK_HC)
1215 bnx2x_hc_int_enable(bp);
1216 else
1217 bnx2x_igu_int_enable(bp);
1218}
1219
1220static void bnx2x_hc_int_disable(struct bnx2x *bp)
712{ 1221{
713 int port = BP_PORT(bp); 1222 int port = BP_PORT(bp);
714 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 1223 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
715 u32 val = REG_RD(bp, addr); 1224 u32 val = REG_RD(bp, addr);
716 1225
717 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 1226 /*
718 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 1227 * in E1 we must use only PCI configuration space to disable
719 HC_CONFIG_0_REG_INT_LINE_EN_0 | 1228 * MSI/MSIX capablility
720 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 1229 * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
1230 */
1231 if (CHIP_IS_E1(bp)) {
1232 /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
1233 * Use mask register to prevent from HC sending interrupts
1234 * after we exit the function
1235 */
1236 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
1237
1238 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1239 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1240 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1241 } else
1242 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1243 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1244 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1245 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
721 1246
722 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", 1247 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
723 val, port, addr); 1248 val, port, addr);
@@ -730,6 +1255,32 @@ static void bnx2x_int_disable(struct bnx2x *bp)
730 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 1255 BNX2X_ERR("BUG! proper val not read from IGU!\n");
731} 1256}
732 1257
1258static void bnx2x_igu_int_disable(struct bnx2x *bp)
1259{
1260 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1261
1262 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1263 IGU_PF_CONF_INT_LINE_EN |
1264 IGU_PF_CONF_ATTN_BIT_EN);
1265
1266 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1267
1268 /* flush all outstanding writes */
1269 mmiowb();
1270
1271 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1272 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1273 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1274}
1275
1276static void bnx2x_int_disable(struct bnx2x *bp)
1277{
1278 if (bp->common.int_block == INT_BLOCK_HC)
1279 bnx2x_hc_int_disable(bp);
1280 else
1281 bnx2x_igu_int_disable(bp);
1282}
1283
733void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) 1284void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
734{ 1285{
735 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 1286 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@ -750,7 +1301,7 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
750#ifdef BCM_CNIC 1301#ifdef BCM_CNIC
751 offset++; 1302 offset++;
752#endif 1303#endif
753 for_each_queue(bp, i) 1304 for_each_eth_queue(bp, i)
754 synchronize_irq(bp->msix_table[i + offset].vector); 1305 synchronize_irq(bp->msix_table[i + offset].vector);
755 } else 1306 } else
756 synchronize_irq(bp->pdev->irq); 1307 synchronize_irq(bp->pdev->irq);
@@ -781,7 +1332,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
781 DP(NETIF_MSG_HW, 1332 DP(NETIF_MSG_HW,
782 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 1333 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
783 resource, HW_LOCK_MAX_RESOURCE_VALUE); 1334 resource, HW_LOCK_MAX_RESOURCE_VALUE);
784 return -EINVAL; 1335 return false;
785 } 1336 }
786 1337
787 if (func <= 5) 1338 if (func <= 5)
@@ -800,7 +1351,6 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
800 return false; 1351 return false;
801} 1352}
802 1353
803
804#ifdef BCM_CNIC 1354#ifdef BCM_CNIC
805static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid); 1355static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
806#endif 1356#endif
@@ -817,76 +1367,35 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
817 fp->index, cid, command, bp->state, 1367 fp->index, cid, command, bp->state,
818 rr_cqe->ramrod_cqe.ramrod_type); 1368 rr_cqe->ramrod_cqe.ramrod_type);
819 1369
820 bp->spq_left++; 1370 switch (command | fp->state) {
821 1371 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
822 if (fp->index) { 1372 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
823 switch (command | fp->state) { 1373 fp->state = BNX2X_FP_STATE_OPEN;
824 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
825 BNX2X_FP_STATE_OPENING):
826 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
827 cid);
828 fp->state = BNX2X_FP_STATE_OPEN;
829 break;
830
831 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
832 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
833 cid);
834 fp->state = BNX2X_FP_STATE_HALTED;
835 break;
836
837 default:
838 BNX2X_ERR("unexpected MC reply (%d) "
839 "fp[%d] state is %x\n",
840 command, fp->index, fp->state);
841 break;
842 }
843 mb(); /* force bnx2x_wait_ramrod() to see the change */
844 return;
845 }
846
847 switch (command | bp->state) {
848 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
849 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
850 bp->state = BNX2X_STATE_OPEN;
851 break; 1374 break;
852 1375
853 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT): 1376 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
854 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n"); 1377 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
855 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
856 fp->state = BNX2X_FP_STATE_HALTED; 1378 fp->state = BNX2X_FP_STATE_HALTED;
857 break; 1379 break;
858 1380
859 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT): 1381 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
860 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid); 1382 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
861 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; 1383 fp->state = BNX2X_FP_STATE_TERMINATED;
862 break;
863
864#ifdef BCM_CNIC
865 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
866 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
867 bnx2x_cnic_cfc_comp(bp, cid);
868 break;
869#endif
870
871 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
872 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
873 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
874 bp->set_mac_pending--;
875 smp_wmb();
876 break;
877
878 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
879 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
880 bp->set_mac_pending--;
881 smp_wmb();
882 break; 1384 break;
883 1385
884 default: 1386 default:
885 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n", 1387 BNX2X_ERR("unexpected MC reply (%d) "
886 command, bp->state); 1388 "fp[%d] state is %x\n",
1389 command, fp->index, fp->state);
887 break; 1390 break;
888 } 1391 }
889 mb(); /* force bnx2x_wait_ramrod() to see the change */ 1392
1393 smp_mb__before_atomic_inc();
1394 atomic_inc(&bp->cq_spq_left);
1395 /* push the change in fp->state and towards the memory */
1396 smp_wmb();
1397
1398 return;
890} 1399}
891 1400
892irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) 1401irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
@@ -914,25 +1423,22 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
914 return IRQ_HANDLED; 1423 return IRQ_HANDLED;
915#endif 1424#endif
916 1425
917 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) { 1426 for_each_eth_queue(bp, i) {
918 struct bnx2x_fastpath *fp = &bp->fp[i]; 1427 struct bnx2x_fastpath *fp = &bp->fp[i];
919 1428
920 mask = 0x2 << fp->sb_id; 1429 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
921 if (status & mask) { 1430 if (status & mask) {
922 /* Handle Rx and Tx according to SB id */ 1431 /* Handle Rx and Tx according to SB id */
923 prefetch(fp->rx_cons_sb); 1432 prefetch(fp->rx_cons_sb);
924 prefetch(&fp->status_blk->u_status_block.
925 status_block_index);
926 prefetch(fp->tx_cons_sb); 1433 prefetch(fp->tx_cons_sb);
927 prefetch(&fp->status_blk->c_status_block. 1434 prefetch(&fp->sb_running_index[SM_RX_ID]);
928 status_block_index);
929 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 1435 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
930 status &= ~mask; 1436 status &= ~mask;
931 } 1437 }
932 } 1438 }
933 1439
934#ifdef BCM_CNIC 1440#ifdef BCM_CNIC
935 mask = 0x2 << CNIC_SB_ID(bp); 1441 mask = 0x2;
936 if (status & (mask | 0x1)) { 1442 if (status & (mask | 0x1)) {
937 struct cnic_ops *c_ops = NULL; 1443 struct cnic_ops *c_ops = NULL;
938 1444
@@ -1227,49 +1733,91 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1227 return 0; 1733 return 0;
1228} 1734}
1229 1735
1736int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1737{
1738 u32 sel_phy_idx = 0;
1739 if (bp->link_vars.link_up) {
1740 sel_phy_idx = EXT_PHY1;
1741 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1742 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1743 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1744 sel_phy_idx = EXT_PHY2;
1745 } else {
1746
1747 switch (bnx2x_phy_selection(&bp->link_params)) {
1748 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1749 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1750 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1751 sel_phy_idx = EXT_PHY1;
1752 break;
1753 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1754 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1755 sel_phy_idx = EXT_PHY2;
1756 break;
1757 }
1758 }
1759 /*
1760 * The selected actived PHY is always after swapping (in case PHY
1761 * swapping is enabled). So when swapping is enabled, we need to reverse
1762 * the configuration
1763 */
1764
1765 if (bp->link_params.multi_phy_config &
1766 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1767 if (sel_phy_idx == EXT_PHY1)
1768 sel_phy_idx = EXT_PHY2;
1769 else if (sel_phy_idx == EXT_PHY2)
1770 sel_phy_idx = EXT_PHY1;
1771 }
1772 return LINK_CONFIG_IDX(sel_phy_idx);
1773}
1774
1230void bnx2x_calc_fc_adv(struct bnx2x *bp) 1775void bnx2x_calc_fc_adv(struct bnx2x *bp)
1231{ 1776{
1777 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1232 switch (bp->link_vars.ieee_fc & 1778 switch (bp->link_vars.ieee_fc &
1233 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 1779 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1234 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 1780 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1235 bp->port.advertising &= ~(ADVERTISED_Asym_Pause | 1781 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1236 ADVERTISED_Pause); 1782 ADVERTISED_Pause);
1237 break; 1783 break;
1238 1784
1239 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 1785 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1240 bp->port.advertising |= (ADVERTISED_Asym_Pause | 1786 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1241 ADVERTISED_Pause); 1787 ADVERTISED_Pause);
1242 break; 1788 break;
1243 1789
1244 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 1790 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1245 bp->port.advertising |= ADVERTISED_Asym_Pause; 1791 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1246 break; 1792 break;
1247 1793
1248 default: 1794 default:
1249 bp->port.advertising &= ~(ADVERTISED_Asym_Pause | 1795 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1250 ADVERTISED_Pause); 1796 ADVERTISED_Pause);
1251 break; 1797 break;
1252 } 1798 }
1253} 1799}
1254 1800
1255
1256u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 1801u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1257{ 1802{
1258 if (!BP_NOMCP(bp)) { 1803 if (!BP_NOMCP(bp)) {
1259 u8 rc; 1804 u8 rc;
1260 1805 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1806 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
1261 /* Initialize link parameters structure variables */ 1807 /* Initialize link parameters structure variables */
1262 /* It is recommended to turn off RX FC for jumbo frames 1808 /* It is recommended to turn off RX FC for jumbo frames
1263 for better performance */ 1809 for better performance */
1264 if (bp->dev->mtu > 5000) 1810 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
1265 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; 1811 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1266 else 1812 else
1267 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; 1813 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1268 1814
1269 bnx2x_acquire_phy_lock(bp); 1815 bnx2x_acquire_phy_lock(bp);
1270 1816
1271 if (load_mode == LOAD_DIAG) 1817 if (load_mode == LOAD_DIAG) {
1272 bp->link_params.loopback_mode = LOOPBACK_XGXS_10; 1818 bp->link_params.loopback_mode = LOOPBACK_XGXS;
1819 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1820 }
1273 1821
1274 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 1822 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1275 1823
@@ -1281,7 +1829,7 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1281 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 1829 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1282 bnx2x_link_report(bp); 1830 bnx2x_link_report(bp);
1283 } 1831 }
1284 1832 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
1285 return rc; 1833 return rc;
1286 } 1834 }
1287 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 1835 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
@@ -1292,6 +1840,7 @@ void bnx2x_link_set(struct bnx2x *bp)
1292{ 1840{
1293 if (!BP_NOMCP(bp)) { 1841 if (!BP_NOMCP(bp)) {
1294 bnx2x_acquire_phy_lock(bp); 1842 bnx2x_acquire_phy_lock(bp);
1843 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1295 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 1844 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1296 bnx2x_release_phy_lock(bp); 1845 bnx2x_release_phy_lock(bp);
1297 1846
@@ -1310,13 +1859,14 @@ static void bnx2x__link_reset(struct bnx2x *bp)
1310 BNX2X_ERR("Bootcode is missing - can not reset link\n"); 1859 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1311} 1860}
1312 1861
1313u8 bnx2x_link_test(struct bnx2x *bp) 1862u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1314{ 1863{
1315 u8 rc = 0; 1864 u8 rc = 0;
1316 1865
1317 if (!BP_NOMCP(bp)) { 1866 if (!BP_NOMCP(bp)) {
1318 bnx2x_acquire_phy_lock(bp); 1867 bnx2x_acquire_phy_lock(bp);
1319 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars); 1868 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1869 is_serdes);
1320 bnx2x_release_phy_lock(bp); 1870 bnx2x_release_phy_lock(bp);
1321 } else 1871 } else
1322 BNX2X_ERR("Bootcode is missing - can not test link\n"); 1872 BNX2X_ERR("Bootcode is missing - can not test link\n");
@@ -1371,13 +1921,11 @@ static void bnx2x_init_port_minmax(struct bnx2x *bp)
1371static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) 1921static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1372{ 1922{
1373 int all_zero = 1; 1923 int all_zero = 1;
1374 int port = BP_PORT(bp);
1375 int vn; 1924 int vn;
1376 1925
1377 bp->vn_weight_sum = 0; 1926 bp->vn_weight_sum = 0;
1378 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 1927 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1379 int func = 2*vn + port; 1928 u32 vn_cfg = bp->mf_config[vn];
1380 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1381 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1929 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1382 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1930 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1383 1931
@@ -1405,11 +1953,12 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1405 CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 1953 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1406} 1954}
1407 1955
1408static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func) 1956static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1409{ 1957{
1410 struct rate_shaping_vars_per_vn m_rs_vn; 1958 struct rate_shaping_vars_per_vn m_rs_vn;
1411 struct fairness_vars_per_vn m_fair_vn; 1959 struct fairness_vars_per_vn m_fair_vn;
1412 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 1960 u32 vn_cfg = bp->mf_config[vn];
1961 int func = 2*vn + BP_PORT(bp);
1413 u16 vn_min_rate, vn_max_rate; 1962 u16 vn_min_rate, vn_max_rate;
1414 int i; 1963 int i;
1415 1964
@@ -1419,14 +1968,24 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1419 vn_max_rate = 0; 1968 vn_max_rate = 0;
1420 1969
1421 } else { 1970 } else {
1971 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
1972
1422 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1973 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1423 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1974 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1424 /* If min rate is zero - set it to 1 */ 1975 /* If fairness is enabled (not all min rates are zeroes) and
1425 if (!vn_min_rate) 1976 if current min rate is zero - set it to 1.
1977 This is a requirement of the algorithm. */
1978 if (bp->vn_weight_sum && (vn_min_rate == 0))
1426 vn_min_rate = DEF_MIN_RATE; 1979 vn_min_rate = DEF_MIN_RATE;
1427 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 1980
1428 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 1981 if (IS_MF_SI(bp))
1982 /* maxCfg in percents of linkspeed */
1983 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
1984 else
1985 /* maxCfg is absolute in 100Mb units */
1986 vn_max_rate = maxCfg * 100;
1429 } 1987 }
1988
1430 DP(NETIF_MSG_IFUP, 1989 DP(NETIF_MSG_IFUP,
1431 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n", 1990 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1432 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); 1991 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
@@ -1450,7 +2009,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1450 m_fair_vn.vn_credit_delta = 2009 m_fair_vn.vn_credit_delta =
1451 max_t(u32, (vn_min_rate * (T_FAIR_COEF / 2010 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1452 (8 * bp->vn_weight_sum))), 2011 (8 * bp->vn_weight_sum))),
1453 (bp->cmng.fair_vars.fair_threshold * 2)); 2012 (bp->cmng.fair_vars.fair_threshold +
2013 MIN_ABOVE_THRESH));
1454 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", 2014 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1455 m_fair_vn.vn_credit_delta); 2015 m_fair_vn.vn_credit_delta);
1456 } 2016 }
@@ -1467,11 +2027,103 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1467 ((u32 *)(&m_fair_vn))[i]); 2027 ((u32 *)(&m_fair_vn))[i]);
1468} 2028}
1469 2029
2030static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2031{
2032 if (CHIP_REV_IS_SLOW(bp))
2033 return CMNG_FNS_NONE;
2034 if (IS_MF(bp))
2035 return CMNG_FNS_MINMAX;
2036
2037 return CMNG_FNS_NONE;
2038}
2039
2040void bnx2x_read_mf_cfg(struct bnx2x *bp)
2041{
2042 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2043
2044 if (BP_NOMCP(bp))
2045 return; /* what should be the default bvalue in this case */
2046
2047 /* For 2 port configuration the absolute function number formula
2048 * is:
2049 * abs_func = 2 * vn + BP_PORT + BP_PATH
2050 *
2051 * and there are 4 functions per port
2052 *
2053 * For 4 port configuration it is
2054 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2055 *
2056 * and there are 2 functions per port
2057 */
2058 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2059 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2060
2061 if (func >= E1H_FUNC_MAX)
2062 break;
2063
2064 bp->mf_config[vn] =
2065 MF_CFG_RD(bp, func_mf_config[func].config);
2066 }
2067}
2068
2069static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2070{
2071
2072 if (cmng_type == CMNG_FNS_MINMAX) {
2073 int vn;
2074
2075 /* clear cmng_enables */
2076 bp->cmng.flags.cmng_enables = 0;
2077
2078 /* read mf conf from shmem */
2079 if (read_cfg)
2080 bnx2x_read_mf_cfg(bp);
2081
2082 /* Init rate shaping and fairness contexts */
2083 bnx2x_init_port_minmax(bp);
2084
2085 /* vn_weight_sum and enable fairness if not 0 */
2086 bnx2x_calc_vn_weight_sum(bp);
2087
2088 /* calculate and set min-max rate for each vn */
2089 if (bp->port.pmf)
2090 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2091 bnx2x_init_vn_minmax(bp, vn);
2092
2093 /* always enable rate shaping and fairness */
2094 bp->cmng.flags.cmng_enables |=
2095 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2096 if (!bp->vn_weight_sum)
2097 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2098 " fairness will be disabled\n");
2099 return;
2100 }
2101
2102 /* rate shaping and fairness are disabled */
2103 DP(NETIF_MSG_IFUP,
2104 "rate shaping and fairness are disabled\n");
2105}
2106
2107static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2108{
2109 int port = BP_PORT(bp);
2110 int func;
2111 int vn;
2112
2113 /* Set the attention towards other drivers on the same port */
2114 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2115 if (vn == BP_E1HVN(bp))
2116 continue;
2117
2118 func = ((vn << 1) | port);
2119 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2120 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2121 }
2122}
1470 2123
1471/* This function is called upon link interrupt */ 2124/* This function is called upon link interrupt */
1472static void bnx2x_link_attn(struct bnx2x *bp) 2125static void bnx2x_link_attn(struct bnx2x *bp)
1473{ 2126{
1474 u32 prev_link_status = bp->link_vars.link_status;
1475 /* Make sure that we are synced with the current statistics */ 2127 /* Make sure that we are synced with the current statistics */
1476 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2128 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1477 2129
@@ -1480,7 +2132,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
1480 if (bp->link_vars.link_up) { 2132 if (bp->link_vars.link_up) {
1481 2133
1482 /* dropless flow control */ 2134 /* dropless flow control */
1483 if (CHIP_IS_E1H(bp) && bp->dropless_fc) { 2135 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1484 int port = BP_PORT(bp); 2136 int port = BP_PORT(bp);
1485 u32 pause_enabled = 0; 2137 u32 pause_enabled = 0;
1486 2138
@@ -1504,47 +2156,27 @@ static void bnx2x_link_attn(struct bnx2x *bp)
1504 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2156 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1505 } 2157 }
1506 2158
1507 /* indicate link status only if link status actually changed */ 2159 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
1508 if (prev_link_status != bp->link_vars.link_status) 2160 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
1509 bnx2x_link_report(bp);
1510 2161
1511 if (IS_E1HMF(bp)) { 2162 if (cmng_fns != CMNG_FNS_NONE) {
1512 int port = BP_PORT(bp); 2163 bnx2x_cmng_fns_init(bp, false, cmng_fns);
1513 int func; 2164 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
1514 int vn; 2165 } else
1515 2166 /* rate shaping and fairness are disabled */
1516 /* Set the attention towards other drivers on the same port */ 2167 DP(NETIF_MSG_IFUP,
1517 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2168 "single function mode without fairness\n");
1518 if (vn == BP_E1HVN(bp)) 2169 }
1519 continue;
1520
1521 func = ((vn << 1) | port);
1522 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1523 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1524 }
1525
1526 if (bp->link_vars.link_up) {
1527 int i;
1528 2170
1529 /* Init rate shaping and fairness contexts */ 2171 __bnx2x_link_report(bp);
1530 bnx2x_init_port_minmax(bp);
1531 2172
1532 for (vn = VN_0; vn < E1HVN_MAX; vn++) 2173 if (IS_MF(bp))
1533 bnx2x_init_vn_minmax(bp, 2*vn + port); 2174 bnx2x_link_sync_notify(bp);
1534
1535 /* Store it to internal memory */
1536 for (i = 0;
1537 i < sizeof(struct cmng_struct_per_port) / 4; i++)
1538 REG_WR(bp, BAR_XSTRORM_INTMEM +
1539 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1540 ((u32 *)(&bp->cmng))[i]);
1541 }
1542 }
1543} 2175}
1544 2176
1545void bnx2x__link_status_update(struct bnx2x *bp) 2177void bnx2x__link_status_update(struct bnx2x *bp)
1546{ 2178{
1547 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS)) 2179 if (bp->state != BNX2X_STATE_OPEN)
1548 return; 2180 return;
1549 2181
1550 bnx2x_link_status_update(&bp->link_params, &bp->link_vars); 2182 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
@@ -1554,8 +2186,6 @@ void bnx2x__link_status_update(struct bnx2x *bp)
1554 else 2186 else
1555 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2187 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1556 2188
1557 bnx2x_calc_vn_weight_sum(bp);
1558
1559 /* indicate link status */ 2189 /* indicate link status */
1560 bnx2x_link_report(bp); 2190 bnx2x_link_report(bp);
1561} 2191}
@@ -1570,8 +2200,13 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
1570 2200
1571 /* enable nig attention */ 2201 /* enable nig attention */
1572 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); 2202 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1573 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); 2203 if (bp->common.int_block == INT_BLOCK_HC) {
1574 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); 2204 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2205 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2206 } else if (CHIP_IS_E2(bp)) {
2207 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2208 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2209 }
1575 2210
1576 bnx2x_stats_handle(bp, STATS_EVENT_PMF); 2211 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1577} 2212}
@@ -1585,23 +2220,26 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
1585 */ 2220 */
1586 2221
1587/* send the MCP a request, block until there is a reply */ 2222/* send the MCP a request, block until there is a reply */
1588u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) 2223u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
1589{ 2224{
1590 int func = BP_FUNC(bp); 2225 int mb_idx = BP_FW_MB_IDX(bp);
1591 u32 seq = ++bp->fw_seq; 2226 u32 seq;
1592 u32 rc = 0; 2227 u32 rc = 0;
1593 u32 cnt = 1; 2228 u32 cnt = 1;
1594 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; 2229 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1595 2230
1596 mutex_lock(&bp->fw_mb_mutex); 2231 mutex_lock(&bp->fw_mb_mutex);
1597 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); 2232 seq = ++bp->fw_seq;
2233 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2234 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2235
1598 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); 2236 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1599 2237
1600 do { 2238 do {
1601 /* let the FW do it's magic ... */ 2239 /* let the FW do it's magic ... */
1602 msleep(delay); 2240 msleep(delay);
1603 2241
1604 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); 2242 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
1605 2243
1606 /* Give the FW up to 5 second (500*10ms) */ 2244 /* Give the FW up to 5 second (500*10ms) */
1607 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); 2245 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
@@ -1623,6 +2261,351 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
1623 return rc; 2261 return rc;
1624} 2262}
1625 2263
2264static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2265{
2266#ifdef BCM_CNIC
2267 if (IS_FCOE_FP(fp) && IS_MF(bp))
2268 return false;
2269#endif
2270 return true;
2271}
2272
2273/* must be called under rtnl_lock */
2274static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2275{
2276 u32 mask = (1 << cl_id);
2277
2278 /* initial seeting is BNX2X_ACCEPT_NONE */
2279 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2280 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2281 u8 unmatched_unicast = 0;
2282
2283 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2284 unmatched_unicast = 1;
2285
2286 if (filters & BNX2X_PROMISCUOUS_MODE) {
2287 /* promiscious - accept all, drop none */
2288 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2289 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2290 if (IS_MF_SI(bp)) {
2291 /*
2292 * SI mode defines to accept in promiscuos mode
2293 * only unmatched packets
2294 */
2295 unmatched_unicast = 1;
2296 accp_all_ucast = 0;
2297 }
2298 }
2299 if (filters & BNX2X_ACCEPT_UNICAST) {
2300 /* accept matched ucast */
2301 drop_all_ucast = 0;
2302 }
2303 if (filters & BNX2X_ACCEPT_MULTICAST)
2304 /* accept matched mcast */
2305 drop_all_mcast = 0;
2306
2307 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2308 /* accept all mcast */
2309 drop_all_ucast = 0;
2310 accp_all_ucast = 1;
2311 }
2312 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2313 /* accept all mcast */
2314 drop_all_mcast = 0;
2315 accp_all_mcast = 1;
2316 }
2317 if (filters & BNX2X_ACCEPT_BROADCAST) {
2318 /* accept (all) bcast */
2319 drop_all_bcast = 0;
2320 accp_all_bcast = 1;
2321 }
2322
2323 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2324 bp->mac_filters.ucast_drop_all | mask :
2325 bp->mac_filters.ucast_drop_all & ~mask;
2326
2327 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2328 bp->mac_filters.mcast_drop_all | mask :
2329 bp->mac_filters.mcast_drop_all & ~mask;
2330
2331 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2332 bp->mac_filters.bcast_drop_all | mask :
2333 bp->mac_filters.bcast_drop_all & ~mask;
2334
2335 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2336 bp->mac_filters.ucast_accept_all | mask :
2337 bp->mac_filters.ucast_accept_all & ~mask;
2338
2339 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2340 bp->mac_filters.mcast_accept_all | mask :
2341 bp->mac_filters.mcast_accept_all & ~mask;
2342
2343 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2344 bp->mac_filters.bcast_accept_all | mask :
2345 bp->mac_filters.bcast_accept_all & ~mask;
2346
2347 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2348 bp->mac_filters.unmatched_unicast | mask :
2349 bp->mac_filters.unmatched_unicast & ~mask;
2350}
2351
2352static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2353{
2354 struct tstorm_eth_function_common_config tcfg = {0};
2355 u16 rss_flgs;
2356
2357 /* tpa */
2358 if (p->func_flgs & FUNC_FLG_TPA)
2359 tcfg.config_flags |=
2360 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2361
2362 /* set rss flags */
2363 rss_flgs = (p->rss->mode <<
2364 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2365
2366 if (p->rss->cap & RSS_IPV4_CAP)
2367 rss_flgs |= RSS_IPV4_CAP_MASK;
2368 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2369 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2370 if (p->rss->cap & RSS_IPV6_CAP)
2371 rss_flgs |= RSS_IPV6_CAP_MASK;
2372 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2373 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2374
2375 tcfg.config_flags |= rss_flgs;
2376 tcfg.rss_result_mask = p->rss->result_mask;
2377
2378 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2379
2380 /* Enable the function in the FW */
2381 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2382 storm_memset_func_en(bp, p->func_id, 1);
2383
2384 /* statistics */
2385 if (p->func_flgs & FUNC_FLG_STATS) {
2386 struct stats_indication_flags stats_flags = {0};
2387 stats_flags.collect_eth = 1;
2388
2389 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2390 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2391
2392 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2393 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2394
2395 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2396 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2397
2398 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2399 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2400 }
2401
2402 /* spq */
2403 if (p->func_flgs & FUNC_FLG_SPQ) {
2404 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2405 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2406 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2407 }
2408}
2409
2410static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2411 struct bnx2x_fastpath *fp)
2412{
2413 u16 flags = 0;
2414
2415 /* calculate queue flags */
2416 flags |= QUEUE_FLG_CACHE_ALIGN;
2417 flags |= QUEUE_FLG_HC;
2418 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
2419
2420 flags |= QUEUE_FLG_VLAN;
2421 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2422
2423 if (!fp->disable_tpa)
2424 flags |= QUEUE_FLG_TPA;
2425
2426 flags = stat_counter_valid(bp, fp) ?
2427 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
2428
2429 return flags;
2430}
2431
2432static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2433 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2434 struct bnx2x_rxq_init_params *rxq_init)
2435{
2436 u16 max_sge = 0;
2437 u16 sge_sz = 0;
2438 u16 tpa_agg_size = 0;
2439
2440 /* calculate queue flags */
2441 u16 flags = bnx2x_get_cl_flags(bp, fp);
2442
2443 if (!fp->disable_tpa) {
2444 pause->sge_th_hi = 250;
2445 pause->sge_th_lo = 150;
2446 tpa_agg_size = min_t(u32,
2447 (min_t(u32, 8, MAX_SKB_FRAGS) *
2448 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2449 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2450 SGE_PAGE_SHIFT;
2451 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2452 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2453 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2454 0xffff);
2455 }
2456
2457 /* pause - not for e1 */
2458 if (!CHIP_IS_E1(bp)) {
2459 pause->bd_th_hi = 350;
2460 pause->bd_th_lo = 250;
2461 pause->rcq_th_hi = 350;
2462 pause->rcq_th_lo = 250;
2463 pause->sge_th_hi = 0;
2464 pause->sge_th_lo = 0;
2465 pause->pri_map = 1;
2466 }
2467
2468 /* rxq setup */
2469 rxq_init->flags = flags;
2470 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2471 rxq_init->dscr_map = fp->rx_desc_mapping;
2472 rxq_init->sge_map = fp->rx_sge_mapping;
2473 rxq_init->rcq_map = fp->rx_comp_mapping;
2474 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2475
2476 /* Always use mini-jumbo MTU for FCoE L2 ring */
2477 if (IS_FCOE_FP(fp))
2478 rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2479 else
2480 rxq_init->mtu = bp->dev->mtu;
2481
2482 rxq_init->buf_sz = fp->rx_buf_size;
2483 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2484 rxq_init->cl_id = fp->cl_id;
2485 rxq_init->spcl_id = fp->cl_id;
2486 rxq_init->stat_id = fp->cl_id;
2487 rxq_init->tpa_agg_sz = tpa_agg_size;
2488 rxq_init->sge_buf_sz = sge_sz;
2489 rxq_init->max_sges_pkt = max_sge;
2490 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2491 rxq_init->fw_sb_id = fp->fw_sb_id;
2492
2493 if (IS_FCOE_FP(fp))
2494 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2495 else
2496 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2497
2498 rxq_init->cid = HW_CID(bp, fp->cid);
2499
2500 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2501}
2502
2503static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2504 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2505{
2506 u16 flags = bnx2x_get_cl_flags(bp, fp);
2507
2508 txq_init->flags = flags;
2509 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2510 txq_init->dscr_map = fp->tx_desc_mapping;
2511 txq_init->stat_id = fp->cl_id;
2512 txq_init->cid = HW_CID(bp, fp->cid);
2513 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2514 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2515 txq_init->fw_sb_id = fp->fw_sb_id;
2516
2517 if (IS_FCOE_FP(fp)) {
2518 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2519 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2520 }
2521
2522 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2523}
2524
2525static void bnx2x_pf_init(struct bnx2x *bp)
2526{
2527 struct bnx2x_func_init_params func_init = {0};
2528 struct bnx2x_rss_params rss = {0};
2529 struct event_ring_data eq_data = { {0} };
2530 u16 flags;
2531
2532 /* pf specific setups */
2533 if (!CHIP_IS_E1(bp))
2534 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2535
2536 if (CHIP_IS_E2(bp)) {
2537 /* reset IGU PF statistics: MSIX + ATTN */
2538 /* PF */
2539 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2540 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2541 (CHIP_MODE_IS_4_PORT(bp) ?
2542 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2543 /* ATTN */
2544 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2545 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2546 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2547 (CHIP_MODE_IS_4_PORT(bp) ?
2548 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2549 }
2550
2551 /* function setup flags */
2552 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2553
2554 if (CHIP_IS_E1x(bp))
2555 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2556 else
2557 flags |= FUNC_FLG_TPA;
2558
2559 /* function setup */
2560
2561 /**
2562 * Although RSS is meaningless when there is a single HW queue we
2563 * still need it enabled in order to have HW Rx hash generated.
2564 */
2565 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2566 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2567 rss.mode = bp->multi_mode;
2568 rss.result_mask = MULTI_MASK;
2569 func_init.rss = &rss;
2570
2571 func_init.func_flgs = flags;
2572 func_init.pf_id = BP_FUNC(bp);
2573 func_init.func_id = BP_FUNC(bp);
2574 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2575 func_init.spq_map = bp->spq_mapping;
2576 func_init.spq_prod = bp->spq_prod_idx;
2577
2578 bnx2x_func_init(bp, &func_init);
2579
2580 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2581
2582 /*
2583 Congestion management values depend on the link rate
2584 There is no active link so initial link rate is set to 10 Gbps.
2585 When the link comes up The congestion management values are
2586 re-calculated according to the actual link rate.
2587 */
2588 bp->link_vars.line_speed = SPEED_10000;
2589 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2590
2591 /* Only the PMF sets the HW */
2592 if (bp->port.pmf)
2593 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2594
2595 /* no rx until link is up */
2596 bp->rx_mode = BNX2X_RX_MODE_NONE;
2597 bnx2x_set_storm_rx_mode(bp);
2598
2599 /* init Event Queue */
2600 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2601 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2602 eq_data.producer = bp->eq_prod;
2603 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2604 eq_data.sb_id = DEF_SB_ID;
2605 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2606}
2607
2608
1626static void bnx2x_e1h_disable(struct bnx2x *bp) 2609static void bnx2x_e1h_disable(struct bnx2x *bp)
1627{ 2610{
1628 int port = BP_PORT(bp); 2611 int port = BP_PORT(bp);
@@ -1649,38 +2632,24 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
1649 */ 2632 */
1650} 2633}
1651 2634
1652static void bnx2x_update_min_max(struct bnx2x *bp) 2635/* called due to MCP event (on pmf):
2636 * reread new bandwidth configuration
2637 * configure FW
2638 * notify others function about the change
2639 */
2640static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
1653{ 2641{
1654 int port = BP_PORT(bp); 2642 if (bp->link_vars.link_up) {
1655 int vn, i; 2643 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
1656 2644 bnx2x_link_sync_notify(bp);
1657 /* Init rate shaping and fairness contexts */
1658 bnx2x_init_port_minmax(bp);
1659
1660 bnx2x_calc_vn_weight_sum(bp);
1661
1662 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1663 bnx2x_init_vn_minmax(bp, 2*vn + port);
1664
1665 if (bp->port.pmf) {
1666 int func;
1667
1668 /* Set the attention towards other drivers on the same port */
1669 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1670 if (vn == BP_E1HVN(bp))
1671 continue;
1672
1673 func = ((vn << 1) | port);
1674 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1675 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1676 }
1677
1678 /* Store it to internal memory */
1679 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1680 REG_WR(bp, BAR_XSTRORM_INTMEM +
1681 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1682 ((u32 *)(&bp->cmng))[i]);
1683 } 2645 }
2646 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2647}
2648
2649static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2650{
2651 bnx2x_config_mf_bw(bp);
2652 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
1684} 2653}
1685 2654
1686static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) 2655static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
@@ -1694,7 +2663,7 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1694 * where the bp->flags can change so it is done without any 2663 * where the bp->flags can change so it is done without any
1695 * locks 2664 * locks
1696 */ 2665 */
1697 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { 2666 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
1698 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n"); 2667 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
1699 bp->flags |= MF_FUNC_DIS; 2668 bp->flags |= MF_FUNC_DIS;
1700 2669
@@ -1708,16 +2677,15 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1708 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 2677 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1709 } 2678 }
1710 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 2679 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1711 2680 bnx2x_config_mf_bw(bp);
1712 bnx2x_update_min_max(bp);
1713 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 2681 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1714 } 2682 }
1715 2683
1716 /* Report results to MCP */ 2684 /* Report results to MCP */
1717 if (dcc_event) 2685 if (dcc_event)
1718 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE); 2686 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
1719 else 2687 else
1720 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK); 2688 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
1721} 2689}
1722 2690
1723/* must be called under the spq lock */ 2691/* must be called under the spq lock */
@@ -1744,16 +2712,17 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1744 /* Make sure that BD data is updated before writing the producer */ 2712 /* Make sure that BD data is updated before writing the producer */
1745 wmb(); 2713 wmb();
1746 2714
1747 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), 2715 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1748 bp->spq_prod_idx); 2716 bp->spq_prod_idx);
1749 mmiowb(); 2717 mmiowb();
1750} 2718}
1751 2719
1752/* the slow path queue is odd since completions arrive on the fastpath ring */ 2720/* the slow path queue is odd since completions arrive on the fastpath ring */
1753int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 2721int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1754 u32 data_hi, u32 data_lo, int common) 2722 u32 data_hi, u32 data_lo, int common)
1755{ 2723{
1756 struct eth_spe *spe; 2724 struct eth_spe *spe;
2725 u16 type;
1757 2726
1758#ifdef BNX2X_STOP_ON_ERROR 2727#ifdef BNX2X_STOP_ON_ERROR
1759 if (unlikely(bp->panic)) 2728 if (unlikely(bp->panic))
@@ -1762,11 +2731,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1762 2731
1763 spin_lock_bh(&bp->spq_lock); 2732 spin_lock_bh(&bp->spq_lock);
1764 2733
1765 if (!bp->spq_left) { 2734 if (common) {
1766 BNX2X_ERR("BUG! SPQ ring full!\n"); 2735 if (!atomic_read(&bp->eq_spq_left)) {
1767 spin_unlock_bh(&bp->spq_lock); 2736 BNX2X_ERR("BUG! EQ ring full!\n");
1768 bnx2x_panic(); 2737 spin_unlock_bh(&bp->spq_lock);
1769 return -EBUSY; 2738 bnx2x_panic();
2739 return -EBUSY;
2740 }
2741 } else if (!atomic_read(&bp->cq_spq_left)) {
2742 BNX2X_ERR("BUG! SPQ ring full!\n");
2743 spin_unlock_bh(&bp->spq_lock);
2744 bnx2x_panic();
2745 return -EBUSY;
1770 } 2746 }
1771 2747
1772 spe = bnx2x_sp_get_next(bp); 2748 spe = bnx2x_sp_get_next(bp);
@@ -1775,22 +2751,48 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1775 spe->hdr.conn_and_cmd_data = 2751 spe->hdr.conn_and_cmd_data =
1776 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | 2752 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1777 HW_CID(bp, cid)); 2753 HW_CID(bp, cid));
1778 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE); 2754
1779 if (common) 2755 if (common)
1780 spe->hdr.type |= 2756 /* Common ramrods:
1781 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT)); 2757 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2758 * TRAFFIC_STOP, TRAFFIC_START
2759 */
2760 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2761 & SPE_HDR_CONN_TYPE;
2762 else
2763 /* ETH ramrods: SETUP, HALT */
2764 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2765 & SPE_HDR_CONN_TYPE;
2766
2767 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2768 SPE_HDR_FUNCTION_ID);
1782 2769
1783 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi); 2770 spe->hdr.type = cpu_to_le16(type);
1784 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo); 2771
2772 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2773 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2774
2775 /* stats ramrod has it's own slot on the spq */
2776 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
2777 /* It's ok if the actual decrement is issued towards the memory
2778 * somewhere between the spin_lock and spin_unlock. Thus no
2779 * more explict memory barrier is needed.
2780 */
2781 if (common)
2782 atomic_dec(&bp->eq_spq_left);
2783 else
2784 atomic_dec(&bp->cq_spq_left);
2785 }
1785 2786
1786 bp->spq_left--;
1787 2787
1788 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, 2788 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1789 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n", 2789 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2790 "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
1790 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), 2791 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1791 (u32)(U64_LO(bp->spq_mapping) + 2792 (u32)(U64_LO(bp->spq_mapping) +
1792 (void *)bp->spq_prod_bd - (void *)bp->spq), command, 2793 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1793 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left); 2794 HW_CID(bp, cid), data_hi, data_lo, type,
2795 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
1794 2796
1795 bnx2x_sp_prod_update(bp); 2797 bnx2x_sp_prod_update(bp);
1796 spin_unlock_bh(&bp->spq_lock); 2798 spin_unlock_bh(&bp->spq_lock);
@@ -1827,32 +2829,27 @@ static void bnx2x_release_alr(struct bnx2x *bp)
1827 REG_WR(bp, GRCBASE_MCP + 0x9c, 0); 2829 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
1828} 2830}
1829 2831
2832#define BNX2X_DEF_SB_ATT_IDX 0x0001
2833#define BNX2X_DEF_SB_IDX 0x0002
2834
1830static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) 2835static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1831{ 2836{
1832 struct host_def_status_block *def_sb = bp->def_status_blk; 2837 struct host_sp_status_block *def_sb = bp->def_status_blk;
1833 u16 rc = 0; 2838 u16 rc = 0;
1834 2839
1835 barrier(); /* status block is written to by the chip */ 2840 barrier(); /* status block is written to by the chip */
1836 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 2841 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1837 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; 2842 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1838 rc |= 1; 2843 rc |= BNX2X_DEF_SB_ATT_IDX;
1839 }
1840 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1841 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1842 rc |= 2;
1843 }
1844 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1845 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1846 rc |= 4;
1847 }
1848 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1849 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1850 rc |= 8;
1851 } 2844 }
1852 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) { 2845
1853 bp->def_t_idx = def_sb->t_def_status_block.status_block_index; 2846 if (bp->def_idx != def_sb->sp_sb.running_index) {
1854 rc |= 16; 2847 bp->def_idx = def_sb->sp_sb.running_index;
2848 rc |= BNX2X_DEF_SB_IDX;
1855 } 2849 }
2850
2851 /* Do not reorder: indecies reading should complete before handling */
2852 barrier();
1856 return rc; 2853 return rc;
1857} 2854}
1858 2855
@@ -1863,14 +2860,13 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1863static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) 2860static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1864{ 2861{
1865 int port = BP_PORT(bp); 2862 int port = BP_PORT(bp);
1866 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1867 COMMAND_REG_ATTN_BITS_SET);
1868 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 2863 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1869 MISC_REG_AEU_MASK_ATTN_FUNC_0; 2864 MISC_REG_AEU_MASK_ATTN_FUNC_0;
1870 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 2865 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1871 NIG_REG_MASK_INTERRUPT_PORT0; 2866 NIG_REG_MASK_INTERRUPT_PORT0;
1872 u32 aeu_mask; 2867 u32 aeu_mask;
1873 u32 nig_mask = 0; 2868 u32 nig_mask = 0;
2869 u32 reg_addr;
1874 2870
1875 if (bp->attn_state & asserted) 2871 if (bp->attn_state & asserted)
1876 BNX2X_ERR("IGU ERROR\n"); 2872 BNX2X_ERR("IGU ERROR\n");
@@ -1945,9 +2941,15 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1945 2941
1946 } /* if hardwired */ 2942 } /* if hardwired */
1947 2943
1948 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", 2944 if (bp->common.int_block == INT_BLOCK_HC)
1949 asserted, hc_addr); 2945 reg_addr = (HC_REG_COMMAND_REG + port*32 +
1950 REG_WR(bp, hc_addr, asserted); 2946 COMMAND_REG_ATTN_BITS_SET);
2947 else
2948 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2949
2950 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2951 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2952 REG_WR(bp, reg_addr, asserted);
1951 2953
1952 /* now set back the mask */ 2954 /* now set back the mask */
1953 if (asserted & ATTN_NIG_FOR_FUNC) { 2955 if (asserted & ATTN_NIG_FOR_FUNC) {
@@ -1959,12 +2961,16 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1959static inline void bnx2x_fan_failure(struct bnx2x *bp) 2961static inline void bnx2x_fan_failure(struct bnx2x *bp)
1960{ 2962{
1961 int port = BP_PORT(bp); 2963 int port = BP_PORT(bp);
1962 2964 u32 ext_phy_config;
1963 /* mark the failure */ 2965 /* mark the failure */
1964 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 2966 ext_phy_config =
1965 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 2967 SHMEM_RD(bp,
2968 dev_info.port_hw_config[port].external_phy_config);
2969
2970 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2971 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1966 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config, 2972 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
1967 bp->link_params.ext_phy_config); 2973 ext_phy_config);
1968 2974
1969 /* log the failure */ 2975 /* log the failure */
1970 netdev_err(bp->dev, "Fan Failure on Network Controller has caused" 2976 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
@@ -1976,7 +2982,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1976{ 2982{
1977 int port = BP_PORT(bp); 2983 int port = BP_PORT(bp);
1978 int reg_offset; 2984 int reg_offset;
1979 u32 val, swap_val, swap_override; 2985 u32 val;
1980 2986
1981 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 2987 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1982 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 2988 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -1990,30 +2996,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1990 BNX2X_ERR("SPIO5 hw attention\n"); 2996 BNX2X_ERR("SPIO5 hw attention\n");
1991 2997
1992 /* Fan failure attention */ 2998 /* Fan failure attention */
1993 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) { 2999 bnx2x_hw_reset_phy(&bp->link_params);
1994 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
1995 /* Low power mode is controlled by GPIO 2 */
1996 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1997 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1998 /* The PHY reset is controlled by GPIO 1 */
1999 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2000 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2001 break;
2002
2003 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2004 /* The PHY reset is controlled by GPIO 1 */
2005 /* fake the port number to cancel the swap done in
2006 set_gpio() */
2007 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2008 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2009 port = (swap_val && swap_override) ^ 1;
2010 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2011 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2012 break;
2013
2014 default:
2015 break;
2016 }
2017 bnx2x_fan_failure(bp); 3000 bnx2x_fan_failure(bp);
2018 } 3001 }
2019 3002
@@ -2087,6 +3070,10 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2087 /* RQ_USDMDP_FIFO_OVERFLOW */ 3070 /* RQ_USDMDP_FIFO_OVERFLOW */
2088 if (val & 0x18000) 3071 if (val & 0x18000)
2089 BNX2X_ERR("FATAL error from PXP\n"); 3072 BNX2X_ERR("FATAL error from PXP\n");
3073 if (CHIP_IS_E2(bp)) {
3074 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3075 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3076 }
2090 } 3077 }
2091 3078
2092 if (attn & HW_INTERRUT_ASSERT_SET_2) { 3079 if (attn & HW_INTERRUT_ASSERT_SET_2) {
@@ -2117,16 +3104,31 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2117 int func = BP_FUNC(bp); 3104 int func = BP_FUNC(bp);
2118 3105
2119 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 3106 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2120 bp->mf_config = SHMEM_RD(bp, 3107 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
2121 mf_cfg.func_mf_config[func].config); 3108 func_mf_config[BP_ABS_FUNC(bp)].config);
2122 val = SHMEM_RD(bp, func_mb[func].drv_status); 3109 val = SHMEM_RD(bp,
3110 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2123 if (val & DRV_STATUS_DCC_EVENT_MASK) 3111 if (val & DRV_STATUS_DCC_EVENT_MASK)
2124 bnx2x_dcc_event(bp, 3112 bnx2x_dcc_event(bp,
2125 (val & DRV_STATUS_DCC_EVENT_MASK)); 3113 (val & DRV_STATUS_DCC_EVENT_MASK));
2126 bnx2x__link_status_update(bp); 3114
3115 if (val & DRV_STATUS_SET_MF_BW)
3116 bnx2x_set_mf_bw(bp);
3117
2127 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) 3118 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2128 bnx2x_pmf_update(bp); 3119 bnx2x_pmf_update(bp);
2129 3120
3121 /* Always call it here: bnx2x_link_report() will
3122 * prevent the link indication duplication.
3123 */
3124 bnx2x__link_status_update(bp);
3125
3126 if (bp->port.pmf &&
3127 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3128 bp->dcbx_enabled > 0)
3129 /* start dcbx state machine */
3130 bnx2x_dcbx_set_params(bp,
3131 BNX2X_DCBX_STATE_NEG_RECEIVED);
2130 } else if (attn & BNX2X_MC_ASSERT_BITS) { 3132 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2131 3133
2132 BNX2X_ERR("MC assert!\n"); 3134 BNX2X_ERR("MC assert!\n");
@@ -2149,13 +3151,13 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2149 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 3151 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2150 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); 3152 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2151 if (attn & BNX2X_GRC_TIMEOUT) { 3153 if (attn & BNX2X_GRC_TIMEOUT) {
2152 val = CHIP_IS_E1H(bp) ? 3154 val = CHIP_IS_E1(bp) ? 0 :
2153 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0; 3155 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
2154 BNX2X_ERR("GRC time-out 0x%08x\n", val); 3156 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2155 } 3157 }
2156 if (attn & BNX2X_GRC_RSV) { 3158 if (attn & BNX2X_GRC_RSV) {
2157 val = CHIP_IS_E1H(bp) ? 3159 val = CHIP_IS_E1(bp) ? 0 :
2158 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0; 3160 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
2159 BNX2X_ERR("GRC reserved 0x%08x\n", val); 3161 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2160 } 3162 }
2161 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 3163 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
@@ -2167,7 +3169,7 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2167#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) 3169#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2168#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) 3170#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
2169#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS 3171#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
2170#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) 3172
2171/* 3173/*
2172 * should be run under rtnl lock 3174 * should be run under rtnl lock
2173 */ 3175 */
@@ -2460,6 +3462,74 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2460 attn.sig[3]); 3462 attn.sig[3]);
2461} 3463}
2462 3464
3465
3466static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3467{
3468 u32 val;
3469 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3470
3471 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3472 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3473 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3474 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3475 "ADDRESS_ERROR\n");
3476 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3477 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3478 "INCORRECT_RCV_BEHAVIOR\n");
3479 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3480 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3481 "WAS_ERROR_ATTN\n");
3482 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3483 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3484 "VF_LENGTH_VIOLATION_ATTN\n");
3485 if (val &
3486 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3487 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3488 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3489 if (val &
3490 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3491 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3492 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3493 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3494 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3495 "TCPL_ERROR_ATTN\n");
3496 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3497 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3498 "TCPL_IN_TWO_RCBS_ATTN\n");
3499 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3500 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3501 "CSSNOOP_FIFO_OVERFLOW\n");
3502 }
3503 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3504 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3505 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3506 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3507 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3508 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3509 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3510 "_ATC_TCPL_TO_NOT_PEND\n");
3511 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3512 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3513 "ATC_GPA_MULTIPLE_HITS\n");
3514 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3515 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3516 "ATC_RCPL_TO_EMPTY_CNT\n");
3517 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3518 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3519 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3520 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3521 "ATC_IREQ_LESS_THAN_STU\n");
3522 }
3523
3524 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3525 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3526 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3527 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3528 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3529 }
3530
3531}
3532
2463static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 3533static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2464{ 3534{
2465 struct attn_route attn, *group_mask; 3535 struct attn_route attn, *group_mask;
@@ -2473,7 +3543,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2473 try to handle this event */ 3543 try to handle this event */
2474 bnx2x_acquire_alr(bp); 3544 bnx2x_acquire_alr(bp);
2475 3545
2476 if (bnx2x_chk_parity_attn(bp)) { 3546 if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
2477 bp->recovery_state = BNX2X_RECOVERY_INIT; 3547 bp->recovery_state = BNX2X_RECOVERY_INIT;
2478 bnx2x_set_reset_in_progress(bp); 3548 bnx2x_set_reset_in_progress(bp);
2479 schedule_delayed_work(&bp->reset_task, 0); 3549 schedule_delayed_work(&bp->reset_task, 0);
@@ -2490,17 +3560,28 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2490 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 3560 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2491 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 3561 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2492 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 3562 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2493 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n", 3563 if (CHIP_IS_E2(bp))
2494 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]); 3564 attn.sig[4] =
3565 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3566 else
3567 attn.sig[4] = 0;
3568
3569 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3570 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
2495 3571
2496 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 3572 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2497 if (deasserted & (1 << index)) { 3573 if (deasserted & (1 << index)) {
2498 group_mask = &bp->attn_group[index]; 3574 group_mask = &bp->attn_group[index];
2499 3575
2500 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n", 3576 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
2501 index, group_mask->sig[0], group_mask->sig[1], 3577 "%08x %08x %08x\n",
2502 group_mask->sig[2], group_mask->sig[3]); 3578 index,
3579 group_mask->sig[0], group_mask->sig[1],
3580 group_mask->sig[2], group_mask->sig[3],
3581 group_mask->sig[4]);
2503 3582
3583 bnx2x_attn_int_deasserted4(bp,
3584 attn.sig[4] & group_mask->sig[4]);
2504 bnx2x_attn_int_deasserted3(bp, 3585 bnx2x_attn_int_deasserted3(bp,
2505 attn.sig[3] & group_mask->sig[3]); 3586 attn.sig[3] & group_mask->sig[3]);
2506 bnx2x_attn_int_deasserted1(bp, 3587 bnx2x_attn_int_deasserted1(bp,
@@ -2514,11 +3595,15 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2514 3595
2515 bnx2x_release_alr(bp); 3596 bnx2x_release_alr(bp);
2516 3597
2517 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR); 3598 if (bp->common.int_block == INT_BLOCK_HC)
3599 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3600 COMMAND_REG_ATTN_BITS_CLR);
3601 else
3602 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
2518 3603
2519 val = ~deasserted; 3604 val = ~deasserted;
2520 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", 3605 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
2521 val, reg_addr); 3606 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2522 REG_WR(bp, reg_addr, val); 3607 REG_WR(bp, reg_addr, val);
2523 3608
2524 if (~bp->attn_state & deasserted) 3609 if (~bp->attn_state & deasserted)
@@ -2571,6 +3656,156 @@ static void bnx2x_attn_int(struct bnx2x *bp)
2571 bnx2x_attn_int_deasserted(bp, deasserted); 3656 bnx2x_attn_int_deasserted(bp, deasserted);
2572} 3657}
2573 3658
3659static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3660{
3661 /* No memory barriers */
3662 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3663 mmiowb(); /* keep prod updates ordered */
3664}
3665
3666#ifdef BCM_CNIC
3667static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3668 union event_ring_elem *elem)
3669{
3670 if (!bp->cnic_eth_dev.starting_cid ||
3671 (cid < bp->cnic_eth_dev.starting_cid &&
3672 cid != bp->cnic_eth_dev.iscsi_l2_cid))
3673 return 1;
3674
3675 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3676
3677 if (unlikely(elem->message.data.cfc_del_event.error)) {
3678 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3679 cid);
3680 bnx2x_panic_dump(bp);
3681 }
3682 bnx2x_cnic_cfc_comp(bp, cid);
3683 return 0;
3684}
3685#endif
3686
3687static void bnx2x_eq_int(struct bnx2x *bp)
3688{
3689 u16 hw_cons, sw_cons, sw_prod;
3690 union event_ring_elem *elem;
3691 u32 cid;
3692 u8 opcode;
3693 int spqe_cnt = 0;
3694
3695 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3696
3697 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256.
3698 * when we get the the next-page we nned to adjust so the loop
3699 * condition below will be met. The next element is the size of a
3700 * regular element and hence incrementing by 1
3701 */
3702 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3703 hw_cons++;
3704
3705 /* This function may never run in parallel with itself for a
3706 * specific bp, thus there is no need in "paired" read memory
3707 * barrier here.
3708 */
3709 sw_cons = bp->eq_cons;
3710 sw_prod = bp->eq_prod;
3711
3712 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
3713 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
3714
3715 for (; sw_cons != hw_cons;
3716 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3717
3718
3719 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3720
3721 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3722 opcode = elem->message.opcode;
3723
3724
3725 /* handle eq element */
3726 switch (opcode) {
3727 case EVENT_RING_OPCODE_STAT_QUERY:
3728 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3729 /* nothing to do with stats comp */
3730 continue;
3731
3732 case EVENT_RING_OPCODE_CFC_DEL:
3733 /* handle according to cid range */
3734 /*
3735 * we may want to verify here that the bp state is
3736 * HALTING
3737 */
3738 DP(NETIF_MSG_IFDOWN,
3739 "got delete ramrod for MULTI[%d]\n", cid);
3740#ifdef BCM_CNIC
3741 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3742 goto next_spqe;
3743 if (cid == BNX2X_FCOE_ETH_CID)
3744 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
3745 else
3746#endif
3747 bnx2x_fp(bp, cid, state) =
3748 BNX2X_FP_STATE_CLOSED;
3749
3750 goto next_spqe;
3751
3752 case EVENT_RING_OPCODE_STOP_TRAFFIC:
3753 DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
3754 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
3755 goto next_spqe;
3756 case EVENT_RING_OPCODE_START_TRAFFIC:
3757 DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
3758 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
3759 goto next_spqe;
3760 }
3761
3762 switch (opcode | bp->state) {
3763 case (EVENT_RING_OPCODE_FUNCTION_START |
3764 BNX2X_STATE_OPENING_WAIT4_PORT):
3765 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3766 bp->state = BNX2X_STATE_FUNC_STARTED;
3767 break;
3768
3769 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3770 BNX2X_STATE_CLOSING_WAIT4_HALT):
3771 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3772 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3773 break;
3774
3775 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3776 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3777 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3778 if (elem->message.data.set_mac_event.echo)
3779 bp->set_mac_pending = 0;
3780 break;
3781
3782 case (EVENT_RING_OPCODE_SET_MAC |
3783 BNX2X_STATE_CLOSING_WAIT4_HALT):
3784 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3785 if (elem->message.data.set_mac_event.echo)
3786 bp->set_mac_pending = 0;
3787 break;
3788 default:
3789 /* unknown event log error and continue */
3790 BNX2X_ERR("Unknown EQ event %d\n",
3791 elem->message.opcode);
3792 }
3793next_spqe:
3794 spqe_cnt++;
3795 } /* for */
3796
3797 smp_mb__before_atomic_inc();
3798 atomic_add(spqe_cnt, &bp->eq_spq_left);
3799
3800 bp->eq_cons = sw_cons;
3801 bp->eq_prod = sw_prod;
3802 /* Make sure that above mem writes were issued towards the memory */
3803 smp_wmb();
3804
3805 /* update producer */
3806 bnx2x_update_eq_prod(bp, bp->eq_prod);
3807}
3808
2574static void bnx2x_sp_task(struct work_struct *work) 3809static void bnx2x_sp_task(struct work_struct *work)
2575{ 3810{
2576 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); 3811 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
@@ -2589,31 +3824,35 @@ static void bnx2x_sp_task(struct work_struct *work)
2589 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status); 3824 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2590 3825
2591 /* HW attentions */ 3826 /* HW attentions */
2592 if (status & 0x1) { 3827 if (status & BNX2X_DEF_SB_ATT_IDX) {
2593 bnx2x_attn_int(bp); 3828 bnx2x_attn_int(bp);
2594 status &= ~0x1; 3829 status &= ~BNX2X_DEF_SB_ATT_IDX;
2595 } 3830 }
2596 3831
2597 /* CStorm events: STAT_QUERY */ 3832 /* SP events: STAT_QUERY and others */
2598 if (status & 0x2) { 3833 if (status & BNX2X_DEF_SB_IDX) {
2599 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n"); 3834#ifdef BCM_CNIC
2600 status &= ~0x2; 3835 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
3836
3837 if ((!NO_FCOE(bp)) &&
3838 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
3839 napi_schedule(&bnx2x_fcoe(bp, napi));
3840#endif
3841 /* Handle EQ completions */
3842 bnx2x_eq_int(bp);
3843
3844 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3845 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3846
3847 status &= ~BNX2X_DEF_SB_IDX;
2601 } 3848 }
2602 3849
2603 if (unlikely(status)) 3850 if (unlikely(status))
2604 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n", 3851 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2605 status); 3852 status);
2606 3853
2607 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx), 3854 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
2608 IGU_INT_NOP, 1); 3855 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
2609 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2610 IGU_INT_NOP, 1);
2611 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2612 IGU_INT_NOP, 1);
2613 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2614 IGU_INT_NOP, 1);
2615 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2616 IGU_INT_ENABLE, 1);
2617} 3856}
2618 3857
2619irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 3858irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -2627,7 +3866,8 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2627 return IRQ_HANDLED; 3866 return IRQ_HANDLED;
2628 } 3867 }
2629 3868
2630 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0); 3869 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3870 IGU_INT_DISABLE, 0);
2631 3871
2632#ifdef BNX2X_STOP_ON_ERROR 3872#ifdef BNX2X_STOP_ON_ERROR
2633 if (unlikely(bp->panic)) 3873 if (unlikely(bp->panic))
@@ -2664,14 +3904,13 @@ static void bnx2x_timer(unsigned long data)
2664 3904
2665 if (poll) { 3905 if (poll) {
2666 struct bnx2x_fastpath *fp = &bp->fp[0]; 3906 struct bnx2x_fastpath *fp = &bp->fp[0];
2667 int rc;
2668 3907
2669 bnx2x_tx_int(fp); 3908 bnx2x_tx_int(fp);
2670 rc = bnx2x_rx_int(fp, 1000); 3909 bnx2x_rx_int(fp, 1000);
2671 } 3910 }
2672 3911
2673 if (!BP_NOMCP(bp)) { 3912 if (!BP_NOMCP(bp)) {
2674 int func = BP_FUNC(bp); 3913 int mb_idx = BP_FW_MB_IDX(bp);
2675 u32 drv_pulse; 3914 u32 drv_pulse;
2676 u32 mcp_pulse; 3915 u32 mcp_pulse;
2677 3916
@@ -2679,9 +3918,9 @@ static void bnx2x_timer(unsigned long data)
2679 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 3918 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2680 /* TBD - add SYSTEM_TIME */ 3919 /* TBD - add SYSTEM_TIME */
2681 drv_pulse = bp->fw_drv_pulse_wr_seq; 3920 drv_pulse = bp->fw_drv_pulse_wr_seq;
2682 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse); 3921 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
2683 3922
2684 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) & 3923 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
2685 MCP_PULSE_SEQ_MASK); 3924 MCP_PULSE_SEQ_MASK);
2686 /* The delta between driver pulse and mcp response 3925 /* The delta between driver pulse and mcp response
2687 * should be 1 (before mcp response) or 0 (after mcp response) 3926 * should be 1 (before mcp response) or 0 (after mcp response)
@@ -2709,327 +3948,313 @@ timer_restart:
2709 * nic init service functions 3948 * nic init service functions
2710 */ 3949 */
2711 3950
2712static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id) 3951static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
2713{ 3952{
2714 int port = BP_PORT(bp); 3953 u32 i;
3954 if (!(len%4) && !(addr%4))
3955 for (i = 0; i < len; i += 4)
3956 REG_WR(bp, addr + i, fill);
3957 else
3958 for (i = 0; i < len; i++)
3959 REG_WR8(bp, addr + i, fill);
2715 3960
2716 /* "CSTORM" */
2717 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2718 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2719 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2720 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2721 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2722 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
2723} 3961}
2724 3962
2725void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, 3963/* helper: writes FP SP data to FW - data_size in dwords */
2726 dma_addr_t mapping, int sb_id) 3964static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3965 int fw_sb_id,
3966 u32 *sb_data_p,
3967 u32 data_size)
2727{ 3968{
2728 int port = BP_PORT(bp);
2729 int func = BP_FUNC(bp);
2730 int index; 3969 int index;
2731 u64 section; 3970 for (index = 0; index < data_size; index++)
3971 REG_WR(bp, BAR_CSTRORM_INTMEM +
3972 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3973 sizeof(u32)*index,
3974 *(sb_data_p + index));
3975}
3976
3977static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3978{
3979 u32 *sb_data_p;
3980 u32 data_size = 0;
3981 struct hc_status_block_data_e2 sb_data_e2;
3982 struct hc_status_block_data_e1x sb_data_e1x;
3983
3984 /* disable the function first */
3985 if (CHIP_IS_E2(bp)) {
3986 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3987 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3988 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3989 sb_data_e2.common.p_func.vf_valid = false;
3990 sb_data_p = (u32 *)&sb_data_e2;
3991 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3992 } else {
3993 memset(&sb_data_e1x, 0,
3994 sizeof(struct hc_status_block_data_e1x));
3995 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3996 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3997 sb_data_e1x.common.p_func.vf_valid = false;
3998 sb_data_p = (u32 *)&sb_data_e1x;
3999 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4000 }
4001 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
2732 4002
2733 /* USTORM */ 4003 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
2734 section = ((u64)mapping) + offsetof(struct host_status_block, 4004 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
2735 u_status_block); 4005 CSTORM_STATUS_BLOCK_SIZE);
2736 sb->u_status_block.status_block_id = sb_id; 4006 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
2737 4007 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
2738 REG_WR(bp, BAR_CSTRORM_INTMEM + 4008 CSTORM_SYNC_BLOCK_SIZE);
2739 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section)); 4009}
2740 REG_WR(bp, BAR_CSTRORM_INTMEM +
2741 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
2742 U64_HI(section));
2743 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2744 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2745
2746 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2747 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2748 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
2749 4010
2750 /* CSTORM */ 4011/* helper: writes SP SB data to FW */
2751 section = ((u64)mapping) + offsetof(struct host_status_block, 4012static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
2752 c_status_block); 4013 struct hc_sp_status_block_data *sp_sb_data)
2753 sb->c_status_block.status_block_id = sb_id; 4014{
4015 int func = BP_FUNC(bp);
4016 int i;
4017 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
4018 REG_WR(bp, BAR_CSTRORM_INTMEM +
4019 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
4020 i*sizeof(u32),
4021 *((u32 *)sp_sb_data + i));
4022}
4023
4024static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
4025{
4026 int func = BP_FUNC(bp);
4027 struct hc_sp_status_block_data sp_sb_data;
4028 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4029
4030 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
4031 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
4032 sp_sb_data.p_func.vf_valid = false;
2754 4033
2755 REG_WR(bp, BAR_CSTRORM_INTMEM + 4034 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
2756 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
2757 REG_WR(bp, BAR_CSTRORM_INTMEM +
2758 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
2759 U64_HI(section));
2760 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
2761 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2762 4035
2763 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++) 4036 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
2764 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4037 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
2765 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1); 4038 CSTORM_SP_STATUS_BLOCK_SIZE);
4039 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4040 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
4041 CSTORM_SP_SYNC_BLOCK_SIZE);
2766 4042
2767 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2768} 4043}
2769 4044
2770static void bnx2x_zero_def_sb(struct bnx2x *bp) 4045
4046static inline
4047void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4048 int igu_sb_id, int igu_seg_id)
2771{ 4049{
2772 int func = BP_FUNC(bp); 4050 hc_sm->igu_sb_id = igu_sb_id;
4051 hc_sm->igu_seg_id = igu_seg_id;
4052 hc_sm->timer_value = 0xFF;
4053 hc_sm->time_to_expire = 0xFFFFFFFF;
4054}
4055
4056static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4057 u8 vf_valid, int fw_sb_id, int igu_sb_id)
4058{
4059 int igu_seg_id;
4060
4061 struct hc_status_block_data_e2 sb_data_e2;
4062 struct hc_status_block_data_e1x sb_data_e1x;
4063 struct hc_status_block_sm *hc_sm_p;
4064 int data_size;
4065 u32 *sb_data_p;
4066
4067 if (CHIP_INT_MODE_IS_BC(bp))
4068 igu_seg_id = HC_SEG_ACCESS_NORM;
4069 else
4070 igu_seg_id = IGU_SEG_ACCESS_NORM;
4071
4072 bnx2x_zero_fp_sb(bp, fw_sb_id);
4073
4074 if (CHIP_IS_E2(bp)) {
4075 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
4076 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
4077 sb_data_e2.common.p_func.vf_id = vfid;
4078 sb_data_e2.common.p_func.vf_valid = vf_valid;
4079 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
4080 sb_data_e2.common.same_igu_sb_1b = true;
4081 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
4082 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
4083 hc_sm_p = sb_data_e2.common.state_machine;
4084 sb_data_p = (u32 *)&sb_data_e2;
4085 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4086 } else {
4087 memset(&sb_data_e1x, 0,
4088 sizeof(struct hc_status_block_data_e1x));
4089 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
4090 sb_data_e1x.common.p_func.vf_id = 0xff;
4091 sb_data_e1x.common.p_func.vf_valid = false;
4092 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
4093 sb_data_e1x.common.same_igu_sb_1b = true;
4094 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
4095 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
4096 hc_sm_p = sb_data_e1x.common.state_machine;
4097 sb_data_p = (u32 *)&sb_data_e1x;
4098 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4099 }
4100
4101 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
4102 igu_sb_id, igu_seg_id);
4103 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
4104 igu_sb_id, igu_seg_id);
2773 4105
2774 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY + 4106 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
2775 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, 4107
2776 sizeof(struct tstorm_def_status_block)/4); 4108 /* write indecies to HW */
2777 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + 4109 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
2778 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2779 sizeof(struct cstorm_def_status_block_u)/4);
2780 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2781 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2782 sizeof(struct cstorm_def_status_block_c)/4);
2783 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
2784 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2785 sizeof(struct xstorm_def_status_block)/4);
2786} 4110}
2787 4111
2788static void bnx2x_init_def_sb(struct bnx2x *bp, 4112static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
2789 struct host_def_status_block *def_sb, 4113 u8 sb_index, u8 disable, u16 usec)
2790 dma_addr_t mapping, int sb_id)
2791{ 4114{
2792 int port = BP_PORT(bp); 4115 int port = BP_PORT(bp);
4116 u8 ticks = usec / BNX2X_BTR;
4117
4118 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4119
4120 disable = disable ? 1 : (usec ? 0 : 1);
4121 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4122}
4123
4124static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4125 u16 tx_usec, u16 rx_usec)
4126{
4127 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4128 false, rx_usec);
4129 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4130 false, tx_usec);
4131}
4132
4133static void bnx2x_init_def_sb(struct bnx2x *bp)
4134{
4135 struct host_sp_status_block *def_sb = bp->def_status_blk;
4136 dma_addr_t mapping = bp->def_status_blk_mapping;
4137 int igu_sp_sb_index;
4138 int igu_seg_id;
4139 int port = BP_PORT(bp);
2793 int func = BP_FUNC(bp); 4140 int func = BP_FUNC(bp);
2794 int index, val, reg_offset; 4141 int reg_offset;
2795 u64 section; 4142 u64 section;
4143 int index;
4144 struct hc_sp_status_block_data sp_sb_data;
4145 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4146
4147 if (CHIP_INT_MODE_IS_BC(bp)) {
4148 igu_sp_sb_index = DEF_SB_IGU_ID;
4149 igu_seg_id = HC_SEG_ACCESS_DEF;
4150 } else {
4151 igu_sp_sb_index = bp->igu_dsb_id;
4152 igu_seg_id = IGU_SEG_ACCESS_DEF;
4153 }
2796 4154
2797 /* ATTN */ 4155 /* ATTN */
2798 section = ((u64)mapping) + offsetof(struct host_def_status_block, 4156 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
2799 atten_status_block); 4157 atten_status_block);
2800 def_sb->atten_status_block.status_block_id = sb_id; 4158 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
2801 4159
2802 bp->attn_state = 0; 4160 bp->attn_state = 0;
2803 4161
2804 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4162 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2805 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 4163 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2806
2807 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 4164 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2808 bp->attn_group[index].sig[0] = REG_RD(bp, 4165 int sindex;
2809 reg_offset + 0x10*index); 4166 /* take care of sig[0]..sig[4] */
2810 bp->attn_group[index].sig[1] = REG_RD(bp, 4167 for (sindex = 0; sindex < 4; sindex++)
2811 reg_offset + 0x4 + 0x10*index); 4168 bp->attn_group[index].sig[sindex] =
2812 bp->attn_group[index].sig[2] = REG_RD(bp, 4169 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
2813 reg_offset + 0x8 + 0x10*index); 4170
2814 bp->attn_group[index].sig[3] = REG_RD(bp, 4171 if (CHIP_IS_E2(bp))
2815 reg_offset + 0xc + 0x10*index); 4172 /*
4173 * enable5 is separate from the rest of the registers,
4174 * and therefore the address skip is 4
4175 * and not 16 between the different groups
4176 */
4177 bp->attn_group[index].sig[4] = REG_RD(bp,
4178 reg_offset + 0x10 + 0x4*index);
4179 else
4180 bp->attn_group[index].sig[4] = 0;
2816 } 4181 }
2817 4182
2818 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : 4183 if (bp->common.int_block == INT_BLOCK_HC) {
2819 HC_REG_ATTN_MSG0_ADDR_L); 4184 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4185 HC_REG_ATTN_MSG0_ADDR_L);
2820 4186
2821 REG_WR(bp, reg_offset, U64_LO(section)); 4187 REG_WR(bp, reg_offset, U64_LO(section));
2822 REG_WR(bp, reg_offset + 4, U64_HI(section)); 4188 REG_WR(bp, reg_offset + 4, U64_HI(section));
4189 } else if (CHIP_IS_E2(bp)) {
4190 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4191 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4192 }
2823 4193
2824 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0); 4194 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4195 sp_sb);
2825 4196
2826 val = REG_RD(bp, reg_offset); 4197 bnx2x_zero_sp_sb(bp);
2827 val |= sb_id;
2828 REG_WR(bp, reg_offset, val);
2829 4198
2830 /* USTORM */ 4199 sp_sb_data.host_sb_addr.lo = U64_LO(section);
2831 section = ((u64)mapping) + offsetof(struct host_def_status_block, 4200 sp_sb_data.host_sb_addr.hi = U64_HI(section);
2832 u_def_status_block); 4201 sp_sb_data.igu_sb_id = igu_sp_sb_index;
2833 def_sb->u_def_status_block.status_block_id = sb_id; 4202 sp_sb_data.igu_seg_id = igu_seg_id;
2834 4203 sp_sb_data.p_func.pf_id = func;
2835 REG_WR(bp, BAR_CSTRORM_INTMEM + 4204 sp_sb_data.p_func.vnic_id = BP_VN(bp);
2836 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section)); 4205 sp_sb_data.p_func.vf_id = 0xff;
2837 REG_WR(bp, BAR_CSTRORM_INTMEM +
2838 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
2839 U64_HI(section));
2840 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2841 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2842
2843 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2844 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2845 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
2846 4206
2847 /* CSTORM */ 4207 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
2848 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2849 c_def_status_block);
2850 def_sb->c_def_status_block.status_block_id = sb_id;
2851
2852 REG_WR(bp, BAR_CSTRORM_INTMEM +
2853 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
2854 REG_WR(bp, BAR_CSTRORM_INTMEM +
2855 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
2856 U64_HI(section));
2857 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
2858 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2859
2860 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2861 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2862 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
2863
2864 /* TSTORM */
2865 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2866 t_def_status_block);
2867 def_sb->t_def_status_block.status_block_id = sb_id;
2868
2869 REG_WR(bp, BAR_TSTRORM_INTMEM +
2870 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2871 REG_WR(bp, BAR_TSTRORM_INTMEM +
2872 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2873 U64_HI(section));
2874 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
2875 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2876
2877 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2878 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2879 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2880
2881 /* XSTORM */
2882 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2883 x_def_status_block);
2884 def_sb->x_def_status_block.status_block_id = sb_id;
2885
2886 REG_WR(bp, BAR_XSTRORM_INTMEM +
2887 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2888 REG_WR(bp, BAR_XSTRORM_INTMEM +
2889 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2890 U64_HI(section));
2891 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
2892 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2893
2894 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2895 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2896 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2897 4208
2898 bp->stats_pending = 0; 4209 bp->stats_pending = 0;
2899 bp->set_mac_pending = 0; 4210 bp->set_mac_pending = 0;
2900 4211
2901 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 4212 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
2902} 4213}
2903 4214
2904void bnx2x_update_coalesce(struct bnx2x *bp) 4215void bnx2x_update_coalesce(struct bnx2x *bp)
2905{ 4216{
2906 int port = BP_PORT(bp);
2907 int i; 4217 int i;
2908 4218
2909 for_each_queue(bp, i) { 4219 for_each_eth_queue(bp, i)
2910 int sb_id = bp->fp[i].sb_id; 4220 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
2911 4221 bp->tx_ticks, bp->rx_ticks);
2912 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2913 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2914 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2915 U_SB_ETH_RX_CQ_INDEX),
2916 bp->rx_ticks/(4 * BNX2X_BTR));
2917 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2918 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2919 U_SB_ETH_RX_CQ_INDEX),
2920 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2921
2922 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2923 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2924 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2925 C_SB_ETH_TX_CQ_INDEX),
2926 bp->tx_ticks/(4 * BNX2X_BTR));
2927 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2928 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2929 C_SB_ETH_TX_CQ_INDEX),
2930 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2931 }
2932} 4222}
2933 4223
2934static void bnx2x_init_sp_ring(struct bnx2x *bp) 4224static void bnx2x_init_sp_ring(struct bnx2x *bp)
2935{ 4225{
2936 int func = BP_FUNC(bp);
2937
2938 spin_lock_init(&bp->spq_lock); 4226 spin_lock_init(&bp->spq_lock);
4227 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
2939 4228
2940 bp->spq_left = MAX_SPQ_PENDING;
2941 bp->spq_prod_idx = 0; 4229 bp->spq_prod_idx = 0;
2942 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; 4230 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2943 bp->spq_prod_bd = bp->spq; 4231 bp->spq_prod_bd = bp->spq;
2944 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; 4232 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2945
2946 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
2947 U64_LO(bp->spq_mapping));
2948 REG_WR(bp,
2949 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2950 U64_HI(bp->spq_mapping));
2951
2952 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
2953 bp->spq_prod_idx);
2954} 4233}
2955 4234
2956static void bnx2x_init_context(struct bnx2x *bp) 4235static void bnx2x_init_eq_ring(struct bnx2x *bp)
2957{ 4236{
2958 int i; 4237 int i;
4238 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4239 union event_ring_elem *elem =
4240 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
2959 4241
2960 /* Rx */ 4242 elem->next_page.addr.hi =
2961 for_each_queue(bp, i) { 4243 cpu_to_le32(U64_HI(bp->eq_mapping +
2962 struct eth_context *context = bnx2x_sp(bp, context[i].eth); 4244 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
2963 struct bnx2x_fastpath *fp = &bp->fp[i]; 4245 elem->next_page.addr.lo =
2964 u8 cl_id = fp->cl_id; 4246 cpu_to_le32(U64_LO(bp->eq_mapping +
2965 4247 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
2966 context->ustorm_st_context.common.sb_index_numbers =
2967 BNX2X_RX_SB_INDEX_NUM;
2968 context->ustorm_st_context.common.clientId = cl_id;
2969 context->ustorm_st_context.common.status_block_id = fp->sb_id;
2970 context->ustorm_st_context.common.flags =
2971 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2972 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
2973 context->ustorm_st_context.common.statistics_counter_id =
2974 cl_id;
2975 context->ustorm_st_context.common.mc_alignment_log_size =
2976 BNX2X_RX_ALIGN_SHIFT;
2977 context->ustorm_st_context.common.bd_buff_size =
2978 bp->rx_buf_size;
2979 context->ustorm_st_context.common.bd_page_base_hi =
2980 U64_HI(fp->rx_desc_mapping);
2981 context->ustorm_st_context.common.bd_page_base_lo =
2982 U64_LO(fp->rx_desc_mapping);
2983 if (!fp->disable_tpa) {
2984 context->ustorm_st_context.common.flags |=
2985 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
2986 context->ustorm_st_context.common.sge_buff_size =
2987 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
2988 0xffff);
2989 context->ustorm_st_context.common.sge_page_base_hi =
2990 U64_HI(fp->rx_sge_mapping);
2991 context->ustorm_st_context.common.sge_page_base_lo =
2992 U64_LO(fp->rx_sge_mapping);
2993
2994 context->ustorm_st_context.common.max_sges_for_packet =
2995 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
2996 context->ustorm_st_context.common.max_sges_for_packet =
2997 ((context->ustorm_st_context.common.
2998 max_sges_for_packet + PAGES_PER_SGE - 1) &
2999 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
3000 }
3001
3002 context->ustorm_ag_context.cdu_usage =
3003 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3004 CDU_REGION_NUMBER_UCM_AG,
3005 ETH_CONNECTION_TYPE);
3006
3007 context->xstorm_ag_context.cdu_reserved =
3008 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3009 CDU_REGION_NUMBER_XCM_AG,
3010 ETH_CONNECTION_TYPE);
3011 }
3012
3013 /* Tx */
3014 for_each_queue(bp, i) {
3015 struct bnx2x_fastpath *fp = &bp->fp[i];
3016 struct eth_context *context =
3017 bnx2x_sp(bp, context[i].eth);
3018
3019 context->cstorm_st_context.sb_index_number =
3020 C_SB_ETH_TX_CQ_INDEX;
3021 context->cstorm_st_context.status_block_id = fp->sb_id;
3022
3023 context->xstorm_st_context.tx_bd_page_base_hi =
3024 U64_HI(fp->tx_desc_mapping);
3025 context->xstorm_st_context.tx_bd_page_base_lo =
3026 U64_LO(fp->tx_desc_mapping);
3027 context->xstorm_st_context.statistics_data = (fp->cl_id |
3028 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3029 } 4248 }
4249 bp->eq_cons = 0;
4250 bp->eq_prod = NUM_EQ_DESC;
4251 bp->eq_cons_sb = BNX2X_EQ_INDEX;
4252 /* we want a warning message before it gets rought... */
4253 atomic_set(&bp->eq_spq_left,
4254 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
3030} 4255}
3031 4256
3032static void bnx2x_init_ind_table(struct bnx2x *bp) 4257void bnx2x_push_indir_table(struct bnx2x *bp)
3033{ 4258{
3034 int func = BP_FUNC(bp); 4259 int func = BP_FUNC(bp);
3035 int i; 4260 int i;
@@ -3037,55 +4262,29 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
3037 if (bp->multi_mode == ETH_RSS_MODE_DISABLED) 4262 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3038 return; 4263 return;
3039 4264
3040 DP(NETIF_MSG_IFUP,
3041 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
3042 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) 4265 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3043 REG_WR8(bp, BAR_TSTRORM_INTMEM + 4266 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3044 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, 4267 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
3045 bp->fp->cl_id + (i % bp->num_queues)); 4268 bp->fp->cl_id + bp->rx_indir_table[i]);
3046} 4269}
3047 4270
3048void bnx2x_set_client_config(struct bnx2x *bp) 4271static void bnx2x_init_ind_table(struct bnx2x *bp)
3049{ 4272{
3050 struct tstorm_eth_client_config tstorm_client = {0};
3051 int port = BP_PORT(bp);
3052 int i; 4273 int i;
3053 4274
3054 tstorm_client.mtu = bp->dev->mtu; 4275 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3055 tstorm_client.config_flags = 4276 bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
3056 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3057 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
3058#ifdef BCM_VLAN
3059 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
3060 tstorm_client.config_flags |=
3061 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3062 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3063 }
3064#endif
3065
3066 for_each_queue(bp, i) {
3067 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3068
3069 REG_WR(bp, BAR_TSTRORM_INTMEM +
3070 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3071 ((u32 *)&tstorm_client)[0]);
3072 REG_WR(bp, BAR_TSTRORM_INTMEM +
3073 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3074 ((u32 *)&tstorm_client)[1]);
3075 }
3076 4277
3077 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n", 4278 bnx2x_push_indir_table(bp);
3078 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
3079} 4279}
3080 4280
3081void bnx2x_set_storm_rx_mode(struct bnx2x *bp) 4281void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3082{ 4282{
3083 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3084 int mode = bp->rx_mode; 4283 int mode = bp->rx_mode;
3085 int mask = bp->rx_mode_cl_mask;
3086 int func = BP_FUNC(bp);
3087 int port = BP_PORT(bp); 4284 int port = BP_PORT(bp);
3088 int i; 4285 u16 cl_id;
4286 u32 def_q_filters = 0;
4287
3089 /* All but management unicast packets should pass to the host as well */ 4288 /* All but management unicast packets should pass to the host as well */
3090 u32 llh_mask = 4289 u32 llh_mask =
3091 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST | 4290 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
@@ -3093,28 +4292,58 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3093 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN | 4292 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3094 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN; 4293 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
3095 4294
3096 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
3097
3098 switch (mode) { 4295 switch (mode) {
3099 case BNX2X_RX_MODE_NONE: /* no Rx */ 4296 case BNX2X_RX_MODE_NONE: /* no Rx */
3100 tstorm_mac_filter.ucast_drop_all = mask; 4297 def_q_filters = BNX2X_ACCEPT_NONE;
3101 tstorm_mac_filter.mcast_drop_all = mask; 4298#ifdef BCM_CNIC
3102 tstorm_mac_filter.bcast_drop_all = mask; 4299 if (!NO_FCOE(bp)) {
4300 cl_id = bnx2x_fcoe(bp, cl_id);
4301 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4302 }
4303#endif
3103 break; 4304 break;
3104 4305
3105 case BNX2X_RX_MODE_NORMAL: 4306 case BNX2X_RX_MODE_NORMAL:
3106 tstorm_mac_filter.bcast_accept_all = mask; 4307 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4308 BNX2X_ACCEPT_MULTICAST;
4309#ifdef BCM_CNIC
4310 if (!NO_FCOE(bp)) {
4311 cl_id = bnx2x_fcoe(bp, cl_id);
4312 bnx2x_rxq_set_mac_filters(bp, cl_id,
4313 BNX2X_ACCEPT_UNICAST |
4314 BNX2X_ACCEPT_MULTICAST);
4315 }
4316#endif
3107 break; 4317 break;
3108 4318
3109 case BNX2X_RX_MODE_ALLMULTI: 4319 case BNX2X_RX_MODE_ALLMULTI:
3110 tstorm_mac_filter.mcast_accept_all = mask; 4320 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
3111 tstorm_mac_filter.bcast_accept_all = mask; 4321 BNX2X_ACCEPT_ALL_MULTICAST;
4322#ifdef BCM_CNIC
4323 /*
4324 * Prevent duplication of multicast packets by configuring FCoE
4325 * L2 Client to receive only matched unicast frames.
4326 */
4327 if (!NO_FCOE(bp)) {
4328 cl_id = bnx2x_fcoe(bp, cl_id);
4329 bnx2x_rxq_set_mac_filters(bp, cl_id,
4330 BNX2X_ACCEPT_UNICAST);
4331 }
4332#endif
3112 break; 4333 break;
3113 4334
3114 case BNX2X_RX_MODE_PROMISC: 4335 case BNX2X_RX_MODE_PROMISC:
3115 tstorm_mac_filter.ucast_accept_all = mask; 4336 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
3116 tstorm_mac_filter.mcast_accept_all = mask; 4337#ifdef BCM_CNIC
3117 tstorm_mac_filter.bcast_accept_all = mask; 4338 /*
4339 * Prevent packets duplication by configuring DROP_ALL for FCoE
4340 * L2 Client.
4341 */
4342 if (!NO_FCOE(bp)) {
4343 cl_id = bnx2x_fcoe(bp, cl_id);
4344 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4345 }
4346#endif
3118 /* pass management unicast packets as well */ 4347 /* pass management unicast packets as well */
3119 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; 4348 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3120 break; 4349 break;
@@ -3124,263 +4353,79 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3124 break; 4353 break;
3125 } 4354 }
3126 4355
3127 REG_WR(bp, 4356 cl_id = BP_L_ID(bp);
3128 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK), 4357 bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
3129 llh_mask);
3130 4358
3131 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) { 4359 REG_WR(bp,
3132 REG_WR(bp, BAR_TSTRORM_INTMEM + 4360 (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
3133 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4, 4361 NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
3134 ((u32 *)&tstorm_mac_filter)[i]);
3135 4362
3136/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i, 4363 DP(NETIF_MSG_IFUP, "rx mode %d\n"
3137 ((u32 *)&tstorm_mac_filter)[i]); */ 4364 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
3138 } 4365 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
4366 "unmatched_ucast 0x%x\n", mode,
4367 bp->mac_filters.ucast_drop_all,
4368 bp->mac_filters.mcast_drop_all,
4369 bp->mac_filters.bcast_drop_all,
4370 bp->mac_filters.ucast_accept_all,
4371 bp->mac_filters.mcast_accept_all,
4372 bp->mac_filters.bcast_accept_all,
4373 bp->mac_filters.unmatched_unicast
4374 );
3139 4375
3140 if (mode != BNX2X_RX_MODE_NONE) 4376 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
3141 bnx2x_set_client_config(bp);
3142} 4377}
3143 4378
3144static void bnx2x_init_internal_common(struct bnx2x *bp) 4379static void bnx2x_init_internal_common(struct bnx2x *bp)
3145{ 4380{
3146 int i; 4381 int i;
3147 4382
3148 /* Zero this manually as its initialization is 4383 if (!CHIP_IS_E1(bp)) {
3149 currently missing in the initTool */
3150 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3151 REG_WR(bp, BAR_USTRORM_INTMEM +
3152 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3153}
3154
3155static void bnx2x_init_internal_port(struct bnx2x *bp)
3156{
3157 int port = BP_PORT(bp);
3158
3159 REG_WR(bp,
3160 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3161 REG_WR(bp,
3162 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3163 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3164 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3165}
3166
3167static void bnx2x_init_internal_func(struct bnx2x *bp)
3168{
3169 struct tstorm_eth_function_common_config tstorm_config = {0};
3170 struct stats_indication_flags stats_flags = {0};
3171 int port = BP_PORT(bp);
3172 int func = BP_FUNC(bp);
3173 int i, j;
3174 u32 offset;
3175 u16 max_agg_size;
3176
3177 tstorm_config.config_flags = RSS_FLAGS(bp);
3178
3179 if (is_multi(bp))
3180 tstorm_config.rss_result_mask = MULTI_MASK;
3181
3182 /* Enable TPA if needed */
3183 if (bp->flags & TPA_ENABLE_FLAG)
3184 tstorm_config.config_flags |=
3185 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3186
3187 if (IS_E1HMF(bp))
3188 tstorm_config.config_flags |=
3189 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
3190
3191 tstorm_config.leading_client_id = BP_L_ID(bp);
3192
3193 REG_WR(bp, BAR_TSTRORM_INTMEM +
3194 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3195 (*(u32 *)&tstorm_config));
3196
3197 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3198 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3199 bnx2x_set_storm_rx_mode(bp);
3200 4384
3201 for_each_queue(bp, i) { 4385 /* xstorm needs to know whether to add ovlan to packets or not,
3202 u8 cl_id = bp->fp[i].cl_id; 4386 * in switch-independent we'll write 0 to here... */
3203
3204 /* reset xstorm per client statistics */
3205 offset = BAR_XSTRORM_INTMEM +
3206 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3207 for (j = 0;
3208 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3209 REG_WR(bp, offset + j*4, 0);
3210
3211 /* reset tstorm per client statistics */
3212 offset = BAR_TSTRORM_INTMEM +
3213 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3214 for (j = 0;
3215 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3216 REG_WR(bp, offset + j*4, 0);
3217
3218 /* reset ustorm per client statistics */
3219 offset = BAR_USTRORM_INTMEM +
3220 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3221 for (j = 0;
3222 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3223 REG_WR(bp, offset + j*4, 0);
3224 }
3225
3226 /* Init statistics related context */
3227 stats_flags.collect_eth = 1;
3228
3229 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
3230 ((u32 *)&stats_flags)[0]);
3231 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3232 ((u32 *)&stats_flags)[1]);
3233
3234 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
3235 ((u32 *)&stats_flags)[0]);
3236 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3237 ((u32 *)&stats_flags)[1]);
3238
3239 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3240 ((u32 *)&stats_flags)[0]);
3241 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3242 ((u32 *)&stats_flags)[1]);
3243
3244 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
3245 ((u32 *)&stats_flags)[0]);
3246 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3247 ((u32 *)&stats_flags)[1]);
3248
3249 REG_WR(bp, BAR_XSTRORM_INTMEM +
3250 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3251 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3252 REG_WR(bp, BAR_XSTRORM_INTMEM +
3253 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3254 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3255
3256 REG_WR(bp, BAR_TSTRORM_INTMEM +
3257 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3258 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3259 REG_WR(bp, BAR_TSTRORM_INTMEM +
3260 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3261 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3262
3263 REG_WR(bp, BAR_USTRORM_INTMEM +
3264 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3265 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3266 REG_WR(bp, BAR_USTRORM_INTMEM +
3267 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3268 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3269
3270 if (CHIP_IS_E1H(bp)) {
3271 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, 4387 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3272 IS_E1HMF(bp)); 4388 bp->mf_mode);
3273 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET, 4389 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3274 IS_E1HMF(bp)); 4390 bp->mf_mode);
3275 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET, 4391 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3276 IS_E1HMF(bp)); 4392 bp->mf_mode);
3277 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET, 4393 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3278 IS_E1HMF(bp)); 4394 bp->mf_mode);
3279
3280 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3281 bp->e1hov);
3282 } 4395 }
3283 4396
3284 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */ 4397 if (IS_MF_SI(bp))
3285 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * 4398 /*
3286 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); 4399 * In switch independent mode, the TSTORM needs to accept
3287 for_each_queue(bp, i) { 4400 * packets that failed classification, since approximate match
3288 struct bnx2x_fastpath *fp = &bp->fp[i]; 4401 * mac addresses aren't written to NIG LLH
3289 4402 */
3290 REG_WR(bp, BAR_USTRORM_INTMEM + 4403 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3291 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id), 4404 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
3292 U64_LO(fp->rx_comp_mapping));
3293 REG_WR(bp, BAR_USTRORM_INTMEM +
3294 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3295 U64_HI(fp->rx_comp_mapping));
3296 4405
3297 /* Next page */ 4406 /* Zero this manually as its initialization is
3298 REG_WR(bp, BAR_USTRORM_INTMEM + 4407 currently missing in the initTool */
3299 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id), 4408 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3300 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3301 REG_WR(bp, BAR_USTRORM_INTMEM + 4409 REG_WR(bp, BAR_USTRORM_INTMEM +
3302 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4, 4410 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3303 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE)); 4411 if (CHIP_IS_E2(bp)) {
3304 4412 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
3305 REG_WR16(bp, BAR_USTRORM_INTMEM + 4413 CHIP_INT_MODE_IS_BC(bp) ?
3306 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id), 4414 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
3307 max_agg_size);
3308 }
3309
3310 /* dropless flow control */
3311 if (CHIP_IS_E1H(bp)) {
3312 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3313
3314 rx_pause.bd_thr_low = 250;
3315 rx_pause.cqe_thr_low = 250;
3316 rx_pause.cos = 1;
3317 rx_pause.sge_thr_low = 0;
3318 rx_pause.bd_thr_high = 350;
3319 rx_pause.cqe_thr_high = 350;
3320 rx_pause.sge_thr_high = 0;
3321
3322 for_each_queue(bp, i) {
3323 struct bnx2x_fastpath *fp = &bp->fp[i];
3324
3325 if (!fp->disable_tpa) {
3326 rx_pause.sge_thr_low = 150;
3327 rx_pause.sge_thr_high = 250;
3328 }
3329
3330
3331 offset = BAR_USTRORM_INTMEM +
3332 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3333 fp->cl_id);
3334 for (j = 0;
3335 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3336 j++)
3337 REG_WR(bp, offset + j*4,
3338 ((u32 *)&rx_pause)[j]);
3339 }
3340 }
3341
3342 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3343
3344 /* Init rate shaping and fairness contexts */
3345 if (IS_E1HMF(bp)) {
3346 int vn;
3347
3348 /* During init there is no active link
3349 Until link is up, set link rate to 10Gbps */
3350 bp->link_vars.line_speed = SPEED_10000;
3351 bnx2x_init_port_minmax(bp);
3352
3353 if (!BP_NOMCP(bp))
3354 bp->mf_config =
3355 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
3356 bnx2x_calc_vn_weight_sum(bp);
3357
3358 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3359 bnx2x_init_vn_minmax(bp, 2*vn + port);
3360
3361 /* Enable rate shaping and fairness */
3362 bp->cmng.flags.cmng_enables |=
3363 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
3364
3365 } else {
3366 /* rate shaping and fairness are disabled */
3367 DP(NETIF_MSG_IFUP,
3368 "single function mode minmax will be disabled\n");
3369 } 4415 }
4416}
3370 4417
3371 4418static void bnx2x_init_internal_port(struct bnx2x *bp)
3372 /* Store cmng structures to internal memory */ 4419{
3373 if (bp->port.pmf) 4420 /* port */
3374 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++) 4421 bnx2x_dcb_init_intmem_pfc(bp);
3375 REG_WR(bp, BAR_XSTRORM_INTMEM +
3376 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3377 ((u32 *)(&bp->cmng))[i]);
3378} 4422}
3379 4423
3380static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) 4424static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3381{ 4425{
3382 switch (load_code) { 4426 switch (load_code) {
3383 case FW_MSG_CODE_DRV_LOAD_COMMON: 4427 case FW_MSG_CODE_DRV_LOAD_COMMON:
4428 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
3384 bnx2x_init_internal_common(bp); 4429 bnx2x_init_internal_common(bp);
3385 /* no break */ 4430 /* no break */
3386 4431
@@ -3389,7 +4434,8 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3389 /* no break */ 4434 /* no break */
3390 4435
3391 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 4436 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3392 bnx2x_init_internal_func(bp); 4437 /* internal memory per function is
4438 initialized inside bnx2x_pf_init */
3393 break; 4439 break;
3394 4440
3395 default: 4441 default:
@@ -3398,43 +4444,65 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3398 } 4444 }
3399} 4445}
3400 4446
4447static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4448{
4449 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4450
4451 fp->state = BNX2X_FP_STATE_CLOSED;
4452
4453 fp->cid = fp_idx;
4454 fp->cl_id = BP_L_ID(bp) + fp_idx;
4455 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4456 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4457 /* qZone id equals to FW (per path) client id */
4458 fp->cl_qzone_id = fp->cl_id +
4459 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4460 ETH_MAX_RX_CLIENTS_E1H);
4461 /* init shortcut */
4462 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4463 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
4464 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4465 /* Setup SB indicies */
4466 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4467 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4468
4469 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4470 "cl_id %d fw_sb %d igu_sb %d\n",
4471 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4472 fp->igu_sb_id);
4473 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4474 fp->fw_sb_id, fp->igu_sb_id);
4475
4476 bnx2x_update_fpsb_idx(fp);
4477}
4478
3401void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) 4479void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
3402{ 4480{
3403 int i; 4481 int i;
3404 4482
3405 for_each_queue(bp, i) { 4483 for_each_eth_queue(bp, i)
3406 struct bnx2x_fastpath *fp = &bp->fp[i]; 4484 bnx2x_init_fp_sb(bp, i);
3407
3408 fp->bp = bp;
3409 fp->state = BNX2X_FP_STATE_CLOSED;
3410 fp->index = i;
3411 fp->cl_id = BP_L_ID(bp) + i;
3412#ifdef BCM_CNIC 4485#ifdef BCM_CNIC
3413 fp->sb_id = fp->cl_id + 1; 4486 if (!NO_FCOE(bp))
3414#else 4487 bnx2x_init_fcoe_fp(bp);
3415 fp->sb_id = fp->cl_id; 4488
4489 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4490 BNX2X_VF_ID_INVALID, false,
4491 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4492
3416#endif 4493#endif
3417 DP(NETIF_MSG_IFUP,
3418 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
3419 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
3420 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
3421 fp->sb_id);
3422 bnx2x_update_fpsb_idx(fp);
3423 }
3424 4494
3425 /* ensure status block indices were read */ 4495 /* ensure status block indices were read */
3426 rmb(); 4496 rmb();
3427 4497
3428 4498 bnx2x_init_def_sb(bp);
3429 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3430 DEF_SB_ID);
3431 bnx2x_update_dsb_idx(bp); 4499 bnx2x_update_dsb_idx(bp);
3432 bnx2x_update_coalesce(bp);
3433 bnx2x_init_rx_rings(bp); 4500 bnx2x_init_rx_rings(bp);
3434 bnx2x_init_tx_ring(bp); 4501 bnx2x_init_tx_rings(bp);
3435 bnx2x_init_sp_ring(bp); 4502 bnx2x_init_sp_ring(bp);
3436 bnx2x_init_context(bp); 4503 bnx2x_init_eq_ring(bp);
3437 bnx2x_init_internal(bp, load_code); 4504 bnx2x_init_internal(bp, load_code);
4505 bnx2x_pf_init(bp);
3438 bnx2x_init_ind_table(bp); 4506 bnx2x_init_ind_table(bp);
3439 bnx2x_stats_init(bp); 4507 bnx2x_stats_init(bp);
3440 4508
@@ -3470,8 +4538,7 @@ static int bnx2x_gunzip_init(struct bnx2x *bp)
3470 if (bp->strm == NULL) 4538 if (bp->strm == NULL)
3471 goto gunzip_nomem2; 4539 goto gunzip_nomem2;
3472 4540
3473 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), 4541 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
3474 GFP_KERNEL);
3475 if (bp->strm->workspace == NULL) 4542 if (bp->strm->workspace == NULL)
3476 goto gunzip_nomem3; 4543 goto gunzip_nomem3;
3477 4544
@@ -3494,10 +4561,11 @@ gunzip_nomem1:
3494 4561
3495static void bnx2x_gunzip_end(struct bnx2x *bp) 4562static void bnx2x_gunzip_end(struct bnx2x *bp)
3496{ 4563{
3497 kfree(bp->strm->workspace); 4564 if (bp->strm) {
3498 4565 vfree(bp->strm->workspace);
3499 kfree(bp->strm); 4566 kfree(bp->strm);
3500 bp->strm = NULL; 4567 bp->strm = NULL;
4568 }
3501 4569
3502 if (bp->gunzip_buf) { 4570 if (bp->gunzip_buf) {
3503 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, 4571 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
@@ -3593,8 +4661,6 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
3593 else 4661 else
3594 factor = 1; 4662 factor = 1;
3595 4663
3596 DP(NETIF_MSG_HW, "start part1\n");
3597
3598 /* Disable inputs of parser neighbor blocks */ 4664 /* Disable inputs of parser neighbor blocks */
3599 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 4665 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3600 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 4666 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
@@ -3728,12 +4794,22 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
3728 return 0; /* OK */ 4794 return 0; /* OK */
3729} 4795}
3730 4796
3731static void enable_blocks_attention(struct bnx2x *bp) 4797static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
3732{ 4798{
3733 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 4799 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3734 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); 4800 if (CHIP_IS_E2(bp))
4801 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4802 else
4803 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3735 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 4804 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3736 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 4805 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4806 /*
4807 * mask read length error interrupts in brb for parser
4808 * (parsing unit and 'checksum and crc' unit)
4809 * these errors are legal (PU reads fixed length and CAC can cause
4810 * read length error on truncated packets)
4811 */
4812 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
3737 REG_WR(bp, QM_REG_QM_INT_MASK, 0); 4813 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3738 REG_WR(bp, TM_REG_TM_INT_MASK, 0); 4814 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3739 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); 4815 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
@@ -3752,8 +4828,16 @@ static void enable_blocks_attention(struct bnx2x *bp)
3752 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); 4828 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3753/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ 4829/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3754/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ 4830/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4831
3755 if (CHIP_REV_IS_FPGA(bp)) 4832 if (CHIP_REV_IS_FPGA(bp))
3756 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); 4833 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4834 else if (CHIP_IS_E2(bp))
4835 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4836 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4837 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4838 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4839 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4840 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
3757 else 4841 else
3758 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000); 4842 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
3759 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); 4843 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
@@ -3764,54 +4848,9 @@ static void enable_blocks_attention(struct bnx2x *bp)
3764 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); 4848 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3765 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); 4849 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
3766/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ 4850/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3767 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ 4851 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
3768} 4852}
3769 4853
3770static const struct {
3771 u32 addr;
3772 u32 mask;
3773} bnx2x_parity_mask[] = {
3774 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3775 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3776 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3777 {HC_REG_HC_PRTY_MASK, 0xffffffff},
3778 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3779 {QM_REG_QM_PRTY_MASK, 0x0},
3780 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3781 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3782 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3783 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3784 {CDU_REG_CDU_PRTY_MASK, 0x0},
3785 {CFC_REG_CFC_PRTY_MASK, 0x0},
3786 {DBG_REG_DBG_PRTY_MASK, 0x0},
3787 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3788 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3789 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3790 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3791 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3792 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3793 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3794 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3795 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3796 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3797 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3798 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3799 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3800 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3801 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3802};
3803
3804static void enable_blocks_parity(struct bnx2x *bp)
3805{
3806 int i, mask_arr_len =
3807 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
3808
3809 for (i = 0; i < mask_arr_len; i++)
3810 REG_WR(bp, bnx2x_parity_mask[i].addr,
3811 bnx2x_parity_mask[i].mask);
3812}
3813
3814
3815static void bnx2x_reset_common(struct bnx2x *bp) 4854static void bnx2x_reset_common(struct bnx2x *bp)
3816{ 4855{
3817 /* reset_common */ 4856 /* reset_common */
@@ -3862,17 +4901,12 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3862 */ 4901 */
3863 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) 4902 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3864 for (port = PORT_0; port < PORT_MAX; port++) { 4903 for (port = PORT_0; port < PORT_MAX; port++) {
3865 u32 phy_type =
3866 SHMEM_RD(bp, dev_info.port_hw_config[port].
3867 external_phy_config) &
3868 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3869 is_required |= 4904 is_required |=
3870 ((phy_type == 4905 bnx2x_fan_failure_det_req(
3871 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) || 4906 bp,
3872 (phy_type == 4907 bp->common.shmem_base,
3873 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || 4908 bp->common.shmem2_base,
3874 (phy_type == 4909 port);
3875 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
3876 } 4910 }
3877 4911
3878 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); 4912 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
@@ -3896,26 +4930,97 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3896 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); 4930 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3897} 4931}
3898 4932
3899static int bnx2x_init_common(struct bnx2x *bp) 4933static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4934{
4935 u32 offset = 0;
4936
4937 if (CHIP_IS_E1(bp))
4938 return;
4939 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4940 return;
4941
4942 switch (BP_ABS_FUNC(bp)) {
4943 case 0:
4944 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4945 break;
4946 case 1:
4947 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4948 break;
4949 case 2:
4950 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4951 break;
4952 case 3:
4953 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4954 break;
4955 case 4:
4956 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4957 break;
4958 case 5:
4959 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4960 break;
4961 case 6:
4962 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4963 break;
4964 case 7:
4965 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4966 break;
4967 default:
4968 return;
4969 }
4970
4971 REG_WR(bp, offset, pretend_func_num);
4972 REG_RD(bp, offset);
4973 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4974}
4975
4976static void bnx2x_pf_disable(struct bnx2x *bp)
4977{
4978 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4979 val &= ~IGU_PF_CONF_FUNC_EN;
4980
4981 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4982 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4983 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4984}
4985
4986static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
3900{ 4987{
3901 u32 val, i; 4988 u32 val, i;
3902#ifdef BCM_CNIC
3903 u32 wb_write[2];
3904#endif
3905 4989
3906 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); 4990 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
3907 4991
3908 bnx2x_reset_common(bp); 4992 bnx2x_reset_common(bp);
3909 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 4993 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3910 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); 4994 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
3911 4995
3912 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE); 4996 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
3913 if (CHIP_IS_E1H(bp)) 4997 if (!CHIP_IS_E1(bp))
3914 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp)); 4998 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
3915 4999
3916 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100); 5000 if (CHIP_IS_E2(bp)) {
3917 msleep(30); 5001 u8 fid;
3918 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0); 5002
5003 /**
5004 * 4-port mode or 2-port mode we need to turn of master-enable
5005 * for everyone, after that, turn it back on for self.
5006 * so, we disregard multi-function or not, and always disable
5007 * for all functions on the given path, this means 0,2,4,6 for
5008 * path 0 and 1,3,5,7 for path 1
5009 */
5010 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
5011 if (fid == BP_ABS_FUNC(bp)) {
5012 REG_WR(bp,
5013 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
5014 1);
5015 continue;
5016 }
5017
5018 bnx2x_pretend_func(bp, fid);
5019 /* clear pf enable */
5020 bnx2x_pf_disable(bp);
5021 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5022 }
5023 }
3919 5024
3920 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE); 5025 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
3921 if (CHIP_IS_E1(bp)) { 5026 if (CHIP_IS_E1(bp)) {
@@ -3943,12 +5048,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
3943 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 5048 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3944#endif 5049#endif
3945 5050
3946 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); 5051 bnx2x_ilt_init_page_size(bp, INITOP_SET);
3947#ifdef BCM_CNIC
3948 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3949 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3950 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3951#endif
3952 5052
3953 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) 5053 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3954 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 5054 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
@@ -3967,9 +5067,65 @@ static int bnx2x_init_common(struct bnx2x *bp)
3967 return -EBUSY; 5067 return -EBUSY;
3968 } 5068 }
3969 5069
5070 /* Timers bug workaround E2 only. We need to set the entire ILT to
5071 * have entries with value "0" and valid bit on.
5072 * This needs to be done by the first PF that is loaded in a path
5073 * (i.e. common phase)
5074 */
5075 if (CHIP_IS_E2(bp)) {
5076 struct ilt_client_info ilt_cli;
5077 struct bnx2x_ilt ilt;
5078 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5079 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5080
5081 /* initialize dummy TM client */
5082 ilt_cli.start = 0;
5083 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5084 ilt_cli.client_num = ILT_CLIENT_TM;
5085
5086 /* Step 1: set zeroes to all ilt page entries with valid bit on
5087 * Step 2: set the timers first/last ilt entry to point
5088 * to the entire range to prevent ILT range error for 3rd/4th
5089 * vnic (this code assumes existence of the vnic)
5090 *
5091 * both steps performed by call to bnx2x_ilt_client_init_op()
5092 * with dummy TM client
5093 *
5094 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5095 * and his brother are split registers
5096 */
5097 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5098 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5099 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5100
5101 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5102 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5103 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5104 }
5105
5106
3970 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); 5107 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3971 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); 5108 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3972 5109
5110 if (CHIP_IS_E2(bp)) {
5111 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5112 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5113 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5114
5115 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5116
5117 /* let the HW do it's magic ... */
5118 do {
5119 msleep(200);
5120 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5121 } while (factor-- && (val != 1));
5122
5123 if (val != 1) {
5124 BNX2X_ERR("ATC_INIT failed\n");
5125 return -EBUSY;
5126 }
5127 }
5128
3973 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE); 5129 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
3974 5130
3975 /* clean the DMAE memory */ 5131 /* clean the DMAE memory */
@@ -3988,20 +5144,12 @@ static int bnx2x_init_common(struct bnx2x *bp)
3988 5144
3989 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); 5145 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
3990 5146
3991#ifdef BCM_CNIC 5147 if (CHIP_MODE_IS_4_PORT(bp))
3992 wb_write[0] = 0; 5148 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
3993 wb_write[1] = 0; 5149
3994 for (i = 0; i < 64; i++) { 5150 /* QM queues pointers table */
3995 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16)); 5151 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
3996 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
3997 5152
3998 if (CHIP_IS_E1H(bp)) {
3999 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4000 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4001 wb_write, 2);
4002 }
4003 }
4004#endif
4005 /* soft reset pulse */ 5153 /* soft reset pulse */
4006 REG_WR(bp, QM_REG_SOFT_RESET, 1); 5154 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4007 REG_WR(bp, QM_REG_SOFT_RESET, 0); 5155 REG_WR(bp, QM_REG_SOFT_RESET, 0);
@@ -4011,21 +5159,35 @@ static int bnx2x_init_common(struct bnx2x *bp)
4011#endif 5159#endif
4012 5160
4013 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE); 5161 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
4014 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT); 5162 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5163
4015 if (!CHIP_REV_IS_SLOW(bp)) { 5164 if (!CHIP_REV_IS_SLOW(bp)) {
4016 /* enable hw interrupt from doorbell Q */ 5165 /* enable hw interrupt from doorbell Q */
4017 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 5166 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4018 } 5167 }
4019 5168
4020 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); 5169 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5170 if (CHIP_MODE_IS_4_PORT(bp)) {
5171 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5172 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5173 }
5174
4021 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); 5175 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4022 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); 5176 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4023#ifndef BCM_CNIC 5177#ifndef BCM_CNIC
4024 /* set NIC mode */ 5178 /* set NIC mode */
4025 REG_WR(bp, PRS_REG_NIC_MODE, 1); 5179 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4026#endif 5180#endif
4027 if (CHIP_IS_E1H(bp)) 5181 if (!CHIP_IS_E1(bp))
4028 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); 5182 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
5183
5184 if (CHIP_IS_E2(bp)) {
5185 /* Bit-map indicating which L2 hdrs may appear after the
5186 basic Ethernet header */
5187 int has_ovlan = IS_MF_SD(bp);
5188 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5189 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5190 }
4029 5191
4030 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE); 5192 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4031 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE); 5193 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
@@ -4042,6 +5204,9 @@ static int bnx2x_init_common(struct bnx2x *bp)
4042 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE); 5204 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4043 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE); 5205 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4044 5206
5207 if (CHIP_MODE_IS_4_PORT(bp))
5208 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5209
4045 /* sync semi rtc */ 5210 /* sync semi rtc */
4046 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 5211 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4047 0x80000000); 5212 0x80000000);
@@ -4052,9 +5217,16 @@ static int bnx2x_init_common(struct bnx2x *bp)
4052 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE); 5217 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4053 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE); 5218 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
4054 5219
5220 if (CHIP_IS_E2(bp)) {
5221 int has_ovlan = IS_MF_SD(bp);
5222 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5223 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5224 }
5225
4055 REG_WR(bp, SRC_REG_SOFT_RST, 1); 5226 REG_WR(bp, SRC_REG_SOFT_RST, 1);
4056 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) 5227 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4057 REG_WR(bp, i, random32()); 5228 REG_WR(bp, i, random32());
5229
4058 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE); 5230 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
4059#ifdef BCM_CNIC 5231#ifdef BCM_CNIC
4060 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); 5232 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
@@ -4089,6 +5261,11 @@ static int bnx2x_init_common(struct bnx2x *bp)
4089 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); 5261 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4090 5262
4091 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE); 5263 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5264
5265 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5266 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5267
5268 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
4092 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE); 5269 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4093 5270
4094 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE); 5271 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
@@ -4096,15 +5273,34 @@ static int bnx2x_init_common(struct bnx2x *bp)
4096 REG_WR(bp, 0x2814, 0xffffffff); 5273 REG_WR(bp, 0x2814, 0xffffffff);
4097 REG_WR(bp, 0x3820, 0xffffffff); 5274 REG_WR(bp, 0x3820, 0xffffffff);
4098 5275
5276 if (CHIP_IS_E2(bp)) {
5277 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5278 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5279 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5280 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5281 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5282 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5283 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5284 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5285 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5286 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5287 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5288 }
5289
4099 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE); 5290 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4100 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE); 5291 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4101 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE); 5292 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4102 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE); 5293 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4103 5294
4104 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE); 5295 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4105 if (CHIP_IS_E1H(bp)) { 5296 if (!CHIP_IS_E1(bp)) {
4106 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp)); 5297 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
4107 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp)); 5298 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
5299 }
5300 if (CHIP_IS_E2(bp)) {
5301 /* Bit-map indicating which L2 hdrs may appear after the
5302 basic Ethernet header */
5303 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
4108 } 5304 }
4109 5305
4110 if (CHIP_REV_IS_SLOW(bp)) 5306 if (CHIP_REV_IS_SLOW(bp))
@@ -4128,27 +5324,17 @@ static int bnx2x_init_common(struct bnx2x *bp)
4128 } 5324 }
4129 REG_WR(bp, CFC_REG_DEBUG0, 0); 5325 REG_WR(bp, CFC_REG_DEBUG0, 0);
4130 5326
4131 /* read NIG statistic 5327 if (CHIP_IS_E1(bp)) {
4132 to see if this is our first up since powerup */ 5328 /* read NIG statistic
4133 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 5329 to see if this is our first up since powerup */
4134 val = *bnx2x_sp(bp, wb_data[0]); 5330 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4135 5331 val = *bnx2x_sp(bp, wb_data[0]);
4136 /* do internal memory self test */
4137 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4138 BNX2X_ERR("internal mem self test failed\n");
4139 return -EBUSY;
4140 }
4141
4142 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
4143 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4144 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4145 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4146 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4147 bp->port.need_hw_lock = 1;
4148 break;
4149 5332
4150 default: 5333 /* do internal memory self test */
4151 break; 5334 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5335 BNX2X_ERR("internal mem self test failed\n");
5336 return -EBUSY;
5337 }
4152 } 5338 }
4153 5339
4154 bnx2x_setup_fan_failure_detection(bp); 5340 bnx2x_setup_fan_failure_detection(bp);
@@ -4156,21 +5342,35 @@ static int bnx2x_init_common(struct bnx2x *bp)
4156 /* clear PXP2 attentions */ 5342 /* clear PXP2 attentions */
4157 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 5343 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
4158 5344
4159 enable_blocks_attention(bp); 5345 bnx2x_enable_blocks_attention(bp);
4160 if (CHIP_PARITY_SUPPORTED(bp)) 5346 if (CHIP_PARITY_ENABLED(bp))
4161 enable_blocks_parity(bp); 5347 bnx2x_enable_blocks_parity(bp);
4162 5348
4163 if (!BP_NOMCP(bp)) { 5349 if (!BP_NOMCP(bp)) {
4164 bnx2x_acquire_phy_lock(bp); 5350 /* In E2 2-PORT mode, same ext phy is used for the two paths */
4165 bnx2x_common_init_phy(bp, bp->common.shmem_base); 5351 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
4166 bnx2x_release_phy_lock(bp); 5352 CHIP_IS_E1x(bp)) {
5353 u32 shmem_base[2], shmem2_base[2];
5354 shmem_base[0] = bp->common.shmem_base;
5355 shmem2_base[0] = bp->common.shmem2_base;
5356 if (CHIP_IS_E2(bp)) {
5357 shmem_base[1] =
5358 SHMEM2_RD(bp, other_shmem_base_addr);
5359 shmem2_base[1] =
5360 SHMEM2_RD(bp, other_shmem2_base_addr);
5361 }
5362 bnx2x_acquire_phy_lock(bp);
5363 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5364 bp->common.chip_id);
5365 bnx2x_release_phy_lock(bp);
5366 }
4167 } else 5367 } else
4168 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 5368 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4169 5369
4170 return 0; 5370 return 0;
4171} 5371}
4172 5372
4173static int bnx2x_init_port(struct bnx2x *bp) 5373static int bnx2x_init_hw_port(struct bnx2x *bp)
4174{ 5374{
4175 int port = BP_PORT(bp); 5375 int port = BP_PORT(bp);
4176 int init_stage = port ? PORT1_STAGE : PORT0_STAGE; 5376 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
@@ -4184,14 +5384,23 @@ static int bnx2x_init_port(struct bnx2x *bp)
4184 bnx2x_init_block(bp, PXP_BLOCK, init_stage); 5384 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4185 bnx2x_init_block(bp, PXP2_BLOCK, init_stage); 5385 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4186 5386
5387 /* Timers bug workaround: disables the pf_master bit in pglue at
5388 * common phase, we need to enable it here before any dmae access are
5389 * attempted. Therefore we manually added the enable-master to the
5390 * port phase (it also happens in the function phase)
5391 */
5392 if (CHIP_IS_E2(bp))
5393 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5394
4187 bnx2x_init_block(bp, TCM_BLOCK, init_stage); 5395 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4188 bnx2x_init_block(bp, UCM_BLOCK, init_stage); 5396 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4189 bnx2x_init_block(bp, CCM_BLOCK, init_stage); 5397 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
4190 bnx2x_init_block(bp, XCM_BLOCK, init_stage); 5398 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
4191 5399
4192#ifdef BCM_CNIC 5400 /* QM cid (connection) count */
4193 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1); 5401 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
4194 5402
5403#ifdef BCM_CNIC
4195 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); 5404 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
4196 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); 5405 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4197 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 5406 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
@@ -4199,29 +5408,41 @@ static int bnx2x_init_port(struct bnx2x *bp)
4199 5408
4200 bnx2x_init_block(bp, DQ_BLOCK, init_stage); 5409 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4201 5410
4202 bnx2x_init_block(bp, BRB1_BLOCK, init_stage); 5411 if (CHIP_MODE_IS_4_PORT(bp))
4203 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) { 5412 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
4204 /* no pause for emulation and FPGA */ 5413
4205 low = 0; 5414 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
4206 high = 513; 5415 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4207 } else { 5416 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
4208 if (IS_E1HMF(bp)) 5417 /* no pause for emulation and FPGA */
4209 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); 5418 low = 0;
4210 else if (bp->dev->mtu > 4096) { 5419 high = 513;
4211 if (bp->flags & ONE_PORT_FLAG) 5420 } else {
4212 low = 160; 5421 if (IS_MF(bp))
4213 else { 5422 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4214 val = bp->dev->mtu; 5423 else if (bp->dev->mtu > 4096) {
4215 /* (24*1024 + val*4)/256 */ 5424 if (bp->flags & ONE_PORT_FLAG)
4216 low = 96 + (val/64) + ((val % 64) ? 1 : 0); 5425 low = 160;
4217 } 5426 else {
4218 } else 5427 val = bp->dev->mtu;
4219 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); 5428 /* (24*1024 + val*4)/256 */
4220 high = low + 56; /* 14*1024/256 */ 5429 low = 96 + (val/64) +
5430 ((val % 64) ? 1 : 0);
5431 }
5432 } else
5433 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5434 high = low + 56; /* 14*1024/256 */
5435 }
5436 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5437 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4221 } 5438 }
4222 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4223 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4224 5439
5440 if (CHIP_MODE_IS_4_PORT(bp)) {
5441 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5442 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5443 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5444 BRB1_REG_MAC_GUARANTIED_0), 40);
5445 }
4225 5446
4226 bnx2x_init_block(bp, PRS_BLOCK, init_stage); 5447 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4227 5448
@@ -4234,24 +5455,28 @@ static int bnx2x_init_port(struct bnx2x *bp)
4234 bnx2x_init_block(bp, USEM_BLOCK, init_stage); 5455 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4235 bnx2x_init_block(bp, CSEM_BLOCK, init_stage); 5456 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4236 bnx2x_init_block(bp, XSEM_BLOCK, init_stage); 5457 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5458 if (CHIP_MODE_IS_4_PORT(bp))
5459 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
4237 5460
4238 bnx2x_init_block(bp, UPB_BLOCK, init_stage); 5461 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4239 bnx2x_init_block(bp, XPB_BLOCK, init_stage); 5462 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4240 5463
4241 bnx2x_init_block(bp, PBF_BLOCK, init_stage); 5464 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4242 5465
4243 /* configure PBF to work without PAUSE mtu 9000 */ 5466 if (!CHIP_IS_E2(bp)) {
4244 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 5467 /* configure PBF to work without PAUSE mtu 9000 */
5468 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4245 5469
4246 /* update threshold */ 5470 /* update threshold */
4247 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 5471 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4248 /* update init credit */ 5472 /* update init credit */
4249 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 5473 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
4250 5474
4251 /* probe changes */ 5475 /* probe changes */
4252 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); 5476 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4253 msleep(5); 5477 udelay(50);
4254 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); 5478 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5479 }
4255 5480
4256#ifdef BCM_CNIC 5481#ifdef BCM_CNIC
4257 bnx2x_init_block(bp, SRCH_BLOCK, init_stage); 5482 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
@@ -4265,13 +5490,17 @@ static int bnx2x_init_port(struct bnx2x *bp)
4265 } 5490 }
4266 bnx2x_init_block(bp, HC_BLOCK, init_stage); 5491 bnx2x_init_block(bp, HC_BLOCK, init_stage);
4267 5492
5493 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5494
4268 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage); 5495 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4269 /* init aeu_mask_attn_func_0/1: 5496 /* init aeu_mask_attn_func_0/1:
4270 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 5497 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4271 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 5498 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4272 * bits 4-7 are used for "per vn group attention" */ 5499 * bits 4-7 are used for "per vn group attention" */
4273 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 5500 val = IS_MF(bp) ? 0xF7 : 0x7;
4274 (IS_E1HMF(bp) ? 0xF7 : 0x7)); 5501 /* Enable DCBX attention for all but E1 */
5502 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
5503 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
4275 5504
4276 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage); 5505 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
4277 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage); 5506 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
@@ -4283,11 +5512,25 @@ static int bnx2x_init_port(struct bnx2x *bp)
4283 5512
4284 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 5513 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4285 5514
4286 if (CHIP_IS_E1H(bp)) { 5515 if (!CHIP_IS_E1(bp)) {
4287 /* 0x2 disable e1hov, 0x1 enable */ 5516 /* 0x2 disable mf_ov, 0x1 enable */
4288 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 5517 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4289 (IS_E1HMF(bp) ? 0x1 : 0x2)); 5518 (IS_MF_SD(bp) ? 0x1 : 0x2));
4290 5519
5520 if (CHIP_IS_E2(bp)) {
5521 val = 0;
5522 switch (bp->mf_mode) {
5523 case MULTI_FUNCTION_SD:
5524 val = 1;
5525 break;
5526 case MULTI_FUNCTION_SI:
5527 val = 2;
5528 break;
5529 }
5530
5531 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5532 NIG_REG_LLH0_CLS_TYPE), val);
5533 }
4291 { 5534 {
4292 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); 5535 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4293 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); 5536 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
@@ -4297,223 +5540,356 @@ static int bnx2x_init_port(struct bnx2x *bp)
4297 5540
4298 bnx2x_init_block(bp, MCP_BLOCK, init_stage); 5541 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
4299 bnx2x_init_block(bp, DMAE_BLOCK, init_stage); 5542 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
4300 5543 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
4301 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) { 5544 bp->common.shmem2_base, port)) {
4302 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4303 {
4304 u32 swap_val, swap_override, aeu_gpio_mask, offset;
4305
4306 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
4307 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
4308
4309 /* The GPIO should be swapped if the swap register is
4310 set and active */
4311 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4312 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4313
4314 /* Select function upon port-swap configuration */
4315 if (port == 0) {
4316 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
4317 aeu_gpio_mask = (swap_val && swap_override) ?
4318 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
4319 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
4320 } else {
4321 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
4322 aeu_gpio_mask = (swap_val && swap_override) ?
4323 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
4324 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
4325 }
4326 val = REG_RD(bp, offset);
4327 /* add GPIO3 to group */
4328 val |= aeu_gpio_mask;
4329 REG_WR(bp, offset, val);
4330 }
4331 bp->port.need_hw_lock = 1;
4332 break;
4333
4334 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4335 bp->port.need_hw_lock = 1;
4336 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4337 /* add SPIO 5 to group 0 */
4338 {
4339 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 5545 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4340 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 5546 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4341 val = REG_RD(bp, reg_addr); 5547 val = REG_RD(bp, reg_addr);
4342 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 5548 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4343 REG_WR(bp, reg_addr, val); 5549 REG_WR(bp, reg_addr, val);
4344 }
4345 break;
4346 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4347 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4348 bp->port.need_hw_lock = 1;
4349 break;
4350 default:
4351 break;
4352 } 5550 }
4353
4354 bnx2x__link_reset(bp); 5551 bnx2x__link_reset(bp);
4355 5552
4356 return 0; 5553 return 0;
4357} 5554}
4358 5555
4359#define ILT_PER_FUNC (768/2)
4360#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
4361/* the phys address is shifted right 12 bits and has an added
4362 1=valid bit added to the 53rd bit
4363 then since this is a wide register(TM)
4364 we split it into two 32 bit writes
4365 */
4366#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4367#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
4368#define PXP_ONE_ILT(x) (((x) << 10) | x)
4369#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
4370
4371#ifdef BCM_CNIC
4372#define CNIC_ILT_LINES 127
4373#define CNIC_CTX_PER_ILT 16
4374#else
4375#define CNIC_ILT_LINES 0
4376#endif
4377
4378static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) 5556static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4379{ 5557{
4380 int reg; 5558 int reg;
4381 5559
4382 if (CHIP_IS_E1H(bp)) 5560 if (CHIP_IS_E1(bp))
4383 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4384 else /* E1 */
4385 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 5561 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5562 else
5563 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4386 5564
4387 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); 5565 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4388} 5566}
4389 5567
4390static int bnx2x_init_func(struct bnx2x *bp) 5568static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5569{
5570 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5571}
5572
5573static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5574{
5575 u32 i, base = FUNC_ILT_BASE(func);
5576 for (i = base; i < base + ILT_PER_FUNC; i++)
5577 bnx2x_ilt_wr(bp, i, 0);
5578}
5579
5580static int bnx2x_init_hw_func(struct bnx2x *bp)
4391{ 5581{
4392 int port = BP_PORT(bp); 5582 int port = BP_PORT(bp);
4393 int func = BP_FUNC(bp); 5583 int func = BP_FUNC(bp);
5584 struct bnx2x_ilt *ilt = BP_ILT(bp);
5585 u16 cdu_ilt_start;
4394 u32 addr, val; 5586 u32 addr, val;
4395 int i; 5587 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5588 int i, main_mem_width;
4396 5589
4397 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); 5590 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
4398 5591
4399 /* set MSI reconfigure capability */ 5592 /* set MSI reconfigure capability */
4400 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 5593 if (bp->common.int_block == INT_BLOCK_HC) {
4401 val = REG_RD(bp, addr); 5594 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4402 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 5595 val = REG_RD(bp, addr);
4403 REG_WR(bp, addr, val); 5596 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5597 REG_WR(bp, addr, val);
5598 }
4404 5599
4405 i = FUNC_ILT_BASE(func); 5600 ilt = BP_ILT(bp);
5601 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
4406 5602
4407 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context)); 5603 for (i = 0; i < L2_ILT_LINES(bp); i++) {
4408 if (CHIP_IS_E1H(bp)) { 5604 ilt->lines[cdu_ilt_start + i].page =
4409 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i); 5605 bp->context.vcxt + (ILT_PAGE_CIDS * i);
4410 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES); 5606 ilt->lines[cdu_ilt_start + i].page_mapping =
4411 } else /* E1 */ 5607 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
4412 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, 5608 /* cdu ilt pages are allocated manually so there's no need to
4413 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES)); 5609 set the size */
5610 }
5611 bnx2x_ilt_init_op(bp, INITOP_SET);
4414 5612
4415#ifdef BCM_CNIC 5613#ifdef BCM_CNIC
4416 i += 1 + CNIC_ILT_LINES; 5614 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
4417 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4418 if (CHIP_IS_E1(bp))
4419 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4420 else {
4421 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4422 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4423 }
4424 5615
4425 i++; 5616 /* T1 hash bits value determines the T1 number of entries */
4426 bnx2x_ilt_wr(bp, i, bp->qm_mapping); 5617 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
4427 if (CHIP_IS_E1(bp)) 5618#endif
4428 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i)); 5619
4429 else { 5620#ifndef BCM_CNIC
4430 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i); 5621 /* set NIC mode */
4431 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i); 5622 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5623#endif /* BCM_CNIC */
5624
5625 if (CHIP_IS_E2(bp)) {
5626 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5627
5628 /* Turn on a single ISR mode in IGU if driver is going to use
5629 * INT#x or MSI
5630 */
5631 if (!(bp->flags & USING_MSIX_FLAG))
5632 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5633 /*
5634 * Timers workaround bug: function init part.
5635 * Need to wait 20msec after initializing ILT,
5636 * needed to make sure there are no requests in
5637 * one of the PXP internal queues with "old" ILT addresses
5638 */
5639 msleep(20);
5640 /*
5641 * Master enable - Due to WB DMAE writes performed before this
5642 * register is re-initialized as part of the regular function
5643 * init
5644 */
5645 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5646 /* Enable the function in IGU */
5647 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
4432 } 5648 }
4433 5649
4434 i++; 5650 bp->dmae_ready = 1;
4435 bnx2x_ilt_wr(bp, i, bp->t1_mapping); 5651
4436 if (CHIP_IS_E1(bp)) 5652 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
4437 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i)); 5653
4438 else { 5654 if (CHIP_IS_E2(bp))
4439 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i); 5655 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
4440 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i); 5656
5657 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5658 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5659 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5660 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5661 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5662 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5663 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5664 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5665 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5666
5667 if (CHIP_IS_E2(bp)) {
5668 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5669 BP_PATH(bp));
5670 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5671 BP_PATH(bp));
4441 } 5672 }
4442 5673
4443 /* tell the searcher where the T2 table is */ 5674 if (CHIP_MODE_IS_4_PORT(bp))
4444 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64); 5675 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
4445 5676
4446 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16, 5677 if (CHIP_IS_E2(bp))
4447 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping)); 5678 REG_WR(bp, QM_REG_PF_EN, 1);
4448 5679
4449 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16, 5680 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
4450 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4451 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4452 5681
4453 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10); 5682 if (CHIP_MODE_IS_4_PORT(bp))
4454#endif 5683 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5684
5685 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5686 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5687 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5688 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5689 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5690 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5691 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5692 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5693 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5694 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5695 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
5696 if (CHIP_IS_E2(bp))
5697 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
4455 5698
4456 if (CHIP_IS_E1H(bp)) { 5699 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
4457 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4458 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4459 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4460 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4461 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4462 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4463 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4464 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4465 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4466 5700
5701 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5702
5703 if (CHIP_IS_E2(bp))
5704 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5705
5706 if (IS_MF(bp)) {
4467 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); 5707 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4468 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov); 5708 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
4469 } 5709 }
4470 5710
5711 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5712
4471 /* HC init per function */ 5713 /* HC init per function */
4472 if (CHIP_IS_E1H(bp)) { 5714 if (bp->common.int_block == INT_BLOCK_HC) {
5715 if (CHIP_IS_E1H(bp)) {
5716 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5717
5718 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5719 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5720 }
5721 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5722
5723 } else {
5724 int num_segs, sb_idx, prod_offset;
5725
4473 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 5726 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4474 5727
4475 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 5728 if (CHIP_IS_E2(bp)) {
4476 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 5729 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5730 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5731 }
5732
5733 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5734
5735 if (CHIP_IS_E2(bp)) {
5736 int dsb_idx = 0;
5737 /**
5738 * Producer memory:
5739 * E2 mode: address 0-135 match to the mapping memory;
5740 * 136 - PF0 default prod; 137 - PF1 default prod;
5741 * 138 - PF2 default prod; 139 - PF3 default prod;
5742 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5743 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5744 * 144-147 reserved.
5745 *
5746 * E1.5 mode - In backward compatible mode;
5747 * for non default SB; each even line in the memory
5748 * holds the U producer and each odd line hold
5749 * the C producer. The first 128 producers are for
5750 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5751 * producers are for the DSB for each PF.
5752 * Each PF has five segments: (the order inside each
5753 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5754 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5755 * 144-147 attn prods;
5756 */
5757 /* non-default-status-blocks */
5758 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5759 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5760 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5761 prod_offset = (bp->igu_base_sb + sb_idx) *
5762 num_segs;
5763
5764 for (i = 0; i < num_segs; i++) {
5765 addr = IGU_REG_PROD_CONS_MEMORY +
5766 (prod_offset + i) * 4;
5767 REG_WR(bp, addr, 0);
5768 }
5769 /* send consumer update with value 0 */
5770 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5771 USTORM_ID, 0, IGU_INT_NOP, 1);
5772 bnx2x_igu_clear_sb(bp,
5773 bp->igu_base_sb + sb_idx);
5774 }
5775
5776 /* default-status-blocks */
5777 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5778 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5779
5780 if (CHIP_MODE_IS_4_PORT(bp))
5781 dsb_idx = BP_FUNC(bp);
5782 else
5783 dsb_idx = BP_E1HVN(bp);
5784
5785 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5786 IGU_BC_BASE_DSB_PROD + dsb_idx :
5787 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5788
5789 for (i = 0; i < (num_segs * E1HVN_MAX);
5790 i += E1HVN_MAX) {
5791 addr = IGU_REG_PROD_CONS_MEMORY +
5792 (prod_offset + i)*4;
5793 REG_WR(bp, addr, 0);
5794 }
5795 /* send consumer update with 0 */
5796 if (CHIP_INT_MODE_IS_BC(bp)) {
5797 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5798 USTORM_ID, 0, IGU_INT_NOP, 1);
5799 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5800 CSTORM_ID, 0, IGU_INT_NOP, 1);
5801 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5802 XSTORM_ID, 0, IGU_INT_NOP, 1);
5803 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5804 TSTORM_ID, 0, IGU_INT_NOP, 1);
5805 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5806 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5807 } else {
5808 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5809 USTORM_ID, 0, IGU_INT_NOP, 1);
5810 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5811 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5812 }
5813 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5814
5815 /* !!! these should become driver const once
5816 rf-tool supports split-68 const */
5817 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5818 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5819 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5820 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5821 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5822 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5823 }
4477 } 5824 }
4478 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4479 5825
4480 /* Reset PCIE errors for debug */ 5826 /* Reset PCIE errors for debug */
4481 REG_WR(bp, 0x2114, 0xffffffff); 5827 REG_WR(bp, 0x2114, 0xffffffff);
4482 REG_WR(bp, 0x2120, 0xffffffff); 5828 REG_WR(bp, 0x2120, 0xffffffff);
4483 5829
5830 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5831 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5832 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5833 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5834 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5835 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5836
5837 if (CHIP_IS_E1x(bp)) {
5838 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5839 main_mem_base = HC_REG_MAIN_MEMORY +
5840 BP_PORT(bp) * (main_mem_size * 4);
5841 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5842 main_mem_width = 8;
5843
5844 val = REG_RD(bp, main_mem_prty_clr);
5845 if (val)
5846 DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5847 "block during "
5848 "function init (0x%x)!\n", val);
5849
5850 /* Clear "false" parity errors in MSI-X table */
5851 for (i = main_mem_base;
5852 i < main_mem_base + main_mem_size * 4;
5853 i += main_mem_width) {
5854 bnx2x_read_dmae(bp, i, main_mem_width / 4);
5855 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5856 i, main_mem_width / 4);
5857 }
5858 /* Clear HC parity attention */
5859 REG_RD(bp, main_mem_prty_clr);
5860 }
5861
5862 bnx2x_phy_probe(&bp->link_params);
5863
4484 return 0; 5864 return 0;
4485} 5865}
4486 5866
4487int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) 5867int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4488{ 5868{
4489 int i, rc = 0; 5869 int rc = 0;
4490 5870
4491 DP(BNX2X_MSG_MCP, "function %d load_code %x\n", 5871 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4492 BP_FUNC(bp), load_code); 5872 BP_ABS_FUNC(bp), load_code);
4493 5873
4494 bp->dmae_ready = 0; 5874 bp->dmae_ready = 0;
4495 mutex_init(&bp->dmae_mutex); 5875 spin_lock_init(&bp->dmae_lock);
4496 rc = bnx2x_gunzip_init(bp);
4497 if (rc)
4498 return rc;
4499 5876
4500 switch (load_code) { 5877 switch (load_code) {
4501 case FW_MSG_CODE_DRV_LOAD_COMMON: 5878 case FW_MSG_CODE_DRV_LOAD_COMMON:
4502 rc = bnx2x_init_common(bp); 5879 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5880 rc = bnx2x_init_hw_common(bp, load_code);
4503 if (rc) 5881 if (rc)
4504 goto init_hw_err; 5882 goto init_hw_err;
4505 /* no break */ 5883 /* no break */
4506 5884
4507 case FW_MSG_CODE_DRV_LOAD_PORT: 5885 case FW_MSG_CODE_DRV_LOAD_PORT:
4508 bp->dmae_ready = 1; 5886 rc = bnx2x_init_hw_port(bp);
4509 rc = bnx2x_init_port(bp);
4510 if (rc) 5887 if (rc)
4511 goto init_hw_err; 5888 goto init_hw_err;
4512 /* no break */ 5889 /* no break */
4513 5890
4514 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 5891 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4515 bp->dmae_ready = 1; 5892 rc = bnx2x_init_hw_func(bp);
4516 rc = bnx2x_init_func(bp);
4517 if (rc) 5893 if (rc)
4518 goto init_hw_err; 5894 goto init_hw_err;
4519 break; 5895 break;
@@ -4524,22 +5900,14 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4524 } 5900 }
4525 5901
4526 if (!BP_NOMCP(bp)) { 5902 if (!BP_NOMCP(bp)) {
4527 int func = BP_FUNC(bp); 5903 int mb_idx = BP_FW_MB_IDX(bp);
4528 5904
4529 bp->fw_drv_pulse_wr_seq = 5905 bp->fw_drv_pulse_wr_seq =
4530 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) & 5906 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
4531 DRV_PULSE_SEQ_MASK); 5907 DRV_PULSE_SEQ_MASK);
4532 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); 5908 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4533 } 5909 }
4534 5910
4535 /* this needs to be done before gunzip end */
4536 bnx2x_zero_def_sb(bp);
4537 for_each_queue(bp, i)
4538 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4539#ifdef BCM_CNIC
4540 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4541#endif
4542
4543init_hw_err: 5911init_hw_err:
4544 bnx2x_gunzip_end(bp); 5912 bnx2x_gunzip_end(bp);
4545 5913
@@ -4548,288 +5916,153 @@ init_hw_err:
4548 5916
4549void bnx2x_free_mem(struct bnx2x *bp) 5917void bnx2x_free_mem(struct bnx2x *bp)
4550{ 5918{
4551 5919 bnx2x_gunzip_end(bp);
4552#define BNX2X_PCI_FREE(x, y, size) \
4553 do { \
4554 if (x) { \
4555 dma_free_coherent(&bp->pdev->dev, size, x, y); \
4556 x = NULL; \
4557 y = 0; \
4558 } \
4559 } while (0)
4560
4561#define BNX2X_FREE(x) \
4562 do { \
4563 if (x) { \
4564 vfree(x); \
4565 x = NULL; \
4566 } \
4567 } while (0)
4568
4569 int i;
4570 5920
4571 /* fastpath */ 5921 /* fastpath */
4572 /* Common */ 5922 bnx2x_free_fp_mem(bp);
4573 for_each_queue(bp, i) {
4574
4575 /* status blocks */
4576 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4577 bnx2x_fp(bp, i, status_blk_mapping),
4578 sizeof(struct host_status_block));
4579 }
4580 /* Rx */
4581 for_each_queue(bp, i) {
4582
4583 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4584 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4585 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4586 bnx2x_fp(bp, i, rx_desc_mapping),
4587 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4588
4589 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4590 bnx2x_fp(bp, i, rx_comp_mapping),
4591 sizeof(struct eth_fast_path_rx_cqe) *
4592 NUM_RCQ_BD);
4593
4594 /* SGE ring */
4595 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
4596 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4597 bnx2x_fp(bp, i, rx_sge_mapping),
4598 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4599 }
4600 /* Tx */
4601 for_each_queue(bp, i) {
4602
4603 /* fastpath tx rings: tx_buf tx_desc */
4604 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4605 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4606 bnx2x_fp(bp, i, tx_desc_mapping),
4607 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4608 }
4609 /* end of fastpath */ 5923 /* end of fastpath */
4610 5924
4611 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, 5925 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4612 sizeof(struct host_def_status_block)); 5926 sizeof(struct host_sp_status_block));
4613 5927
4614 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, 5928 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4615 sizeof(struct bnx2x_slowpath)); 5929 sizeof(struct bnx2x_slowpath));
4616 5930
5931 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5932 bp->context.size);
5933
5934 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5935
5936 BNX2X_FREE(bp->ilt->lines);
5937
4617#ifdef BCM_CNIC 5938#ifdef BCM_CNIC
4618 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024); 5939 if (CHIP_IS_E2(bp))
4619 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024); 5940 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
4620 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024); 5941 sizeof(struct host_hc_status_block_e2));
4621 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024); 5942 else
4622 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping, 5943 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
4623 sizeof(struct host_status_block)); 5944 sizeof(struct host_hc_status_block_e1x));
5945
5946 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
4624#endif 5947#endif
5948
4625 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 5949 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4626 5950
4627#undef BNX2X_PCI_FREE 5951 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
4628#undef BNX2X_KFREE 5952 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5953
5954 BNX2X_FREE(bp->rx_indir_table);
4629} 5955}
4630 5956
5957
4631int bnx2x_alloc_mem(struct bnx2x *bp) 5958int bnx2x_alloc_mem(struct bnx2x *bp)
4632{ 5959{
5960 if (bnx2x_gunzip_init(bp))
5961 return -ENOMEM;
4633 5962
4634#define BNX2X_PCI_ALLOC(x, y, size) \ 5963#ifdef BCM_CNIC
4635 do { \ 5964 if (CHIP_IS_E2(bp))
4636 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ 5965 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
4637 if (x == NULL) \ 5966 sizeof(struct host_hc_status_block_e2));
4638 goto alloc_mem_err; \ 5967 else
4639 memset(x, 0, size); \ 5968 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
4640 } while (0) 5969 sizeof(struct host_hc_status_block_e1x));
4641
4642#define BNX2X_ALLOC(x, size) \
4643 do { \
4644 x = vmalloc(size); \
4645 if (x == NULL) \
4646 goto alloc_mem_err; \
4647 memset(x, 0, size); \
4648 } while (0)
4649
4650 int i;
4651
4652 /* fastpath */
4653 /* Common */
4654 for_each_queue(bp, i) {
4655 bnx2x_fp(bp, i, bp) = bp;
4656 5970
4657 /* status blocks */ 5971 /* allocate searcher T2 table */
4658 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk), 5972 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
4659 &bnx2x_fp(bp, i, status_blk_mapping), 5973#endif
4660 sizeof(struct host_status_block));
4661 }
4662 /* Rx */
4663 for_each_queue(bp, i) {
4664
4665 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4666 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4667 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4668 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4669 &bnx2x_fp(bp, i, rx_desc_mapping),
4670 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4671
4672 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4673 &bnx2x_fp(bp, i, rx_comp_mapping),
4674 sizeof(struct eth_fast_path_rx_cqe) *
4675 NUM_RCQ_BD);
4676
4677 /* SGE ring */
4678 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4679 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4680 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4681 &bnx2x_fp(bp, i, rx_sge_mapping),
4682 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4683 }
4684 /* Tx */
4685 for_each_queue(bp, i) {
4686 5974
4687 /* fastpath tx rings: tx_buf tx_desc */
4688 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4689 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4690 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4691 &bnx2x_fp(bp, i, tx_desc_mapping),
4692 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4693 }
4694 /* end of fastpath */
4695 5975
4696 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, 5976 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4697 sizeof(struct host_def_status_block)); 5977 sizeof(struct host_sp_status_block));
4698 5978
4699 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, 5979 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4700 sizeof(struct bnx2x_slowpath)); 5980 sizeof(struct bnx2x_slowpath));
4701 5981
4702#ifdef BCM_CNIC 5982 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
4703 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4704
4705 /* allocate searcher T2 table
4706 we allocate 1/4 of alloc num for T2
4707 (which is not entered into the ILT) */
4708 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4709
4710 /* Initialize T2 (for 1024 connections) */
4711 for (i = 0; i < 16*1024; i += 64)
4712 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
4713 5983
4714 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */ 5984 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
4715 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024); 5985 bp->context.size);
4716 5986
4717 /* QM queues (128*MAX_CONN) */ 5987 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
4718 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4719 5988
4720 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping, 5989 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
4721 sizeof(struct host_status_block)); 5990 goto alloc_mem_err;
4722#endif
4723 5991
4724 /* Slow path ring */ 5992 /* Slow path ring */
4725 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); 5993 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4726 5994
5995 /* EQ */
5996 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
5997 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5998
5999 BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
6000 TSTORM_INDIRECTION_TABLE_SIZE);
6001
6002 /* fastpath */
6003 /* need to be done at the end, since it's self adjusting to amount
6004 * of memory available for RSS queues
6005 */
6006 if (bnx2x_alloc_fp_mem(bp))
6007 goto alloc_mem_err;
4727 return 0; 6008 return 0;
4728 6009
4729alloc_mem_err: 6010alloc_mem_err:
4730 bnx2x_free_mem(bp); 6011 bnx2x_free_mem(bp);
4731 return -ENOMEM; 6012 return -ENOMEM;
4732
4733#undef BNX2X_PCI_ALLOC
4734#undef BNX2X_ALLOC
4735} 6013}
4736 6014
4737
4738/* 6015/*
4739 * Init service functions 6016 * Init service functions
4740 */ 6017 */
6018static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6019 int *state_p, int flags);
4741 6020
4742/** 6021int bnx2x_func_start(struct bnx2x *bp)
4743 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4744 *
4745 * @param bp driver descriptor
4746 * @param set set or clear an entry (1 or 0)
4747 * @param mac pointer to a buffer containing a MAC
4748 * @param cl_bit_vec bit vector of clients to register a MAC for
4749 * @param cam_offset offset in a CAM to use
4750 * @param with_bcast set broadcast MAC as well
4751 */
4752static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4753 u32 cl_bit_vec, u8 cam_offset,
4754 u8 with_bcast)
4755{ 6022{
4756 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); 6023 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
4757 int port = BP_PORT(bp);
4758
4759 /* CAM allocation
4760 * unicasts 0-31:port0 32-63:port1
4761 * multicast 64-127:port0 128-191:port1
4762 */
4763 config->hdr.length = 1 + (with_bcast ? 1 : 0);
4764 config->hdr.offset = cam_offset;
4765 config->hdr.client_id = 0xff;
4766 config->hdr.reserved1 = 0;
4767 6024
4768 /* primary MAC */ 6025 /* Wait for completion */
4769 config->config_table[0].cam_entry.msb_mac_addr = 6026 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
4770 swab16(*(u16 *)&mac[0]); 6027 WAIT_RAMROD_COMMON);
4771 config->config_table[0].cam_entry.middle_mac_addr = 6028}
4772 swab16(*(u16 *)&mac[2]);
4773 config->config_table[0].cam_entry.lsb_mac_addr =
4774 swab16(*(u16 *)&mac[4]);
4775 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
4776 if (set)
4777 config->config_table[0].target_table_entry.flags = 0;
4778 else
4779 CAM_INVALIDATE(config->config_table[0]);
4780 config->config_table[0].target_table_entry.clients_bit_vector =
4781 cpu_to_le32(cl_bit_vec);
4782 config->config_table[0].target_table_entry.vlan_id = 0;
4783 6029
4784 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", 6030static int bnx2x_func_stop(struct bnx2x *bp)
4785 (set ? "setting" : "clearing"), 6031{
4786 config->config_table[0].cam_entry.msb_mac_addr, 6032 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
4787 config->config_table[0].cam_entry.middle_mac_addr,
4788 config->config_table[0].cam_entry.lsb_mac_addr);
4789
4790 /* broadcast */
4791 if (with_bcast) {
4792 config->config_table[1].cam_entry.msb_mac_addr =
4793 cpu_to_le16(0xffff);
4794 config->config_table[1].cam_entry.middle_mac_addr =
4795 cpu_to_le16(0xffff);
4796 config->config_table[1].cam_entry.lsb_mac_addr =
4797 cpu_to_le16(0xffff);
4798 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4799 if (set)
4800 config->config_table[1].target_table_entry.flags =
4801 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4802 else
4803 CAM_INVALIDATE(config->config_table[1]);
4804 config->config_table[1].target_table_entry.clients_bit_vector =
4805 cpu_to_le32(cl_bit_vec);
4806 config->config_table[1].target_table_entry.vlan_id = 0;
4807 }
4808 6033
4809 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 6034 /* Wait for completion */
4810 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 6035 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
4811 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 6036 0, &(bp->state), WAIT_RAMROD_COMMON);
4812} 6037}
4813 6038
4814/** 6039/**
4815 * Sets a MAC in a CAM for a few L2 Clients for E1H chip 6040 * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips
4816 * 6041 *
4817 * @param bp driver descriptor 6042 * @bp: driver handle
4818 * @param set set or clear an entry (1 or 0) 6043 * @set: set or clear an entry (1 or 0)
4819 * @param mac pointer to a buffer containing a MAC 6044 * @mac: pointer to a buffer containing a MAC
4820 * @param cl_bit_vec bit vector of clients to register a MAC for 6045 * @cl_bit_vec: bit vector of clients to register a MAC for
4821 * @param cam_offset offset in a CAM to use 6046 * @cam_offset: offset in a CAM to use
6047 * @is_bcast: is the set MAC a broadcast address (for E1 only)
4822 */ 6048 */
4823static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac, 6049static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
4824 u32 cl_bit_vec, u8 cam_offset) 6050 u32 cl_bit_vec, u8 cam_offset,
6051 u8 is_bcast)
4825{ 6052{
4826 struct mac_configuration_cmd_e1h *config = 6053 struct mac_configuration_cmd *config =
4827 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); 6054 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6055 int ramrod_flags = WAIT_RAMROD_COMMON;
6056
6057 bp->set_mac_pending = 1;
4828 6058
4829 config->hdr.length = 1; 6059 config->hdr.length = 1;
4830 config->hdr.offset = cam_offset; 6060 config->hdr.offset = cam_offset;
4831 config->hdr.client_id = 0xff; 6061 config->hdr.client_id = 0xff;
4832 config->hdr.reserved1 = 0; 6062 /* Mark the single MAC configuration ramrod as opposed to a
6063 * UC/MC list configuration).
6064 */
6065 config->hdr.echo = 1;
4833 6066
4834 /* primary MAC */ 6067 /* primary MAC */
4835 config->config_table[0].msb_mac_addr = 6068 config->config_table[0].msb_mac_addr =
@@ -4841,29 +6074,43 @@ static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4841 config->config_table[0].clients_bit_vector = 6074 config->config_table[0].clients_bit_vector =
4842 cpu_to_le32(cl_bit_vec); 6075 cpu_to_le32(cl_bit_vec);
4843 config->config_table[0].vlan_id = 0; 6076 config->config_table[0].vlan_id = 0;
4844 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); 6077 config->config_table[0].pf_id = BP_FUNC(bp);
4845 if (set) 6078 if (set)
4846 config->config_table[0].flags = BP_PORT(bp); 6079 SET_FLAG(config->config_table[0].flags,
6080 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6081 T_ETH_MAC_COMMAND_SET);
4847 else 6082 else
4848 config->config_table[0].flags = 6083 SET_FLAG(config->config_table[0].flags,
4849 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE; 6084 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6085 T_ETH_MAC_COMMAND_INVALIDATE);
4850 6086
4851 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n", 6087 if (is_bcast)
6088 SET_FLAG(config->config_table[0].flags,
6089 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6090
6091 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
4852 (set ? "setting" : "clearing"), 6092 (set ? "setting" : "clearing"),
4853 config->config_table[0].msb_mac_addr, 6093 config->config_table[0].msb_mac_addr,
4854 config->config_table[0].middle_mac_addr, 6094 config->config_table[0].middle_mac_addr,
4855 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec); 6095 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
4856 6096
4857 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 6097 mb();
6098
6099 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
4858 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 6100 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4859 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 6101 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6102
6103 /* Wait for a completion */
6104 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
4860} 6105}
4861 6106
4862static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, 6107static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4863 int *state_p, int poll) 6108 int *state_p, int flags)
4864{ 6109{
4865 /* can take a while if any port is running */ 6110 /* can take a while if any port is running */
4866 int cnt = 5000; 6111 int cnt = 5000;
6112 u8 poll = flags & WAIT_RAMROD_POLL;
6113 u8 common = flags & WAIT_RAMROD_COMMON;
4867 6114
4868 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n", 6115 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4869 poll ? "polling" : "waiting", state, idx); 6116 poll ? "polling" : "waiting", state, idx);
@@ -4871,13 +6118,17 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4871 might_sleep(); 6118 might_sleep();
4872 while (cnt--) { 6119 while (cnt--) {
4873 if (poll) { 6120 if (poll) {
4874 bnx2x_rx_int(bp->fp, 10); 6121 if (common)
4875 /* if index is different from 0 6122 bnx2x_eq_int(bp);
4876 * the reply for some commands will 6123 else {
4877 * be on the non default queue 6124 bnx2x_rx_int(bp->fp, 10);
4878 */ 6125 /* if index is different from 0
4879 if (idx) 6126 * the reply for some commands will
4880 bnx2x_rx_int(&bp->fp[idx], 10); 6127 * be on the non default queue
6128 */
6129 if (idx)
6130 bnx2x_rx_int(&bp->fp[idx], 10);
6131 }
4881 } 6132 }
4882 6133
4883 mb(); /* state is changed by bnx2x_sp_event() */ 6134 mb(); /* state is changed by bnx2x_sp_event() */
@@ -4904,212 +6155,781 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4904 return -EBUSY; 6155 return -EBUSY;
4905} 6156}
4906 6157
4907void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set) 6158static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
4908{ 6159{
4909 bp->set_mac_pending++; 6160 if (CHIP_IS_E1H(bp))
4910 smp_wmb(); 6161 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6162 else if (CHIP_MODE_IS_4_PORT(bp))
6163 return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
6164 else
6165 return E2_FUNC_MAX * rel_offset + BP_VN(bp);
6166}
4911 6167
4912 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr, 6168/**
4913 (1 << bp->fp->cl_id), BP_FUNC(bp)); 6169 * LLH CAM line allocations: currently only iSCSI and ETH macs are
6170 * relevant. In addition, current implementation is tuned for a
6171 * single ETH MAC.
6172 */
6173enum {
6174 LLH_CAM_ISCSI_ETH_LINE = 0,
6175 LLH_CAM_ETH_LINE,
6176 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6177};
6178
6179static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6180 int set,
6181 unsigned char *dev_addr,
6182 int index)
6183{
6184 u32 wb_data[2];
6185 u32 mem_offset, ena_offset, mem_index;
6186 /**
6187 * indexes mapping:
6188 * 0..7 - goes to MEM
6189 * 8..15 - goes to MEM2
6190 */
6191
6192 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6193 return;
6194
6195 /* calculate memory start offset according to the mapping
6196 * and index in the memory */
6197 if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6198 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6199 NIG_REG_LLH0_FUNC_MEM;
6200 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6201 NIG_REG_LLH0_FUNC_MEM_ENABLE;
6202 mem_index = index;
6203 } else {
6204 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6205 NIG_REG_P0_LLH_FUNC_MEM2;
6206 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6207 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6208 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6209 }
6210
6211 if (set) {
6212 /* LLH_FUNC_MEM is a u64 WB register */
6213 mem_offset += 8*mem_index;
6214
6215 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6216 (dev_addr[4] << 8) | dev_addr[5]);
6217 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
6218
6219 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6220 }
6221
6222 /* enable/disable the entry */
6223 REG_WR(bp, ena_offset + 4*mem_index, set);
4914 6224
4915 /* Wait for a completion */
4916 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4917} 6225}
4918 6226
4919void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set) 6227void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
4920{ 6228{
4921 bp->set_mac_pending++; 6229 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
4922 smp_wmb(); 6230 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6231
6232 /* networking MAC */
6233 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6234 (1 << bp->fp->cl_id), cam_offset , 0);
4923 6235
4924 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr, 6236 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
4925 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0), 6237
4926 1); 6238 if (CHIP_IS_E1(bp)) {
6239 /* broadcast MAC */
6240 static const u8 bcast[ETH_ALEN] = {
6241 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6242 };
6243 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6244 }
6245}
6246
6247static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
6248{
6249 return CHIP_REV_IS_SLOW(bp) ?
6250 (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
6251 (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
6252}
6253
6254/* set mc list, do not wait as wait implies sleep and
6255 * set_rx_mode can be invoked from non-sleepable context.
6256 *
6257 * Instead we use the same ramrod data buffer each time we need
6258 * to configure a list of addresses, and use the fact that the
6259 * list of MACs is changed in an incremental way and that the
6260 * function is called under the netif_addr_lock. A temporary
6261 * inconsistent CAM configuration (possible in case of a very fast
6262 * sequence of add/del/add on the host side) will shortly be
6263 * restored by the handler of the last ramrod.
6264 */
6265static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
6266{
6267 int i = 0, old;
6268 struct net_device *dev = bp->dev;
6269 u8 offset = bnx2x_e1_cam_mc_offset(bp);
6270 struct netdev_hw_addr *ha;
6271 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6272 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6273
6274 if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
6275 return -EINVAL;
6276
6277 netdev_for_each_mc_addr(ha, dev) {
6278 /* copy mac */
6279 config_cmd->config_table[i].msb_mac_addr =
6280 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6281 config_cmd->config_table[i].middle_mac_addr =
6282 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6283 config_cmd->config_table[i].lsb_mac_addr =
6284 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6285
6286 config_cmd->config_table[i].vlan_id = 0;
6287 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6288 config_cmd->config_table[i].clients_bit_vector =
6289 cpu_to_le32(1 << BP_L_ID(bp));
6290
6291 SET_FLAG(config_cmd->config_table[i].flags,
6292 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6293 T_ETH_MAC_COMMAND_SET);
6294
6295 DP(NETIF_MSG_IFUP,
6296 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6297 config_cmd->config_table[i].msb_mac_addr,
6298 config_cmd->config_table[i].middle_mac_addr,
6299 config_cmd->config_table[i].lsb_mac_addr);
6300 i++;
6301 }
6302 old = config_cmd->hdr.length;
6303 if (old > i) {
6304 for (; i < old; i++) {
6305 if (CAM_IS_INVALID(config_cmd->
6306 config_table[i])) {
6307 /* already invalidated */
6308 break;
6309 }
6310 /* invalidate */
6311 SET_FLAG(config_cmd->config_table[i].flags,
6312 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6313 T_ETH_MAC_COMMAND_INVALIDATE);
6314 }
6315 }
6316
6317 wmb();
6318
6319 config_cmd->hdr.length = i;
6320 config_cmd->hdr.offset = offset;
6321 config_cmd->hdr.client_id = 0xff;
6322 /* Mark that this ramrod doesn't use bp->set_mac_pending for
6323 * synchronization.
6324 */
6325 config_cmd->hdr.echo = 0;
6326
6327 mb();
6328
6329 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6330 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6331}
6332
6333void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
6334{
6335 int i;
6336 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6337 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6338 int ramrod_flags = WAIT_RAMROD_COMMON;
6339 u8 offset = bnx2x_e1_cam_mc_offset(bp);
6340
6341 for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
6342 SET_FLAG(config_cmd->config_table[i].flags,
6343 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6344 T_ETH_MAC_COMMAND_INVALIDATE);
6345
6346 wmb();
6347
6348 config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
6349 config_cmd->hdr.offset = offset;
6350 config_cmd->hdr.client_id = 0xff;
6351 /* We'll wait for a completion this time... */
6352 config_cmd->hdr.echo = 1;
6353
6354 bp->set_mac_pending = 1;
6355
6356 mb();
6357
6358 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6359 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
4927 6360
4928 /* Wait for a completion */ 6361 /* Wait for a completion */
4929 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); 6362 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6363 ramrod_flags);
6364
6365}
6366
6367/* Accept one or more multicasts */
6368static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
6369{
6370 struct net_device *dev = bp->dev;
6371 struct netdev_hw_addr *ha;
6372 u32 mc_filter[MC_HASH_SIZE];
6373 u32 crc, bit, regidx;
6374 int i;
6375
6376 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6377
6378 netdev_for_each_mc_addr(ha, dev) {
6379 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6380 bnx2x_mc_addr(ha));
6381
6382 crc = crc32c_le(0, bnx2x_mc_addr(ha),
6383 ETH_ALEN);
6384 bit = (crc >> 24) & 0xff;
6385 regidx = bit >> 5;
6386 bit &= 0x1f;
6387 mc_filter[regidx] |= (1 << bit);
6388 }
6389
6390 for (i = 0; i < MC_HASH_SIZE; i++)
6391 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6392 mc_filter[i]);
6393
6394 return 0;
6395}
6396
6397void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
6398{
6399 int i;
6400
6401 for (i = 0; i < MC_HASH_SIZE; i++)
6402 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
4930} 6403}
4931 6404
4932#ifdef BCM_CNIC 6405#ifdef BCM_CNIC
4933/** 6406/**
4934 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH 6407 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
4935 * MAC(s). This function will wait until the ramdord completion
4936 * returns.
4937 * 6408 *
4938 * @param bp driver handle 6409 * @bp: driver handle
4939 * @param set set or clear the CAM entry 6410 * @set: set or clear the CAM entry
4940 * 6411 *
4941 * @return 0 if cussess, -ENODEV if ramrod doesn't return. 6412 * This function will wait until the ramdord completion returns.
6413 * Return 0 if success, -ENODEV if ramrod doesn't return.
4942 */ 6414 */
4943int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) 6415static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
4944{ 6416{
4945 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID); 6417 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
4946 6418 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
4947 bp->set_mac_pending++; 6419 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
4948 smp_wmb(); 6420 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
6421 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6422 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
4949 6423
4950 /* Send a SET_MAC ramrod */ 6424 /* Send a SET_MAC ramrod */
4951 if (CHIP_IS_E1(bp)) 6425 bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
4952 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac, 6426 cam_offset, 0);
4953 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4954 1);
4955 else
4956 /* CAM allocation for E1H
4957 * unicasts: by func number
4958 * multicast: 20+FUNC*20, 20 each
4959 */
4960 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4961 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4962 6427
4963 /* Wait for a completion when setting */ 6428 bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
4964 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4965 6429
4966 return 0; 6430 return 0;
4967} 6431}
4968#endif
4969 6432
4970int bnx2x_setup_leading(struct bnx2x *bp) 6433/**
4971{ 6434 * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s)
4972 int rc; 6435 *
6436 * @bp: driver handle
6437 * @set: set or clear the CAM entry
6438 *
6439 * This function will wait until the ramrod completion returns.
6440 * Returns 0 if success, -ENODEV if ramrod doesn't return.
6441 */
6442int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
6443{
6444 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6445 /**
6446 * CAM allocation for E1H
6447 * eth unicasts: by func number
6448 * iscsi: by func number
6449 * fip unicast: by func number
6450 * fip multicast: by func number
6451 */
6452 bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
6453 cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
4973 6454
4974 /* reset IGU state */ 6455 return 0;
4975 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 6456}
4976 6457
4977 /* SETUP ramrod */ 6458int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
4978 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0); 6459{
6460 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
4979 6461
4980 /* Wait for completion */ 6462 /**
4981 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0); 6463 * CAM allocation for E1H
6464 * eth unicasts: by func number
6465 * iscsi: by func number
6466 * fip unicast: by func number
6467 * fip multicast: by func number
6468 */
6469 bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
6470 bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
4982 6471
4983 return rc; 6472 return 0;
4984} 6473}
6474#endif
4985 6475
4986int bnx2x_setup_multi(struct bnx2x *bp, int index) 6476static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
4987{ 6477 struct bnx2x_client_init_params *params,
4988 struct bnx2x_fastpath *fp = &bp->fp[index]; 6478 u8 activate,
6479 struct client_init_ramrod_data *data)
6480{
6481 /* Clear the buffer */
6482 memset(data, 0, sizeof(*data));
6483
6484 /* general */
6485 data->general.client_id = params->rxq_params.cl_id;
6486 data->general.statistics_counter_id = params->rxq_params.stat_id;
6487 data->general.statistics_en_flg =
6488 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6489 data->general.is_fcoe_flg =
6490 (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
6491 data->general.activate_flg = activate;
6492 data->general.sp_client_id = params->rxq_params.spcl_id;
6493
6494 /* Rx data */
6495 data->rx.tpa_en_flg =
6496 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6497 data->rx.vmqueue_mode_en_flg = 0;
6498 data->rx.cache_line_alignment_log_size =
6499 params->rxq_params.cache_line_log;
6500 data->rx.enable_dynamic_hc =
6501 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6502 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6503 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6504 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6505
6506 /* We don't set drop flags */
6507 data->rx.drop_ip_cs_err_flg = 0;
6508 data->rx.drop_tcp_cs_err_flg = 0;
6509 data->rx.drop_ttl0_flg = 0;
6510 data->rx.drop_udp_cs_err_flg = 0;
6511
6512 data->rx.inner_vlan_removal_enable_flg =
6513 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6514 data->rx.outer_vlan_removal_enable_flg =
6515 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6516 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6517 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6518 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6519 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6520 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6521 data->rx.bd_page_base.lo =
6522 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6523 data->rx.bd_page_base.hi =
6524 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6525 data->rx.sge_page_base.lo =
6526 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6527 data->rx.sge_page_base.hi =
6528 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6529 data->rx.cqe_page_base.lo =
6530 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6531 data->rx.cqe_page_base.hi =
6532 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6533 data->rx.is_leading_rss =
6534 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6535 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6536
6537 /* Tx data */
6538 data->tx.enforce_security_flg = 0; /* VF specific */
6539 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6540 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6541 data->tx.mtu = 0; /* VF specific */
6542 data->tx.tx_bd_page_base.lo =
6543 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6544 data->tx.tx_bd_page_base.hi =
6545 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6546
6547 /* flow control data */
6548 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6549 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6550 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6551 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6552 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6553 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6554 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6555
6556 data->fc.safc_group_num = params->txq_params.cos;
6557 data->fc.safc_group_en_flg =
6558 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6559 data->fc.traffic_type =
6560 (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
6561 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
6562}
6563
6564static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6565{
6566 /* ustorm cxt validation */
6567 cxt->ustorm_ag_context.cdu_usage =
6568 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6569 ETH_CONNECTION_TYPE);
6570 /* xcontext validation */
6571 cxt->xstorm_ag_context.cdu_reserved =
6572 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6573 ETH_CONNECTION_TYPE);
6574}
6575
6576static int bnx2x_setup_fw_client(struct bnx2x *bp,
6577 struct bnx2x_client_init_params *params,
6578 u8 activate,
6579 struct client_init_ramrod_data *data,
6580 dma_addr_t data_mapping)
6581{
6582 u16 hc_usec;
6583 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6584 int ramrod_flags = 0, rc;
6585
6586 /* HC and context validation values */
6587 hc_usec = params->txq_params.hc_rate ?
6588 1000000 / params->txq_params.hc_rate : 0;
6589 bnx2x_update_coalesce_sb_index(bp,
6590 params->txq_params.fw_sb_id,
6591 params->txq_params.sb_cq_index,
6592 !(params->txq_params.flags & QUEUE_FLG_HC),
6593 hc_usec);
6594
6595 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6596
6597 hc_usec = params->rxq_params.hc_rate ?
6598 1000000 / params->rxq_params.hc_rate : 0;
6599 bnx2x_update_coalesce_sb_index(bp,
6600 params->rxq_params.fw_sb_id,
6601 params->rxq_params.sb_cq_index,
6602 !(params->rxq_params.flags & QUEUE_FLG_HC),
6603 hc_usec);
6604
6605 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6606 params->rxq_params.cid);
6607
6608 /* zero stats */
6609 if (params->txq_params.flags & QUEUE_FLG_STATS)
6610 storm_memset_xstats_zero(bp, BP_PORT(bp),
6611 params->txq_params.stat_id);
6612
6613 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6614 storm_memset_ustats_zero(bp, BP_PORT(bp),
6615 params->rxq_params.stat_id);
6616 storm_memset_tstats_zero(bp, BP_PORT(bp),
6617 params->rxq_params.stat_id);
6618 }
6619
6620 /* Fill the ramrod data */
6621 bnx2x_fill_cl_init_data(bp, params, activate, data);
6622
6623 /* SETUP ramrod.
6624 *
6625 * bnx2x_sp_post() takes a spin_lock thus no other explict memory
6626 * barrier except from mmiowb() is needed to impose a
6627 * proper ordering of memory operations.
6628 */
6629 mmiowb();
4989 6630
4990 /* reset IGU state */
4991 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4992 6631
4993 /* SETUP ramrod */ 6632 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
4994 fp->state = BNX2X_FP_STATE_OPENING; 6633 U64_HI(data_mapping), U64_LO(data_mapping), 0);
4995 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4996 fp->cl_id, 0);
4997 6634
4998 /* Wait for completion */ 6635 /* Wait for completion */
4999 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index, 6636 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
5000 &(fp->state), 0); 6637 params->ramrod_params.index,
6638 params->ramrod_params.pstate,
6639 ramrod_flags);
6640 return rc;
5001} 6641}
5002 6642
5003 6643/**
5004void bnx2x_set_num_queues_msix(struct bnx2x *bp) 6644 * bnx2x_set_int_mode - configure interrupt mode
6645 *
6646 * @bp: driver handle
6647 *
6648 * In case of MSI-X it will also try to enable MSI-X.
6649 */
6650static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
5005{ 6651{
6652 int rc = 0;
5006 6653
5007 switch (bp->multi_mode) { 6654 switch (bp->int_mode) {
5008 case ETH_RSS_MODE_DISABLED: 6655 case INT_MODE_MSI:
5009 bp->num_queues = 1; 6656 bnx2x_enable_msi(bp);
6657 /* falling through... */
6658 case INT_MODE_INTx:
6659 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
6660 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
5010 break; 6661 break;
6662 default:
6663 /* Set number of queues according to bp->multi_mode value */
6664 bnx2x_set_num_queues(bp);
5011 6665
5012 case ETH_RSS_MODE_REGULAR: 6666 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
5013 if (num_queues) 6667 bp->num_queues);
5014 bp->num_queues = min_t(u32, num_queues,
5015 BNX2X_MAX_QUEUES(bp));
5016 else
5017 bp->num_queues = min_t(u32, num_online_cpus(),
5018 BNX2X_MAX_QUEUES(bp));
5019 break;
5020 6668
6669 /* if we can't use MSI-X we only need one fp,
6670 * so try to enable MSI-X with the requested number of fp's
6671 * and fallback to MSI or legacy INTx with one fp
6672 */
6673 rc = bnx2x_enable_msix(bp);
6674 if (rc) {
6675 /* failed to enable MSI-X */
6676 if (bp->multi_mode)
6677 DP(NETIF_MSG_IFUP,
6678 "Multi requested but failed to "
6679 "enable MSI-X (%d), "
6680 "set number of queues to %d\n",
6681 bp->num_queues,
6682 1 + NONE_ETH_CONTEXT_USE);
6683 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
6684
6685 if (!(bp->flags & DISABLE_MSI_FLAG))
6686 bnx2x_enable_msi(bp);
6687 }
5021 6688
5022 default:
5023 bp->num_queues = 1;
5024 break; 6689 break;
5025 } 6690 }
6691
6692 return rc;
6693}
6694
6695/* must be called prioir to any HW initializations */
6696static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6697{
6698 return L2_ILT_LINES(bp);
5026} 6699}
5027 6700
6701void bnx2x_ilt_set_info(struct bnx2x *bp)
6702{
6703 struct ilt_client_info *ilt_client;
6704 struct bnx2x_ilt *ilt = BP_ILT(bp);
6705 u16 line = 0;
6706
6707 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6708 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6709
6710 /* CDU */
6711 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6712 ilt_client->client_num = ILT_CLIENT_CDU;
6713 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6714 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6715 ilt_client->start = line;
6716 line += L2_ILT_LINES(bp);
6717#ifdef BCM_CNIC
6718 line += CNIC_ILT_LINES;
6719#endif
6720 ilt_client->end = line - 1;
6721
6722 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6723 "flags 0x%x, hw psz %d\n",
6724 ilt_client->start,
6725 ilt_client->end,
6726 ilt_client->page_size,
6727 ilt_client->flags,
6728 ilog2(ilt_client->page_size >> 12));
6729
6730 /* QM */
6731 if (QM_INIT(bp->qm_cid_count)) {
6732 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6733 ilt_client->client_num = ILT_CLIENT_QM;
6734 ilt_client->page_size = QM_ILT_PAGE_SZ;
6735 ilt_client->flags = 0;
6736 ilt_client->start = line;
6737
6738 /* 4 bytes for each cid */
6739 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6740 QM_ILT_PAGE_SZ);
6741
6742 ilt_client->end = line - 1;
6743
6744 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6745 "flags 0x%x, hw psz %d\n",
6746 ilt_client->start,
6747 ilt_client->end,
6748 ilt_client->page_size,
6749 ilt_client->flags,
6750 ilog2(ilt_client->page_size >> 12));
6751
6752 }
6753 /* SRC */
6754 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6755#ifdef BCM_CNIC
6756 ilt_client->client_num = ILT_CLIENT_SRC;
6757 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6758 ilt_client->flags = 0;
6759 ilt_client->start = line;
6760 line += SRC_ILT_LINES;
6761 ilt_client->end = line - 1;
6762
6763 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6764 "flags 0x%x, hw psz %d\n",
6765 ilt_client->start,
6766 ilt_client->end,
6767 ilt_client->page_size,
6768 ilt_client->flags,
6769 ilog2(ilt_client->page_size >> 12));
6770
6771#else
6772 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6773#endif
6774
6775 /* TM */
6776 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6777#ifdef BCM_CNIC
6778 ilt_client->client_num = ILT_CLIENT_TM;
6779 ilt_client->page_size = TM_ILT_PAGE_SZ;
6780 ilt_client->flags = 0;
6781 ilt_client->start = line;
6782 line += TM_ILT_LINES;
6783 ilt_client->end = line - 1;
6784
6785 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6786 "flags 0x%x, hw psz %d\n",
6787 ilt_client->start,
6788 ilt_client->end,
6789 ilt_client->page_size,
6790 ilt_client->flags,
6791 ilog2(ilt_client->page_size >> 12));
5028 6792
6793#else
6794 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6795#endif
6796}
5029 6797
5030static int bnx2x_stop_multi(struct bnx2x *bp, int index) 6798int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6799 int is_leading)
5031{ 6800{
5032 struct bnx2x_fastpath *fp = &bp->fp[index]; 6801 struct bnx2x_client_init_params params = { {0} };
5033 int rc; 6802 int rc;
5034 6803
5035 /* halt the connection */ 6804 /* reset IGU state skip FCoE L2 queue */
5036 fp->state = BNX2X_FP_STATE_HALTING; 6805 if (!IS_FCOE_FP(fp))
5037 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0); 6806 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6807 IGU_INT_ENABLE, 0);
5038 6808
5039 /* Wait for completion */ 6809 params.ramrod_params.pstate = &fp->state;
5040 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, 6810 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
5041 &(fp->state), 1); 6811 params.ramrod_params.index = fp->index;
5042 if (rc) /* timeout */ 6812 params.ramrod_params.cid = fp->cid;
5043 return rc;
5044 6813
5045 /* delete cfc entry */ 6814#ifdef BCM_CNIC
5046 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1); 6815 if (IS_FCOE_FP(fp))
6816 params.ramrod_params.flags |= CLIENT_IS_FCOE;
5047 6817
5048 /* Wait for completion */ 6818#endif
5049 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index, 6819
5050 &(fp->state), 1); 6820 if (is_leading)
6821 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6822
6823 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6824
6825 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6826
6827 rc = bnx2x_setup_fw_client(bp, &params, 1,
6828 bnx2x_sp(bp, client_init_data),
6829 bnx2x_sp_mapping(bp, client_init_data));
5051 return rc; 6830 return rc;
5052} 6831}
5053 6832
5054static int bnx2x_stop_leading(struct bnx2x *bp) 6833static int bnx2x_stop_fw_client(struct bnx2x *bp,
6834 struct bnx2x_client_ramrod_params *p)
5055{ 6835{
5056 __le16 dsb_sp_prod_idx;
5057 /* if the other port is handling traffic,
5058 this can take a lot of time */
5059 int cnt = 500;
5060 int rc; 6836 int rc;
5061 6837
5062 might_sleep(); 6838 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
5063 6839
5064 /* Send HALT ramrod */ 6840 /* halt the connection */
5065 bp->fp[0].state = BNX2X_FP_STATE_HALTING; 6841 *p->pstate = BNX2X_FP_STATE_HALTING;
5066 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0); 6842 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6843 p->cl_id, 0);
5067 6844
5068 /* Wait for completion */ 6845 /* Wait for completion */
5069 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, 6846 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
5070 &(bp->fp[0].state), 1); 6847 p->pstate, poll_flag);
5071 if (rc) /* timeout */ 6848 if (rc) /* timeout */
5072 return rc; 6849 return rc;
5073 6850
5074 dsb_sp_prod_idx = *bp->dsb_sp_prod; 6851 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6852 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6853 p->cl_id, 0);
6854 /* Wait for completion */
6855 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6856 p->pstate, poll_flag);
6857 if (rc) /* timeout */
6858 return rc;
5075 6859
5076 /* Send PORT_DELETE ramrod */
5077 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5078 6860
5079 /* Wait for completion to arrive on default status block 6861 /* delete cfc entry */
5080 we are going to reset the chip anyway 6862 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
5081 so there is not much to do if this times out
5082 */
5083 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
5084 if (!cnt) {
5085 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5086 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5087 *bp->dsb_sp_prod, dsb_sp_prod_idx);
5088#ifdef BNX2X_STOP_ON_ERROR
5089 bnx2x_panic();
5090#endif
5091 rc = -EBUSY;
5092 break;
5093 }
5094 cnt--;
5095 msleep(1);
5096 rmb(); /* Refresh the dsb_sp_prod */
5097 }
5098 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5099 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
5100 6863
6864 /* Wait for completion */
6865 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6866 p->pstate, WAIT_RAMROD_COMMON);
5101 return rc; 6867 return rc;
5102} 6868}
5103 6869
6870static int bnx2x_stop_client(struct bnx2x *bp, int index)
6871{
6872 struct bnx2x_client_ramrod_params client_stop = {0};
6873 struct bnx2x_fastpath *fp = &bp->fp[index];
6874
6875 client_stop.index = index;
6876 client_stop.cid = fp->cid;
6877 client_stop.cl_id = fp->cl_id;
6878 client_stop.pstate = &(fp->state);
6879 client_stop.poll = 0;
6880
6881 return bnx2x_stop_fw_client(bp, &client_stop);
6882}
6883
6884
5104static void bnx2x_reset_func(struct bnx2x *bp) 6885static void bnx2x_reset_func(struct bnx2x *bp)
5105{ 6886{
5106 int port = BP_PORT(bp); 6887 int port = BP_PORT(bp);
5107 int func = BP_FUNC(bp); 6888 int func = BP_FUNC(bp);
5108 int base, i; 6889 int i;
6890 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
6891 (CHIP_IS_E2(bp) ?
6892 offsetof(struct hc_status_block_data_e2, common) :
6893 offsetof(struct hc_status_block_data_e1x, common));
6894 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6895 int pfid_offset = offsetof(struct pci_entity, pf_id);
6896
6897 /* Disable the function in the FW */
6898 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6899 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6900 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6901 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6902
6903 /* FP SBs */
6904 for_each_eth_queue(bp, i) {
6905 struct bnx2x_fastpath *fp = &bp->fp[i];
6906 REG_WR8(bp,
6907 BAR_CSTRORM_INTMEM +
6908 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6909 + pfunc_offset_fp + pfid_offset,
6910 HC_FUNCTION_DISABLED);
6911 }
6912
6913 /* SP SB */
6914 REG_WR8(bp,
6915 BAR_CSTRORM_INTMEM +
6916 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6917 pfunc_offset_sp + pfid_offset,
6918 HC_FUNCTION_DISABLED);
6919
6920
6921 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6922 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6923 0);
5109 6924
5110 /* Configure IGU */ 6925 /* Configure IGU */
5111 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 6926 if (bp->common.int_block == INT_BLOCK_HC) {
5112 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 6927 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6928 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6929 } else {
6930 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6931 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6932 }
5113 6933
5114#ifdef BCM_CNIC 6934#ifdef BCM_CNIC
5115 /* Disable Timer scan */ 6935 /* Disable Timer scan */
@@ -5125,9 +6945,27 @@ static void bnx2x_reset_func(struct bnx2x *bp)
5125 } 6945 }
5126#endif 6946#endif
5127 /* Clear ILT */ 6947 /* Clear ILT */
5128 base = FUNC_ILT_BASE(func); 6948 bnx2x_clear_func_ilt(bp, func);
5129 for (i = base; i < base + ILT_PER_FUNC; i++) 6949
5130 bnx2x_ilt_wr(bp, i, 0); 6950 /* Timers workaround bug for E2: if this is vnic-3,
6951 * we need to set the entire ilt range for this timers.
6952 */
6953 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6954 struct ilt_client_info ilt_cli;
6955 /* use dummy TM client */
6956 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6957 ilt_cli.start = 0;
6958 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6959 ilt_cli.client_num = ILT_CLIENT_TM;
6960
6961 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6962 }
6963
6964 /* this assumes that reset_port() called before reset_func()*/
6965 if (CHIP_IS_E2(bp))
6966 bnx2x_pf_disable(bp);
6967
6968 bp->dmae_ready = 0;
5131} 6969}
5132 6970
5133static void bnx2x_reset_port(struct bnx2x *bp) 6971static void bnx2x_reset_port(struct bnx2x *bp)
@@ -5159,7 +6997,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
5159static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) 6997static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5160{ 6998{
5161 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", 6999 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5162 BP_FUNC(bp), reset_code); 7000 BP_ABS_FUNC(bp), reset_code);
5163 7001
5164 switch (reset_code) { 7002 switch (reset_code) {
5165 case FW_MSG_CODE_DRV_UNLOAD_COMMON: 7003 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
@@ -5183,6 +7021,20 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5183 } 7021 }
5184} 7022}
5185 7023
7024#ifdef BCM_CNIC
7025static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
7026{
7027 if (bp->flags & FCOE_MACS_SET) {
7028 if (!IS_MF_SD(bp))
7029 bnx2x_set_fip_eth_mac_addr(bp, 0);
7030
7031 bnx2x_set_all_enode_macs(bp, 0);
7032
7033 bp->flags &= ~FCOE_MACS_SET;
7034 }
7035}
7036#endif
7037
5186void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) 7038void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5187{ 7039{
5188 int port = BP_PORT(bp); 7040 int port = BP_PORT(bp);
@@ -5190,13 +7042,12 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5190 int i, cnt, rc; 7042 int i, cnt, rc;
5191 7043
5192 /* Wait until tx fastpath tasks complete */ 7044 /* Wait until tx fastpath tasks complete */
5193 for_each_queue(bp, i) { 7045 for_each_tx_queue(bp, i) {
5194 struct bnx2x_fastpath *fp = &bp->fp[i]; 7046 struct bnx2x_fastpath *fp = &bp->fp[i];
5195 7047
5196 cnt = 1000; 7048 cnt = 1000;
5197 while (bnx2x_has_tx_work_unload(fp)) { 7049 while (bnx2x_has_tx_work_unload(fp)) {
5198 7050
5199 bnx2x_tx_int(fp);
5200 if (!cnt) { 7051 if (!cnt) {
5201 BNX2X_ERR("timeout waiting for queue[%d]\n", 7052 BNX2X_ERR("timeout waiting for queue[%d]\n",
5202 i); 7053 i);
@@ -5214,48 +7065,19 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5214 /* Give HW time to discard old tx messages */ 7065 /* Give HW time to discard old tx messages */
5215 msleep(1); 7066 msleep(1);
5216 7067
5217 if (CHIP_IS_E1(bp)) { 7068 bnx2x_set_eth_mac(bp, 0);
5218 struct mac_configuration_cmd *config =
5219 bnx2x_sp(bp, mcast_config);
5220
5221 bnx2x_set_eth_mac_addr_e1(bp, 0);
5222
5223 for (i = 0; i < config->hdr.length; i++)
5224 CAM_INVALIDATE(config->config_table[i]);
5225
5226 config->hdr.length = i;
5227 if (CHIP_REV_IS_SLOW(bp))
5228 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5229 else
5230 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5231 config->hdr.client_id = bp->fp->cl_id;
5232 config->hdr.reserved1 = 0;
5233 7069
5234 bp->set_mac_pending++; 7070 bnx2x_invalidate_uc_list(bp);
5235 smp_wmb();
5236 7071
5237 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 7072 if (CHIP_IS_E1(bp))
5238 U64_HI(bnx2x_sp_mapping(bp, mcast_config)), 7073 bnx2x_invalidate_e1_mc_list(bp);
5239 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0); 7074 else {
5240 7075 bnx2x_invalidate_e1h_mc_list(bp);
5241 } else { /* E1H */
5242 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 7076 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5243
5244 bnx2x_set_eth_mac_addr_e1h(bp, 0);
5245
5246 for (i = 0; i < MC_HASH_SIZE; i++)
5247 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5248
5249 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5250 } 7077 }
7078
5251#ifdef BCM_CNIC 7079#ifdef BCM_CNIC
5252 /* Clear iSCSI L2 MAC */ 7080 bnx2x_del_fcoe_eth_macs(bp);
5253 mutex_lock(&bp->cnic_mutex);
5254 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5255 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5256 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5257 }
5258 mutex_unlock(&bp->cnic_mutex);
5259#endif 7081#endif
5260 7082
5261 if (unload_mode == UNLOAD_NORMAL) 7083 if (unload_mode == UNLOAD_NORMAL)
@@ -5286,33 +7108,44 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5286 7108
5287 /* Close multi and leading connections 7109 /* Close multi and leading connections
5288 Completions for ramrods are collected in a synchronous way */ 7110 Completions for ramrods are collected in a synchronous way */
5289 for_each_nondefault_queue(bp, i) 7111 for_each_queue(bp, i)
5290 if (bnx2x_stop_multi(bp, i)) 7112
7113 if (bnx2x_stop_client(bp, i))
7114#ifdef BNX2X_STOP_ON_ERROR
7115 return;
7116#else
5291 goto unload_error; 7117 goto unload_error;
7118#endif
5292 7119
5293 rc = bnx2x_stop_leading(bp); 7120 rc = bnx2x_func_stop(bp);
5294 if (rc) { 7121 if (rc) {
5295 BNX2X_ERR("Stop leading failed!\n"); 7122 BNX2X_ERR("Function stop failed!\n");
5296#ifdef BNX2X_STOP_ON_ERROR 7123#ifdef BNX2X_STOP_ON_ERROR
5297 return -EBUSY; 7124 return;
5298#else 7125#else
5299 goto unload_error; 7126 goto unload_error;
5300#endif 7127#endif
5301 } 7128 }
5302 7129#ifndef BNX2X_STOP_ON_ERROR
5303unload_error: 7130unload_error:
7131#endif
5304 if (!BP_NOMCP(bp)) 7132 if (!BP_NOMCP(bp))
5305 reset_code = bnx2x_fw_command(bp, reset_code); 7133 reset_code = bnx2x_fw_command(bp, reset_code, 0);
5306 else { 7134 else {
5307 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n", 7135 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
5308 load_count[0], load_count[1], load_count[2]); 7136 "%d, %d, %d\n", BP_PATH(bp),
5309 load_count[0]--; 7137 load_count[BP_PATH(bp)][0],
5310 load_count[1 + port]--; 7138 load_count[BP_PATH(bp)][1],
5311 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n", 7139 load_count[BP_PATH(bp)][2]);
5312 load_count[0], load_count[1], load_count[2]); 7140 load_count[BP_PATH(bp)][0]--;
5313 if (load_count[0] == 0) 7141 load_count[BP_PATH(bp)][1 + port]--;
7142 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7143 "%d, %d, %d\n", BP_PATH(bp),
7144 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7145 load_count[BP_PATH(bp)][2]);
7146 if (load_count[BP_PATH(bp)][0] == 0)
5314 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; 7147 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5315 else if (load_count[1 + port] == 0) 7148 else if (load_count[BP_PATH(bp)][1 + port] == 0)
5316 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; 7149 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5317 else 7150 else
5318 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 7151 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -5322,12 +7155,18 @@ unload_error:
5322 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT)) 7155 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5323 bnx2x__link_reset(bp); 7156 bnx2x__link_reset(bp);
5324 7157
7158 /* Disable HW interrupts, NAPI */
7159 bnx2x_netif_stop(bp, 1);
7160
7161 /* Release IRQs */
7162 bnx2x_free_irq(bp);
7163
5325 /* Reset the chip */ 7164 /* Reset the chip */
5326 bnx2x_reset_chip(bp, reset_code); 7165 bnx2x_reset_chip(bp, reset_code);
5327 7166
5328 /* Report UNLOAD_DONE to MCP */ 7167 /* Report UNLOAD_DONE to MCP */
5329 if (!BP_NOMCP(bp)) 7168 if (!BP_NOMCP(bp))
5330 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 7169 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
5331 7170
5332} 7171}
5333 7172
@@ -5353,7 +7192,6 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp)
5353 } 7192 }
5354} 7193}
5355 7194
5356
5357/* Close gates #2, #3 and #4: */ 7195/* Close gates #2, #3 and #4: */
5358static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) 7196static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5359{ 7197{
@@ -5391,26 +7229,27 @@ static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5391 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 7229 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5392} 7230}
5393 7231
5394/* Restore the value of the `magic' bit. 7232/**
7233 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
5395 * 7234 *
5396 * @param pdev Device handle. 7235 * @bp: driver handle
5397 * @param magic_val Old value of the `magic' bit. 7236 * @magic_val: old value of the `magic' bit.
5398 */ 7237 */
5399static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) 7238static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5400{ 7239{
5401 /* Restore the `magic' bit value... */ 7240 /* Restore the `magic' bit value... */
5402 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
5403 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
5404 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
5405 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); 7241 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5406 MF_CFG_WR(bp, shared_mf_config.clp_mb, 7242 MF_CFG_WR(bp, shared_mf_config.clp_mb,
5407 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 7243 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5408} 7244}
5409 7245
5410/* Prepares for MCP reset: takes care of CLP configurations. 7246/**
7247 * bnx2x_reset_mcp_prep - prepare for MCP reset.
7248 *
7249 * @bp: driver handle
7250 * @magic_val: old value of 'magic' bit.
5411 * 7251 *
5412 * @param bp 7252 * Takes care of CLP configurations.
5413 * @param magic_val Old value of 'magic' bit.
5414 */ 7253 */
5415static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) 7254static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5416{ 7255{
@@ -5435,10 +7274,10 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5435#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 7274#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
5436#define MCP_ONE_TIMEOUT 100 /* 100 ms */ 7275#define MCP_ONE_TIMEOUT 100 /* 100 ms */
5437 7276
5438/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10, 7277/**
5439 * depending on the HW type. 7278 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
5440 * 7279 *
5441 * @param bp 7280 * @bp: driver handle
5442 */ 7281 */
5443static inline void bnx2x_mcp_wait_one(struct bnx2x *bp) 7282static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5444{ 7283{
@@ -5450,51 +7289,35 @@ static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5450 msleep(MCP_ONE_TIMEOUT); 7289 msleep(MCP_ONE_TIMEOUT);
5451} 7290}
5452 7291
5453static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) 7292/*
7293 * initializes bp->common.shmem_base and waits for validity signature to appear
7294 */
7295static int bnx2x_init_shmem(struct bnx2x *bp)
5454{ 7296{
5455 u32 shmem, cnt, validity_offset, val; 7297 int cnt = 0;
5456 int rc = 0; 7298 u32 val = 0;
5457
5458 msleep(100);
5459
5460 /* Get shmem offset */
5461 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5462 if (shmem == 0) {
5463 BNX2X_ERR("Shmem 0 return failure\n");
5464 rc = -ENOTTY;
5465 goto exit_lbl;
5466 }
5467 7299
5468 validity_offset = offsetof(struct shmem_region, validity_map[0]); 7300 do {
7301 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7302 if (bp->common.shmem_base) {
7303 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7304 if (val & SHR_MEM_VALIDITY_MB)
7305 return 0;
7306 }
5469 7307
5470 /* Wait for MCP to come up */ 7308 bnx2x_mcp_wait_one(bp);
5471 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
5472 /* TBD: its best to check validity map of last port.
5473 * currently checks on port 0.
5474 */
5475 val = REG_RD(bp, shmem + validity_offset);
5476 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5477 shmem + validity_offset, val);
5478 7309
5479 /* check that shared memory is valid. */ 7310 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
5480 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5481 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5482 break;
5483 7311
5484 bnx2x_mcp_wait_one(bp); 7312 BNX2X_ERR("BAD MCP validity signature\n");
5485 }
5486 7313
5487 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val); 7314 return -ENODEV;
7315}
5488 7316
5489 /* Check that shared memory is valid. This indicates that MCP is up. */ 7317static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5490 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != 7318{
5491 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { 7319 int rc = bnx2x_init_shmem(bp);
5492 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
5493 rc = -ENOTTY;
5494 goto exit_lbl;
5495 }
5496 7320
5497exit_lbl:
5498 /* Restore the `magic' bit value */ 7321 /* Restore the `magic' bit value */
5499 if (!CHIP_IS_E1(bp)) 7322 if (!CHIP_IS_E1(bp))
5500 bnx2x_clp_reset_done(bp, magic_val); 7323 bnx2x_clp_reset_done(bp, magic_val);
@@ -5805,39 +7628,23 @@ reset_task_exit:
5805 * Init service functions 7628 * Init service functions
5806 */ 7629 */
5807 7630
5808static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func) 7631static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
5809{ 7632{
5810 switch (func) { 7633 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
5811 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0; 7634 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
5812 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1; 7635 return base + (BP_ABS_FUNC(bp)) * stride;
5813 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5814 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5815 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5816 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5817 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5818 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5819 default:
5820 BNX2X_ERR("Unsupported function index: %d\n", func);
5821 return (u32)(-1);
5822 }
5823} 7636}
5824 7637
5825static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func) 7638static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
5826{ 7639{
5827 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val; 7640 u32 reg = bnx2x_get_pretend_reg(bp);
5828 7641
5829 /* Flush all outstanding writes */ 7642 /* Flush all outstanding writes */
5830 mmiowb(); 7643 mmiowb();
5831 7644
5832 /* Pretend to be function 0 */ 7645 /* Pretend to be function 0 */
5833 REG_WR(bp, reg, 0); 7646 REG_WR(bp, reg, 0);
5834 /* Flush the GRC transaction (in the chip) */ 7647 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
5835 new_val = REG_RD(bp, reg);
5836 if (new_val != 0) {
5837 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5838 new_val);
5839 BUG();
5840 }
5841 7648
5842 /* From now we are in the "like-E1" mode */ 7649 /* From now we are in the "like-E1" mode */
5843 bnx2x_int_disable(bp); 7650 bnx2x_int_disable(bp);
@@ -5845,22 +7652,17 @@ static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5845 /* Flush all outstanding writes */ 7652 /* Flush all outstanding writes */
5846 mmiowb(); 7653 mmiowb();
5847 7654
5848 /* Restore the original funtion settings */ 7655 /* Restore the original function */
5849 REG_WR(bp, reg, orig_func); 7656 REG_WR(bp, reg, BP_ABS_FUNC(bp));
5850 new_val = REG_RD(bp, reg); 7657 REG_RD(bp, reg);
5851 if (new_val != orig_func) {
5852 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5853 orig_func, new_val);
5854 BUG();
5855 }
5856} 7658}
5857 7659
5858static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func) 7660static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
5859{ 7661{
5860 if (CHIP_IS_E1H(bp)) 7662 if (CHIP_IS_E1(bp))
5861 bnx2x_undi_int_disable_e1h(bp, func);
5862 else
5863 bnx2x_int_disable(bp); 7663 bnx2x_int_disable(bp);
7664 else
7665 bnx2x_undi_int_disable_e1h(bp);
5864} 7666}
5865 7667
5866static void __devinit bnx2x_undi_unload(struct bnx2x *bp) 7668static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
@@ -5877,8 +7679,8 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5877 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 7679 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5878 if (val == 0x7) { 7680 if (val == 0x7) {
5879 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 7681 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5880 /* save our func */ 7682 /* save our pf_num */
5881 int func = BP_FUNC(bp); 7683 int orig_pf_num = bp->pf_num;
5882 u32 swap_en; 7684 u32 swap_en;
5883 u32 swap_val; 7685 u32 swap_val;
5884 7686
@@ -5888,32 +7690,33 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5888 BNX2X_DEV_INFO("UNDI is active! reset device\n"); 7690 BNX2X_DEV_INFO("UNDI is active! reset device\n");
5889 7691
5890 /* try unload UNDI on port 0 */ 7692 /* try unload UNDI on port 0 */
5891 bp->func = 0; 7693 bp->pf_num = 0;
5892 bp->fw_seq = 7694 bp->fw_seq =
5893 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 7695 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
5894 DRV_MSG_SEQ_NUMBER_MASK); 7696 DRV_MSG_SEQ_NUMBER_MASK);
5895 reset_code = bnx2x_fw_command(bp, reset_code); 7697 reset_code = bnx2x_fw_command(bp, reset_code, 0);
5896 7698
5897 /* if UNDI is loaded on the other port */ 7699 /* if UNDI is loaded on the other port */
5898 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { 7700 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5899 7701
5900 /* send "DONE" for previous unload */ 7702 /* send "DONE" for previous unload */
5901 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 7703 bnx2x_fw_command(bp,
7704 DRV_MSG_CODE_UNLOAD_DONE, 0);
5902 7705
5903 /* unload UNDI on port 1 */ 7706 /* unload UNDI on port 1 */
5904 bp->func = 1; 7707 bp->pf_num = 1;
5905 bp->fw_seq = 7708 bp->fw_seq =
5906 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 7709 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
5907 DRV_MSG_SEQ_NUMBER_MASK); 7710 DRV_MSG_SEQ_NUMBER_MASK);
5908 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 7711 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5909 7712
5910 bnx2x_fw_command(bp, reset_code); 7713 bnx2x_fw_command(bp, reset_code, 0);
5911 } 7714 }
5912 7715
5913 /* now it's safe to release the lock */ 7716 /* now it's safe to release the lock */
5914 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 7717 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5915 7718
5916 bnx2x_undi_int_disable(bp, func); 7719 bnx2x_undi_int_disable(bp);
5917 7720
5918 /* close input traffic and wait for it */ 7721 /* close input traffic and wait for it */
5919 /* Do not rcv packets to BRB */ 7722 /* Do not rcv packets to BRB */
@@ -5949,14 +7752,13 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5949 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en); 7752 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5950 7753
5951 /* send unload done to the MCP */ 7754 /* send unload done to the MCP */
5952 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 7755 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
5953 7756
5954 /* restore our func and fw_seq */ 7757 /* restore our func and fw_seq */
5955 bp->func = func; 7758 bp->pf_num = orig_pf_num;
5956 bp->fw_seq = 7759 bp->fw_seq =
5957 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 7760 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
5958 DRV_MSG_SEQ_NUMBER_MASK); 7761 DRV_MSG_SEQ_NUMBER_MASK);
5959
5960 } else 7762 } else
5961 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 7763 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5962 } 7764 }
@@ -5978,6 +7780,40 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5978 val = REG_RD(bp, MISC_REG_BOND_ID); 7780 val = REG_RD(bp, MISC_REG_BOND_ID);
5979 id |= (val & 0xf); 7781 id |= (val & 0xf);
5980 bp->common.chip_id = id; 7782 bp->common.chip_id = id;
7783
7784 /* Set doorbell size */
7785 bp->db_size = (1 << BNX2X_DB_SHIFT);
7786
7787 if (CHIP_IS_E2(bp)) {
7788 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7789 if ((val & 1) == 0)
7790 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7791 else
7792 val = (val >> 1) & 1;
7793 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7794 "2_PORT_MODE");
7795 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7796 CHIP_2_PORT_MODE;
7797
7798 if (CHIP_MODE_IS_4_PORT(bp))
7799 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7800 else
7801 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7802 } else {
7803 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7804 bp->pfid = bp->pf_num; /* 0..7 */
7805 }
7806
7807 /*
7808 * set base FW non-default (fast path) status block id, this value is
7809 * used to initialize the fw_sb_id saved on the fp/queue structure to
7810 * determine the id used by the FW.
7811 */
7812 if (CHIP_IS_E1x(bp))
7813 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7814 else /* E2 */
7815 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7816
5981 bp->link_params.chip_id = bp->common.chip_id; 7817 bp->link_params.chip_id = bp->common.chip_id;
5982 BNX2X_DEV_INFO("chip ID is 0x%x\n", id); 7818 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5983 7819
@@ -5994,25 +7830,23 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5994 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", 7830 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5995 bp->common.flash_size, bp->common.flash_size); 7831 bp->common.flash_size, bp->common.flash_size);
5996 7832
5997 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 7833 bnx2x_init_shmem(bp);
5998 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0); 7834
7835 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7836 MISC_REG_GENERIC_CR_1 :
7837 MISC_REG_GENERIC_CR_0));
7838
5999 bp->link_params.shmem_base = bp->common.shmem_base; 7839 bp->link_params.shmem_base = bp->common.shmem_base;
7840 bp->link_params.shmem2_base = bp->common.shmem2_base;
6000 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", 7841 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
6001 bp->common.shmem_base, bp->common.shmem2_base); 7842 bp->common.shmem_base, bp->common.shmem2_base);
6002 7843
6003 if (!bp->common.shmem_base || 7844 if (!bp->common.shmem_base) {
6004 (bp->common.shmem_base < 0xA0000) ||
6005 (bp->common.shmem_base >= 0xC0000)) {
6006 BNX2X_DEV_INFO("MCP not active\n"); 7845 BNX2X_DEV_INFO("MCP not active\n");
6007 bp->flags |= NO_MCP_FLAG; 7846 bp->flags |= NO_MCP_FLAG;
6008 return; 7847 return;
6009 } 7848 }
6010 7849
6011 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6012 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6013 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6014 BNX2X_ERROR("BAD MCP validity signature\n");
6015
6016 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 7850 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6017 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 7851 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
6018 7852
@@ -6035,20 +7869,20 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6035 if (val < BNX2X_BC_VER) { 7869 if (val < BNX2X_BC_VER) {
6036 /* for now only warn 7870 /* for now only warn
6037 * later we might need to enforce this */ 7871 * later we might need to enforce this */
6038 BNX2X_ERROR("This driver needs bc_ver %X but found %X, " 7872 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
6039 "please upgrade BC\n", BNX2X_BC_VER, val); 7873 "please upgrade BC\n", BNX2X_BC_VER, val);
6040 } 7874 }
6041 bp->link_params.feature_config_flags |= 7875 bp->link_params.feature_config_flags |=
6042 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ? 7876 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
6043 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; 7877 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7878
7879 bp->link_params.feature_config_flags |=
7880 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7881 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
7882
7883 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7884 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6044 7885
6045 if (BP_E1HVN(bp) == 0) {
6046 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6047 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6048 } else {
6049 /* no WOL capability for E1HVN != 0 */
6050 bp->flags |= NO_WOL_FLAG;
6051 }
6052 BNX2X_DEV_INFO("%sWoL capable\n", 7886 BNX2X_DEV_INFO("%sWoL capable\n",
6053 (bp->flags & NO_WOL_FLAG) ? "not " : ""); 7887 (bp->flags & NO_WOL_FLAG) ? "not " : "");
6054 7888
@@ -6061,404 +7895,349 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6061 val, val2, val3, val4); 7895 val, val2, val3, val4);
6062} 7896}
6063 7897
7898#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7899#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7900
7901static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7902{
7903 int pfid = BP_FUNC(bp);
7904 int vn = BP_E1HVN(bp);
7905 int igu_sb_id;
7906 u32 val;
7907 u8 fid;
7908
7909 bp->igu_base_sb = 0xff;
7910 bp->igu_sb_cnt = 0;
7911 if (CHIP_INT_MODE_IS_BC(bp)) {
7912 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7913 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
7914
7915 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7916 FP_SB_MAX_E1x;
7917
7918 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7919 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7920
7921 return;
7922 }
7923
7924 /* IGU in normal mode - read CAM */
7925 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7926 igu_sb_id++) {
7927 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7928 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7929 continue;
7930 fid = IGU_FID(val);
7931 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7932 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7933 continue;
7934 if (IGU_VEC(val) == 0)
7935 /* default status block */
7936 bp->igu_dsb_id = igu_sb_id;
7937 else {
7938 if (bp->igu_base_sb == 0xff)
7939 bp->igu_base_sb = igu_sb_id;
7940 bp->igu_sb_cnt++;
7941 }
7942 }
7943 }
7944 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
7945 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
7946 if (bp->igu_sb_cnt == 0)
7947 BNX2X_ERR("CAM configuration error\n");
7948}
7949
6064static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, 7950static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6065 u32 switch_cfg) 7951 u32 switch_cfg)
6066{ 7952{
6067 int port = BP_PORT(bp); 7953 int cfg_size = 0, idx, port = BP_PORT(bp);
6068 u32 ext_phy_type;
6069
6070 switch (switch_cfg) {
6071 case SWITCH_CFG_1G:
6072 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6073
6074 ext_phy_type =
6075 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6076 switch (ext_phy_type) {
6077 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6078 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6079 ext_phy_type);
6080
6081 bp->port.supported |= (SUPPORTED_10baseT_Half |
6082 SUPPORTED_10baseT_Full |
6083 SUPPORTED_100baseT_Half |
6084 SUPPORTED_100baseT_Full |
6085 SUPPORTED_1000baseT_Full |
6086 SUPPORTED_2500baseX_Full |
6087 SUPPORTED_TP |
6088 SUPPORTED_FIBRE |
6089 SUPPORTED_Autoneg |
6090 SUPPORTED_Pause |
6091 SUPPORTED_Asym_Pause);
6092 break;
6093 7954
6094 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: 7955 /* Aggregation of supported attributes of all external phys */
6095 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n", 7956 bp->port.supported[0] = 0;
6096 ext_phy_type); 7957 bp->port.supported[1] = 0;
6097 7958 switch (bp->link_params.num_phys) {
6098 bp->port.supported |= (SUPPORTED_10baseT_Half | 7959 case 1:
6099 SUPPORTED_10baseT_Full | 7960 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
6100 SUPPORTED_100baseT_Half | 7961 cfg_size = 1;
6101 SUPPORTED_100baseT_Full | 7962 break;
6102 SUPPORTED_1000baseT_Full | 7963 case 2:
6103 SUPPORTED_TP | 7964 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
6104 SUPPORTED_FIBRE | 7965 cfg_size = 1;
6105 SUPPORTED_Autoneg | 7966 break;
6106 SUPPORTED_Pause | 7967 case 3:
6107 SUPPORTED_Asym_Pause); 7968 if (bp->link_params.multi_phy_config &
6108 break; 7969 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7970 bp->port.supported[1] =
7971 bp->link_params.phy[EXT_PHY1].supported;
7972 bp->port.supported[0] =
7973 bp->link_params.phy[EXT_PHY2].supported;
7974 } else {
7975 bp->port.supported[0] =
7976 bp->link_params.phy[EXT_PHY1].supported;
7977 bp->port.supported[1] =
7978 bp->link_params.phy[EXT_PHY2].supported;
7979 }
7980 cfg_size = 2;
7981 break;
7982 }
6109 7983
6110 default: 7984 if (!(bp->port.supported[0] || bp->port.supported[1])) {
6111 BNX2X_ERR("NVRAM config error. " 7985 BNX2X_ERR("NVRAM config error. BAD phy config."
6112 "BAD SerDes ext_phy_config 0x%x\n", 7986 "PHY1 config 0x%x, PHY2 config 0x%x\n",
6113 bp->link_params.ext_phy_config); 7987 SHMEM_RD(bp,
7988 dev_info.port_hw_config[port].external_phy_config),
7989 SHMEM_RD(bp,
7990 dev_info.port_hw_config[port].external_phy_config2));
6114 return; 7991 return;
6115 } 7992 }
6116 7993
7994 switch (switch_cfg) {
7995 case SWITCH_CFG_1G:
6117 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + 7996 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6118 port*0x10); 7997 port*0x10);
6119 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); 7998 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6120 break; 7999 break;
6121 8000
6122 case SWITCH_CFG_10G: 8001 case SWITCH_CFG_10G:
6123 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6124
6125 ext_phy_type =
6126 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6127 switch (ext_phy_type) {
6128 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6129 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6130 ext_phy_type);
6131
6132 bp->port.supported |= (SUPPORTED_10baseT_Half |
6133 SUPPORTED_10baseT_Full |
6134 SUPPORTED_100baseT_Half |
6135 SUPPORTED_100baseT_Full |
6136 SUPPORTED_1000baseT_Full |
6137 SUPPORTED_2500baseX_Full |
6138 SUPPORTED_10000baseT_Full |
6139 SUPPORTED_TP |
6140 SUPPORTED_FIBRE |
6141 SUPPORTED_Autoneg |
6142 SUPPORTED_Pause |
6143 SUPPORTED_Asym_Pause);
6144 break;
6145
6146 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6147 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
6148 ext_phy_type);
6149
6150 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6151 SUPPORTED_1000baseT_Full |
6152 SUPPORTED_FIBRE |
6153 SUPPORTED_Autoneg |
6154 SUPPORTED_Pause |
6155 SUPPORTED_Asym_Pause);
6156 break;
6157
6158 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6159 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6160 ext_phy_type);
6161
6162 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6163 SUPPORTED_2500baseX_Full |
6164 SUPPORTED_1000baseT_Full |
6165 SUPPORTED_FIBRE |
6166 SUPPORTED_Autoneg |
6167 SUPPORTED_Pause |
6168 SUPPORTED_Asym_Pause);
6169 break;
6170
6171 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6172 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6173 ext_phy_type);
6174
6175 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6176 SUPPORTED_FIBRE |
6177 SUPPORTED_Pause |
6178 SUPPORTED_Asym_Pause);
6179 break;
6180
6181 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6182 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6183 ext_phy_type);
6184
6185 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6186 SUPPORTED_1000baseT_Full |
6187 SUPPORTED_FIBRE |
6188 SUPPORTED_Pause |
6189 SUPPORTED_Asym_Pause);
6190 break;
6191
6192 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6193 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
6194 ext_phy_type);
6195
6196 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6197 SUPPORTED_1000baseT_Full |
6198 SUPPORTED_Autoneg |
6199 SUPPORTED_FIBRE |
6200 SUPPORTED_Pause |
6201 SUPPORTED_Asym_Pause);
6202 break;
6203
6204 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6205 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
6206 ext_phy_type);
6207
6208 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6209 SUPPORTED_1000baseT_Full |
6210 SUPPORTED_Autoneg |
6211 SUPPORTED_FIBRE |
6212 SUPPORTED_Pause |
6213 SUPPORTED_Asym_Pause);
6214 break;
6215
6216 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6217 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6218 ext_phy_type);
6219
6220 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6221 SUPPORTED_TP |
6222 SUPPORTED_Autoneg |
6223 SUPPORTED_Pause |
6224 SUPPORTED_Asym_Pause);
6225 break;
6226
6227 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
6228 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
6229 ext_phy_type);
6230
6231 bp->port.supported |= (SUPPORTED_10baseT_Half |
6232 SUPPORTED_10baseT_Full |
6233 SUPPORTED_100baseT_Half |
6234 SUPPORTED_100baseT_Full |
6235 SUPPORTED_1000baseT_Full |
6236 SUPPORTED_10000baseT_Full |
6237 SUPPORTED_TP |
6238 SUPPORTED_Autoneg |
6239 SUPPORTED_Pause |
6240 SUPPORTED_Asym_Pause);
6241 break;
6242
6243 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6244 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6245 bp->link_params.ext_phy_config);
6246 break;
6247
6248 default:
6249 BNX2X_ERR("NVRAM config error. "
6250 "BAD XGXS ext_phy_config 0x%x\n",
6251 bp->link_params.ext_phy_config);
6252 return;
6253 }
6254
6255 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + 8002 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6256 port*0x18); 8003 port*0x18);
6257 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); 8004 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6258
6259 break; 8005 break;
6260 8006
6261 default: 8007 default:
6262 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", 8008 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6263 bp->port.link_config); 8009 bp->port.link_config[0]);
6264 return; 8010 return;
6265 } 8011 }
6266 bp->link_params.phy_addr = bp->port.phy_addr; 8012 /* mask what we support according to speed_cap_mask per configuration */
6267 8013 for (idx = 0; idx < cfg_size; idx++) {
6268 /* mask what we support according to speed_cap_mask */ 8014 if (!(bp->link_params.speed_cap_mask[idx] &
6269 if (!(bp->link_params.speed_cap_mask &
6270 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) 8015 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6271 bp->port.supported &= ~SUPPORTED_10baseT_Half; 8016 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
6272 8017
6273 if (!(bp->link_params.speed_cap_mask & 8018 if (!(bp->link_params.speed_cap_mask[idx] &
6274 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) 8019 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6275 bp->port.supported &= ~SUPPORTED_10baseT_Full; 8020 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
6276 8021
6277 if (!(bp->link_params.speed_cap_mask & 8022 if (!(bp->link_params.speed_cap_mask[idx] &
6278 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) 8023 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6279 bp->port.supported &= ~SUPPORTED_100baseT_Half; 8024 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
6280 8025
6281 if (!(bp->link_params.speed_cap_mask & 8026 if (!(bp->link_params.speed_cap_mask[idx] &
6282 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) 8027 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6283 bp->port.supported &= ~SUPPORTED_100baseT_Full; 8028 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
6284 8029
6285 if (!(bp->link_params.speed_cap_mask & 8030 if (!(bp->link_params.speed_cap_mask[idx] &
6286 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) 8031 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6287 bp->port.supported &= ~(SUPPORTED_1000baseT_Half | 8032 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
6288 SUPPORTED_1000baseT_Full); 8033 SUPPORTED_1000baseT_Full);
6289 8034
6290 if (!(bp->link_params.speed_cap_mask & 8035 if (!(bp->link_params.speed_cap_mask[idx] &
6291 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 8036 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6292 bp->port.supported &= ~SUPPORTED_2500baseX_Full; 8037 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
6293 8038
6294 if (!(bp->link_params.speed_cap_mask & 8039 if (!(bp->link_params.speed_cap_mask[idx] &
6295 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) 8040 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6296 bp->port.supported &= ~SUPPORTED_10000baseT_Full; 8041 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
6297 8042
6298 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported); 8043 }
8044
8045 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
8046 bp->port.supported[1]);
6299} 8047}
6300 8048
6301static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) 8049static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6302{ 8050{
6303 bp->link_params.req_duplex = DUPLEX_FULL; 8051 u32 link_config, idx, cfg_size = 0;
8052 bp->port.advertising[0] = 0;
8053 bp->port.advertising[1] = 0;
8054 switch (bp->link_params.num_phys) {
8055 case 1:
8056 case 2:
8057 cfg_size = 1;
8058 break;
8059 case 3:
8060 cfg_size = 2;
8061 break;
8062 }
8063 for (idx = 0; idx < cfg_size; idx++) {
8064 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
8065 link_config = bp->port.link_config[idx];
8066 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8067 case PORT_FEATURE_LINK_SPEED_AUTO:
8068 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
8069 bp->link_params.req_line_speed[idx] =
8070 SPEED_AUTO_NEG;
8071 bp->port.advertising[idx] |=
8072 bp->port.supported[idx];
8073 } else {
8074 /* force 10G, no AN */
8075 bp->link_params.req_line_speed[idx] =
8076 SPEED_10000;
8077 bp->port.advertising[idx] |=
8078 (ADVERTISED_10000baseT_Full |
8079 ADVERTISED_FIBRE);
8080 continue;
8081 }
8082 break;
6304 8083
6305 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) { 8084 case PORT_FEATURE_LINK_SPEED_10M_FULL:
6306 case PORT_FEATURE_LINK_SPEED_AUTO: 8085 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
6307 if (bp->port.supported & SUPPORTED_Autoneg) { 8086 bp->link_params.req_line_speed[idx] =
6308 bp->link_params.req_line_speed = SPEED_AUTO_NEG; 8087 SPEED_10;
6309 bp->port.advertising = bp->port.supported; 8088 bp->port.advertising[idx] |=
6310 } else { 8089 (ADVERTISED_10baseT_Full |
6311 u32 ext_phy_type = 8090 ADVERTISED_TP);
6312 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); 8091 } else {
8092 BNX2X_ERROR("NVRAM config error. "
8093 "Invalid link_config 0x%x"
8094 " speed_cap_mask 0x%x\n",
8095 link_config,
8096 bp->link_params.speed_cap_mask[idx]);
8097 return;
8098 }
8099 break;
6313 8100
6314 if ((ext_phy_type == 8101 case PORT_FEATURE_LINK_SPEED_10M_HALF:
6315 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) || 8102 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
6316 (ext_phy_type == 8103 bp->link_params.req_line_speed[idx] =
6317 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) { 8104 SPEED_10;
6318 /* force 10G, no AN */ 8105 bp->link_params.req_duplex[idx] =
6319 bp->link_params.req_line_speed = SPEED_10000; 8106 DUPLEX_HALF;
6320 bp->port.advertising = 8107 bp->port.advertising[idx] |=
6321 (ADVERTISED_10000baseT_Full | 8108 (ADVERTISED_10baseT_Half |
6322 ADVERTISED_FIBRE); 8109 ADVERTISED_TP);
6323 break; 8110 } else {
8111 BNX2X_ERROR("NVRAM config error. "
8112 "Invalid link_config 0x%x"
8113 " speed_cap_mask 0x%x\n",
8114 link_config,
8115 bp->link_params.speed_cap_mask[idx]);
8116 return;
6324 } 8117 }
6325 BNX2X_ERR("NVRAM config error. " 8118 break;
6326 "Invalid link_config 0x%x"
6327 " Autoneg not supported\n",
6328 bp->port.link_config);
6329 return;
6330 }
6331 break;
6332 8119
6333 case PORT_FEATURE_LINK_SPEED_10M_FULL: 8120 case PORT_FEATURE_LINK_SPEED_100M_FULL:
6334 if (bp->port.supported & SUPPORTED_10baseT_Full) { 8121 if (bp->port.supported[idx] &
6335 bp->link_params.req_line_speed = SPEED_10; 8122 SUPPORTED_100baseT_Full) {
6336 bp->port.advertising = (ADVERTISED_10baseT_Full | 8123 bp->link_params.req_line_speed[idx] =
6337 ADVERTISED_TP); 8124 SPEED_100;
6338 } else { 8125 bp->port.advertising[idx] |=
6339 BNX2X_ERROR("NVRAM config error. " 8126 (ADVERTISED_100baseT_Full |
6340 "Invalid link_config 0x%x" 8127 ADVERTISED_TP);
6341 " speed_cap_mask 0x%x\n", 8128 } else {
6342 bp->port.link_config, 8129 BNX2X_ERROR("NVRAM config error. "
6343 bp->link_params.speed_cap_mask); 8130 "Invalid link_config 0x%x"
6344 return; 8131 " speed_cap_mask 0x%x\n",
6345 } 8132 link_config,
6346 break; 8133 bp->link_params.speed_cap_mask[idx]);
8134 return;
8135 }
8136 break;
6347 8137
6348 case PORT_FEATURE_LINK_SPEED_10M_HALF: 8138 case PORT_FEATURE_LINK_SPEED_100M_HALF:
6349 if (bp->port.supported & SUPPORTED_10baseT_Half) { 8139 if (bp->port.supported[idx] &
6350 bp->link_params.req_line_speed = SPEED_10; 8140 SUPPORTED_100baseT_Half) {
6351 bp->link_params.req_duplex = DUPLEX_HALF; 8141 bp->link_params.req_line_speed[idx] =
6352 bp->port.advertising = (ADVERTISED_10baseT_Half | 8142 SPEED_100;
6353 ADVERTISED_TP); 8143 bp->link_params.req_duplex[idx] =
6354 } else { 8144 DUPLEX_HALF;
6355 BNX2X_ERROR("NVRAM config error. " 8145 bp->port.advertising[idx] |=
8146 (ADVERTISED_100baseT_Half |
8147 ADVERTISED_TP);
8148 } else {
8149 BNX2X_ERROR("NVRAM config error. "
6356 "Invalid link_config 0x%x" 8150 "Invalid link_config 0x%x"
6357 " speed_cap_mask 0x%x\n", 8151 " speed_cap_mask 0x%x\n",
6358 bp->port.link_config, 8152 link_config,
6359 bp->link_params.speed_cap_mask); 8153 bp->link_params.speed_cap_mask[idx]);
6360 return; 8154 return;
6361 } 8155 }
6362 break; 8156 break;
6363 8157
6364 case PORT_FEATURE_LINK_SPEED_100M_FULL: 8158 case PORT_FEATURE_LINK_SPEED_1G:
6365 if (bp->port.supported & SUPPORTED_100baseT_Full) { 8159 if (bp->port.supported[idx] &
6366 bp->link_params.req_line_speed = SPEED_100; 8160 SUPPORTED_1000baseT_Full) {
6367 bp->port.advertising = (ADVERTISED_100baseT_Full | 8161 bp->link_params.req_line_speed[idx] =
6368 ADVERTISED_TP); 8162 SPEED_1000;
6369 } else { 8163 bp->port.advertising[idx] |=
6370 BNX2X_ERROR("NVRAM config error. " 8164 (ADVERTISED_1000baseT_Full |
8165 ADVERTISED_TP);
8166 } else {
8167 BNX2X_ERROR("NVRAM config error. "
6371 "Invalid link_config 0x%x" 8168 "Invalid link_config 0x%x"
6372 " speed_cap_mask 0x%x\n", 8169 " speed_cap_mask 0x%x\n",
6373 bp->port.link_config, 8170 link_config,
6374 bp->link_params.speed_cap_mask); 8171 bp->link_params.speed_cap_mask[idx]);
6375 return; 8172 return;
6376 } 8173 }
6377 break; 8174 break;
6378 8175
6379 case PORT_FEATURE_LINK_SPEED_100M_HALF: 8176 case PORT_FEATURE_LINK_SPEED_2_5G:
6380 if (bp->port.supported & SUPPORTED_100baseT_Half) { 8177 if (bp->port.supported[idx] &
6381 bp->link_params.req_line_speed = SPEED_100; 8178 SUPPORTED_2500baseX_Full) {
6382 bp->link_params.req_duplex = DUPLEX_HALF; 8179 bp->link_params.req_line_speed[idx] =
6383 bp->port.advertising = (ADVERTISED_100baseT_Half | 8180 SPEED_2500;
8181 bp->port.advertising[idx] |=
8182 (ADVERTISED_2500baseX_Full |
6384 ADVERTISED_TP); 8183 ADVERTISED_TP);
6385 } else { 8184 } else {
6386 BNX2X_ERROR("NVRAM config error. " 8185 BNX2X_ERROR("NVRAM config error. "
6387 "Invalid link_config 0x%x" 8186 "Invalid link_config 0x%x"
6388 " speed_cap_mask 0x%x\n", 8187 " speed_cap_mask 0x%x\n",
6389 bp->port.link_config, 8188 link_config,
6390 bp->link_params.speed_cap_mask); 8189 bp->link_params.speed_cap_mask[idx]);
6391 return; 8190 return;
6392 } 8191 }
6393 break; 8192 break;
6394 8193
6395 case PORT_FEATURE_LINK_SPEED_1G: 8194 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6396 if (bp->port.supported & SUPPORTED_1000baseT_Full) { 8195 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6397 bp->link_params.req_line_speed = SPEED_1000; 8196 case PORT_FEATURE_LINK_SPEED_10G_KR:
6398 bp->port.advertising = (ADVERTISED_1000baseT_Full | 8197 if (bp->port.supported[idx] &
6399 ADVERTISED_TP); 8198 SUPPORTED_10000baseT_Full) {
6400 } else { 8199 bp->link_params.req_line_speed[idx] =
6401 BNX2X_ERROR("NVRAM config error. " 8200 SPEED_10000;
8201 bp->port.advertising[idx] |=
8202 (ADVERTISED_10000baseT_Full |
8203 ADVERTISED_FIBRE);
8204 } else {
8205 BNX2X_ERROR("NVRAM config error. "
6402 "Invalid link_config 0x%x" 8206 "Invalid link_config 0x%x"
6403 " speed_cap_mask 0x%x\n", 8207 " speed_cap_mask 0x%x\n",
6404 bp->port.link_config, 8208 link_config,
6405 bp->link_params.speed_cap_mask); 8209 bp->link_params.speed_cap_mask[idx]);
6406 return; 8210 return;
6407 } 8211 }
6408 break; 8212 break;
6409 8213
6410 case PORT_FEATURE_LINK_SPEED_2_5G: 8214 default:
6411 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
6412 bp->link_params.req_line_speed = SPEED_2500;
6413 bp->port.advertising = (ADVERTISED_2500baseX_Full |
6414 ADVERTISED_TP);
6415 } else {
6416 BNX2X_ERROR("NVRAM config error. " 8215 BNX2X_ERROR("NVRAM config error. "
6417 "Invalid link_config 0x%x" 8216 "BAD link speed link_config 0x%x\n",
6418 " speed_cap_mask 0x%x\n", 8217 link_config);
6419 bp->port.link_config, 8218 bp->link_params.req_line_speed[idx] =
6420 bp->link_params.speed_cap_mask); 8219 SPEED_AUTO_NEG;
6421 return; 8220 bp->port.advertising[idx] =
8221 bp->port.supported[idx];
8222 break;
6422 } 8223 }
6423 break;
6424 8224
6425 case PORT_FEATURE_LINK_SPEED_10G_CX4: 8225 bp->link_params.req_flow_ctrl[idx] = (link_config &
6426 case PORT_FEATURE_LINK_SPEED_10G_KX4: 8226 PORT_FEATURE_FLOW_CONTROL_MASK);
6427 case PORT_FEATURE_LINK_SPEED_10G_KR: 8227 if ((bp->link_params.req_flow_ctrl[idx] ==
6428 if (bp->port.supported & SUPPORTED_10000baseT_Full) { 8228 BNX2X_FLOW_CTRL_AUTO) &&
6429 bp->link_params.req_line_speed = SPEED_10000; 8229 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
6430 bp->port.advertising = (ADVERTISED_10000baseT_Full | 8230 bp->link_params.req_flow_ctrl[idx] =
6431 ADVERTISED_FIBRE); 8231 BNX2X_FLOW_CTRL_NONE;
6432 } else {
6433 BNX2X_ERROR("NVRAM config error. "
6434 "Invalid link_config 0x%x"
6435 " speed_cap_mask 0x%x\n",
6436 bp->port.link_config,
6437 bp->link_params.speed_cap_mask);
6438 return;
6439 } 8232 }
6440 break;
6441 8233
6442 default: 8234 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
6443 BNX2X_ERROR("NVRAM config error. " 8235 " 0x%x advertising 0x%x\n",
6444 "BAD link speed link_config 0x%x\n", 8236 bp->link_params.req_line_speed[idx],
6445 bp->port.link_config); 8237 bp->link_params.req_duplex[idx],
6446 bp->link_params.req_line_speed = SPEED_AUTO_NEG; 8238 bp->link_params.req_flow_ctrl[idx],
6447 bp->port.advertising = bp->port.supported; 8239 bp->port.advertising[idx]);
6448 break;
6449 } 8240 }
6450
6451 bp->link_params.req_flow_ctrl = (bp->port.link_config &
6452 PORT_FEATURE_FLOW_CONTROL_MASK);
6453 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
6454 !(bp->port.supported & SUPPORTED_Autoneg))
6455 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6456
6457 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
6458 " advertising 0x%x\n",
6459 bp->link_params.req_line_speed,
6460 bp->link_params.req_duplex,
6461 bp->link_params.req_flow_ctrl, bp->port.advertising);
6462} 8241}
6463 8242
6464static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) 8243static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
@@ -6472,50 +8251,29 @@ static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6472static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) 8251static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6473{ 8252{
6474 int port = BP_PORT(bp); 8253 int port = BP_PORT(bp);
6475 u32 val, val2;
6476 u32 config; 8254 u32 config;
6477 u16 i; 8255 u32 ext_phy_type, ext_phy_config;
6478 u32 ext_phy_type;
6479 8256
6480 bp->link_params.bp = bp; 8257 bp->link_params.bp = bp;
6481 bp->link_params.port = port; 8258 bp->link_params.port = port;
6482 8259
6483 bp->link_params.lane_config = 8260 bp->link_params.lane_config =
6484 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); 8261 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6485 bp->link_params.ext_phy_config =
6486 SHMEM_RD(bp,
6487 dev_info.port_hw_config[port].external_phy_config);
6488 /* BCM8727_NOC => BCM8727 no over current */
6489 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
6490 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
6491 bp->link_params.ext_phy_config &=
6492 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6493 bp->link_params.ext_phy_config |=
6494 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
6495 bp->link_params.feature_config_flags |=
6496 FEATURE_CONFIG_BCM8727_NOC;
6497 }
6498 8262
6499 bp->link_params.speed_cap_mask = 8263 bp->link_params.speed_cap_mask[0] =
6500 SHMEM_RD(bp, 8264 SHMEM_RD(bp,
6501 dev_info.port_hw_config[port].speed_capability_mask); 8265 dev_info.port_hw_config[port].speed_capability_mask);
6502 8266 bp->link_params.speed_cap_mask[1] =
6503 bp->port.link_config = 8267 SHMEM_RD(bp,
8268 dev_info.port_hw_config[port].speed_capability_mask2);
8269 bp->port.link_config[0] =
6504 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); 8270 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6505 8271
6506 /* Get the 4 lanes xgxs config rx and tx */ 8272 bp->port.link_config[1] =
6507 for (i = 0; i < 2; i++) { 8273 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
6508 val = SHMEM_RD(bp,
6509 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
6510 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
6511 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
6512
6513 val = SHMEM_RD(bp,
6514 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
6515 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
6516 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
6517 }
6518 8274
8275 bp->link_params.multi_phy_config =
8276 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
6519 /* If the device is capable of WoL, set the default state according 8277 /* If the device is capable of WoL, set the default state according
6520 * to the HW 8278 * to the HW
6521 */ 8279 */
@@ -6523,14 +8281,15 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6523 bp->wol = (!(bp->flags & NO_WOL_FLAG) && 8281 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6524 (config & PORT_FEATURE_WOL_ENABLED)); 8282 (config & PORT_FEATURE_WOL_ENABLED));
6525 8283
6526 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x" 8284 BNX2X_DEV_INFO("lane_config 0x%08x "
6527 " speed_cap_mask 0x%08x link_config 0x%08x\n", 8285 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
6528 bp->link_params.lane_config, 8286 bp->link_params.lane_config,
6529 bp->link_params.ext_phy_config, 8287 bp->link_params.speed_cap_mask[0],
6530 bp->link_params.speed_cap_mask, bp->port.link_config); 8288 bp->port.link_config[0]);
6531 8289
6532 bp->link_params.switch_cfg |= (bp->port.link_config & 8290 bp->link_params.switch_cfg = (bp->port.link_config[0] &
6533 PORT_FEATURE_CONNECTED_SWITCH_MASK); 8291 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8292 bnx2x_phy_probe(&bp->link_params);
6534 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); 8293 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6535 8294
6536 bnx2x_link_settings_requested(bp); 8295 bnx2x_link_settings_requested(bp);
@@ -6539,106 +8298,317 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6539 * If connected directly, work with the internal PHY, otherwise, work 8298 * If connected directly, work with the internal PHY, otherwise, work
6540 * with the external PHY 8299 * with the external PHY
6541 */ 8300 */
6542 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); 8301 ext_phy_config =
8302 SHMEM_RD(bp,
8303 dev_info.port_hw_config[port].external_phy_config);
8304 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6543 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 8305 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6544 bp->mdio.prtad = bp->link_params.phy_addr; 8306 bp->mdio.prtad = bp->port.phy_addr;
6545 8307
6546 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && 8308 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6547 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 8309 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6548 bp->mdio.prtad = 8310 bp->mdio.prtad =
6549 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config); 8311 XGXS_EXT_PHY_ADDR(ext_phy_config);
8312
8313 /*
8314 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
8315 * In MF mode, it is set to cover self test cases
8316 */
8317 if (IS_MF(bp))
8318 bp->port.need_hw_lock = 1;
8319 else
8320 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
8321 bp->common.shmem_base,
8322 bp->common.shmem2_base);
8323}
8324
8325#ifdef BCM_CNIC
8326static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
8327{
8328 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8329 drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
8330 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8331 drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
8332
8333 /* Get the number of maximum allowed iSCSI and FCoE connections */
8334 bp->cnic_eth_dev.max_iscsi_conn =
8335 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
8336 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
8337
8338 bp->cnic_eth_dev.max_fcoe_conn =
8339 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
8340 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
8341
8342 BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
8343 bp->cnic_eth_dev.max_iscsi_conn,
8344 bp->cnic_eth_dev.max_fcoe_conn);
8345
8346 /* If mamimum allowed number of connections is zero -
8347 * disable the feature.
8348 */
8349 if (!bp->cnic_eth_dev.max_iscsi_conn)
8350 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8351
8352 if (!bp->cnic_eth_dev.max_fcoe_conn)
8353 bp->flags |= NO_FCOE_FLAG;
8354}
8355#endif
8356
8357static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8358{
8359 u32 val, val2;
8360 int func = BP_ABS_FUNC(bp);
8361 int port = BP_PORT(bp);
8362#ifdef BCM_CNIC
8363 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
8364 u8 *fip_mac = bp->fip_mac;
8365#endif
8366
8367 if (BP_NOMCP(bp)) {
8368 BNX2X_ERROR("warning: random MAC workaround active\n");
8369 random_ether_addr(bp->dev->dev_addr);
8370 } else if (IS_MF(bp)) {
8371 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8372 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8373 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8374 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8375 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8376
8377#ifdef BCM_CNIC
8378 /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
8379 * FCoE MAC then the appropriate feature should be disabled.
8380 */
8381 if (IS_MF_SI(bp)) {
8382 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8383 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8384 val2 = MF_CFG_RD(bp, func_ext_config[func].
8385 iscsi_mac_addr_upper);
8386 val = MF_CFG_RD(bp, func_ext_config[func].
8387 iscsi_mac_addr_lower);
8388 BNX2X_DEV_INFO("Read iSCSI MAC: "
8389 "0x%x:0x%04x\n", val2, val);
8390 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8391 } else
8392 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8393
8394 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
8395 val2 = MF_CFG_RD(bp, func_ext_config[func].
8396 fcoe_mac_addr_upper);
8397 val = MF_CFG_RD(bp, func_ext_config[func].
8398 fcoe_mac_addr_lower);
8399 BNX2X_DEV_INFO("Read FCoE MAC to "
8400 "0x%x:0x%04x\n", val2, val);
8401 bnx2x_set_mac_buf(fip_mac, val, val2);
8402
8403 } else
8404 bp->flags |= NO_FCOE_FLAG;
8405 }
8406#endif
8407 } else {
8408 /* in SF read MACs from port configuration */
8409 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8410 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8411 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8412
8413#ifdef BCM_CNIC
8414 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8415 iscsi_mac_upper);
8416 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8417 iscsi_mac_lower);
8418 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8419#endif
8420 }
6550 8421
6551 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6552 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6553 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
6554 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 8422 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6555 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 8423 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6556 8424
6557#ifdef BCM_CNIC 8425#ifdef BCM_CNIC
6558 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper); 8426 /* Set the FCoE MAC in modes other then MF_SI */
6559 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower); 8427 if (!CHIP_IS_E1x(bp)) {
6560 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); 8428 if (IS_MF_SD(bp))
8429 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
8430 else if (!IS_MF(bp))
8431 memcpy(fip_mac, iscsi_mac, ETH_ALEN);
8432 }
8433
8434 /* Disable iSCSI if MAC configuration is
8435 * invalid.
8436 */
8437 if (!is_valid_ether_addr(iscsi_mac)) {
8438 bp->flags |= NO_ISCSI_FLAG;
8439 memset(iscsi_mac, 0, ETH_ALEN);
8440 }
8441
8442 /* Disable FCoE if MAC configuration is
8443 * invalid.
8444 */
8445 if (!is_valid_ether_addr(fip_mac)) {
8446 bp->flags |= NO_FCOE_FLAG;
8447 memset(bp->fip_mac, 0, ETH_ALEN);
8448 }
6561#endif 8449#endif
6562} 8450}
6563 8451
6564static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) 8452static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6565{ 8453{
6566 int func = BP_FUNC(bp); 8454 int /*abs*/func = BP_ABS_FUNC(bp);
6567 u32 val, val2; 8455 int vn;
8456 u32 val = 0;
6568 int rc = 0; 8457 int rc = 0;
6569 8458
6570 bnx2x_get_common_hwinfo(bp); 8459 bnx2x_get_common_hwinfo(bp);
6571 8460
6572 bp->e1hov = 0; 8461 if (CHIP_IS_E1x(bp)) {
6573 bp->e1hmf = 0; 8462 bp->common.int_block = INT_BLOCK_HC;
6574 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) { 8463
6575 bp->mf_config = 8464 bp->igu_dsb_id = DEF_SB_IGU_ID;
6576 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 8465 bp->igu_base_sb = 0;
8466 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8467 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8468 } else {
8469 bp->common.int_block = INT_BLOCK_IGU;
8470 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8471 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8472 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8473 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8474 } else
8475 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8476
8477 bnx2x_get_igu_cam_info(bp);
8478
8479 }
8480 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8481 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8482
8483 /*
8484 * Initialize MF configuration
8485 */
8486
8487 bp->mf_ov = 0;
8488 bp->mf_mode = 0;
8489 vn = BP_E1HVN(bp);
8490
8491 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8492 DP(NETIF_MSG_PROBE,
8493 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8494 bp->common.shmem2_base, SHMEM2_RD(bp, size),
8495 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
8496 if (SHMEM2_HAS(bp, mf_cfg_addr))
8497 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8498 else
8499 bp->common.mf_cfg_base = bp->common.shmem_base +
8500 offsetof(struct shmem_region, func_mb) +
8501 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8502 /*
8503 * get mf configuration:
8504 * 1. existence of MF configuration
8505 * 2. MAC address must be legal (check only upper bytes)
8506 * for Switch-Independent mode;
8507 * OVLAN must be legal for Switch-Dependent mode
8508 * 3. SF_MODE configures specific MF mode
8509 */
8510 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8511 /* get mf configuration */
8512 val = SHMEM_RD(bp,
8513 dev_info.shared_feature_config.config);
8514 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8515
8516 switch (val) {
8517 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8518 val = MF_CFG_RD(bp, func_mf_config[func].
8519 mac_upper);
8520 /* check for legal mac (upper bytes)*/
8521 if (val != 0xffff) {
8522 bp->mf_mode = MULTI_FUNCTION_SI;
8523 bp->mf_config[vn] = MF_CFG_RD(bp,
8524 func_mf_config[func].config);
8525 } else
8526 DP(NETIF_MSG_PROBE, "illegal MAC "
8527 "address for SI\n");
8528 break;
8529 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8530 /* get OV configuration */
8531 val = MF_CFG_RD(bp,
8532 func_mf_config[FUNC_0].e1hov_tag);
8533 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8534
8535 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8536 bp->mf_mode = MULTI_FUNCTION_SD;
8537 bp->mf_config[vn] = MF_CFG_RD(bp,
8538 func_mf_config[func].config);
8539 } else
8540 DP(NETIF_MSG_PROBE, "illegal OV for "
8541 "SD\n");
8542 break;
8543 default:
8544 /* Unknown configuration: reset mf_config */
8545 bp->mf_config[vn] = 0;
8546 DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
8547 val);
8548 }
8549 }
6577 8550
6578 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
6579 FUNC_MF_CFG_E1HOV_TAG_MASK);
6580 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
6581 bp->e1hmf = 1;
6582 BNX2X_DEV_INFO("%s function mode\n", 8551 BNX2X_DEV_INFO("%s function mode\n",
6583 IS_E1HMF(bp) ? "multi" : "single"); 8552 IS_MF(bp) ? "multi" : "single");
6584 8553
6585 if (IS_E1HMF(bp)) { 8554 switch (bp->mf_mode) {
6586 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func]. 8555 case MULTI_FUNCTION_SD:
6587 e1hov_tag) & 8556 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
6588 FUNC_MF_CFG_E1HOV_TAG_MASK); 8557 FUNC_MF_CFG_E1HOV_TAG_MASK;
6589 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 8558 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6590 bp->e1hov = val; 8559 bp->mf_ov = val;
6591 BNX2X_DEV_INFO("E1HOV for func %d is %d " 8560 BNX2X_DEV_INFO("MF OV for func %d is %d"
6592 "(0x%04x)\n", 8561 " (0x%04x)\n", func,
6593 func, bp->e1hov, bp->e1hov); 8562 bp->mf_ov, bp->mf_ov);
6594 } else { 8563 } else {
6595 BNX2X_ERROR("No valid E1HOV for func %d," 8564 BNX2X_ERR("No valid MF OV for func %d,"
6596 " aborting\n", func); 8565 " aborting\n", func);
6597 rc = -EPERM; 8566 rc = -EPERM;
6598 } 8567 }
6599 } else { 8568 break;
6600 if (BP_E1HVN(bp)) { 8569 case MULTI_FUNCTION_SI:
6601 BNX2X_ERROR("VN %d in single function mode," 8570 BNX2X_DEV_INFO("func %d is in MF "
6602 " aborting\n", BP_E1HVN(bp)); 8571 "switch-independent mode\n", func);
8572 break;
8573 default:
8574 if (vn) {
8575 BNX2X_ERR("VN %d in single function mode,"
8576 " aborting\n", vn);
6603 rc = -EPERM; 8577 rc = -EPERM;
6604 } 8578 }
8579 break;
6605 } 8580 }
8581
6606 } 8582 }
6607 8583
8584 /* adjust igu_sb_cnt to MF for E1x */
8585 if (CHIP_IS_E1x(bp) && IS_MF(bp))
8586 bp->igu_sb_cnt /= E1HVN_MAX;
8587
8588 /*
8589 * adjust E2 sb count: to be removed when FW will support
8590 * more then 16 L2 clients
8591 */
8592#define MAX_L2_CLIENTS 16
8593 if (CHIP_IS_E2(bp))
8594 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8595 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8596
6608 if (!BP_NOMCP(bp)) { 8597 if (!BP_NOMCP(bp)) {
6609 bnx2x_get_port_hwinfo(bp); 8598 bnx2x_get_port_hwinfo(bp);
6610 8599
6611 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) & 8600 bp->fw_seq =
6612 DRV_MSG_SEQ_NUMBER_MASK); 8601 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8602 DRV_MSG_SEQ_NUMBER_MASK);
6613 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 8603 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6614 } 8604 }
6615 8605
6616 if (IS_E1HMF(bp)) { 8606 /* Get MAC addresses */
6617 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper); 8607 bnx2x_get_mac_hwinfo(bp);
6618 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
6619 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6620 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6621 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6622 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6623 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6624 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6625 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6626 bp->dev->dev_addr[5] = (u8)(val & 0xff);
6627 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6628 ETH_ALEN);
6629 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6630 ETH_ALEN);
6631 }
6632
6633 return rc;
6634 }
6635 8608
6636 if (BP_NOMCP(bp)) { 8609#ifdef BCM_CNIC
6637 /* only supposed to happen on emulation/FPGA */ 8610 bnx2x_get_cnic_info(bp);
6638 BNX2X_ERROR("warning: random MAC workaround active\n"); 8611#endif
6639 random_ether_addr(bp->dev->dev_addr);
6640 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6641 }
6642 8612
6643 return rc; 8613 return rc;
6644} 8614}
@@ -6709,7 +8679,7 @@ out_not_found:
6709 8679
6710static int __devinit bnx2x_init_bp(struct bnx2x *bp) 8680static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6711{ 8681{
6712 int func = BP_FUNC(bp); 8682 int func;
6713 int timer_interval; 8683 int timer_interval;
6714 int rc; 8684 int rc;
6715 8685
@@ -6729,7 +8699,13 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6729 8699
6730 rc = bnx2x_get_hwinfo(bp); 8700 rc = bnx2x_get_hwinfo(bp);
6731 8701
8702 if (!rc)
8703 rc = bnx2x_alloc_mem_bp(bp);
8704
6732 bnx2x_read_fwinfo(bp); 8705 bnx2x_read_fwinfo(bp);
8706
8707 func = BP_FUNC(bp);
8708
6733 /* need to reset chip if undi was active */ 8709 /* need to reset chip if undi was active */
6734 if (!BP_NOMCP(bp)) 8710 if (!BP_NOMCP(bp))
6735 bnx2x_undi_unload(bp); 8711 bnx2x_undi_unload(bp);
@@ -6741,18 +8717,9 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6741 dev_err(&bp->pdev->dev, "MCP disabled, " 8717 dev_err(&bp->pdev->dev, "MCP disabled, "
6742 "must load devices in order!\n"); 8718 "must load devices in order!\n");
6743 8719
6744 /* Set multi queue mode */
6745 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6746 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
6747 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
6748 "requested is not MSI-X\n");
6749 multi_mode = ETH_RSS_MODE_DISABLED;
6750 }
6751 bp->multi_mode = multi_mode; 8720 bp->multi_mode = multi_mode;
6752 bp->int_mode = int_mode; 8721 bp->int_mode = int_mode;
6753 8722
6754 bp->dev->features |= NETIF_F_GRO;
6755
6756 /* Set TPA flags */ 8723 /* Set TPA flags */
6757 if (disable_tpa) { 8724 if (disable_tpa) {
6758 bp->flags &= ~TPA_ENABLE_FLAG; 8725 bp->flags &= ~TPA_ENABLE_FLAG;
@@ -6771,13 +8738,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6771 bp->mrrs = mrrs; 8738 bp->mrrs = mrrs;
6772 8739
6773 bp->tx_ring_size = MAX_TX_AVAIL; 8740 bp->tx_ring_size = MAX_TX_AVAIL;
6774 bp->rx_ring_size = MAX_RX_AVAIL;
6775
6776 bp->rx_csum = 1;
6777 8741
6778 /* make sure that the numbers are in the right granularity */ 8742 /* make sure that the numbers are in the right granularity */
6779 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); 8743 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
6780 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); 8744 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
6781 8745
6782 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); 8746 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6783 bp->current_interval = (poll ? poll : timer_interval); 8747 bp->current_interval = (poll ? poll : timer_interval);
@@ -6787,6 +8751,9 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6787 bp->timer.data = (unsigned long) bp; 8751 bp->timer.data = (unsigned long) bp;
6788 bp->timer.function = bnx2x_timer; 8752 bp->timer.function = bnx2x_timer;
6789 8753
8754 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
8755 bnx2x_dcbx_init_params(bp);
8756
6790 return rc; 8757 return rc;
6791} 8758}
6792 8759
@@ -6853,12 +8820,197 @@ static int bnx2x_close(struct net_device *dev)
6853 return 0; 8820 return 0;
6854} 8821}
6855 8822
8823#define E1_MAX_UC_LIST 29
8824#define E1H_MAX_UC_LIST 30
8825#define E2_MAX_UC_LIST 14
8826static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
8827{
8828 if (CHIP_IS_E1(bp))
8829 return E1_MAX_UC_LIST;
8830 else if (CHIP_IS_E1H(bp))
8831 return E1H_MAX_UC_LIST;
8832 else
8833 return E2_MAX_UC_LIST;
8834}
8835
8836
8837static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
8838{
8839 if (CHIP_IS_E1(bp))
8840 /* CAM Entries for Port0:
8841 * 0 - prim ETH MAC
8842 * 1 - BCAST MAC
8843 * 2 - iSCSI L2 ring ETH MAC
8844 * 3-31 - UC MACs
8845 *
8846 * Port1 entries are allocated the same way starting from
8847 * entry 32.
8848 */
8849 return 3 + 32 * BP_PORT(bp);
8850 else if (CHIP_IS_E1H(bp)) {
8851 /* CAM Entries:
8852 * 0-7 - prim ETH MAC for each function
8853 * 8-15 - iSCSI L2 ring ETH MAC for each function
8854 * 16 till 255 UC MAC lists for each function
8855 *
8856 * Remark: There is no FCoE support for E1H, thus FCoE related
8857 * MACs are not considered.
8858 */
8859 return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
8860 bnx2x_max_uc_list(bp) * BP_FUNC(bp);
8861 } else {
8862 /* CAM Entries (there is a separate CAM per engine):
8863 * 0-4 - prim ETH MAC for each function
8864 * 4-7 - iSCSI L2 ring ETH MAC for each function
8865 * 8-11 - FIP ucast L2 MAC for each function
8866 * 12-15 - ALL_ENODE_MACS mcast MAC for each function
8867 * 16 till 71 UC MAC lists for each function
8868 */
8869 u8 func_idx =
8870 (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
8871
8872 return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
8873 bnx2x_max_uc_list(bp) * func_idx;
8874 }
8875}
8876
8877/* set uc list, do not wait as wait implies sleep and
8878 * set_rx_mode can be invoked from non-sleepable context.
8879 *
8880 * Instead we use the same ramrod data buffer each time we need
8881 * to configure a list of addresses, and use the fact that the
8882 * list of MACs is changed in an incremental way and that the
8883 * function is called under the netif_addr_lock. A temporary
8884 * inconsistent CAM configuration (possible in case of very fast
8885 * sequence of add/del/add on the host side) will shortly be
8886 * restored by the handler of the last ramrod.
8887 */
8888static int bnx2x_set_uc_list(struct bnx2x *bp)
8889{
8890 int i = 0, old;
8891 struct net_device *dev = bp->dev;
8892 u8 offset = bnx2x_uc_list_cam_offset(bp);
8893 struct netdev_hw_addr *ha;
8894 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
8895 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
8896
8897 if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
8898 return -EINVAL;
8899
8900 netdev_for_each_uc_addr(ha, dev) {
8901 /* copy mac */
8902 config_cmd->config_table[i].msb_mac_addr =
8903 swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
8904 config_cmd->config_table[i].middle_mac_addr =
8905 swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
8906 config_cmd->config_table[i].lsb_mac_addr =
8907 swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
8908
8909 config_cmd->config_table[i].vlan_id = 0;
8910 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
8911 config_cmd->config_table[i].clients_bit_vector =
8912 cpu_to_le32(1 << BP_L_ID(bp));
8913
8914 SET_FLAG(config_cmd->config_table[i].flags,
8915 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
8916 T_ETH_MAC_COMMAND_SET);
8917
8918 DP(NETIF_MSG_IFUP,
8919 "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
8920 config_cmd->config_table[i].msb_mac_addr,
8921 config_cmd->config_table[i].middle_mac_addr,
8922 config_cmd->config_table[i].lsb_mac_addr);
8923
8924 i++;
8925
8926 /* Set uc MAC in NIG */
8927 bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
8928 LLH_CAM_ETH_LINE + i);
8929 }
8930 old = config_cmd->hdr.length;
8931 if (old > i) {
8932 for (; i < old; i++) {
8933 if (CAM_IS_INVALID(config_cmd->
8934 config_table[i])) {
8935 /* already invalidated */
8936 break;
8937 }
8938 /* invalidate */
8939 SET_FLAG(config_cmd->config_table[i].flags,
8940 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
8941 T_ETH_MAC_COMMAND_INVALIDATE);
8942 }
8943 }
8944
8945 wmb();
8946
8947 config_cmd->hdr.length = i;
8948 config_cmd->hdr.offset = offset;
8949 config_cmd->hdr.client_id = 0xff;
8950 /* Mark that this ramrod doesn't use bp->set_mac_pending for
8951 * synchronization.
8952 */
8953 config_cmd->hdr.echo = 0;
8954
8955 mb();
8956
8957 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
8958 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
8959
8960}
8961
8962void bnx2x_invalidate_uc_list(struct bnx2x *bp)
8963{
8964 int i;
8965 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
8966 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
8967 int ramrod_flags = WAIT_RAMROD_COMMON;
8968 u8 offset = bnx2x_uc_list_cam_offset(bp);
8969 u8 max_list_size = bnx2x_max_uc_list(bp);
8970
8971 for (i = 0; i < max_list_size; i++) {
8972 SET_FLAG(config_cmd->config_table[i].flags,
8973 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
8974 T_ETH_MAC_COMMAND_INVALIDATE);
8975 bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
8976 }
8977
8978 wmb();
8979
8980 config_cmd->hdr.length = max_list_size;
8981 config_cmd->hdr.offset = offset;
8982 config_cmd->hdr.client_id = 0xff;
8983 /* We'll wait for a completion this time... */
8984 config_cmd->hdr.echo = 1;
8985
8986 bp->set_mac_pending = 1;
8987
8988 mb();
8989
8990 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
8991 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
8992
8993 /* Wait for a completion */
8994 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
8995 ramrod_flags);
8996
8997}
8998
8999static inline int bnx2x_set_mc_list(struct bnx2x *bp)
9000{
9001 /* some multicasts */
9002 if (CHIP_IS_E1(bp)) {
9003 return bnx2x_set_e1_mc_list(bp);
9004 } else { /* E1H and newer */
9005 return bnx2x_set_e1h_mc_list(bp);
9006 }
9007}
9008
6856/* called with netif_tx_lock from dev_mcast.c */ 9009/* called with netif_tx_lock from dev_mcast.c */
6857void bnx2x_set_rx_mode(struct net_device *dev) 9010void bnx2x_set_rx_mode(struct net_device *dev)
6858{ 9011{
6859 struct bnx2x *bp = netdev_priv(dev); 9012 struct bnx2x *bp = netdev_priv(dev);
6860 u32 rx_mode = BNX2X_RX_MODE_NORMAL; 9013 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6861 int port = BP_PORT(bp);
6862 9014
6863 if (bp->state != BNX2X_STATE_OPEN) { 9015 if (bp->state != BNX2X_STATE_OPEN) {
6864 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 9016 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
@@ -6869,112 +9021,22 @@ void bnx2x_set_rx_mode(struct net_device *dev)
6869 9021
6870 if (dev->flags & IFF_PROMISC) 9022 if (dev->flags & IFF_PROMISC)
6871 rx_mode = BNX2X_RX_MODE_PROMISC; 9023 rx_mode = BNX2X_RX_MODE_PROMISC;
6872 9024 else if (dev->flags & IFF_ALLMULTI)
6873 else if ((dev->flags & IFF_ALLMULTI) ||
6874 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6875 CHIP_IS_E1(bp)))
6876 rx_mode = BNX2X_RX_MODE_ALLMULTI; 9025 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9026 else {
9027 /* some multicasts */
9028 if (bnx2x_set_mc_list(bp))
9029 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6877 9030
6878 else { /* some multicasts */ 9031 /* some unicasts */
6879 if (CHIP_IS_E1(bp)) { 9032 if (bnx2x_set_uc_list(bp))
6880 int i, old, offset; 9033 rx_mode = BNX2X_RX_MODE_PROMISC;
6881 struct netdev_hw_addr *ha;
6882 struct mac_configuration_cmd *config =
6883 bnx2x_sp(bp, mcast_config);
6884
6885 i = 0;
6886 netdev_for_each_mc_addr(ha, dev) {
6887 config->config_table[i].
6888 cam_entry.msb_mac_addr =
6889 swab16(*(u16 *)&ha->addr[0]);
6890 config->config_table[i].
6891 cam_entry.middle_mac_addr =
6892 swab16(*(u16 *)&ha->addr[2]);
6893 config->config_table[i].
6894 cam_entry.lsb_mac_addr =
6895 swab16(*(u16 *)&ha->addr[4]);
6896 config->config_table[i].cam_entry.flags =
6897 cpu_to_le16(port);
6898 config->config_table[i].
6899 target_table_entry.flags = 0;
6900 config->config_table[i].target_table_entry.
6901 clients_bit_vector =
6902 cpu_to_le32(1 << BP_L_ID(bp));
6903 config->config_table[i].
6904 target_table_entry.vlan_id = 0;
6905
6906 DP(NETIF_MSG_IFUP,
6907 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6908 config->config_table[i].
6909 cam_entry.msb_mac_addr,
6910 config->config_table[i].
6911 cam_entry.middle_mac_addr,
6912 config->config_table[i].
6913 cam_entry.lsb_mac_addr);
6914 i++;
6915 }
6916 old = config->hdr.length;
6917 if (old > i) {
6918 for (; i < old; i++) {
6919 if (CAM_IS_INVALID(config->
6920 config_table[i])) {
6921 /* already invalidated */
6922 break;
6923 }
6924 /* invalidate */
6925 CAM_INVALIDATE(config->
6926 config_table[i]);
6927 }
6928 }
6929
6930 if (CHIP_REV_IS_SLOW(bp))
6931 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6932 else
6933 offset = BNX2X_MAX_MULTICAST*(1 + port);
6934
6935 config->hdr.length = i;
6936 config->hdr.offset = offset;
6937 config->hdr.client_id = bp->fp->cl_id;
6938 config->hdr.reserved1 = 0;
6939
6940 bp->set_mac_pending++;
6941 smp_wmb();
6942
6943 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6944 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6945 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6946 0);
6947 } else { /* E1H */
6948 /* Accept one or more multicasts */
6949 struct netdev_hw_addr *ha;
6950 u32 mc_filter[MC_HASH_SIZE];
6951 u32 crc, bit, regidx;
6952 int i;
6953
6954 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6955
6956 netdev_for_each_mc_addr(ha, dev) {
6957 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6958 ha->addr);
6959
6960 crc = crc32c_le(0, ha->addr, ETH_ALEN);
6961 bit = (crc >> 24) & 0xff;
6962 regidx = bit >> 5;
6963 bit &= 0x1f;
6964 mc_filter[regidx] |= (1 << bit);
6965 }
6966
6967 for (i = 0; i < MC_HASH_SIZE; i++)
6968 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6969 mc_filter[i]);
6970 }
6971 } 9034 }
6972 9035
6973 bp->rx_mode = rx_mode; 9036 bp->rx_mode = rx_mode;
6974 bnx2x_set_storm_rx_mode(bp); 9037 bnx2x_set_storm_rx_mode(bp);
6975} 9038}
6976 9039
6977
6978/* called with rtnl_lock */ 9040/* called with rtnl_lock */
6979static int bnx2x_mdio_read(struct net_device *netdev, int prtad, 9041static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6980 int devad, u16 addr) 9042 int devad, u16 addr)
@@ -6982,23 +9044,15 @@ static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6982 struct bnx2x *bp = netdev_priv(netdev); 9044 struct bnx2x *bp = netdev_priv(netdev);
6983 u16 value; 9045 u16 value;
6984 int rc; 9046 int rc;
6985 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6986 9047
6987 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", 9048 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6988 prtad, devad, addr); 9049 prtad, devad, addr);
6989 9050
6990 if (prtad != bp->mdio.prtad) {
6991 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
6992 prtad, bp->mdio.prtad);
6993 return -EINVAL;
6994 }
6995
6996 /* The HW expects different devad if CL22 is used */ 9051 /* The HW expects different devad if CL22 is used */
6997 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; 9052 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6998 9053
6999 bnx2x_acquire_phy_lock(bp); 9054 bnx2x_acquire_phy_lock(bp);
7000 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad, 9055 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
7001 devad, addr, &value);
7002 bnx2x_release_phy_lock(bp); 9056 bnx2x_release_phy_lock(bp);
7003 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); 9057 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
7004 9058
@@ -7012,24 +9066,16 @@ static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
7012 u16 addr, u16 value) 9066 u16 addr, u16 value)
7013{ 9067{
7014 struct bnx2x *bp = netdev_priv(netdev); 9068 struct bnx2x *bp = netdev_priv(netdev);
7015 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7016 int rc; 9069 int rc;
7017 9070
7018 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x," 9071 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
7019 " value 0x%x\n", prtad, devad, addr, value); 9072 " value 0x%x\n", prtad, devad, addr, value);
7020 9073
7021 if (prtad != bp->mdio.prtad) {
7022 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
7023 prtad, bp->mdio.prtad);
7024 return -EINVAL;
7025 }
7026
7027 /* The HW expects different devad if CL22 is used */ 9074 /* The HW expects different devad if CL22 is used */
7028 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; 9075 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7029 9076
7030 bnx2x_acquire_phy_lock(bp); 9077 bnx2x_acquire_phy_lock(bp);
7031 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad, 9078 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
7032 devad, addr, value);
7033 bnx2x_release_phy_lock(bp); 9079 bnx2x_release_phy_lock(bp);
7034 return rc; 9080 return rc;
7035} 9081}
@@ -7064,15 +9110,15 @@ static const struct net_device_ops bnx2x_netdev_ops = {
7064 .ndo_open = bnx2x_open, 9110 .ndo_open = bnx2x_open,
7065 .ndo_stop = bnx2x_close, 9111 .ndo_stop = bnx2x_close,
7066 .ndo_start_xmit = bnx2x_start_xmit, 9112 .ndo_start_xmit = bnx2x_start_xmit,
7067 .ndo_set_multicast_list = bnx2x_set_rx_mode, 9113 .ndo_select_queue = bnx2x_select_queue,
9114 .ndo_set_rx_mode = bnx2x_set_rx_mode,
7068 .ndo_set_mac_address = bnx2x_change_mac_addr, 9115 .ndo_set_mac_address = bnx2x_change_mac_addr,
7069 .ndo_validate_addr = eth_validate_addr, 9116 .ndo_validate_addr = eth_validate_addr,
7070 .ndo_do_ioctl = bnx2x_ioctl, 9117 .ndo_do_ioctl = bnx2x_ioctl,
7071 .ndo_change_mtu = bnx2x_change_mtu, 9118 .ndo_change_mtu = bnx2x_change_mtu,
9119 .ndo_fix_features = bnx2x_fix_features,
9120 .ndo_set_features = bnx2x_set_features,
7072 .ndo_tx_timeout = bnx2x_tx_timeout, 9121 .ndo_tx_timeout = bnx2x_tx_timeout,
7073#ifdef BCM_VLAN
7074 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
7075#endif
7076#ifdef CONFIG_NET_POLL_CONTROLLER 9122#ifdef CONFIG_NET_POLL_CONTROLLER
7077 .ndo_poll_controller = poll_bnx2x, 9123 .ndo_poll_controller = poll_bnx2x,
7078#endif 9124#endif
@@ -7090,7 +9136,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7090 bp->dev = dev; 9136 bp->dev = dev;
7091 bp->pdev = pdev; 9137 bp->pdev = pdev;
7092 bp->flags = 0; 9138 bp->flags = 0;
7093 bp->func = PCI_FUNC(pdev->devfn); 9139 bp->pf_num = PCI_FUNC(pdev->devfn);
7094 9140
7095 rc = pci_enable_device(pdev); 9141 rc = pci_enable_device(pdev);
7096 if (rc) { 9142 if (rc) {
@@ -7172,7 +9218,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7172 } 9218 }
7173 9219
7174 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), 9220 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7175 min_t(u64, BNX2X_DB_SIZE, 9221 min_t(u64, BNX2X_DB_SIZE(bp),
7176 pci_resource_len(pdev, 2))); 9222 pci_resource_len(pdev, 2)));
7177 if (!bp->doorbells) { 9223 if (!bp->doorbells) {
7178 dev_err(&bp->pdev->dev, 9224 dev_err(&bp->pdev->dev,
@@ -7198,22 +9244,23 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7198 9244
7199 dev->netdev_ops = &bnx2x_netdev_ops; 9245 dev->netdev_ops = &bnx2x_netdev_ops;
7200 bnx2x_set_ethtool_ops(dev); 9246 bnx2x_set_ethtool_ops(dev);
7201 dev->features |= NETIF_F_SG; 9247
7202 dev->features |= NETIF_F_HW_CSUM; 9248 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
9249 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
9250 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX;
9251
9252 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
9253 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
9254
9255 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
7203 if (bp->flags & USING_DAC_FLAG) 9256 if (bp->flags & USING_DAC_FLAG)
7204 dev->features |= NETIF_F_HIGHDMA; 9257 dev->features |= NETIF_F_HIGHDMA;
7205 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); 9258
7206 dev->features |= NETIF_F_TSO6; 9259 /* Add Loopback capability to the device */
7207#ifdef BCM_VLAN 9260 dev->hw_features |= NETIF_F_LOOPBACK;
7208 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 9261
7209 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG); 9262#ifdef BCM_DCBNL
7210 9263 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
7211 dev->vlan_features |= NETIF_F_SG;
7212 dev->vlan_features |= NETIF_F_HW_CSUM;
7213 if (bp->flags & USING_DAC_FLAG)
7214 dev->vlan_features |= NETIF_F_HIGHDMA;
7215 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7216 dev->vlan_features |= NETIF_F_TSO6;
7217#endif 9264#endif
7218 9265
7219 /* get_port_hwinfo() will set prtad and mmds properly */ 9266 /* get_port_hwinfo() will set prtad and mmds properly */
@@ -7259,7 +9306,7 @@ static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7259 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; 9306 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
7260} 9307}
7261 9308
7262static int __devinit bnx2x_check_firmware(struct bnx2x *bp) 9309static int bnx2x_check_firmware(struct bnx2x *bp)
7263{ 9310{
7264 const struct firmware *firmware = bp->firmware; 9311 const struct firmware *firmware = bp->firmware;
7265 struct bnx2x_fw_file_hdr *fw_hdr; 9312 struct bnx2x_fw_file_hdr *fw_hdr;
@@ -7348,6 +9395,30 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
7348 } 9395 }
7349} 9396}
7350 9397
9398/**
9399 * IRO array is stored in the following format:
9400 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
9401 */
9402static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9403{
9404 const __be32 *source = (const __be32 *)_source;
9405 struct iro *target = (struct iro *)_target;
9406 u32 i, j, tmp;
9407
9408 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9409 target[i].base = be32_to_cpu(source[j]);
9410 j++;
9411 tmp = be32_to_cpu(source[j]);
9412 target[i].m1 = (tmp >> 16) & 0xffff;
9413 target[i].m2 = tmp & 0xffff;
9414 j++;
9415 tmp = be32_to_cpu(source[j]);
9416 target[i].m3 = (tmp >> 16) & 0xffff;
9417 target[i].size = tmp & 0xffff;
9418 j++;
9419 }
9420}
9421
7351static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) 9422static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7352{ 9423{
7353 const __be16 *source = (const __be16 *)_source; 9424 const __be16 *source = (const __be16 *)_source;
@@ -7370,7 +9441,7 @@ do { \
7370 (u8 *)bp->arr, len); \ 9441 (u8 *)bp->arr, len); \
7371} while (0) 9442} while (0)
7372 9443
7373static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev) 9444int bnx2x_init_firmware(struct bnx2x *bp)
7374{ 9445{
7375 const char *fw_file_name; 9446 const char *fw_file_name;
7376 struct bnx2x_fw_file_hdr *fw_hdr; 9447 struct bnx2x_fw_file_hdr *fw_hdr;
@@ -7380,22 +9451,24 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
7380 fw_file_name = FW_FILE_NAME_E1; 9451 fw_file_name = FW_FILE_NAME_E1;
7381 else if (CHIP_IS_E1H(bp)) 9452 else if (CHIP_IS_E1H(bp))
7382 fw_file_name = FW_FILE_NAME_E1H; 9453 fw_file_name = FW_FILE_NAME_E1H;
9454 else if (CHIP_IS_E2(bp))
9455 fw_file_name = FW_FILE_NAME_E2;
7383 else { 9456 else {
7384 dev_err(dev, "Unsupported chip revision\n"); 9457 BNX2X_ERR("Unsupported chip revision\n");
7385 return -EINVAL; 9458 return -EINVAL;
7386 } 9459 }
7387 9460
7388 dev_info(dev, "Loading %s\n", fw_file_name); 9461 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
7389 9462
7390 rc = request_firmware(&bp->firmware, fw_file_name, dev); 9463 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
7391 if (rc) { 9464 if (rc) {
7392 dev_err(dev, "Can't load firmware file %s\n", fw_file_name); 9465 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
7393 goto request_firmware_exit; 9466 goto request_firmware_exit;
7394 } 9467 }
7395 9468
7396 rc = bnx2x_check_firmware(bp); 9469 rc = bnx2x_check_firmware(bp);
7397 if (rc) { 9470 if (rc) {
7398 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name); 9471 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
7399 goto request_firmware_exit; 9472 goto request_firmware_exit;
7400 } 9473 }
7401 9474
@@ -7429,9 +9502,13 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
7429 be32_to_cpu(fw_hdr->csem_int_table_data.offset); 9502 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7430 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + 9503 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
7431 be32_to_cpu(fw_hdr->csem_pram_data.offset); 9504 be32_to_cpu(fw_hdr->csem_pram_data.offset);
9505 /* IRO */
9506 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
7432 9507
7433 return 0; 9508 return 0;
7434 9509
9510iro_alloc_err:
9511 kfree(bp->init_ops_offsets);
7435init_offsets_alloc_err: 9512init_offsets_alloc_err:
7436 kfree(bp->init_ops); 9513 kfree(bp->init_ops);
7437init_ops_alloc_err: 9514init_ops_alloc_err:
@@ -7442,6 +9519,15 @@ request_firmware_exit:
7442 return rc; 9519 return rc;
7443} 9520}
7444 9521
9522static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9523{
9524 int cid_count = L2_FP_COUNT(l2_cid_count);
9525
9526#ifdef BCM_CNIC
9527 cid_count += CNIC_CID_MAX;
9528#endif
9529 return roundup(cid_count, QM_CID_ROUND);
9530}
7445 9531
7446static int __devinit bnx2x_init_one(struct pci_dev *pdev, 9532static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7447 const struct pci_device_id *ent) 9533 const struct pci_device_id *ent)
@@ -7449,10 +9535,30 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7449 struct net_device *dev = NULL; 9535 struct net_device *dev = NULL;
7450 struct bnx2x *bp; 9536 struct bnx2x *bp;
7451 int pcie_width, pcie_speed; 9537 int pcie_width, pcie_speed;
7452 int rc; 9538 int rc, cid_count;
9539
9540 switch (ent->driver_data) {
9541 case BCM57710:
9542 case BCM57711:
9543 case BCM57711E:
9544 cid_count = FP_SB_MAX_E1x;
9545 break;
9546
9547 case BCM57712:
9548 case BCM57712E:
9549 cid_count = FP_SB_MAX_E2;
9550 break;
9551
9552 default:
9553 pr_err("Unknown board_type (%ld), aborting\n",
9554 ent->driver_data);
9555 return -ENODEV;
9556 }
9557
9558 cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
7453 9559
7454 /* dev zeroed in init_etherdev */ 9560 /* dev zeroed in init_etherdev */
7455 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT); 9561 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
7456 if (!dev) { 9562 if (!dev) {
7457 dev_err(&pdev->dev, "Cannot allocate net device\n"); 9563 dev_err(&pdev->dev, "Cannot allocate net device\n");
7458 return -ENOMEM; 9564 return -ENOMEM;
@@ -7463,6 +9569,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7463 9569
7464 pci_set_drvdata(pdev, dev); 9570 pci_set_drvdata(pdev, dev);
7465 9571
9572 bp->l2_cid_count = cid_count;
9573
7466 rc = bnx2x_init_dev(pdev, dev); 9574 rc = bnx2x_init_dev(pdev, dev);
7467 if (rc < 0) { 9575 if (rc < 0) {
7468 free_netdev(dev); 9576 free_netdev(dev);
@@ -7473,12 +9581,23 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7473 if (rc) 9581 if (rc)
7474 goto init_one_exit; 9582 goto init_one_exit;
7475 9583
7476 /* Set init arrays */ 9584 /* calc qm_cid_count */
7477 rc = bnx2x_init_firmware(bp, &pdev->dev); 9585 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
7478 if (rc) { 9586
7479 dev_err(&pdev->dev, "Error loading firmware\n"); 9587#ifdef BCM_CNIC
7480 goto init_one_exit; 9588 /* disable FCOE L2 queue for E1x*/
7481 } 9589 if (CHIP_IS_E1x(bp))
9590 bp->flags |= NO_FCOE_FLAG;
9591
9592#endif
9593
9594 /* Configure interrupt mode: try to enable MSI-X/MSI if
9595 * needed, set bp->num_queues appropriately.
9596 */
9597 bnx2x_set_int_mode(bp);
9598
9599 /* Add all NAPI objects */
9600 bnx2x_add_all_napi(bp);
7482 9601
7483 rc = register_netdev(dev); 9602 rc = register_netdev(dev);
7484 if (rc) { 9603 if (rc) {
@@ -7486,11 +9605,24 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7486 goto init_one_exit; 9605 goto init_one_exit;
7487 } 9606 }
7488 9607
9608#ifdef BCM_CNIC
9609 if (!NO_FCOE(bp)) {
9610 /* Add storage MAC address */
9611 rtnl_lock();
9612 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9613 rtnl_unlock();
9614 }
9615#endif
9616
7489 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); 9617 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9618
7490 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx," 9619 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
7491 " IRQ %d, ", board_info[ent->driver_data].name, 9620 " IRQ %d, ", board_info[ent->driver_data].name,
7492 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), 9621 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
7493 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz", 9622 pcie_width,
9623 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9624 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9625 "5GHz (Gen2)" : "2.5GHz",
7494 dev->base_addr, bp->pdev->irq); 9626 dev->base_addr, bp->pdev->irq);
7495 pr_cont("node addr %pM\n", dev->dev_addr); 9627 pr_cont("node addr %pM\n", dev->dev_addr);
7496 9628
@@ -7525,22 +9657,45 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7525 } 9657 }
7526 bp = netdev_priv(dev); 9658 bp = netdev_priv(dev);
7527 9659
9660#ifdef BCM_CNIC
9661 /* Delete storage MAC address */
9662 if (!NO_FCOE(bp)) {
9663 rtnl_lock();
9664 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9665 rtnl_unlock();
9666 }
9667#endif
9668
9669#ifdef BCM_DCBNL
9670 /* Delete app tlvs from dcbnl */
9671 bnx2x_dcbnl_update_applist(bp, true);
9672#endif
9673
7528 unregister_netdev(dev); 9674 unregister_netdev(dev);
7529 9675
9676 /* Delete all NAPI objects */
9677 bnx2x_del_all_napi(bp);
9678
9679 /* Power on: we can't let PCI layer write to us while we are in D3 */
9680 bnx2x_set_power_state(bp, PCI_D0);
9681
9682 /* Disable MSI/MSI-X */
9683 bnx2x_disable_msi(bp);
9684
9685 /* Power off */
9686 bnx2x_set_power_state(bp, PCI_D3hot);
9687
7530 /* Make sure RESET task is not scheduled before continuing */ 9688 /* Make sure RESET task is not scheduled before continuing */
7531 cancel_delayed_work_sync(&bp->reset_task); 9689 cancel_delayed_work_sync(&bp->reset_task);
7532 9690
7533 kfree(bp->init_ops_offsets);
7534 kfree(bp->init_ops);
7535 kfree(bp->init_data);
7536 release_firmware(bp->firmware);
7537
7538 if (bp->regview) 9691 if (bp->regview)
7539 iounmap(bp->regview); 9692 iounmap(bp->regview);
7540 9693
7541 if (bp->doorbells) 9694 if (bp->doorbells)
7542 iounmap(bp->doorbells); 9695 iounmap(bp->doorbells);
7543 9696
9697 bnx2x_free_mem_bp(bp);
9698
7544 free_netdev(dev); 9699 free_netdev(dev);
7545 9700
7546 if (atomic_read(&pdev->enable_cnt) == 1) 9701 if (atomic_read(&pdev->enable_cnt) == 1)
@@ -7566,22 +9721,14 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7566 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); 9721 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
7567 9722
7568 /* Release IRQs */ 9723 /* Release IRQs */
7569 bnx2x_free_irq(bp, false); 9724 bnx2x_free_irq(bp);
7570
7571 if (CHIP_IS_E1(bp)) {
7572 struct mac_configuration_cmd *config =
7573 bnx2x_sp(bp, mcast_config);
7574
7575 for (i = 0; i < config->hdr.length; i++)
7576 CAM_INVALIDATE(config->config_table[i]);
7577 }
7578 9725
7579 /* Free SKBs, SGEs, TPA pool and driver internals */ 9726 /* Free SKBs, SGEs, TPA pool and driver internals */
7580 bnx2x_free_skbs(bp); 9727 bnx2x_free_skbs(bp);
7581 for_each_queue(bp, i) 9728
9729 for_each_rx_queue(bp, i)
7582 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 9730 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7583 for_each_queue(bp, i) 9731
7584 netif_napi_del(&bnx2x_fp(bp, i, napi));
7585 bnx2x_free_mem(bp); 9732 bnx2x_free_mem(bp);
7586 9733
7587 bp->state = BNX2X_STATE_CLOSED; 9734 bp->state = BNX2X_STATE_CLOSED;
@@ -7613,8 +9760,9 @@ static void bnx2x_eeh_recover(struct bnx2x *bp)
7613 BNX2X_ERR("BAD MCP validity signature\n"); 9760 BNX2X_ERR("BAD MCP validity signature\n");
7614 9761
7615 if (!BP_NOMCP(bp)) { 9762 if (!BP_NOMCP(bp)) {
7616 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header) 9763 bp->fw_seq =
7617 & DRV_MSG_SEQ_NUMBER_MASK); 9764 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9765 DRV_MSG_SEQ_NUMBER_MASK);
7618 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 9766 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7619 } 9767 }
7620} 9768}
@@ -7697,7 +9845,8 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
7697 struct bnx2x *bp = netdev_priv(dev); 9845 struct bnx2x *bp = netdev_priv(dev);
7698 9846
7699 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 9847 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
7700 printk(KERN_ERR "Handling parity error recovery. Try again later\n"); 9848 printk(KERN_ERR "Handling parity error recovery. "
9849 "Try again later\n");
7701 return; 9850 return;
7702 } 9851 }
7703 9852
@@ -7772,19 +9921,60 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
7772#endif 9921#endif
7773 9922
7774 spin_lock_bh(&bp->spq_lock); 9923 spin_lock_bh(&bp->spq_lock);
9924 BUG_ON(bp->cnic_spq_pending < count);
7775 bp->cnic_spq_pending -= count; 9925 bp->cnic_spq_pending -= count;
7776 9926
7777 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
7778 bp->cnic_spq_pending++) {
7779 9927
7780 if (!bp->cnic_kwq_pending) 9928 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9929 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9930 & SPE_HDR_CONN_TYPE) >>
9931 SPE_HDR_CONN_TYPE_SHIFT;
9932
9933 /* Set validation for iSCSI L2 client before sending SETUP
9934 * ramrod
9935 */
9936 if (type == ETH_CONNECTION_TYPE) {
9937 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9938 hdr.conn_and_cmd_data) >>
9939 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9940
9941 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9942 bnx2x_set_ctx_validation(&bp->context.
9943 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9944 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9945 }
9946
9947 /* There may be not more than 8 L2 and not more than 8 L5 SPEs
9948 * We also check that the number of outstanding
9949 * COMMON ramrods is not more than the EQ and SPQ can
9950 * accommodate.
9951 */
9952 if (type == ETH_CONNECTION_TYPE) {
9953 if (!atomic_read(&bp->cq_spq_left))
9954 break;
9955 else
9956 atomic_dec(&bp->cq_spq_left);
9957 } else if (type == NONE_CONNECTION_TYPE) {
9958 if (!atomic_read(&bp->eq_spq_left))
9959 break;
9960 else
9961 atomic_dec(&bp->eq_spq_left);
9962 } else if ((type == ISCSI_CONNECTION_TYPE) ||
9963 (type == FCOE_CONNECTION_TYPE)) {
9964 if (bp->cnic_spq_pending >=
9965 bp->cnic_eth_dev.max_kwqe_pending)
9966 break;
9967 else
9968 bp->cnic_spq_pending++;
9969 } else {
9970 BNX2X_ERR("Unknown SPE type: %d\n", type);
9971 bnx2x_panic();
7781 break; 9972 break;
9973 }
7782 9974
7783 spe = bnx2x_sp_get_next(bp); 9975 spe = bnx2x_sp_get_next(bp);
7784 *spe = *bp->cnic_kwq_cons; 9976 *spe = *bp->cnic_kwq_cons;
7785 9977
7786 bp->cnic_kwq_pending--;
7787
7788 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n", 9978 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
7789 bp->cnic_spq_pending, bp->cnic_kwq_pending, count); 9979 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
7790 9980
@@ -7822,8 +10012,8 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev,
7822 10012
7823 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n", 10013 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
7824 spe->hdr.conn_and_cmd_data, spe->hdr.type, 10014 spe->hdr.conn_and_cmd_data, spe->hdr.type,
7825 spe->data.mac_config_addr.hi, 10015 spe->data.update_data_addr.hi,
7826 spe->data.mac_config_addr.lo, 10016 spe->data.update_data_addr.lo,
7827 bp->cnic_kwq_pending); 10017 bp->cnic_kwq_pending);
7828 10018
7829 if (bp->cnic_kwq_prod == bp->cnic_kwq_last) 10019 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
@@ -7846,7 +10036,8 @@ static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7846 int rc = 0; 10036 int rc = 0;
7847 10037
7848 mutex_lock(&bp->cnic_mutex); 10038 mutex_lock(&bp->cnic_mutex);
7849 c_ops = bp->cnic_ops; 10039 c_ops = rcu_dereference_protected(bp->cnic_ops,
10040 lockdep_is_held(&bp->cnic_mutex));
7850 if (c_ops) 10041 if (c_ops)
7851 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); 10042 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7852 mutex_unlock(&bp->cnic_mutex); 10043 mutex_unlock(&bp->cnic_mutex);
@@ -7889,7 +10080,7 @@ static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
7889 ctl.data.comp.cid = cid; 10080 ctl.data.comp.cid = cid;
7890 10081
7891 bnx2x_cnic_ctl_send_bh(bp, &ctl); 10082 bnx2x_cnic_ctl_send_bh(bp, &ctl);
7892 bnx2x_cnic_sp_post(bp, 1); 10083 bnx2x_cnic_sp_post(bp, 0);
7893} 10084}
7894 10085
7895static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) 10086static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
@@ -7906,8 +10097,8 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7906 break; 10097 break;
7907 } 10098 }
7908 10099
7909 case DRV_CTL_COMPLETION_CMD: { 10100 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
7910 int count = ctl->data.comp.comp_count; 10101 int count = ctl->data.credit.credit_count;
7911 10102
7912 bnx2x_cnic_sp_post(bp, count); 10103 bnx2x_cnic_sp_post(bp, count);
7913 break; 10104 break;
@@ -7917,8 +10108,27 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7917 case DRV_CTL_START_L2_CMD: { 10108 case DRV_CTL_START_L2_CMD: {
7918 u32 cli = ctl->data.ring.client_id; 10109 u32 cli = ctl->data.ring.client_id;
7919 10110
7920 bp->rx_mode_cl_mask |= (1 << cli); 10111 /* Clear FCoE FIP and ALL ENODE MACs addresses first */
7921 bnx2x_set_storm_rx_mode(bp); 10112 bnx2x_del_fcoe_eth_macs(bp);
10113
10114 /* Set iSCSI MAC address */
10115 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
10116
10117 mmiowb();
10118 barrier();
10119
10120 /* Start accepting on iSCSI L2 ring. Accept all multicasts
10121 * because it's the only way for UIO Client to accept
10122 * multicasts (in non-promiscuous mode only one Client per
10123 * function will receive multicast packets (leading in our
10124 * case).
10125 */
10126 bnx2x_rxq_set_mac_filters(bp, cli,
10127 BNX2X_ACCEPT_UNICAST |
10128 BNX2X_ACCEPT_BROADCAST |
10129 BNX2X_ACCEPT_ALL_MULTICAST);
10130 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
10131
7922 break; 10132 break;
7923 } 10133 }
7924 10134
@@ -7926,8 +10136,28 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7926 case DRV_CTL_STOP_L2_CMD: { 10136 case DRV_CTL_STOP_L2_CMD: {
7927 u32 cli = ctl->data.ring.client_id; 10137 u32 cli = ctl->data.ring.client_id;
7928 10138
7929 bp->rx_mode_cl_mask &= ~(1 << cli); 10139 /* Stop accepting on iSCSI L2 ring */
7930 bnx2x_set_storm_rx_mode(bp); 10140 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
10141 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
10142
10143 mmiowb();
10144 barrier();
10145
10146 /* Unset iSCSI L2 MAC */
10147 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
10148 break;
10149 }
10150 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
10151 int count = ctl->data.credit.credit_count;
10152
10153 smp_mb__before_atomic_inc();
10154 atomic_add(count, &bp->cq_spq_left);
10155 smp_mb__after_atomic_inc();
10156 break;
10157 }
10158
10159 case DRV_CTL_ISCSI_STOPPED_CMD: {
10160 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED);
7931 break; 10161 break;
7932 } 10162 }
7933 10163
@@ -7951,10 +10181,16 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
7951 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; 10181 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
7952 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; 10182 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
7953 } 10183 }
7954 cp->irq_arr[0].status_blk = bp->cnic_sb; 10184 if (CHIP_IS_E2(bp))
10185 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
10186 else
10187 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
10188
7955 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp); 10189 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
10190 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
7956 cp->irq_arr[1].status_blk = bp->def_status_blk; 10191 cp->irq_arr[1].status_blk = bp->def_status_blk;
7957 cp->irq_arr[1].status_blk_num = DEF_SB_ID; 10192 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
10193 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
7958 10194
7959 cp->num_irq = 2; 10195 cp->num_irq = 2;
7960} 10196}
@@ -7986,12 +10222,10 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
7986 10222
7987 cp->num_irq = 0; 10223 cp->num_irq = 0;
7988 cp->drv_state = CNIC_DRV_STATE_REGD; 10224 cp->drv_state = CNIC_DRV_STATE_REGD;
7989 10225 cp->iro_arr = bp->iro_arr;
7990 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
7991 10226
7992 bnx2x_setup_cnic_irq_info(bp); 10227 bnx2x_setup_cnic_irq_info(bp);
7993 bnx2x_set_iscsi_eth_mac_addr(bp, 1); 10228
7994 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7995 rcu_assign_pointer(bp->cnic_ops, ops); 10229 rcu_assign_pointer(bp->cnic_ops, ops);
7996 10230
7997 return 0; 10231 return 0;
@@ -8003,10 +10237,6 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
8003 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 10237 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8004 10238
8005 mutex_lock(&bp->cnic_mutex); 10239 mutex_lock(&bp->cnic_mutex);
8006 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8007 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8008 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8009 }
8010 cp->drv_state = 0; 10240 cp->drv_state = 0;
8011 rcu_assign_pointer(bp->cnic_ops, NULL); 10241 rcu_assign_pointer(bp->cnic_ops, NULL);
8012 mutex_unlock(&bp->cnic_mutex); 10242 mutex_unlock(&bp->cnic_mutex);
@@ -8022,21 +10252,48 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
8022 struct bnx2x *bp = netdev_priv(dev); 10252 struct bnx2x *bp = netdev_priv(dev);
8023 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 10253 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8024 10254
10255 /* If both iSCSI and FCoE are disabled - return NULL in
10256 * order to indicate CNIC that it should not try to work
10257 * with this device.
10258 */
10259 if (NO_ISCSI(bp) && NO_FCOE(bp))
10260 return NULL;
10261
8025 cp->drv_owner = THIS_MODULE; 10262 cp->drv_owner = THIS_MODULE;
8026 cp->chip_id = CHIP_ID(bp); 10263 cp->chip_id = CHIP_ID(bp);
8027 cp->pdev = bp->pdev; 10264 cp->pdev = bp->pdev;
8028 cp->io_base = bp->regview; 10265 cp->io_base = bp->regview;
8029 cp->io_base2 = bp->doorbells; 10266 cp->io_base2 = bp->doorbells;
8030 cp->max_kwqe_pending = 8; 10267 cp->max_kwqe_pending = 8;
8031 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context); 10268 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
8032 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1; 10269 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
10270 bnx2x_cid_ilt_lines(bp);
8033 cp->ctx_tbl_len = CNIC_ILT_LINES; 10271 cp->ctx_tbl_len = CNIC_ILT_LINES;
8034 cp->starting_cid = BCM_CNIC_CID_START; 10272 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
8035 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue; 10273 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
8036 cp->drv_ctl = bnx2x_drv_ctl; 10274 cp->drv_ctl = bnx2x_drv_ctl;
8037 cp->drv_register_cnic = bnx2x_register_cnic; 10275 cp->drv_register_cnic = bnx2x_register_cnic;
8038 cp->drv_unregister_cnic = bnx2x_unregister_cnic; 10276 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
8039 10277 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
10278 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
10279 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
10280 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
10281
10282 if (NO_ISCSI_OOO(bp))
10283 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
10284
10285 if (NO_ISCSI(bp))
10286 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
10287
10288 if (NO_FCOE(bp))
10289 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
10290
10291 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10292 "starting cid %d\n",
10293 cp->ctx_blk_size,
10294 cp->ctx_tbl_offset,
10295 cp->ctx_tbl_len,
10296 cp->starting_cid);
8040 return cp; 10297 return cp;
8041} 10298}
8042EXPORT_SYMBOL(bnx2x_cnic_probe); 10299EXPORT_SYMBOL(bnx2x_cnic_probe);
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index a1f3bf0cd630..86bba25d2d3f 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -1,6 +1,6 @@
1/* bnx2x_reg.h: Broadcom Everest network driver. 1/* bnx2x_reg.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2009 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -18,18 +18,57 @@
18 * WR - Write Clear (write 1 to clear the bit) 18 * WR - Write Clear (write 1 to clear the bit)
19 * 19 *
20 */ 20 */
21#ifndef BNX2X_REG_H
22#define BNX2X_REG_H
21 23
22 24#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
25#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
26#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU (0x1<<5)
27#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1<<3)
28#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1<<4)
29#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1)
30/* [RW 1] Initiate the ATC array - reset all the valid bits */
31#define ATC_REG_ATC_INIT_ARRAY 0x1100b8
32/* [R 1] ATC initalization done */
33#define ATC_REG_ATC_INIT_DONE 0x1100bc
34/* [RC 6] Interrupt register #0 read clear */
35#define ATC_REG_ATC_INT_STS_CLR 0x1101c0
36/* [RW 19] Interrupt mask register #0 read/write */
37#define BRB1_REG_BRB1_INT_MASK 0x60128
23/* [R 19] Interrupt register #0 read */ 38/* [R 19] Interrupt register #0 read */
24#define BRB1_REG_BRB1_INT_STS 0x6011c 39#define BRB1_REG_BRB1_INT_STS 0x6011c
25/* [RW 4] Parity mask register #0 read/write */ 40/* [RW 4] Parity mask register #0 read/write */
26#define BRB1_REG_BRB1_PRTY_MASK 0x60138 41#define BRB1_REG_BRB1_PRTY_MASK 0x60138
27/* [R 4] Parity register #0 read */ 42/* [R 4] Parity register #0 read */
28#define BRB1_REG_BRB1_PRTY_STS 0x6012c 43#define BRB1_REG_BRB1_PRTY_STS 0x6012c
44/* [RC 4] Parity register #0 read clear */
45#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130
29/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At 46/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
30 address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address 47 * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
31 BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. */ 48 * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
49 * following reset the first rbc access to this reg must be write; there can
50 * be no more rbc writes after the first one; there can be any number of rbc
51 * read following the first write; rbc access not following these rules will
52 * result in hang condition. */
32#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200 53#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200
54/* [RW 10] The number of free blocks below which the full signal to class 0
55 * is asserted */
56#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0
57/* [RW 10] The number of free blocks above which the full signal to class 0
58 * is de-asserted */
59#define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4
60/* [RW 10] The number of free blocks below which the full signal to class 1
61 * is asserted */
62#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8
63/* [RW 10] The number of free blocks above which the full signal to class 1
64 * is de-asserted */
65#define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc
66/* [RW 10] The number of free blocks below which the full signal to the LB
67 * port is asserted */
68#define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0
69/* [RW 10] The number of free blocks above which the full signal to the LB
70 * port is de-asserted */
71#define BRB1_REG_FULL_LB_XON_THRESHOLD 0x601e4
33/* [RW 10] The number of free blocks above which the High_llfc signal to 72/* [RW 10] The number of free blocks above which the High_llfc signal to
34 interface #n is de-asserted. */ 73 interface #n is de-asserted. */
35#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c 74#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c
@@ -44,6 +83,9 @@
44/* [RW 10] The number of free blocks below which the Low_llfc signal to 83/* [RW 10] The number of free blocks below which the Low_llfc signal to
45 interface #n is asserted. */ 84 interface #n is asserted. */
46#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c 85#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c
86/* [RW 10] The number of blocks guarantied for the MAC port */
87#define BRB1_REG_MAC_GUARANTIED_0 0x601e8
88#define BRB1_REG_MAC_GUARANTIED_1 0x60240
47/* [R 24] The number of full blocks. */ 89/* [R 24] The number of full blocks. */
48#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090 90#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090
49/* [ST 32] The number of cycles that the write_full signal towards MAC #0 91/* [ST 32] The number of cycles that the write_full signal towards MAC #0
@@ -55,7 +97,19 @@
55 asserted. */ 97 asserted. */
56#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8 98#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8
57#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc 99#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc
58/* [RW 10] Write client 0: De-assert pause threshold. */ 100/* [RW 10] The number of free blocks below which the pause signal to class 0
101 * is asserted */
102#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0
103/* [RW 10] The number of free blocks above which the pause signal to class 0
104 * is de-asserted */
105#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4
106/* [RW 10] The number of free blocks below which the pause signal to class 1
107 * is asserted */
108#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8
109/* [RW 10] The number of free blocks above which the pause signal to class 1
110 * is de-asserted */
111#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc
112/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */
59#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078 113#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
60#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c 114#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
61/* [RW 10] Write client 0: Assert pause threshold. */ 115/* [RW 10] Write client 0: Assert pause threshold. */
@@ -82,8 +136,12 @@
82#define CCM_REG_CCM_INT_MASK 0xd01e4 136#define CCM_REG_CCM_INT_MASK 0xd01e4
83/* [R 11] Interrupt register #0 read */ 137/* [R 11] Interrupt register #0 read */
84#define CCM_REG_CCM_INT_STS 0xd01d8 138#define CCM_REG_CCM_INT_STS 0xd01d8
139/* [RW 27] Parity mask register #0 read/write */
140#define CCM_REG_CCM_PRTY_MASK 0xd01f4
85/* [R 27] Parity register #0 read */ 141/* [R 27] Parity register #0 read */
86#define CCM_REG_CCM_PRTY_STS 0xd01e8 142#define CCM_REG_CCM_PRTY_STS 0xd01e8
143/* [RC 27] Parity register #0 read clear */
144#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec
87/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS 145/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
88 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 146 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
89 Is used to determine the number of the AG context REG-pairs written back; 147 Is used to determine the number of the AG context REG-pairs written back;
@@ -117,9 +175,9 @@
117 the initial credit value; read returns the current value of the credit 175 the initial credit value; read returns the current value of the credit
118 counter. Must be initialized to 1 at start-up. */ 176 counter. Must be initialized to 1 at start-up. */
119#define CCM_REG_CFC_INIT_CRD 0xd0204 177#define CCM_REG_CFC_INIT_CRD 0xd0204
120/* [RW 2] Auxillary counter flag Q number 1. */ 178/* [RW 2] Auxiliary counter flag Q number 1. */
121#define CCM_REG_CNT_AUX1_Q 0xd00c8 179#define CCM_REG_CNT_AUX1_Q 0xd00c8
122/* [RW 2] Auxillary counter flag Q number 2. */ 180/* [RW 2] Auxiliary counter flag Q number 2. */
123#define CCM_REG_CNT_AUX2_Q 0xd00cc 181#define CCM_REG_CNT_AUX2_Q 0xd00cc
124/* [RW 28] The CM header value for QM request (primary). */ 182/* [RW 28] The CM header value for QM request (primary). */
125#define CCM_REG_CQM_CCM_HDR_P 0xd008c 183#define CCM_REG_CQM_CCM_HDR_P 0xd008c
@@ -300,6 +358,8 @@
300#define CDU_REG_CDU_PRTY_MASK 0x10104c 358#define CDU_REG_CDU_PRTY_MASK 0x10104c
301/* [R 5] Parity register #0 read */ 359/* [R 5] Parity register #0 read */
302#define CDU_REG_CDU_PRTY_STS 0x101040 360#define CDU_REG_CDU_PRTY_STS 0x101040
361/* [RC 5] Parity register #0 read clear */
362#define CDU_REG_CDU_PRTY_STS_CLR 0x101044
303/* [RC 32] logging of error data in case of a CDU load error: 363/* [RC 32] logging of error data in case of a CDU load error:
304 {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error; 364 {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error;
305 ype_error; ctual_active; ctual_compressed_context}; */ 365 ype_error; ctual_active; ctual_compressed_context}; */
@@ -331,6 +391,8 @@
331#define CFC_REG_CFC_PRTY_MASK 0x104118 391#define CFC_REG_CFC_PRTY_MASK 0x104118
332/* [R 4] Parity register #0 read */ 392/* [R 4] Parity register #0 read */
333#define CFC_REG_CFC_PRTY_STS 0x10410c 393#define CFC_REG_CFC_PRTY_STS 0x10410c
394/* [RC 4] Parity register #0 read clear */
395#define CFC_REG_CFC_PRTY_STS_CLR 0x104110
334/* [RW 21] CID cam access (21:1 - Data; alid - 0) */ 396/* [RW 21] CID cam access (21:1 - Data; alid - 0) */
335#define CFC_REG_CID_CAM 0x104800 397#define CFC_REG_CID_CAM 0x104800
336#define CFC_REG_CONTROL0 0x104028 398#define CFC_REG_CONTROL0 0x104028
@@ -362,6 +424,7 @@
362#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004 424#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004
363/* [R 9] Number of Leaving LCIDs in Link List Block */ 425/* [R 9] Number of Leaving LCIDs in Link List Block */
364#define CFC_REG_NUM_LCIDS_LEAVING 0x104018 426#define CFC_REG_NUM_LCIDS_LEAVING 0x104018
427#define CFC_REG_WEAK_ENABLE_PF 0x104124
365/* [RW 8] The event id for aggregated interrupt 0 */ 428/* [RW 8] The event id for aggregated interrupt 0 */
366#define CSDM_REG_AGG_INT_EVENT_0 0xc2038 429#define CSDM_REG_AGG_INT_EVENT_0 0xc2038
367#define CSDM_REG_AGG_INT_EVENT_10 0xc2060 430#define CSDM_REG_AGG_INT_EVENT_10 0xc2060
@@ -394,13 +457,13 @@
394#define CSDM_REG_AGG_INT_MODE_9 0xc21dc 457#define CSDM_REG_AGG_INT_MODE_9 0xc21dc
395/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ 458/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
396#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008 459#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008
397/* [RW 16] The maximum value of the competion counter #0 */ 460/* [RW 16] The maximum value of the completion counter #0 */
398#define CSDM_REG_CMP_COUNTER_MAX0 0xc201c 461#define CSDM_REG_CMP_COUNTER_MAX0 0xc201c
399/* [RW 16] The maximum value of the competion counter #1 */ 462/* [RW 16] The maximum value of the completion counter #1 */
400#define CSDM_REG_CMP_COUNTER_MAX1 0xc2020 463#define CSDM_REG_CMP_COUNTER_MAX1 0xc2020
401/* [RW 16] The maximum value of the competion counter #2 */ 464/* [RW 16] The maximum value of the completion counter #2 */
402#define CSDM_REG_CMP_COUNTER_MAX2 0xc2024 465#define CSDM_REG_CMP_COUNTER_MAX2 0xc2024
403/* [RW 16] The maximum value of the competion counter #3 */ 466/* [RW 16] The maximum value of the completion counter #3 */
404#define CSDM_REG_CMP_COUNTER_MAX3 0xc2028 467#define CSDM_REG_CMP_COUNTER_MAX3 0xc2028
405/* [RW 13] The start address in the internal RAM for the completion 468/* [RW 13] The start address in the internal RAM for the completion
406 counters. */ 469 counters. */
@@ -415,6 +478,8 @@
415#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc 478#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
416/* [R 11] Parity register #0 read */ 479/* [R 11] Parity register #0 read */
417#define CSDM_REG_CSDM_PRTY_STS 0xc22b0 480#define CSDM_REG_CSDM_PRTY_STS 0xc22b0
481/* [RC 11] Parity register #0 read clear */
482#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4
418#define CSDM_REG_ENABLE_IN1 0xc2238 483#define CSDM_REG_ENABLE_IN1 0xc2238
419#define CSDM_REG_ENABLE_IN2 0xc223c 484#define CSDM_REG_ENABLE_IN2 0xc223c
420#define CSDM_REG_ENABLE_OUT1 0xc2240 485#define CSDM_REG_ENABLE_OUT1 0xc2240
@@ -505,6 +570,9 @@
505/* [R 32] Parity register #0 read */ 570/* [R 32] Parity register #0 read */
506#define CSEM_REG_CSEM_PRTY_STS_0 0x200124 571#define CSEM_REG_CSEM_PRTY_STS_0 0x200124
507#define CSEM_REG_CSEM_PRTY_STS_1 0x200134 572#define CSEM_REG_CSEM_PRTY_STS_1 0x200134
573/* [RC 32] Parity register #0 read clear */
574#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128
575#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138
508#define CSEM_REG_ENABLE_IN 0x2000a4 576#define CSEM_REG_ENABLE_IN 0x2000a4
509#define CSEM_REG_ENABLE_OUT 0x2000a8 577#define CSEM_REG_ENABLE_OUT 0x2000a8
510/* [RW 32] This address space contains all registers and memories that are 578/* [RW 32] This address space contains all registers and memories that are
@@ -590,10 +658,19 @@
590#define CSEM_REG_TS_8_AS 0x200058 658#define CSEM_REG_TS_8_AS 0x200058
591/* [RW 3] The arbitration scheme of time_slot 9 */ 659/* [RW 3] The arbitration scheme of time_slot 9 */
592#define CSEM_REG_TS_9_AS 0x20005c 660#define CSEM_REG_TS_9_AS 0x20005c
661/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
662 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
663#define CSEM_REG_VFPF_ERR_NUM 0x200380
593/* [RW 1] Parity mask register #0 read/write */ 664/* [RW 1] Parity mask register #0 read/write */
594#define DBG_REG_DBG_PRTY_MASK 0xc0a8 665#define DBG_REG_DBG_PRTY_MASK 0xc0a8
595/* [R 1] Parity register #0 read */ 666/* [R 1] Parity register #0 read */
596#define DBG_REG_DBG_PRTY_STS 0xc09c 667#define DBG_REG_DBG_PRTY_STS 0xc09c
668/* [RC 1] Parity register #0 read clear */
669#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0
670/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
671 * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
672 * 4.Completion function=0; 5.Error handling=0 */
673#define DMAE_REG_BACKWARD_COMP_EN 0x10207c
597/* [RW 32] Commands memory. The address to command X; row Y is to calculated 674/* [RW 32] Commands memory. The address to command X; row Y is to calculated
598 as 14*X+Y. */ 675 as 14*X+Y. */
599#define DMAE_REG_CMD_MEM 0x102400 676#define DMAE_REG_CMD_MEM 0x102400
@@ -610,6 +687,8 @@
610#define DMAE_REG_DMAE_PRTY_MASK 0x102064 687#define DMAE_REG_DMAE_PRTY_MASK 0x102064
611/* [R 4] Parity register #0 read */ 688/* [R 4] Parity register #0 read */
612#define DMAE_REG_DMAE_PRTY_STS 0x102058 689#define DMAE_REG_DMAE_PRTY_STS 0x102058
690/* [RC 4] Parity register #0 read clear */
691#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c
613/* [RW 1] Command 0 go. */ 692/* [RW 1] Command 0 go. */
614#define DMAE_REG_GO_C0 0x102080 693#define DMAE_REG_GO_C0 0x102080
615/* [RW 1] Command 1 go. */ 694/* [RW 1] Command 1 go. */
@@ -676,6 +755,8 @@
676#define DORQ_REG_DORQ_PRTY_MASK 0x170190 755#define DORQ_REG_DORQ_PRTY_MASK 0x170190
677/* [R 2] Parity register #0 read */ 756/* [R 2] Parity register #0 read */
678#define DORQ_REG_DORQ_PRTY_STS 0x170184 757#define DORQ_REG_DORQ_PRTY_STS 0x170184
758/* [RC 2] Parity register #0 read clear */
759#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188
679/* [RW 8] The address to write the DPM CID to STORM. */ 760/* [RW 8] The address to write the DPM CID to STORM. */
680#define DORQ_REG_DPM_CID_ADDR 0x170044 761#define DORQ_REG_DPM_CID_ADDR 0x170044
681/* [RW 5] The DPM mode CID extraction offset. */ 762/* [RW 5] The DPM mode CID extraction offset. */
@@ -742,9 +823,13 @@
742#define HC_REG_HC_PRTY_MASK 0x1080a0 823#define HC_REG_HC_PRTY_MASK 0x1080a0
743/* [R 3] Parity register #0 read */ 824/* [R 3] Parity register #0 read */
744#define HC_REG_HC_PRTY_STS 0x108094 825#define HC_REG_HC_PRTY_STS 0x108094
745#define HC_REG_INT_MASK 0x108108 826/* [RC 3] Parity register #0 read clear */
827#define HC_REG_HC_PRTY_STS_CLR 0x108098
828#define HC_REG_INT_MASK 0x108108
746#define HC_REG_LEADING_EDGE_0 0x108040 829#define HC_REG_LEADING_EDGE_0 0x108040
747#define HC_REG_LEADING_EDGE_1 0x108048 830#define HC_REG_LEADING_EDGE_1 0x108048
831#define HC_REG_MAIN_MEMORY 0x108800
832#define HC_REG_MAIN_MEMORY_SIZE 152
748#define HC_REG_P0_PROD_CONS 0x108200 833#define HC_REG_P0_PROD_CONS 0x108200
749#define HC_REG_P1_PROD_CONS 0x108400 834#define HC_REG_P1_PROD_CONS 0x108400
750#define HC_REG_PBA_COMMAND 0x108140 835#define HC_REG_PBA_COMMAND 0x108140
@@ -758,6 +843,96 @@
758#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068 843#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068
759#define HC_REG_VQID_0 0x108008 844#define HC_REG_VQID_0 0x108008
760#define HC_REG_VQID_1 0x10800c 845#define HC_REG_VQID_1 0x10800c
846#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1)
847#define IGU_REG_ATTENTION_ACK_BITS 0x130108
848/* [R 4] Debug: attn_fsm */
849#define IGU_REG_ATTN_FSM 0x130054
850#define IGU_REG_ATTN_MSG_ADDR_H 0x13011c
851#define IGU_REG_ATTN_MSG_ADDR_L 0x130120
852/* [R 4] Debug: [3] - attention write done message is pending (0-no pending;
853 * 1-pending). [2:0] = PFID. Pending means attention message was sent; but
854 * write done didn't receive. */
855#define IGU_REG_ATTN_WRITE_DONE_PENDING 0x130030
856#define IGU_REG_BLOCK_CONFIGURATION 0x130000
857#define IGU_REG_COMMAND_REG_32LSB_DATA 0x130124
858#define IGU_REG_COMMAND_REG_CTRL 0x13012c
859/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit
860 * is clear. The bits in this registers are set and clear via the producer
861 * command. Data valid only in addresses 0-4. all the rest are zero. */
862#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP 0x130200
863/* [R 5] Debug: ctrl_fsm */
864#define IGU_REG_CTRL_FSM 0x130064
865/* [R 1] data available for error memory. If this bit is clear do not red
866 * from error_handling_memory. */
867#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
868/* [RW 11] Parity mask register #0 read/write */
869#define IGU_REG_IGU_PRTY_MASK 0x1300a8
870/* [R 11] Parity register #0 read */
871#define IGU_REG_IGU_PRTY_STS 0x13009c
872/* [RC 11] Parity register #0 read clear */
873#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0
874/* [R 4] Debug: int_handle_fsm */
875#define IGU_REG_INT_HANDLE_FSM 0x130050
876#define IGU_REG_LEADING_EDGE_LATCH 0x130134
877/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid.
878 * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF
879 * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); */
880#define IGU_REG_MAPPING_MEMORY 0x131000
881#define IGU_REG_MAPPING_MEMORY_SIZE 136
882#define IGU_REG_PBA_STATUS_LSB 0x130138
883#define IGU_REG_PBA_STATUS_MSB 0x13013c
884#define IGU_REG_PCI_PF_MSI_EN 0x130140
885#define IGU_REG_PCI_PF_MSIX_EN 0x130144
886#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148
887/* [WB_R 32] Each bit represent the pending bits status for that SB. 0 = no
888 * pending; 1 = pending. Pendings means interrupt was asserted; and write
889 * done was not received. Data valid only in addresses 0-4. all the rest are
890 * zero. */
891#define IGU_REG_PENDING_BITS_STATUS 0x130300
892#define IGU_REG_PF_CONFIGURATION 0x130154
893/* [RW 20] producers only. E2 mode: address 0-135 match to the mapping
894 * memory; 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default
895 * prod; 139 PF3 default prod; 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod;
896 * 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod; 144-147 reserved. E1.5 mode
897 * - In backward compatible mode; for non default SB; each even line in the
898 * memory holds the U producer and each odd line hold the C producer. The
899 * first 128 producer are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The
900 * last 20 producers are for the DSB for each PF. each PF has five segments
901 * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
902 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods; */
903#define IGU_REG_PROD_CONS_MEMORY 0x132000
904/* [R 3] Debug: pxp_arb_fsm */
905#define IGU_REG_PXP_ARB_FSM 0x130068
906/* [RW 6] Write one for each bit will reset the appropriate memory. When the
907 * memory reset finished the appropriate bit will be clear. Bit 0 - mapping
908 * memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask register; Bit 3
909 * - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; */
910#define IGU_REG_RESET_MEMORIES 0x130158
911/* [R 4] Debug: sb_ctrl_fsm */
912#define IGU_REG_SB_CTRL_FSM 0x13004c
913#define IGU_REG_SB_INT_BEFORE_MASK_LSB 0x13015c
914#define IGU_REG_SB_INT_BEFORE_MASK_MSB 0x130160
915#define IGU_REG_SB_MASK_LSB 0x130164
916#define IGU_REG_SB_MASK_MSB 0x130168
917/* [RW 16] Number of command that were dropped without causing an interrupt
918 * due to: read access for WO BAR address; or write access for RO BAR
919 * address or any access for reserved address or PCI function error is set
920 * and address is not MSIX; PBA or cleanup */
921#define IGU_REG_SILENT_DROP 0x13016c
922/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 -
923 * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per
924 * PF; 68-71 number of ATTN messages per PF */
925#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT 0x130800
926/* [RW 32] Number of cycles the timer mask masking the IGU interrupt when a
927 * timer mask command arrives. Value must be bigger than 100. */
928#define IGU_REG_TIMER_MASKING_VALUE 0x13003c
929#define IGU_REG_TRAILING_EDGE_LATCH 0x130104
930#define IGU_REG_VF_CONFIGURATION 0x130170
931/* [WB_R 32] Each bit represent write done pending bits status for that SB
932 * (MSI/MSIX message was sent and write done was not received yet). 0 =
933 * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */
934#define IGU_REG_WRITE_DONE_PENDING 0x130480
935#define MCP_A_REG_MCPR_SCRATCH 0x3a0000
761#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424 936#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424
762#define MCP_REG_MCPR_NVM_ADDR 0x8640c 937#define MCP_REG_MCPR_NVM_ADDR 0x8640c
763#define MCP_REG_MCPR_NVM_CFG4 0x8642c 938#define MCP_REG_MCPR_NVM_CFG4 0x8642c
@@ -880,6 +1055,11 @@
880 rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched 1055 rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched
881 ump_tx_parity; [31] MCP Latched scpad_parity; */ 1056 ump_tx_parity; [31] MCP Latched scpad_parity; */
882#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458 1057#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458
1058/* [R 32] Read fifth 32 bit after inversion of function 0. Mapped as
1059 * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
1060 * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
1061 * CNIG attention (reserved); [7] CNIG parity (reserved); [31-8] Reserved; */
1062#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 0xa700
883/* [W 14] write to this register results with the clear of the latched 1063/* [W 14] write to this register results with the clear of the latched
884 signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in 1064 signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in
885 d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP 1065 d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP
@@ -1251,6 +1431,7 @@
1251#define MISC_REG_E1HMF_MODE 0xa5f8 1431#define MISC_REG_E1HMF_MODE 0xa5f8
1252/* [RW 32] Debug only: spare RW register reset by core reset */ 1432/* [RW 32] Debug only: spare RW register reset by core reset */
1253#define MISC_REG_GENERIC_CR_0 0xa460 1433#define MISC_REG_GENERIC_CR_0 0xa460
1434#define MISC_REG_GENERIC_CR_1 0xa464
1254/* [RW 32] Debug only: spare RW register reset by por reset */ 1435/* [RW 32] Debug only: spare RW register reset by por reset */
1255#define MISC_REG_GENERIC_POR_1 0xa474 1436#define MISC_REG_GENERIC_POR_1 0xa474
1256/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of 1437/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of
@@ -1347,6 +1528,8 @@
1347#define MISC_REG_MISC_PRTY_MASK 0xa398 1528#define MISC_REG_MISC_PRTY_MASK 0xa398
1348/* [R 1] Parity register #0 read */ 1529/* [R 1] Parity register #0 read */
1349#define MISC_REG_MISC_PRTY_STS 0xa38c 1530#define MISC_REG_MISC_PRTY_STS 0xa38c
1531/* [RC 1] Parity register #0 read clear */
1532#define MISC_REG_MISC_PRTY_STS_CLR 0xa390
1350#define MISC_REG_NIG_WOL_P0 0xa270 1533#define MISC_REG_NIG_WOL_P0 0xa270
1351#define MISC_REG_NIG_WOL_P1 0xa274 1534#define MISC_REG_NIG_WOL_P1 0xa274
1352/* [R 1] If set indicate that the pcie_rst_b was asserted without perst 1535/* [R 1] If set indicate that the pcie_rst_b was asserted without perst
@@ -1373,6 +1556,14 @@
1373#define MISC_REG_PLL_STORM_CTRL_2 0xa298 1556#define MISC_REG_PLL_STORM_CTRL_2 0xa298
1374#define MISC_REG_PLL_STORM_CTRL_3 0xa29c 1557#define MISC_REG_PLL_STORM_CTRL_3 0xa29c
1375#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0 1558#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0
1559/* [R 1] Status of 4 port mode enable input pin. */
1560#define MISC_REG_PORT4MODE_EN 0xa750
1561/* [RW 2] 4 port mode enable overwrite.[0] - Overwrite control; if it is 0 -
1562 * the port4mode_en output is equal to 4 port mode input pin; if it is 1 -
1563 * the port4mode_en output is equal to bit[1] of this register; [1] -
1564 * Overwrite value. If bit[0] of this register is 1 this is the value that
1565 * receives the port4mode_en output . */
1566#define MISC_REG_PORT4MODE_EN_OVWR 0xa720
1376/* [RW 32] reset reg#2; rite/read one = the specific block is out of reset; 1567/* [RW 32] reset reg#2; rite/read one = the specific block is out of reset;
1377 write/read zero = the specific block is in reset; addr 0-wr- the write 1568 write/read zero = the specific block is in reset; addr 0-wr- the write
1378 value will be written to the register; addr 1-set - one will be written 1569 value will be written to the register; addr 1-set - one will be written
@@ -1442,7 +1633,7 @@
1442 (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */ 1633 (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
1443#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc 1634#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
1444/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses 1635/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
1445 in this register. addres 0 - timer 1; address 1 - timer 2, ... address 7 - 1636 in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 -
1446 timer 8 */ 1637 timer 8 */
1447#define MISC_REG_SW_TIMER_VAL 0xa5c0 1638#define MISC_REG_SW_TIMER_VAL 0xa5c0
1448/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are 1639/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
@@ -1453,6 +1644,8 @@
1453#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4) 1644#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
1454#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2) 1645#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
1455#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3) 1646#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
1647#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0)
1648#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0)
1456#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0) 1649#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
1457#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9) 1650#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9)
1458#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15) 1651#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15)
@@ -1582,12 +1775,16 @@
1582 ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same 1775 ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same
1583 port */ 1776 port */
1584#define NIG_REG_LLFC_ENABLE_0 0x16208 1777#define NIG_REG_LLFC_ENABLE_0 0x16208
1778#define NIG_REG_LLFC_ENABLE_1 0x1620c
1585/* [RW 16] classes are high-priority for port0 */ 1779/* [RW 16] classes are high-priority for port0 */
1586#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058 1780#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058
1781#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 0x1605c
1587/* [RW 16] classes are low-priority for port0 */ 1782/* [RW 16] classes are low-priority for port0 */
1588#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060 1783#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060
1784#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 0x16064
1589/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */ 1785/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
1590#define NIG_REG_LLFC_OUT_EN_0 0x160c8 1786#define NIG_REG_LLFC_OUT_EN_0 0x160c8
1787#define NIG_REG_LLFC_OUT_EN_1 0x160cc
1591#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c 1788#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c
1592#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154 1789#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154
1593#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244 1790#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244
@@ -1612,6 +1809,8 @@
1612/* [RW 8] event id for llh0 */ 1809/* [RW 8] event id for llh0 */
1613#define NIG_REG_LLH0_EVENT_ID 0x10084 1810#define NIG_REG_LLH0_EVENT_ID 0x10084
1614#define NIG_REG_LLH0_FUNC_EN 0x160fc 1811#define NIG_REG_LLH0_FUNC_EN 0x160fc
1812#define NIG_REG_LLH0_FUNC_MEM 0x16180
1813#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140
1615#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100 1814#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
1616/* [RW 1] Determine the IP version to look for in 1815/* [RW 1] Determine the IP version to look for in
1617 ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */ 1816 ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
@@ -1635,6 +1834,9 @@
1635#define NIG_REG_LLH1_ERROR_MASK 0x10090 1834#define NIG_REG_LLH1_ERROR_MASK 0x10090
1636/* [RW 8] event id for llh1 */ 1835/* [RW 8] event id for llh1 */
1637#define NIG_REG_LLH1_EVENT_ID 0x10088 1836#define NIG_REG_LLH1_EVENT_ID 0x10088
1837#define NIG_REG_LLH1_FUNC_MEM 0x161c0
1838#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
1839#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
1638/* [RW 8] init credit counter for port1 in LLH */ 1840/* [RW 8] init credit counter for port1 in LLH */
1639#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564 1841#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
1640#define NIG_REG_LLH1_XCM_MASK 0x10134 1842#define NIG_REG_LLH1_XCM_MASK 0x10134
@@ -1656,17 +1858,106 @@
1656/* [R 32] Interrupt register #0 read */ 1858/* [R 32] Interrupt register #0 read */
1657#define NIG_REG_NIG_INT_STS_0 0x103b0 1859#define NIG_REG_NIG_INT_STS_0 0x103b0
1658#define NIG_REG_NIG_INT_STS_1 0x103c0 1860#define NIG_REG_NIG_INT_STS_1 0x103c0
1659/* [R 32] Parity register #0 read */ 1861/* [R 32] Legacy E1 and E1H location for parity error status register. */
1660#define NIG_REG_NIG_PRTY_STS 0x103d0 1862#define NIG_REG_NIG_PRTY_STS 0x103d0
1863/* [R 32] Parity register #0 read */
1864#define NIG_REG_NIG_PRTY_STS_0 0x183bc
1865#define NIG_REG_NIG_PRTY_STS_1 0x183cc
1866/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
1867 * Ethernet header. */
1868#define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038
1869/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in
1870 * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be
1871 * disabled when this bit is set. */
1872#define NIG_REG_P0_HWPFC_ENABLE 0x18078
1873#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480
1874#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440
1875/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
1876 * future expansion) each priorty is to be mapped to. Bits 3:0 specify the
1877 * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
1878 * priority field is extracted from the outer-most VLAN in receive packet.
1879 * Only COS 0 and COS 1 are supported in E2. */
1880#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054
1881/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
1882 * priority is mapped to COS 0 when the corresponding mask bit is 1. More
1883 * than one bit may be set; allowing multiple priorities to be mapped to one
1884 * COS. */
1885#define NIG_REG_P0_RX_COS0_PRIORITY_MASK 0x18058
1886/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
1887 * priority is mapped to COS 1 when the corresponding mask bit is 1. More
1888 * than one bit may be set; allowing multiple priorities to be mapped to one
1889 * COS. */
1890#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c
1891/* [RW 15] Specify which of the credit registers the client is to be mapped
1892 * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For
1893 * clients that are not subject to WFQ credit blocking - their
1894 * specifications here are not used. */
1895#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0
1896/* [RW 5] Specify whether the client competes directly in the strict
1897 * priority arbiter. The bits are mapped according to client ID (client IDs
1898 * are defined in tx_arb_priority_client). Default value is set to enable
1899 * strict priorities for clients 0-2 -- management and debug traffic. */
1900#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT 0x180e8
1901/* [RW 5] Specify whether the client is subject to WFQ credit blocking. The
1902 * bits are mapped according to client ID (client IDs are defined in
1903 * tx_arb_priority_client). Default value is 0 for not using WFQ credit
1904 * blocking. */
1905#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x180ec
1906/* [RW 32] Specify the upper bound that credit register 0 is allowed to
1907 * reach. */
1908#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c
1909#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110
1910/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
1911 * when it is time to increment. */
1912#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8
1913#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc
1914/* [RW 12] Specify the number of strict priority arbitration slots between
1915 * two round-robin arbitration slots to avoid starvation. A value of 0 means
1916 * no strict priority cycles - the strict priority with anti-starvation
1917 * arbiter becomes a round-robin arbiter. */
1918#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS 0x180f4
1919/* [RW 15] Specify the client number to be assigned to each priority of the
1920 * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0]
1921 * are for priority 0 client; bits [14:12] are for priority 4 client. The
1922 * clients are assigned the following IDs: 0-management; 1-debug traffic
1923 * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
1924 * traffic. The reset value[14:0] is set to 0x4688 (15'b100_011_010_001_000)
1925 * for management at priority 0; debug traffic at priorities 1 and 2; COS0
1926 * traffic at priority 3; and COS1 traffic at priority 4. */
1927#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4
1928#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0
1929#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460
1930/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
1931 * future expansion) each priorty is to be mapped to. Bits 3:0 specify the
1932 * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
1933 * priority field is extracted from the outer-most VLAN in receive packet.
1934 * Only COS 0 and COS 1 are supported in E2. */
1935#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8
1936/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
1937 * priority is mapped to COS 0 when the corresponding mask bit is 1. More
1938 * than one bit may be set; allowing multiple priorities to be mapped to one
1939 * COS. */
1940#define NIG_REG_P1_RX_COS0_PRIORITY_MASK 0x181ac
1941/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
1942 * priority is mapped to COS 1 when the corresponding mask bit is 1. More
1943 * than one bit may be set; allowing multiple priorities to be mapped to one
1944 * COS. */
1945#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0
1661/* [RW 1] Pause enable for port0. This register may get 1 only when 1946/* [RW 1] Pause enable for port0. This register may get 1 only when
1662 ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same 1947 ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
1663 port */ 1948 port */
1664#define NIG_REG_PAUSE_ENABLE_0 0x160c0 1949#define NIG_REG_PAUSE_ENABLE_0 0x160c0
1950#define NIG_REG_PAUSE_ENABLE_1 0x160c4
1665/* [RW 1] Input enable for RX PBF LP IF */ 1951/* [RW 1] Input enable for RX PBF LP IF */
1666#define NIG_REG_PBF_LB_IN_EN 0x100b4 1952#define NIG_REG_PBF_LB_IN_EN 0x100b4
1667/* [RW 1] Value of this register will be transmitted to port swap when 1953/* [RW 1] Value of this register will be transmitted to port swap when
1668 ~nig_registers_strap_override.strap_override =1 */ 1954 ~nig_registers_strap_override.strap_override =1 */
1669#define NIG_REG_PORT_SWAP 0x10394 1955#define NIG_REG_PORT_SWAP 0x10394
1956/* [RW 1] PPP enable for port0. This register may get 1 only when
1957 * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable =0 for the
1958 * same port */
1959#define NIG_REG_PPP_ENABLE_0 0x160b0
1960#define NIG_REG_PPP_ENABLE_1 0x160b4
1670/* [RW 1] output enable for RX parser descriptor IF */ 1961/* [RW 1] output enable for RX parser descriptor IF */
1671#define NIG_REG_PRS_EOP_OUT_EN 0x10104 1962#define NIG_REG_PRS_EOP_OUT_EN 0x10104
1672/* [RW 1] Input enable for RX parser request IF */ 1963/* [RW 1] Input enable for RX parser request IF */
@@ -1733,6 +2024,14 @@
1733#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15) 2024#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15)
1734#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18) 2025#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18)
1735#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18 2026#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
2027/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */
2028#define PBF_REG_COS0_UPPER_BOUND 0x15c05c
2029/* [RW 31] The weight of COS0 in the ETS command arbiter. */
2030#define PBF_REG_COS0_WEIGHT 0x15c054
2031/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */
2032#define PBF_REG_COS1_UPPER_BOUND 0x15c060
2033/* [RW 31] The weight of COS1 in the ETS command arbiter. */
2034#define PBF_REG_COS1_WEIGHT 0x15c058
1736/* [RW 1] Disable processing further tasks from port 0 (after ending the 2035/* [RW 1] Disable processing further tasks from port 0 (after ending the
1737 current task in process). */ 2036 current task in process). */
1738#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c 2037#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c
@@ -1742,6 +2041,17 @@
1742/* [RW 1] Disable processing further tasks from port 4 (after ending the 2041/* [RW 1] Disable processing further tasks from port 4 (after ending the
1743 current task in process). */ 2042 current task in process). */
1744#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c 2043#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
2044#define PBF_REG_DISABLE_PF 0x1402e8
2045/* [RW 1] Indicates that ETS is performed between the COSes in the command
2046 * arbiter. If reset strict priority w/ anti-starvation will be performed
2047 * w/o WFQ. */
2048#define PBF_REG_ETS_ENABLED 0x15c050
2049/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
2050 * Ethernet header. */
2051#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
2052/* [RW 1] Indicates which COS is conncted to the highest priority in the
2053 * command arbiter. */
2054#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c
1745#define PBF_REG_IF_ENABLE_REG 0x140044 2055#define PBF_REG_IF_ENABLE_REG 0x140044
1746/* [RW 1] Init bit. When set the initial credits are copied to the credit 2056/* [RW 1] Init bit. When set the initial credits are copied to the credit
1747 registers (except the port credits). Should be set and then reset after 2057 registers (except the port credits). Should be set and then reset after
@@ -1765,6 +2075,12 @@
1765#define PBF_REG_MAC_IF1_ENABLE 0x140034 2075#define PBF_REG_MAC_IF1_ENABLE 0x140034
1766/* [RW 1] Enable for the loopback interface. */ 2076/* [RW 1] Enable for the loopback interface. */
1767#define PBF_REG_MAC_LB_ENABLE 0x140040 2077#define PBF_REG_MAC_LB_ENABLE 0x140040
2078/* [RW 6] Bit-map indicating which headers must appear in the packet */
2079#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4
2080/* [RW 16] The number of strict priority arbitration slots between 2 RR
2081 * arbitration slots. A value of 0 means no strict priority cycles; i.e. the
2082 * strict-priority w/ anti-starvation arbiter is a RR arbiter. */
2083#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064
1768/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause 2084/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause
1769 not suppoterd. */ 2085 not suppoterd. */
1770#define PBF_REG_P0_ARB_THRSH 0x1400e4 2086#define PBF_REG_P0_ARB_THRSH 0x1400e4
@@ -1795,6 +2111,10 @@
1795#define PBF_REG_PBF_INT_MASK 0x1401d4 2111#define PBF_REG_PBF_INT_MASK 0x1401d4
1796/* [R 5] Interrupt register #0 read */ 2112/* [R 5] Interrupt register #0 read */
1797#define PBF_REG_PBF_INT_STS 0x1401c8 2113#define PBF_REG_PBF_INT_STS 0x1401c8
2114/* [RW 20] Parity mask register #0 read/write */
2115#define PBF_REG_PBF_PRTY_MASK 0x1401e4
2116/* [RC 20] Parity register #0 read clear */
2117#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
1798#define PB_REG_CONTROL 0 2118#define PB_REG_CONTROL 0
1799/* [RW 2] Interrupt mask register #0 read/write */ 2119/* [RW 2] Interrupt mask register #0 read/write */
1800#define PB_REG_PB_INT_MASK 0x28 2120#define PB_REG_PB_INT_MASK 0x28
@@ -1804,6 +2124,261 @@
1804#define PB_REG_PB_PRTY_MASK 0x38 2124#define PB_REG_PB_PRTY_MASK 0x38
1805/* [R 4] Parity register #0 read */ 2125/* [R 4] Parity register #0 read */
1806#define PB_REG_PB_PRTY_STS 0x2c 2126#define PB_REG_PB_PRTY_STS 0x2c
2127/* [RC 4] Parity register #0 read clear */
2128#define PB_REG_PB_PRTY_STS_CLR 0x30
2129#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
2130#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
2131#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
2132#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN (0x1<<6)
2133#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN (0x1<<7)
2134#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN (0x1<<4)
2135#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN (0x1<<3)
2136#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN (0x1<<5)
2137#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN (0x1<<2)
2138/* [R 8] Config space A attention dirty bits. Each bit indicates that the
2139 * corresponding PF generates config space A attention. Set by PXP. Reset by
2140 * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits
2141 * from both paths. */
2142#define PGLUE_B_REG_CFG_SPACE_A_REQUEST 0x9010
2143/* [R 8] Config space B attention dirty bits. Each bit indicates that the
2144 * corresponding PF generates config space B attention. Set by PXP. Reset by
2145 * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits
2146 * from both paths. */
2147#define PGLUE_B_REG_CFG_SPACE_B_REQUEST 0x9014
2148/* [RW 1] Type A PF enable inbound interrupt table for CSDM. 0 - disable; 1
2149 * - enable. */
2150#define PGLUE_B_REG_CSDM_INB_INT_A_PF_ENABLE 0x9194
2151/* [RW 18] Type B VF inbound interrupt table for CSDM: bits[17:9]-mask;
2152 * its[8:0]-address. Bits [1:0] must be zero (DW resolution address). */
2153#define PGLUE_B_REG_CSDM_INB_INT_B_VF 0x916c
2154/* [RW 1] Type B VF enable inbound interrupt table for CSDM. 0 - disable; 1
2155 * - enable. */
2156#define PGLUE_B_REG_CSDM_INB_INT_B_VF_ENABLE 0x919c
2157/* [RW 16] Start offset of CSDM zone A (queue zone) in the internal RAM */
2158#define PGLUE_B_REG_CSDM_START_OFFSET_A 0x9100
2159/* [RW 16] Start offset of CSDM zone B (legacy zone) in the internal RAM */
2160#define PGLUE_B_REG_CSDM_START_OFFSET_B 0x9108
2161/* [RW 5] VF Shift of CSDM zone B (legacy zone) in the internal RAM */
2162#define PGLUE_B_REG_CSDM_VF_SHIFT_B 0x9110
2163/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2164#define PGLUE_B_REG_CSDM_ZONE_A_SIZE_PF 0x91ac
2165/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates
2166 * that the FLR register of the corresponding PF was set. Set by PXP. Reset
2167 * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits
2168 * from both paths. */
2169#define PGLUE_B_REG_FLR_REQUEST_PF_7_0 0x9028
2170/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1
2171 * to a bit in this register in order to clear the corresponding bit in
2172 * flr_request_pf_7_0 register. Note: register contains bits from both
2173 * paths. */
2174#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR 0x9418
2175/* [R 32] FLR request attention dirty bits for VFs 96 to 127. Each bit
2176 * indicates that the FLR register of the corresponding VF was set. Set by
2177 * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. */
2178#define PGLUE_B_REG_FLR_REQUEST_VF_127_96 0x9024
2179/* [R 32] FLR request attention dirty bits for VFs 0 to 31. Each bit
2180 * indicates that the FLR register of the corresponding VF was set. Set by
2181 * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. */
2182#define PGLUE_B_REG_FLR_REQUEST_VF_31_0 0x9018
2183/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit
2184 * indicates that the FLR register of the corresponding VF was set. Set by
2185 * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. */
2186#define PGLUE_B_REG_FLR_REQUEST_VF_63_32 0x901c
2187/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit
2188 * indicates that the FLR register of the corresponding VF was set. Set by
2189 * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. */
2190#define PGLUE_B_REG_FLR_REQUEST_VF_95_64 0x9020
2191/* [R 8] Each bit indicates an incorrect behavior in user RX interface. Bit
2192 * 0 - Target memory read arrived with a correctable error. Bit 1 - Target
2193 * memory read arrived with an uncorrectable error. Bit 2 - Configuration RW
2194 * arrived with a correctable error. Bit 3 - Configuration RW arrived with
2195 * an uncorrectable error. Bit 4 - Completion with Configuration Request
2196 * Retry Status. Bit 5 - Expansion ROM access received with a write request.
2197 * Bit 6 - Completion with pcie_rx_err of 0000; CMPL_STATUS of non-zero; and
2198 * pcie_rx_last not asserted. Bit 7 - Completion with pcie_rx_err of 1010;
2199 * and pcie_rx_last not asserted. */
2200#define PGLUE_B_REG_INCORRECT_RCV_DETAILS 0x9068
2201#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER 0x942c
2202#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430
2203#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434
2204#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438
2205/* [R 9] Interrupt register #0 read */
2206#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298
2207/* [RC 9] Interrupt register #0 read clear */
2208#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c
2209/* [R 2] Parity register #0 read */
2210#define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8
2211/* [R 13] Details of first request received with error. [2:0] - PFID. [3] -
2212 * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion
2213 * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 -
2214 * completer abort. 3 - Illegal value for this field. [12] valid - indicates
2215 * if there was a completion error since the last time this register was
2216 * cleared. */
2217#define PGLUE_B_REG_RX_ERR_DETAILS 0x9080
2218/* [R 18] Details of first ATS Translation Completion request received with
2219 * error. [2:0] - PFID. [3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code -
2220 * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 -
2221 * unsupported request. 2 - completer abort. 3 - Illegal value for this
2222 * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a
2223 * completion error since the last time this register was cleared. */
2224#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS 0x9084
2225/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to
2226 * a bit in this register in order to clear the corresponding bit in
2227 * shadow_bme_pf_7_0 register. MCP should never use this unless a
2228 * work-around is needed. Note: register contains bits from both paths. */
2229#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR 0x9458
2230/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the
2231 * VF enable register of the corresponding PF is written to 0 and was
2232 * previously 1. Set by PXP. Reset by MCP writing 1 to
2233 * sr_iov_disabled_request_clr. Note: register contains bits from both
2234 * paths. */
2235#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x9030
2236/* [R 32] Indicates the status of tags 32-63. 0 - tags is used - read
2237 * completion did not return yet. 1 - tag is unused. Same functionality as
2238 * pxp2_registers_pgl_exp_rom_data2 for tags 0-31. */
2239#define PGLUE_B_REG_TAGS_63_32 0x9244
2240/* [RW 1] Type A PF enable inbound interrupt table for TSDM. 0 - disable; 1
2241 * - enable. */
2242#define PGLUE_B_REG_TSDM_INB_INT_A_PF_ENABLE 0x9170
2243/* [RW 16] Start offset of TSDM zone A (queue zone) in the internal RAM */
2244#define PGLUE_B_REG_TSDM_START_OFFSET_A 0x90c4
2245/* [RW 16] Start offset of TSDM zone B (legacy zone) in the internal RAM */
2246#define PGLUE_B_REG_TSDM_START_OFFSET_B 0x90cc
2247/* [RW 5] VF Shift of TSDM zone B (legacy zone) in the internal RAM */
2248#define PGLUE_B_REG_TSDM_VF_SHIFT_B 0x90d4
2249/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2250#define PGLUE_B_REG_TSDM_ZONE_A_SIZE_PF 0x91a0
2251/* [R 32] Address [31:0] of first read request not submitted due to error */
2252#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x9098
2253/* [R 32] Address [63:32] of first read request not submitted due to error */
2254#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x909c
2255/* [R 31] Details of first read request not submitted due to error. [4:0]
2256 * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request.
2257 * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] -
2258 * VFID. */
2259#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x90a0
2260/* [R 26] Details of first read request not submitted due to error. [15:0]
2261 * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
2262 * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
2263 * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
2264 * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
2265 * indicates if there was a request not submitted due to error since the
2266 * last time this register was cleared. */
2267#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x90a4
2268/* [R 32] Address [31:0] of first write request not submitted due to error */
2269#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x9088
2270/* [R 32] Address [63:32] of first write request not submitted due to error */
2271#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x908c
2272/* [R 31] Details of first write request not submitted due to error. [4:0]
2273 * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25]
2274 * - VFID. */
2275#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x9090
2276/* [R 26] Details of first write request not submitted due to error. [15:0]
2277 * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
2278 * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
2279 * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
2280 * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
2281 * indicates if there was a request not submitted due to error since the
2282 * last time this register was cleared. */
2283#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x9094
2284/* [RW 10] Type A PF/VF inbound interrupt table for USDM: bits[9:5]-mask;
2285 * its[4:0]-address relative to start_offset_a. Bits [1:0] can have any
2286 * value (Byte resolution address). */
2287#define PGLUE_B_REG_USDM_INB_INT_A_0 0x9128
2288#define PGLUE_B_REG_USDM_INB_INT_A_1 0x912c
2289#define PGLUE_B_REG_USDM_INB_INT_A_2 0x9130
2290#define PGLUE_B_REG_USDM_INB_INT_A_3 0x9134
2291#define PGLUE_B_REG_USDM_INB_INT_A_4 0x9138
2292#define PGLUE_B_REG_USDM_INB_INT_A_5 0x913c
2293#define PGLUE_B_REG_USDM_INB_INT_A_6 0x9140
2294/* [RW 1] Type A PF enable inbound interrupt table for USDM. 0 - disable; 1
2295 * - enable. */
2296#define PGLUE_B_REG_USDM_INB_INT_A_PF_ENABLE 0x917c
2297/* [RW 1] Type A VF enable inbound interrupt table for USDM. 0 - disable; 1
2298 * - enable. */
2299#define PGLUE_B_REG_USDM_INB_INT_A_VF_ENABLE 0x9180
2300/* [RW 1] Type B VF enable inbound interrupt table for USDM. 0 - disable; 1
2301 * - enable. */
2302#define PGLUE_B_REG_USDM_INB_INT_B_VF_ENABLE 0x9184
2303/* [RW 16] Start offset of USDM zone A (queue zone) in the internal RAM */
2304#define PGLUE_B_REG_USDM_START_OFFSET_A 0x90d8
2305/* [RW 16] Start offset of USDM zone B (legacy zone) in the internal RAM */
2306#define PGLUE_B_REG_USDM_START_OFFSET_B 0x90e0
2307/* [RW 5] VF Shift of USDM zone B (legacy zone) in the internal RAM */
2308#define PGLUE_B_REG_USDM_VF_SHIFT_B 0x90e8
2309/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2310#define PGLUE_B_REG_USDM_ZONE_A_SIZE_PF 0x91a4
2311/* [R 26] Details of first target VF request accessing VF GRC space that
2312 * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write.
2313 * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a
2314 * request accessing VF GRC space that failed permission check since the
2315 * last time this register was cleared. Permission checks are: function
2316 * permission; R/W permission; address range permission. */
2317#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS 0x9234
2318/* [R 31] Details of first target VF request with length violation (too many
2319 * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address).
2320 * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30]
2321 * valid - indicates if there was a request with length violation since the
2322 * last time this register was cleared. Length violations: length of more
2323 * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and
2324 * length is more than 1 DW. */
2325#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS 0x9230
2326/* [R 8] Was_error indication dirty bits for PFs 0 to 7. Each bit indicates
2327 * that there was a completion with uncorrectable error for the
2328 * corresponding PF. Set by PXP. Reset by MCP writing 1 to
2329 * was_error_pf_7_0_clr. */
2330#define PGLUE_B_REG_WAS_ERROR_PF_7_0 0x907c
2331/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1
2332 * to a bit in this register in order to clear the corresponding bit in
2333 * flr_request_pf_7_0 register. */
2334#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR 0x9470
2335/* [R 32] Was_error indication dirty bits for VFs 96 to 127. Each bit
2336 * indicates that there was a completion with uncorrectable error for the
2337 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2338 * was_error_vf_127_96_clr. */
2339#define PGLUE_B_REG_WAS_ERROR_VF_127_96 0x9078
2340/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP
2341 * writes 1 to a bit in this register in order to clear the corresponding
2342 * bit in was_error_vf_127_96 register. */
2343#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR 0x9474
2344/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit
2345 * indicates that there was a completion with uncorrectable error for the
2346 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2347 * was_error_vf_31_0_clr. */
2348#define PGLUE_B_REG_WAS_ERROR_VF_31_0 0x906c
2349/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes
2350 * 1 to a bit in this register in order to clear the corresponding bit in
2351 * was_error_vf_31_0 register. */
2352#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x9478
2353/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit
2354 * indicates that there was a completion with uncorrectable error for the
2355 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2356 * was_error_vf_63_32_clr. */
2357#define PGLUE_B_REG_WAS_ERROR_VF_63_32 0x9070
2358/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes
2359 * 1 to a bit in this register in order to clear the corresponding bit in
2360 * was_error_vf_63_32 register. */
2361#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR 0x947c
2362/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit
2363 * indicates that there was a completion with uncorrectable error for the
2364 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2365 * was_error_vf_95_64_clr. */
2366#define PGLUE_B_REG_WAS_ERROR_VF_95_64 0x9074
2367/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes
2368 * 1 to a bit in this register in order to clear the corresponding bit in
2369 * was_error_vf_95_64 register. */
2370#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR 0x9480
2371/* [RW 1] Type A PF enable inbound interrupt table for XSDM. 0 - disable; 1
2372 * - enable. */
2373#define PGLUE_B_REG_XSDM_INB_INT_A_PF_ENABLE 0x9188
2374/* [RW 16] Start offset of XSDM zone A (queue zone) in the internal RAM */
2375#define PGLUE_B_REG_XSDM_START_OFFSET_A 0x90ec
2376/* [RW 16] Start offset of XSDM zone B (legacy zone) in the internal RAM */
2377#define PGLUE_B_REG_XSDM_START_OFFSET_B 0x90f4
2378/* [RW 5] VF Shift of XSDM zone B (legacy zone) in the internal RAM */
2379#define PGLUE_B_REG_XSDM_VF_SHIFT_B 0x90fc
2380/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2381#define PGLUE_B_REG_XSDM_ZONE_A_SIZE_PF 0x91a8
1807#define PRS_REG_A_PRSU_20 0x40134 2382#define PRS_REG_A_PRSU_20 0x40134
1808/* [R 8] debug only: CFC load request current credit. Transaction based. */ 2383/* [R 8] debug only: CFC load request current credit. Transaction based. */
1809#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164 2384#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164
@@ -1866,9 +2441,13 @@
1866#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018 2441#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018
1867#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c 2442#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c
1868#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020 2443#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020
2444/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
2445 * Ethernet header. */
2446#define PRS_REG_HDRS_AFTER_BASIC 0x40238
1869/* [RW 4] The increment value to send in the CFC load request message */ 2447/* [RW 4] The increment value to send in the CFC load request message */
1870#define PRS_REG_INC_VALUE 0x40048 2448#define PRS_REG_INC_VALUE 0x40048
1871/* [RW 1] If set indicates not to send messages to CFC on received packets */ 2449/* [RW 6] Bit-map indicating which headers must appear in the packet */
2450#define PRS_REG_MUST_HAVE_HDRS 0x40254
1872#define PRS_REG_NIC_MODE 0x40138 2451#define PRS_REG_NIC_MODE 0x40138
1873/* [RW 8] The 8-bit event ID for cases where there is no match on the 2452/* [RW 8] The 8-bit event ID for cases where there is no match on the
1874 connection. Used in packet start message to TCM. */ 2453 connection. Used in packet start message to TCM. */
@@ -1902,6 +2481,8 @@
1902#define PRS_REG_PRS_PRTY_MASK 0x401a4 2481#define PRS_REG_PRS_PRTY_MASK 0x401a4
1903/* [R 8] Parity register #0 read */ 2482/* [R 8] Parity register #0 read */
1904#define PRS_REG_PRS_PRTY_STS 0x40198 2483#define PRS_REG_PRS_PRTY_STS 0x40198
2484/* [RC 8] Parity register #0 read clear */
2485#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c
1905/* [RW 8] Context region for pure acknowledge packets. Used in CFC load 2486/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
1906 request message */ 2487 request message */
1907#define PRS_REG_PURE_REGIONS 0x40024 2488#define PRS_REG_PURE_REGIONS 0x40024
@@ -1919,6 +2500,13 @@
1919#define PRS_REG_TCM_CURRENT_CREDIT 0x40160 2500#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
1920/* [R 8] debug only: TSDM current credit. Transaction based. */ 2501/* [R 8] debug only: TSDM current credit. Transaction based. */
1921#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c 2502#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c
2503#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19)
2504#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20)
2505#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22)
2506#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED (0x1<<23)
2507#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED (0x1<<24)
2508#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
2509#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
1922/* [R 6] Debug only: Number of used entries in the data FIFO */ 2510/* [R 6] Debug only: Number of used entries in the data FIFO */
1923#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c 2511#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
1924/* [R 7] Debug only: Number of used entries in the header FIFO */ 2512/* [R 7] Debug only: Number of used entries in the header FIFO */
@@ -2048,6 +2636,9 @@
2048/* [R 32] Parity register #0 read */ 2636/* [R 32] Parity register #0 read */
2049#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c 2637#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
2050#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c 2638#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
2639/* [RC 32] Parity register #0 read clear */
2640#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580
2641#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590
2051/* [R 1] Debug only: The 'almost full' indication from each fifo (gives 2642/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
2052 indication about backpressure) */ 2643 indication about backpressure) */
2053#define PXP2_REG_RD_ALMOST_FULL_0 0x120424 2644#define PXP2_REG_RD_ALMOST_FULL_0 0x120424
@@ -2244,8 +2835,17 @@
2244/* [RW 1] When '1'; requests will enter input buffers but wont get out 2835/* [RW 1] When '1'; requests will enter input buffers but wont get out
2245 towards the glue */ 2836 towards the glue */
2246#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330 2837#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330
2247/* [RW 1] 1 - SR will be aligned by 64B; 0 - SR will be aligned by 8B */ 2838/* [RW 4] Determines alignment of write SRs when a request is split into
2839 * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
2840 * aligned. 4 - 512B aligned. */
2248#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0 2841#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0
2842/* [RW 4] Determines alignment of read SRs when a request is split into
2843 * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
2844 * aligned. 4 - 512B aligned. */
2845#define PXP2_REG_RQ_DRAM_ALIGN_RD 0x12092c
2846/* [RW 1] when set the new alignment method (E2) will be applied; when reset
2847 * the original alignment method (E1 E1H) will be applied */
2848#define PXP2_REG_RQ_DRAM_ALIGN_SEL 0x120930
2249/* [RW 1] If 1 ILT failiue will not result in ELT access; An interrupt will 2849/* [RW 1] If 1 ILT failiue will not result in ELT access; An interrupt will
2250 be asserted */ 2850 be asserted */
2251#define PXP2_REG_RQ_ELT_DISABLE 0x12066c 2851#define PXP2_REG_RQ_ELT_DISABLE 0x12066c
@@ -2415,7 +3015,7 @@
2415 block. Should be used for close the gates. */ 3015 block. Should be used for close the gates. */
2416#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4 3016#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4
2417/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit 3017/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
2418 should update accoring to 'hst_discard_doorbells' register when the state 3018 should update according to 'hst_discard_doorbells' register when the state
2419 machine is idle */ 3019 machine is idle */
2420#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0 3020#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
2421/* [RW 1] When 1; new internal writes arriving to the block are discarded. 3021/* [RW 1] When 1; new internal writes arriving to the block are discarded.
@@ -2423,7 +3023,7 @@
2423#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8 3023#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
2424/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1' 3024/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
2425 means this PSWHST is discarding inputs from this client. Each bit should 3025 means this PSWHST is discarding inputs from this client. Each bit should
2426 update accoring to 'hst_discard_internal_writes' register when the state 3026 update according to 'hst_discard_internal_writes' register when the state
2427 machine is idle. */ 3027 machine is idle. */
2428#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c 3028#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c
2429/* [WB 160] Used for initialization of the inbound interrupts memory */ 3029/* [WB 160] Used for initialization of the inbound interrupts memory */
@@ -2436,10 +3036,13 @@
2436#define PXP_REG_PXP_INT_STS_1 0x103078 3036#define PXP_REG_PXP_INT_STS_1 0x103078
2437/* [RC 32] Interrupt register #0 read clear */ 3037/* [RC 32] Interrupt register #0 read clear */
2438#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c 3038#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c
2439/* [RW 26] Parity mask register #0 read/write */ 3039#define PXP_REG_PXP_INT_STS_CLR_1 0x10307c
3040/* [RW 27] Parity mask register #0 read/write */
2440#define PXP_REG_PXP_PRTY_MASK 0x103094 3041#define PXP_REG_PXP_PRTY_MASK 0x103094
2441/* [R 26] Parity register #0 read */ 3042/* [R 26] Parity register #0 read */
2442#define PXP_REG_PXP_PRTY_STS 0x103088 3043#define PXP_REG_PXP_PRTY_STS 0x103088
3044/* [RC 27] Parity register #0 read clear */
3045#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c
2443/* [RW 4] The activity counter initial increment value sent in the load 3046/* [RW 4] The activity counter initial increment value sent in the load
2444 request */ 3047 request */
2445#define QM_REG_ACTCTRINITVAL_0 0x168040 3048#define QM_REG_ACTCTRINITVAL_0 0x168040
@@ -2566,6 +3169,7 @@
2566#define QM_REG_PAUSESTATE7 0x16e698 3169#define QM_REG_PAUSESTATE7 0x16e698
2567/* [RW 2] The PCI attributes field used in the PCI request. */ 3170/* [RW 2] The PCI attributes field used in the PCI request. */
2568#define QM_REG_PCIREQAT 0x168054 3171#define QM_REG_PCIREQAT 0x168054
3172#define QM_REG_PF_EN 0x16e70c
2569/* [R 16] The byte credit of port 0 */ 3173/* [R 16] The byte credit of port 0 */
2570#define QM_REG_PORT0BYTECRD 0x168300 3174#define QM_REG_PORT0BYTECRD 0x168300
2571/* [R 16] The byte credit of port 1 */ 3175/* [R 16] The byte credit of port 1 */
@@ -2595,6 +3199,8 @@
2595#define QM_REG_QM_PRTY_MASK 0x168454 3199#define QM_REG_QM_PRTY_MASK 0x168454
2596/* [R 12] Parity register #0 read */ 3200/* [R 12] Parity register #0 read */
2597#define QM_REG_QM_PRTY_STS 0x168448 3201#define QM_REG_QM_PRTY_STS 0x168448
3202/* [RC 12] Parity register #0 read clear */
3203#define QM_REG_QM_PRTY_STS_CLR 0x16844c
2598/* [R 32] Current queues in pipeline: Queues from 32 to 63 */ 3204/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
2599#define QM_REG_QSTATUS_HIGH 0x16802c 3205#define QM_REG_QSTATUS_HIGH 0x16802c
2600/* [R 32] Current queues in pipeline: Queues from 96 to 127 */ 3206/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
@@ -2880,6 +3486,8 @@
2880#define QM_REG_WRRWEIGHTS_9 0x168848 3486#define QM_REG_WRRWEIGHTS_9 0x168848
2881/* [R 6] Keep the fill level of the fifo from write client 1 */ 3487/* [R 6] Keep the fill level of the fifo from write client 1 */
2882#define QM_REG_XQM_WRC_FIFOLVL 0x168000 3488#define QM_REG_XQM_WRC_FIFOLVL 0x168000
3489/* [W 1] reset to parity interrupt */
3490#define SEM_FAST_REG_PARITY_RST 0x18840
2883#define SRC_REG_COUNTFREE0 0x40500 3491#define SRC_REG_COUNTFREE0 0x40500
2884/* [RW 1] If clr the searcher is compatible to E1 A0 - support only two 3492/* [RW 1] If clr the searcher is compatible to E1 A0 - support only two
2885 ports. If set the searcher support 8 functions. */ 3493 ports. If set the searcher support 8 functions. */
@@ -2908,6 +3516,8 @@
2908#define SRC_REG_SRC_PRTY_MASK 0x404c8 3516#define SRC_REG_SRC_PRTY_MASK 0x404c8
2909/* [R 3] Parity register #0 read */ 3517/* [R 3] Parity register #0 read */
2910#define SRC_REG_SRC_PRTY_STS 0x404bc 3518#define SRC_REG_SRC_PRTY_STS 0x404bc
3519/* [RC 3] Parity register #0 read clear */
3520#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0
2911/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ 3521/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
2912#define TCM_REG_CAM_OCCUP 0x5017c 3522#define TCM_REG_CAM_OCCUP 0x5017c
2913/* [RW 1] CDU AG read Interface enable. If 0 - the request input is 3523/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3034,8 +3644,12 @@
3034#define TCM_REG_TCM_INT_MASK 0x501dc 3644#define TCM_REG_TCM_INT_MASK 0x501dc
3035/* [R 11] Interrupt register #0 read */ 3645/* [R 11] Interrupt register #0 read */
3036#define TCM_REG_TCM_INT_STS 0x501d0 3646#define TCM_REG_TCM_INT_STS 0x501d0
3647/* [RW 27] Parity mask register #0 read/write */
3648#define TCM_REG_TCM_PRTY_MASK 0x501ec
3037/* [R 27] Parity register #0 read */ 3649/* [R 27] Parity register #0 read */
3038#define TCM_REG_TCM_PRTY_STS 0x501e0 3650#define TCM_REG_TCM_PRTY_STS 0x501e0
3651/* [RC 27] Parity register #0 read clear */
3652#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4
3039/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS 3653/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
3040 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 3654 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
3041 Is used to determine the number of the AG context REG-pairs written back; 3655 Is used to determine the number of the AG context REG-pairs written back;
@@ -3193,6 +3807,10 @@
3193#define TM_REG_TM_INT_MASK 0x1640fc 3807#define TM_REG_TM_INT_MASK 0x1640fc
3194/* [R 1] Interrupt register #0 read */ 3808/* [R 1] Interrupt register #0 read */
3195#define TM_REG_TM_INT_STS 0x1640f0 3809#define TM_REG_TM_INT_STS 0x1640f0
3810/* [RW 7] Parity mask register #0 read/write */
3811#define TM_REG_TM_PRTY_MASK 0x16410c
3812/* [RC 7] Parity register #0 read clear */
3813#define TM_REG_TM_PRTY_STS_CLR 0x164104
3196/* [RW 8] The event id for aggregated interrupt 0 */ 3814/* [RW 8] The event id for aggregated interrupt 0 */
3197#define TSDM_REG_AGG_INT_EVENT_0 0x42038 3815#define TSDM_REG_AGG_INT_EVENT_0 0x42038
3198#define TSDM_REG_AGG_INT_EVENT_1 0x4203c 3816#define TSDM_REG_AGG_INT_EVENT_1 0x4203c
@@ -3204,13 +3822,13 @@
3204#define TSDM_REG_AGG_INT_T_1 0x420bc 3822#define TSDM_REG_AGG_INT_T_1 0x420bc
3205/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ 3823/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
3206#define TSDM_REG_CFC_RSP_START_ADDR 0x42008 3824#define TSDM_REG_CFC_RSP_START_ADDR 0x42008
3207/* [RW 16] The maximum value of the competion counter #0 */ 3825/* [RW 16] The maximum value of the completion counter #0 */
3208#define TSDM_REG_CMP_COUNTER_MAX0 0x4201c 3826#define TSDM_REG_CMP_COUNTER_MAX0 0x4201c
3209/* [RW 16] The maximum value of the competion counter #1 */ 3827/* [RW 16] The maximum value of the completion counter #1 */
3210#define TSDM_REG_CMP_COUNTER_MAX1 0x42020 3828#define TSDM_REG_CMP_COUNTER_MAX1 0x42020
3211/* [RW 16] The maximum value of the competion counter #2 */ 3829/* [RW 16] The maximum value of the completion counter #2 */
3212#define TSDM_REG_CMP_COUNTER_MAX2 0x42024 3830#define TSDM_REG_CMP_COUNTER_MAX2 0x42024
3213/* [RW 16] The maximum value of the competion counter #3 */ 3831/* [RW 16] The maximum value of the completion counter #3 */
3214#define TSDM_REG_CMP_COUNTER_MAX3 0x42028 3832#define TSDM_REG_CMP_COUNTER_MAX3 0x42028
3215/* [RW 13] The start address in the internal RAM for the completion 3833/* [RW 13] The start address in the internal RAM for the completion
3216 counters. */ 3834 counters. */
@@ -3273,6 +3891,8 @@
3273#define TSDM_REG_TSDM_PRTY_MASK 0x422bc 3891#define TSDM_REG_TSDM_PRTY_MASK 0x422bc
3274/* [R 11] Parity register #0 read */ 3892/* [R 11] Parity register #0 read */
3275#define TSDM_REG_TSDM_PRTY_STS 0x422b0 3893#define TSDM_REG_TSDM_PRTY_STS 0x422b0
3894/* [RC 11] Parity register #0 read clear */
3895#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4
3276/* [RW 5] The number of time_slots in the arbitration cycle */ 3896/* [RW 5] The number of time_slots in the arbitration cycle */
3277#define TSEM_REG_ARB_CYCLE_SIZE 0x180034 3897#define TSEM_REG_ARB_CYCLE_SIZE 0x180034
3278/* [RW 3] The source that is associated with arbitration element 0. Source 3898/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -3352,6 +3972,9 @@
3352#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0 3972#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0
3353/* [RW 8] List of free threads . There is a bit per thread. */ 3973/* [RW 8] List of free threads . There is a bit per thread. */
3354#define TSEM_REG_THREADS_LIST 0x1802e4 3974#define TSEM_REG_THREADS_LIST 0x1802e4
3975/* [RC 32] Parity register #0 read clear */
3976#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118
3977#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128
3355/* [RW 3] The arbitration scheme of time_slot 0 */ 3978/* [RW 3] The arbitration scheme of time_slot 0 */
3356#define TSEM_REG_TS_0_AS 0x180038 3979#define TSEM_REG_TS_0_AS 0x180038
3357/* [RW 3] The arbitration scheme of time_slot 10 */ 3980/* [RW 3] The arbitration scheme of time_slot 10 */
@@ -3402,6 +4025,14 @@
3402/* [R 32] Parity register #0 read */ 4025/* [R 32] Parity register #0 read */
3403#define TSEM_REG_TSEM_PRTY_STS_0 0x180114 4026#define TSEM_REG_TSEM_PRTY_STS_0 0x180114
3404#define TSEM_REG_TSEM_PRTY_STS_1 0x180124 4027#define TSEM_REG_TSEM_PRTY_STS_1 0x180124
4028/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
4029 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
4030#define TSEM_REG_VFPF_ERR_NUM 0x180380
4031/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
4032 * [10:8] of the address should be the offset within the accessed LCID
4033 * context; the bits [7:0] are the accessed LCID.Example: to write to REG10
4034 * LCID100. The RBC address should be 12'ha64. */
4035#define UCM_REG_AG_CTX 0xe2000
3405/* [R 5] Used to read the XX protection CAM occupancy counter. */ 4036/* [R 5] Used to read the XX protection CAM occupancy counter. */
3406#define UCM_REG_CAM_OCCUP 0xe0170 4037#define UCM_REG_CAM_OCCUP 0xe0170
3407/* [RW 1] CDU AG read Interface enable. If 0 - the request input is 4038/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3546,6 +4177,8 @@
3546#define UCM_REG_UCM_INT_STS 0xe01c8 4177#define UCM_REG_UCM_INT_STS 0xe01c8
3547/* [R 27] Parity register #0 read */ 4178/* [R 27] Parity register #0 read */
3548#define UCM_REG_UCM_PRTY_STS 0xe01d8 4179#define UCM_REG_UCM_PRTY_STS 0xe01d8
4180/* [RC 27] Parity register #0 read clear */
4181#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc
3549/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS 4182/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
3550 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 4183 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
3551 Is used to determine the number of the AG context REG-pairs written back; 4184 Is used to determine the number of the AG context REG-pairs written back;
@@ -3651,13 +4284,13 @@
3651#define USDM_REG_AGG_INT_T_6 0xc40d0 4284#define USDM_REG_AGG_INT_T_6 0xc40d0
3652/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ 4285/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
3653#define USDM_REG_CFC_RSP_START_ADDR 0xc4008 4286#define USDM_REG_CFC_RSP_START_ADDR 0xc4008
3654/* [RW 16] The maximum value of the competion counter #0 */ 4287/* [RW 16] The maximum value of the completion counter #0 */
3655#define USDM_REG_CMP_COUNTER_MAX0 0xc401c 4288#define USDM_REG_CMP_COUNTER_MAX0 0xc401c
3656/* [RW 16] The maximum value of the competion counter #1 */ 4289/* [RW 16] The maximum value of the completion counter #1 */
3657#define USDM_REG_CMP_COUNTER_MAX1 0xc4020 4290#define USDM_REG_CMP_COUNTER_MAX1 0xc4020
3658/* [RW 16] The maximum value of the competion counter #2 */ 4291/* [RW 16] The maximum value of the completion counter #2 */
3659#define USDM_REG_CMP_COUNTER_MAX2 0xc4024 4292#define USDM_REG_CMP_COUNTER_MAX2 0xc4024
3660/* [RW 16] The maximum value of the competion counter #3 */ 4293/* [RW 16] The maximum value of the completion counter #3 */
3661#define USDM_REG_CMP_COUNTER_MAX3 0xc4028 4294#define USDM_REG_CMP_COUNTER_MAX3 0xc4028
3662/* [RW 13] The start address in the internal RAM for the completion 4295/* [RW 13] The start address in the internal RAM for the completion
3663 counters. */ 4296 counters. */
@@ -3722,6 +4355,8 @@
3722#define USDM_REG_USDM_PRTY_MASK 0xc42c0 4355#define USDM_REG_USDM_PRTY_MASK 0xc42c0
3723/* [R 11] Parity register #0 read */ 4356/* [R 11] Parity register #0 read */
3724#define USDM_REG_USDM_PRTY_STS 0xc42b4 4357#define USDM_REG_USDM_PRTY_STS 0xc42b4
4358/* [RC 11] Parity register #0 read clear */
4359#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8
3725/* [RW 5] The number of time_slots in the arbitration cycle */ 4360/* [RW 5] The number of time_slots in the arbitration cycle */
3726#define USEM_REG_ARB_CYCLE_SIZE 0x300034 4361#define USEM_REG_ARB_CYCLE_SIZE 0x300034
3727/* [RW 3] The source that is associated with arbitration element 0. Source 4362/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -3851,6 +4486,20 @@
3851/* [R 32] Parity register #0 read */ 4486/* [R 32] Parity register #0 read */
3852#define USEM_REG_USEM_PRTY_STS_0 0x300124 4487#define USEM_REG_USEM_PRTY_STS_0 0x300124
3853#define USEM_REG_USEM_PRTY_STS_1 0x300134 4488#define USEM_REG_USEM_PRTY_STS_1 0x300134
4489/* [RC 32] Parity register #0 read clear */
4490#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128
4491#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138
4492/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
4493 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
4494#define USEM_REG_VFPF_ERR_NUM 0x300380
4495#define VFC_MEMORIES_RST_REG_CAM_RST (0x1<<0)
4496#define VFC_MEMORIES_RST_REG_RAM_RST (0x1<<1)
4497#define VFC_REG_MEMORIES_RST 0x1943c
4498/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
4499 * [12:8] of the address should be the offset within the accessed LCID
4500 * context; the bits [7:0] are the accessed LCID.Example: to write to REG10
4501 * LCID100. The RBC address should be 13'ha64. */
4502#define XCM_REG_AG_CTX 0x28000
3854/* [RW 2] The queue index for registration on Aux1 counter flag. */ 4503/* [RW 2] The queue index for registration on Aux1 counter flag. */
3855#define XCM_REG_AUX1_Q 0x20134 4504#define XCM_REG_AUX1_Q 0x20134
3856/* [RW 2] Per each decision rule the queue index to register to. */ 4505/* [RW 2] Per each decision rule the queue index to register to. */
@@ -4149,13 +4798,13 @@
4149#define XSDM_REG_AGG_INT_MODE_1 0x1661bc 4798#define XSDM_REG_AGG_INT_MODE_1 0x1661bc
4150/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ 4799/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
4151#define XSDM_REG_CFC_RSP_START_ADDR 0x166008 4800#define XSDM_REG_CFC_RSP_START_ADDR 0x166008
4152/* [RW 16] The maximum value of the competion counter #0 */ 4801/* [RW 16] The maximum value of the completion counter #0 */
4153#define XSDM_REG_CMP_COUNTER_MAX0 0x16601c 4802#define XSDM_REG_CMP_COUNTER_MAX0 0x16601c
4154/* [RW 16] The maximum value of the competion counter #1 */ 4803/* [RW 16] The maximum value of the completion counter #1 */
4155#define XSDM_REG_CMP_COUNTER_MAX1 0x166020 4804#define XSDM_REG_CMP_COUNTER_MAX1 0x166020
4156/* [RW 16] The maximum value of the competion counter #2 */ 4805/* [RW 16] The maximum value of the completion counter #2 */
4157#define XSDM_REG_CMP_COUNTER_MAX2 0x166024 4806#define XSDM_REG_CMP_COUNTER_MAX2 0x166024
4158/* [RW 16] The maximum value of the competion counter #3 */ 4807/* [RW 16] The maximum value of the completion counter #3 */
4159#define XSDM_REG_CMP_COUNTER_MAX3 0x166028 4808#define XSDM_REG_CMP_COUNTER_MAX3 0x166028
4160/* [RW 13] The start address in the internal RAM for the completion 4809/* [RW 13] The start address in the internal RAM for the completion
4161 counters. */ 4810 counters. */
@@ -4216,6 +4865,8 @@
4216#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc 4865#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
4217/* [R 11] Parity register #0 read */ 4866/* [R 11] Parity register #0 read */
4218#define XSDM_REG_XSDM_PRTY_STS 0x1662b0 4867#define XSDM_REG_XSDM_PRTY_STS 0x1662b0
4868/* [RC 11] Parity register #0 read clear */
4869#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4
4219/* [RW 5] The number of time_slots in the arbitration cycle */ 4870/* [RW 5] The number of time_slots in the arbitration cycle */
4220#define XSEM_REG_ARB_CYCLE_SIZE 0x280034 4871#define XSEM_REG_ARB_CYCLE_SIZE 0x280034
4221/* [RW 3] The source that is associated with arbitration element 0. Source 4872/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4333,6 +4984,9 @@
4333#define XSEM_REG_TS_8_AS 0x280058 4984#define XSEM_REG_TS_8_AS 0x280058
4334/* [RW 3] The arbitration scheme of time_slot 9 */ 4985/* [RW 3] The arbitration scheme of time_slot 9 */
4335#define XSEM_REG_TS_9_AS 0x28005c 4986#define XSEM_REG_TS_9_AS 0x28005c
4987/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
4988 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
4989#define XSEM_REG_VFPF_ERR_NUM 0x280380
4336/* [RW 32] Interrupt mask register #0 read/write */ 4990/* [RW 32] Interrupt mask register #0 read/write */
4337#define XSEM_REG_XSEM_INT_MASK_0 0x280110 4991#define XSEM_REG_XSEM_INT_MASK_0 0x280110
4338#define XSEM_REG_XSEM_INT_MASK_1 0x280120 4992#define XSEM_REG_XSEM_INT_MASK_1 0x280120
@@ -4345,6 +4999,9 @@
4345/* [R 32] Parity register #0 read */ 4999/* [R 32] Parity register #0 read */
4346#define XSEM_REG_XSEM_PRTY_STS_0 0x280124 5000#define XSEM_REG_XSEM_PRTY_STS_0 0x280124
4347#define XSEM_REG_XSEM_PRTY_STS_1 0x280134 5001#define XSEM_REG_XSEM_PRTY_STS_1 0x280134
5002/* [RC 32] Parity register #0 read clear */
5003#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128
5004#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138
4348#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) 5005#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
4349#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) 5006#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
4350#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) 5007#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
@@ -4371,6 +5028,23 @@
4371#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3) 5028#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3)
4372#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3) 5029#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3)
4373#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3) 5030#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3)
5031#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3)
5032#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
5033#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3)
5034#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3)
5035#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3)
5036#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3)
5037#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3)
5038#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3)
5039#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3)
5040#define BIGMAC2_REGISTER_RX_STAT_GRPP (0x51<<3)
5041#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3)
5042#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3)
5043#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3)
5044#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3)
5045#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3)
5046#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3)
5047#define BIGMAC2_REGISTER_TX_STAT_GTPP (0x24<<3)
4374#define EMAC_LED_1000MB_OVERRIDE (1L<<1) 5048#define EMAC_LED_1000MB_OVERRIDE (1L<<1)
4375#define EMAC_LED_100MB_OVERRIDE (1L<<2) 5049#define EMAC_LED_100MB_OVERRIDE (1L<<2)
4376#define EMAC_LED_10MB_OVERRIDE (1L<<3) 5050#define EMAC_LED_10MB_OVERRIDE (1L<<3)
@@ -4405,7 +5079,23 @@
4405#define EMAC_REG_EMAC_TX_MODE 0xbc 5079#define EMAC_REG_EMAC_TX_MODE 0xbc
4406#define EMAC_REG_EMAC_TX_STAT_AC 0x280 5080#define EMAC_REG_EMAC_TX_STAT_AC 0x280
4407#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22 5081#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22
5082#define EMAC_REG_RX_PFC_MODE 0x320
5083#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2)
5084#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1)
5085#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0)
5086#define EMAC_REG_RX_PFC_PARAM 0x324
5087#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0
5088#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16
5089#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328
5090#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0)
5091#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330
5092#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0)
5093#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c
5094#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0)
5095#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334
5096#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0)
4408#define EMAC_RX_MODE_FLOW_EN (1L<<2) 5097#define EMAC_RX_MODE_FLOW_EN (1L<<2)
5098#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3)
4409#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10) 5099#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
4410#define EMAC_RX_MODE_PROMISCUOUS (1L<<8) 5100#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
4411#define EMAC_RX_MODE_RESET (1L<<0) 5101#define EMAC_RX_MODE_RESET (1L<<0)
@@ -4478,6 +5168,8 @@
4478#define HW_LOCK_RESOURCE_SPIO 2 5168#define HW_LOCK_RESOURCE_SPIO 2
4479#define HW_LOCK_RESOURCE_UNDI 5 5169#define HW_LOCK_RESOURCE_UNDI 5
4480#define PRS_FLAG_OVERETH_IPV4 1 5170#define PRS_FLAG_OVERETH_IPV4 1
5171#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
5172#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
4481#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18) 5173#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18)
4482#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31) 5174#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31)
4483#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9) 5175#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9)
@@ -4504,6 +5196,8 @@
4504#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20) 5196#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20)
4505#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (1<<0) 5197#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (1<<0)
4506#define AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT (1<<31) 5198#define AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT (1<<31)
5199#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2)
5200#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3)
4507#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (1<<3) 5201#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (1<<3)
4508#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (1<<2) 5202#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (1<<2)
4509#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (1<<5) 5203#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (1<<5)
@@ -4796,6 +5490,253 @@
4796#define PCI_ID_VAL1 0x434 5490#define PCI_ID_VAL1 0x434
4797#define PCI_ID_VAL2 0x438 5491#define PCI_ID_VAL2 0x438
4798 5492
5493#define PXPCS_TL_CONTROL_5 0x814
5494#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
5495#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/
5496#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/
5497#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/
5498#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/
5499#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/
5500#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/
5501#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/
5502#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/
5503#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/
5504#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/
5505#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/
5506#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/
5507#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/
5508#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/
5509#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/
5510#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/
5511#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/
5512#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/
5513#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/
5514#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/
5515#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/
5516#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/
5517#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/
5518#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/
5519#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/
5520#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/
5521#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/
5522#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/
5523#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/
5524
5525
5526#define PXPCS_TL_FUNC345_STAT 0x854
5527#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */
5528#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4\
5529 (1 << 28) /* Unsupported Request Error Status in function4, if \
5530 set, generate pcie_err_attn output when this error is seen. WC */
5531#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4\
5532 (1 << 27) /* ECRC Error TLP Status Status in function 4, if set, \
5533 generate pcie_err_attn output when this error is seen.. WC */
5534#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4\
5535 (1 << 26) /* Malformed TLP Status Status in function 4, if set, \
5536 generate pcie_err_attn output when this error is seen.. WC */
5537#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4\
5538 (1 << 25) /* Receiver Overflow Status Status in function 4, if \
5539 set, generate pcie_err_attn output when this error is seen.. WC \
5540 */
5541#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4\
5542 (1 << 24) /* Unexpected Completion Status Status in function 4, \
5543 if set, generate pcie_err_attn output when this error is seen. WC \
5544 */
5545#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4\
5546 (1 << 23) /* Receive UR Statusin function 4. If set, generate \
5547 pcie_err_attn output when this error is seen. WC */
5548#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4\
5549 (1 << 22) /* Completer Timeout Status Status in function 4, if \
5550 set, generate pcie_err_attn output when this error is seen. WC */
5551#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4\
5552 (1 << 21) /* Flow Control Protocol Error Status Status in \
5553 function 4, if set, generate pcie_err_attn output when this error \
5554 is seen. WC */
5555#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4\
5556 (1 << 20) /* Poisoned Error Status Status in function 4, if set, \
5557 generate pcie_err_attn output when this error is seen.. WC */
5558#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */
5559#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3\
5560 (1 << 18) /* Unsupported Request Error Status in function3, if \
5561 set, generate pcie_err_attn output when this error is seen. WC */
5562#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3\
5563 (1 << 17) /* ECRC Error TLP Status Status in function 3, if set, \
5564 generate pcie_err_attn output when this error is seen.. WC */
5565#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3\
5566 (1 << 16) /* Malformed TLP Status Status in function 3, if set, \
5567 generate pcie_err_attn output when this error is seen.. WC */
5568#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3\
5569 (1 << 15) /* Receiver Overflow Status Status in function 3, if \
5570 set, generate pcie_err_attn output when this error is seen.. WC \
5571 */
5572#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3\
5573 (1 << 14) /* Unexpected Completion Status Status in function 3, \
5574 if set, generate pcie_err_attn output when this error is seen. WC \
5575 */
5576#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3\
5577 (1 << 13) /* Receive UR Statusin function 3. If set, generate \
5578 pcie_err_attn output when this error is seen. WC */
5579#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3\
5580 (1 << 12) /* Completer Timeout Status Status in function 3, if \
5581 set, generate pcie_err_attn output when this error is seen. WC */
5582#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3\
5583 (1 << 11) /* Flow Control Protocol Error Status Status in \
5584 function 3, if set, generate pcie_err_attn output when this error \
5585 is seen. WC */
5586#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3\
5587 (1 << 10) /* Poisoned Error Status Status in function 3, if set, \
5588 generate pcie_err_attn output when this error is seen.. WC */
5589#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */
5590#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2\
5591 (1 << 8) /* Unsupported Request Error Status for Function 2, if \
5592 set, generate pcie_err_attn output when this error is seen. WC */
5593#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2\
5594 (1 << 7) /* ECRC Error TLP Status Status for Function 2, if set, \
5595 generate pcie_err_attn output when this error is seen.. WC */
5596#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2\
5597 (1 << 6) /* Malformed TLP Status Status for Function 2, if set, \
5598 generate pcie_err_attn output when this error is seen.. WC */
5599#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2\
5600 (1 << 5) /* Receiver Overflow Status Status for Function 2, if \
5601 set, generate pcie_err_attn output when this error is seen.. WC \
5602 */
5603#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2\
5604 (1 << 4) /* Unexpected Completion Status Status for Function 2, \
5605 if set, generate pcie_err_attn output when this error is seen. WC \
5606 */
5607#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2\
5608 (1 << 3) /* Receive UR Statusfor Function 2. If set, generate \
5609 pcie_err_attn output when this error is seen. WC */
5610#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2\
5611 (1 << 2) /* Completer Timeout Status Status for Function 2, if \
5612 set, generate pcie_err_attn output when this error is seen. WC */
5613#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2\
5614 (1 << 1) /* Flow Control Protocol Error Status Status for \
5615 Function 2, if set, generate pcie_err_attn output when this error \
5616 is seen. WC */
5617#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2\
5618 (1 << 0) /* Poisoned Error Status Status for Function 2, if set, \
5619 generate pcie_err_attn output when this error is seen.. WC */
5620
5621
5622#define PXPCS_TL_FUNC678_STAT 0x85C
5623#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */
5624#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7\
5625 (1 << 28) /* Unsupported Request Error Status in function7, if \
5626 set, generate pcie_err_attn output when this error is seen. WC */
5627#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7\
5628 (1 << 27) /* ECRC Error TLP Status Status in function 7, if set, \
5629 generate pcie_err_attn output when this error is seen.. WC */
5630#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7\
5631 (1 << 26) /* Malformed TLP Status Status in function 7, if set, \
5632 generate pcie_err_attn output when this error is seen.. WC */
5633#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7\
5634 (1 << 25) /* Receiver Overflow Status Status in function 7, if \
5635 set, generate pcie_err_attn output when this error is seen.. WC \
5636 */
5637#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7\
5638 (1 << 24) /* Unexpected Completion Status Status in function 7, \
5639 if set, generate pcie_err_attn output when this error is seen. WC \
5640 */
5641#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7\
5642 (1 << 23) /* Receive UR Statusin function 7. If set, generate \
5643 pcie_err_attn output when this error is seen. WC */
5644#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7\
5645 (1 << 22) /* Completer Timeout Status Status in function 7, if \
5646 set, generate pcie_err_attn output when this error is seen. WC */
5647#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7\
5648 (1 << 21) /* Flow Control Protocol Error Status Status in \
5649 function 7, if set, generate pcie_err_attn output when this error \
5650 is seen. WC */
5651#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7\
5652 (1 << 20) /* Poisoned Error Status Status in function 7, if set, \
5653 generate pcie_err_attn output when this error is seen.. WC */
5654#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */
5655#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6\
5656 (1 << 18) /* Unsupported Request Error Status in function6, if \
5657 set, generate pcie_err_attn output when this error is seen. WC */
5658#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6\
5659 (1 << 17) /* ECRC Error TLP Status Status in function 6, if set, \
5660 generate pcie_err_attn output when this error is seen.. WC */
5661#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6\
5662 (1 << 16) /* Malformed TLP Status Status in function 6, if set, \
5663 generate pcie_err_attn output when this error is seen.. WC */
5664#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6\
5665 (1 << 15) /* Receiver Overflow Status Status in function 6, if \
5666 set, generate pcie_err_attn output when this error is seen.. WC \
5667 */
5668#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6\
5669 (1 << 14) /* Unexpected Completion Status Status in function 6, \
5670 if set, generate pcie_err_attn output when this error is seen. WC \
5671 */
5672#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6\
5673 (1 << 13) /* Receive UR Statusin function 6. If set, generate \
5674 pcie_err_attn output when this error is seen. WC */
5675#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6\
5676 (1 << 12) /* Completer Timeout Status Status in function 6, if \
5677 set, generate pcie_err_attn output when this error is seen. WC */
5678#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6\
5679 (1 << 11) /* Flow Control Protocol Error Status Status in \
5680 function 6, if set, generate pcie_err_attn output when this error \
5681 is seen. WC */
5682#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6\
5683 (1 << 10) /* Poisoned Error Status Status in function 6, if set, \
5684 generate pcie_err_attn output when this error is seen.. WC */
5685#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */
5686#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5\
5687 (1 << 8) /* Unsupported Request Error Status for Function 5, if \
5688 set, generate pcie_err_attn output when this error is seen. WC */
5689#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5\
5690 (1 << 7) /* ECRC Error TLP Status Status for Function 5, if set, \
5691 generate pcie_err_attn output when this error is seen.. WC */
5692#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5\
5693 (1 << 6) /* Malformed TLP Status Status for Function 5, if set, \
5694 generate pcie_err_attn output when this error is seen.. WC */
5695#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5\
5696 (1 << 5) /* Receiver Overflow Status Status for Function 5, if \
5697 set, generate pcie_err_attn output when this error is seen.. WC \
5698 */
5699#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5\
5700 (1 << 4) /* Unexpected Completion Status Status for Function 5, \
5701 if set, generate pcie_err_attn output when this error is seen. WC \
5702 */
5703#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5\
5704 (1 << 3) /* Receive UR Statusfor Function 5. If set, generate \
5705 pcie_err_attn output when this error is seen. WC */
5706#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5\
5707 (1 << 2) /* Completer Timeout Status Status for Function 5, if \
5708 set, generate pcie_err_attn output when this error is seen. WC */
5709#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5\
5710 (1 << 1) /* Flow Control Protocol Error Status Status for \
5711 Function 5, if set, generate pcie_err_attn output when this error \
5712 is seen. WC */
5713#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5\
5714 (1 << 0) /* Poisoned Error Status Status for Function 5, if set, \
5715 generate pcie_err_attn output when this error is seen.. WC */
5716
5717
5718#define BAR_USTRORM_INTMEM 0x400000
5719#define BAR_CSTRORM_INTMEM 0x410000
5720#define BAR_XSTRORM_INTMEM 0x420000
5721#define BAR_TSTRORM_INTMEM 0x430000
5722
5723/* for accessing the IGU in case of status block ACK */
5724#define BAR_IGU_INTMEM 0x440000
5725
5726#define BAR_DOORBELL_OFFSET 0x800000
5727
5728#define BAR_ME_REGISTER 0x450000
5729#define ME_REG_PF_NUM_SHIFT 0
5730#define ME_REG_PF_NUM\
5731 (7L<<ME_REG_PF_NUM_SHIFT) /* Relative PF Num */
5732#define ME_REG_VF_VALID (1<<8)
5733#define ME_REG_VF_NUM_SHIFT 9
5734#define ME_REG_VF_NUM_MASK (0x3f<<ME_REG_VF_NUM_SHIFT)
5735#define ME_REG_VF_ERR (0x1<<3)
5736#define ME_REG_ABS_PF_NUM_SHIFT 16
5737#define ME_REG_ABS_PF_NUM\
5738 (7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
5739
4799 5740
4800#define MDIO_REG_BANK_CL73_IEEEB0 0x0 5741#define MDIO_REG_BANK_CL73_IEEEB0 0x0
4801#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 5742#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
@@ -4964,6 +5905,8 @@
4964#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001 5905#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
4965#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040 5906#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
4966#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14 5907#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
5908#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001
5909#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002
4967#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004 5910#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
4968#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018 5911#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
4969#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3 5912#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
@@ -5135,28 +6078,36 @@ Theotherbitsarereservedandshouldbezero*/
5135#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005 6078#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
5136#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007 6079#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
5137#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff 6080#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
5138#define MDIO_PMA_REG_8727_MISC_CTRL 0x8309
5139#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02 6081#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
5140#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05 6082#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
5141#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808 6083#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
5142#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e 6084#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
6085#define MDIO_PMA_REG_8727_PCS_GP 0xc842
6086#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
6087
6088#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
5143 6089
5144#define MDIO_PMA_REG_8073_CHIP_REV 0xc801 6090#define MDIO_PMA_REG_8073_CHIP_REV 0xc801
5145#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820 6091#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820
5146#define MDIO_PMA_REG_8073_XAUI_WA 0xc841 6092#define MDIO_PMA_REG_8073_XAUI_WA 0xc841
6093#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08
5147 6094
5148#define MDIO_PMA_REG_7101_RESET 0xc000 6095#define MDIO_PMA_REG_7101_RESET 0xc000
5149#define MDIO_PMA_REG_7107_LED_CNTL 0xc007 6096#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
6097#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009
5150#define MDIO_PMA_REG_7101_VER1 0xc026 6098#define MDIO_PMA_REG_7101_VER1 0xc026
5151#define MDIO_PMA_REG_7101_VER2 0xc027 6099#define MDIO_PMA_REG_7101_VER2 0xc027
5152 6100
5153#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811 6101#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
5154#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c 6102#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
5155#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f 6103#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
5156#define MDIO_PMA_REG_8481_LED3_MASK 0xa832 6104#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
5157#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834 6105#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
5158#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835 6106#define MDIO_PMA_REG_8481_LED5_MASK 0xa838
5159#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b 6107#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
6108#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
6109#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800
6110#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
5160 6111
5161 6112
5162#define MDIO_WIS_DEVAD 0x2 6113#define MDIO_WIS_DEVAD 0x2
@@ -5188,6 +6139,8 @@ Theotherbitsarereservedandshouldbezero*/
5188#define MDIO_XS_8706_REG_BANK_RX3 0x80ec 6139#define MDIO_XS_8706_REG_BANK_RX3 0x80ec
5189#define MDIO_XS_8706_REG_BANK_RXA 0x80fc 6140#define MDIO_XS_8706_REG_BANK_RXA 0x80fc
5190 6141
6142#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA
6143
5191#define MDIO_AN_DEVAD 0x7 6144#define MDIO_AN_DEVAD 0x7
5192/*ieee*/ 6145/*ieee*/
5193#define MDIO_AN_REG_CTRL 0x0000 6146#define MDIO_AN_REG_CTRL 0x0000
@@ -5210,14 +6163,44 @@ Theotherbitsarereservedandshouldbezero*/
5210#define MDIO_AN_REG_CL37_FC_LP 0xffe5 6163#define MDIO_AN_REG_CL37_FC_LP 0xffe5
5211 6164
5212#define MDIO_AN_REG_8073_2_5G 0x8329 6165#define MDIO_AN_REG_8073_2_5G 0x8329
6166#define MDIO_AN_REG_8073_BAM 0x8350
5213 6167
6168#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
5214#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0 6169#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
6170#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
5215#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4 6171#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
6172#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
5216#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9 6173#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
5217#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5 6174#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
5218#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7 6175#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
6176#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
5219#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc 6177#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc
5220 6178
6179/* BCM84823 only */
6180#define MDIO_CTL_DEVAD 0x1e
6181#define MDIO_CTL_REG_84823_MEDIA 0x401a
6182#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018
6183 /* These pins configure the BCM84823 interface to MAC after reset. */
6184#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008
6185#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010
6186 /* These pins configure the BCM84823 interface to Line after reset. */
6187#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060
6188#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020
6189#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040
6190 /* When this pin is active high during reset, 10GBASE-T core is power
6191 * down, When it is active low the 10GBASE-T is power up
6192 */
6193#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080
6194#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100
6195#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
6196#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
6197#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
6198#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
6199#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
6200
6201#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
6202#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
6203
5221#define IGU_FUNC_BASE 0x0400 6204#define IGU_FUNC_BASE 0x0400
5222 6205
5223#define IGU_ADDR_MSIX 0x0000 6206#define IGU_ADDR_MSIX 0x0000
@@ -5239,6 +6222,11 @@ Theotherbitsarereservedandshouldbezero*/
5239#define IGU_INT_NOP 2 6222#define IGU_INT_NOP 2
5240#define IGU_INT_NOP2 3 6223#define IGU_INT_NOP2 3
5241 6224
6225#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0
6226#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1
6227#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2
6228#define IGU_USE_REGISTER_cstorm_type_1_sb_cleanup 3
6229
5242#define COMMAND_REG_INT_ACK 0x0 6230#define COMMAND_REG_INT_ACK 0x0
5243#define COMMAND_REG_PROD_UPD 0x4 6231#define COMMAND_REG_PROD_UPD 0x4
5244#define COMMAND_REG_ATTN_BITS_UPD 0x8 6232#define COMMAND_REG_ATTN_BITS_UPD 0x8
@@ -5281,6 +6269,50 @@ Theotherbitsarereservedandshouldbezero*/
5281#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6 6269#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
5282 6270
5283#define IGU_REG_RESERVED_UPPER 0x05ff 6271#define IGU_REG_RESERVED_UPPER 0x05ff
6272/* Fields of IGU PF CONFIGRATION REGISTER */
6273#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
6274#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
6275#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
6276#define IGU_PF_CONF_ATTN_BIT_EN (0x1<<3) /* attention enable */
6277#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
6278#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
6279
6280/* Fields of IGU VF CONFIGRATION REGISTER */
6281#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
6282#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
6283#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */
6284#define IGU_VF_CONF_PARENT_SHIFT 2 /* Parent PF */
6285#define IGU_VF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
6286
6287
6288#define IGU_BC_DSB_NUM_SEGS 5
6289#define IGU_BC_NDSB_NUM_SEGS 2
6290#define IGU_NORM_DSB_NUM_SEGS 2
6291#define IGU_NORM_NDSB_NUM_SEGS 1
6292#define IGU_BC_BASE_DSB_PROD 128
6293#define IGU_NORM_BASE_DSB_PROD 136
6294
6295#define IGU_CTRL_CMD_TYPE_WR\
6296 1
6297#define IGU_CTRL_CMD_TYPE_RD\
6298 0
6299
6300#define IGU_SEG_ACCESS_NORM 0
6301#define IGU_SEG_ACCESS_DEF 1
6302#define IGU_SEG_ACCESS_ATTN 2
6303
6304 /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \
6305 [5:2] = 0; [1:0] = PF number) */
6306#define IGU_FID_ENCODE_IS_PF (0x1<<6)
6307#define IGU_FID_ENCODE_IS_PF_SHIFT 6
6308#define IGU_FID_VF_NUM_MASK (0x3f)
6309#define IGU_FID_PF_NUM_MASK (0x7)
6310
6311#define IGU_REG_MAPPING_MEMORY_VALID (1<<0)
6312#define IGU_REG_MAPPING_MEMORY_VECTOR_MASK (0x3F<<1)
6313#define IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT 1
6314#define IGU_REG_MAPPING_MEMORY_FID_MASK (0x7F<<7)
6315#define IGU_REG_MAPPING_MEMORY_FID_SHIFT 7
5284 6316
5285 6317
5286#define CDU_REGION_NUMBER_XCM_AG 2 6318#define CDU_REGION_NUMBER_XCM_AG 2
@@ -5362,3 +6394,4 @@ static inline u8 calc_crc8(u32 data, u8 crc)
5362} 6394}
5363 6395
5364 6396
6397#endif /* BNX2X_REG_H */
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index c74724461020..e535bfa08945 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1,6 +1,6 @@
1/* bnx2x_stats.c: Broadcom Everest network driver. 1/* bnx2x_stats.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
14 * Statistics and Link management by Yitchak Gertner 14 * Statistics and Link management by Yitchak Gertner
15 * 15 *
16 */ 16 */
17 #include "bnx2x_cmn.h" 17#include "bnx2x_cmn.h"
18 #include "bnx2x_stats.h" 18#include "bnx2x_stats.h"
19 19
20/* Statistics */ 20/* Statistics */
21 21
@@ -153,24 +153,26 @@ static inline long bnx2x_hilo(u32 *hiref)
153static void bnx2x_storm_stats_post(struct bnx2x *bp) 153static void bnx2x_storm_stats_post(struct bnx2x *bp)
154{ 154{
155 if (!bp->stats_pending) { 155 if (!bp->stats_pending) {
156 struct eth_query_ramrod_data ramrod_data = {0}; 156 struct common_query_ramrod_data ramrod_data = {0};
157 int i, rc; 157 int i, rc;
158 158
159 spin_lock_bh(&bp->stats_lock); 159 spin_lock_bh(&bp->stats_lock);
160 160
161 if (bp->stats_pending) {
162 spin_unlock_bh(&bp->stats_lock);
163 return;
164 }
165
161 ramrod_data.drv_counter = bp->stats_counter++; 166 ramrod_data.drv_counter = bp->stats_counter++;
162 ramrod_data.collect_port = bp->port.pmf ? 1 : 0; 167 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
163 for_each_queue(bp, i) 168 for_each_eth_queue(bp, i)
164 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id); 169 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
165 170
166 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 171 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
167 ((u32 *)&ramrod_data)[1], 172 ((u32 *)&ramrod_data)[1],
168 ((u32 *)&ramrod_data)[0], 0); 173 ((u32 *)&ramrod_data)[0], 1);
169 if (rc == 0) { 174 if (rc == 0)
170 /* stats ramrod has it's own slot on the spq */
171 bp->spq_left++;
172 bp->stats_pending = 1; 175 bp->stats_pending = 1;
173 }
174 176
175 spin_unlock_bh(&bp->stats_lock); 177 spin_unlock_bh(&bp->stats_lock);
176 } 178 }
@@ -188,20 +190,12 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
188 /* loader */ 190 /* loader */
189 if (bp->executer_idx) { 191 if (bp->executer_idx) {
190 int loader_idx = PMF_DMAE_C(bp); 192 int loader_idx = PMF_DMAE_C(bp);
193 u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
194 true, DMAE_COMP_GRC);
195 opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
191 196
192 memset(dmae, 0, sizeof(struct dmae_command)); 197 memset(dmae, 0, sizeof(struct dmae_command));
193 198 dmae->opcode = opcode;
194 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
195 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
196 DMAE_CMD_DST_RESET |
197#ifdef __BIG_ENDIAN
198 DMAE_CMD_ENDIANITY_B_DW_SWAP |
199#else
200 DMAE_CMD_ENDIANITY_DW_SWAP |
201#endif
202 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
203 DMAE_CMD_PORT_0) |
204 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
205 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); 199 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
206 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); 200 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
207 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + 201 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
@@ -253,26 +247,17 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
253 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 247 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
254 248
255 /* sanity */ 249 /* sanity */
256 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) { 250 if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) {
257 BNX2X_ERR("BUG!\n"); 251 BNX2X_ERR("BUG!\n");
258 return; 252 return;
259 } 253 }
260 254
261 bp->executer_idx = 0; 255 bp->executer_idx = 0;
262 256
263 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 257 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);
264 DMAE_CMD_C_ENABLE |
265 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
266#ifdef __BIG_ENDIAN
267 DMAE_CMD_ENDIANITY_B_DW_SWAP |
268#else
269 DMAE_CMD_ENDIANITY_DW_SWAP |
270#endif
271 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
272 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
273 258
274 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 259 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
275 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); 260 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
276 dmae->src_addr_lo = bp->port.port_stx >> 2; 261 dmae->src_addr_lo = bp->port.port_stx >> 2;
277 dmae->src_addr_hi = 0; 262 dmae->src_addr_hi = 0;
278 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 263 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
@@ -283,7 +268,7 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
283 dmae->comp_val = 1; 268 dmae->comp_val = 1;
284 269
285 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 270 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
286 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); 271 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
287 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; 272 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
288 dmae->src_addr_hi = 0; 273 dmae->src_addr_hi = 0;
289 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + 274 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
@@ -304,7 +289,6 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
304{ 289{
305 struct dmae_command *dmae; 290 struct dmae_command *dmae;
306 int port = BP_PORT(bp); 291 int port = BP_PORT(bp);
307 int vn = BP_E1HVN(bp);
308 u32 opcode; 292 u32 opcode;
309 int loader_idx = PMF_DMAE_C(bp); 293 int loader_idx = PMF_DMAE_C(bp);
310 u32 mac_addr; 294 u32 mac_addr;
@@ -319,16 +303,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
319 bp->executer_idx = 0; 303 bp->executer_idx = 0;
320 304
321 /* MCP */ 305 /* MCP */
322 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | 306 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
323 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | 307 true, DMAE_COMP_GRC);
324 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
325#ifdef __BIG_ENDIAN
326 DMAE_CMD_ENDIANITY_B_DW_SWAP |
327#else
328 DMAE_CMD_ENDIANITY_DW_SWAP |
329#endif
330 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
331 (vn << DMAE_CMD_E1HVN_SHIFT));
332 308
333 if (bp->port.port_stx) { 309 if (bp->port.port_stx) {
334 310
@@ -359,16 +335,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
359 } 335 }
360 336
361 /* MAC */ 337 /* MAC */
362 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 338 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
363 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | 339 true, DMAE_COMP_GRC);
364 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
365#ifdef __BIG_ENDIAN
366 DMAE_CMD_ENDIANITY_B_DW_SWAP |
367#else
368 DMAE_CMD_ENDIANITY_DW_SWAP |
369#endif
370 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
371 (vn << DMAE_CMD_E1HVN_SHIFT));
372 340
373 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { 341 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
374 342
@@ -379,13 +347,21 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
379 BIGMAC_REGISTER_TX_STAT_GTBYT */ 347 BIGMAC_REGISTER_TX_STAT_GTBYT */
380 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 348 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
381 dmae->opcode = opcode; 349 dmae->opcode = opcode;
382 dmae->src_addr_lo = (mac_addr + 350 if (CHIP_IS_E1x(bp)) {
351 dmae->src_addr_lo = (mac_addr +
352 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
353 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
383 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; 354 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
355 } else {
356 dmae->src_addr_lo = (mac_addr +
357 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
358 dmae->len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
359 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
360 }
361
384 dmae->src_addr_hi = 0; 362 dmae->src_addr_hi = 0;
385 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); 363 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
386 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); 364 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
387 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
388 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
389 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 365 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
390 dmae->comp_addr_hi = 0; 366 dmae->comp_addr_hi = 0;
391 dmae->comp_val = 1; 367 dmae->comp_val = 1;
@@ -394,15 +370,31 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
394 BIGMAC_REGISTER_RX_STAT_GRIPJ */ 370 BIGMAC_REGISTER_RX_STAT_GRIPJ */
395 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 371 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
396 dmae->opcode = opcode; 372 dmae->opcode = opcode;
397 dmae->src_addr_lo = (mac_addr +
398 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
399 dmae->src_addr_hi = 0; 373 dmae->src_addr_hi = 0;
400 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + 374 if (CHIP_IS_E1x(bp)) {
401 offsetof(struct bmac_stats, rx_stat_gr64_lo)); 375 dmae->src_addr_lo = (mac_addr +
402 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + 376 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
403 offsetof(struct bmac_stats, rx_stat_gr64_lo)); 377 dmae->dst_addr_lo =
404 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - 378 U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
405 BIGMAC_REGISTER_RX_STAT_GR64) >> 2; 379 offsetof(struct bmac1_stats, rx_stat_gr64_lo));
380 dmae->dst_addr_hi =
381 U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
382 offsetof(struct bmac1_stats, rx_stat_gr64_lo));
383 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
384 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
385 } else {
386 dmae->src_addr_lo =
387 (mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
388 dmae->dst_addr_lo =
389 U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
390 offsetof(struct bmac2_stats, rx_stat_gr64_lo));
391 dmae->dst_addr_hi =
392 U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
393 offsetof(struct bmac2_stats, rx_stat_gr64_lo));
394 dmae->len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
395 BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
396 }
397
406 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 398 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
407 dmae->comp_addr_hi = 0; 399 dmae->comp_addr_hi = 0;
408 dmae->comp_val = 1; 400 dmae->comp_val = 1;
@@ -483,16 +475,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
483 dmae->comp_val = 1; 475 dmae->comp_val = 1;
484 476
485 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 477 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
486 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 478 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
487 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | 479 true, DMAE_COMP_PCI);
488 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
489#ifdef __BIG_ENDIAN
490 DMAE_CMD_ENDIANITY_B_DW_SWAP |
491#else
492 DMAE_CMD_ENDIANITY_DW_SWAP |
493#endif
494 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
495 (vn << DMAE_CMD_E1HVN_SHIFT));
496 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : 480 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
497 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; 481 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
498 dmae->src_addr_hi = 0; 482 dmae->src_addr_hi = 0;
@@ -522,16 +506,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
522 bp->executer_idx = 0; 506 bp->executer_idx = 0;
523 memset(dmae, 0, sizeof(struct dmae_command)); 507 memset(dmae, 0, sizeof(struct dmae_command));
524 508
525 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | 509 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
526 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | 510 true, DMAE_COMP_PCI);
527 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
528#ifdef __BIG_ENDIAN
529 DMAE_CMD_ENDIANITY_B_DW_SWAP |
530#else
531 DMAE_CMD_ENDIANITY_DW_SWAP |
532#endif
533 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
534 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
535 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); 511 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
536 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); 512 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
537 dmae->dst_addr_lo = bp->func_stx >> 2; 513 dmae->dst_addr_lo = bp->func_stx >> 2;
@@ -571,7 +547,6 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
571 547
572static void bnx2x_bmac_stats_update(struct bnx2x *bp) 548static void bnx2x_bmac_stats_update(struct bnx2x *bp)
573{ 549{
574 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
575 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); 550 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
576 struct bnx2x_eth_stats *estats = &bp->eth_stats; 551 struct bnx2x_eth_stats *estats = &bp->eth_stats;
577 struct { 552 struct {
@@ -579,35 +554,74 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
579 u32 hi; 554 u32 hi;
580 } diff; 555 } diff;
581 556
582 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); 557 if (CHIP_IS_E1x(bp)) {
583 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); 558 struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
584 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); 559
585 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); 560 /* the macros below will use "bmac1_stats" type */
586 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); 561 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
587 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); 562 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
588 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); 563 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
589 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); 564 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
590 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); 565 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
591 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); 566 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
592 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); 567 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
593 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); 568 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
594 UPDATE_STAT64(tx_stat_gt127, 569 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
570 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
571 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
572 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
573 UPDATE_STAT64(tx_stat_gt127,
595 tx_stat_etherstatspkts65octetsto127octets); 574 tx_stat_etherstatspkts65octetsto127octets);
596 UPDATE_STAT64(tx_stat_gt255, 575 UPDATE_STAT64(tx_stat_gt255,
597 tx_stat_etherstatspkts128octetsto255octets); 576 tx_stat_etherstatspkts128octetsto255octets);
598 UPDATE_STAT64(tx_stat_gt511, 577 UPDATE_STAT64(tx_stat_gt511,
599 tx_stat_etherstatspkts256octetsto511octets); 578 tx_stat_etherstatspkts256octetsto511octets);
600 UPDATE_STAT64(tx_stat_gt1023, 579 UPDATE_STAT64(tx_stat_gt1023,
601 tx_stat_etherstatspkts512octetsto1023octets); 580 tx_stat_etherstatspkts512octetsto1023octets);
602 UPDATE_STAT64(tx_stat_gt1518, 581 UPDATE_STAT64(tx_stat_gt1518,
603 tx_stat_etherstatspkts1024octetsto1522octets); 582 tx_stat_etherstatspkts1024octetsto1522octets);
604 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047); 583 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
605 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095); 584 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
606 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216); 585 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
607 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383); 586 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
608 UPDATE_STAT64(tx_stat_gterr, 587 UPDATE_STAT64(tx_stat_gterr,
609 tx_stat_dot3statsinternalmactransmiterrors); 588 tx_stat_dot3statsinternalmactransmiterrors);
610 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl); 589 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
590
591 } else {
592 struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
593
594 /* the macros below will use "bmac2_stats" type */
595 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
596 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
597 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
598 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
599 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
600 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
601 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
602 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
603 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
604 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
605 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
606 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
607 UPDATE_STAT64(tx_stat_gt127,
608 tx_stat_etherstatspkts65octetsto127octets);
609 UPDATE_STAT64(tx_stat_gt255,
610 tx_stat_etherstatspkts128octetsto255octets);
611 UPDATE_STAT64(tx_stat_gt511,
612 tx_stat_etherstatspkts256octetsto511octets);
613 UPDATE_STAT64(tx_stat_gt1023,
614 tx_stat_etherstatspkts512octetsto1023octets);
615 UPDATE_STAT64(tx_stat_gt1518,
616 tx_stat_etherstatspkts1024octetsto1522octets);
617 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
618 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
619 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
620 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
621 UPDATE_STAT64(tx_stat_gterr,
622 tx_stat_dot3statsinternalmactransmiterrors);
623 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
624 }
611 625
612 estats->pause_frames_received_hi = 626 estats->pause_frames_received_hi =
613 pstats->mac_stx[1].rx_stat_bmac_xpf_hi; 627 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
@@ -757,7 +771,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
757 estats->no_buff_discard_hi = 0; 771 estats->no_buff_discard_hi = 0;
758 estats->no_buff_discard_lo = 0; 772 estats->no_buff_discard_lo = 0;
759 773
760 for_each_queue(bp, i) { 774 for_each_eth_queue(bp, i) {
761 struct bnx2x_fastpath *fp = &bp->fp[i]; 775 struct bnx2x_fastpath *fp = &bp->fp[i];
762 int cl_id = fp->cl_id; 776 int cl_id = fp->cl_id;
763 struct tstorm_per_client_stats *tclient = 777 struct tstorm_per_client_stats *tclient =
@@ -969,6 +983,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
969{ 983{
970 struct bnx2x_eth_stats *estats = &bp->eth_stats; 984 struct bnx2x_eth_stats *estats = &bp->eth_stats;
971 struct net_device_stats *nstats = &bp->dev->stats; 985 struct net_device_stats *nstats = &bp->dev->stats;
986 unsigned long tmp;
972 int i; 987 int i;
973 988
974 nstats->rx_packets = 989 nstats->rx_packets =
@@ -985,10 +1000,10 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
985 1000
986 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); 1001 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
987 1002
988 nstats->rx_dropped = estats->mac_discard; 1003 tmp = estats->mac_discard;
989 for_each_queue(bp, i) 1004 for_each_rx_queue(bp, i)
990 nstats->rx_dropped += 1005 tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
991 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); 1006 nstats->rx_dropped = tmp;
992 1007
993 nstats->tx_dropped = 0; 1008 nstats->tx_dropped = 0;
994 1009
@@ -1077,7 +1092,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1077 bp->dev->name, 1092 bp->dev->name,
1078 estats->brb_drop_lo, estats->brb_truncate_lo); 1093 estats->brb_drop_lo, estats->brb_truncate_lo);
1079 1094
1080 for_each_queue(bp, i) { 1095 for_each_eth_queue(bp, i) {
1081 struct bnx2x_fastpath *fp = &bp->fp[i]; 1096 struct bnx2x_fastpath *fp = &bp->fp[i];
1082 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; 1097 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1083 1098
@@ -1091,7 +1106,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1091 fp->rx_calls, fp->rx_pkt); 1106 fp->rx_calls, fp->rx_pkt);
1092 } 1107 }
1093 1108
1094 for_each_queue(bp, i) { 1109 for_each_eth_queue(bp, i) {
1095 struct bnx2x_fastpath *fp = &bp->fp[i]; 1110 struct bnx2x_fastpath *fp = &bp->fp[i];
1096 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; 1111 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1097 struct netdev_queue *txq = 1112 struct netdev_queue *txq =
@@ -1123,24 +1138,17 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
1123 1138
1124 bp->executer_idx = 0; 1139 bp->executer_idx = 0;
1125 1140
1126 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | 1141 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);
1127 DMAE_CMD_C_ENABLE |
1128 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
1129#ifdef __BIG_ENDIAN
1130 DMAE_CMD_ENDIANITY_B_DW_SWAP |
1131#else
1132 DMAE_CMD_ENDIANITY_DW_SWAP |
1133#endif
1134 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
1135 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
1136 1142
1137 if (bp->port.port_stx) { 1143 if (bp->port.port_stx) {
1138 1144
1139 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 1145 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1140 if (bp->func_stx) 1146 if (bp->func_stx)
1141 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); 1147 dmae->opcode = bnx2x_dmae_opcode_add_comp(
1148 opcode, DMAE_COMP_GRC);
1142 else 1149 else
1143 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); 1150 dmae->opcode = bnx2x_dmae_opcode_add_comp(
1151 opcode, DMAE_COMP_PCI);
1144 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 1152 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
1145 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); 1153 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
1146 dmae->dst_addr_lo = bp->port.port_stx >> 2; 1154 dmae->dst_addr_lo = bp->port.port_stx >> 2;
@@ -1164,7 +1172,8 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
1164 if (bp->func_stx) { 1172 if (bp->func_stx) {
1165 1173
1166 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 1174 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1167 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); 1175 dmae->opcode =
1176 bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1168 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); 1177 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
1169 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); 1178 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
1170 dmae->dst_addr_lo = bp->func_stx >> 2; 1179 dmae->dst_addr_lo = bp->func_stx >> 2;
@@ -1230,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
1230 if (unlikely(bp->panic)) 1239 if (unlikely(bp->panic))
1231 return; 1240 return;
1232 1241
1242 bnx2x_stats_stm[bp->stats_state][event].action(bp);
1243
1233 /* Protect a state change flow */ 1244 /* Protect a state change flow */
1234 spin_lock_bh(&bp->stats_lock); 1245 spin_lock_bh(&bp->stats_lock);
1235 state = bp->stats_state; 1246 state = bp->stats_state;
1236 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1247 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
1237 spin_unlock_bh(&bp->stats_lock); 1248 spin_unlock_bh(&bp->stats_lock);
1238 1249
1239 bnx2x_stats_stm[state][event].action(bp);
1240
1241 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1250 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
1242 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", 1251 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
1243 state, event, bp->stats_state); 1252 state, event, bp->stats_state);
@@ -1257,16 +1266,8 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1257 bp->executer_idx = 0; 1266 bp->executer_idx = 0;
1258 1267
1259 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 1268 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1260 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | 1269 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
1261 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | 1270 true, DMAE_COMP_PCI);
1262 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
1263#ifdef __BIG_ENDIAN
1264 DMAE_CMD_ENDIANITY_B_DW_SWAP |
1265#else
1266 DMAE_CMD_ENDIANITY_DW_SWAP |
1267#endif
1268 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
1269 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
1270 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 1271 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
1271 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); 1272 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
1272 dmae->dst_addr_lo = bp->port.port_stx >> 2; 1273 dmae->dst_addr_lo = bp->port.port_stx >> 2;
@@ -1283,9 +1284,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1283 1284
1284static void bnx2x_func_stats_base_init(struct bnx2x *bp) 1285static void bnx2x_func_stats_base_init(struct bnx2x *bp)
1285{ 1286{
1286 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX; 1287 int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
1287 int port = BP_PORT(bp);
1288 int func;
1289 u32 func_stx; 1288 u32 func_stx;
1290 1289
1291 /* sanity */ 1290 /* sanity */
@@ -1298,9 +1297,9 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
1298 func_stx = bp->func_stx; 1297 func_stx = bp->func_stx;
1299 1298
1300 for (vn = VN_0; vn < vn_max; vn++) { 1299 for (vn = VN_0; vn < vn_max; vn++) {
1301 func = 2*vn + port; 1300 int mb_idx = !CHIP_IS_E2(bp) ? 2*vn + BP_PORT(bp) : vn;
1302 1301
1303 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); 1302 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
1304 bnx2x_func_stats_init(bp); 1303 bnx2x_func_stats_init(bp);
1305 bnx2x_hw_stats_post(bp); 1304 bnx2x_hw_stats_post(bp);
1306 bnx2x_stats_comp(bp); 1305 bnx2x_stats_comp(bp);
@@ -1324,16 +1323,8 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
1324 bp->executer_idx = 0; 1323 bp->executer_idx = 0;
1325 memset(dmae, 0, sizeof(struct dmae_command)); 1324 memset(dmae, 0, sizeof(struct dmae_command));
1326 1325
1327 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 1326 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
1328 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | 1327 true, DMAE_COMP_PCI);
1329 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
1330#ifdef __BIG_ENDIAN
1331 DMAE_CMD_ENDIANITY_B_DW_SWAP |
1332#else
1333 DMAE_CMD_ENDIANITY_DW_SWAP |
1334#endif
1335 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
1336 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
1337 dmae->src_addr_lo = bp->func_stx >> 2; 1328 dmae->src_addr_lo = bp->func_stx >> 2;
1338 dmae->src_addr_hi = 0; 1329 dmae->src_addr_hi = 0;
1339 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base)); 1330 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
@@ -1351,8 +1342,9 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
1351void bnx2x_stats_init(struct bnx2x *bp) 1342void bnx2x_stats_init(struct bnx2x *bp)
1352{ 1343{
1353 int port = BP_PORT(bp); 1344 int port = BP_PORT(bp);
1354 int func = BP_FUNC(bp); 1345 int mb_idx = BP_FW_MB_IDX(bp);
1355 int i; 1346 int i;
1347 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
1356 1348
1357 bp->stats_pending = 0; 1349 bp->stats_pending = 0;
1358 bp->executer_idx = 0; 1350 bp->executer_idx = 0;
@@ -1361,7 +1353,7 @@ void bnx2x_stats_init(struct bnx2x *bp)
1361 /* port and func stats for management */ 1353 /* port and func stats for management */
1362 if (!BP_NOMCP(bp)) { 1354 if (!BP_NOMCP(bp)) {
1363 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx); 1355 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
1364 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); 1356 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
1365 1357
1366 } else { 1358 } else {
1367 bp->port.port_stx = 0; 1359 bp->port.port_stx = 0;
@@ -1394,6 +1386,19 @@ void bnx2x_stats_init(struct bnx2x *bp)
1394 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats)); 1386 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
1395 } 1387 }
1396 1388
1389 /* FW stats are currently collected for ETH clients only */
1390 for_each_eth_queue(bp, i) {
1391 /* Set initial stats counter in the stats ramrod data to -1 */
1392 int cl_id = bp->fp[i].cl_id;
1393
1394 stats->xstorm_common.client_statistics[cl_id].
1395 stats_counter = 0xffff;
1396 stats->ustorm_common.client_statistics[cl_id].
1397 stats_counter = 0xffff;
1398 stats->tstorm_common.client_statistics[cl_id].
1399 stats_counter = 0xffff;
1400 }
1401
1397 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats)); 1402 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
1398 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats)); 1403 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
1399 1404
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
index 38a4e908f4fb..45d14d8bc1aa 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -1,6 +1,6 @@
1/* bnx2x_stats.h: Broadcom Everest network driver. 1/* bnx2x_stats.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -9,6 +9,10 @@
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir 10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver 11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
12 */ 16 */
13 17
14#ifndef BNX2X_STATS_H 18#ifndef BNX2X_STATS_H
@@ -49,7 +53,6 @@ struct bnx2x_eth_q_stats {
49 u32 hw_csum_err; 53 u32 hw_csum_err;
50}; 54};
51 55
52#define BNX2X_NUM_Q_STATS 13
53#define Q_STATS_OFFSET32(stat_name) \ 56#define Q_STATS_OFFSET32(stat_name) \
54 (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) 57 (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
55 58
@@ -221,19 +224,14 @@ struct bnx2x_eth_stats {
221 u32 nig_timer_max; 224 u32 nig_timer_max;
222}; 225};
223 226
224#define BNX2X_NUM_STATS 43
225#define STATS_OFFSET32(stat_name) \ 227#define STATS_OFFSET32(stat_name) \
226 (offsetof(struct bnx2x_eth_stats, stat_name) / 4) 228 (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
227 229
228/* Forward declaration */ 230/* Forward declaration */
229struct bnx2x; 231struct bnx2x;
230 232
231
232void bnx2x_stats_init(struct bnx2x *bp); 233void bnx2x_stats_init(struct bnx2x *bp);
233 234
234extern const u32 dmae_reg_go_c[]; 235extern const u32 dmae_reg_go_c[];
235extern int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
236 u32 data_hi, u32 data_lo, int common);
237
238 236
239#endif /* BNX2X_STATS_H */ 237#endif /* BNX2X_STATS_H */